Merge "Remove common_time code" am: d22857101e
am: 3b2ceb3175
Change-Id: I91a45fc18dbc51a1be23c589d1af06af9c9421a7
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index e6c0d00..928a6bc 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -60,6 +60,39 @@
return OK;
}
+status_t PhysicalCaptureResultInfo::readFromParcel(const android::Parcel* parcel) {
+ status_t res;
+
+ mPhysicalCameraId.remove(mPhysicalCameraId.size());
+ mPhysicalCameraMetadata.clear();
+
+ if ((res = parcel->readString16(&mPhysicalCameraId)) != OK) {
+ ALOGE("%s: Failed to read camera id: %d", __FUNCTION__, res);
+ return res;
+ }
+
+ if ((res = mPhysicalCameraMetadata.readFromParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to read metadata from parcel: %d", __FUNCTION__, res);
+ return res;
+ }
+ return OK;
+}
+
+status_t PhysicalCaptureResultInfo::writeToParcel(android::Parcel* parcel) const {
+ status_t res;
+ if ((res = parcel->writeString16(mPhysicalCameraId)) != OK) {
+ ALOGE("%s: Failed to write physical camera ID to parcel: %d",
+ __FUNCTION__, res);
+ return res;
+ }
+ if ((res = mPhysicalCameraMetadata.writeToParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to write physical camera metadata to parcel: %d",
+ __FUNCTION__, res);
+ return res;
+ }
+ return OK;
+}
+
CaptureResult::CaptureResult() :
mMetadata(), mResultExtras() {
}
@@ -67,6 +100,7 @@
CaptureResult::CaptureResult(const CaptureResult &otherResult) {
mResultExtras = otherResult.mResultExtras;
mMetadata = otherResult.mMetadata;
+ mPhysicalMetadatas = otherResult.mPhysicalMetadatas;
}
status_t CaptureResult::readFromParcel(android::Parcel *parcel) {
@@ -79,6 +113,7 @@
}
mMetadata.clear();
+ mPhysicalMetadatas.clear();
status_t res = OK;
res = mMetadata.readFromParcel(parcel);
@@ -89,6 +124,34 @@
}
ALOGV("%s: Read metadata from parcel", __FUNCTION__);
+ int32_t physicalMetadataCount;
+ if ((res = parcel->readInt32(&physicalMetadataCount)) != OK) {
+ ALOGE("%s: Failed to read the physical metadata count from parcel: %d", __FUNCTION__, res);
+ return res;
+ }
+ if (physicalMetadataCount < 0) {
+ ALOGE("%s: Invalid physical metadata count from parcel: %d",
+ __FUNCTION__, physicalMetadataCount);
+ return BAD_VALUE;
+ }
+
+ for (int32_t i = 0; i < physicalMetadataCount; i++) {
+ String16 cameraId;
+ if ((res = parcel->readString16(&cameraId)) != OK) {
+ ALOGE("%s: Failed to read camera id: %d", __FUNCTION__, res);
+ return res;
+ }
+
+ CameraMetadata physicalMetadata;
+ if ((res = physicalMetadata.readFromParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to read metadata from parcel: %d", __FUNCTION__, res);
+ return res;
+ }
+
+ mPhysicalMetadatas.emplace(mPhysicalMetadatas.end(), cameraId, physicalMetadata);
+ }
+ ALOGV("%s: Read physical metadata from parcel", __FUNCTION__);
+
res = mResultExtras.readFromParcel(parcel);
if (res != OK) {
ALOGE("%s: Failed to read result extras from parcel.",
@@ -118,6 +181,27 @@
}
ALOGV("%s: Wrote metadata to parcel", __FUNCTION__);
+ int32_t physicalMetadataCount = static_cast<int32_t>(mPhysicalMetadatas.size());
+ res = parcel->writeInt32(physicalMetadataCount);
+ if (res != OK) {
+ ALOGE("%s: Failed to write physical metadata count to parcel: %d",
+ __FUNCTION__, res);
+ return BAD_VALUE;
+ }
+ for (const auto& physicalMetadata : mPhysicalMetadatas) {
+ if ((res = parcel->writeString16(physicalMetadata.mPhysicalCameraId)) != OK) {
+ ALOGE("%s: Failed to write physical camera ID to parcel: %d",
+ __FUNCTION__, res);
+ return res;
+ }
+ if ((res = physicalMetadata.mPhysicalCameraMetadata.writeToParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to write physical camera metadata to parcel: %d",
+ __FUNCTION__, res);
+ return res;
+ }
+ }
+ ALOGV("%s: Wrote physical camera metadata to parcel", __FUNCTION__);
+
res = mResultExtras.writeToParcel(parcel);
if (res != OK) {
ALOGE("%s: Failed to write result extras to parcel", __FUNCTION__);
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
index 5dc23eb..7575948 100644
--- a/camera/aidl/android/hardware/ICameraServiceProxy.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -46,8 +46,14 @@
const int CAMERA_FACING_EXTERNAL = 2;
/**
+ * Values for notifyCameraState api level
+ */
+ const int CAMERA_API_LEVEL_1 = 1;
+ const int CAMERA_API_LEVEL_2 = 2;
+
+ /**
* Update the status of a camera device.
*/
oneway void notifyCameraState(String cameraId, int facing, int newCameraState,
- String clientName);
+ String clientName, int apiLevel);
}
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
index 28252c0..58b19a3 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
@@ -18,6 +18,7 @@
import android.hardware.camera2.impl.CameraMetadataNative;
import android.hardware.camera2.impl.CaptureResultExtras;
+import android.hardware.camera2.impl.PhysicalCaptureResultInfo;
/** @hide */
interface ICameraDeviceCallbacks
@@ -30,12 +31,14 @@
const int ERROR_CAMERA_REQUEST = 3;
const int ERROR_CAMERA_RESULT = 4;
const int ERROR_CAMERA_BUFFER = 5;
+ const int ERROR_CAMERA_DISABLED = 6;
oneway void onDeviceError(int errorCode, in CaptureResultExtras resultExtras);
oneway void onDeviceIdle();
oneway void onCaptureStarted(in CaptureResultExtras resultExtras, long timestamp);
oneway void onResultReceived(in CameraMetadataNative result,
- in CaptureResultExtras resultExtras);
+ in CaptureResultExtras resultExtras,
+ in PhysicalCaptureResultInfo[] physicalCaptureResultInfos);
oneway void onPrepared(int streamId);
/**
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
index 0771fc8..4ced08c 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -79,8 +79,9 @@
* <p>
* @param operatingMode The kind of session to create; either NORMAL_MODE or
* CONSTRAINED_HIGH_SPEED_MODE. Must be a non-negative value.
+ * @param sessionParams Session-wide camera parameters
*/
- void endConfigure(int operatingMode);
+ void endConfigure(int operatingMode, in CameraMetadataNative sessionParams);
void deleteStream(int streamId);
@@ -140,5 +141,7 @@
void prepare2(int maxCount, int streamId);
+ void updateOutputConfiguration(int streamId, in OutputConfiguration outputConfiguration);
+
void finalizeOutputConfigurations(int streamId, in OutputConfiguration outputConfiguration);
}
diff --git a/camera/aidl/android/hardware/camera2/impl/PhysicalCaptureResultInfo.aidl b/camera/aidl/android/hardware/camera2/impl/PhysicalCaptureResultInfo.aidl
new file mode 100644
index 0000000..78d9b7b
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/impl/PhysicalCaptureResultInfo.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.impl;
+
+/** @hide */
+parcelable PhysicalCaptureResultInfo cpp_header "camera/CaptureResult.h";
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 0597950..1843ec4 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -18,6 +18,7 @@
// #define LOG_NDEBUG 0
#define LOG_TAG "CameraRequest"
#include <utils/Log.h>
+#include <utils/String16.h>
#include <camera/camera2/CaptureRequest.h>
@@ -42,16 +43,46 @@
return BAD_VALUE;
}
- mMetadata.clear();
mSurfaceList.clear();
+ mStreamIdxList.clear();
+ mSurfaceIdxList.clear();
+ mPhysicalCameraSettings.clear();
status_t err = OK;
- if ((err = mMetadata.readFromParcel(parcel)) != OK) {
- ALOGE("%s: Failed to read metadata from parcel", __FUNCTION__);
+ int32_t settingsCount;
+ if ((err = parcel->readInt32(&settingsCount)) != OK) {
+ ALOGE("%s: Failed to read the settings count from parcel: %d", __FUNCTION__, err);
return err;
}
- ALOGV("%s: Read metadata from parcel", __FUNCTION__);
+
+ if (settingsCount <= 0) {
+ ALOGE("%s: Settings count %d should always be positive!", __FUNCTION__, settingsCount);
+ return BAD_VALUE;
+ }
+
+ for (int32_t i = 0; i < settingsCount; i++) {
+ String16 id;
+ if ((err = parcel->readString16(&id)) != OK) {
+ ALOGE("%s: Failed to read camera id!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ CameraMetadata settings;
+ if ((err = settings.readFromParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to read metadata from parcel", __FUNCTION__);
+ return err;
+ }
+ ALOGV("%s: Read metadata from parcel", __FUNCTION__);
+ mPhysicalCameraSettings.push_back({std::string(String8(id).string()), settings});
+ }
+
+ int isReprocess = 0;
+ if ((err = parcel->readInt32(&isReprocess)) != OK) {
+ ALOGE("%s: Failed to read reprocessing from parcel", __FUNCTION__);
+ return err;
+ }
+ mIsReprocess = (isReprocess != 0);
int32_t size;
if ((err = parcel->readInt32(&size)) != OK) {
@@ -61,7 +92,7 @@
ALOGV("%s: Read surface list size = %d", __FUNCTION__, size);
// Do not distinguish null arrays from 0-sized arrays.
- for (int i = 0; i < size; ++i) {
+ for (int32_t i = 0; i < size; ++i) {
// Parcel.writeParcelableArray
size_t len;
const char16_t* className = parcel->readString16Inplace(&len);
@@ -88,12 +119,32 @@
mSurfaceList.push_back(surface);
}
- int isReprocess = 0;
- if ((err = parcel->readInt32(&isReprocess)) != OK) {
- ALOGE("%s: Failed to read reprocessing from parcel", __FUNCTION__);
+ int32_t streamSurfaceSize;
+ if ((err = parcel->readInt32(&streamSurfaceSize)) != OK) {
+ ALOGE("%s: Failed to read streamSurfaceSize from parcel", __FUNCTION__);
return err;
}
- mIsReprocess = (isReprocess != 0);
+
+ if (streamSurfaceSize < 0) {
+ ALOGE("%s: Bad streamSurfaceSize %d from parcel", __FUNCTION__, streamSurfaceSize);
+ return BAD_VALUE;
+ }
+
+ for (int32_t i = 0; i < streamSurfaceSize; ++i) {
+ int streamIdx;
+ if ((err = parcel->readInt32(&streamIdx)) != OK) {
+ ALOGE("%s: Failed to read stream index from parcel", __FUNCTION__);
+ return err;
+ }
+ mStreamIdxList.push_back(streamIdx);
+
+ int surfaceIdx;
+ if ((err = parcel->readInt32(&surfaceIdx)) != OK) {
+ ALOGE("%s: Failed to read surface index from parcel", __FUNCTION__);
+ return err;
+ }
+ mSurfaceIdxList.push_back(surfaceIdx);
+ }
return OK;
}
@@ -106,32 +157,62 @@
status_t err = OK;
- if ((err = mMetadata.writeToParcel(parcel)) != OK) {
+ int32_t settingsCount = static_cast<int32_t>(mPhysicalCameraSettings.size());
+
+ if ((err = parcel->writeInt32(settingsCount)) != OK) {
+ ALOGE("%s: Failed to write settings count!", __FUNCTION__);
return err;
}
- int32_t size = static_cast<int32_t>(mSurfaceList.size());
+ for (const auto &it : mPhysicalCameraSettings) {
+ if ((err = parcel->writeString16(String16(it.id.c_str()))) != OK) {
+ ALOGE("%s: Failed to camera id!", __FUNCTION__);
+ return err;
+ }
- // Send 0-sized arrays when it's empty. Do not send null arrays.
- parcel->writeInt32(size);
-
- for (int32_t i = 0; i < size; ++i) {
- // not sure if readParcelableArray does this, hard to tell from source
- parcel->writeString16(String16("android.view.Surface"));
-
- // Surface.writeToParcel
- view::Surface surfaceShim;
- surfaceShim.name = String16("unknown_name");
- surfaceShim.graphicBufferProducer = mSurfaceList[i]->getIGraphicBufferProducer();
- if ((err = surfaceShim.writeToParcel(parcel)) != OK) {
- ALOGE("%s: Failed to write output target Surface %d to parcel: %s (%d)",
- __FUNCTION__, i, strerror(-err), err);
+ if ((err = it.settings.writeToParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to write settings!", __FUNCTION__);
return err;
}
}
parcel->writeInt32(mIsReprocess ? 1 : 0);
+ if (mSurfaceConverted) {
+ parcel->writeInt32(0); // 0-sized array
+ } else {
+ int32_t size = static_cast<int32_t>(mSurfaceList.size());
+
+ // Send 0-sized arrays when it's empty. Do not send null arrays.
+ parcel->writeInt32(size);
+
+ for (int32_t i = 0; i < size; ++i) {
+ // not sure if readParcelableArray does this, hard to tell from source
+ parcel->writeString16(String16("android.view.Surface"));
+
+ // Surface.writeToParcel
+ view::Surface surfaceShim;
+ surfaceShim.name = String16("unknown_name");
+ surfaceShim.graphicBufferProducer = mSurfaceList[i]->getIGraphicBufferProducer();
+ if ((err = surfaceShim.writeToParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to write output target Surface %d to parcel: %s (%d)",
+ __FUNCTION__, i, strerror(-err), err);
+ return err;
+ }
+ }
+ }
+
+ parcel->writeInt32(mStreamIdxList.size());
+ for (size_t i = 0; i < mStreamIdxList.size(); ++i) {
+ if ((err = parcel->writeInt32(mStreamIdxList[i])) != OK) {
+ ALOGE("%s: Failed to write stream index to parcel", __FUNCTION__);
+ return err;
+ }
+ if ((err = parcel->writeInt32(mSurfaceIdxList[i])) != OK) {
+ ALOGE("%s: Failed to write surface index to parcel", __FUNCTION__);
+ return err;
+ }
+ }
return OK;
}
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 468a1eb..feb04c2 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -1,6 +1,6 @@
/*
**
-** Copyright 2015, The Android Open Source Project
+** Copyright 2015-2018, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -64,6 +64,10 @@
return mIsShared;
}
+String16 OutputConfiguration::getPhysicalCameraId() const {
+ return mPhysicalCameraId;
+}
+
OutputConfiguration::OutputConfiguration() :
mRotation(INVALID_ROTATION),
mSurfaceSetID(INVALID_SET_ID),
@@ -139,6 +143,8 @@
return err;
}
+ parcel->readString16(&mPhysicalCameraId);
+
mRotation = rotation;
mSurfaceSetID = setID;
mSurfaceType = surfaceType;
@@ -153,19 +159,20 @@
mGbps.push_back(surface.graphicBufferProducer);
}
- ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d",
- __FUNCTION__, mRotation, mSurfaceSetID, mSurfaceType);
+ ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d,"
+ " physicalCameraId = %s", __FUNCTION__, mRotation, mSurfaceSetID,
+ mSurfaceType, String8(mPhysicalCameraId).string());
return err;
}
OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
- int surfaceSetID) {
+ int surfaceSetID, bool isShared) {
mGbps.push_back(gbp);
mRotation = rotation;
mSurfaceSetID = surfaceSetID;
mIsDeferred = false;
- mIsShared = false;
+ mIsShared = isShared;
}
status_t OutputConfiguration::writeToParcel(android::Parcel* parcel) const {
@@ -204,6 +211,9 @@
err = parcel->writeParcelableVector(surfaceShims);
if (err != OK) return err;
+ err = parcel->writeString16(mPhysicalCameraId);
+ if (err != OK) return err;
+
return OK;
}
diff --git a/camera/include/camera/CaptureResult.h b/camera/include/camera/CaptureResult.h
index 917d953..56fa178 100644
--- a/camera/include/camera/CaptureResult.h
+++ b/camera/include/camera/CaptureResult.h
@@ -91,14 +91,36 @@
virtual status_t readFromParcel(const android::Parcel* parcel) override;
virtual status_t writeToParcel(android::Parcel* parcel) const override;
};
+
+struct PhysicalCaptureResultInfo : public android::Parcelable {
+
+ PhysicalCaptureResultInfo()
+ : mPhysicalCameraId(),
+ mPhysicalCameraMetadata() {
+ }
+ PhysicalCaptureResultInfo(const String16& cameraId,
+ const CameraMetadata& cameraMetadata)
+ : mPhysicalCameraId(cameraId),
+ mPhysicalCameraMetadata(cameraMetadata) {
+ }
+
+ String16 mPhysicalCameraId;
+ CameraMetadata mPhysicalCameraMetadata;
+
+ virtual status_t readFromParcel(const android::Parcel* parcel) override;
+ virtual status_t writeToParcel(android::Parcel* parcel) const override;
+};
+
} // namespace impl
} // namespace camera2
} // namespace hardware
using hardware::camera2::impl::CaptureResultExtras;
+using hardware::camera2::impl::PhysicalCaptureResultInfo;
struct CaptureResult : public virtual LightRefBase<CaptureResult> {
CameraMetadata mMetadata;
+ std::vector<PhysicalCaptureResultInfo> mPhysicalMetadatas;
CaptureResultExtras mResultExtras;
CaptureResult();
diff --git a/camera/include/camera/camera2/CaptureRequest.h b/camera/include/camera/camera2/CaptureRequest.h
index 0180183..506abab 100644
--- a/camera/include/camera/camera2/CaptureRequest.h
+++ b/camera/include/camera/camera2/CaptureRequest.h
@@ -40,14 +40,35 @@
CaptureRequest(CaptureRequest&& rhs) noexcept;
virtual ~CaptureRequest();
- CameraMetadata mMetadata;
+ struct PhysicalCameraSettings {
+ std::string id;
+ CameraMetadata settings;
+ };
+ std::vector<PhysicalCameraSettings> mPhysicalCameraSettings;
+
+ // Used by NDK client to pass surfaces by stream/surface index.
+ bool mSurfaceConverted = false;
+
+ // Starting in Android O, creating a Surface from a Parcel takes one extra
+ // IPC call.
Vector<sp<Surface> > mSurfaceList;
+ // Optional way of passing the surface list, since passing Surfaces over binder
+ // is expensive. Use the stream/surface index from the current output configuration
+ // to represent a configured output Surface. When the stream/surface index is used,
+ // set mSurfaceList to zero length to save unparcel time.
+ Vector<int> mStreamIdxList;
+ Vector<int> mSurfaceIdxList; // per stream surface list index
+
bool mIsReprocess;
+ void* mContext; // arbitrary user context from NDK apps, null for java apps
+
/**
* Keep impl up-to-date with CaptureRequest.java in frameworks/base
*/
+ // used by cameraserver to receive CaptureRequest from java/NDK client
status_t readFromParcel(const android::Parcel* parcel) override;
+ // used by NDK client to send CaptureRequest to cameraserver
status_t writeToParcel(android::Parcel* parcel) const override;
};
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index 8e641c7..a80f44b 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 The Android Open Source Project
+ * Copyright (C) 2015-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -46,6 +46,7 @@
int getHeight() const;
bool isDeferred() const;
bool isShared() const;
+ String16 getPhysicalCameraId() const;
/**
* Keep impl up-to-date with OutputConfiguration.java in frameworks/base
*/
@@ -64,7 +65,7 @@
OutputConfiguration(const android::Parcel& parcel);
OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
- int surfaceSetID = INVALID_SET_ID);
+ int surfaceSetID = INVALID_SET_ID, bool isShared = false);
bool operator == (const OutputConfiguration& other) const {
return ( mRotation == other.mRotation &&
@@ -74,7 +75,8 @@
mHeight == other.mHeight &&
mIsDeferred == other.mIsDeferred &&
mIsShared == other.mIsShared &&
- gbpsEqual(other));
+ gbpsEqual(other) &&
+ mPhysicalCameraId == other.mPhysicalCameraId );
}
bool operator != (const OutputConfiguration& other) const {
return !(*this == other);
@@ -102,6 +104,9 @@
if (mIsShared != other.mIsShared) {
return mIsShared < other.mIsShared;
}
+ if (mPhysicalCameraId != other.mPhysicalCameraId) {
+ return mPhysicalCameraId < other.mPhysicalCameraId;
+ }
return gbpsLessThan(other);
}
bool operator > (const OutputConfiguration& other) const {
@@ -110,6 +115,7 @@
bool gbpsEqual(const OutputConfiguration& other) const;
bool gbpsLessThan(const OutputConfiguration& other) const;
+ void addGraphicProducer(sp<IGraphicBufferProducer> gbp) {mGbps.push_back(gbp);}
private:
std::vector<sp<IGraphicBufferProducer>> mGbps;
int mRotation;
@@ -119,8 +125,7 @@
int mHeight;
bool mIsDeferred;
bool mIsShared;
- // helper function
- static String16 readMaybeEmptyString16(const android::Parcel* parcel);
+ String16 mPhysicalCameraId;
};
} // namespace params
} // namespace camera2
diff --git a/camera/ndk/NdkCameraCaptureSession.cpp b/camera/ndk/NdkCameraCaptureSession.cpp
index 2a6b182..fd95296 100644
--- a/camera/ndk/NdkCameraCaptureSession.cpp
+++ b/camera/ndk/NdkCameraCaptureSession.cpp
@@ -135,3 +135,19 @@
}
return session->abortCaptures();
}
+
+EXPORT
+camera_status_t ACameraCaptureSession_updateSharedOutput(ACameraCaptureSession* session,
+ ACaptureSessionOutput* output) {
+ ATRACE_CALL();
+ if (session == nullptr) {
+ ALOGE("%s: Error: session is null", __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (session->isClosed()) {
+ ALOGE("%s: session %p is already closed", __FUNCTION__, session);
+ return ACAMERA_ERROR_SESSION_CLOSED;
+ }
+ return session->updateOutputConfiguration(output);
+}
diff --git a/camera/ndk/NdkCameraDevice.cpp b/camera/ndk/NdkCameraDevice.cpp
index 9f6d1f7..812a312 100644
--- a/camera/ndk/NdkCameraDevice.cpp
+++ b/camera/ndk/NdkCameraDevice.cpp
@@ -103,11 +103,74 @@
__FUNCTION__, window, out);
return ACAMERA_ERROR_INVALID_PARAMETER;
}
- *out = new ACaptureSessionOutput(window);
+ *out = new ACaptureSessionOutput(window, false);
return ACAMERA_OK;
}
EXPORT
+camera_status_t ACaptureSessionSharedOutput_create(
+ ANativeWindow* window, /*out*/ACaptureSessionOutput** out) {
+ ATRACE_CALL();
+ if (window == nullptr || out == nullptr) {
+ ALOGE("%s: Error: bad argument. window %p, out %p",
+ __FUNCTION__, window, out);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ *out = new ACaptureSessionOutput(window, true);
+ return ACAMERA_OK;
+}
+
+EXPORT
+camera_status_t ACaptureSessionSharedOutput_add(ACaptureSessionOutput *out,
+ ANativeWindow* window) {
+ ATRACE_CALL();
+ if ((window == nullptr) || (out == nullptr)) {
+ ALOGE("%s: Error: bad argument. window %p, out %p",
+ __FUNCTION__, window, out);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ if (!out->mIsShared) {
+ ALOGE("%s: Error trying to insert a new window in non-shared output configuration",
+ __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_OPERATION;
+ }
+ if (out->mWindow == window) {
+ ALOGE("%s: Error trying to add the same window associated with the output configuration",
+ __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+
+ auto insert = out->mSharedWindows.insert(window);
+ camera_status_t ret = (insert.second) ? ACAMERA_OK : ACAMERA_ERROR_INVALID_PARAMETER;
+ return ret;
+}
+
+EXPORT
+camera_status_t ACaptureSessionSharedOutput_remove(ACaptureSessionOutput *out,
+ ANativeWindow* window) {
+ ATRACE_CALL();
+ if ((window == nullptr) || (out == nullptr)) {
+ ALOGE("%s: Error: bad argument. window %p, out %p",
+ __FUNCTION__, window, out);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ if (!out->mIsShared) {
+ ALOGE("%s: Error trying to remove a window in non-shared output configuration",
+ __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_OPERATION;
+ }
+ if (out->mWindow == window) {
+ ALOGE("%s: Error trying to remove the same window associated with the output configuration",
+ __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+
+ auto remove = out->mSharedWindows.erase(window);
+ camera_status_t ret = (remove) ? ACAMERA_OK : ACAMERA_ERROR_INVALID_PARAMETER;
+ return ret;
+}
+
+EXPORT
void ACaptureSessionOutput_free(ACaptureSessionOutput* output) {
ATRACE_CALL();
if (output != nullptr) {
@@ -157,5 +220,21 @@
__FUNCTION__, device, outputs, callbacks, session);
return ACAMERA_ERROR_INVALID_PARAMETER;
}
- return device->createCaptureSession(outputs, callbacks, session);
+ return device->createCaptureSession(outputs, nullptr, callbacks, session);
+}
+
+EXPORT
+camera_status_t ACameraDevice_createCaptureSessionWithSessionParameters(
+ ACameraDevice* device,
+ const ACaptureSessionOutputContainer* outputs,
+ const ACaptureRequest* sessionParameters,
+ const ACameraCaptureSession_stateCallbacks* callbacks,
+ /*out*/ACameraCaptureSession** session) {
+ ATRACE_CALL();
+ if (device == nullptr || outputs == nullptr || callbacks == nullptr || session == nullptr) {
+ ALOGE("%s: Error: invalid input: device %p, outputs %p, callbacks %p, session %p",
+ __FUNCTION__, device, outputs, callbacks, session);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ return device->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
diff --git a/camera/ndk/NdkCaptureRequest.cpp b/camera/ndk/NdkCaptureRequest.cpp
index 5b4c180..ac1856b 100644
--- a/camera/ndk/NdkCaptureRequest.cpp
+++ b/camera/ndk/NdkCaptureRequest.cpp
@@ -142,3 +142,40 @@
delete request;
return;
}
+
+EXPORT
+camera_status_t ACaptureRequest_setUserContext(
+ ACaptureRequest* request, void* context) {
+ if (request == nullptr) {
+ ALOGE("%s: invalid argument! request is NULL", __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ return request->setContext(context);
+}
+
+EXPORT
+camera_status_t ACaptureRequest_getUserContext(
+ const ACaptureRequest* request, /*out*/void** context) {
+ if (request == nullptr || context == nullptr) {
+ ALOGE("%s: invalid argument! request %p, context %p",
+ __FUNCTION__, request, context);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ return request->getContext(context);
+}
+
+EXPORT
+ACaptureRequest* ACaptureRequest_copy(const ACaptureRequest* src) {
+ ATRACE_CALL();
+ if (src == nullptr) {
+ ALOGE("%s: src is null!", __FUNCTION__);
+ return nullptr;
+ }
+
+ ACaptureRequest* pRequest = new ACaptureRequest();
+ pRequest->settings = new ACameraMetadata(*(src->settings));
+ pRequest->targets = new ACameraOutputTargets();
+ *(pRequest->targets) = *(src->targets);
+ pRequest->context = src->context;
+ return pRequest;
+}
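A hedged, application-side usage sketch of the new user-context and copy entry points; "request" and "myState" are placeholder names, and ACaptureRequest_free is the existing NDK release function:

    // Sketch only: attach app state to a request, read it back, and clone the request.
    ACaptureRequest_setUserContext(request, myState);

    void* ctx = nullptr;
    ACaptureRequest_getUserContext(request, &ctx);            // ctx now equals myState

    ACaptureRequest* clone = ACaptureRequest_copy(request);   // copies settings, targets, context
    ACaptureRequest_free(clone);
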
diff --git a/camera/ndk/impl/ACameraCaptureSession.cpp b/camera/ndk/impl/ACameraCaptureSession.cpp
index b9c159d..f60e5fd 100644
--- a/camera/ndk/impl/ACameraCaptureSession.cpp
+++ b/camera/ndk/impl/ACameraCaptureSession.cpp
@@ -148,6 +148,23 @@
return ret;
}
+camera_status_t ACameraCaptureSession::updateOutputConfiguration(ACaptureSessionOutput *output) {
+ sp<CameraDevice> dev = getDeviceSp();
+ if (dev == nullptr) {
+ ALOGE("Error: Device associated with session %p has been closed!", this);
+ return ACAMERA_ERROR_SESSION_CLOSED;
+ }
+
+ camera_status_t ret;
+ dev->lockDeviceForSessionOps();
+ {
+ Mutex::Autolock _l(mSessionLock);
+ ret = dev->updateOutputConfigurationLocked(output);
+ }
+ dev->unlockDevice();
+ return ret;
+}
+
ACameraDevice*
ACameraCaptureSession::getDevice() {
Mutex::Autolock _l(mSessionLock);
diff --git a/camera/ndk/impl/ACameraCaptureSession.h b/camera/ndk/impl/ACameraCaptureSession.h
index 339c665..a2068e7 100644
--- a/camera/ndk/impl/ACameraCaptureSession.h
+++ b/camera/ndk/impl/ACameraCaptureSession.h
@@ -24,7 +24,8 @@
using namespace android;
struct ACaptureSessionOutput {
- explicit ACaptureSessionOutput(ANativeWindow* window) : mWindow(window) {};
+ explicit ACaptureSessionOutput(ANativeWindow* window, bool isShared = false) :
+ mWindow(window), mIsShared(isShared) {};
bool operator == (const ACaptureSessionOutput& other) const {
return mWindow == other.mWindow;
@@ -40,6 +41,8 @@
}
ANativeWindow* mWindow;
+ std::set<ANativeWindow *> mSharedWindows;
+ bool mIsShared;
int mRotation = CAMERA3_STREAM_ROTATION_0;
};
@@ -89,6 +92,8 @@
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId);
+ camera_status_t updateOutputConfiguration(ACaptureSessionOutput *output);
+
ACameraDevice* getDevice();
private:
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 907802c..907debc 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -157,6 +157,7 @@
camera_status_t
CameraDevice::createCaptureSession(
const ACaptureSessionOutputContainer* outputs,
+ const ACaptureRequest* sessionParameters,
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session) {
sp<ACameraCaptureSession> currentSession = mCurrentSession.promote();
@@ -172,7 +173,7 @@
}
// Create new session
- ret = configureStreamsLocked(outputs);
+ ret = configureStreamsLocked(outputs, sessionParameters);
if (ret != ACAMERA_OK) {
ALOGE("Fail to create new session. cannot configure streams");
return ret;
@@ -289,13 +290,93 @@
return ACAMERA_OK;
}
+camera_status_t CameraDevice::updateOutputConfigurationLocked(ACaptureSessionOutput *output) {
+ camera_status_t ret = checkCameraClosedOrErrorLocked();
+ if (ret != ACAMERA_OK) {
+ return ret;
+ }
+
+ if (output == nullptr) {
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!output->mIsShared) {
+ ALOGE("Error output configuration is not shared");
+ return ACAMERA_ERROR_INVALID_OPERATION;
+ }
+
+ int32_t streamId = -1;
+ for (auto& kvPair : mConfiguredOutputs) {
+ if (kvPair.second.first == output->mWindow) {
+ streamId = kvPair.first;
+ break;
+ }
+ }
+ if (streamId < 0) {
+ ALOGE("Error: Invalid output configuration");
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+
+ sp<IGraphicBufferProducer> iGBP(nullptr);
+ ret = getIGBPfromAnw(output->mWindow, iGBP);
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera device %s failed to extract graphic producer from native window",
+ getId());
+ return ret;
+ }
+
+ OutputConfiguration outConfig(iGBP, output->mRotation, OutputConfiguration::INVALID_SET_ID,
+ true);
+
+ for (auto& anw : output->mSharedWindows) {
+ ret = getIGBPfromAnw(anw, iGBP);
+ if (ret != ACAMERA_OK) {
+ ALOGE("Camera device %s failed to extract graphic producer from native window",
+ getId());
+ return ret;
+ }
+ outConfig.addGraphicProducer(iGBP);
+ }
+
+ auto remoteRet = mRemote->updateOutputConfiguration(streamId, outConfig);
+ if (!remoteRet.isOk()) {
+ switch (remoteRet.serviceSpecificErrorCode()) {
+ case hardware::ICameraService::ERROR_INVALID_OPERATION:
+ ALOGE("Camera device %s invalid operation: %s", getId(),
+ remoteRet.toString8().string());
+ return ACAMERA_ERROR_INVALID_OPERATION;
+ break;
+ case hardware::ICameraService::ERROR_ALREADY_EXISTS:
+ ALOGE("Camera device %s output surface already exists: %s", getId(),
+ remoteRet.toString8().string());
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ break;
+ case hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT:
+ ALOGE("Camera device %s invalid input argument: %s", getId(),
+ remoteRet.toString8().string());
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ break;
+ default:
+ ALOGE("Camera device %s failed to add shared output: %s", getId(),
+ remoteRet.toString8().string());
+ return ACAMERA_ERROR_UNKNOWN;
+ }
+ }
+ mConfiguredOutputs[streamId] = std::make_pair(output->mWindow, outConfig);
+
+ return ACAMERA_OK;
+}
+
camera_status_t
CameraDevice::allocateCaptureRequest(
const ACaptureRequest* request, /*out*/sp<CaptureRequest>& outReq) {
camera_status_t ret;
sp<CaptureRequest> req(new CaptureRequest());
- req->mMetadata = request->settings->getInternalData();
+ req->mPhysicalCameraSettings.push_back({std::string(mCameraId.string()),
+ request->settings->getInternalData()});
req->mIsReprocess = false; // NDK does not support reprocessing yet
+ req->mContext = request->context;
+ req->mSurfaceConverted = true; // set to true, and fill in stream/surface idx to speed up IPC
for (auto outputTarget : request->targets->mOutputs) {
ANativeWindow* anw = outputTarget.mWindow;
@@ -306,7 +387,31 @@
return ret;
}
req->mSurfaceList.push_back(surface);
+
+ bool found = false;
+ // lookup stream/surface ID
+ for (const auto& kvPair : mConfiguredOutputs) {
+ int streamId = kvPair.first;
+ const OutputConfiguration& outConfig = kvPair.second.second;
+ const auto& gbps = outConfig.getGraphicBufferProducers();
+ for (int surfaceId = 0; surfaceId < (int) gbps.size(); surfaceId++) {
+ if (gbps[surfaceId] == surface->getIGraphicBufferProducer()) {
+ found = true;
+ req->mStreamIdxList.push_back(streamId);
+ req->mSurfaceIdxList.push_back(surfaceId);
+ break;
+ }
+ }
+ if (found) {
+ break;
+ }
+ }
+ if (!found) {
+ ALOGE("Unconfigured output target %p in capture request!", anw);
+ return ret;
+ }
}
+
outReq = req;
return ACAMERA_OK;
}
@@ -314,7 +419,7 @@
ACaptureRequest*
CameraDevice::allocateACaptureRequest(sp<CaptureRequest>& req) {
ACaptureRequest* pRequest = new ACaptureRequest();
- CameraMetadata clone = req->mMetadata;
+ CameraMetadata clone = req->mPhysicalCameraSettings.begin()->settings;
pRequest->settings = new ACameraMetadata(clone.release(), ACameraMetadata::ACM_REQUEST);
pRequest->targets = new ACameraOutputTargets();
for (size_t i = 0; i < req->mSurfaceList.size(); i++) {
@@ -322,6 +427,7 @@
ACameraOutputTarget outputTarget(anw);
pRequest->targets->mOutputs.insert(outputTarget);
}
+ pRequest->context = req->mContext;
return pRequest;
}
@@ -356,7 +462,7 @@
}
// No new session, unconfigure now
- camera_status_t ret = configureStreamsLocked(nullptr);
+ camera_status_t ret = configureStreamsLocked(nullptr, nullptr);
if (ret != ACAMERA_OK) {
ALOGE("Unconfigure stream failed. Device might still be configured! ret %d", ret);
}
@@ -486,17 +592,11 @@
CameraDevice::getIGBPfromAnw(
ANativeWindow* anw,
sp<IGraphicBufferProducer>& out) {
- if (anw == nullptr) {
- ALOGE("Error: output ANativeWindow is null");
- return ACAMERA_ERROR_INVALID_PARAMETER;
+ sp<Surface> surface;
+ camera_status_t ret = getSurfaceFromANativeWindow(anw, surface);
+ if (ret != ACAMERA_OK) {
+ return ret;
}
- int value;
- int err = (*anw->query)(anw, NATIVE_WINDOW_CONCRETE_TYPE, &value);
- if (err != OK || value != NATIVE_WINDOW_SURFACE) {
- ALOGE("Error: ANativeWindow is not backed by Surface!");
- return ACAMERA_ERROR_INVALID_PARAMETER;
- }
- const sp<Surface> surface(static_cast<Surface*>(anw));
out = surface->getIGraphicBufferProducer();
return ACAMERA_OK;
}
@@ -520,7 +620,8 @@
}
camera_status_t
-CameraDevice::configureStreamsLocked(const ACaptureSessionOutputContainer* outputs) {
+CameraDevice::configureStreamsLocked(const ACaptureSessionOutputContainer* outputs,
+ const ACaptureRequest* sessionParameters) {
ACaptureSessionOutputContainer emptyOutput;
if (outputs == nullptr) {
outputs = &emptyOutput;
@@ -540,7 +641,8 @@
return ret;
}
outputSet.insert(std::make_pair(
- anw, OutputConfiguration(iGBP, outConfig.mRotation)));
+ anw, OutputConfiguration(iGBP, outConfig.mRotation,
+ OutputConfiguration::INVALID_SET_ID, outConfig.mIsShared)));
}
auto addSet = outputSet;
std::vector<int> deleteList;
@@ -615,7 +717,11 @@
mConfiguredOutputs.insert(std::make_pair(streamId, outputPair));
}
- remoteRet = mRemote->endConfigure(/*isConstrainedHighSpeed*/ false);
+ CameraMetadata params;
+ if ((sessionParameters != nullptr) && (sessionParameters->settings != nullptr)) {
+ params.append(sessionParameters->settings->getInternalData());
+ }
+ remoteRet = mRemote->endConfigure(/*isConstrainedHighSpeed*/ false, params);
if (remoteRet.serviceSpecificErrorCode() == hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT) {
ALOGE("Camera device %s cannnot support app output configuration: %s", getId(),
remoteRet.toString8().string());
@@ -730,19 +836,26 @@
setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_SERVICE);
return;
}
- ANativeWindow* anw = outputPairIt->second.first;
- ALOGV("Camera %s Lost output buffer for ANW %p frame %" PRId64,
- getId(), anw, frameNumber);
+ const auto& gbps = outputPairIt->second.second.getGraphicBufferProducers();
+ for (const auto& outGbp : gbps) {
+ for (auto surface : request->mSurfaceList) {
+ if (surface->getIGraphicBufferProducer() == outGbp) {
+ ANativeWindow* anw = static_cast<ANativeWindow*>(surface.get());
+ ALOGV("Camera %s Lost output buffer for ANW %p frame %" PRId64,
+ getId(), anw, frameNumber);
- sp<AMessage> msg = new AMessage(kWhatCaptureBufferLost, mHandler);
- msg->setPointer(kContextKey, cbh.mCallbacks.context);
- msg->setObject(kSessionSpKey, session);
- msg->setPointer(kCallbackFpKey, (void*) onBufferLost);
- msg->setObject(kCaptureRequestKey, request);
- msg->setPointer(kAnwKey, (void*) anw);
- msg->setInt64(kFrameNumberKey, frameNumber);
- postSessionMsgAndCleanup(msg);
+ sp<AMessage> msg = new AMessage(kWhatCaptureBufferLost, mHandler);
+ msg->setPointer(kContextKey, cbh.mCallbacks.context);
+ msg->setObject(kSessionSpKey, session);
+ msg->setPointer(kCallbackFpKey, (void*) onBufferLost);
+ msg->setObject(kCaptureRequestKey, request);
+ msg->setPointer(kAnwKey, (void*) anw);
+ msg->setInt64(kFrameNumberKey, frameNumber);
+ postSessionMsgAndCleanup(msg);
+ }
+ }
+ }
} else { // Handle other capture failures
// Fire capture failure callback if there is one registered
ACameraCaptureSession_captureCallback_failed onError = cbh.mCallbacks.onCaptureFailed;
@@ -1293,7 +1406,9 @@
binder::Status
CameraDevice::ServiceCallback::onResultReceived(
const CameraMetadata& metadata,
- const CaptureResultExtras& resultExtras) {
+ const CaptureResultExtras& resultExtras,
+ const std::vector<PhysicalCaptureResultInfo>& physicalResultInfos) {
+ (void) physicalResultInfos;
binder::Status ret = binder::Status::ok();
sp<CameraDevice> dev = mDevice.promote();
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 6ed3881..1369148 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -36,7 +36,8 @@
#include <camera/camera2/OutputConfiguration.h>
#include <camera/camera2/CaptureRequest.h>
-#include <camera/NdkCameraDevice.h>
+#include <camera/NdkCameraManager.h>
+#include <camera/NdkCameraCaptureSession.h>
#include "ACameraMetadata.h"
namespace android {
@@ -59,6 +60,7 @@
camera_status_t createCaptureSession(
const ACaptureSessionOutputContainer* outputs,
+ const ACaptureRequest* sessionParameters,
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session);
@@ -72,7 +74,8 @@
binder::Status onCaptureStarted(const CaptureResultExtras& resultExtras,
int64_t timestamp) override;
binder::Status onResultReceived(const CameraMetadata& metadata,
- const CaptureResultExtras& resultExtras) override;
+ const CaptureResultExtras& resultExtras,
+ const std::vector<PhysicalCaptureResultInfo>& physicalResultInfos) override;
binder::Status onPrepared(int streamId) override;
binder::Status onRequestQueueEmpty() override;
binder::Status onRepeatingRequestError(int64_t lastFrameNumber,
@@ -122,7 +125,9 @@
/*out*/int* captureSequenceId,
bool isRepeating);
- static camera_status_t allocateCaptureRequest(
+ camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
+
+ camera_status_t allocateCaptureRequest(
const ACaptureRequest* request, sp<CaptureRequest>& outReq);
static ACaptureRequest* allocateACaptureRequest(sp<CaptureRequest>& req);
@@ -136,7 +141,8 @@
// For capture session to notify its end of life
void notifySessionEndOfLifeLocked(ACameraCaptureSession* session);
- camera_status_t configureStreamsLocked(const ACaptureSessionOutputContainer* outputs);
+ camera_status_t configureStreamsLocked(const ACaptureSessionOutputContainer* outputs,
+ const ACaptureRequest* sessionParameters);
// Input message will be posted and cleared after this returns
void postSessionMsgAndCleanup(sp<AMessage>& msg);
@@ -306,9 +312,10 @@
camera_status_t createCaptureSession(
const ACaptureSessionOutputContainer* outputs,
+ const ACaptureRequest* sessionParameters,
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session) {
- return mDevice->createCaptureSession(outputs, callbacks, session);
+ return mDevice->createCaptureSession(outputs, sessionParameters, callbacks, session);
}
/***********************
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index a1a8cd6..c59d0e7 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -221,7 +221,7 @@
mCallbacks.erase(cb);
}
-void CameraManagerGlobal::getCameraIdList(std::vector<String8> *cameraIds) {
+void CameraManagerGlobal::getCameraIdList(std::vector<String8>* cameraIds) {
// Ensure that we have initialized/refreshed the list of available devices
auto cs = getCameraService();
Mutex::Autolock _l(mLock);
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index 4a172f3..cc42f77 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -19,6 +19,7 @@
#include <camera/NdkCameraManager.h>
+#include <android-base/parseint.h>
#include <android/hardware/ICameraService.h>
#include <android/hardware/BnCameraServiceListener.h>
#include <camera/CameraMetadata.h>
@@ -140,8 +141,29 @@
static bool validStatus(int32_t status);
static bool isStatusAvailable(int32_t status);
+ // The sort logic must match the logic in
+ // libcameraservice/common/CameraProviderManager.cpp::getAPI1CompatibleCameraDeviceIds
+ struct CameraIdComparator {
+ bool operator()(const String8& a, const String8& b) const {
+ uint32_t aUint = 0, bUint = 0;
+ bool aIsUint = base::ParseUint(a.c_str(), &aUint);
+ bool bIsUint = base::ParseUint(b.c_str(), &bUint);
+
+ // Uint device IDs first
+ if (aIsUint && bIsUint) {
+ return aUint < bUint;
+ } else if (aIsUint) {
+ return true;
+ } else if (bIsUint) {
+ return false;
+ }
+ // Simple string compare if both ids are not uints
+ return a < b;
+ }
+ };
+
// Map camera_id -> status
- std::map<String8, int32_t> mDeviceStatusMap;
+ std::map<String8, int32_t, CameraIdComparator> mDeviceStatusMap;
// For the singleton instance
static Mutex sLock;
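To illustrate the ordering CameraIdComparator produces (a sketch, not part of the change, assuming the comparator is reachable from the calling scope): numeric IDs sort by value and precede non-numeric IDs, regardless of insertion order.

    std::map<String8, int32_t, CameraIdComparator> statusMap;
    statusMap[String8("10")]        = 0;
    statusMap[String8("back.wide")] = 0;
    statusMap[String8("2")]         = 0;
    // Iteration order: "2", "10", "back.wide"
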
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index 7b33c32..fc00a2d 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -235,7 +235,7 @@
}
const CameraMetadata&
-ACameraMetadata::getInternalData() {
+ACameraMetadata::getInternalData() const {
return mData;
}
@@ -305,6 +305,7 @@
case ACAMERA_STATISTICS_FACE_DETECT_MODE:
case ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE:
case ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE:
+ case ACAMERA_STATISTICS_OIS_DATA_MODE:
case ACAMERA_TONEMAP_CURVE_BLUE:
case ACAMERA_TONEMAP_CURVE_GREEN:
case ACAMERA_TONEMAP_CURVE_RED:
@@ -312,6 +313,7 @@
case ACAMERA_TONEMAP_GAMMA:
case ACAMERA_TONEMAP_PRESET_CURVE:
case ACAMERA_BLACK_LEVEL_LOCK:
+ case ACAMERA_DISTORTION_CORRECTION_MODE:
return true;
default:
return false;
diff --git a/camera/ndk/impl/ACameraMetadata.h b/camera/ndk/impl/ACameraMetadata.h
index 143efc7..0fd7efa 100644
--- a/camera/ndk/impl/ACameraMetadata.h
+++ b/camera/ndk/impl/ACameraMetadata.h
@@ -64,7 +64,7 @@
void filterUnsupportedFeatures(); // Hide features not yet supported by NDK
void filterStreamConfigurations(); // Hide input streams, translate hal format to NDK formats
- const CameraMetadata& getInternalData();
+ const CameraMetadata& getInternalData() const;
template<typename INTERNAL_T, typename NDK_T>
camera_status_t updateImpl(uint32_t tag, uint32_t count, const NDK_T* data) {
diff --git a/camera/ndk/impl/ACaptureRequest.h b/camera/ndk/impl/ACaptureRequest.h
index e5b453e..06b2cc3 100644
--- a/camera/ndk/impl/ACaptureRequest.h
+++ b/camera/ndk/impl/ACaptureRequest.h
@@ -45,8 +45,19 @@
};
struct ACaptureRequest {
+ camera_status_t setContext(void* ctx) {
+ context = ctx;
+ return ACAMERA_OK;
+ }
+
+ camera_status_t getContext(void** ctx) const {
+ *ctx = context;
+ return ACAMERA_OK;
+ }
+
ACameraMetadata* settings;
ACameraOutputTargets* targets;
+ void* context;
};
#endif // _ACAPTURE_REQUEST_H
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index b544b50..78e062a 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -593,6 +593,54 @@
#endif /* __ANDROID_API__ >= 24 */
+#if __ANDROID_API__ >= 28
+
+typedef struct ACaptureSessionOutput ACaptureSessionOutput;
+
+/**
+ * Update shared ACaptureSessionOutput.
+ *
+ * <p>A shared ACaptureSessionOutput (see {@link ACaptureSessionSharedOutput_create}) that
+ * was modified via calls to {@link ACaptureSessionSharedOutput_add} or
+ * {@link ACaptureSessionSharedOutput_remove} must be updated by calling this method before its
+ * changes take effect. After the update call returns with {@link ACAMERA_OK}, any newly added
+ * native windows can be used as a target in subsequent capture requests.</p>
+ *
+ * <p>Native windows that get removed must not be part of any active repeating or single/burst
+ * request or have any pending results. Consider updating repeating requests via
+ * {@link ACaptureSessionOutput_setRepeatingRequest} and then wait for the last frame number
+ * when the sequence completes
+ * {@link ACameraCaptureSession_captureCallback#onCaptureSequenceCompleted}.</p>
+ *
+ * <p>Native windows that get added must not be part of any other registered ACaptureSessionOutput
+ * and must be compatible. Compatible windows must have matching format, rotation and
+ * consumer usage.</p>
+ *
+ * <p>A shared ACameraCaptureSession can support up to 4 additional native windows.</p>
+ *
+ * @param session the capture session of interest
+ * @param output the modified output configuration
+ *
+ * @return <ul><li>
+ * {@link ACAMERA_OK} if the method succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session or output is NULL; or output
+ * contains invalid native windows; or if an attempt was made to add
+ * a native window to a different output configuration; or new native window is not
+ * compatible; or any removed native window still has pending requests;</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_OPERATION} if output configuration is not shared (see
+ * {@link ACaptureSessionSharedOutput_create}); or the number of additional
+ * native windows goes beyond the supported limit.</li>
+ * <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal
+ * error</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
+ */
+camera_status_t ACameraCaptureSession_updateSharedOutput(ACameraCaptureSession* session,
+ ACaptureSessionOutput* output);
+#endif /* __ANDROID_API__ >= 28 */
+
__END_DECLS
#endif /* _NDK_CAMERA_CAPTURE_SESSION_H */
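A hedged client-side sketch of the update flow documented above; "session", "sharedOutput", and "newWindow" are assumed to come from an already configured application:

    // Sketch only: extend a shared output with one more window, then apply the change.
    camera_status_t status = ACaptureSessionSharedOutput_add(sharedOutput, newWindow);
    if (status == ACAMERA_OK) {
        status = ACameraCaptureSession_updateSharedOutput(session, sharedOutput);
    }
    // After ACAMERA_OK, newWindow may be used as a target in subsequent capture requests.
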
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index 2c65529..b715b12 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -256,6 +256,7 @@
* @see ACameraDevice_createCaptureRequest
*/
TEMPLATE_MANUAL = 6,
+
} ACameraDevice_request_template;
/**
@@ -666,9 +667,102 @@
#endif /* __ANDROID_API__ >= 24 */
+#if __ANDROID_API__ >= 28
+
+/**
+ * Create a shared ACaptureSessionOutput object.
+ *
+ * <p>The ACaptureSessionOutput is used in {@link ACaptureSessionOutputContainer_add} method to add
+ * an output {@link ANativeWindow} to ACaptureSessionOutputContainer. Use
+ * {@link ACaptureSessionOutput_free} to free the object and its memory after application no longer
+ * needs the {@link ACaptureSessionOutput}. A shared ACaptureSessionOutput can be further modified
+ * via {@link ACaptureSessionSharedOutput_add} or {@link ACaptureSessionSharedOutput_remove} and
+ * must be updated via {@link ACameraCaptureSession_updateSharedOutput}.</p>
+ *
+ * @param anw the {@link ANativeWindow} to be associated with the {@link ACaptureSessionOutput}
+ * @param output the output {@link ACaptureSessionOutput} will be stored here if the
+ * method call succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds. The created container will be
+ * filled in the output argument.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if anw or output is NULL.</li></ul>
+ *
+ * @see ACaptureSessionOutputContainer_add
+ */
+camera_status_t ACaptureSessionSharedOutput_create(
+ ANativeWindow* anw, /*out*/ACaptureSessionOutput** output);
+
+/**
+ * Add a native window to shared ACaptureSessionOutput.
+ *
+ * The ACaptureSessionOutput must be created via {@link ACaptureSessionSharedOutput_create}.
+ *
+ * @param output the shared ACaptureSessionOutput to be extended.
+ * @param anw The new native window.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if anw or output is NULL; or output is not
+ * shared (see {@link ACaptureSessionSharedOutput_create}); or anw matches the native
+ * window associated with the ACaptureSessionOutput; or anw is already present inside
+ * ACaptureSessionOutput.</li></ul>
+ */
+camera_status_t ACaptureSessionSharedOutput_add(ACaptureSessionOutput *output, ANativeWindow *anw);
+
+/**
+ * Remove a native window from shared ACaptureSessionOutput.
+ *
+ * @param output the {@link ACaptureSessionOutput} to be modified.
+ * @param anw The native window to be removed.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if anw or output is NULL; or output is not
+ * shared (see {@link ACaptureSessionSharedOutput_create}); or anw matches the native
+ * window associated with the ACaptureSessionOutput; or anw is not present inside
+ * ACaptureSessionOutput.</li></ul>
+ */
+camera_status_t ACaptureSessionSharedOutput_remove(ACaptureSessionOutput *output,
+ ANativeWindow* anw);
+
+/**
+ * Create a new camera capture session similar to {@link ACameraDevice_createCaptureSession}. This
+ * function allows clients to pass additional session parameters during session initialization. For
+ * further information about session parameters see {@link ACAMERA_REQUEST_AVAILABLE_SESSION_KEYS}.
+ *
+ * @param device the camera device of interest.
+ * @param outputs the {@link ACaptureSessionOutputContainer} describes all output streams.
+ * @param sessionParameters An optional capture request that contains the initial values of session
+ * parameters advertised in
+ * {@link ACAMERA_REQUEST_AVAILABLE_SESSION_KEYS}.
+ * @param callbacks the {@link ACameraCaptureSession_stateCallbacks}
+ * capture session state callbacks.
+ * @param session the created {@link ACameraCaptureSession} will be filled here if the method call
+ * succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds. The created capture session will be
+ * filled in session argument.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if any of device, outputs, callbacks or
+ * session is NULL.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error.
+ * </li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
+ */
+camera_status_t ACameraDevice_createCaptureSessionWithSessionParameters(
+ ACameraDevice* device,
+ const ACaptureSessionOutputContainer* outputs,
+ const ACaptureRequest* sessionParameters,
+ const ACameraCaptureSession_stateCallbacks* callbacks,
+ /*out*/ACameraCaptureSession** session);
+
+#endif /* __ANDROID_API__ >= 28 */
+
__END_DECLS
#endif /* _NDK_CAMERA_DEVICE_H */
/** @} */
-
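A hedged sketch of creating a session with session parameters, as documented above; "device", "outputs", and "callbacks" are assumed to be prepared by the application, and TEMPLATE_PREVIEW is one of the existing request templates:

    // Sketch only: build an initial request to carry session parameters, then create the session.
    ACaptureRequest* sessionParams = nullptr;
    ACameraDevice_createCaptureRequest(device, TEMPLATE_PREVIEW, &sessionParams);
    // ... populate any keys listed in ACAMERA_REQUEST_AVAILABLE_SESSION_KEYS here ...

    ACameraCaptureSession* session = nullptr;
    camera_status_t status = ACameraDevice_createCaptureSessionWithSessionParameters(
            device, outputs, sessionParams, callbacks, &session);
    ACaptureRequest_free(sessionParams);
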
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 629d75a..3010646 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -69,6 +69,8 @@
ACAMERA_SYNC,
ACAMERA_REPROCESS,
ACAMERA_DEPTH,
+ ACAMERA_LOGICAL_MULTI_CAMERA,
+ ACAMERA_DISTORTION_CORRECTION,
ACAMERA_SECTION_COUNT,
ACAMERA_VENDOR = 0x8000
@@ -104,6 +106,12 @@
ACAMERA_SYNC_START = ACAMERA_SYNC << 16,
ACAMERA_REPROCESS_START = ACAMERA_REPROCESS << 16,
ACAMERA_DEPTH_START = ACAMERA_DEPTH << 16,
+ ACAMERA_LOGICAL_MULTI_CAMERA_START
+ = ACAMERA_LOGICAL_MULTI_CAMERA
+ << 16,
+ ACAMERA_DISTORTION_CORRECTION_START
+ = ACAMERA_DISTORTION_CORRECTION
+ << 16,
ACAMERA_VENDOR_START = ACAMERA_VENDOR << 16
} acamera_metadata_section_start_t;
@@ -471,10 +479,6 @@
* Otherwise will always be present.</p>
* <p>The maximum number of regions supported by the device is determined by the value
* of android.control.maxRegionsAe.</p>
- * <p>The data representation is int[5 * area_count].
- * Every five elements represent a metering region of (xmin, ymin, xmax, ymax, weight).
- * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
- * ymax.</p>
* <p>The coordinate system is based on the active pixel array,
* with (0,0) being the top-left pixel in the active pixel array, and
* (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
@@ -495,6 +499,10 @@
* region and output only the intersection rectangle as the metering region in the result
* metadata. If the region is entirely outside the crop region, it will be ignored and
* not reported in the result metadata.</p>
+ * <p>The data representation is <code>int[5 * area_count]</code>.
+ * Every five elements represent a metering region of <code>(xmin, ymin, xmax, ymax, weight)</code>.
+ * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
+ * ymax.</p>
*
* @see ACAMERA_SCALER_CROP_REGION
* @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
@@ -633,10 +641,6 @@
* Otherwise will always be present.</p>
* <p>The maximum number of focus areas supported by the device is determined by the value
* of android.control.maxRegionsAf.</p>
- * <p>The data representation is int[5 * area_count].
- * Every five elements represent a metering region of (xmin, ymin, xmax, ymax, weight).
- * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
- * ymax.</p>
* <p>The coordinate system is based on the active pixel array,
* with (0,0) being the top-left pixel in the active pixel array, and
* (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
@@ -651,12 +655,17 @@
* is used, all non-zero weights will have the same effect. A region with 0 weight is
* ignored.</p>
* <p>If all regions have 0 weight, then no specific metering area needs to be used by the
- * camera device.</p>
+ * camera device. The capture result will either be a zero weight region as well, or
+ * the region selected by the camera device as the focus area of interest.</p>
* <p>If the metering region is outside the used ACAMERA_SCALER_CROP_REGION returned in
* capture result metadata, the camera device will ignore the sections outside the crop
* region and output only the intersection rectangle as the metering region in the result
* metadata. If the region is entirely outside the crop region, it will be ignored and
* not reported in the result metadata.</p>
+ * <p>The data representation is <code>int[5 * area_count]</code>.
+ * Every five elements represent a metering region of <code>(xmin, ymin, xmax, ymax, weight)</code>.
+ * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
+ * ymax.</p>
*
* @see ACAMERA_SCALER_CROP_REGION
* @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
@@ -791,10 +800,6 @@
* Otherwise will always be present.</p>
* <p>The maximum number of regions supported by the device is determined by the value
* of android.control.maxRegionsAwb.</p>
- * <p>The data representation is int[5 * area_count].
- * Every five elements represent a metering region of (xmin, ymin, xmax, ymax, weight).
- * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
- * ymax.</p>
* <p>The coordinate system is based on the active pixel array,
* with (0,0) being the top-left pixel in the active pixel array, and
* (ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.width - 1,
@@ -815,6 +820,10 @@
* region and output only the intersection rectangle as the metering region in the result
* metadata. If the region is entirely outside the crop region, it will be ignored and
* not reported in the result metadata.</p>
+ * <p>The data representation is <code>int[5 * area_count]</code>.
+ * Every five elements represent a metering region of <code>(xmin, ymin, xmax, ymax, weight)</code>.
+ * The rectangle is defined to be inclusive on xmin and ymin, but exclusive on xmax and
+ * ymax.</p>
*
* @see ACAMERA_SCALER_CROP_REGION
* @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
@@ -837,10 +846,13 @@
*
* <p>This control (except for MANUAL) is only effective if
* <code>ACAMERA_CONTROL_MODE != OFF</code> and any 3A routine is active.</p>
- * <p>ZERO_SHUTTER_LAG will be supported if ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
- * contains PRIVATE_REPROCESSING or YUV_REPROCESSING. MANUAL will be supported if
- * ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains MANUAL_SENSOR. Other intent values are
- * always supported.</p>
+ * <p>All intents are supported by all devices, except that:
+ * * ZERO_SHUTTER_LAG will be supported if ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains
+ * PRIVATE_REPROCESSING or YUV_REPROCESSING.
+ * * MANUAL will be supported if ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains
+ * MANUAL_SENSOR.
+ * * MOTION_TRACKING will be supported if ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains
+ * MOTION_TRACKING.</p>
*
* @see ACAMERA_CONTROL_MODE
* @see ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
@@ -885,11 +897,10 @@
* <p>When set to AUTO, the individual algorithm controls in
* ACAMERA_CONTROL_* are in effect, such as ACAMERA_CONTROL_AF_MODE.</p>
* <p>When set to USE_SCENE_MODE, the individual controls in
- * ACAMERA_CONTROL_* are mostly disabled, and the camera device implements
- * one of the scene mode settings (such as ACTION, SUNSET, or PARTY)
- * as it wishes. The camera device scene mode 3A settings are provided by
- * capture results {@link ACameraMetadata} from
- * {@link ACameraCaptureSession_captureCallback_result}.</p>
+ * ACAMERA_CONTROL_* are mostly disabled, and the camera device
+ * implements one of the scene mode settings (such as ACTION,
+ * SUNSET, or PARTY) as it wishes. The camera device scene mode
+ * 3A settings are provided by {@link ACameraCaptureSession_captureCallback_result capture results}.</p>
* <p>When set to OFF_KEEP_STATE, it is similar to OFF mode, the only difference
* is that this frame will not be used by camera device background 3A statistics
* update, as if this frame is never captured. This mode can be used in the scenario
@@ -1043,20 +1054,18 @@
* <p>For constant-framerate recording, for each normal
* <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a>, that is, a
* <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a> that has
- * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#quality">quality</a>
- * in the range [
- * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#QUALITY_LOW">QUALITY_LOW</a>,
- * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#QUALITY_2160P">QUALITY_2160P</a>],
- * if the profile is supported by the device and has
- * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#videoFrameRate">videoFrameRate</a>
- * <code>x</code>, this list will always include (<code>x</code>,<code>x</code>).</p>
+ * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#quality">quality</a> in
+ * the range [<a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#QUALITY_LOW">QUALITY_LOW</a>,
+ * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#QUALITY_2160P">QUALITY_2160P</a>], if the profile is
+ * supported by the device and has
+ * <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#videoFrameRate">videoFrameRate</a> <code>x</code>, this list will
+ * always include (<code>x</code>,<code>x</code>).</p>
* </li>
* <li>
* <p>Also, a camera device must either not support any
* <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a>,
* or support at least one
- * normal <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a>
- * that has
+ * normal <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a> that has
* <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html#videoFrameRate">videoFrameRate</a> <code>x</code> >= 24.</p>
* </li>
* </ul>
@@ -1282,7 +1291,7 @@
* <p>State | Transition Cause | New State | Notes
* :------------:|:----------------:|:---------:|:-----------------------:
* INACTIVE | | INACTIVE | Camera device auto exposure algorithm is disabled</p>
- * <p>When ACAMERA_CONTROL_AE_MODE is AE_MODE_ON_*:</p>
+ * <p>When ACAMERA_CONTROL_AE_MODE is AE_MODE_ON*:</p>
* <p>State | Transition Cause | New State | Notes
* :-------------:|:--------------------------------------------:|:--------------:|:-----------------:
* INACTIVE | Camera device initiates AE scan | SEARCHING | Values changing
@@ -1303,10 +1312,13 @@
* LOCKED | aeLock is ON and aePrecaptureTrigger is CANCEL| LOCKED | Precapture trigger is ignored when AE is already locked
* Any state (excluding LOCKED) | ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is START | PRECAPTURE | Start AE precapture metering sequence
* Any state (excluding LOCKED) | ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER is CANCEL| INACTIVE | Currently active precapture metering sequence is canceled</p>
+ * <p>If the camera device supports AE external flash mode (ON_EXTERNAL_FLASH is included in
+ * ACAMERA_CONTROL_AE_AVAILABLE_MODES), ACAMERA_CONTROL_AE_STATE must be FLASH_REQUIRED after
+ * the camera device finishes the AE scan and the scene is too dark without flash.</p>
* <p>For the above table, the camera device may skip reporting any state changes that happen
* without application intervention (i.e. mode switch, trigger, locking). Any state that
* can be skipped in that manner is called a transient state.</p>
- * <p>For example, for above AE modes (AE_MODE_ON_*), in addition to the state transitions
+ * <p>For example, for above AE modes (AE_MODE_ON*), in addition to the state transitions
* listed in above table, it is also legal for the camera device to skip one or more
* transient states between two results. See below table for examples:</p>
* <p>State | Transition Cause | New State | Notes
@@ -1319,9 +1331,11 @@
* CONVERGED | Camera device finished AE scan | FLASH_REQUIRED | Converged but too dark w/o flash after a new scan, transient states are skipped by camera device.
* FLASH_REQUIRED | Camera device finished AE scan | CONVERGED | Converged after a new scan, transient states are skipped by camera device.</p>
*
+ * @see ACAMERA_CONTROL_AE_AVAILABLE_MODES
* @see ACAMERA_CONTROL_AE_LOCK
* @see ACAMERA_CONTROL_AE_MODE
* @see ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
+ * @see ACAMERA_CONTROL_AE_STATE
* @see ACAMERA_CONTROL_MODE
* @see ACAMERA_CONTROL_SCENE_MODE
*/
@@ -1619,13 +1633,13 @@
* compared to previous regular requests. enableZsl does not affect requests with other
* capture intents.</p>
* <p>For example, when requests are submitted in the following order:
- * Request A: enableZsl is <code>true</code>, ACAMERA_CONTROL_CAPTURE_INTENT is PREVIEW
- * Request B: enableZsl is <code>true</code>, ACAMERA_CONTROL_CAPTURE_INTENT is STILL_CAPTURE</p>
+ * Request A: enableZsl is ON, ACAMERA_CONTROL_CAPTURE_INTENT is PREVIEW
+ * Request B: enableZsl is ON, ACAMERA_CONTROL_CAPTURE_INTENT is STILL_CAPTURE</p>
* <p>The output images for request B may have contents captured before the output images for
* request A, and the result metadata for request B may be older than the result metadata for
* request A.</p>
- * <p>Note that when enableZsl is <code>true</code>, it is not guaranteed to get output images captured in the
- * past for requests with STILL_CAPTURE capture intent.</p>
+ * <p>Note that when enableZsl is <code>true</code>, it is not guaranteed to get output images captured in
+ * the past for requests with STILL_CAPTURE capture intent.</p>
* <p>For applications targeting SDK versions O and newer, the value of enableZsl in
* TEMPLATE_STILL_CAPTURE template may be <code>true</code>. The value in other templates is always
* <code>false</code> if present.</p>
@@ -1638,6 +1652,26 @@
*/
ACAMERA_CONTROL_ENABLE_ZSL = // byte (acamera_metadata_enum_android_control_enable_zsl_t)
ACAMERA_CONTROL_START + 41,
+ /**
+ * <p>Whether a significant scene change is detected within the currently-set AF
+ * region(s).</p>
+ *
+ * <p>Type: byte (acamera_metadata_enum_android_control_af_scene_change_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * </ul></p>
+ *
+ * <p>When the camera focus routine detects a change in the scene it is looking at,
+ * such as a large shift in camera viewpoint, significant motion in the scene, or a
+ * significant illumination change, this value will be set to DETECTED for a single capture
+ * result. Otherwise the value will be NOT_DETECTED. The threshold for detection is similar
+ * to what would trigger a new passive focus scan to begin in CONTINUOUS autofocus modes.</p>
+ * <p>This key will be available if the camera device advertises this key via {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS }.</p>
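+ * <p>A minimal sketch of checking this key in a capture result; the helper name is a
+ * placeholder, and the key is assumed to be advertised by the device:</p>
+ * <pre><code>// result: ACameraMetadata from a captureCallback_result callback.
+ * static bool afSceneChanged(const ACameraMetadata* result) {
+ *     ACameraMetadata_const_entry entry;
+ *     if (ACameraMetadata_getConstEntry(result, ACAMERA_CONTROL_AF_SCENE_CHANGE,
+ *             &entry) != ACAMERA_OK || entry.count == 0) {
+ *         return false; // Key absent on this device or in this result.
+ *     }
+ *     return entry.data.u8[0] == ACAMERA_CONTROL_AF_SCENE_CHANGE_DETECTED;
+ * }
+ * </code></pre>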
+ */
+ ACAMERA_CONTROL_AF_SCENE_CHANGE = // byte (acamera_metadata_enum_android_control_af_scene_change_t)
+ ACAMERA_CONTROL_START + 42,
ACAMERA_CONTROL_END,
/**
@@ -1879,8 +1913,8 @@
* the thumbnail data will also be rotated.</p>
* <p>Note that this orientation is relative to the orientation of the camera sensor, given
* by ACAMERA_SENSOR_ORIENTATION.</p>
- * <p>To translate from the device orientation given by the Android sensor APIs, the following
- * sample code may be used:</p>
+ * <p>To translate from the device orientation given by the Android sensor APIs for camera
+ * sensors which are not EXTERNAL, the following sample code may be used:</p>
* <pre><code>private int getJpegOrientation(CameraCharacteristics c, int deviceOrientation) {
* if (deviceOrientation == android.view.OrientationEventListener.ORIENTATION_UNKNOWN) return 0;
* int sensorOrientation = c.get(CameraCharacteristics.SENSOR_ORIENTATION);
@@ -1899,6 +1933,8 @@
* return jpegOrientation;
* }
* </code></pre>
+ * <p>For EXTERNAL cameras the sensor orientation will always be set to 0 and the facing will
+ * also be set to EXTERNAL. The above code is not relevant in such a case.</p>
*
* @see ACAMERA_SENSOR_ORIENTATION
*/
@@ -1959,14 +1995,14 @@
* <p>When an ACAMERA_JPEG_ORIENTATION of non-zero degree is requested,
* the camera device will handle thumbnail rotation in one of the following ways:</p>
* <ul>
- * <li>Set the
- * <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>
+ * <li>Set the <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>
* and keep jpeg and thumbnail image data unrotated.</li>
* <li>Rotate the jpeg and thumbnail image data and not set
- * <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>.
- * In this case, LIMITED or FULL hardware level devices will report rotated thumnail size
- * in capture result, so the width and height will be interchanged if 90 or 270 degree
- * orientation is requested. LEGACY device will always report unrotated thumbnail size.</li>
+ * <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>. In this
+ * case, LIMITED or FULL hardware level devices will report rotated thumbnail size in
+ * capture result, so the width and height will be interchanged if 90 or 270 degree
+ * orientation is requested. LEGACY device will always report unrotated thumbnail
+ * size.</li>
* </ul>
*
* @see ACAMERA_JPEG_ORIENTATION
@@ -2216,37 +2252,33 @@
* </ul></p>
*
* <p>The position of the camera device's lens optical center,
- * as a three-dimensional vector <code>(x,y,z)</code>, relative to the
- * optical center of the largest camera device facing in the
- * same direction as this camera, in the
- * <a href="https://developer.android.com/reference/android/hardware/SensorEvent.html">Android sensor coordinate axes</a>.
- * Note that only the axis definitions are shared with
- * the sensor coordinate system, but not the origin.</p>
- * <p>If this device is the largest or only camera device with a
- * given facing, then this position will be <code>(0, 0, 0)</code>; a
- * camera device with a lens optical center located 3 cm from
- * the main sensor along the +X axis (to the right from the
- * user's perspective) will report <code>(0.03, 0, 0)</code>.</p>
- * <p>To transform a pixel coordinates between two cameras
- * facing the same direction, first the source camera
- * ACAMERA_LENS_RADIAL_DISTORTION must be corrected for. Then
- * the source camera ACAMERA_LENS_INTRINSIC_CALIBRATION needs
- * to be applied, followed by the ACAMERA_LENS_POSE_ROTATION
- * of the source camera, the translation of the source camera
- * relative to the destination camera, the
- * ACAMERA_LENS_POSE_ROTATION of the destination camera, and
- * finally the inverse of ACAMERA_LENS_INTRINSIC_CALIBRATION
- * of the destination camera. This obtains a
- * radial-distortion-free coordinate in the destination
- * camera pixel coordinates.</p>
- * <p>To compare this against a real image from the destination
- * camera, the destination camera image then needs to be
- * corrected for radial distortion before comparison or
- * sampling.</p>
+ * as a three-dimensional vector <code>(x,y,z)</code>.</p>
+ * <p>Prior to Android P, or when ACAMERA_LENS_POSE_REFERENCE is PRIMARY_CAMERA, this position
+ * is relative to the optical center of the largest camera device facing in the same
+ * direction as this camera, in the <a href="https://developer.android.com/reference/android/hardware/SensorEvent.html">Android sensor
+ * coordinate axes</a>. Note that only the axis definitions are shared with the sensor
+ * coordinate system, but not the origin.</p>
+ * <p>If this device is the largest or only camera device with a given facing, then this
+ * position will be <code>(0, 0, 0)</code>; a camera device with a lens optical center located 3 cm
+ * from the main sensor along the +X axis (to the right from the user's perspective) will
+ * report <code>(0.03, 0, 0)</code>.</p>
+ * <p>To transform pixel coordinates between two cameras facing the same direction, first
+ * the source camera ACAMERA_LENS_DISTORTION must be corrected for. Then the source
+ * camera ACAMERA_LENS_INTRINSIC_CALIBRATION needs to be applied, followed by the
+ * ACAMERA_LENS_POSE_ROTATION of the source camera, the translation of the source camera
+ * relative to the destination camera, the ACAMERA_LENS_POSE_ROTATION of the destination
+ * camera, and finally the inverse of ACAMERA_LENS_INTRINSIC_CALIBRATION of the destination
+ * camera. This obtains a radial-distortion-free coordinate in the destination camera pixel
+ * coordinates.</p>
+ * <p>To compare this against a real image from the destination camera, the destination camera
+ * image then needs to be corrected for radial distortion before comparison or sampling.</p>
+ * <p>When ACAMERA_LENS_POSE_REFERENCE is GYROSCOPE, then this position is relative to
+ * the center of the primary gyroscope on the device.</p>
*
+ * @see ACAMERA_LENS_DISTORTION
* @see ACAMERA_LENS_INTRINSIC_CALIBRATION
+ * @see ACAMERA_LENS_POSE_REFERENCE
* @see ACAMERA_LENS_POSE_ROTATION
- * @see ACAMERA_LENS_RADIAL_DISTORTION
*/
ACAMERA_LENS_POSE_TRANSLATION = // float[3]
ACAMERA_LENS_START + 7,
@@ -2356,7 +2388,7 @@
* where <code>(0,0)</code> is the top-left of the
* preCorrectionActiveArraySize rectangle. Once the pose and
* intrinsic calibration transforms have been applied to a
- * world point, then the ACAMERA_LENS_RADIAL_DISTORTION
+ * world point, then the ACAMERA_LENS_DISTORTION
* transform needs to be applied, and the result adjusted to
* be in the ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE coordinate
* system (where <code>(0, 0)</code> is the top-left of the
@@ -2364,19 +2396,42 @@
* coordinate of the world point for processed (non-RAW)
* output buffers.</p>
*
+ * @see ACAMERA_LENS_DISTORTION
* @see ACAMERA_LENS_POSE_ROTATION
* @see ACAMERA_LENS_POSE_TRANSLATION
- * @see ACAMERA_LENS_RADIAL_DISTORTION
* @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
* @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
*/
ACAMERA_LENS_INTRINSIC_CALIBRATION = // float[5]
ACAMERA_LENS_START + 10,
+ ACAMERA_LENS_RADIAL_DISTORTION = // Deprecated! DO NOT USE
+ ACAMERA_LENS_START + 11,
+ /**
+ * <p>The origin for ACAMERA_LENS_POSE_TRANSLATION.</p>
+ *
+ * @see ACAMERA_LENS_POSE_TRANSLATION
+ *
+ * <p>Type: byte (acamera_metadata_enum_android_lens_pose_reference_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>Different calibration methods and use cases can produce better or worse results
+ * depending on the selected coordinate origin.</p>
+ */
+ ACAMERA_LENS_POSE_REFERENCE = // byte (acamera_metadata_enum_android_lens_pose_reference_t)
+ ACAMERA_LENS_START + 12,
/**
* <p>The correction coefficients to correct for this camera device's
* radial and tangential lens distortion.</p>
+ * <p>Replaces the deprecated ACAMERA_LENS_RADIAL_DISTORTION field, which was
+ * inconsistently defined.</p>
*
- * <p>Type: float[6]</p>
+ * @see ACAMERA_LENS_RADIAL_DISTORTION
+ *
+ * <p>Type: float[5]</p>
*
* <p>This tag may appear in:
* <ul>
@@ -2384,13 +2439,13 @@
* <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
* </ul></p>
*
- * <p>Four radial distortion coefficients <code>[kappa_0, kappa_1, kappa_2,
+ * <p>Three radial distortion coefficients <code>[kappa_1, kappa_2,
* kappa_3]</code> and two tangential distortion coefficients
* <code>[kappa_4, kappa_5]</code> that can be used to correct the
* lens's geometric distortion with the mapping equations:</p>
- * <pre><code> x_c = x_i * ( kappa_0 + kappa_1 * r^2 + kappa_2 * r^4 + kappa_3 * r^6 ) +
+ * <pre><code> x_c = x_i * ( 1 + kappa_1 * r^2 + kappa_2 * r^4 + kappa_3 * r^6 ) +
* kappa_4 * (2 * x_i * y_i) + kappa_5 * ( r^2 + 2 * x_i^2 )
- * y_c = y_i * ( kappa_0 + kappa_1 * r^2 + kappa_2 * r^4 + kappa_3 * r^6 ) +
+ * y_c = y_i * ( 1 + kappa_1 * r^2 + kappa_2 * r^4 + kappa_3 * r^6 ) +
* kappa_5 * (2 * x_i * y_i) + kappa_4 * ( r^2 + 2 * y_i^2 )
* </code></pre>
* <p>Here, <code>[x_c, y_c]</code> are the coordinates to sample in the
@@ -2398,23 +2453,21 @@
* corrected image at the coordinate <code>[x_i, y_i]</code>:</p>
* <pre><code> correctedImage(x_i, y_i) = sample_at(x_c, y_c, inputImage)
* </code></pre>
- * <p>The pixel coordinates are defined in a normalized
- * coordinate system related to the
- * ACAMERA_LENS_INTRINSIC_CALIBRATION calibration fields.
- * Both <code>[x_i, y_i]</code> and <code>[x_c, y_c]</code> have <code>(0,0)</code> at the
- * lens optical center <code>[c_x, c_y]</code>. The maximum magnitudes
- * of both x and y coordinates are normalized to be 1 at the
- * edge further from the optical center, so the range
- * for both dimensions is <code>-1 <= x <= 1</code>.</p>
+ * <p>The pixel coordinates are defined in a coordinate system
+ * related to the ACAMERA_LENS_INTRINSIC_CALIBRATION
+ * calibration fields; see that entry for details of the mapping stages.
+ * Both <code>[x_i, y_i]</code> and <code>[x_c, y_c]</code>
+ * have <code>(0,0)</code> at the lens optical center <code>[c_x, c_y]</code>, and
+ * the range of the coordinates depends on the focal length
+ * terms of the intrinsic calibration.</p>
* <p>Finally, <code>r</code> represents the radial distance from the
- * optical center, <code>r^2 = x_i^2 + y_i^2</code>, and its magnitude
- * is therefore no larger than <code>|r| <= sqrt(2)</code>.</p>
+ * optical center, <code>r^2 = x_i^2 + y_i^2</code>.</p>
* <p>The distortion model used is the Brown-Conrady model.</p>
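+ * <p>As a worked sketch of the mapping above, once the five coefficients have been read
+ * from this key (the coordinates are assumed to already be expressed relative to the
+ * optical center, per ACAMERA_LENS_INTRINSIC_CALIBRATION):</p>
+ * <pre><code>// k: the float[5] value of this key, [kappa_1, kappa_2, kappa_3, kappa_4, kappa_5].
+ * static void distortPoint(const float k[5], float x_i, float y_i, float* x_c, float* y_c) {
+ *     float r2 = x_i * x_i + y_i * y_i;
+ *     float radial = 1 + k[0] * r2 + k[1] * r2 * r2 + k[2] * r2 * r2 * r2;
+ *     *x_c = x_i * radial + k[3] * (2 * x_i * y_i) + k[4] * (r2 + 2 * x_i * x_i);
+ *     *y_c = y_i * radial + k[4] * (2 * x_i * y_i) + k[3] * (r2 + 2 * y_i * y_i);
+ * }
+ * </code></pre>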
*
* @see ACAMERA_LENS_INTRINSIC_CALIBRATION
*/
- ACAMERA_LENS_RADIAL_DISTORTION = // float[6]
- ACAMERA_LENS_START + 11,
+ ACAMERA_LENS_DISTORTION = // float[5]
+ ACAMERA_LENS_START + 13,
ACAMERA_LENS_END,
/**
@@ -2662,11 +2715,12 @@
* into the 3 stream types as below:</p>
* <ul>
* <li>Processed (but stalling): any non-RAW format with a stallDurations > 0.
- * Typically {@link AIMAGE_FORMAT_JPEG} format.</li>
- * <li>Raw formats: {@link AIMAGE_FORMAT_RAW16}, {@link AIMAGE_FORMAT_RAW10}, or
- * {@link AIMAGE_FORMAT_RAW12}.</li>
- * <li>Processed (but not-stalling): any non-RAW format without a stall duration.
- * Typically {@link AIMAGE_FORMAT_YUV_420_888}.</li>
+ * Typically {@link AIMAGE_FORMAT_JPEG JPEG format}.</li>
+ * <li>Raw formats: {@link AIMAGE_FORMAT_RAW16 RAW_SENSOR}, {@link AIMAGE_FORMAT_RAW10 RAW10}, or
+ * {@link AIMAGE_FORMAT_RAW12 RAW12}.</li>
+ * <li>Processed (but not-stalling): any non-RAW format without a stall duration. Typically
+ * {@link AIMAGE_FORMAT_YUV_420_888 YUV_420_888},
+ * <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#NV21">NV21</a>, or <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YV12">YV12</a>.</li>
* </ul>
*
* @see ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
@@ -2787,7 +2841,7 @@
ACAMERA_REQUEST_START + 12,
/**
* <p>A list of all keys that the camera device has available
- * to use with {@link ACaptureRequest}.</p>
+ * to use with {@link ACaptureRequest }.</p>
*
* <p>Type: int32[n]</p>
*
@@ -2809,9 +2863,7 @@
ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS = // int32[n]
ACAMERA_REQUEST_START + 13,
/**
- * <p>A list of all keys that the camera device has available
- * to query with {@link ACameraMetadata} from
- * {@link ACameraCaptureSession_captureCallback_result}.</p>
+ * <p>A list of all keys that the camera device has available to use with {@link ACameraCaptureSession_captureCallback_result }.</p>
*
* <p>Type: int32[n]</p>
*
@@ -2842,9 +2894,7 @@
ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS = // int32[n]
ACAMERA_REQUEST_START + 14,
/**
- * <p>A list of all keys that the camera device has available
- * to query with {@link ACameraMetadata} from
- * {@link ACameraManager_getCameraCharacteristics}.</p>
+ * <p>A list of all keys that the camera device has available to use with {@link ACameraManager_getCameraCharacteristics }.</p>
*
* <p>Type: int32[n]</p>
*
@@ -2862,6 +2912,59 @@
*/
ACAMERA_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS = // int32[n]
ACAMERA_REQUEST_START + 15,
+ /**
+ * <p>A subset of the available request keys that the camera device
+ * can pass as part of the capture session initialization.</p>
+ *
+ * <p>Type: int32[n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>This is a subset of ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS which
+ * contains a list of keys that are difficult to apply per-frame and
+ * can result in unexpected delays when modified during the capture session
+ * lifetime. Typical examples include parameters that require a
+ * time-consuming hardware re-configuration or internal camera pipeline
+ * change. For performance reasons we advise clients to pass their initial
+ * values as part of
+ * {@link ACameraDevice_createCaptureSessionWithSessionParameters }.
+ * Once the camera capture session is enabled it is also recommended to avoid
+ * changing them from their initial values set in
+ * {@link ACameraDevice_createCaptureSessionWithSessionParameters }.
+ * Control over session parameters can still be exerted in capture requests,
+ * but clients should be aware that doing so may incur delays while the new
+ * values are applied.
+ * An example usage scenario could look like this:</p>
+ * <ul>
+ * <li>The camera client starts by querying the session parameter key list via
+ * {@link ACameraManager_getCameraCharacteristics }.</li>
+ * <li>Before triggering the capture session create sequence, a capture request
+ * must be built via
+ * {@link ACameraDevice_createCaptureRequest }
+ * using an appropriate template matching the particular use case.</li>
+ * <li>The client should go over the list of session parameters and check
+ * whether some of the keys listed matches with the parameters that
+ * they intend to modify as part of the first capture request.</li>
+ * <li>If there is no such match, the capture request can be passed
+ * unmodified to
+ * {@link ACameraDevice_createCaptureSessionWithSessionParameters }.</li>
+ * <li>If matches do exist, the client should update the respective values
+ * and pass the request to
+ * {@link ACameraDevice_createCaptureSessionWithSessionParameters }.</li>
+ * <li>After the capture session initialization completes, the session parameter
+ * key list can continue to serve as a reference when posting or updating
+ * further requests. As mentioned above, further changes to session
+ * parameters should ideally be avoided; if updates are necessary,
+ * however, clients could expect a delay/glitch during the
+ * parameter switch. A sketch of the creation sequence follows this list.</li>
+ * </ul>
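+ * <p>A minimal sketch of that sequence, assuming the output container and state callbacks
+ * have already been set up elsewhere and that the preview template fits the use case:</p>
+ * <pre><code>static camera_status_t createSessionWithInitialParameters(
+ *         ACameraDevice* device,
+ *         const ACaptureSessionOutputContainer* outputs,
+ *         const ACameraCaptureSession_stateCallbacks* callbacks,
+ *         ACameraCaptureSession** session) {
+ *     ACaptureRequest* request = NULL;
+ *     camera_status_t status =
+ *             ACameraDevice_createCaptureRequest(device, TEMPLATE_PREVIEW, &request);
+ *     if (status != ACAMERA_OK) return status;
+ *     // Set any keys listed in ACAMERA_REQUEST_AVAILABLE_SESSION_KEYS to their intended
+ *     // initial values here, before the session is created.
+ *     status = ACameraDevice_createCaptureSessionWithSessionParameters(
+ *             device, outputs, request, callbacks, session);
+ *     ACaptureRequest_free(request);
+ *     return status;
+ * }
+ * </code></pre>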
+ *
+ * @see ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS
+ */
+ ACAMERA_REQUEST_AVAILABLE_SESSION_KEYS = // int32[n]
+ ACAMERA_REQUEST_START + 16,
ACAMERA_REQUEST_END,
/**
@@ -2876,7 +2979,6 @@
* </ul></p>
*
* <p>This control can be used to implement digital zoom.</p>
- * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
* <p>The crop region coordinate system is based off
* ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with <code>(0, 0)</code> being the
* top-left corner of the sensor active array.</p>
@@ -2906,6 +3008,7 @@
* for rounding and other hardware requirements; the final
* crop region used will be included in the output capture
* result.</p>
+ * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
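+ * <p>For illustration, a sketch of a centered 2x digital zoom; the helper name is a
+ * placeholder, and the active array dimensions are assumed to have been read from
+ * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE (the device may still round the final crop as
+ * described above):</p>
+ * <pre><code>static camera_status_t setCenteredZoom2x(ACaptureRequest* request,
+ *         int32_t activeWidth, int32_t activeHeight) {
+ *     int32_t crop[4] = { activeWidth / 4, activeHeight / 4,   // left, top
+ *                         activeWidth / 2, activeHeight / 2 }; // width, height
+ *     return ACaptureRequest_setEntry_i32(request, ACAMERA_SCALER_CROP_REGION, 4, crop);
+ * }
+ * </code></pre>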
*
* @see ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM
* @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
@@ -3061,13 +3164,14 @@
* ignored).</p>
* <p>The following formats may always have a stall duration:</p>
* <ul>
- * <li>{@link AIMAGE_FORMAT_JPEG}</li>
- * <li>{@link AIMAGE_FORMAT_RAW16}</li>
+ * <li>{@link AIMAGE_FORMAT_JPEG }</li>
+ * <li>{@link AIMAGE_FORMAT_RAW16 }</li>
* </ul>
* <p>The following formats will never have a stall duration:</p>
* <ul>
- * <li>{@link AIMAGE_FORMAT_YUV_420_888}</li>
- * <li>{@link AIMAGE_FORMAT_RAW10}</li>
+ * <li>{@link AIMAGE_FORMAT_YUV_420_888 }</li>
+ * <li>{@link AIMAGE_FORMAT_RAW10 }</li>
+ * <li>{@link AIMAGE_FORMAT_RAW12 }</li>
* </ul>
* <p>All other formats may or may not have an allowed stall duration on
* a per-capability basis; refer to ACAMERA_REQUEST_AVAILABLE_CAPABILITIES
@@ -3177,39 +3281,29 @@
* can run concurrently to the rest of the camera pipeline, but
* cannot process more than 1 capture at a time.</li>
* </ul>
- * <p>The necessary information for the application, given the model above,
- * is provided via
- * {@link ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS}.
- * These are used to determine the maximum frame rate / minimum frame
- * duration that is possible for a given stream configuration.</p>
+ * <p>The necessary information for the application, given the model above, is provided via
+ * {@link ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS }.
+ * These are used to determine the maximum frame rate / minimum frame duration that is
+ * possible for a given stream configuration.</p>
* <p>Specifically, the application can use the following rules to
* determine the minimum frame duration it can request from the camera
* device:</p>
* <ol>
- * <li>Let the set of currently configured input/output streams
- * be called <code>S</code>.</li>
- * <li>Find the minimum frame durations for each stream in <code>S</code>, by looking
- * it up in {@link ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS}
- * (with its respective size/format). Let this set of frame durations be
- * called <code>F</code>.</li>
- * <li>For any given request <code>R</code>, the minimum frame duration allowed
- * for <code>R</code> is the maximum out of all values in <code>F</code>. Let the streams
- * used in <code>R</code> be called <code>S_r</code>.</li>
+ * <li>Let the set of currently configured input/output streams be called <code>S</code>.</li>
+ * <li>Find the minimum frame durations for each stream in <code>S</code>, by looking it up in {@link ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS }
+ * (with its respective size/format). Let this set of frame durations be called <code>F</code>.</li>
+ * <li>For any given request <code>R</code>, the minimum frame duration allowed for <code>R</code> is the maximum
+ * out of all values in <code>F</code>. Let the streams used in <code>R</code> be called <code>S_r</code>.</li>
* </ol>
- * <p>If none of the streams in <code>S_r</code> have a stall time (listed in {@link
- * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS}
- * using its respective size/format), then the frame duration in <code>F</code>
- * determines the steady state frame rate that the application will get
- * if it uses <code>R</code> as a repeating request. Let this special kind of
- * request be called <code>Rsimple</code>.</p>
- * <p>A repeating request <code>Rsimple</code> can be <em>occasionally</em> interleaved
- * by a single capture of a new request <code>Rstall</code> (which has at least
- * one in-use stream with a non-0 stall time) and if <code>Rstall</code> has the
- * same minimum frame duration this will not cause a frame rate loss
- * if all buffers from the previous <code>Rstall</code> have already been
- * delivered.</p>
- * <p>For more details about stalling, see
- * {@link ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS}.</p>
+ * <p>If none of the streams in <code>S_r</code> have a stall time (listed in {@link ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS }
+ * using its respective size/format), then the frame duration in <code>F</code> determines the steady
+ * state frame rate that the application will get if it uses <code>R</code> as a repeating request. Let
+ * this special kind of request be called <code>Rsimple</code>.</p>
+ * <p>A repeating request <code>Rsimple</code> can be <em>occasionally</em> interleaved by a single capture of a
+ * new request <code>Rstall</code> (which has at least one in-use stream with a non-0 stall time) and if
+ * <code>Rstall</code> has the same minimum frame duration this will not cause a frame rate loss if all
+ * buffers from the previous <code>Rstall</code> have already been delivered.</p>
+ * <p>For more details about stalling, see {@link ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS }.</p>
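+ * <p>As a sketch of rule 3 above, once the per-stream minimum durations <code>F</code> for
+ * the streams targeted by a request have been looked up, the request's minimum frame
+ * duration is simply their maximum:</p>
+ * <pre><code>// durations: minimum frame duration (ns) of each stream targeted by the request.
+ * static int64_t minFrameDurationForRequest(const int64_t* durations, size_t count) {
+ *     int64_t result = 0;
+ *     for (size_t i = 0; i != count; i++) {
+ *         if (durations[i] > result) result = durations[i];
+ *     }
+ *     return result;
+ * }
+ * </code></pre>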
* <p>This control is only effective if ACAMERA_CONTROL_AE_MODE or ACAMERA_CONTROL_MODE is set to
* OFF; otherwise the auto-exposure algorithm will override this value.</p>
*
@@ -3567,14 +3661,12 @@
* timestamps for other captures from the same camera device, but are
* not guaranteed to be comparable to any other time source.</p>
* <p>When ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE <code>==</code> REALTIME, the
- * timestamps measure time in the same timebase as
- * <a href="https://developer.android.com/reference/android/os/SystemClock.html#elapsedRealtimeNanos">elapsedRealtimeNanos</a>
- * (or CLOCK_BOOTTIME), and they can
+ * timestamps measure time in the same timebase as <a href="https://developer.android.com/reference/android/os/SystemClock.html#elapsedRealtimeNanos">SystemClock#elapsedRealtimeNanos</a>, and they can
* be compared to other timestamps from other subsystems that
* are using that base.</p>
* <p>For reprocessing, the timestamp will match the start of exposure of
- * the input image, i.e. {@link CaptureResult#SENSOR_TIMESTAMP the
- * timestamp} in the TotalCaptureResult that was used to create the
+ * the input image, i.e. <a href="https://developer.android.com/reference/CaptureResult.html#SENSOR_TIMESTAMP">the
+ * timestamp</a> in the TotalCaptureResult that was used to create the
* reprocess capture request.</p>
*
* @see ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE
@@ -3775,7 +3867,6 @@
* optically shielded pixel areas. By blocking light, these pixels
* provides a reliable black reference for black level compensation
* in active array region.</p>
- * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
* <p>This key provides a list of disjoint rectangles specifying the
* regions of optically shielded (with metal shield) black pixel
* regions if the camera device is capable of reading out these black
@@ -3785,6 +3876,7 @@
* black level of each captured raw images.</p>
* <p>When this key is reported, the ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL and
* ACAMERA_SENSOR_DYNAMIC_WHITE_LEVEL will also be reported.</p>
+ * <p>The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
*
* @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
* @see ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL
@@ -3825,9 +3917,8 @@
* layout key (see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT), i.e. the
* nth value given corresponds to the black level offset for the nth
* color channel listed in the CFA.</p>
- * <p>This key will be available if ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS is
- * available or the camera device advertises this key via
- * {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS}.</p>
+ * <p>This key will be available if ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS is available or the
+ * camera device advertises this key via {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS }.</p>
*
* @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
* @see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
@@ -3853,7 +3944,7 @@
* estimated white level for each frame.</p>
* <p>This key will be available if ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS is
* available or the camera device advertises this key via
- * {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS}.</p>
+ * {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS }.</p>
*
* @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
* @see ACAMERA_SENSOR_INFO_WHITE_LEVEL
@@ -3882,13 +3973,13 @@
* <p>This rectangle is defined relative to the full pixel array; (0,0) is the top-left of
* the full pixel array, and the size of the full pixel array is given by
* ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE.</p>
- * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
* <p>The coordinate system for most other keys that list pixel coordinates, including
* ACAMERA_SCALER_CROP_REGION, is defined relative to the active array rectangle given in
* this field, with <code>(0, 0)</code> being the top-left of this rectangle.</p>
* <p>The active array may be smaller than the full pixel array, since the full array may
* include black calibration pixels or other inactive regions, and geometric correction
* resulting in scaling or cropping may have been applied.</p>
+ * <p>The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
*
* @see ACAMERA_SCALER_CROP_REGION
* @see ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE
@@ -3960,8 +4051,7 @@
* <p>Attempting to use frame durations beyond the maximum will result in the frame
* duration being clipped to the maximum. See that control for a full definition of frame
* durations.</p>
- * <p>Refer to {@link
- * ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS}
+ * <p>Refer to {@link ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS }
* for the minimum frame duration values.</p>
*/
ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION = // int64
@@ -4000,9 +4090,9 @@
* the raw buffers produced by this sensor.</p>
* <p>If a camera device supports raw sensor formats, either this or
* ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE is the maximum dimensions for the raw
- * output formats listed in ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS (this depends on
- * whether or not the image sensor returns buffers containing pixels that are not
- * part of the active array region for blacklevel calibration or other purposes).</p>
+ * output formats listed in {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS }
+ * (this depends on whether or not the image sensor returns buffers containing pixels that
+ * are not part of the active array region for blacklevel calibration or other purposes).</p>
* <p>Some parts of the full pixel array may not receive light from the scene,
* or be otherwise inactive. The ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE key
* defines the rectangle of active pixels that will be included in processed image
@@ -4092,7 +4182,6 @@
* <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
* </ul></p>
*
- * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
* <p>This is the rectangle representing the size of the active region of the sensor (i.e.
* the region that actually receives light from the scene) before any geometric correction
* has been applied, and should be treated as the active region rectangle for any of the
@@ -4133,7 +4222,7 @@
* ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
* <p>The currently supported fields that correct for geometric distortion are:</p>
* <ol>
- * <li>ACAMERA_LENS_RADIAL_DISTORTION.</li>
+ * <li>ACAMERA_LENS_DISTORTION.</li>
* </ol>
* <p>If all of the geometric distortion fields are no-ops, this rectangle will be the same
* as the post-distortion-corrected rectangle given in
@@ -4143,8 +4232,9 @@
* ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE.</p>
* <p>The pre-correction active array may be smaller than the full pixel array, since the
* full array may include black calibration pixels or other inactive regions.</p>
+ * <p>The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
*
- * @see ACAMERA_LENS_RADIAL_DISTORTION
+ * @see ACAMERA_LENS_DISTORTION
* @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
* @see ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE
* @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
@@ -4302,10 +4392,10 @@
* <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
* </ul></p>
*
- * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
* <p>The coordinate system is that of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, with
* <code>(0, 0)</code> being the top-left pixel of the active array.</p>
- * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE != OFF</p>
+ * <p>Only available if ACAMERA_STATISTICS_FACE_DETECT_MODE != OFF.
+ * The data representation is <code>int[4]</code>, which maps to <code>(left, top, width, height)</code>.</p>
*
* @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
* @see ACAMERA_STATISTICS_FACE_DETECT_MODE
@@ -4483,6 +4573,80 @@
*/
ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE = // byte (acamera_metadata_enum_android_statistics_lens_shading_map_mode_t)
ACAMERA_STATISTICS_START + 16,
+ /**
+ * <p>A control for selecting whether OIS position information is included in output
+ * result metadata.</p>
+ *
+ * <p>Type: byte (acamera_metadata_enum_android_statistics_ois_data_mode_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * <li>ACaptureRequest</li>
+ * </ul></p>
+ *
+ */
+ ACAMERA_STATISTICS_OIS_DATA_MODE = // byte (acamera_metadata_enum_android_statistics_ois_data_mode_t)
+ ACAMERA_STATISTICS_START + 17,
+ /**
+ * <p>An array of timestamps of OIS samples, in nanoseconds.</p>
+ *
+ * <p>Type: int64[n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * </ul></p>
+ *
+ * <p>The array contains the timestamps of OIS samples. The timestamps are in the same
+ * timebase as and comparable to ACAMERA_SENSOR_TIMESTAMP.</p>
+ *
+ * @see ACAMERA_SENSOR_TIMESTAMP
+ */
+ ACAMERA_STATISTICS_OIS_TIMESTAMPS = // int64[n]
+ ACAMERA_STATISTICS_START + 18,
+ /**
+ * <p>An array of shifts of OIS samples, in x direction.</p>
+ *
+ * <p>Type: float[n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * </ul></p>
+ *
+ * <p>The array contains the amount of shifts in x direction, in pixels, based on OIS samples.
+ * A positive value is a shift from left to right in active array coordinate system. For
+ * example, if the optical center is (1000, 500) in active array coordinates, a shift of
+ * (3, 0) puts the new optical center at (1003, 500).</p>
+ * <p>The number of shifts must match the number of timestamps in
+ * ACAMERA_STATISTICS_OIS_TIMESTAMPS.</p>
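+ * <p>A minimal sketch of pairing these shifts with their timestamps from a capture result;
+ * the y-direction shifts in ACAMERA_STATISTICS_OIS_Y_SHIFTS are read the same way:</p>
+ * <pre><code>static void iterateOisXSamples(const ACameraMetadata* result) {
+ *     ACameraMetadata_const_entry ts, xs;
+ *     if (ACameraMetadata_getConstEntry(result, ACAMERA_STATISTICS_OIS_TIMESTAMPS,
+ *             &ts) != ACAMERA_OK ||
+ *         ACameraMetadata_getConstEntry(result, ACAMERA_STATISTICS_OIS_X_SHIFTS,
+ *             &xs) != ACAMERA_OK || ts.count != xs.count) {
+ *         return; // No OIS data for this capture.
+ *     }
+ *     for (uint32_t i = 0; i != ts.count; i++) {
+ *         // ts.data.i64[i] is in the ACAMERA_SENSOR_TIMESTAMP timebase;
+ *         // xs.data.f[i] is the x shift in pixels at that time.
+ *     }
+ * }
+ * </code></pre>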
+ *
+ * @see ACAMERA_STATISTICS_OIS_TIMESTAMPS
+ */
+ ACAMERA_STATISTICS_OIS_X_SHIFTS = // float[n]
+ ACAMERA_STATISTICS_START + 19,
+ /**
+ * <p>An array of shifts of OIS samples, in y direction.</p>
+ *
+ * <p>Type: float[n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * </ul></p>
+ *
+ * <p>The array contains the amount of shifts in y direction, in pixels, based on OIS samples.
+ * A positive value is a shift from top to bottom in active array coordinate system. For
+ * example, if the optical center is (1000, 500) in active array coordinates, a shift of
+ * (0, 5) puts the new optical center at (1000, 505).</p>
+ * <p>The number of shifts must match the number of timestamps in
+ * ACAMERA_STATISTICS_OIS_TIMESTAMPS.</p>
+ *
+ * @see ACAMERA_STATISTICS_OIS_TIMESTAMPS
+ */
+ ACAMERA_STATISTICS_OIS_Y_SHIFTS = // float[n]
+ ACAMERA_STATISTICS_START + 20,
ACAMERA_STATISTICS_END,
/**
@@ -4555,6 +4719,24 @@
*/
ACAMERA_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES = // byte[n]
ACAMERA_STATISTICS_INFO_START + 7,
+ /**
+ * <p>List of OIS data output modes for ACAMERA_STATISTICS_OIS_DATA_MODE that
+ * are supported by this camera device.</p>
+ *
+ * @see ACAMERA_STATISTICS_OIS_DATA_MODE
+ *
+ * <p>Type: byte[n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>If no OIS data output is available for this camera device, this key will
+ * contain only OFF.</p>
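+ * <p>A sketch of checking for ON support and then requesting OIS data; the helper name is
+ * a placeholder, and the characteristics and request objects are assumed to have been
+ * obtained elsewhere:</p>
+ * <pre><code>// Returns true if OIS data output was successfully requested.
+ * static bool enableOisDataIfSupported(const ACameraMetadata* chars,
+ *         ACaptureRequest* request) {
+ *     ACameraMetadata_const_entry entry;
+ *     if (ACameraMetadata_getConstEntry(chars,
+ *             ACAMERA_STATISTICS_INFO_AVAILABLE_OIS_DATA_MODES, &entry) != ACAMERA_OK) {
+ *         return false;
+ *     }
+ *     for (uint32_t i = 0; i != entry.count; i++) {
+ *         if (entry.data.u8[i] == ACAMERA_STATISTICS_OIS_DATA_MODE_ON) {
+ *             uint8_t mode = ACAMERA_STATISTICS_OIS_DATA_MODE_ON;
+ *             return ACaptureRequest_setEntry_u8(request,
+ *                     ACAMERA_STATISTICS_OIS_DATA_MODE, 1, &mode) == ACAMERA_OK;
+ *         }
+ *     }
+ *     return false;
+ * }
+ * </code></pre>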
+ */
+ ACAMERA_STATISTICS_INFO_AVAILABLE_OIS_DATA_MODES = // byte[n]
+ ACAMERA_STATISTICS_INFO_START + 8,
ACAMERA_STATISTICS_INFO_END,
/**
@@ -4627,6 +4809,8 @@
* of points can be less than max (that is, the request doesn't have to
* always provide a curve with number of points equivalent to
* ACAMERA_TONEMAP_MAX_CURVE_POINTS).</p>
+ * <p>For devices with MONOCHROME capability, only the red channel is used. The green and
+ * blue channels are ignored.</p>
* <p>A few examples, and their corresponding graphical mappings; these
* only specify the red channel and the precision is limited to 4
* digits, for conciseness.</p>
@@ -4832,7 +5016,7 @@
* <p>See the individual level enums for full descriptions of the supported capabilities. The
* ACAMERA_REQUEST_AVAILABLE_CAPABILITIES entry describes the device's capabilities at a
* finer-grain level, if needed. In addition, many controls have their available settings or
- * ranges defined in individual metadata tag entries in this document.</p>
+ * ranges defined in individual entries from {@link ACameraManager_getCameraCharacteristics }.</p>
* <p>Some features are not part of any particular hardware level or capability and must be
* queried separately. These include:</p>
* <ul>
@@ -4853,6 +5037,23 @@
*/
ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL = // byte (acamera_metadata_enum_android_info_supported_hardware_level_t)
ACAMERA_INFO_START,
+ /**
+ * <p>A short string for manufacturer version information about the camera device, such as
+ * ISP hardware, sensors, etc.</p>
+ *
+ * <p>Type: byte</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>This can be used in <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_IMAGE_DESCRIPTION">TAG_IMAGE_DESCRIPTION</a>
+ * in jpeg EXIF. This key may be absent if no version information is available on the
+ * device.</p>
+ */
+ ACAMERA_INFO_VERSION = // byte
+ ACAMERA_INFO_START + 1,
ACAMERA_INFO_END,
/**
@@ -5069,6 +5270,86 @@
ACAMERA_DEPTH_START + 4,
ACAMERA_DEPTH_END,
+ /**
+ * <p>The accuracy of frame timestamp synchronization between physical cameras</p>
+ *
+ * <p>Type: byte (acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>The accuracy of the frame timestamp synchronization determines the physical cameras'
+ * ability to start exposure at the same time. If the sensorSyncType is CALIBRATED,
+ * the physical camera sensors usually run in master-slave mode so that their shutter
+ * time is synchronized. For APPROXIMATE sensorSyncType, the camera sensors usually run in
+ * master-master mode, and there could be offset between their start of exposure.</p>
+ * <p>In both cases, all images generated for a particular capture request still carry the same
+ * timestamps, so that they can be used to look up the matching frame number and
+ * onCaptureStarted callback.</p>
+ */
+ ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE = // byte (acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t)
+ ACAMERA_LOGICAL_MULTI_CAMERA_START + 1,
+ ACAMERA_LOGICAL_MULTI_CAMERA_END,
+
+ /**
+ * <p>Mode of operation for the lens distortion correction block.</p>
+ *
+ * <p>Type: byte (acamera_metadata_enum_android_distortion_correction_mode_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * <li>ACaptureRequest</li>
+ * </ul></p>
+ *
+ * <p>The lens distortion correction block attempts to improve image quality by fixing
+ * radial, tangential, or other geometric aberrations in the camera device's optics. If
+ * available, the ACAMERA_LENS_DISTORTION field documents the lens's distortion parameters.</p>
+ * <p>OFF means no distortion correction is done.</p>
+ * <p>FAST/HIGH_QUALITY both mean camera device determined distortion correction will be
+ * applied. HIGH_QUALITY mode indicates that the camera device will use the highest-quality
+ * correction algorithms, even if it slows down capture rate. FAST means the camera device
+ * will not slow down capture rate when applying correction. FAST may be the same as OFF if
+ * any correction at all would slow down capture rate. Every output stream will have a
+ * similar amount of enhancement applied.</p>
+ * <p>The correction only applies to processed outputs such as YUV, JPEG, or DEPTH16; it is not
+ * applied to any RAW output. Metadata coordinates such as face rectangles or metering
+ * regions are also not affected by correction.</p>
+ * <p>Applications enabling distortion correction need to pay extra attention when converting
+ * image coordinates between corrected output buffers and the sensor array. For example, if
+ * the app supports tap-to-focus and enables correction, it then has to apply the distortion
+ * model described in ACAMERA_LENS_DISTORTION to the image buffer tap coordinates to properly
+ * calculate the tap position on the sensor active array to be used with
+ * ACAMERA_CONTROL_AF_REGIONS. The same applies in reverse to detected face rectangles if
+ * they need to be drawn on top of the corrected output buffers.</p>
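+ * <p>A minimal sketch of requesting device-chosen correction, assuming FAST has already
+ * been verified against ACAMERA_DISTORTION_CORRECTION_AVAILABLE_MODES:</p>
+ * <pre><code>static camera_status_t requestFastDistortionCorrection(ACaptureRequest* request) {
+ *     uint8_t mode = ACAMERA_DISTORTION_CORRECTION_MODE_FAST;
+ *     return ACaptureRequest_setEntry_u8(request, ACAMERA_DISTORTION_CORRECTION_MODE,
+ *             1, &mode);
+ * }
+ * </code></pre>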
+ *
+ * @see ACAMERA_CONTROL_AF_REGIONS
+ * @see ACAMERA_LENS_DISTORTION
+ */
+ ACAMERA_DISTORTION_CORRECTION_MODE = // byte (acamera_metadata_enum_android_distortion_correction_mode_t)
+ ACAMERA_DISTORTION_CORRECTION_START,
+ /**
+ * <p>List of distortion correction modes for ACAMERA_DISTORTION_CORRECTION_MODE that are
+ * supported by this camera device.</p>
+ *
+ * @see ACAMERA_DISTORTION_CORRECTION_MODE
+ *
+ * <p>Type: byte[n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>No device is required to support this API; such devices will always list only 'OFF'.
+ * All devices that support this API will list both FAST and HIGH_QUALITY.</p>
+ */
+ ACAMERA_DISTORTION_CORRECTION_AVAILABLE_MODES = // byte[n]
+ ACAMERA_DISTORTION_CORRECTION_START + 1,
+ ACAMERA_DISTORTION_CORRECTION_END,
+
} acamera_metadata_tag_t;
/**
@@ -5282,6 +5563,21 @@
*/
ACAMERA_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE = 4,
+ /**
+ * <p>An external flash has been turned on.</p>
+ * <p>It informs the camera device that an external flash has been turned on, and that
+ * metering (and continuous focus if active) should be quickly recalculated to account
+ * for the external flash. Otherwise, this mode acts like ON.</p>
+ * <p>When the external flash is turned off, AE mode should be changed to one of the
+ * other available AE modes.</p>
+ * <p>If the camera device supports AE external flash mode, ACAMERA_CONTROL_AE_STATE must
+ * be FLASH_REQUIRED after the camera device finishes the AE scan and the scene is too dark
+ * without flash.</p>
+ *
+ * @see ACAMERA_CONTROL_AE_STATE
+ */
+ ACAMERA_CONTROL_AE_MODE_ON_EXTERNAL_FLASH = 5,
+
} acamera_metadata_enum_android_control_ae_mode_t;
// ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER
@@ -5645,6 +5941,15 @@
*/
ACAMERA_CONTROL_CAPTURE_INTENT_MANUAL = 6,
+ /**
+ * <p>This request is for a motion tracking use case, where
+ * the application will use camera and inertial sensor data to
+ * locate and track objects in the world.</p>
+ * <p>The camera device auto-exposure routine will limit the exposure time
+ * of the camera to no more than 20 milliseconds, to minimize motion blur.</p>
+ */
+ ACAMERA_CONTROL_CAPTURE_INTENT_MOTION_TRACKING = 7,
+
} acamera_metadata_enum_android_control_capture_intent_t;
// ACAMERA_CONTROL_EFFECT_MODE
@@ -6135,6 +6440,20 @@
} acamera_metadata_enum_android_control_enable_zsl_t;
+// ACAMERA_CONTROL_AF_SCENE_CHANGE
+typedef enum acamera_metadata_enum_acamera_control_af_scene_change {
+ /**
+ * <p>Scene change is not detected within the AF region(s).</p>
+ */
+ ACAMERA_CONTROL_AF_SCENE_CHANGE_NOT_DETECTED = 0,
+
+ /**
+ * <p>Scene change is detected within the AF region(s).</p>
+ */
+ ACAMERA_CONTROL_AF_SCENE_CHANGE_DETECTED = 1,
+
+} acamera_metadata_enum_android_control_af_scene_change_t;
+
// ACAMERA_EDGE_MODE
@@ -6157,13 +6476,13 @@
ACAMERA_EDGE_MODE_HIGH_QUALITY = 2,
/**
- * <p>Edge enhancement is applied at different levels for different output streams,
- * based on resolution. Streams at maximum recording resolution (see {@link
- * ACameraDevice_createCaptureSession}) or below have
- * edge enhancement applied, while higher-resolution streams have no edge enhancement
- * applied. The level of edge enhancement for low-resolution streams is tuned so that
- * frame rate is not impacted, and the quality is equal to or better than FAST (since it
- * is only applied to lower-resolution outputs, quality may improve from FAST).</p>
+ * <p>Edge enhancement is applied at different
+ * levels for different output streams, based on resolution. Streams at maximum recording
+ * resolution (see {@link ACameraDevice_createCaptureSession })
+ * or below have edge enhancement applied, while higher-resolution streams have no edge
+ * enhancement applied. The level of edge enhancement for low-resolution streams is tuned
+ * so that frame rate is not impacted, and the quality is equal to or better than FAST
+ * (since it is only applied to lower-resolution outputs, quality may improve from FAST).</p>
* <p>This mode is intended to be used by applications operating in a zero-shutter-lag mode
* with YUV or PRIVATE reprocessing, where the application continuously captures
* high-resolution intermediate buffers into a circular buffer, from which a final image is
@@ -6342,6 +6661,27 @@
} acamera_metadata_enum_android_lens_state_t;
+// ACAMERA_LENS_POSE_REFERENCE
+typedef enum acamera_metadata_enum_acamera_lens_pose_reference {
+ /**
+ * <p>The value of ACAMERA_LENS_POSE_TRANSLATION is relative to the optical center of
+ * the largest camera device facing the same direction as this camera.</p>
+ * <p>This is the default value for API levels before Android P.</p>
+ *
+ * @see ACAMERA_LENS_POSE_TRANSLATION
+ */
+ ACAMERA_LENS_POSE_REFERENCE_PRIMARY_CAMERA = 0,
+
+ /**
+ * <p>The value of ACAMERA_LENS_POSE_TRANSLATION is relative to the position of the
+ * primary gyroscope of this Android device.</p>
+ *
+ * @see ACAMERA_LENS_POSE_TRANSLATION
+ */
+ ACAMERA_LENS_POSE_REFERENCE_GYROSCOPE = 1,
+
+} acamera_metadata_enum_android_lens_pose_reference_t;
+
// ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
typedef enum acamera_metadata_enum_acamera_lens_info_focus_distance_calibration {
@@ -6412,13 +6752,12 @@
/**
* <p>Noise reduction is applied at different levels for different output streams,
- * based on resolution. Streams at maximum recording resolution (see {@link
- * ACameraDevice_createCaptureSession}) or below have noise
- * reduction applied, while higher-resolution streams have MINIMAL (if supported) or no
- * noise reduction applied (if MINIMAL is not supported.) The degree of noise reduction
- * for low-resolution streams is tuned so that frame rate is not impacted, and the quality
- * is equal to or better than FAST (since it is only applied to lower-resolution outputs,
- * quality may improve from FAST).</p>
+ * based on resolution. Streams at maximum recording resolution (see {@link ACameraDevice_createCaptureSession })
+ * or below have noise reduction applied, while higher-resolution streams have MINIMAL (if
+ * supported) or no noise reduction applied (if MINIMAL is not supported.) The degree of
+ * noise reduction for low-resolution streams is tuned so that frame rate is not impacted,
+ * and the quality is equal to or better than FAST (since it is only applied to
+ * lower-resolution outputs, quality may improve from FAST).</p>
* <p>This mode is intended to be used by applications operating in a zero-shutter-lag mode
* with YUV or PRIVATE reprocessing, where the application continuously captures
* high-resolution intermediate buffers into a circular buffer, from which a final image is
@@ -6635,18 +6974,16 @@
* to FAST. Additionally, maximum-resolution images can be captured at >= 10 frames
* per second. Here, 'high resolution' means at least 8 megapixels, or the maximum
* resolution of the device, whichever is smaller.</p>
- * <p>More specifically, this means that at least one output {@link
- * AIMAGE_FORMAT_YUV_420_888} size listed in
- * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS} is larger or equal to the
- * 'high resolution' defined above, and can be captured at at least 20 fps.
- * For the largest {@link AIMAGE_FORMAT_YUV_420_888} size listed in
- * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}, camera device can capture this
- * size for at least 10 frames per second.
- * Also the ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES entry lists at least one FPS range
- * where the minimum FPS is >= 1 / minimumFrameDuration for the largest YUV_420_888 size.</p>
- * <p>If the device supports the {@link AIMAGE_FORMAT_RAW10}, {@link
- * AIMAGE_FORMAT_RAW12}, then those can also be captured at the same rate
- * as the maximum-size YUV_420_888 resolution is.</p>
+ * <p>More specifically, this means that at least one output {@link AIMAGE_FORMAT_YUV_420_888 } size listed in
+ * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS }
+ * is larger or equal to the 'high resolution' defined above, and can be captured at at
+ * least 20 fps. For the largest {@link AIMAGE_FORMAT_YUV_420_888 } size listed in
+ * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS },
+ * camera device can capture this size for at least 10 frames per second. Also the
+ * ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES entry lists at least one FPS range where
+ * the minimum FPS is >= 1 / minimumFrameDuration for the largest YUV_420_888 size.</p>
+ * <p>If the device supports the {@link AIMAGE_FORMAT_RAW10 }, {@link AIMAGE_FORMAT_RAW12 }, then those can also be
+ * captured at the same rate as the maximum-size YUV_420_888 resolution is.</p>
* <p>In addition, the ACAMERA_SYNC_MAX_LATENCY field is guaranted to have a value between 0
* and 4, inclusive. ACAMERA_CONTROL_AE_LOCK_AVAILABLE and ACAMERA_CONTROL_AWB_LOCK_AVAILABLE
* are also guaranteed to be <code>true</code> so burst capture with these two locks ON yields
@@ -6663,42 +7000,114 @@
* <p>The camera device can produce depth measurements from its field of view.</p>
* <p>This capability requires the camera device to support the following:</p>
* <ul>
- * <li>{@link AIMAGE_FORMAT_DEPTH16} is supported as an output format.</li>
- * <li>{@link AIMAGE_FORMAT_DEPTH_POINT_CLOUD} is optionally supported as an
- * output format.</li>
- * <li>This camera device, and all camera devices with the same ACAMERA_LENS_FACING,
- * will list the following calibration entries in {@link ACameraMetadata} from both
- * {@link ACameraManager_getCameraCharacteristics} and
- * {@link ACameraCaptureSession_captureCallback_result}:<ul>
+ * <li>{@link AIMAGE_FORMAT_DEPTH16 } is supported as
+ * an output format.</li>
+ * <li>{@link AIMAGE_FORMAT_DEPTH_POINT_CLOUD } is
+ * optionally supported as an output format.</li>
+ * <li>This camera device, and all camera devices with the same ACAMERA_LENS_FACING, will
+ * list the following calibration metadata entries in both {@link ACameraManager_getCameraCharacteristics }
+ * and {@link ACameraCaptureSession_captureCallback_result }:<ul>
* <li>ACAMERA_LENS_POSE_TRANSLATION</li>
* <li>ACAMERA_LENS_POSE_ROTATION</li>
* <li>ACAMERA_LENS_INTRINSIC_CALIBRATION</li>
- * <li>ACAMERA_LENS_RADIAL_DISTORTION</li>
+ * <li>ACAMERA_LENS_DISTORTION</li>
* </ul>
* </li>
* <li>The ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE entry is listed by this device.</li>
+ * <li>As of Android P, the ACAMERA_LENS_POSE_REFERENCE entry is listed by this device.</li>
* <li>A LIMITED camera with only the DEPTH_OUTPUT capability does not have to support
* normal YUV_420_888, JPEG, and PRIV-format outputs. It only has to support the DEPTH16
* format.</li>
* </ul>
* <p>Generally, depth output operates at a slower frame rate than standard color capture,
* so the DEPTH16 and DEPTH_POINT_CLOUD formats will commonly have a stall duration that
- * should be accounted for (see
- * {@link ACAMERA_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS}).
+ * should be accounted for (see {@link ACAMERA_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS }).
* On a device that supports both depth and color-based output, to enable smooth preview,
* using a repeating burst is recommended, where a depth-output target is only included
* once every N frames, where N is the ratio between preview output rate and depth output
* rate, including depth stall time.</p>
*
* @see ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE
+ * @see ACAMERA_LENS_DISTORTION
* @see ACAMERA_LENS_FACING
* @see ACAMERA_LENS_INTRINSIC_CALIBRATION
+ * @see ACAMERA_LENS_POSE_REFERENCE
* @see ACAMERA_LENS_POSE_ROTATION
* @see ACAMERA_LENS_POSE_TRANSLATION
- * @see ACAMERA_LENS_RADIAL_DISTORTION
*/
ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT = 8,
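
As an illustrative aside on the stall-duration note above, the C++ sketch below looks up the advertised stall duration for a DEPTH16 output of a given size, which is what an application would use to decide how often a depth target can be folded into a repeating burst. It assumes each ACAMERA_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS entry is an (format, width, height, durationNs) int64 tuple and that the format values match AIMAGE_FORMAT_DEPTH16; treat both as assumptions rather than guarantees.

#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>
#include <media/NdkImage.h>

// Hedged sketch: find the stall duration (ns) for a DEPTH16 stream of size w x h,
// or 0 if none is listed. Matching entry.data.i64[i] against AIMAGE_FORMAT_DEPTH16
// is an assumption about how the entry encodes formats.
static int64_t depth16StallDurationNs(const ACameraMetadata* chars, int32_t w, int32_t h) {
    ACameraMetadata_const_entry entry;
    if (ACameraMetadata_getConstEntry(chars, ACAMERA_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS,
                                      &entry) != ACAMERA_OK) {
        return 0;
    }
    for (uint32_t i = 0; i + 3 < entry.count; i += 4) {
        if (entry.data.i64[i] == AIMAGE_FORMAT_DEPTH16 &&
                entry.data.i64[i + 1] == w && entry.data.i64[i + 2] == h) {
            return entry.data.i64[i + 3];
        }
    }
    return 0;
}
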
+ /**
+ * <p>The camera device supports the MOTION_TRACKING value for
+ * ACAMERA_CONTROL_CAPTURE_INTENT, which limits the maximum exposure time to 20 ms.</p>
+ * <p>This limits the motion blur of captured images, resulting in better image-tracking
+ * results for use cases such as image stabilization or augmented reality.</p>
+ *
+ * @see ACAMERA_CONTROL_CAPTURE_INTENT
+ */
+ ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING = 10,
+
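
For context, a minimal sketch of opting into this behavior from the NDK: it sets ACAMERA_CONTROL_CAPTURE_INTENT on an existing request, assuming the companion ACAMERA_CONTROL_CAPTURE_INTENT_MOTION_TRACKING enum value introduced alongside this capability and a request created with ACameraDevice_createCaptureRequest().

#include <camera/NdkCaptureRequest.h>
#include <camera/NdkCameraMetadataTags.h>

// Hedged sketch: request the MOTION_TRACKING capture intent so exposure time is
// capped at 20 ms. Only valid on devices advertising the MOTION_TRACKING capability.
static camera_status_t useMotionTrackingIntent(ACaptureRequest* request) {
    uint8_t intent = ACAMERA_CONTROL_CAPTURE_INTENT_MOTION_TRACKING;  // assumed enum value
    return ACaptureRequest_setEntry_u8(request, ACAMERA_CONTROL_CAPTURE_INTENT, 1, &intent);
}
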
+ /**
+ * <p>The camera device is a logical camera backed by two or more physical cameras that are
+ * also exposed to the application.</p>
+ * <p>A camera application shouldn't assume that there is at most one rear camera and one front
+ * camera in the system. For an application that switches between front and back cameras,
+ * the recommendation is to switch between the first rear camera and the first front
+ * camera in the list of supported camera devices.</p>
+ * <p>This capability requires the camera device to support the following:</p>
+ * <ul>
+ * <li>This camera device must list the following static metadata entries in <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html">CameraCharacteristics</a>:<ul>
+ * <li>android.logicalMultiCamera.physicalIds</li>
+ * <li>ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE</li>
+ * </ul>
+ * </li>
+ * <li>The underlying physical cameras' static metadata must list the following entries,
+ * so that the application can correlate pixels from the physical streams:<ul>
+ * <li>ACAMERA_LENS_POSE_REFERENCE</li>
+ * <li>ACAMERA_LENS_POSE_ROTATION</li>
+ * <li>ACAMERA_LENS_POSE_TRANSLATION</li>
+ * <li>ACAMERA_LENS_INTRINSIC_CALIBRATION</li>
+ * <li>ACAMERA_LENS_DISTORTION</li>
+ * </ul>
+ * </li>
+ * <li>The SENSOR_INFO_TIMESTAMP_SOURCE of the logical device and physical devices must be
+ * the same.</li>
+ * <li>The logical camera device must be a LIMITED or higher device.</li>
+ * </ul>
+ * <p>Both the logical camera device and its underlying physical devices support the
+ * mandatory stream combinations required for their device levels.</p>
+ * <p>Additionally, for each guaranteed stream combination, the logical camera supports:</p>
+ * <ul>
+ * <li>Replacing one
+ * logical {@link AIMAGE_FORMAT_YUV_420_888 YUV_420_888}
+ * or raw stream with two physical streams of the same size and format, each from a
+ * separate physical camera, given that the size and format are supported by both
+ * physical cameras.</li>
+ * <li>If the logical camera doesn't advertise RAW capability, but the underlying physical
+ * cameras do, the logical camera will support guaranteed stream combinations for RAW
+ * capability, except that the RAW streams will be physical streams, each from a separate
+ * physical camera. This is usually the case when the physical cameras have different
+ * sensor sizes.</li>
+ * </ul>
+ * <p>Using physical streams in place of a logical stream of the same size and format will
+ * not slow down the frame rate of the capture, as long as the minimum frame durations
+ * of the physical and logical streams are the same.</p>
+ *
+ * @see ACAMERA_LENS_DISTORTION
+ * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
+ * @see ACAMERA_LENS_POSE_REFERENCE
+ * @see ACAMERA_LENS_POSE_ROTATION
+ * @see ACAMERA_LENS_POSE_TRANSLATION
+ * @see ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE
+ */
+ ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA = 11,
+
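
A short C++ sketch of detecting this capability from the static characteristics; it assumes the application already holds an ACameraManager and a camera ID string, and it keeps error handling minimal.

#include <camera/NdkCameraManager.h>
#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>

// Hedged sketch: returns true when the camera lists LOGICAL_MULTI_CAMERA among
// its available capabilities.
static bool isLogicalMultiCamera(ACameraManager* manager, const char* cameraId) {
    ACameraMetadata* chars = nullptr;
    if (ACameraManager_getCameraCharacteristics(manager, cameraId, &chars) != ACAMERA_OK) {
        return false;
    }
    ACameraMetadata_const_entry entry;
    bool found = false;
    if (ACameraMetadata_getConstEntry(chars, ACAMERA_REQUEST_AVAILABLE_CAPABILITIES,
                                      &entry) == ACAMERA_OK) {
        for (uint32_t i = 0; i < entry.count; i++) {
            if (entry.data.u8[i] ==
                    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
                found = true;
                break;
            }
        }
    }
    ACameraMetadata_free(chars);
    return found;
}
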
+ /**
+ * <p>The camera device is a monochrome camera that doesn't contain a color filter array,
+ * and the pixel values on U and V planes are all 128.</p>
+ */
+ ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME = 12,
+
} acamera_metadata_enum_android_request_available_capabilities_t;
@@ -6918,8 +7327,8 @@
/**
* <p>Timestamps from ACAMERA_SENSOR_TIMESTAMP are in the same timebase as
- * <a href="https://developer.android.com/reference/android/os/SystemClock.html#elapsedRealtimeNanos">elapsedRealtimeNanos</a>
- * (or CLOCK_BOOTTIME), and they can be compared to other timestamps using that base.</p>
+ * <a href="https://developer.android.com/reference/android/os/SystemClock.html#elapsedRealtimeNanos">SystemClock#elapsedRealtimeNanos</a>,
+ * and they can be compared to other timestamps using that base.</p>
*
* @see ACAMERA_SENSOR_TIMESTAMP
*/
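
To make the shared timebase concrete, here is a small C++ sketch that computes the delay between the sensor start-of-exposure timestamp in a capture result and "now" on CLOCK_BOOTTIME; it assumes the device reports ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE as REALTIME, since otherwise the two clocks are not comparable.

#include <stdint.h>
#include <time.h>
#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>

// Hedged sketch: latency in nanoseconds between start of exposure and the time this
// function runs, using the same CLOCK_BOOTTIME base as ACAMERA_SENSOR_TIMESTAMP.
static int64_t captureLatencyNs(const ACameraMetadata* result) {
    ACameraMetadata_const_entry entry;
    if (ACameraMetadata_getConstEntry(result, ACAMERA_SENSOR_TIMESTAMP, &entry) != ACAMERA_OK ||
            entry.count < 1) {
        return -1;
    }
    struct timespec ts;
    clock_gettime(CLOCK_BOOTTIME, &ts);
    int64_t nowNs = static_cast<int64_t>(ts.tv_sec) * 1000000000LL + ts.tv_nsec;
    return nowNs - entry.data.i64[0];
}
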
@@ -7030,6 +7439,26 @@
} acamera_metadata_enum_android_statistics_lens_shading_map_mode_t;
+// ACAMERA_STATISTICS_OIS_DATA_MODE
+typedef enum acamera_metadata_enum_acamera_statistics_ois_data_mode {
+ /**
+ * <p>Do not include OIS data in the capture result.</p>
+ */
+ ACAMERA_STATISTICS_OIS_DATA_MODE_OFF = 0,
+
+ /**
+ * <p>Include OIS data in the capture result.</p>
+ * <p>ACAMERA_STATISTICS_OIS_TIMESTAMPS, ACAMERA_STATISTICS_OIS_X_SHIFTS,
+ * and ACAMERA_STATISTICS_OIS_Y_SHIFTS provide OIS data in the output result metadata.</p>
+ *
+ * @see ACAMERA_STATISTICS_OIS_TIMESTAMPS
+ * @see ACAMERA_STATISTICS_OIS_X_SHIFTS
+ * @see ACAMERA_STATISTICS_OIS_Y_SHIFTS
+ */
+ ACAMERA_STATISTICS_OIS_DATA_MODE_ON = 1,
+
+} acamera_metadata_enum_android_statistics_ois_data_mode_t;
+
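
A hedged C++ sketch of using this mode end to end: one helper turns OIS reporting on for a request, and another reads the per-frame OIS samples back out of a capture result. It assumes the device actually lists ON among its available OIS data modes and that the result contains the three OIS arrays named in the comment above.

#include <inttypes.h>
#include <stdio.h>
#include <camera/NdkCaptureRequest.h>
#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>

// Hedged sketch: request OIS data in capture results.
static void enableOisData(ACaptureRequest* request) {
    uint8_t mode = ACAMERA_STATISTICS_OIS_DATA_MODE_ON;
    ACaptureRequest_setEntry_u8(request, ACAMERA_STATISTICS_OIS_DATA_MODE, 1, &mode);
}

// Hedged sketch: print the OIS samples carried in a capture result, if present.
static void dumpOisShifts(const ACameraMetadata* result) {
    ACameraMetadata_const_entry ts, xs, ys;
    if (ACameraMetadata_getConstEntry(result, ACAMERA_STATISTICS_OIS_TIMESTAMPS, &ts) != ACAMERA_OK ||
        ACameraMetadata_getConstEntry(result, ACAMERA_STATISTICS_OIS_X_SHIFTS, &xs) != ACAMERA_OK ||
        ACameraMetadata_getConstEntry(result, ACAMERA_STATISTICS_OIS_Y_SHIFTS, &ys) != ACAMERA_OK) {
        return;  // OIS data not included in this result
    }
    for (uint32_t i = 0; i < ts.count && i < xs.count && i < ys.count; i++) {
        printf("OIS sample %u: t=%" PRId64 " ns shift=(%f, %f)\n",
               i, ts.data.i64[i], xs.data.f[i], ys.data.f[i]);
    }
}
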
// ACAMERA_TONEMAP_MODE
@@ -7104,7 +7533,7 @@
* <p>This camera device does not have enough capabilities to qualify as a <code>FULL</code> device or
* better.</p>
* <p>Only the stream configurations listed in the <code>LEGACY</code> and <code>LIMITED</code> tables in the
- * {@link ACameraDevice_createCaptureSession} documentation are guaranteed to be supported.</p>
+ * {@link ACameraDevice_createCaptureSession createCaptureSession} documentation are guaranteed to be supported.</p>
* <p>All <code>LIMITED</code> devices support the <code>BACKWARDS_COMPATIBLE</code> capability, indicating basic
* support for color image capture. The only exception is that the device may
* alternatively support only the <code>DEPTH_OUTPUT</code> capability, if it can only output depth
@@ -7130,7 +7559,7 @@
/**
* <p>This camera device is capable of supporting advanced imaging applications.</p>
* <p>The stream configurations listed in the <code>FULL</code>, <code>LEGACY</code> and <code>LIMITED</code> tables in the
- * {@link ACameraDevice_createCaptureSession} documentation are guaranteed to be supported.</p>
+ * {@link ACameraDevice_createCaptureSession createCaptureSession} documentation are guaranteed to be supported.</p>
 * <p>A <code>FULL</code> device will support the following capabilities:</p>
* <ul>
* <li><code>BURST_CAPTURE</code> capability (ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains
@@ -7157,8 +7586,7 @@
/**
* <p>This camera device is running in backward compatibility mode.</p>
- * <p>Only the stream configurations listed in the <code>LEGACY</code> table in the {@link
- * ACameraDevice_createCaptureSession} documentation are supported.</p>
+ * <p>Only the stream configurations listed in the <code>LEGACY</code> table in the {@link ACameraDevice_createCaptureSession createCaptureSession} documentation are supported.</p>
* <p>A <code>LEGACY</code> device does not support per-frame control, manual sensor control, manual
* post-processing, arbitrary cropping regions, and has relaxed performance constraints.
* No additional capabilities beyond <code>BACKWARD_COMPATIBLE</code> will ever be listed by a
@@ -7179,9 +7607,7 @@
* <p>This camera device is capable of YUV reprocessing and RAW data capture, in addition to
* FULL-level capabilities.</p>
* <p>The stream configurations listed in the <code>LEVEL_3</code>, <code>RAW</code>, <code>FULL</code>, <code>LEGACY</code> and
- * <code>LIMITED</code> tables in the {@link
- * ACameraDevice_createCaptureSession}
- * documentation are guaranteed to be supported.</p>
+ * <code>LIMITED</code> tables in the {@link ACameraDevice_createCaptureSession createCaptureSession} documentation are guaranteed to be supported.</p>
* <p>The following additional capabilities are guaranteed to be supported:</p>
* <ul>
* <li><code>YUV_REPROCESSING</code> capability (ACAMERA_REQUEST_AVAILABLE_CAPABILITIES contains
@@ -7194,6 +7620,37 @@
*/
ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_3 = 3,
+ /**
+ * <p>This camera device is backed by an external camera connected to this Android device.</p>
+ * <p>The device has capabilities identical to a LIMITED level device, with the following
+ * exceptions:</p>
+ * <ul>
+ * <li>The device may not report lens/sensor-related information such as<ul>
+ * <li>ACAMERA_LENS_FOCAL_LENGTH</li>
+ * <li>ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE</li>
+ * <li>ACAMERA_SENSOR_INFO_PHYSICAL_SIZE</li>
+ * <li>ACAMERA_SENSOR_INFO_WHITE_LEVEL</li>
+ * <li>ACAMERA_SENSOR_BLACK_LEVEL_PATTERN</li>
+ * <li>ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT</li>
+ * <li>ACAMERA_SENSOR_ROLLING_SHUTTER_SKEW</li>
+ * </ul>
+ * </li>
+ * <li>The device will report 0 for ACAMERA_SENSOR_ORIENTATION</li>
+ * <li>The device provides weaker guarantees of a stable frame rate, as the frame rate partly
+ * depends on the external camera being used.</li>
+ * </ul>
+ *
+ * @see ACAMERA_LENS_FOCAL_LENGTH
+ * @see ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE
+ * @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
+ * @see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
+ * @see ACAMERA_SENSOR_INFO_PHYSICAL_SIZE
+ * @see ACAMERA_SENSOR_INFO_WHITE_LEVEL
+ * @see ACAMERA_SENSOR_ORIENTATION
+ * @see ACAMERA_SENSOR_ROLLING_SHUTTER_SKEW
+ */
+ ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL = 4,
+
} acamera_metadata_enum_android_info_supported_hardware_level_t;
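
Since EXTERNAL devices may omit several lens/sensor entries, a quick C++ sketch of reading the hardware level so an application can branch on it; the LIMITED fallback when the entry is missing is a conservative assumption, not specified behavior.

#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>

// Hedged sketch: read ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL from the static
// characteristics. Callers seeing EXTERNAL should not rely on lens/sensor entries
// or on ACAMERA_SENSOR_ORIENTATION being meaningful.
static uint8_t supportedHardwareLevel(const ACameraMetadata* chars) {
    ACameraMetadata_const_entry entry;
    if (ACameraMetadata_getConstEntry(chars, ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL,
                                      &entry) != ACAMERA_OK || entry.count < 1) {
        return ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;  // assumed conservative fallback
    }
    return entry.data.u8[0];
}
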
@@ -7281,6 +7738,48 @@
} acamera_metadata_enum_android_depth_depth_is_exclusive_t;
+// ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE
+typedef enum acamera_metadata_enum_acamera_logical_multi_camera_sensor_sync_type {
+ /**
+ * <p>A software mechanism is used to synchronize between the physical cameras. As a result,
+ * the timestamp of an image from a physical stream is only an approximation of the
+ * image sensor start-of-exposure time.</p>
+ */
+ ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE_APPROXIMATE = 0,
+
+ /**
+ * <p>The camera device supports frame timestamp synchronization at the hardware level,
+ * and the timestamp of a physical stream image accurately reflects its
+ * start-of-exposure time.</p>
+ */
+ ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE_CALIBRATED = 1,
+
+} acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t;
+
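
A small C++ sketch of consuming this entry: it tells the application whether physical-stream timestamps can be treated as exact start-of-exposure times (CALIBRATED) or only as approximations (APPROXIMATE). Treating an absent entry as "not a logical multi-camera" is an assumption made here for simplicity.

#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCameraMetadataTags.h>

// Hedged sketch: true only when the logical camera reports hardware-level sensor sync.
static bool hasCalibratedSensorSync(const ACameraMetadata* chars) {
    ACameraMetadata_const_entry entry;
    if (ACameraMetadata_getConstEntry(chars, ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE,
                                      &entry) != ACAMERA_OK || entry.count < 1) {
        return false;  // entry absent: assume no calibrated sync
    }
    return entry.data.u8[0] == ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE_CALIBRATED;
}
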
+
+// ACAMERA_DISTORTION_CORRECTION_MODE
+typedef enum acamera_metadata_enum_acamera_distortion_correction_mode {
+ /**
+ * <p>No distortion correction is applied.</p>
+ */
+ ACAMERA_DISTORTION_CORRECTION_MODE_OFF = 0,
+
+ /**
+ * <p>Lens distortion correction is applied without reducing frame rate
+ * relative to sensor output. It may be the same as OFF if distortion correction would
+ * reduce the frame rate relative to sensor output.</p>
+ */
+ ACAMERA_DISTORTION_CORRECTION_MODE_FAST = 1,
+
+ /**
+ * <p>High-quality distortion correction is applied, at the cost of
+ * possibly reduced frame rate relative to sensor output.</p>
+ */
+ ACAMERA_DISTORTION_CORRECTION_MODE_HIGH_QUALITY = 2,
+
+} acamera_metadata_enum_android_distortion_correction_mode_t;
+
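
A brief C++ sketch of selecting one of these modes on a still-capture request, accepting a possible frame-rate cost for HIGH_QUALITY; it assumes the mode is listed in ACAMERA_DISTORTION_CORRECTION_AVAILABLE_MODES for the device, which the caller should verify first.

#include <camera/NdkCaptureRequest.h>
#include <camera/NdkCameraMetadataTags.h>

// Hedged sketch: ask for high-quality lens distortion correction on a request.
static void requestHighQualityDistortionCorrection(ACaptureRequest* request) {
    uint8_t mode = ACAMERA_DISTORTION_CORRECTION_MODE_HIGH_QUALITY;
    ACaptureRequest_setEntry_u8(request, ACAMERA_DISTORTION_CORRECTION_MODE, 1, &mode);
}
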
+
#endif /* __ANDROID_API__ >= 24 */
__END_DECLS
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index c62ba2c..4961ce3 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -305,6 +305,58 @@
#endif /* __ANDROID_API__ >= 24 */
+#if __ANDROID_API__ >= 28
+
+/**
+ * Associate an arbitrary user context pointer with the {@link ACaptureRequest}
+ *
+ * This method is useful for the application to identify the capture request in capture session
+ * callbacks. The context is NULL for a newly created request.
+ * {@link ACameraOutputTarget_free} will not free the context. Also, calling this method twice
+ * will not cause the previous context to be freed.
+ * Note that calling this method after the request has been sent to the capture session will not
+ * change the context pointer seen in the capture callbacks.
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param context the user context pointer to be associated with this capture request.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL.</li></ul>
+ */
+camera_status_t ACaptureRequest_setUserContext(
+ ACaptureRequest* request, void* context);
+
+/**
+ * Get the user context pointer of the {@link ACaptureRequest}
+ *
+ * This method is useful for the application to identify the capture request in capture session
+ * callbacks. The context is NULL for a newly created request.
+ *
+ * @param request the {@link ACaptureRequest} of interest.
+ * @param context the user context pointer of this capture request.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request is NULL.</li></ul>
+ */
+camera_status_t ACaptureRequest_getUserContext(
+ const ACaptureRequest* request, /*out*/void** context);
+
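
The pair of declarations above is easiest to see in use: the C++ sketch below tags a request with an application-defined struct and recovers it later, for example inside a capture-completed callback. FrameTag is a hypothetical application type; the request does not take ownership of the pointer, so its lifetime is managed by the application.

#include <camera/NdkCaptureRequest.h>

struct FrameTag { int sequence; };  // hypothetical per-request bookkeeping

// Hedged sketch: attach the tag to the request.
static void tagRequest(ACaptureRequest* request, FrameTag* tag) {
    ACaptureRequest_setUserContext(request, tag);
}

// Hedged sketch: recover the tag from the request seen in a callback.
static FrameTag* tagFromRequest(const ACaptureRequest* request) {
    void* context = nullptr;
    if (ACaptureRequest_getUserContext(request, &context) != ACAMERA_OK) {
        return nullptr;
    }
    return static_cast<FrameTag*>(context);
}
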
+/**
+ * Create a copy of the input {@link ACaptureRequest}.
+ *
+ * <p>The returned ACaptureRequest must be freed by the application with {@link ACaptureRequest_free}
+ * after the application is done using it.</p>
+ *
+ * @param src the input {@link ACaptureRequest} to be copied.
+ *
+ * @return a valid ACaptureRequest pointer or NULL if the input request cannot be copied.
+ */
+ACaptureRequest* ACaptureRequest_copy(const ACaptureRequest* src);
+
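
A typical use of ACaptureRequest_copy is to derive a slightly different request without mutating the original; the C++ sketch below copies a request and switches it to high-quality noise reduction purely as an illustrative tweak, freeing the copy on failure.

#include <camera/NdkCaptureRequest.h>
#include <camera/NdkCameraMetadataTags.h>

// Hedged sketch: clone a request and adjust one entry on the clone only.
static ACaptureRequest* makeHighQualityVariant(const ACaptureRequest* src) {
    ACaptureRequest* copy = ACaptureRequest_copy(src);
    if (copy == nullptr) {
        return nullptr;
    }
    uint8_t nr = ACAMERA_NOISE_REDUCTION_MODE_HIGH_QUALITY;  // illustrative choice
    if (ACaptureRequest_setEntry_u8(copy, ACAMERA_NOISE_REDUCTION_MODE, 1, &nr) != ACAMERA_OK) {
        ACaptureRequest_free(copy);
        return nullptr;
    }
    return copy;
}
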
+#endif /* __ANDROID_API__ >= 28 */
+
__END_DECLS
#endif /* _NDK_CAPTURE_REQUEST_H */
diff --git a/camera/ndk/libcamera2ndk.map.txt b/camera/ndk/libcamera2ndk.map.txt
index 41bb22b..d179aa0 100644
--- a/camera/ndk/libcamera2ndk.map.txt
+++ b/camera/ndk/libcamera2ndk.map.txt
@@ -6,9 +6,11 @@
ACameraCaptureSession_getDevice;
ACameraCaptureSession_setRepeatingRequest;
ACameraCaptureSession_stopRepeating;
+ ACameraCaptureSession_updateSharedOutput;
ACameraDevice_close;
ACameraDevice_createCaptureRequest;
ACameraDevice_createCaptureSession;
+ ACameraDevice_createCaptureSessionWithSessionParameters;
ACameraDevice_getId;
ACameraManager_create;
ACameraManager_delete;
@@ -25,9 +27,11 @@
ACameraOutputTarget_create;
ACameraOutputTarget_free;
ACaptureRequest_addTarget;
+ ACaptureRequest_copy;
ACaptureRequest_free;
ACaptureRequest_getAllTags;
ACaptureRequest_getConstEntry;
+ ACaptureRequest_getUserContext;
ACaptureRequest_removeTarget;
ACaptureRequest_setEntry_double;
ACaptureRequest_setEntry_float;
@@ -35,11 +39,15 @@
ACaptureRequest_setEntry_i64;
ACaptureRequest_setEntry_rational;
ACaptureRequest_setEntry_u8;
+ ACaptureRequest_setUserContext;
ACaptureSessionOutputContainer_add;
ACaptureSessionOutputContainer_create;
ACaptureSessionOutputContainer_free;
ACaptureSessionOutputContainer_remove;
ACaptureSessionOutput_create;
+ ACaptureSessionSharedOutput_create;
+ ACaptureSessionSharedOutput_add;
+ ACaptureSessionSharedOutput_remove;
ACaptureSessionOutput_free;
local:
*;
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 51d9214..1de7013 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -198,9 +198,11 @@
virtual binder::Status onResultReceived(const CameraMetadata& metadata,
- const CaptureResultExtras& resultExtras) {
+ const CaptureResultExtras& resultExtras,
+ const std::vector<PhysicalCaptureResultInfo>& physicalResultInfos) {
(void) metadata;
(void) resultExtras;
+ (void) physicalResultInfos;
Mutex::Autolock l(mLock);
mLastStatus = SENT_RESULT;
mStatusesHit.push_back(mLastStatus);
@@ -317,6 +319,9 @@
EXPECT_TRUE(res.isOk()) << res;
EXPECT_EQ(numCameras, static_cast<const int>(statuses.size()));
+ for (const auto &it : statuses) {
+ listener->onStatusChanged(it.status, String16(it.cameraId));
+ }
for (int32_t i = 0; i < numCameras; i++) {
String16 cameraId = String16(String8::format("%d", i));
@@ -421,6 +426,9 @@
serviceListener = new TestCameraServiceListener();
std::vector<hardware::CameraStatus> statuses;
service->addListener(serviceListener, &statuses);
+ for (const auto &it : statuses) {
+ serviceListener->onStatusChanged(it.status, String16(it.cameraId));
+ }
service->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_BACKWARD_COMPATIBLE,
&numCameras);
}
@@ -439,8 +447,9 @@
ASSERT_NOT_NULL(service);
EXPECT_TRUE(serviceListener->waitForNumCameras(numCameras));
for (int32_t i = 0; i < numCameras; i++) {
+ String8 cameraId8 = String8::format("%d", i);
// Make sure we're available, or skip device tests otherwise
- String16 cameraId(String8::format("%d",i));
+ String16 cameraId(cameraId8);
int32_t s = serviceListener->getStatus(cameraId);
EXPECT_EQ(hardware::ICameraServiceListener::STATUS_PRESENT, s);
if (s != hardware::ICameraServiceListener::STATUS_PRESENT) {
@@ -476,7 +485,8 @@
res = device->createStream(output, &streamId);
EXPECT_TRUE(res.isOk()) << res;
EXPECT_LE(0, streamId);
- res = device->endConfigure(/*isConstrainedHighSpeed*/ false);
+ CameraMetadata sessionParams;
+ res = device->endConfigure(/*isConstrainedHighSpeed*/ false, sessionParams);
EXPECT_TRUE(res.isOk()) << res;
EXPECT_FALSE(callbacks->hadError());
@@ -487,7 +497,7 @@
EXPECT_TRUE(res.isOk()) << res;
hardware::camera2::CaptureRequest request;
- request.mMetadata = requestTemplate;
+ request.mPhysicalCameraSettings.push_back({cameraId8.string(), requestTemplate});
request.mSurfaceList.add(surface);
request.mIsReprocess = false;
int64_t lastFrameNumber = 0;
@@ -514,7 +524,7 @@
/*out*/&requestTemplate);
EXPECT_TRUE(res.isOk()) << res;
hardware::camera2::CaptureRequest request2;
- request2.mMetadata = requestTemplate;
+ request2.mPhysicalCameraSettings.push_back({cameraId8.string(), requestTemplate});
request2.mSurfaceList.add(surface);
request2.mIsReprocess = false;
callbacks->clearStatus();
@@ -547,10 +557,10 @@
EXPECT_TRUE(res.isOk()) << res;
android::hardware::camera2::CaptureRequest request3;
android::hardware::camera2::CaptureRequest request4;
- request3.mMetadata = requestTemplate;
+ request3.mPhysicalCameraSettings.push_back({cameraId8.string(), requestTemplate});
request3.mSurfaceList.add(surface);
request3.mIsReprocess = false;
- request4.mMetadata = requestTemplate2;
+ request4.mPhysicalCameraSettings.push_back({cameraId8.string(), requestTemplate2});
request4.mSurfaceList.add(surface);
request4.mIsReprocess = false;
std::vector<hardware::camera2::CaptureRequest> requestList;
@@ -574,7 +584,7 @@
EXPECT_TRUE(res.isOk()) << res;
res = device->deleteStream(streamId);
EXPECT_TRUE(res.isOk()) << res;
- res = device->endConfigure(/*isConstrainedHighSpeed*/ false);
+ res = device->endConfigure(/*isConstrainedHighSpeed*/ false, sessionParams);
EXPECT_TRUE(res.isOk()) << res;
sleep(/*second*/1); // allow some time for errors to show up, if any
@@ -584,3 +594,62 @@
}
};
+
+TEST_F(CameraClientBinderTest, CheckBinderCaptureRequest) {
+ sp<CaptureRequest> requestOriginal, requestParceled;
+ sp<IGraphicBufferProducer> gbProducer;
+ sp<IGraphicBufferConsumer> gbConsumer;
+ BufferQueue::createBufferQueue(&gbProducer, &gbConsumer);
+ sp<Surface> surface(new Surface(gbProducer, /*controlledByApp*/false));
+ Vector<sp<Surface>> surfaceList;
+ surfaceList.push_back(surface);
+ std::string physicalDeviceId1 = "0";
+ std::string physicalDeviceId2 = "1";
+ CameraMetadata physicalDeviceSettings1, physicalDeviceSettings2;
+ uint8_t intent1 = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+ uint8_t intent2 = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+ EXPECT_EQ(OK, physicalDeviceSettings1.update(ANDROID_CONTROL_CAPTURE_INTENT, &intent1, 1));
+ EXPECT_EQ(OK, physicalDeviceSettings2.update(ANDROID_CONTROL_CAPTURE_INTENT, &intent2, 1));
+
+ requestParceled = new CaptureRequest();
+ Parcel p;
+ EXPECT_TRUE(requestParceled->readFromParcel(&p) != OK);
+ p.writeInt32(0);
+ p.setDataPosition(0);
+ EXPECT_TRUE(requestParceled->readFromParcel(&p) != OK);
+ p.freeData();
+ p.writeInt32(-1);
+ p.setDataPosition(0);
+ EXPECT_TRUE(requestParceled->readFromParcel(&p) != OK);
+ p.freeData();
+ p.writeInt32(1);
+ p.setDataPosition(0);
+ EXPECT_TRUE(requestParceled->readFromParcel(&p) != OK);
+
+ requestOriginal = new CaptureRequest();
+ requestOriginal->mPhysicalCameraSettings.push_back({physicalDeviceId1,
+ physicalDeviceSettings1});
+ requestOriginal->mPhysicalCameraSettings.push_back({physicalDeviceId2,
+ physicalDeviceSettings2});
+ requestOriginal->mSurfaceList.push_back(surface);
+ requestOriginal->mIsReprocess = false;
+ requestOriginal->mSurfaceConverted = false;
+
+ p.freeData();
+ EXPECT_TRUE(requestOriginal->writeToParcel(&p) == OK);
+ p.setDataPosition(0);
+ EXPECT_TRUE(requestParceled->readFromParcel(&p) == OK);
+ EXPECT_EQ(requestParceled->mIsReprocess, false);
+ EXPECT_FALSE(requestParceled->mSurfaceList.empty());
+ EXPECT_EQ(2u, requestParceled->mPhysicalCameraSettings.size());
+ auto it = requestParceled->mPhysicalCameraSettings.begin();
+ EXPECT_EQ(physicalDeviceId1, it->id);
+ EXPECT_TRUE(it->settings.exists(ANDROID_CONTROL_CAPTURE_INTENT));
+ auto entry = it->settings.find(ANDROID_CONTROL_CAPTURE_INTENT);
+ EXPECT_EQ(entry.data.u8[0], intent1);
+ it++;
+ EXPECT_EQ(physicalDeviceId2, it->id);
+ EXPECT_TRUE(it->settings.exists(ANDROID_CONTROL_CAPTURE_INTENT));
+ entry = it->settings.find(ANDROID_CONTROL_CAPTURE_INTENT);
+ EXPECT_EQ(entry.data.u8[0], intent2);
+};
diff --git a/camera/tests/CameraZSLTests.cpp b/camera/tests/CameraZSLTests.cpp
index ecca354..02c6e2a 100644
--- a/camera/tests/CameraZSLTests.cpp
+++ b/camera/tests/CameraZSLTests.cpp
@@ -256,10 +256,10 @@
ASSERT_TRUE(nullptr != surfaceControl.get());
ASSERT_TRUE(surfaceControl->isValid());
- SurfaceComposerClient::openGlobalTransaction();
- ASSERT_EQ(NO_ERROR, surfaceControl->setLayer(0x7fffffff));
- ASSERT_EQ(NO_ERROR, surfaceControl->show());
- SurfaceComposerClient::closeGlobalTransaction();
+ SurfaceComposerClient::Transaction{}
+ .setLayer(surfaceControl, 0x7fffffff)
+ .show(surfaceControl)
+ .apply();
previewSurface = surfaceControl->getSurface();
ASSERT_TRUE(previewSurface != NULL);
diff --git a/camera/tests/VendorTagDescriptorTests.cpp b/camera/tests/VendorTagDescriptorTests.cpp
index 75cfb73..0ee358d 100644
--- a/camera/tests/VendorTagDescriptorTests.cpp
+++ b/camera/tests/VendorTagDescriptorTests.cpp
@@ -142,6 +142,7 @@
EXPECT_EQ(OK, vDescOriginal->writeToParcel(&p));
p.setDataPosition(0);
+ vDescParceled = new VendorTagDescriptor();
ASSERT_EQ(OK, vDescParceled->readFromParcel(&p));
// Ensure consistent tag count
diff --git a/cmds/screenrecord/Android.mk b/cmds/screenrecord/Android.mk
index 7aa684a..5e83ed6 100644
--- a/cmds/screenrecord/Android.mk
+++ b/cmds/screenrecord/Android.mk
@@ -25,8 +25,8 @@
Program.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia libutils libbinder libstagefright_foundation \
- libjpeg libgui libcutils liblog libEGL libGLESv2
+ libstagefright libmedia libmedia_omx libutils libbinder libstagefright_foundation \
+ libjpeg libui libgui libcutils liblog libEGL libGLESv2
LOCAL_C_INCLUDES := \
frameworks/av/media/libstagefright \
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index bc32bbe..4603515 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -50,6 +50,7 @@
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaMuxer.h>
+#include <media/stagefright/PersistentSurface.h>
#include <media/ICrypto.h>
#include <media/MediaCodecBuffer.h>
@@ -70,9 +71,11 @@
static bool gVerbose = false; // chatty on stdout
static bool gRotate = false; // rotate 90 degrees
static bool gMonotonicTime = false; // use system monotonic time for timestamps
+static bool gPersistentSurface = false; // use persistent surface
static enum {
FORMAT_MP4, FORMAT_H264, FORMAT_FRAMES, FORMAT_RAW_FRAMES
} gOutputFormat = FORMAT_MP4; // data format for output
+static AString gCodecName = ""; // codec name override
static bool gSizeSpecified = false; // was size explicitly requested?
static bool gWantInfoScreen = false; // do we want initial info screen?
static bool gWantFrameTime = false; // do we want times on each frame?
@@ -132,6 +135,7 @@
strerror(errno));
return err;
}
+ signal(SIGPIPE, SIG_IGN);
return NO_ERROR;
}
@@ -154,6 +158,7 @@
if (gVerbose) {
printf("Configuring recorder for %dx%d %s at %.2fMbps\n",
gVideoWidth, gVideoHeight, kMimeTypeAvc, gBitRate / 1000000.0);
+ fflush(stdout);
}
sp<AMessage> format = new AMessage;
@@ -169,11 +174,21 @@
looper->setName("screenrecord_looper");
looper->start();
ALOGV("Creating codec");
- sp<MediaCodec> codec = MediaCodec::CreateByType(looper, kMimeTypeAvc, true);
- if (codec == NULL) {
- fprintf(stderr, "ERROR: unable to create %s codec instance\n",
- kMimeTypeAvc);
- return UNKNOWN_ERROR;
+ sp<MediaCodec> codec;
+ if (gCodecName.empty()) {
+ codec = MediaCodec::CreateByType(looper, kMimeTypeAvc, true);
+ if (codec == NULL) {
+ fprintf(stderr, "ERROR: unable to create %s codec instance\n",
+ kMimeTypeAvc);
+ return UNKNOWN_ERROR;
+ }
+ } else {
+ codec = MediaCodec::CreateByComponentName(looper, gCodecName);
+ if (codec == NULL) {
+ fprintf(stderr, "ERROR: unable to create %s codec instance\n",
+ gCodecName.c_str());
+ return UNKNOWN_ERROR;
+ }
}
err = codec->configure(format, NULL, NULL,
@@ -187,10 +202,18 @@
ALOGV("Creating encoder input surface");
sp<IGraphicBufferProducer> bufferProducer;
- err = codec->createInputSurface(&bufferProducer);
+ if (gPersistentSurface) {
+ sp<PersistentSurface> surface = MediaCodec::CreatePersistentInputSurface();
+ bufferProducer = surface->getBufferProducer();
+ err = codec->setInputSurface(surface);
+ } else {
+ err = codec->createInputSurface(&bufferProducer);
+ }
if (err != NO_ERROR) {
fprintf(stderr,
- "ERROR: unable to create encoder input surface (err=%d)\n", err);
+ "ERROR: unable to %s encoder input surface (err=%d)\n",
+ gPersistentSurface ? "set" : "create",
+ err);
codec->release();
return err;
}
@@ -213,7 +236,9 @@
* Sets the display projection, based on the display dimensions, video size,
* and device orientation.
*/
-static status_t setDisplayProjection(const sp<IBinder>& dpy,
+static status_t setDisplayProjection(
+ SurfaceComposerClient::Transaction& t,
+ const sp<IBinder>& dpy,
const DisplayInfo& mainDpyInfo) {
// Set the region of the layer stack we're interested in, which in our
@@ -273,13 +298,15 @@
if (gRotate) {
printf("Rotated content area is %ux%u at offset x=%d y=%d\n",
outHeight, outWidth, offY, offX);
+ fflush(stdout);
} else {
printf("Content area is %ux%u at offset x=%d y=%d\n",
outWidth, outHeight, offX, offY);
+ fflush(stdout);
}
}
- SurfaceComposerClient::setDisplayProjection(dpy,
+ t.setDisplayProjection(dpy,
gRotate ? DISPLAY_ORIENTATION_90 : DISPLAY_ORIENTATION_0,
layerStackRect, displayRect);
return NO_ERROR;
@@ -295,11 +322,11 @@
sp<IBinder> dpy = SurfaceComposerClient::createDisplay(
String8("ScreenRecorder"), false /*secure*/);
- SurfaceComposerClient::openGlobalTransaction();
- SurfaceComposerClient::setDisplaySurface(dpy, bufferProducer);
- setDisplayProjection(dpy, mainDpyInfo);
- SurfaceComposerClient::setDisplayLayerStack(dpy, 0); // default stack
- SurfaceComposerClient::closeGlobalTransaction();
+ SurfaceComposerClient::Transaction t;
+ t.setDisplaySurface(dpy, bufferProducer);
+ setDisplayProjection(t, dpy, mainDpyInfo);
+ t.setDisplayLayerStack(dpy, 0); // default stack
+ t.apply();
*pDisplayHandle = dpy;
@@ -344,6 +371,7 @@
if (systemTime(CLOCK_MONOTONIC) > endWhenNsec) {
if (gVerbose) {
printf("Time limit reached\n");
+ fflush(stdout);
}
break;
}
@@ -379,9 +407,9 @@
ALOGW("getDisplayInfo(main) failed: %d", err);
} else if (orientation != mainDpyInfo.orientation) {
ALOGD("orientation changed, now %d", mainDpyInfo.orientation);
- SurfaceComposerClient::openGlobalTransaction();
- setDisplayProjection(virtualDpy, mainDpyInfo);
- SurfaceComposerClient::closeGlobalTransaction();
+ SurfaceComposerClient::Transaction t;
+ setDisplayProjection(t, virtualDpy, mainDpyInfo);
+ t.apply();
orientation = mainDpyInfo.orientation;
}
}
@@ -481,6 +509,7 @@
printf("Encoder stopping; recorded %u frames in %" PRId64 " seconds\n",
debugNumFrames, nanoseconds_to_seconds(
systemTime(CLOCK_MONOTONIC) - startWhenNsec));
+ fflush(stdout);
}
return NO_ERROR;
}
@@ -554,6 +583,7 @@
printf("Main display is %dx%d @%.2ffps (orientation=%u)\n",
mainDpyInfo.w, mainDpyInfo.h, mainDpyInfo.fps,
mainDpyInfo.orientation);
+ fflush(stdout);
}
bool rotated = isDeviceRotated(mainDpyInfo.orientation);
@@ -621,6 +651,7 @@
}
if (gVerbose) {
printf("Bugreport overlay created\n");
+ fflush(stdout);
}
} else {
// Use the encoder's input surface as the virtual display surface.
@@ -713,6 +744,7 @@
if (gVerbose) {
printf("Stopping encoder and muxer\n");
+ fflush(stdout);
}
}
@@ -759,6 +791,7 @@
printf(" %s", argv[i]);
}
putchar('\n');
+ fflush(stdout);
}
pid_t pid = fork();
@@ -896,7 +929,9 @@
{ "show-frame-time", no_argument, NULL, 'f' },
{ "rotate", no_argument, NULL, 'r' },
{ "output-format", required_argument, NULL, 'o' },
+ { "codec-name", required_argument, NULL, 'N' },
{ "monotonic-time", no_argument, NULL, 'm' },
+ { "persistent-surface", no_argument, NULL, 'p' },
{ NULL, 0, NULL, 0 }
};
@@ -976,9 +1011,15 @@
return 2;
}
break;
+ case 'N':
+ gCodecName = optarg;
+ break;
case 'm':
gMonotonicTime = true;
break;
+ case 'p':
+ gPersistentSurface = true;
+ break;
default:
if (ic != '?') {
fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index f647ffd..c7619af 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -8,9 +8,9 @@
SineSource.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia libutils libbinder libstagefright_foundation \
- libjpeg libgui libcutils liblog \
- libhidlmemory \
+ libstagefright libmedia libmedia_omx libmediaextractor libutils libbinder \
+ libstagefright_foundation libjpeg libui libgui libcutils liblog \
+ libhidlbase \
android.hardware.media.omx@1.0 \
LOCAL_C_INCLUDES:= \
@@ -36,7 +36,8 @@
record.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia liblog libutils libbinder libstagefright_foundation
+ libstagefright libmedia libmediaextractor liblog libutils libbinder \
+ libstagefright_foundation
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -60,7 +61,8 @@
recordvideo.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia liblog libutils libbinder libstagefright_foundation
+ libstagefright libmedia libmediaextractor liblog libutils libbinder \
+ libstagefright_foundation
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -85,7 +87,8 @@
audioloop.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia liblog libutils libbinder libstagefright_foundation
+ libstagefright libmedia libmediaextractor liblog libutils libbinder \
+ libstagefright_foundation
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -107,8 +110,8 @@
stream.cpp \
LOCAL_SHARED_LIBRARIES := \
- libstagefright liblog libutils libbinder libgui \
- libstagefright_foundation libmedia libcutils
+ libstagefright liblog libutils libbinder libui libgui \
+ libstagefright_foundation libmedia libcutils libmediaextractor
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -132,7 +135,7 @@
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libstagefright_foundation \
- libmedia libaudioclient libgui libcutils
+ libmedia libmedia_omx libaudioclient libui libgui libcutils
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -163,6 +166,8 @@
libbinder \
libstagefright_foundation \
libmedia \
+ libmedia_omx \
+ libui \
libgui \
libcutils \
libRScpp \
@@ -199,7 +204,7 @@
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libstagefright_foundation \
- libcutils libc
+ libcutils libc libmediaextractor
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
diff --git a/cmds/stagefright/SineSource.cpp b/cmds/stagefright/SineSource.cpp
index cad8caf..0ecc16c 100644
--- a/cmds/stagefright/SineSource.cpp
+++ b/cmds/stagefright/SineSource.cpp
@@ -4,6 +4,7 @@
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
@@ -59,10 +60,10 @@
}
status_t SineSource::read(
- MediaBuffer **out, const ReadOptions * /* options */) {
+ MediaBufferBase **out, const ReadOptions * /* options */) {
*out = NULL;
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
status_t err = mGroup->acquire_buffer(&buffer);
if (err != OK) {
@@ -88,7 +89,7 @@
x += k;
}
- buffer->meta_data()->setInt64(
+ buffer->meta_data().setInt64(
kKeyTime, ((int64_t)mPhase * 1000000) / mSampleRate);
mPhase += numFramesPerBuffer;
diff --git a/cmds/stagefright/SineSource.h b/cmds/stagefright/SineSource.h
index be05661..1817291 100644
--- a/cmds/stagefright/SineSource.h
+++ b/cmds/stagefright/SineSource.h
@@ -2,7 +2,7 @@
#define SINE_SOURCE_H_
-#include <media/stagefright/MediaSource.h>
+#include <media/MediaSource.h>
#include <utils/Compat.h>
namespace android {
@@ -18,7 +18,7 @@
virtual sp<MetaData> getFormat();
virtual status_t read(
- MediaBuffer **out, const ReadOptions *options = NULL);
+ MediaBufferBase **out, const ReadOptions *options = NULL);
protected:
virtual ~SineSource();
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index ed44b4d..d4f2e8d 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#define LOG_NDEBUG 0
+#define LOG_TAG "audioloop"
+#include <utils/Log.h>
+
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -29,7 +33,6 @@
#include <media/stagefright/AudioSource.h>
#include <media/stagefright/MediaCodecSource.h>
#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
#include <media/stagefright/SimpleDecodingSource.h>
#include "SineSource.h"
@@ -37,11 +40,13 @@
static void usage(const char* name)
{
- fprintf(stderr, "Usage: %s [-d du.ration] [-m] [-w] [<output-file>]\n", name);
+ fprintf(stderr, "Usage: %s [-d du.ration] [-m] [-w] [-N name] [<output-file>]\n", name);
fprintf(stderr, "Encodes either a sine wave or microphone input to AMR format\n");
fprintf(stderr, " -d duration in seconds, default 5 seconds\n");
fprintf(stderr, " -m use microphone for input, default sine source\n");
fprintf(stderr, " -w use AMR wideband (default narrowband)\n");
+ fprintf(stderr, " -N name of the encoder; must be set with -M\n");
+ fprintf(stderr, " -M media type of the encoder; must be set with -N\n");
fprintf(stderr, " <output-file> output file for AMR encoding,"
" if unspecified, decode to speaker.\n");
}
@@ -54,8 +59,10 @@
bool outputWBAMR = false;
bool playToSpeaker = true;
const char* fileOut = NULL;
+ AString name;
+ AString mediaType;
int ch;
- while ((ch = getopt(argc, argv, "d:mw")) != -1) {
+ while ((ch = getopt(argc, argv, "d:mwN:M:")) != -1) {
switch (ch) {
case 'd':
duration = atoi(optarg);
@@ -66,6 +73,12 @@
case 'w':
outputWBAMR = true;
break;
+ case 'N':
+ name.setTo(optarg);
+ break;
+ case 'M':
+ mediaType.setTo(optarg);
+ break;
default:
usage(argv[0]);
return -1;
@@ -76,8 +89,18 @@
if (argc == 1) {
fileOut = argv[0];
}
- const int32_t kSampleRate = outputWBAMR ? 16000 : 8000;
- const int32_t kBitRate = outputWBAMR ? 16000 : 8000;
+ if ((name.empty() && !mediaType.empty()) || (!name.empty() && mediaType.empty())) {
+ fprintf(stderr, "-N and -M must be set together\n");
+ usage(argv[0]);
+ return -1;
+ }
+ if (!name.empty() && fileOut != NULL) {
+ fprintf(stderr, "-N and -M cannot be used with <output file>\n");
+ usage(argv[0]);
+ return -1;
+ }
+ int32_t sampleRate = !name.empty() ? 44100 : outputWBAMR ? 16000 : 8000;
+ int32_t bitRate = sampleRate;
android::ProcessState::self()->startThreadPool();
sp<MediaSource> source;
@@ -87,22 +110,27 @@
source = new AudioSource(
AUDIO_SOURCE_MIC,
String16(),
- kSampleRate,
+ sampleRate,
channels);
} else {
// use a sine source at 500 hz.
- source = new SineSource(kSampleRate, channels);
+ source = new SineSource(sampleRate, channels);
}
sp<AMessage> meta = new AMessage;
- meta->setString(
- "mime",
- outputWBAMR ? MEDIA_MIMETYPE_AUDIO_AMR_WB
- : MEDIA_MIMETYPE_AUDIO_AMR_NB);
+ if (name.empty()) {
+ meta->setString(
+ "mime",
+ outputWBAMR ? MEDIA_MIMETYPE_AUDIO_AMR_WB
+ : MEDIA_MIMETYPE_AUDIO_AMR_NB);
+ } else {
+ meta->setString("mime", mediaType);
+ meta->setString("testing-name", name);
+ }
meta->setInt32("channel-count", channels);
- meta->setInt32("sample-rate", kSampleRate);
- meta->setInt32("bitrate", kBitRate);
+ meta->setInt32("sample-rate", sampleRate);
+ meta->setInt32("bitrate", bitRate);
int32_t maxInputSize;
if (source->getFormat()->findInt32(kKeyMaxInputSize, &maxInputSize)) {
meta->setInt32("max-input-size", maxInputSize);
@@ -112,7 +140,7 @@
looper->setName("audioloop");
looper->start();
- sp<IMediaSource> encoder = MediaCodecSource::Create(looper, meta, source);
+ sp<MediaSource> encoder = MediaCodecSource::Create(looper, meta, source);
if (fileOut != NULL) {
// target file specified, write encoded AMR output
@@ -128,19 +156,20 @@
writer->stop();
} else {
// otherwise decode to speaker
- sp<IMediaSource> decoder = SimpleDecodingSource::Create(encoder);
+ sp<MediaSource> decoder = SimpleDecodingSource::Create(encoder);
if (playToSpeaker) {
- AudioPlayer *player = new AudioPlayer(NULL);
- player->setSource(decoder);
- player->start();
+ AudioPlayer player(NULL);
+ player.setSource(decoder);
+ player.start();
sleep(duration);
+ALOGI("Line: %d", __LINE__);
decoder.clear(); // must clear |decoder| otherwise delete player will hang.
- delete player; // there is no player->stop()...
+ALOGI("Line: %d", __LINE__);
} else {
CHECK_EQ(decoder->start(), (status_t)OK);
- MediaBuffer* buffer;
+ MediaBufferBase* buffer;
while (decoder->read(&buffer) == OK) {
// do something with buffer (save it eventually?)
// need to stop after some count though...
@@ -151,6 +180,7 @@
}
CHECK_EQ(decoder->stop(), (status_t)OK);
}
+ALOGI("Line: %d", __LINE__);
}
return 0;
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index 3108a67..6a58467 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -430,10 +430,10 @@
CHECK(control != NULL);
CHECK(control->isValid());
- SurfaceComposerClient::openGlobalTransaction();
- CHECK_EQ(control->setLayer(INT_MAX), (status_t)OK);
- CHECK_EQ(control->show(), (status_t)OK);
- SurfaceComposerClient::closeGlobalTransaction();
+ SurfaceComposerClient::Transaction{}
+ .setLayer(control, INT_MAX)
+ .show(control)
+ .apply();
surface = control->getSurface();
CHECK(surface != NULL);
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index f219e69..f24d2dd 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -764,10 +764,10 @@
CHECK(control != NULL);
CHECK(control->isValid());
- SurfaceComposerClient::openGlobalTransaction();
- CHECK_EQ((status_t)OK, control->setLayer(INT_MAX));
- CHECK_EQ((status_t)OK, control->show());
- SurfaceComposerClient::closeGlobalTransaction();
+ SurfaceComposerClient::Transaction{}
+ .setLayer(control, INT_MAX)
+ .show(control)
+ .apply();
surface = control->getSurface();
CHECK(surface != NULL);
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 94c2e96..44b0015 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -17,6 +17,7 @@
#include "SineSource.h"
#include <binder/ProcessState.h>
+#include <media/MediaExtractor.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -27,7 +28,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaCodecSource.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/SimpleDecodingSource.h>
#include <media/MediaPlayerInterface.h>
@@ -120,7 +121,7 @@
sp<MediaSource> source;
sp<MediaExtractor> extractor =
- MediaExtractor::Create(new FileSource(filename));
+ MediaExtractorFactory::Create(new FileSource(filename));
if (extractor == NULL) {
return NULL;
}
@@ -320,14 +321,14 @@
looper->setName("record");
looper->start();
- sp<IMediaSource> encoder =
+ sp<MediaSource> encoder =
MediaCodecSource::Create(looper, encMeta, audioSource);
encoder->start();
int32_t n = 0;
status_t err;
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
while ((err = encoder->read(&buffer)) == OK) {
printf(".");
fflush(stdout);
diff --git a/cmds/stagefright/recordvideo.cpp b/cmds/stagefright/recordvideo.cpp
index 7a3c842..a63b9b9 100644
--- a/cmds/stagefright/recordvideo.cpp
+++ b/cmds/stagefright/recordvideo.cpp
@@ -90,7 +90,7 @@
}
virtual status_t read(
- MediaBuffer **buffer, const MediaSource::ReadOptions *options __unused) {
+ MediaBufferBase **buffer, const MediaSource::ReadOptions *options __unused) {
if (mNumFramesOutput % 10 == 0) {
fprintf(stderr, ".");
@@ -114,8 +114,8 @@
x = x >= 0xa0 ? 0x60 : x + 1;
#endif
(*buffer)->set_range(0, mSize);
- (*buffer)->meta_data()->clear();
- (*buffer)->meta_data()->setInt64(
+ (*buffer)->meta_data().clear();
+ (*buffer)->meta_data().setInt64(
kKeyTime, (mNumFramesOutput * 1000000) / mFrameRate);
++mNumFramesOutput;
@@ -303,7 +303,7 @@
looper->setName("recordvideo");
looper->start();
- sp<IMediaSource> encoder =
+ sp<MediaSource> encoder =
MediaCodecSource::Create(
looper, enc_meta, source, NULL /* consumer */,
preferSoftwareCodec ? MediaCodecSource::FLAG_PREFER_SOFTWARE_CODEC : 0);
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index d70282b..61fc897 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -31,9 +31,11 @@
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
+#include <media/DataSource.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaSource.h>
#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
-#include <media/IMediaCodecService.h>
#include <media/IMediaPlayerService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -41,14 +43,14 @@
#include <media/stagefright/foundation/AUtils.h>
#include "include/NuCachedSource2.h"
#include <media/stagefright/AudioPlayer.h>
-#include <media/stagefright/DataSource.h>
+#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/JPEGSource.h>
+#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/SimpleDecodingSource.h>
#include <media/stagefright/Utils.h>
@@ -65,7 +67,6 @@
#include <gui/SurfaceComposerClient.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
-#include <media/omx/1.0/WOmx.h>
using namespace android;
@@ -79,6 +80,7 @@
static bool gDisplayHistogram;
static bool showProgress = true;
static String8 gWriteMP4Filename;
+static String8 gComponentNameOverride;
static sp<ANativeWindow> gSurface;
@@ -141,14 +143,14 @@
}
}
-static void dumpSource(const sp<IMediaSource> &source, const String8 &filename) {
+static void dumpSource(const sp<MediaSource> &source, const String8 &filename) {
FILE *out = fopen(filename.string(), "wb");
CHECK_EQ((status_t)OK, source->start());
status_t err;
for (;;) {
- MediaBuffer *mbuf;
+ MediaBufferBase *mbuf;
err = source->read(&mbuf);
if (err == INFO_FORMAT_CHANGED) {
@@ -174,13 +176,13 @@
out = NULL;
}
-static void playSource(sp<IMediaSource> &source) {
+static void playSource(sp<MediaSource> &source) {
sp<MetaData> meta = source->getFormat();
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
- sp<IMediaSource> rawSource;
+ sp<MediaSource> rawSource;
if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime)) {
rawSource = source;
} else {
@@ -192,7 +194,10 @@
CHECK(!gPreferSoftwareCodec);
flags |= MediaCodecList::kHardwareCodecsOnly;
}
- rawSource = SimpleDecodingSource::Create(source, flags, gSurface);
+ rawSource = SimpleDecodingSource::Create(
+ source, flags, gSurface,
+ gComponentNameOverride.isEmpty() ? nullptr : gComponentNameOverride.c_str(),
+ !gComponentNameOverride.isEmpty());
if (rawSource == NULL) {
return;
}
@@ -229,7 +234,7 @@
CHECK(meta->findInt64(kKeyDuration, &durationUs));
status_t err;
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
MediaSource::ReadOptions options;
int64_t seekTimeUs = -1;
for (;;) {
@@ -248,7 +253,7 @@
shouldSeek = true;
} else {
int64_t timestampUs;
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &timestampUs));
+ CHECK(buffer->meta_data().findInt64(kKeyTime, &timestampUs));
bool failed = false;
@@ -316,7 +321,7 @@
while (numIterationsLeft-- > 0) {
long numFrames = 0;
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
for (;;) {
int64_t startDecodeUs = getNowUs();
@@ -404,14 +409,14 @@
////////////////////////////////////////////////////////////////////////////////
struct DetectSyncSource : public MediaSource {
- explicit DetectSyncSource(const sp<IMediaSource> &source);
+ explicit DetectSyncSource(const sp<MediaSource> &source);
virtual status_t start(MetaData *params = NULL);
virtual status_t stop();
virtual sp<MetaData> getFormat();
virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options);
+ MediaBufferBase **buffer, const ReadOptions *options);
private:
enum StreamType {
@@ -421,14 +426,14 @@
OTHER,
};
- sp<IMediaSource> mSource;
+ sp<MediaSource> mSource;
StreamType mStreamType;
bool mSawFirstIDRFrame;
DISALLOW_EVIL_CONSTRUCTORS(DetectSyncSource);
};
-DetectSyncSource::DetectSyncSource(const sp<IMediaSource> &source)
+DetectSyncSource::DetectSyncSource(const sp<MediaSource> &source)
: mSource(source),
mStreamType(OTHER),
mSawFirstIDRFrame(false) {
@@ -460,7 +465,7 @@
return mSource->getFormat();
}
-static bool isIDRFrame(MediaBuffer *buffer) {
+static bool isIDRFrame(MediaBufferBase *buffer) {
const uint8_t *data =
(const uint8_t *)buffer->data() + buffer->range_offset();
size_t size = buffer->range_length();
@@ -477,7 +482,7 @@
}
status_t DetectSyncSource::read(
- MediaBuffer **buffer, const ReadOptions *options) {
+ MediaBufferBase **buffer, const ReadOptions *options) {
for (;;) {
status_t err = mSource->read(buffer, options);
@@ -487,12 +492,12 @@
if (mStreamType == AVC) {
bool isIDR = isIDRFrame(*buffer);
- (*buffer)->meta_data()->setInt32(kKeyIsSyncFrame, isIDR);
+ (*buffer)->meta_data().setInt32(kKeyIsSyncFrame, isIDR);
if (isIDR) {
mSawFirstIDRFrame = true;
}
} else {
- (*buffer)->meta_data()->setInt32(kKeyIsSyncFrame, true);
+ (*buffer)->meta_data().setInt32(kKeyIsSyncFrame, true);
}
if (mStreamType != AVC || mSawFirstIDRFrame) {
@@ -510,7 +515,7 @@
////////////////////////////////////////////////////////////////////////////////
static void writeSourcesToMP4(
- Vector<sp<IMediaSource> > &sources, bool syncInfoPresent) {
+ Vector<sp<MediaSource> > &sources, bool syncInfoPresent) {
#if 0
sp<MPEG4Writer> writer =
new MPEG4Writer(gWriteMP4Filename.string());
@@ -528,7 +533,7 @@
writer->setMaxFileDuration(60000000ll);
for (size_t i = 0; i < sources.size(); ++i) {
- sp<IMediaSource> source = sources.editItemAt(i);
+ sp<MediaSource> source = sources.editItemAt(i);
CHECK_EQ(writer->addSource(
syncInfoPresent ? source : new DetectSyncSource(source)),
@@ -545,7 +550,7 @@
writer->stop();
}
-static void performSeekTest(const sp<IMediaSource> &source) {
+static void performSeekTest(const sp<MediaSource> &source) {
CHECK_EQ((status_t)OK, source->start());
int64_t durationUs;
@@ -557,7 +562,7 @@
options.setSeekTo(
seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
status_t err;
for (;;) {
err = source->read(&buffer, &options);
@@ -586,7 +591,7 @@
if (err == OK) {
int64_t timeUs;
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(buffer->meta_data().findInt64(kKeyTime, &timeUs));
printf("%" PRId64 "\t%" PRId64 "\t%" PRId64 "\n",
seekTimeUs, timeUs, seekTimeUs - timeUs);
@@ -617,6 +622,7 @@
fprintf(stderr, " -o playback audio\n");
fprintf(stderr, " -w(rite) filename (write to .mp4 file)\n");
fprintf(stderr, " -k seek test\n");
+ fprintf(stderr, " -N(ame) of the component\n");
fprintf(stderr, " -x display a histogram of decoding times/fps "
"(video only)\n");
fprintf(stderr, " -q don't show progress indicator\n");
@@ -702,7 +708,7 @@
sp<ALooper> looper;
int res;
- while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kxSTd:D:")) >= 0) {
+ while ((res = getopt(argc, argv, "haqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
switch (res) {
case 'a':
{
@@ -731,6 +737,12 @@
break;
}
+ case 'N':
+ {
+ gComponentNameOverride.setTo(optarg);
+ break;
+ }
+
case 'l':
{
listComponents = true;
@@ -881,7 +893,7 @@
VideoFrame *frame = (VideoFrame *)mem->pointer();
CHECK_EQ(writeJpegFile("/sdcard/out.jpg",
- (uint8_t *)frame + sizeof(VideoFrame),
+ frame->getFlattenedData(),
frame->mWidth, frame->mHeight), 0);
}
@@ -909,37 +921,24 @@
}
if (listComponents) {
- sp<IOMX> omx;
- if (property_get_bool("persist.media.treble_omx", true)) {
- using namespace ::android::hardware::media::omx::V1_0;
- sp<IOmx> tOmx = IOmx::getService();
+ using ::android::hardware::hidl_vec;
+ using ::android::hardware::hidl_string;
+ using namespace ::android::hardware::media::omx::V1_0;
+ sp<IOmx> omx = IOmx::getService();
+ CHECK(omx.get() != nullptr);
- CHECK(tOmx.get() != NULL);
-
- omx = new utils::LWOmx(tOmx);
- } else {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("media.codec"));
- sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
-
- CHECK(service.get() != NULL);
-
- omx = service->getOMX();
- }
- CHECK(omx.get() != NULL);
-
- List<IOMX::ComponentInfo> list;
- omx->listNodes(&list);
-
- for (List<IOMX::ComponentInfo>::iterator it = list.begin();
- it != list.end(); ++it) {
- printf("%s\t Roles: ", (*it).mName.string());
- for (List<String8>::iterator itRoles = (*it).mRoles.begin() ;
- itRoles != (*it).mRoles.end() ; ++itRoles) {
- printf("%s\t", (*itRoles).string());
- }
- printf("\n");
- }
+ hidl_vec<IOmx::ComponentInfo> nodeList;
+ auto transStatus = omx->listNodes([](
+ const auto& status, const auto& nodeList) {
+ CHECK(status == Status::OK);
+ for (const auto& info : nodeList) {
+ printf("%s\t Roles: ", info.mName.c_str());
+ for (const auto& role : info.mRoles) {
+ printf("%s\t", role.c_str());
+ }
+ }
+ });
+ CHECK(transStatus.isOk());
}
sp<SurfaceComposerClient> composerClient;
@@ -960,10 +959,10 @@
CHECK(control != NULL);
CHECK(control->isValid());
- SurfaceComposerClient::openGlobalTransaction();
- CHECK_EQ(control->setLayer(INT_MAX), (status_t)OK);
- CHECK_EQ(control->show(), (status_t)OK);
- SurfaceComposerClient::closeGlobalTransaction();
+ SurfaceComposerClient::Transaction{}
+ .setLayer(control, INT_MAX)
+ .show(control)
+ .apply();
gSurface = control->getSurface();
CHECK(gSurface != NULL);
@@ -988,7 +987,7 @@
const char *filename = argv[k];
sp<DataSource> dataSource =
- DataSource::CreateFromURI(NULL /* httpService */, filename);
+ DataSourceFactory::CreateFromURI(NULL /* httpService */, filename);
if (strncasecmp(filename, "sine:", 5) && dataSource == NULL) {
fprintf(stderr, "Unable to create data source.\n");
@@ -1002,8 +1001,8 @@
isJPEG = true;
}
- Vector<sp<IMediaSource> > mediaSources;
- sp<IMediaSource> mediaSource;
+ Vector<sp<MediaSource> > mediaSources;
+ sp<MediaSource> mediaSource;
if (isJPEG) {
mediaSource = new JPEGSource(dataSource);
@@ -1022,7 +1021,7 @@
mediaSources.push(mediaSource);
}
} else {
- sp<IMediaExtractor> extractor = MediaExtractor::Create(dataSource);
+ sp<IMediaExtractor> extractor = MediaExtractorFactory::Create(dataSource);
if (extractor == NULL) {
fprintf(stderr, "could not create extractor.\n");
@@ -1049,7 +1048,8 @@
bool haveAudio = false;
bool haveVideo = false;
for (size_t i = 0; i < numTracks; ++i) {
- sp<IMediaSource> source = extractor->getTrack(i);
+ sp<MediaSource> source = CreateMediaSourceFromIMediaSource(
+ extractor->getTrack(i));
if (source == nullptr) {
fprintf(stderr, "skip NULL track %zu, track count %zu.\n", i, numTracks);
continue;
@@ -1084,7 +1084,7 @@
i, MediaExtractor::kIncludeExtensiveMetaData);
if (meta == NULL) {
- break;
+ continue;
}
const char *mime;
meta->findCString(kKeyMIMEType, &mime);
@@ -1115,7 +1115,7 @@
thumbTimeUs, thumbTimeUs / 1E6);
}
- mediaSource = extractor->getTrack(i);
+ mediaSource = CreateMediaSourceFromIMediaSource(extractor->getTrack(i));
if (mediaSource == nullptr) {
fprintf(stderr, "skip NULL track %zu, total tracks %zu.\n", i, numTracks);
return -1;
@@ -1128,7 +1128,7 @@
} else if (dumpStream) {
dumpSource(mediaSource, dumpStreamFilename);
} else if (dumpPCMStream) {
- sp<IMediaSource> decSource = SimpleDecodingSource::Create(mediaSource);
+ sp<MediaSource> decSource = SimpleDecodingSource::Create(mediaSource);
dumpSource(decSource, dumpStreamFilename);
} else if (seekTest) {
performSeekTest(mediaSource);
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 2e1d240..b0199d8 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -21,15 +21,18 @@
#include <binder/ProcessState.h>
#include <cutils/properties.h> // for property_get
+#include <media/DataSource.h>
#include <media/IMediaHTTPService.h>
#include <media/IStreamSource.h>
+#include <media/MediaExtractor.h>
#include <media/mediaplayer.h>
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
+#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MPEG2TSWriter.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MetaData.h>
#include <binder/IServiceManager.h>
@@ -161,11 +164,11 @@
: mCurrentBufferIndex(-1),
mCurrentBufferOffset(0) {
sp<DataSource> dataSource =
- DataSource::CreateFromURI(NULL /* httpService */, filename);
+ DataSourceFactory::CreateFromURI(NULL /* httpService */, filename);
CHECK(dataSource != NULL);
- sp<IMediaExtractor> extractor = MediaExtractor::Create(dataSource);
+ sp<IMediaExtractor> extractor = MediaExtractorFactory::Create(dataSource);
CHECK(extractor != NULL);
mWriter = new MPEG2TSWriter(
@@ -182,7 +185,7 @@
continue;
}
- sp<IMediaSource> track = extractor->getTrack(i);
+ sp<MediaSource> track = CreateMediaSourceFromIMediaSource(extractor->getTrack(i));
if (track == nullptr) {
fprintf(stderr, "skip NULL track %zu, total tracks %zu\n", i, numTracks);
continue;
@@ -335,10 +338,10 @@
CHECK(control != NULL);
CHECK(control->isValid());
- SurfaceComposerClient::openGlobalTransaction();
- CHECK_EQ(control->setLayer(INT_MAX), (status_t)OK);
- CHECK_EQ(control->show(), (status_t)OK);
- SurfaceComposerClient::closeGlobalTransaction();
+ SurfaceComposerClient::Transaction{}
+ .setLayer(control, INT_MAX)
+ .show(control)
+ .apply();
sp<Surface> surface = control->getSurface();
CHECK(surface != NULL);
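The last hunk above swaps the deprecated global-transaction calls for the SurfaceComposerClient::Transaction object. A minimal sketch of the new pattern, assuming <gui/SurfaceComposerClient.h> and a valid sp<SurfaceControl> named control, as in the tool:

    // Batch the layer and visibility changes and submit them atomically.
    android::SurfaceComposerClient::Transaction t;
    t.setLayer(control, INT_MAX)   // place the surface on top
     .show(control)                // make it visible
     .apply();                     // commit the batch to SurfaceFlinger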
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index 0c14201..4991e50 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -2,10 +2,11 @@
// libmediadrm
//
-cc_library_shared {
+// TODO: change it back to cc_library_shared when MediaPlayer2 switches to
+// using NdkMediaDrm instead of MediaDrm.java.
+cc_library {
name: "libmediadrm",
-
srcs: [
"DrmPluginPath.cpp",
"DrmSessionManager.cpp",
@@ -13,27 +14,24 @@
"IDrm.cpp",
"IDrmClient.cpp",
"IMediaDrmService.cpp",
- "PluginMetricsReporting.cpp",
"SharedLibrary.cpp",
"DrmHal.cpp",
"CryptoHal.cpp",
- "protos/plugin_metrics.proto",
],
- proto: {
- type: "lite",
- },
-
shared_libs: [
"libbinder",
"libcutils",
"libdl",
"liblog",
+ "libmediadrmmetrics_lite",
"libmediametrics",
"libmediautils",
+ "libprotobuf-cpp-lite",
"libstagefright_foundation",
"libutils",
"android.hardware.drm@1.0",
+ "android.hardware.drm@1.1",
"libhidlallocatorutils",
"libhidlbase",
"libhidltransport",
@@ -44,3 +42,70 @@
"-Wall",
],
}
+
+// This is the version of the drm metrics configured for protobuf lite.
+cc_library_shared {
+ name: "libmediadrmmetrics_lite",
+ srcs: [
+ "DrmMetrics.cpp",
+ "PluginMetricsReporting.cpp",
+ "protos/metrics.proto",
+ ],
+
+ proto: {
+ export_proto_headers: true,
+ type: "lite",
+ },
+ shared_libs: [
+ "android.hardware.drm@1.0",
+ "android.hardware.drm@1.1",
+ "libbase",
+ "libbinder",
+ "libhidlbase",
+ "liblog",
+ "libmediametrics",
+ "libprotobuf-cpp-lite",
+ "libstagefright_foundation",
+ "libutils",
+ ],
+ cflags: [
+ // Suppress unused parameter and no error options. These cause problems
+ // when using the map type in a proto definition.
+ "-Wno-unused-parameter",
+ "-Wno-error",
+ ],
+}
+
+// This is the version of the drm metrics library configured for full protobuf.
+cc_library_shared {
+ name: "libmediadrmmetrics_full",
+ srcs: [
+ "DrmMetrics.cpp",
+ "PluginMetricsReporting.cpp",
+ "protos/metrics.proto",
+ ],
+
+ proto: {
+ export_proto_headers: true,
+ type: "full",
+ },
+ shared_libs: [
+ "android.hardware.drm@1.0",
+ "android.hardware.drm@1.1",
+ "libbase",
+ "libbinder",
+ "libhidlbase",
+ "liblog",
+ "libmediametrics",
+ "libprotobuf-cpp-full",
+ "libstagefright_foundation",
+ "libutils",
+ ],
+ cflags: [
+ // Suppress unused parameter and no error options. These cause problems
+ // when using the map type in a proto definition.
+ "-Wno-unused-parameter",
+ "-Wno-error",
+ ],
+}
+
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index b9b3685..3035c5a 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -22,14 +22,14 @@
#include <android/hidl/manager/1.0/IServiceManager.h>
#include <binder/IMemory.h>
-#include <cutils/native_handle.h>
-#include <media/CryptoHal.h>
+#include <hidlmemory/FrameworkUtils.h>
#include <media/hardware/CryptoAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AString.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaErrors.h>
-#include <hidlmemory/FrameworkUtils.h>
+#include <mediadrm/CryptoHal.h>
+
using ::android::hardware::drm::V1_0::BufferType;
using ::android::hardware::drm::V1_0::DestinationBuffer;
@@ -118,15 +118,24 @@
auto manager = ::IServiceManager::getService();
if (manager != NULL) {
- manager->listByInterface(ICryptoFactory::descriptor,
+ manager->listByInterface(drm::V1_0::ICryptoFactory::descriptor,
[&factories](const hidl_vec<hidl_string> &registered) {
for (const auto &instance : registered) {
- auto factory = ICryptoFactory::getService(instance);
+ auto factory = drm::V1_0::ICryptoFactory::getService(instance);
if (factory != NULL) {
+ ALOGD("found drm@1.0 ICryptoFactory %s", instance.c_str());
factories.push_back(factory);
- ALOGI("makeCryptoFactories: factory instance %s is %s",
- instance.c_str(),
- factory->isRemote() ? "Remote" : "Not Remote");
+ }
+ }
+ }
+ );
+ manager->listByInterface(drm::V1_1::ICryptoFactory::descriptor,
+ [&factories](const hidl_vec<hidl_string> &registered) {
+ for (const auto &instance : registered) {
+ auto factory = drm::V1_1::ICryptoFactory::getService(instance);
+ if (factory != NULL) {
+ ALOGD("found drm@1.1 ICryptoFactory %s", instance.c_str());
+ factories.push_back(factory);
}
}
}
@@ -137,7 +146,7 @@
// must be in passthrough mode, load the default passthrough service
auto passthrough = ICryptoFactory::getService();
if (passthrough != NULL) {
- ALOGI("makeCryptoFactories: using default crypto instance");
+ ALOGI("makeCryptoFactories: using default passthrough crypto instance");
factories.push_back(passthrough);
} else {
ALOGE("Failed to find any crypto factories");
@@ -214,10 +223,14 @@
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
- return mInitCheck;
+ return false;
}
- return mPlugin->requiresSecureDecoderComponent(hidl_string(mime));
+ Return<bool> hResult = mPlugin->requiresSecureDecoderComponent(hidl_string(mime));
+ if (!hResult.isOk()) {
+ return false;
+ }
+ return hResult;
}
@@ -235,17 +248,12 @@
ALOGE("setHeapBase(): heap is NULL");
return -1;
}
- native_handle_t* nativeHandle = native_handle_create(1, 0);
- if (!nativeHandle) {
- ALOGE("setHeapBase(), failed to create native handle");
- return -1;
- }
Mutex::Autolock autoLock(mLock);
int32_t seqNum = mHeapSeqNum++;
sp<HidlMemory> hidlMemory = fromHeap(heap);
- mHeapBases.add(seqNum, mNextBufferId);
+ mHeapBases.add(seqNum, HeapBase(mNextBufferId, heap->getSize()));
Return<void> hResult = mPlugin->setSharedBufferBase(*hidlMemory, mNextBufferId++);
ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
return seqNum;
@@ -254,7 +262,22 @@
void CryptoHal::clearHeapBase(int32_t seqNum) {
Mutex::Autolock autoLock(mLock);
- mHeapBases.removeItem(seqNum);
+ /*
+ * Clear the remote shared memory mapping by setting the shared
+ * buffer base to a null hidl_memory.
+ *
+ * TODO: Add a releaseSharedBuffer method in a future DRM HAL
+ * API version to make this explicit.
+ */
+ ssize_t index = mHeapBases.indexOfKey(seqNum);
+ if (index >= 0) {
+ if (mPlugin != NULL) {
+ uint32_t bufferId = mHeapBases[index].getBufferId();
+ Return<void> hResult = mPlugin->setSharedBufferBase(hidl_memory(), bufferId);
+ ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
+ }
+ mHeapBases.removeItem(seqNum);
+ }
}
status_t CryptoHal::toSharedBuffer(const sp<IMemory>& memory, int32_t seqNum, ::SharedBuffer* buffer) {
@@ -270,10 +293,26 @@
return UNEXPECTED_NULL;
}
- // memory must be in the declared heap
- CHECK(mHeapBases.indexOfKey(seqNum) >= 0);
+ // memory must be in one of the heaps that have been set
+ if (mHeapBases.indexOfKey(seqNum) < 0) {
+ return UNKNOWN_ERROR;
+ }
- buffer->bufferId = mHeapBases.valueFor(seqNum);
+ // heap must be the same size as the one that was set in setHeapBase
+ if (mHeapBases.valueFor(seqNum).getSize() != heap->getSize()) {
+ android_errorWriteLog(0x534e4554, "76221123");
+ return UNKNOWN_ERROR;
+ }
+
+ // memory must be within the address space of the heap
+ if (memory->pointer() != static_cast<uint8_t *>(heap->getBase()) + memory->offset() ||
+ heap->getSize() < memory->offset() + memory->size() ||
+ SIZE_MAX - memory->offset() < memory->size()) {
+ android_errorWriteLog(0x534e4554, "76221123");
+ return UNKNOWN_ERROR;
+ }
+
+ buffer->bufferId = mHeapBases.valueFor(seqNum).getBufferId();
buffer->offset = offset >= 0 ? offset : 0;
buffer->size = size;
return OK;
@@ -332,10 +371,13 @@
return status;
}
secure = false;
- } else {
+ } else if (destination.mType == kDestinationTypeNativeHandle) {
hDestination.type = BufferType::NATIVE_HANDLE;
hDestination.secureMemory = hidl_handle(destination.mHandle);
secure = true;
+ } else {
+ android_errorWriteLog(0x534e4554, "70526702");
+ return UNKNOWN_ERROR;
}
::SharedBuffer hSource;
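The toSharedBuffer() and decrypt() changes above tighten validation: the destination type must be a known enum value, the heap registered in setHeapBase() must still be the same size, and the IMemory region must lie entirely inside that heap. A standalone sketch of the containment check, assuming binder/IMemory.h; the helper name regionInsideHeap is illustrative:

    // Returns true only if [offset, offset + size) fits inside the heap and the
    // mapped pointer really is heap base + offset; the subtraction guards the
    // offset + size addition against overflow.
    static bool regionInsideHeap(const android::sp<android::IMemory>& memory,
                                 const android::sp<android::IMemoryHeap>& heap) {
        if (memory == nullptr || heap == nullptr) return false;
        size_t offset = memory->offset();
        size_t size = memory->size();
        if (SIZE_MAX - offset < size) return false;          // offset + size overflows
        if (offset + size > heap->getSize()) return false;   // runs past the heap
        return memory->pointer() ==
                static_cast<uint8_t *>(heap->getBase()) + offset;
    }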
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index bc37557..cf08610 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -16,48 +16,81 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "DrmHal"
+#include <iomanip>
+
#include <utils/Log.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
-#include <android/hardware/drm/1.0/IDrmFactory.h>
-#include <android/hardware/drm/1.0/IDrmPlugin.h>
#include <android/hardware/drm/1.0/types.h>
#include <android/hidl/manager/1.0/IServiceManager.h>
#include <hidl/ServiceManagement.h>
-#include <media/DrmHal.h>
-#include <media/DrmSessionClientInterface.h>
-#include <media/DrmSessionManager.h>
+#include <media/EventMetric.h>
#include <media/PluginMetricsReporting.h>
#include <media/drm/DrmAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/base64.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaErrors.h>
+#include <mediadrm/DrmHal.h>
+#include <mediadrm/DrmSessionClientInterface.h>
+#include <mediadrm/DrmSessionManager.h>
-using ::android::hardware::drm::V1_0::EventType;
-using ::android::hardware::drm::V1_0::IDrmFactory;
-using ::android::hardware::drm::V1_0::IDrmPlugin;
-using ::android::hardware::drm::V1_0::KeyedVector;
-using ::android::hardware::drm::V1_0::KeyRequestType;
-using ::android::hardware::drm::V1_0::KeyStatus;
-using ::android::hardware::drm::V1_0::KeyStatusType;
-using ::android::hardware::drm::V1_0::KeyType;
-using ::android::hardware::drm::V1_0::KeyValue;
-using ::android::hardware::drm::V1_0::SecureStop;
-using ::android::hardware::drm::V1_0::Status;
+using drm::V1_0::KeyedVector;
+using drm::V1_0::KeyStatusType;
+using drm::V1_0::KeyType;
+using drm::V1_0::KeyValue;
+using drm::V1_1::HdcpLevel;
+using drm::V1_0::SecureStop;
+using drm::V1_1::SecureStopRelease;
+using drm::V1_0::SecureStopId;
+using drm::V1_1::SecurityLevel;
+using drm::V1_0::Status;
+using ::android::hardware::drm::V1_1::DrmMetricGroup;
using ::android::hardware::hidl_array;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hidl::manager::V1_0::IServiceManager;
+using ::android::os::PersistableBundle;
using ::android::sp;
+namespace {
+
+// This constant corresponds to the PROPERTY_DEVICE_UNIQUE_ID constant
+// in the MediaDrm API.
+constexpr char kPropertyDeviceUniqueId[] = "deviceUniqueId";
+constexpr char kEqualsSign[] = "=";
+
+template<typename T>
+std::string toBase64StringNoPad(const T* data, size_t size) {
+ // Note that the base 64 conversion only works with arrays of single-byte
+ // values. If the source is empty or is not an array of single-byte values,
+ // return empty string.
+ if (size == 0 || sizeof(data[0]) != 1) {
+ return "";
+ }
+
+ android::AString outputString;
+ encodeBase64(data, size, &outputString);
+ // Remove trailing equals padding if it exists.
+ while (outputString.size() > 0 && outputString.endsWith(kEqualsSign)) {
+ outputString.erase(outputString.size() - 1, 1);
+ }
+
+ return std::string(outputString.c_str(), outputString.size());
+}
+
+} // anonymous namespace
+
namespace android {
+#define INIT_CHECK() {if (mInitCheck != OK) return mInitCheck;}
+
static inline int getCallingPid() {
return IPCThreadState::self()->getCallingPid();
}
@@ -89,6 +122,42 @@
return hidl_string(string.string());
}
+static DrmPlugin::SecurityLevel toSecurityLevel(SecurityLevel level) {
+ switch(level) {
+ case SecurityLevel::SW_SECURE_CRYPTO:
+ return DrmPlugin::kSecurityLevelSwSecureCrypto;
+ case SecurityLevel::SW_SECURE_DECODE:
+ return DrmPlugin::kSecurityLevelSwSecureDecode;
+ case SecurityLevel::HW_SECURE_CRYPTO:
+ return DrmPlugin::kSecurityLevelHwSecureCrypto;
+ case SecurityLevel::HW_SECURE_DECODE:
+ return DrmPlugin::kSecurityLevelHwSecureDecode;
+ case SecurityLevel::HW_SECURE_ALL:
+ return DrmPlugin::kSecurityLevelHwSecureAll;
+ default:
+ return DrmPlugin::kSecurityLevelUnknown;
+ }
+}
+
+static DrmPlugin::HdcpLevel toHdcpLevel(HdcpLevel level) {
+ switch(level) {
+ case HdcpLevel::HDCP_NONE:
+ return DrmPlugin::kHdcpNone;
+ case HdcpLevel::HDCP_V1:
+ return DrmPlugin::kHdcpV1;
+ case HdcpLevel::HDCP_V2:
+ return DrmPlugin::kHdcpV2;
+ case HdcpLevel::HDCP_V2_1:
+ return DrmPlugin::kHdcpV2_1;
+ case HdcpLevel::HDCP_V2_2:
+ return DrmPlugin::kHdcpV2_2;
+ case HdcpLevel::HDCP_NO_OUTPUT:
+ return DrmPlugin::kHdcpNoOutput;
+ default:
+ return DrmPlugin::kHdcpLevelUnknown;
+ }
+}
+
static ::KeyedVector toHidlKeyedVector(const KeyedVector<String8, String8>&
keyedVector) {
@@ -121,6 +190,15 @@
return secureStops;
}
+static List<Vector<uint8_t>> toSecureStopIds(const hidl_vec<SecureStopId>&
+ hSecureStopIds) {
+ List<Vector<uint8_t>> secureStopIds;
+ for (size_t i = 0; i < hSecureStopIds.size(); i++) {
+ secureStopIds.push_back(toVector(hSecureStopIds[i]));
+ }
+ return secureStopIds;
+}
+
static status_t toStatusT(Status status) {
switch (status) {
case Status::OK:
@@ -196,35 +274,63 @@
}
void DrmHal::closeOpenSessions() {
- if (mPlugin != NULL) {
- for (size_t i = 0; i < mOpenSessions.size(); i++) {
- mPlugin->closeSession(toHidlVec(mOpenSessions[i]));
- DrmSessionManager::Instance()->removeSession(mOpenSessions[i]);
- }
+ Mutex::Autolock autoLock(mLock);
+ auto openSessions = mOpenSessions;
+ for (size_t i = 0; i < openSessions.size(); i++) {
+ mLock.unlock();
+ closeSession(openSessions[i]);
+ mLock.lock();
}
mOpenSessions.clear();
}
DrmHal::~DrmHal() {
- closeOpenSessions();
DrmSessionManager::Instance()->removeDrm(mDrmSessionClient);
}
+void DrmHal::cleanup() {
+ closeOpenSessions();
+
+ Mutex::Autolock autoLock(mLock);
+ reportPluginMetrics();
+ reportFrameworkMetrics();
+
+ setListener(NULL);
+ mInitCheck = NO_INIT;
+
+ if (mPlugin != NULL) {
+ if (!mPlugin->setListener(NULL).isOk()) {
+ mInitCheck = DEAD_OBJECT;
+ }
+ }
+ mPlugin.clear();
+ mPluginV1_1.clear();
+}
+
Vector<sp<IDrmFactory>> DrmHal::makeDrmFactories() {
Vector<sp<IDrmFactory>> factories;
auto manager = hardware::defaultServiceManager();
if (manager != NULL) {
- manager->listByInterface(IDrmFactory::descriptor,
+ manager->listByInterface(drm::V1_0::IDrmFactory::descriptor,
[&factories](const hidl_vec<hidl_string> &registered) {
for (const auto &instance : registered) {
- auto factory = IDrmFactory::getService(instance);
+ auto factory = drm::V1_0::IDrmFactory::getService(instance);
if (factory != NULL) {
+ ALOGD("found drm@1.0 IDrmFactory %s", instance.c_str());
factories.push_back(factory);
- ALOGI("makeDrmFactories: factory instance %s is %s",
- instance.c_str(),
- factory->isRemote() ? "Remote" : "Not Remote");
+ }
+ }
+ }
+ );
+ manager->listByInterface(drm::V1_1::IDrmFactory::descriptor,
+ [&factories](const hidl_vec<hidl_string> &registered) {
+ for (const auto &instance : registered) {
+ auto factory = drm::V1_1::IDrmFactory::getService(instance);
+ if (factory != NULL) {
+ ALOGD("found drm@1.1 IDrmFactory %s", instance.c_str());
+ factories.push_back(factory);
}
}
}
@@ -235,7 +341,7 @@
// must be in passthrough mode, load the default passthrough service
auto passthrough = IDrmFactory::getService();
if (passthrough != NULL) {
- ALOGI("makeDrmFactories: using default drm instance");
+ ALOGI("makeDrmFactories: using default passthrough drm instance");
factories.push_back(passthrough);
} else {
ALOGE("Failed to find any drm factories");
@@ -246,6 +352,8 @@
sp<IDrmPlugin> DrmHal::makeDrmPlugin(const sp<IDrmFactory>& factory,
const uint8_t uuid[16], const String8& appPackageName) {
+ mAppPackageName = appPackageName;
+ mMetrics.SetAppPackageName(appPackageName);
sp<IDrmPlugin> plugin;
Return<void> hResult = factory->createPlugin(uuid, appPackageName.string(),
@@ -284,6 +392,7 @@
Return<void> DrmHal::sendEvent(EventType hEventType,
const hidl_vec<uint8_t>& sessionId, const hidl_vec<uint8_t>& data) {
+ mMetrics.mEventCounter.Increment(hEventType);
mEventLock.lock();
sp<IDrmClient> listener = mListener;
@@ -374,12 +483,21 @@
break;
}
obj.writeInt32(type);
+ mMetrics.mKeyStatusChangeCounter.Increment(keyStatus.type);
}
obj.writeInt32(hasNewUsableKey);
Mutex::Autolock lock(mNotifyLock);
listener->notify(DrmPlugin::kDrmPluginEventKeysChange, 0, &obj);
+ } else {
+ // There's no listener. But we still want to count the key change
+ // events.
+ size_t nKeys = keyStatusList.size();
+ for (size_t i = 0; i < nKeys; i++) {
+ mMetrics.mKeyStatusChangeCounter.Increment(keyStatusList[i].type);
+ }
}
+
return Void();
}
@@ -407,6 +525,9 @@
for (size_t i = 0; i < mFactories.size(); i++) {
if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
mPlugin = makeDrmPlugin(mFactories[i], uuid, appPackageName);
+ if (mPlugin != NULL) {
+ mPluginV1_1 = drm::V1_1::IDrmPlugin::castFrom(mPlugin);
+ }
}
}
@@ -424,46 +545,66 @@
}
status_t DrmHal::destroyPlugin() {
- Mutex::Autolock autoLock(mLock);
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- closeOpenSessions();
- reportMetrics();
- setListener(NULL);
- mInitCheck = NO_INIT;
-
- if (mPlugin != NULL) {
- if (!mPlugin->setListener(NULL).isOk()) {
- mInitCheck = DEAD_OBJECT;
- }
- }
- mPlugin.clear();
+ cleanup();
return OK;
}
-status_t DrmHal::openSession(Vector<uint8_t> &sessionId) {
+status_t DrmHal::openSession(DrmPlugin::SecurityLevel level,
+ Vector<uint8_t> &sessionId) {
Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
- if (mInitCheck != OK) {
- return mInitCheck;
+ SecurityLevel hSecurityLevel;
+ bool setSecurityLevel = true;
+
+ switch(level) {
+ case DrmPlugin::kSecurityLevelSwSecureCrypto:
+ hSecurityLevel = SecurityLevel::SW_SECURE_CRYPTO;
+ break;
+ case DrmPlugin::kSecurityLevelSwSecureDecode:
+ hSecurityLevel = SecurityLevel::SW_SECURE_DECODE;
+ break;
+ case DrmPlugin::kSecurityLevelHwSecureCrypto:
+ hSecurityLevel = SecurityLevel::HW_SECURE_CRYPTO;
+ break;
+ case DrmPlugin::kSecurityLevelHwSecureDecode:
+ hSecurityLevel = SecurityLevel::HW_SECURE_DECODE;
+ break;
+ case DrmPlugin::kSecurityLevelHwSecureAll:
+ hSecurityLevel = SecurityLevel::HW_SECURE_ALL;
+ break;
+ case DrmPlugin::kSecurityLevelMax:
+ setSecurityLevel = false;
+ break;
+ default:
+ return ERROR_DRM_CANNOT_HANDLE;
}
status_t err = UNKNOWN_ERROR;
-
bool retry = true;
do {
hidl_vec<uint8_t> hSessionId;
- Return<void> hResult = mPlugin->openSession(
- [&](Status status, const hidl_vec<uint8_t>& id) {
- if (status == Status::OK) {
- sessionId = toVector(id);
+ Return<void> hResult;
+ if (mPluginV1_1 == NULL || !setSecurityLevel) {
+ hResult = mPlugin->openSession(
+ [&](Status status,const hidl_vec<uint8_t>& id) {
+ if (status == Status::OK) {
+ sessionId = toVector(id);
+ }
+ err = toStatusT(status);
}
- err = toStatusT(status);
- }
- );
+ );
+ } else {
+ hResult = mPluginV1_1->openSession_1_1(hSecurityLevel,
+ [&](Status status, const hidl_vec<uint8_t>& id) {
+ if (status == Status::OK) {
+ sessionId = toVector(id);
+ }
+ err = toStatusT(status);
+ }
+ );
+ }
if (!hResult.isOk()) {
err = DEAD_OBJECT;
@@ -485,16 +626,16 @@
DrmSessionManager::Instance()->addSession(getCallingPid(),
mDrmSessionClient, sessionId);
mOpenSessions.push(sessionId);
+ mMetrics.SetSessionStart(sessionId);
}
+
+ mMetrics.mOpenSessionCounter.Increment(err);
return err;
}
status_t DrmHal::closeSession(Vector<uint8_t> const &sessionId) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
Return<Status> status = mPlugin->closeSession(toHidlVec(sessionId));
if (status.isOk()) {
@@ -507,9 +648,12 @@
}
}
}
- reportMetrics();
- return toStatusT(status);
+ status_t response = toStatusT(status);
+ mMetrics.SetSessionEnd(sessionId);
+ mMetrics.mCloseSessionCounter.Increment(response);
+ return response;
}
+ mMetrics.mCloseSessionCounter.Increment(DEAD_OBJECT);
return DEAD_OBJECT;
}
@@ -519,10 +663,8 @@
String8> const &optionalParameters, Vector<uint8_t> &request,
String8 &defaultUrl, DrmPlugin::KeyRequestType *keyRequestType) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
+ EventTimer<status_t> keyRequestTimer(&mMetrics.mGetKeyRequestTimeUs);
DrmSessionManager::Instance()->useSession(sessionId);
@@ -534,6 +676,7 @@
} else if (keyType == DrmPlugin::kKeyType_Release) {
hKeyType = KeyType::RELEASE;
} else {
+ keyRequestTimer.SetAttribute(BAD_VALUE);
return BAD_VALUE;
}
@@ -541,23 +684,63 @@
status_t err = UNKNOWN_ERROR;
+ if (mPluginV1_1 != NULL) {
+ Return<void> hResult =
+ mPluginV1_1->getKeyRequest_1_1(
+ toHidlVec(sessionId), toHidlVec(initData),
+ toHidlString(mimeType), hKeyType, hOptionalParameters,
+ [&](Status status, const hidl_vec<uint8_t>& hRequest,
+ drm::V1_1::KeyRequestType hKeyRequestType,
+ const hidl_string& hDefaultUrl) {
+
+ if (status == Status::OK) {
+ request = toVector(hRequest);
+ defaultUrl = toString8(hDefaultUrl);
+
+ switch (hKeyRequestType) {
+ case drm::V1_1::KeyRequestType::INITIAL:
+ *keyRequestType = DrmPlugin::kKeyRequestType_Initial;
+ break;
+ case drm::V1_1::KeyRequestType::RENEWAL:
+ *keyRequestType = DrmPlugin::kKeyRequestType_Renewal;
+ break;
+ case drm::V1_1::KeyRequestType::RELEASE:
+ *keyRequestType = DrmPlugin::kKeyRequestType_Release;
+ break;
+ case drm::V1_1::KeyRequestType::NONE:
+ *keyRequestType = DrmPlugin::kKeyRequestType_None;
+ break;
+ case drm::V1_1::KeyRequestType::UPDATE:
+ *keyRequestType = DrmPlugin::kKeyRequestType_Update;
+ break;
+ default:
+ *keyRequestType = DrmPlugin::kKeyRequestType_Unknown;
+ break;
+ }
+ err = toStatusT(status);
+ }
+ });
+ return hResult.isOk() ? err : DEAD_OBJECT;
+ }
+
Return<void> hResult = mPlugin->getKeyRequest(toHidlVec(sessionId),
toHidlVec(initData), toHidlString(mimeType), hKeyType, hOptionalParameters,
[&](Status status, const hidl_vec<uint8_t>& hRequest,
- KeyRequestType hKeyRequestType, const hidl_string& hDefaultUrl) {
+ drm::V1_0::KeyRequestType hKeyRequestType,
+ const hidl_string& hDefaultUrl) {
if (status == Status::OK) {
request = toVector(hRequest);
defaultUrl = toString8(hDefaultUrl);
switch (hKeyRequestType) {
- case KeyRequestType::INITIAL:
+ case drm::V1_0::KeyRequestType::INITIAL:
*keyRequestType = DrmPlugin::kKeyRequestType_Initial;
break;
- case KeyRequestType::RENEWAL:
+ case drm::V1_0::KeyRequestType::RENEWAL:
*keyRequestType = DrmPlugin::kKeyRequestType_Renewal;
break;
- case KeyRequestType::RELEASE:
+ case drm::V1_0::KeyRequestType::RELEASE:
*keyRequestType = DrmPlugin::kKeyRequestType_Release;
break;
default:
@@ -568,16 +751,17 @@
}
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ err = hResult.isOk() ? err : DEAD_OBJECT;
+ keyRequestTimer.SetAttribute(err);
+ return err;
}
status_t DrmHal::provideKeyResponse(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &response, Vector<uint8_t> &keySetId) {
Mutex::Autolock autoLock(mLock);
+ EventTimer<status_t> keyResponseTimer(&mMetrics.mProvideKeyResponseTimeUs);
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
@@ -592,41 +776,35 @@
err = toStatusT(status);
}
);
-
- return hResult.isOk() ? err : DEAD_OBJECT;
+ err = hResult.isOk() ? err : DEAD_OBJECT;
+ keyResponseTimer.SetAttribute(err);
+ return err;
}
status_t DrmHal::removeKeys(Vector<uint8_t> const &keySetId) {
Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- return toStatusT(mPlugin->removeKeys(toHidlVec(keySetId)));
+ Return<Status> status = mPlugin->removeKeys(toHidlVec(keySetId));
+ return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
}
status_t DrmHal::restoreKeys(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &keySetId) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
- return toStatusT(mPlugin->restoreKeys(toHidlVec(sessionId),
- toHidlVec(keySetId)));
+ Return<Status> status = mPlugin->restoreKeys(toHidlVec(sessionId),
+ toHidlVec(keySetId));
+ return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
}
status_t DrmHal::queryKeyStatus(Vector<uint8_t> const &sessionId,
KeyedVector<String8, String8> &infoMap) const {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
@@ -650,10 +828,7 @@
String8 const &certAuthority, Vector<uint8_t> &request,
String8 &defaultUrl) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
status_t err = UNKNOWN_ERROR;
@@ -669,16 +844,15 @@
}
);
- return hResult.isOk() ? err : DEAD_OBJECT;
+ err = hResult.isOk() ? err : DEAD_OBJECT;
+ mMetrics.mGetProvisionRequestCounter.Increment(err);
+ return err;
}
status_t DrmHal::provideProvisionResponse(Vector<uint8_t> const &response,
Vector<uint8_t> &certificate, Vector<uint8_t> &wrappedKey) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
status_t err = UNKNOWN_ERROR;
@@ -693,15 +867,14 @@
}
);
- return hResult.isOk() ? err : DEAD_OBJECT;
+ err = hResult.isOk() ? err : DEAD_OBJECT;
+ mMetrics.mProvideProvisionResponseCounter.Increment(err);
+ return err;
}
status_t DrmHal::getSecureStops(List<Vector<uint8_t>> &secureStops) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
status_t err = UNKNOWN_ERROR;
@@ -718,13 +891,36 @@
}
-status_t DrmHal::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
+status_t DrmHal::getSecureStopIds(List<Vector<uint8_t>> &secureStopIds) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
return mInitCheck;
}
+ if (mPluginV1_1 == NULL) {
+ return ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPluginV1_1->getSecureStopIds(
+ [&](Status status, const hidl_vec<SecureStopId>& hSecureStopIds) {
+ if (status == Status::OK) {
+ secureStopIds = toSecureStopIds(hSecureStopIds);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+
+status_t DrmHal::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
+ Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
+
status_t err = UNKNOWN_ERROR;
Return<void> hResult = mPlugin->getSecureStop(toHidlVec(ssid),
@@ -741,22 +937,132 @@
status_t DrmHal::releaseSecureStops(Vector<uint8_t> const &ssRelease) {
Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
- if (mInitCheck != OK) {
- return mInitCheck;
+ Return<Status> status(Status::ERROR_DRM_UNKNOWN);
+ if (mPluginV1_1 != NULL) {
+ SecureStopRelease secureStopRelease;
+ secureStopRelease.opaqueData = toHidlVec(ssRelease);
+ status = mPluginV1_1->releaseSecureStops(secureStopRelease);
+ } else {
+ status = mPlugin->releaseSecureStop(toHidlVec(ssRelease));
}
-
- return toStatusT(mPlugin->releaseSecureStop(toHidlVec(ssRelease)));
+ return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
}
-status_t DrmHal::releaseAllSecureStops() {
+status_t DrmHal::removeSecureStop(Vector<uint8_t> const &ssid) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
return mInitCheck;
}
- return toStatusT(mPlugin->releaseAllSecureStops());
+ if (mPluginV1_1 == NULL) {
+ return ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ Return<Status> status = mPluginV1_1->removeSecureStop(toHidlVec(ssid));
+ return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHal::removeAllSecureStops() {
+ Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
+
+ Return<Status> status(Status::ERROR_DRM_UNKNOWN);
+ if (mPluginV1_1 != NULL) {
+ status = mPluginV1_1->removeAllSecureStops();
+ } else {
+ status = mPlugin->releaseAllSecureStops();
+ }
+ return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+}
+
+status_t DrmHal::getHdcpLevels(DrmPlugin::HdcpLevel *connected,
+ DrmPlugin::HdcpLevel *max) const {
+ Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
+
+ if (connected == NULL || max == NULL) {
+ return BAD_VALUE;
+ }
+ status_t err = UNKNOWN_ERROR;
+
+ if (mPluginV1_1 == NULL) {
+ return ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ *connected = DrmPlugin::kHdcpLevelUnknown;
+ *max = DrmPlugin::kHdcpLevelUnknown;
+
+ Return<void> hResult = mPluginV1_1->getHdcpLevels(
+ [&](Status status, const HdcpLevel& hConnected, const HdcpLevel& hMax) {
+ if (status == Status::OK) {
+ *connected = toHdcpLevel(hConnected);
+ *max = toHdcpLevel(hMax);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::getNumberOfSessions(uint32_t *open, uint32_t *max) const {
+ Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
+
+ if (open == NULL || max == NULL) {
+ return BAD_VALUE;
+ }
+ status_t err = UNKNOWN_ERROR;
+
+ *open = 0;
+ *max = 0;
+
+ if (mPluginV1_1 == NULL) {
+ return ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ Return<void> hResult = mPluginV1_1->getNumberOfSessions(
+ [&](Status status, uint32_t hOpen, uint32_t hMax) {
+ if (status == Status::OK) {
+ *open = hOpen;
+ *max = hMax;
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::getSecurityLevel(Vector<uint8_t> const &sessionId,
+ DrmPlugin::SecurityLevel *level) const {
+ Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
+
+ if (level == NULL) {
+ return BAD_VALUE;
+ }
+ status_t err = UNKNOWN_ERROR;
+
+ if (mPluginV1_1 == NULL) {
+ return ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ *level = DrmPlugin::kSecurityLevelUnknown;
+
+ Return<void> hResult = mPluginV1_1->getSecurityLevel(toHidlVec(sessionId),
+ [&](Status status, SecurityLevel hLevel) {
+ if (status == Status::OK) {
+ *level = toSecurityLevel(hLevel);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
}
status_t DrmHal::getPropertyString(String8 const &name, String8 &value ) const {
@@ -767,10 +1073,7 @@
status_t DrmHal::getPropertyStringInternal(String8 const &name, String8 &value) const {
// This function is internal to the class and should only be called while
// mLock is already held.
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
status_t err = UNKNOWN_ERROR;
@@ -794,10 +1097,7 @@
status_t DrmHal::getPropertyByteArrayInternal(String8 const &name, Vector<uint8_t> &value ) const {
// This function is internal to the class and should only be called while
// mLock is already held.
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
status_t err = UNKNOWN_ERROR;
@@ -810,73 +1110,106 @@
}
);
- return hResult.isOk() ? err : DEAD_OBJECT;
+ err = hResult.isOk() ? err : DEAD_OBJECT;
+ if (name == kPropertyDeviceUniqueId) {
+ mMetrics.mGetDeviceUniqueIdCounter.Increment(err);
+ }
+ return err;
}
status_t DrmHal::setPropertyString(String8 const &name, String8 const &value ) const {
Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- Status status = mPlugin->setPropertyString(toHidlString(name),
+ Return<Status> status = mPlugin->setPropertyString(toHidlString(name),
toHidlString(value));
- return toStatusT(status);
+ return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
}
status_t DrmHal::setPropertyByteArray(String8 const &name,
Vector<uint8_t> const &value ) const {
Mutex::Autolock autoLock(mLock);
+ INIT_CHECK();
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- Status status = mPlugin->setPropertyByteArray(toHidlString(name),
+ Return<Status> status = mPlugin->setPropertyByteArray(toHidlString(name),
toHidlVec(value));
- return toStatusT(status);
+ return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
}
+status_t DrmHal::getMetrics(PersistableBundle* metrics) {
+ if (metrics == nullptr) {
+ return UNEXPECTED_NULL;
+ }
+ mMetrics.Export(metrics);
+
+ // Append vendor metrics if they are supported.
+ if (mPluginV1_1 != NULL) {
+ String8 vendor;
+ String8 description;
+ if (getPropertyStringInternal(String8("vendor"), vendor) != OK
+ || vendor.isEmpty()) {
+ ALOGE("Get vendor failed or is empty");
+ vendor = "NONE";
+ }
+ if (getPropertyStringInternal(String8("description"), description) != OK
+ || description.isEmpty()) {
+ ALOGE("Get description failed or is empty.");
+ description = "NONE";
+ }
+ vendor += ".";
+ vendor += description;
+
+ hidl_vec<DrmMetricGroup> pluginMetrics;
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> status = mPluginV1_1->getMetrics(
+ [&](Status status, hidl_vec<DrmMetricGroup> pluginMetrics) {
+ if (status != Status::OK) {
+ ALOGV("Error getting plugin metrics: %d", status);
+ } else {
+ PersistableBundle pluginBundle;
+ if (MediaDrmMetrics::HidlMetricsToBundle(
+ pluginMetrics, &pluginBundle) == OK) {
+ metrics->putPersistableBundle(String16(vendor), pluginBundle);
+ }
+ }
+ err = toStatusT(status);
+ });
+ return status.isOk() ? err : DEAD_OBJECT;
+ }
+
+ return OK;
+}
status_t DrmHal::setCipherAlgorithm(Vector<uint8_t> const &sessionId,
String8 const &algorithm) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
- Status status = mPlugin->setCipherAlgorithm(toHidlVec(sessionId),
+ Return<Status> status = mPlugin->setCipherAlgorithm(toHidlVec(sessionId),
toHidlString(algorithm));
- return toStatusT(status);
+ return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
}
status_t DrmHal::setMacAlgorithm(Vector<uint8_t> const &sessionId,
String8 const &algorithm) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
- Status status = mPlugin->setMacAlgorithm(toHidlVec(sessionId),
+ Return<Status> status = mPlugin->setMacAlgorithm(toHidlVec(sessionId),
toHidlString(algorithm));
- return toStatusT(status);
+ return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
}
status_t DrmHal::encrypt(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
@@ -899,10 +1232,7 @@
Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
@@ -925,10 +1255,7 @@
Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
Vector<uint8_t> &signature) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
@@ -951,10 +1278,7 @@
Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
Vector<uint8_t> const &signature, bool &match) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
@@ -979,10 +1303,7 @@
String8 const &algorithm, Vector<uint8_t> const &message,
Vector<uint8_t> const &wrappedKey, Vector<uint8_t> &signature) {
Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
+ INIT_CHECK();
if (!checkPermission("android.permission.ACCESS_DRM_CERTIFICATES")) {
return -EPERM;
@@ -1007,17 +1328,7 @@
void DrmHal::binderDied(const wp<IBinder> &the_late_who __unused)
{
- Mutex::Autolock autoLock(mLock);
- closeOpenSessions();
- setListener(NULL);
- mInitCheck = NO_INIT;
-
- if (mPlugin != NULL) {
- if (!mPlugin->setListener(NULL).isOk()) {
- mInitCheck = DEAD_OBJECT;
- }
- }
- mPlugin.clear();
+ cleanup();
}
void DrmHal::writeByteArray(Parcel &obj, hidl_vec<uint8_t> const &vec)
@@ -1030,18 +1341,55 @@
}
}
-void DrmHal::reportMetrics() const
+void DrmHal::reportFrameworkMetrics() const
{
- Vector<uint8_t> metrics;
+ MediaAnalyticsItem item("mediadrm");
+ item.generateSessionID();
+ item.setPkgName(mMetrics.GetAppPackageName().c_str());
+ String8 vendor;
+ String8 description;
+ status_t result = getPropertyStringInternal(String8("vendor"), vendor);
+ if (result != OK) {
+ ALOGE("Failed to get vendor from drm plugin: %d", result);
+ } else {
+ item.setCString("vendor", vendor.c_str());
+ }
+ result = getPropertyStringInternal(String8("description"), description);
+ if (result != OK) {
+ ALOGE("Failed to get description from drm plugin: %d", result);
+ } else {
+ item.setCString("description", description.c_str());
+ }
+
+ std::string serializedMetrics;
+ result = mMetrics.GetSerializedMetrics(&serializedMetrics);
+ if (result != OK) {
+ ALOGE("Failed to serialize framework metrics: %d", result);
+ }
+ std::string b64EncodedMetrics = toBase64StringNoPad(serializedMetrics.data(),
+ serializedMetrics.size());
+ if (!b64EncodedMetrics.empty()) {
+ item.setCString("serialized_metrics", b64EncodedMetrics.c_str());
+ }
+ if (!item.selfrecord()) {
+ ALOGE("Failed to self record framework metrics");
+ }
+}
+
+void DrmHal::reportPluginMetrics() const
+{
+ Vector<uint8_t> metricsVector;
String8 vendor;
String8 description;
if (getPropertyStringInternal(String8("vendor"), vendor) == OK &&
getPropertyStringInternal(String8("description"), description) == OK &&
- getPropertyByteArrayInternal(String8("metrics"), metrics) == OK) {
- status_t res = android::reportDrmPluginMetrics(
- metrics, vendor, description);
+ getPropertyByteArrayInternal(String8("metrics"), metricsVector) == OK) {
+ std::string metricsString = toBase64StringNoPad(metricsVector.array(),
+ metricsVector.size());
+ status_t res = android::reportDrmPluginMetrics(metricsString, vendor,
+ description, mAppPackageName);
if (res != OK) {
- ALOGE("Metrics were retrieved but could not be reported: %i", res);
+ ALOGE("Metrics were retrieved but could not be reported: %d", res);
}
}
}
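Together the DrmHal changes above add the drm@1.1 plugin path, per-call metrics, and the new session controls. A caller-side sketch of the new openSession() and getSecurityLevel() flow, with error handling trimmed; drm stands for any DrmHal (or IDrm) instance already created for a supported UUID:

    Vector<uint8_t> sessionId;
    // Ask for a specific robustness level; kSecurityLevelMax skips the
    // per-session level request and takes the legacy openSession() path.
    status_t err = drm->openSession(DrmPlugin::kSecurityLevelSwSecureCrypto,
                                    sessionId);
    if (err == OK) {
        DrmPlugin::SecurityLevel level = DrmPlugin::kSecurityLevelUnknown;
        // Served only by drm@1.1 plugins; a 1.0 plugin returns
        // ERROR_DRM_CANNOT_HANDLE, as in the implementation above.
        drm->getSecurityLevel(sessionId, &level);
        drm->closeSession(sessionId);
    }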
diff --git a/drm/libmediadrm/DrmMetrics.cpp b/drm/libmediadrm/DrmMetrics.cpp
new file mode 100644
index 0000000..4fed707
--- /dev/null
+++ b/drm/libmediadrm/DrmMetrics.cpp
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "DrmMetrics"
+#include <iomanip>
+#include <utility>
+
+#include <android-base/macros.h>
+#include <media/stagefright/foundation/base64.h>
+#include <mediadrm/DrmMetrics.h>
+#include <sys/time.h>
+#include <utils/Log.h>
+#include <utils/Timers.h>
+
+#include "protos/metrics.pb.h"
+
+using ::android::String16;
+using ::android::String8;
+using ::android::drm_metrics::DrmFrameworkMetrics;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::drm::V1_0::EventType;
+using ::android::hardware::drm::V1_0::KeyStatusType;
+using ::android::hardware::drm::V1_1::DrmMetricGroup;
+using ::android::os::PersistableBundle;
+
+namespace {
+
+template <typename T> std::string GetAttributeName(T type);
+
+template <> std::string GetAttributeName<KeyStatusType>(KeyStatusType type) {
+ static const char *type_names[] = {"USABLE", "EXPIRED",
+ "OUTPUT_NOT_ALLOWED", "STATUS_PENDING",
+ "INTERNAL_ERROR"};
+ if (((size_t)type) >= arraysize(type_names)) {
+ return "UNKNOWN_TYPE";
+ }
+ return type_names[(size_t)type];
+}
+
+template <> std::string GetAttributeName<EventType>(EventType type) {
+ static const char *type_names[] = {"PROVISION_REQUIRED", "KEY_NEEDED",
+ "KEY_EXPIRED", "VENDOR_DEFINED",
+ "SESSION_RECLAIMED"};
+ if (((size_t)type) >= arraysize(type_names)) {
+ return "UNKNOWN_TYPE";
+ }
+ return type_names[(size_t)type];
+}
+
+template <typename T>
+void ExportCounterMetric(const android::CounterMetric<T> &counter,
+ PersistableBundle *metrics) {
+ if (!metrics) {
+ ALOGE("metrics was unexpectedly null.");
+ return;
+ }
+ std::string success_count_name = counter.metric_name() + ".ok.count";
+ std::string error_count_name = counter.metric_name() + ".error.count";
+ std::vector<int64_t> status_values;
+ counter.ExportValues(
+ [&](const android::status_t status, const int64_t value) {
+ if (status == android::OK) {
+ metrics->putLong(android::String16(success_count_name.c_str()),
+ value);
+ } else {
+ int64_t total_errors(0);
+ metrics->getLong(android::String16(error_count_name.c_str()),
+ &total_errors);
+ metrics->putLong(android::String16(error_count_name.c_str()),
+ total_errors + value);
+ status_values.push_back(status);
+ }
+ });
+ if (!status_values.empty()) {
+ std::string error_list_name = counter.metric_name() + ".error.list";
+ metrics->putLongVector(android::String16(error_list_name.c_str()),
+ status_values);
+ }
+}
+
+template <typename T>
+void ExportCounterMetricWithAttributeNames(
+ const android::CounterMetric<T> &counter, PersistableBundle *metrics) {
+ if (!metrics) {
+ ALOGE("metrics was unexpectedly null.");
+ return;
+ }
+ counter.ExportValues([&](const T &attribute, const int64_t value) {
+ std::string name = counter.metric_name() + "." +
+ GetAttributeName(attribute) + ".count";
+ metrics->putLong(android::String16(name.c_str()), value);
+ });
+}
+
+template <typename T>
+void ExportEventMetric(const android::EventMetric<T> &event,
+ PersistableBundle *metrics) {
+ if (!metrics) {
+ ALOGE("metrics was unexpectedly null.");
+ return;
+ }
+ std::string success_count_name = event.metric_name() + ".ok.count";
+ std::string error_count_name = event.metric_name() + ".error.count";
+ std::string timing_name = event.metric_name() + ".ok.average_time_micros";
+ std::vector<int64_t> status_values;
+ event.ExportValues([&](const android::status_t &status,
+ const android::EventStatistics &value) {
+ if (status == android::OK) {
+ metrics->putLong(android::String16(success_count_name.c_str()),
+ value.count);
+ metrics->putLong(android::String16(timing_name.c_str()),
+ value.mean);
+ } else {
+ int64_t total_errors(0);
+ metrics->getLong(android::String16(error_count_name.c_str()),
+ &total_errors);
+ metrics->putLong(android::String16(error_count_name.c_str()),
+ total_errors + value.count);
+ status_values.push_back(status);
+ }
+ });
+ if (!status_values.empty()) {
+ std::string error_list_name = event.metric_name() + ".error.list";
+ metrics->putLongVector(android::String16(error_list_name.c_str()),
+ status_values);
+ }
+}
+
+void ExportSessionLifespans(
+ const std::map<std::string, std::pair<int64_t, int64_t>> &mSessionLifespans,
+ PersistableBundle *metrics) {
+ if (!metrics) {
+ ALOGE("metrics was unexpectedly null.");
+ return;
+ }
+
+ if (mSessionLifespans.empty()) {
+ return;
+ }
+
+ PersistableBundle startTimesBundle;
+ PersistableBundle endTimesBundle;
+ for (auto it = mSessionLifespans.begin(); it != mSessionLifespans.end();
+ it++) {
+ String16 key(it->first.c_str(), it->first.size());
+ startTimesBundle.putLong(key, it->second.first);
+ endTimesBundle.putLong(key, it->second.second);
+ }
+ metrics->putPersistableBundle(
+ android::String16("drm.mediadrm.session_start_times_ms"),
+ startTimesBundle);
+ metrics->putPersistableBundle(
+ android::String16("drm.mediadrm.session_end_times_ms"), endTimesBundle);
+}
+
+std::string ToHexString(const android::Vector<uint8_t> &sessionId) {
+ std::ostringstream out;
+ out << std::hex << std::setfill('0');
+ for (size_t i = 0; i < sessionId.size(); i++) {
+ out << std::setw(2) << (int)(sessionId[i]);
+ }
+ return out.str();
+}
+
+template <typename CT>
+void SetValue(const String16 &name, DrmMetricGroup::ValueType type,
+ const CT &value, PersistableBundle *bundle) {
+ switch (type) {
+ case DrmMetricGroup::ValueType::INT64_TYPE:
+ bundle->putLong(name, value.int64Value);
+ break;
+ case DrmMetricGroup::ValueType::DOUBLE_TYPE:
+ bundle->putDouble(name, value.doubleValue);
+ break;
+ case DrmMetricGroup::ValueType::STRING_TYPE:
+ bundle->putString(name, String16(value.stringValue.c_str()));
+ break;
+ default:
+ ALOGE("Unexpected value type: %hhu", type);
+ }
+}
+
+inline String16 MakeIndexString(unsigned int index) {
+ std::string str("[");
+ str.append(std::to_string(index));
+ str.append("]");
+ return String16(str.c_str());
+}
+
+} // namespace
+
+namespace android {
+
+MediaDrmMetrics::MediaDrmMetrics()
+ : mOpenSessionCounter("drm.mediadrm.open_session", "status"),
+ mCloseSessionCounter("drm.mediadrm.close_session", "status"),
+ mGetKeyRequestTimeUs("drm.mediadrm.get_key_request", "status"),
+ mProvideKeyResponseTimeUs("drm.mediadrm.provide_key_response", "status"),
+ mGetProvisionRequestCounter("drm.mediadrm.get_provision_request",
+ "status"),
+ mProvideProvisionResponseCounter(
+ "drm.mediadrm.provide_provision_response", "status"),
+ mKeyStatusChangeCounter("drm.mediadrm.key_status_change",
+ "key_status_type"),
+ mEventCounter("drm.mediadrm.event", "event_type"),
+ mGetDeviceUniqueIdCounter("drm.mediadrm.get_device_unique_id", "status") {
+}
+
+void MediaDrmMetrics::SetSessionStart(
+ const android::Vector<uint8_t> &sessionId) {
+ std::string sessionIdHex = ToHexString(sessionId);
+ mSessionLifespans[sessionIdHex] =
+ std::make_pair(GetCurrentTimeMs(), (int64_t)0);
+}
+
+void MediaDrmMetrics::SetSessionEnd(const android::Vector<uint8_t> &sessionId) {
+ std::string sessionIdHex = ToHexString(sessionId);
+ int64_t endTimeMs = GetCurrentTimeMs();
+ if (mSessionLifespans.find(sessionIdHex) != mSessionLifespans.end()) {
+ mSessionLifespans[sessionIdHex] =
+ std::make_pair(mSessionLifespans[sessionIdHex].first, endTimeMs);
+ } else {
+ mSessionLifespans[sessionIdHex] = std::make_pair((int64_t)0, endTimeMs);
+ }
+}
+
+void MediaDrmMetrics::Export(PersistableBundle *metrics) {
+ if (!metrics) {
+ ALOGE("metrics was unexpectedly null.");
+ return;
+ }
+ ExportCounterMetric(mOpenSessionCounter, metrics);
+ ExportCounterMetric(mCloseSessionCounter, metrics);
+ ExportEventMetric(mGetKeyRequestTimeUs, metrics);
+ ExportEventMetric(mProvideKeyResponseTimeUs, metrics);
+ ExportCounterMetric(mGetProvisionRequestCounter, metrics);
+ ExportCounterMetric(mProvideProvisionResponseCounter, metrics);
+ ExportCounterMetricWithAttributeNames(mKeyStatusChangeCounter, metrics);
+ ExportCounterMetricWithAttributeNames(mEventCounter, metrics);
+ ExportCounterMetric(mGetDeviceUniqueIdCounter, metrics);
+ ExportSessionLifespans(mSessionLifespans, metrics);
+}
+
+status_t MediaDrmMetrics::GetSerializedMetrics(std::string *serializedMetrics) {
+
+ if (!serializedMetrics) {
+ ALOGE("serializedMetrics was unexpectedly null.");
+ return UNEXPECTED_NULL;
+ }
+
+ DrmFrameworkMetrics metrics;
+
+ mOpenSessionCounter.ExportValues(
+ [&](const android::status_t status, const int64_t value) {
+ DrmFrameworkMetrics::Counter *counter =
+ metrics.add_open_session_counter();
+ counter->set_count(value);
+ counter->mutable_attributes()->set_error_code(status);
+ });
+
+ mCloseSessionCounter.ExportValues(
+ [&](const android::status_t status, const int64_t value) {
+ DrmFrameworkMetrics::Counter *counter =
+ metrics.add_close_session_counter();
+ counter->set_count(value);
+ counter->mutable_attributes()->set_error_code(status);
+ });
+
+ mGetProvisionRequestCounter.ExportValues(
+ [&](const android::status_t status, const int64_t value) {
+ DrmFrameworkMetrics::Counter *counter =
+ metrics.add_get_provisioning_request_counter();
+ counter->set_count(value);
+ counter->mutable_attributes()->set_error_code(status);
+ });
+
+ mProvideProvisionResponseCounter.ExportValues(
+ [&](const android::status_t status, const int64_t value) {
+ DrmFrameworkMetrics::Counter *counter =
+ metrics.add_provide_provisioning_response_counter();
+ counter->set_count(value);
+ counter->mutable_attributes()->set_error_code(status);
+ });
+
+ mKeyStatusChangeCounter.ExportValues(
+ [&](const KeyStatusType key_status_type, const int64_t value) {
+ DrmFrameworkMetrics::Counter *counter =
+ metrics.add_key_status_change_counter();
+ counter->set_count(value);
+ counter->mutable_attributes()->set_key_status_type(
+ (uint32_t)key_status_type);
+ });
+
+ mEventCounter.ExportValues(
+ [&](const EventType event_type, const int64_t value) {
+ DrmFrameworkMetrics::Counter *counter =
+ metrics.add_event_callback_counter();
+ counter->set_count(value);
+ counter->mutable_attributes()->set_event_type((uint32_t)event_type);
+ });
+
+ mGetDeviceUniqueIdCounter.ExportValues(
+ [&](const status_t status, const int64_t value) {
+ DrmFrameworkMetrics::Counter *counter =
+ metrics.add_get_device_unique_id_counter();
+ counter->set_count(value);
+ counter->mutable_attributes()->set_error_code(status);
+ });
+
+ mGetKeyRequestTimeUs.ExportValues(
+ [&](const status_t status, const EventStatistics &stats) {
+ DrmFrameworkMetrics::DistributionMetric *metric =
+ metrics.add_get_key_request_time_us();
+ metric->set_min(stats.min);
+ metric->set_max(stats.max);
+ metric->set_mean(stats.mean);
+ metric->set_operation_count(stats.count);
+ metric->set_variance(stats.sum_squared_deviation / stats.count);
+ metric->mutable_attributes()->set_error_code(status);
+ });
+
+ mProvideKeyResponseTimeUs.ExportValues(
+ [&](const status_t status, const EventStatistics &stats) {
+ DrmFrameworkMetrics::DistributionMetric *metric =
+ metrics.add_provide_key_response_time_us();
+ metric->set_min(stats.min);
+ metric->set_max(stats.max);
+ metric->set_mean(stats.mean);
+ metric->set_operation_count(stats.count);
+ metric->set_variance(stats.sum_squared_deviation / stats.count);
+ metric->mutable_attributes()->set_error_code(status);
+ });
+
+ for (const auto &sessionLifespan : mSessionLifespans) {
+ auto *map = metrics.mutable_session_lifetimes();
+
+ (*map)[sessionLifespan.first].set_start_time_ms(
+ sessionLifespan.second.first);
+ (*map)[sessionLifespan.first].set_end_time_ms(
+ sessionLifespan.second.second);
+ }
+
+ if (!metrics.SerializeToString(serializedMetrics)) {
+ ALOGE("Failed to serialize metrics.");
+ return UNKNOWN_ERROR;
+ }
+
+ return OK;
+}
+
+int64_t MediaDrmMetrics::GetCurrentTimeMs() {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((int64_t)tv.tv_sec * 1000) + ((int64_t)tv.tv_usec / 1000);
+}
+
+status_t MediaDrmMetrics::HidlMetricsToBundle(
+ const hidl_vec<DrmMetricGroup> &hidlMetricGroups,
+ PersistableBundle *bundleMetricGroups) {
+ if (bundleMetricGroups == nullptr) {
+ return UNEXPECTED_NULL;
+ }
+ if (hidlMetricGroups.size() == 0) {
+ return OK;
+ }
+
+ int groupIndex = 0;
+ std::map<String16, int> indexMap;
+ for (const auto &hidlMetricGroup : hidlMetricGroups) {
+ PersistableBundle bundleMetricGroup;
+ for (const auto &hidlMetric : hidlMetricGroup.metrics) {
+ String16 metricName(hidlMetric.name.c_str());
+ PersistableBundle bundleMetric;
+ // Add metric component values.
+ for (const auto &value : hidlMetric.values) {
+ SetValue(String16(value.componentName.c_str()), value.type,
+ value, &bundleMetric);
+ }
+ // Set metric attributes.
+ PersistableBundle bundleMetricAttributes;
+ for (const auto &attribute : hidlMetric.attributes) {
+ SetValue(String16(attribute.name.c_str()), attribute.type,
+ attribute, &bundleMetricAttributes);
+ }
+ // Add attributes to the bundle metric.
+ bundleMetric.putPersistableBundle(String16("attributes"),
+ bundleMetricAttributes);
+ // Add one layer of indirection, allowing for repeated metric names.
+ PersistableBundle repeatedMetrics;
+ bundleMetricGroup.getPersistableBundle(metricName,
+ &repeatedMetrics);
+ int index = indexMap[metricName];
+ repeatedMetrics.putPersistableBundle(MakeIndexString(index),
+ bundleMetric);
+ indexMap[metricName] = ++index;
+
+ // Add the bundle metric to the group of metrics.
+ bundleMetricGroup.putPersistableBundle(metricName,
+ repeatedMetrics);
+ }
+ // Add the bundle metric group to the collection of groups.
+ bundleMetricGroups->putPersistableBundle(MakeIndexString(groupIndex++),
+ bundleMetricGroup);
+ }
+
+ return OK;
+}
+
+} // namespace android
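DrmMetrics.cpp above is new: MediaDrmMetrics owns the per-API counters and timers, flattens them into a PersistableBundle for the IDrm::getMetrics() path, and can serialize them to the metrics proto. A minimal usage sketch with illustrative values (DrmHal drives the same members from the real API calls shown earlier):

    android::MediaDrmMetrics metrics;

    // Count an openSession() outcome.
    metrics.mOpenSessionCounter.Increment(android::OK);

    // Time a key request; the EventTimer records the elapsed time when it is
    // destroyed, tagged with the attribute set here.
    {
        android::EventTimer<android::status_t> timer(&metrics.mGetKeyRequestTimeUs);
        // ... issue the key request ...
        timer.SetAttribute(android::OK);
    }

    // Flatten everything into a bundle for the framework-side getMetrics() call.
    android::os::PersistableBundle bundle;
    metrics.Export(&bundle);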
diff --git a/drm/libmediadrm/DrmPluginPath.cpp b/drm/libmediadrm/DrmPluginPath.cpp
index c760825..ac8607c 100644
--- a/drm/libmediadrm/DrmPluginPath.cpp
+++ b/drm/libmediadrm/DrmPluginPath.cpp
@@ -19,7 +19,7 @@
#include <utils/Log.h>
#include <cutils/properties.h>
-#include <media/DrmPluginPath.h>
+#include <mediadrm/DrmPluginPath.h>
namespace android {
diff --git a/drm/libmediadrm/DrmSessionManager.cpp b/drm/libmediadrm/DrmSessionManager.cpp
index 02270d0..375644c 100644
--- a/drm/libmediadrm/DrmSessionManager.cpp
+++ b/drm/libmediadrm/DrmSessionManager.cpp
@@ -21,9 +21,9 @@
#include <binder/IPCThreadState.h>
#include <binder/IProcessInfoService.h>
#include <binder/IServiceManager.h>
-#include <media/DrmSessionManager.h>
-#include <media/DrmSessionClientInterface.h>
#include <media/stagefright/ProcessInfo.h>
+#include <mediadrm/DrmSessionClientInterface.h>
+#include <mediadrm/DrmSessionManager.h>
#include <unistd.h>
#include <utils/String8.h>
diff --git a/drm/libmediadrm/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
index 8506d95..73ecda1 100644
--- a/drm/libmediadrm/ICrypto.cpp
+++ b/drm/libmediadrm/ICrypto.cpp
@@ -16,14 +16,14 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ICrypto"
-#include <utils/Log.h>
-
#include <binder/Parcel.h>
#include <binder/IMemory.h>
-#include <media/ICrypto.h>
+#include <cutils/log.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AString.h>
+#include <mediadrm/ICrypto.h>
+#include <utils/Log.h>
namespace android {
@@ -341,10 +341,10 @@
return OK;
}
- CryptoPlugin::SubSample *subSamples =
- new CryptoPlugin::SubSample[numSubSamples];
+ std::unique_ptr<CryptoPlugin::SubSample[]> subSamples =
+ std::make_unique<CryptoPlugin::SubSample[]>(numSubSamples);
- data.read(subSamples,
+ data.read(subSamples.get(),
sizeof(CryptoPlugin::SubSample) * numSubSamples);
DestinationBuffer destination;
@@ -362,6 +362,17 @@
reply->writeInt32(BAD_VALUE);
return OK;
}
+ sp<IMemory> dest = destination.mSharedMemory;
+ if (totalSize > dest->size() ||
+ (size_t)dest->offset() > dest->size() - totalSize) {
+ reply->writeInt32(BAD_VALUE);
+ android_errorWriteLog(0x534e4554, "71389378");
+ return OK;
+ }
+ } else {
+ reply->writeInt32(BAD_VALUE);
+ android_errorWriteLog(0x534e4554, "70526702");
+ return OK;
}
AString errorDetailMsg;
@@ -391,7 +402,7 @@
result = -EINVAL;
} else {
result = decrypt(key, iv, mode, pattern, source, offset,
- subSamples, numSubSamples, destination, &errorDetailMsg);
+ subSamples.get(), numSubSamples, destination, &errorDetailMsg);
}
reply->writeInt32(result);
@@ -410,9 +421,7 @@
}
}
- delete[] subSamples;
- subSamples = NULL;
-
+ subSamples.reset();
return OK;
}
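The BnCrypto decrypt transact path above now manages the subsample array with unique_ptr and rejects destination buffers that the decrypted output could not fit into. A sketch of that size check in isolation; totalSize is the sum of clear plus encrypted bytes over all subsamples, and the two-comparison form keeps offset + totalSize from overflowing:

    android::sp<android::IMemory> dest = destination.mSharedMemory;
    if (totalSize > dest->size() ||
            (size_t)dest->offset() > dest->size() - totalSize) {
        // Report the failure through the reply parcel rather than the binder
        // status, matching how the transact handler signals argument errors.
        reply->writeInt32(android::BAD_VALUE);
        return android::OK;
    }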
diff --git a/drm/libmediadrm/IDrm.cpp b/drm/libmediadrm/IDrm.cpp
index 8ff6e6a..509961f 100644
--- a/drm/libmediadrm/IDrm.cpp
+++ b/drm/libmediadrm/IDrm.cpp
@@ -19,10 +19,10 @@
#include <utils/Log.h>
#include <binder/Parcel.h>
-#include <media/IDrm.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AString.h>
+#include <mediadrm/IDrm.h>
namespace android {
@@ -46,6 +46,7 @@
GET_PROPERTY_BYTE_ARRAY,
SET_PROPERTY_STRING,
SET_PROPERTY_BYTE_ARRAY,
+ GET_METRICS,
SET_CIPHER_ALGORITHM,
SET_MAC_ALGORITHM,
ENCRYPT,
@@ -55,7 +56,12 @@
VERIFY,
SET_LISTENER,
GET_SECURE_STOP,
- RELEASE_ALL_SECURE_STOPS
+ REMOVE_ALL_SECURE_STOPS,
+ GET_HDCP_LEVELS,
+ GET_NUMBER_OF_SESSIONS,
+ GET_SECURITY_LEVEL,
+ REMOVE_SECURE_STOP,
+ GET_SECURE_STOP_IDS
};
struct BpDrm : public BpInterface<IDrm> {
@@ -114,9 +120,11 @@
return reply.readInt32();
}
- virtual status_t openSession(Vector<uint8_t> &sessionId) {
+ virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
+ Vector<uint8_t> &sessionId) {
Parcel data, reply;
data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+ data.writeInt32(securityLevel);
status_t status = remote()->transact(OPEN_SESSION, data, &reply);
if (status != OK) {
@@ -297,6 +305,25 @@
return reply.readInt32();
}
+ virtual status_t getSecureStopIds(List<Vector<uint8_t> > &secureStopIds) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(GET_SECURE_STOP_IDS, data, &reply);
+ if (status != OK) {
+ return status;
+ }
+
+ secureStopIds.clear();
+ uint32_t count = reply.readInt32();
+ for (size_t i = 0; i < count; i++) {
+ Vector<uint8_t> secureStopId;
+ readVector(reply, secureStopId);
+ secureStopIds.push_back(secureStopId);
+ }
+ return reply.readInt32();
+ }
+
virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
Parcel data, reply;
data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
@@ -324,11 +351,24 @@
return reply.readInt32();
}
- virtual status_t releaseAllSecureStops() {
+ virtual status_t removeSecureStop(Vector<uint8_t> const &ssid) {
Parcel data, reply;
data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
- status_t status = remote()->transact(RELEASE_ALL_SECURE_STOPS, data, &reply);
+ writeVector(data, ssid);
+ status_t status = remote()->transact(REMOVE_SECURE_STOP, data, &reply);
+ if (status != OK) {
+ return status;
+ }
+
+ return reply.readInt32();
+ }
+
+ virtual status_t removeAllSecureStops() {
+ Parcel data, reply;
+ data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(REMOVE_ALL_SECURE_STOPS, data, &reply);
if (status != OK) {
return status;
}
@@ -350,6 +390,65 @@
return reply.readInt32();
}
+ virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel *connected,
+ DrmPlugin::HdcpLevel *max) const {
+ Parcel data, reply;
+
+ if (connected == NULL || max == NULL) {
+ return BAD_VALUE;
+ }
+
+ data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(GET_HDCP_LEVELS, data, &reply);
+ if (status != OK) {
+ return status;
+ }
+
+ *connected = static_cast<DrmPlugin::HdcpLevel>(reply.readInt32());
+ *max = static_cast<DrmPlugin::HdcpLevel>(reply.readInt32());
+ return reply.readInt32();
+ }
+
+ virtual status_t getNumberOfSessions(uint32_t *open, uint32_t *max) const {
+ Parcel data, reply;
+
+ if (open == NULL || max == NULL) {
+ return BAD_VALUE;
+ }
+
+ data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(GET_NUMBER_OF_SESSIONS, data, &reply);
+ if (status != OK) {
+ return status;
+ }
+
+ *open = reply.readInt32();
+ *max = reply.readInt32();
+ return reply.readInt32();
+ }
+
+ virtual status_t getSecurityLevel(Vector<uint8_t> const &sessionId,
+ DrmPlugin::SecurityLevel *level) const {
+ Parcel data, reply;
+
+ if (level == NULL) {
+ return BAD_VALUE;
+ }
+
+ data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+
+ writeVector(data, sessionId);
+ status_t status = remote()->transact(GET_SECURITY_LEVEL, data, &reply);
+ if (status != OK) {
+ return status;
+ }
+
+ *level = static_cast<DrmPlugin::SecurityLevel>(reply.readInt32());
+ return reply.readInt32();
+ }
+
virtual status_t getPropertyByteArray(String8 const &name, Vector<uint8_t> &value) const {
Parcel data, reply;
data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
@@ -393,6 +492,35 @@
return reply.readInt32();
}
+ virtual status_t getMetrics(os::PersistableBundle *metrics) {
+ if (metrics == NULL) {
+ return BAD_VALUE;
+ }
+ Parcel data, reply;
+ data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(GET_METRICS, data, &reply);
+ if (status != OK) {
+ return status;
+ }
+ // The reply data is ordered as
+ // 1) 32 bit integer reply followed by
+ // 2) Serialized PersistableBundle containing metrics.
+ status_t reply_status;
+ if (reply.readInt32(&reply_status) != OK
+ || reply_status != OK) {
+ ALOGE("Failed to read getMetrics response code from parcel. %d",
+ reply_status);
+ return reply_status;
+ }
+
+ status = metrics->readFromParcel(&reply);
+ if (status != OK) {
+ ALOGE("Failed to read metrics from parcel. %d", status);
+ return status;
+ }
+ return reply_status;
+ }
virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
String8 const &algorithm) {
@@ -615,8 +743,10 @@
case OPEN_SESSION:
{
CHECK_INTERFACE(IDrm, data, reply);
+ DrmPlugin::SecurityLevel level =
+ static_cast<DrmPlugin::SecurityLevel>(data.readInt32());
Vector<uint8_t> sessionId;
- status_t result = openSession(sessionId);
+ status_t result = openSession(level, sessionId);
writeVector(reply, sessionId);
reply->writeInt32(result);
return OK;
@@ -761,6 +891,24 @@
return OK;
}
+ case GET_SECURE_STOP_IDS:
+ {
+ CHECK_INTERFACE(IDrm, data, reply);
+ List<Vector<uint8_t> > secureStopIds;
+ status_t result = getSecureStopIds(secureStopIds);
+ size_t count = secureStopIds.size();
+ reply->writeInt32(count);
+ List<Vector<uint8_t> >::iterator iter = secureStopIds.begin();
+ while(iter != secureStopIds.end()) {
+ size_t size = iter->size();
+ reply->writeInt32(size);
+ reply->write(iter->array(), iter->size());
+ iter++;
+ }
+ reply->writeInt32(result);
+ return OK;
+ }
+
case GET_SECURE_STOP:
{
CHECK_INTERFACE(IDrm, data, reply);
@@ -781,10 +929,54 @@
return OK;
}
- case RELEASE_ALL_SECURE_STOPS:
+ case REMOVE_SECURE_STOP:
{
CHECK_INTERFACE(IDrm, data, reply);
- reply->writeInt32(releaseAllSecureStops());
+ Vector<uint8_t> ssid;
+ readVector(data, ssid);
+ reply->writeInt32(removeSecureStop(ssid));
+ return OK;
+ }
+
+ case REMOVE_ALL_SECURE_STOPS:
+ {
+ CHECK_INTERFACE(IDrm, data, reply);
+ reply->writeInt32(removeAllSecureStops());
+ return OK;
+ }
+
+ case GET_HDCP_LEVELS:
+ {
+ CHECK_INTERFACE(IDrm, data, reply);
+ DrmPlugin::HdcpLevel connected = DrmPlugin::kHdcpLevelUnknown;
+ DrmPlugin::HdcpLevel max = DrmPlugin::kHdcpLevelUnknown;
+ status_t result = getHdcpLevels(&connected, &max);
+ reply->writeInt32(connected);
+ reply->writeInt32(max);
+ reply->writeInt32(result);
+ return OK;
+ }
+
+ case GET_NUMBER_OF_SESSIONS:
+ {
+ CHECK_INTERFACE(IDrm, data, reply);
+ uint32_t open = 0, max = 0;
+ status_t result = getNumberOfSessions(&open, &max);
+ reply->writeInt32(open);
+ reply->writeInt32(max);
+ reply->writeInt32(result);
+ return OK;
+ }
+
+ case GET_SECURITY_LEVEL:
+ {
+ CHECK_INTERFACE(IDrm, data, reply);
+ Vector<uint8_t> sessionId;
+ readVector(data, sessionId);
+ DrmPlugin::SecurityLevel level = DrmPlugin::kSecurityLevelUnknown;
+ status_t result = getSecurityLevel(sessionId, &level);
+ reply->writeInt32(level);
+ reply->writeInt32(result);
return OK;
}
@@ -829,6 +1021,24 @@
return OK;
}
+ case GET_METRICS:
+ {
+ CHECK_INTERFACE(IDrm, data, reply);
+
+ os::PersistableBundle metrics;
+ status_t result = getMetrics(&metrics);
+ // The reply data is ordered as
+ // 1) 32 bit integer reply followed by
+ // 2) Serialized PersistableBundle containing metrics.
+ // Only write the metrics if the getMetrics result was
+ // OK and we successfully added the status to reply.
+ status_t parcel_result = reply->writeInt32(result);
+ if (result == OK && parcel_result == OK) {
+ parcel_result = metrics.writeToParcel(reply);
+ }
+ return parcel_result;
+ }
+
case SET_CIPHER_ALGORITHM:
{
CHECK_INTERFACE(IDrm, data, reply);
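The new IDrm calls above all follow the existing proxy/stub pattern: payload fields are written and read first, and the status_t result is the final int32 in the reply parcel. A minimal caller-side sketch using only the signatures introduced in this diff (the function name is made up):

    #define LOG_TAG "DrmLimitsSketch"
    #include <mediadrm/IDrm.h>
    #include <utils/Log.h>

    // Sketch only: query the new HDCP-level and session-count calls.
    android::status_t dumpDrmLimits(const android::sp<android::IDrm>& drm) {
        using namespace android;

        DrmPlugin::HdcpLevel connected = DrmPlugin::kHdcpLevelUnknown;
        DrmPlugin::HdcpLevel maxHdcp = DrmPlugin::kHdcpLevelUnknown;
        status_t err = drm->getHdcpLevels(&connected, &maxHdcp);
        if (err != OK) {
            return err;
        }

        uint32_t openSessions = 0, maxSessions = 0;
        err = drm->getNumberOfSessions(&openSessions, &maxSessions);
        if (err != OK) {
            return err;
        }

        ALOGV("HDCP connected=%d max=%d, sessions open=%u max=%u",
                (int)connected, (int)maxHdcp, openSessions, maxSessions);
        return OK;
    }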
diff --git a/drm/libmediadrm/IDrmClient.cpp b/drm/libmediadrm/IDrmClient.cpp
index 444201f..357de9d 100644
--- a/drm/libmediadrm/IDrmClient.cpp
+++ b/drm/libmediadrm/IDrmClient.cpp
@@ -24,7 +24,7 @@
#include <binder/Parcel.h>
#include <media/IMediaPlayerClient.h>
-#include <media/IDrmClient.h>
+#include <mediadrm/IDrmClient.h>
namespace android {
diff --git a/drm/libmediadrm/IMediaDrmService.cpp b/drm/libmediadrm/IMediaDrmService.cpp
index 84812dc..f320d0b 100644
--- a/drm/libmediadrm/IMediaDrmService.cpp
+++ b/drm/libmediadrm/IMediaDrmService.cpp
@@ -20,9 +20,9 @@
#include <binder/Parcel.h>
#include <binder/IMemory.h>
-#include <media/ICrypto.h>
-#include <media/IDrm.h>
-#include <media/IMediaDrmService.h>
+#include <mediadrm/ICrypto.h>
+#include <mediadrm/IDrm.h>
+#include <mediadrm/IMediaDrmService.h>
#include <utils/Errors.h> // for status_t
#include <utils/String8.h>
diff --git a/drm/libmediadrm/PluginMetricsReporting.cpp b/drm/libmediadrm/PluginMetricsReporting.cpp
index 57ff5b8..5cb48bf 100644
--- a/drm/libmediadrm/PluginMetricsReporting.cpp
+++ b/drm/libmediadrm/PluginMetricsReporting.cpp
@@ -16,83 +16,35 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "PluginMetricsReporting"
-#include <utils/Log.h>
#include <media/PluginMetricsReporting.h>
-#include <media/MediaAnalyticsItem.h>
+#include <inttypes.h>
-#include "protos/plugin_metrics.pb.h"
+#include <media/MediaAnalyticsItem.h>
+#include <utils/Log.h>
+
namespace android {
namespace {
-using android::drm_metrics::MetricsGroup;
-using android::drm_metrics::MetricsGroup_Metric;
-using android::drm_metrics::MetricsGroup_Metric_MetricValue;
+constexpr char kSerializedMetricsField[] = "serialized_metrics";
-const char* const kParentAttribute = "/parent/external";
-
-status_t reportMetricsGroup(const MetricsGroup& metricsGroup,
- const String8& batchName,
- const int64_t* parentId) {
- MediaAnalyticsItem analyticsItem(batchName.c_str());
+status_t reportVendorMetrics(const std::string& metrics,
+ const String8& name,
+ const String8& appPackageName) {
+ MediaAnalyticsItem analyticsItem(name.c_str());
analyticsItem.generateSessionID();
- int64_t sessionId = analyticsItem.getSessionID();
- if (parentId != NULL) {
- analyticsItem.setInt64(kParentAttribute, *parentId);
+
+ std::string app_package_name(appPackageName.c_str(), appPackageName.size());
+ analyticsItem.setPkgName(app_package_name);
+ if (metrics.size() > 0) {
+ analyticsItem.setCString(kSerializedMetricsField, metrics.c_str());
}
- // Report the package name.
- if (metricsGroup.has_app_package_name()) {
- AString app_package_name(metricsGroup.app_package_name().c_str(),
- metricsGroup.app_package_name().size());
- analyticsItem.setPkgName(app_package_name);
- }
-
- for (int i = 0; i < metricsGroup.metric_size(); ++i) {
- const MetricsGroup_Metric& metric = metricsGroup.metric(i);
- if (!metric.has_name()) {
- ALOGE("Metric with no name.");
- return BAD_VALUE;
- }
-
- if (!metric.has_value()) {
- ALOGE("Metric with no value.");
- return BAD_VALUE;
- }
-
- const MetricsGroup_Metric_MetricValue& value = metric.value();
- if (value.has_int_value()) {
- analyticsItem.setInt64(metric.name().c_str(),
- value.int_value());
- } else if (value.has_double_value()) {
- analyticsItem.setDouble(metric.name().c_str(),
- value.double_value());
- } else if (value.has_string_value()) {
- analyticsItem.setCString(metric.name().c_str(),
- value.string_value().c_str());
- } else {
- ALOGE("Metric Value with no actual value.");
- return BAD_VALUE;
- }
- }
-
- analyticsItem.setFinalized(true);
if (!analyticsItem.selfrecord()) {
- // Note the cast to int is because we build on 32 and 64 bit.
- // The cast prevents a peculiar printf problem where one format cannot
- // satisfy both.
- ALOGE("selfrecord() returned false. sessioId %d", (int) sessionId);
- }
-
- for (int i = 0; i < metricsGroup.metric_sub_group_size(); ++i) {
- const MetricsGroup& subGroup = metricsGroup.metric_sub_group(i);
- status_t res = reportMetricsGroup(subGroup, batchName, &sessionId);
- if (res != OK) {
- return res;
- }
+ ALOGE("selfrecord() returned false. sessioId %" PRId64, analyticsItem.getSessionID());
}
return OK;
@@ -114,21 +66,16 @@
} // namespace
-status_t reportDrmPluginMetrics(const Vector<uint8_t>& serializedMetrics,
+status_t reportDrmPluginMetrics(const std::string& b64EncodedMetrics,
const String8& vendor,
- const String8& description) {
- MetricsGroup root_metrics_group;
- if (!root_metrics_group.ParseFromArray(serializedMetrics.array(),
- serializedMetrics.size())) {
- ALOGE("Failure to parse.");
- return BAD_VALUE;
- }
+ const String8& description,
+ const String8& appPackageName) {
String8 name = String8::format("drm.vendor.%s.%s",
sanitize(vendor).c_str(),
sanitize(description).c_str());
- return reportMetricsGroup(root_metrics_group, name, NULL);
+ return reportVendorMetrics(b64EncodedMetrics, name, appPackageName);
}
} // namespace android
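With the protobuf parsing removed, reportDrmPluginMetrics now takes the plugin's metrics as an already base64-encoded string plus the calling app's package name, and the analytics item is named "drm.vendor.<vendor>.<description>" by the String8::format call above. A minimal usage sketch with hypothetical vendor, description, and package values:

    #include <string>

    #include <media/PluginMetricsReporting.h>
    #include <utils/String8.h>

    // Sketch only; the literal values below are made up.
    android::status_t reportExampleMetrics(const std::string& b64EncodedMetrics) {
        using android::String8;
        return android::reportDrmPluginMetrics(
                b64EncodedMetrics,
                String8("acme"),                  // vendor -> "drm.vendor.acme.cdm"
                String8("cdm"),                   // description
                String8("com.example.player"));   // app package name
    }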
diff --git a/drm/libmediadrm/SharedLibrary.cpp b/drm/libmediadrm/SharedLibrary.cpp
index bebafa8..b2d635d 100644
--- a/drm/libmediadrm/SharedLibrary.cpp
+++ b/drm/libmediadrm/SharedLibrary.cpp
@@ -19,7 +19,7 @@
#include <dlfcn.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/SharedLibrary.h>
+#include <mediadrm/SharedLibrary.h>
#include <utils/Log.h>
namespace android {
diff --git a/drm/libmediadrm/protos/metrics.proto b/drm/libmediadrm/protos/metrics.proto
new file mode 100644
index 0000000..6160e6f
--- /dev/null
+++ b/drm/libmediadrm/protos/metrics.proto
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.drm_metrics;
+
+
+// This message contains the specific metrics captured by DrmMetrics. It is
+// used for serializing and logging metrics.
+// next id: 11.
+message DrmFrameworkMetrics {
+ // TODO: Consider using extensions.
+
+ // Attributes are associated with a recorded value. E.g. A counter may
+ // represent a count of an operation returning a specific error code. The
+ // error code will be an attribute.
+ message Attributes {
+ // Reserved for compatibility with logging proto.
+ reserved 2 to 13;
+
+ // A general purpose error code where 0 means OK.
+ optional int32 error_code = 1;
+
+ // Defined at ::android::hardware::drm::V1_0::KeyStatusType;
+ optional uint32 key_status_type = 14;
+
+ // Defined at ::android::hardware::drm::V1_0::EventType;
+ optional uint32 event_type = 15;
+ }
+
+ // The Counter message is used to store a count value with an associated
+ // Attribute.
+ message Counter {
+ optional uint64 count = 1;
+ // Represents the attributes associated with this counter instance.
+ optional Attributes attributes = 2;
+ }
+
+ // The DistributionMetric is meant to capture the moments of a normally
+ // distributed (or approximately normal) value.
+ message DistributionMetric {
+ optional float min = 1;
+ optional float max = 2;
+ optional float mean = 3;
+ optional double variance = 4;
+ optional uint64 operation_count = 5;
+
+ // Represents the attributes associated with this distribution metric
+ // instance.
+ optional Attributes attributes = 6;
+ }
+
+ message SessionLifetime {
+ // Start time of the session in milliseconds since epoch.
+ optional uint64 start_time_ms = 1;
+ // End time of the session in milliseconds since epoch.
+ optional uint64 end_time_ms = 2;
+ }
+
+ // The count of open session operations. Each instance has a specific error
+ // code associated with it.
+ repeated Counter open_session_counter = 1;
+
+ // The count of close session operations. Each instance has a specific error
+ // code associated with it.
+ repeated Counter close_session_counter = 2;
+
+ // Count and execution time of getKeyRequest calls.
+ repeated DistributionMetric get_key_request_time_us = 3;
+
+ // Count and execution time of provideKeyResponse calls.
+ repeated DistributionMetric provide_key_response_time_us = 4;
+
+ // Count of getProvisionRequest calls.
+ repeated Counter get_provisioning_request_counter = 5;
+
+ // Count of provideProvisionResponse calls.
+ repeated Counter provide_provisioning_response_counter = 6;
+
+ // Count of key status events broken out by status type.
+ repeated Counter key_status_change_counter = 7;
+
+ // Count of events broken out by event type.
+ repeated Counter event_callback_counter = 8;
+
+ // Count of getPropertyByteArray calls to retrieve the device unique id.
+ repeated Counter get_device_unique_id_counter = 9;
+
+ // Session ids to lifetime (start and end time) map.
+ // Session ids are strings of hex-encoded byte arrays.
+ map<string, SessionLifetime> session_lifetimes = 10;
+}
+
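For context, these are the counters that MediaDrmMetrics serializes (the tests added later in this change parse GetSerializedMetrics output as DrmFrameworkMetrics). A minimal sketch of filling and serializing one counter with the C++ code generated from this file (the function name is made up; the accessors are standard protobuf codegen):

    #include <string>

    #include "protos/metrics.pb.h"

    // Sketch only: one open_session_counter entry with a success error_code.
    std::string serializeOneOpenSession() {
        android::drm_metrics::DrmFrameworkMetrics metrics;
        android::drm_metrics::DrmFrameworkMetrics::Counter* counter =
                metrics.add_open_session_counter();
        counter->set_count(1);
        counter->mutable_attributes()->set_error_code(0);  // 0 means OK
        std::string serialized;
        metrics.SerializeToString(&serialized);
        return serialized;
    }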
diff --git a/drm/libmediadrm/protos/plugin_metrics.proto b/drm/libmediadrm/protos/plugin_metrics.proto
deleted file mode 100644
index 7e3bcf5..0000000
--- a/drm/libmediadrm/protos/plugin_metrics.proto
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-syntax = "proto2";
-
-package android.drm_metrics;
-
-// need this if we are using libprotobuf-cpp-2.3.0-lite
-option optimize_for = LITE_RUNTIME;
-
-// The MetricsGroup is a collection of metric name/value pair instances
-// that can be serialized and provided to a caller.
-message MetricsGroup {
- message Metric {
- message MetricValue {
- // Exactly one of the following values must be set.
- optional int64 int_value = 1;
- optional double double_value = 2;
- optional string string_value = 3;
- }
-
- // The name of the metric. Must be valid UTF-8. Required.
- optional string name = 1;
-
- // The value of the metric. Required.
- optional MetricValue value = 2;
- }
-
- // The list of name/value pairs of metrics.
- repeated Metric metric = 1;
-
- // Allow multiple sub groups of metrics.
- repeated MetricsGroup metric_sub_group = 2;
-
- // Name of the application package associated with the metrics.
- optional string app_package_name = 3;
-}
diff --git a/drm/libmediadrm/tests/Android.bp b/drm/libmediadrm/tests/Android.bp
new file mode 100644
index 0000000..66c906f
--- /dev/null
+++ b/drm/libmediadrm/tests/Android.bp
@@ -0,0 +1,53 @@
+// Build definitions for unit tests.
+
+cc_test {
+ name: "CounterMetric_test",
+ srcs: ["CounterMetric_test.cpp"],
+ shared_libs: ["libmediadrm"],
+ include_dirs: ["frameworks/av/include/media"],
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
+
+cc_test {
+ name: "DrmMetrics_test",
+ srcs: ["DrmMetrics_test.cpp"],
+ shared_libs: [
+ "android.hardware.drm@1.0",
+ "android.hardware.drm@1.1",
+ "libbinder",
+ "libhidlbase",
+ "liblog",
+ "libmediadrmmetrics_full",
+ "libmediametrics",
+ "libprotobuf-cpp-full",
+ "libutils",
+ ],
+ static_libs: ["libgmock"],
+ include_dirs: [
+ "frameworks/av/include/media",
+ ],
+ cflags: [
+ // Disable unused-parameter warnings and warnings-as-errors; the code
+ // generated for proto map fields otherwise triggers them.
+ "-Wno-unused-parameter",
+ "-Wno-error",
+ ]
+}
+
+cc_test {
+ name: "EventMetric_test",
+ srcs: ["EventMetric_test.cpp"],
+ shared_libs: [
+ "liblog",
+ "libmediadrm",
+ "libutils",
+ ],
+ include_dirs: ["frameworks/av/include/media"],
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
diff --git a/drm/libmediadrm/tests/CounterMetric_test.cpp b/drm/libmediadrm/tests/CounterMetric_test.cpp
new file mode 100644
index 0000000..6bca0da
--- /dev/null
+++ b/drm/libmediadrm/tests/CounterMetric_test.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "CounterMetric.h"
+
+namespace android {
+
+/**
+ * Unit tests for the CounterMetric class.
+ */
+class CounterMetricTest : public ::testing::Test {
+};
+
+TEST_F(CounterMetricTest, IntDataTypeEmpty) {
+ CounterMetric<int> metric("MyMetricName", "MetricAttributeName");
+
+ std::map<int, int64_t> values;
+
+ metric.ExportValues(
+ [&] (int attribute_value, int64_t value) {
+ values[attribute_value] = value;
+ });
+
+ EXPECT_TRUE(values.empty());
+}
+
+TEST_F(CounterMetricTest, IntDataType) {
+ CounterMetric<int> metric("MyMetricName", "MetricAttributeName");
+
+ std::map<int, int64_t> values;
+
+ metric.Increment(7);
+ metric.Increment(8);
+ metric.Increment(8);
+
+ metric.ExportValues(
+ [&] (int attribute_value, int64_t value) {
+ values[attribute_value] = value;
+ });
+
+ ASSERT_EQ(2u, values.size());
+ EXPECT_EQ(1, values[7]);
+ EXPECT_EQ(2, values[8]);
+}
+
+TEST_F(CounterMetricTest, StringDataType) {
+ CounterMetric<std::string> metric("MyMetricName", "MetricAttributeName");
+
+ std::map<std::string, int64_t> values;
+
+ metric.Increment("a");
+ metric.Increment("b");
+ metric.Increment("b");
+
+ metric.ExportValues(
+ [&] (std::string attribute_value, int64_t value) {
+ values[attribute_value] = value;
+ });
+
+ ASSERT_EQ(2u, values.size());
+ EXPECT_EQ(1, values["a"]);
+ EXPECT_EQ(2, values["b"]);
+}
+
+} // namespace android
diff --git a/drm/libmediadrm/tests/DrmMetrics_test.cpp b/drm/libmediadrm/tests/DrmMetrics_test.cpp
new file mode 100644
index 0000000..64aa9d0
--- /dev/null
+++ b/drm/libmediadrm/tests/DrmMetrics_test.cpp
@@ -0,0 +1,475 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DrmMetricsTest"
+#include "mediadrm/DrmMetrics.h"
+
+#include <android/hardware/drm/1.0/types.h>
+#include <android/hardware/drm/1.1/types.h>
+#include <binder/PersistableBundle.h>
+#include <google/protobuf/text_format.h>
+#include <google/protobuf/util/message_differencer.h>
+#include <gtest/gtest.h>
+#include <utils/Log.h>
+
+#include "protos/metrics.pb.h"
+
+using ::android::drm_metrics::DrmFrameworkMetrics;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::drm::V1_0::EventType;
+using ::android::hardware::drm::V1_0::KeyStatusType;
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_1::DrmMetricGroup;
+using ::android::os::PersistableBundle;
+using ::google::protobuf::util::MessageDifferencer;
+using ::google::protobuf::TextFormat;
+
+namespace android {
+
+/**
+ * Unit tests for the MediaDrmMetrics class.
+ */
+class MediaDrmMetricsTest : public ::testing::Test {};
+
+/**
+ * This derived class mocks the clock for testing purposes.
+ */
+class FakeMediaDrmMetrics : public MediaDrmMetrics {
+ public:
+ FakeMediaDrmMetrics() : MediaDrmMetrics(), time_(0) {};
+
+ int64_t GetCurrentTimeMs() { return time_++; }
+ int64_t time_;
+};
+
+TEST_F(MediaDrmMetricsTest, EmptySuccess) {
+ MediaDrmMetrics metrics;
+ PersistableBundle bundle;
+
+ metrics.Export(&bundle);
+ EXPECT_TRUE(bundle.empty());
+}
+
+TEST_F(MediaDrmMetricsTest, AllValuesSuccessCounts) {
+ MediaDrmMetrics metrics;
+
+ metrics.mOpenSessionCounter.Increment(OK);
+ metrics.mCloseSessionCounter.Increment(OK);
+
+ {
+ EventTimer<status_t> get_key_request_timer(&metrics.mGetKeyRequestTimeUs);
+ EventTimer<status_t> provide_key_response_timer(
+ &metrics.mProvideKeyResponseTimeUs);
+ get_key_request_timer.SetAttribute(OK);
+ provide_key_response_timer.SetAttribute(OK);
+ }
+
+ metrics.mGetProvisionRequestCounter.Increment(OK);
+ metrics.mProvideProvisionResponseCounter.Increment(OK);
+ metrics.mGetDeviceUniqueIdCounter.Increment(OK);
+
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::USABLE);
+ metrics.mEventCounter.Increment(EventType::PROVISION_REQUIRED);
+
+ PersistableBundle bundle;
+
+ metrics.Export(&bundle);
+ EXPECT_EQ(11U, bundle.size());
+
+ // Verify the list of pairs of int64 metrics.
+ std::vector<std::pair<std::string, int64_t>> expected_values = {
+ { "drm.mediadrm.open_session.ok.count", 1 },
+ { "drm.mediadrm.close_session.ok.count", 1 },
+ { "drm.mediadrm.get_key_request.ok.count", 1 },
+ { "drm.mediadrm.provide_key_response.ok.count", 1 },
+ { "drm.mediadrm.get_provision_request.ok.count", 1 },
+ { "drm.mediadrm.provide_provision_response.ok.count", 1 },
+ { "drm.mediadrm.key_status_change.USABLE.count", 1 },
+ { "drm.mediadrm.event.PROVISION_REQUIRED.count", 1 },
+ { "drm.mediadrm.get_device_unique_id.ok.count", 1 }};
+ for (const auto& expected_pair : expected_values) {
+ String16 key(expected_pair.first.c_str());
+ int64_t value = -1;
+ EXPECT_TRUE(bundle.getLong(key, &value))
+ << "Unexpected error retrieviing key: " << key;
+ EXPECT_EQ(expected_pair.second, value)
+ << "Unexpected value for " << expected_pair.first << ". " << value;
+ }
+
+ // Validate timing values exist.
+ String16 get_key_request_key(
+ "drm.mediadrm.get_key_request.ok.average_time_micros");
+ String16 provide_key_response_key(
+ "drm.mediadrm.provide_key_response.ok.average_time_micros");
+ int64_t value = -1;
+ EXPECT_TRUE(bundle.getLong(get_key_request_key, &value));
+ EXPECT_GE(value, 0);
+ value = -1;
+ EXPECT_TRUE(bundle.getLong(provide_key_response_key, &value));
+ EXPECT_GE(value, 0);
+}
+
+TEST_F(MediaDrmMetricsTest, AllValuesFull) {
+ MediaDrmMetrics metrics;
+
+ metrics.mOpenSessionCounter.Increment(OK);
+ metrics.mOpenSessionCounter.Increment(UNEXPECTED_NULL);
+
+ metrics.mCloseSessionCounter.Increment(OK);
+ metrics.mCloseSessionCounter.Increment(UNEXPECTED_NULL);
+
+ for (status_t s : {OK, UNEXPECTED_NULL}) {
+ {
+ EventTimer<status_t> get_key_request_timer(&metrics.mGetKeyRequestTimeUs);
+ EventTimer<status_t> provide_key_response_timer(
+ &metrics.mProvideKeyResponseTimeUs);
+ get_key_request_timer.SetAttribute(s);
+ provide_key_response_timer.SetAttribute(s);
+ }
+ }
+
+ metrics.mGetProvisionRequestCounter.Increment(OK);
+ metrics.mGetProvisionRequestCounter.Increment(UNEXPECTED_NULL);
+ metrics.mProvideProvisionResponseCounter.Increment(OK);
+ metrics.mProvideProvisionResponseCounter.Increment(UNEXPECTED_NULL);
+ metrics.mGetDeviceUniqueIdCounter.Increment(OK);
+ metrics.mGetDeviceUniqueIdCounter.Increment(UNEXPECTED_NULL);
+
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::USABLE);
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::EXPIRED);
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::OUTPUTNOTALLOWED);
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::STATUSPENDING);
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::INTERNALERROR);
+ metrics.mEventCounter.Increment(EventType::PROVISION_REQUIRED);
+ metrics.mEventCounter.Increment(EventType::KEY_NEEDED);
+ metrics.mEventCounter.Increment(EventType::KEY_EXPIRED);
+ metrics.mEventCounter.Increment(EventType::VENDOR_DEFINED);
+ metrics.mEventCounter.Increment(EventType::SESSION_RECLAIMED);
+
+ android::Vector<uint8_t> sessionId1;
+ sessionId1.push_back(1);
+ sessionId1.push_back(2);
+ android::Vector<uint8_t> sessionId2;
+ sessionId2.push_back(3);
+ sessionId2.push_back(4);
+ String16 hexSessionId1("0102");
+ String16 hexSessionId2("0304");
+
+ metrics.SetSessionStart(sessionId1);
+ metrics.SetSessionStart(sessionId2);
+ metrics.SetSessionEnd(sessionId2);
+ metrics.SetSessionEnd(sessionId1);
+
+ PersistableBundle bundle;
+ metrics.Export(&bundle);
+ EXPECT_EQ(35U, bundle.size());
+
+ // Verify the list of pairs of int64 metrics.
+ std::vector<std::pair<std::string, int64_t>> expected_values = {
+ { "drm.mediadrm.open_session.ok.count", 1 },
+ { "drm.mediadrm.close_session.ok.count", 1 },
+ { "drm.mediadrm.get_key_request.ok.count", 1 },
+ { "drm.mediadrm.provide_key_response.ok.count", 1 },
+ { "drm.mediadrm.get_provision_request.ok.count", 1 },
+ { "drm.mediadrm.provide_provision_response.ok.count", 1 },
+ { "drm.mediadrm.get_device_unique_id.ok.count", 1 },
+ { "drm.mediadrm.open_session.error.count", 1 },
+ { "drm.mediadrm.close_session.error.count", 1 },
+ { "drm.mediadrm.get_key_request.error.count", 1 },
+ { "drm.mediadrm.provide_key_response.error.count", 1 },
+ { "drm.mediadrm.get_provision_request.error.count", 1 },
+ { "drm.mediadrm.provide_provision_response.error.count", 1 },
+ { "drm.mediadrm.get_device_unique_id.error.count", 1 },
+ { "drm.mediadrm.key_status_change.USABLE.count", 1 },
+ { "drm.mediadrm.key_status_change.EXPIRED.count", 1 },
+ { "drm.mediadrm.key_status_change.OUTPUT_NOT_ALLOWED.count", 1 },
+ { "drm.mediadrm.key_status_change.STATUS_PENDING.count", 1 },
+ { "drm.mediadrm.key_status_change.INTERNAL_ERROR.count", 1 },
+ { "drm.mediadrm.event.PROVISION_REQUIRED.count", 1 },
+ { "drm.mediadrm.event.KEY_NEEDED.count", 1 },
+ { "drm.mediadrm.event.KEY_EXPIRED.count", 1 },
+ { "drm.mediadrm.event.VENDOR_DEFINED.count", 1 },
+ { "drm.mediadrm.event.SESSION_RECLAIMED.count", 1 }};
+ for (const auto& expected_pair : expected_values) {
+ String16 key(expected_pair.first.c_str());
+ int64_t value = -1;
+ EXPECT_TRUE(bundle.getLong(key, &value))
+ << "Unexpected error retrieviing key: " << key;
+ EXPECT_EQ(expected_pair.second, value)
+ << "Unexpected value for " << expected_pair.first << ". " << value;
+ }
+
+ // Verify the error lists
+ std::vector<std::pair<std::string, std::vector<int64_t>>> expected_vector_values = {
+ { "drm.mediadrm.close_session.error.list", { UNEXPECTED_NULL } },
+ { "drm.mediadrm.get_device_unique_id.error.list", { UNEXPECTED_NULL } },
+ { "drm.mediadrm.get_key_request.error.list", { UNEXPECTED_NULL } },
+ { "drm.mediadrm.get_provision_request.error.list", { UNEXPECTED_NULL } },
+ { "drm.mediadrm.open_session.error.list", { UNEXPECTED_NULL } },
+ { "drm.mediadrm.provide_key_response.error.list", { UNEXPECTED_NULL } },
+ { "drm.mediadrm.provide_provision_response.error.list", { UNEXPECTED_NULL } }};
+ for (const auto& expected_pair : expected_vector_values) {
+ String16 key(expected_pair.first.c_str());
+ std::vector<int64_t> values;
+ EXPECT_TRUE(bundle.getLongVector(key, &values))
+ << "Unexpected error retrieviing key: " << key;
+ for (auto expected : expected_pair.second) {
+ EXPECT_TRUE(std::find(values.begin(), values.end(), expected) != values.end())
+ << "Could not find " << expected << " for key " << expected_pair.first;
+ }
+ }
+
+ // Verify the lifespans
+ PersistableBundle start_times;
+ PersistableBundle end_times;
+ String16 start_time_key("drm.mediadrm.session_start_times_ms");
+ String16 end_time_key("drm.mediadrm.session_end_times_ms");
+ ASSERT_TRUE(bundle.getPersistableBundle(start_time_key, &start_times));
+ ASSERT_TRUE(bundle.getPersistableBundle(end_time_key, &end_times));
+ EXPECT_EQ(2U, start_times.size());
+ EXPECT_EQ(2U, end_times.size());
+ int64_t start_time, end_time;
+ for (const auto& sid : { hexSessionId1, hexSessionId2 }) {
+ start_time = -1;
+ end_time = -1;
+ EXPECT_TRUE(start_times.getLong(sid, &start_time));
+ EXPECT_TRUE(end_times.getLong(sid, &end_time));
+ EXPECT_GT(start_time, 0);
+ EXPECT_GE(end_time, start_time);
+ }
+
+ // Validate timing values exist.
+ String16 get_key_request_key(
+ "drm.mediadrm.get_key_request.ok.average_time_micros");
+ String16 provide_key_response_key(
+ "drm.mediadrm.provide_key_response.ok.average_time_micros");
+ int64_t value = -1;
+ EXPECT_TRUE(bundle.getLong(get_key_request_key, &value));
+ EXPECT_GE(value, 0);
+ value = -1;
+ EXPECT_TRUE(bundle.getLong(provide_key_response_key, &value));
+ EXPECT_GE(value, 0);
+}
+
+
+TEST_F(MediaDrmMetricsTest, CounterValuesProtoSerialization) {
+ MediaDrmMetrics metrics;
+
+ metrics.mOpenSessionCounter.Increment(OK);
+ metrics.mOpenSessionCounter.Increment(UNEXPECTED_NULL);
+ metrics.mCloseSessionCounter.Increment(OK);
+ metrics.mCloseSessionCounter.Increment(UNEXPECTED_NULL);
+
+ metrics.mGetProvisionRequestCounter.Increment(OK);
+ metrics.mGetProvisionRequestCounter.Increment(UNEXPECTED_NULL);
+ metrics.mProvideProvisionResponseCounter.Increment(OK);
+ metrics.mProvideProvisionResponseCounter.Increment(UNEXPECTED_NULL);
+ metrics.mGetDeviceUniqueIdCounter.Increment(OK);
+ metrics.mGetDeviceUniqueIdCounter.Increment(UNEXPECTED_NULL);
+
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::USABLE);
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::EXPIRED);
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::OUTPUTNOTALLOWED);
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::STATUSPENDING);
+ metrics.mKeyStatusChangeCounter.Increment(KeyStatusType::INTERNALERROR);
+ metrics.mEventCounter.Increment(EventType::PROVISION_REQUIRED);
+ metrics.mEventCounter.Increment(EventType::KEY_NEEDED);
+ metrics.mEventCounter.Increment(EventType::KEY_EXPIRED);
+ metrics.mEventCounter.Increment(EventType::VENDOR_DEFINED);
+ metrics.mEventCounter.Increment(EventType::SESSION_RECLAIMED);
+
+ std::string serializedMetrics;
+ ASSERT_EQ(OK, metrics.GetSerializedMetrics(&serializedMetrics));
+
+ DrmFrameworkMetrics metricsProto;
+ ASSERT_TRUE(metricsProto.ParseFromString(serializedMetrics));
+
+ std::string expectedMetrics =
+ "open_session_counter { count: 1 attributes { error_code: -0x7FFFFFF8 } } "
+ "open_session_counter { count: 1 attributes { error_code: 0 } } "
+ "close_session_counter { count: 1 attributes { error_code: -0x7FFFFFF8 } } "
+ "close_session_counter { count: 1 attributes { error_code: 0 } } "
+ "get_provisioning_request_counter { count: 1 attributes { error_code: -0x7FFFFFF8 } } "
+ "get_provisioning_request_counter { count: 1 attributes { error_code: 0 } } "
+ "provide_provisioning_response_counter { count: 1 attributes { error_code: -0x7ffffff8 } } "
+ "provide_provisioning_response_counter { count: 1 attributes { error_code: 0 } } "
+ "get_device_unique_id_counter { count: 1 attributes { error_code: -0x7ffffff8 } } "
+ "get_device_unique_id_counter { count: 1 attributes { error_code: 0 } } "
+ "key_status_change_counter { count: 1 attributes { key_status_type: 0 } } "
+ "key_status_change_counter { count: 1 attributes { key_status_type: 1 } } "
+ "key_status_change_counter { count: 1 attributes { key_status_type: 2 } } "
+ "key_status_change_counter { count: 1 attributes { key_status_type: 3 } } "
+ "key_status_change_counter { count: 1 attributes { key_status_type: 4 } } "
+ "event_callback_counter { count: 1 attributes { event_type: 0 } } "
+ "event_callback_counter { count: 1 attributes { event_type: 1 } } "
+ "event_callback_counter { count: 1 attributes { event_type: 2 } } "
+ "event_callback_counter { count: 1 attributes { event_type: 3 } } "
+ "event_callback_counter { count: 1 attributes { event_type: 4 } } ";
+
+ DrmFrameworkMetrics expectedMetricsProto;
+ ASSERT_TRUE(TextFormat::MergeFromString(expectedMetrics, &expectedMetricsProto));
+
+ std::string diffString;
+ MessageDifferencer differ;
+ differ.ReportDifferencesToString(&diffString);
+ ASSERT_TRUE(differ.Compare(expectedMetricsProto, metricsProto))
+ << diffString;
+}
+
+TEST_F(MediaDrmMetricsTest, TimeMetricsProtoSerialization) {
+ MediaDrmMetrics metrics;
+
+ for (status_t s : {OK, UNEXPECTED_NULL}) {
+ double time = 0;
+ for (int i = 0; i < 5; i++) {
+ time += 1.0;
+ metrics.mGetKeyRequestTimeUs.Record(time, s);
+ metrics.mProvideKeyResponseTimeUs.Record(time, s);
+ }
+ }
+
+ std::string serializedMetrics;
+ ASSERT_EQ(OK, metrics.GetSerializedMetrics(&serializedMetrics));
+
+ DrmFrameworkMetrics metricsProto;
+ ASSERT_TRUE(metricsProto.ParseFromString(serializedMetrics));
+
+ std::string expectedMetrics =
+ "get_key_request_time_us { "
+ " min: 1 max: 5 mean: 3.5 variance: 1 operation_count: 5 "
+ " attributes { error_code: -0x7FFFFFF8 } "
+ "} "
+ "get_key_request_time_us { "
+ " min: 1 max: 5 mean: 3.5 variance: 1 operation_count: 5 "
+ " attributes { error_code: 0 } "
+ "} "
+ "provide_key_response_time_us { "
+ " min: 1 max: 5 mean: 3.5 variance: 1 operation_count: 5 "
+ " attributes { error_code: -0x7FFFFFF8 } "
+ "} "
+ "provide_key_response_time_us { "
+ " min: 1 max: 5 mean: 3.5 variance: 1 operation_count: 5 "
+ " attributes { error_code: 0 } "
+ "} ";
+
+ DrmFrameworkMetrics expectedMetricsProto;
+ ASSERT_TRUE(TextFormat::MergeFromString(expectedMetrics, &expectedMetricsProto));
+
+ std::string diffString;
+ MessageDifferencer differ;
+ differ.ReportDifferencesToString(&diffString);
+ ASSERT_TRUE(differ.Compare(expectedMetricsProto, metricsProto))
+ << diffString;
+}
+
+TEST_F(MediaDrmMetricsTest, SessionLifetimeProtoSerialization) {
+ // Use the fake so the clock is predictable.
+ FakeMediaDrmMetrics metrics;
+
+ android::Vector<uint8_t> sessionId1;
+ sessionId1.push_back(1);
+ sessionId1.push_back(2);
+ android::Vector<uint8_t> sessionId2;
+ sessionId2.push_back(3);
+ sessionId2.push_back(4);
+
+ metrics.SetSessionStart(sessionId1);
+ metrics.SetSessionStart(sessionId2);
+ metrics.SetSessionEnd(sessionId2);
+ metrics.SetSessionEnd(sessionId1);
+
+ std::string serializedMetrics;
+ ASSERT_EQ(OK, metrics.GetSerializedMetrics(&serializedMetrics));
+
+ DrmFrameworkMetrics metricsProto;
+ ASSERT_TRUE(metricsProto.ParseFromString(serializedMetrics));
+
+ std::string expectedMetrics =
+ "session_lifetimes: { "
+ " key: '0102' "
+ " value { start_time_ms: 0 end_time_ms: 3 } "
+ "} "
+ "session_lifetimes: { "
+ " key: '0304' "
+ " value { start_time_ms: 1 end_time_ms: 2 } "
+ "} ";
+
+ DrmFrameworkMetrics expectedMetricsProto;
+ ASSERT_TRUE(TextFormat::MergeFromString(expectedMetrics, &expectedMetricsProto));
+
+ std::string diffString;
+ MessageDifferencer differ;
+ differ.ReportDifferencesToString(&diffString);
+ ASSERT_TRUE(differ.Compare(expectedMetricsProto, metricsProto))
+ << diffString;
+}
+
+TEST_F(MediaDrmMetricsTest, HidlToBundleMetricsEmpty) {
+ hidl_vec<DrmMetricGroup> hidlMetricGroups;
+ PersistableBundle bundleMetricGroups;
+
+ ASSERT_EQ(OK, MediaDrmMetrics::HidlMetricsToBundle(hidlMetricGroups, &bundleMetricGroups));
+ ASSERT_EQ(0U, bundleMetricGroups.size());
+}
+
+TEST_F(MediaDrmMetricsTest, HidlToBundleMetricsMultiple) {
+ DrmMetricGroup hidlMetricGroup =
+ { { {
+ "open_session_ok",
+ { { "status", DrmMetricGroup::ValueType::INT64_TYPE,
+ (int64_t) Status::OK, 0.0, "" } },
+ { { "count", DrmMetricGroup::ValueType::INT64_TYPE, 3, 0.0, "" } }
+ },
+ {
+ "close_session_not_opened",
+ { { "status", DrmMetricGroup::ValueType::INT64_TYPE,
+ (int64_t) Status::ERROR_DRM_SESSION_NOT_OPENED, 0.0, "" } },
+ { { "count", DrmMetricGroup::ValueType::INT64_TYPE, 7, 0.0, "" } }
+ } } };
+
+ PersistableBundle bundleMetricGroups;
+ ASSERT_EQ(OK, MediaDrmMetrics::HidlMetricsToBundle(hidl_vec<DrmMetricGroup>({hidlMetricGroup}),
+ &bundleMetricGroups));
+ ASSERT_EQ(1U, bundleMetricGroups.size());
+ PersistableBundle bundleMetricGroup;
+ ASSERT_TRUE(bundleMetricGroups.getPersistableBundle(String16("[0]"), &bundleMetricGroup));
+ ASSERT_EQ(2U, bundleMetricGroup.size());
+
+ // Verify each metric.
+ PersistableBundle metric;
+ ASSERT_TRUE(bundleMetricGroup.getPersistableBundle(String16("open_session_ok"), &metric));
+ PersistableBundle metricInstance;
+ ASSERT_TRUE(metric.getPersistableBundle(String16("[0]"), &metricInstance));
+ int64_t value = 0;
+ ASSERT_TRUE(metricInstance.getLong(String16("count"), &value));
+ ASSERT_EQ(3, value);
+ PersistableBundle attributeBundle;
+ ASSERT_TRUE(metricInstance.getPersistableBundle(String16("attributes"), &attributeBundle));
+ ASSERT_TRUE(attributeBundle.getLong(String16("status"), &value));
+ ASSERT_EQ((int64_t) Status::OK, value);
+
+ ASSERT_TRUE(bundleMetricGroup.getPersistableBundle(String16("close_session_not_opened"),
+ &metric));
+ ASSERT_TRUE(metric.getPersistableBundle(String16("[0]"), &metricInstance));
+ ASSERT_TRUE(metricInstance.getLong(String16("count"), &value));
+ ASSERT_EQ(7, value);
+ ASSERT_TRUE(metricInstance.getPersistableBundle(String16("attributes"), &attributeBundle));
+ value = 0;
+ ASSERT_TRUE(attributeBundle.getLong(String16("status"), &value));
+ ASSERT_EQ((int64_t) Status::ERROR_DRM_SESSION_NOT_OPENED, value);
+}
+
+} // namespace android
diff --git a/drm/libmediadrm/tests/EventMetric_test.cpp b/drm/libmediadrm/tests/EventMetric_test.cpp
new file mode 100644
index 0000000..eb6c4f6
--- /dev/null
+++ b/drm/libmediadrm/tests/EventMetric_test.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "EventMetric.h"
+
+namespace android {
+
+/**
+ * Unit tests for the EventMetric class.
+ */
+
+TEST(EventMetricTest, IntDataTypeEmpty) {
+ EventMetric<int> metric("MyMetricName", "MetricAttributeName");
+
+ std::map<int, EventStatistics> values;
+
+ metric.ExportValues(
+ [&] (int attribute_value, const EventStatistics& value) {
+ values[attribute_value] = value;
+ });
+
+ EXPECT_TRUE(values.empty());
+}
+
+TEST(EventMetricTest, IntDataType) {
+ EventMetric<int> metric("MyMetricName", "MetricAttributeName");
+
+ std::map<int, EventStatistics> values;
+
+ metric.Record(4, 7);
+ metric.Record(5, 8);
+ metric.Record(5, 8);
+ metric.Record(5, 8);
+ metric.Record(6, 8);
+ metric.Record(6, 8);
+ metric.Record(6, 8);
+
+ metric.ExportValues(
+ [&] (int attribute_value, const EventStatistics& value) {
+ values[attribute_value] = value;
+ });
+
+ ASSERT_EQ(2u, values.size());
+ EXPECT_EQ(4, values[7].min);
+ EXPECT_EQ(4, values[7].max);
+ EXPECT_EQ(4, values[7].mean);
+ EXPECT_EQ(1, values[7].count);
+
+ EXPECT_EQ(5, values[8].min);
+ EXPECT_EQ(6, values[8].max);
+ // This is an approximate value because of the technique we're using.
+ EXPECT_NEAR(5.5, values[8].mean, 0.2);
+ EXPECT_EQ(6, values[8].count);
+}
+
+TEST(EventMetricTest, StringDataType) {
+ EventMetric<std::string> metric("MyMetricName", "MetricAttributeName");
+
+ std::map<std::string, EventStatistics> values;
+
+ metric.Record(1, "a");
+ metric.Record(2, "b");
+ metric.Record(2, "b");
+ metric.Record(3, "b");
+ metric.Record(3, "b");
+
+ metric.ExportValues(
+ [&] (std::string attribute_value, const EventStatistics& value) {
+ values[attribute_value] = value;
+ });
+
+ ASSERT_EQ(2u, values.size());
+ EXPECT_EQ(1, values["a"].min);
+ EXPECT_EQ(1, values["a"].max);
+ EXPECT_EQ(1, values["a"].mean);
+ EXPECT_EQ(1, values["a"].count);
+
+ EXPECT_EQ(2, values["b"].min);
+ EXPECT_EQ(3, values["b"].max);
+ EXPECT_NEAR(2.5, values["b"].mean, 0.2);
+ EXPECT_EQ(4, values["b"].count);
+}
+
+// Helper class that allows us to mock the clock.
+template<typename AttributeType>
+class MockEventTimer : public EventTimer<AttributeType> {
+ public:
+ explicit MockEventTimer(nsecs_t time_delta_ns,
+ EventMetric<AttributeType>* metric)
+ : EventTimer<AttributeType>(metric) {
+ // Pretend the event started earlier.
+ this->start_time_ = systemTime() - time_delta_ns;
+ }
+};
+
+TEST(EventTimerTest, IntDataType) {
+ EventMetric<int> metric("MyMetricName", "MetricAttributeName");
+
+ for (int i = 0; i < 5; i++) {
+ {
+ // Add a mock time delta.
+ MockEventTimer<int> metric_timer(i * 1000000, &metric);
+ metric_timer.SetAttribute(i % 2);
+ }
+ }
+
+ std::map<int, EventStatistics> values;
+ metric.ExportValues(
+ [&] (int attribute_value, const EventStatistics& value) {
+ values[attribute_value] = value;
+ });
+
+ ASSERT_EQ(2u, values.size());
+ EXPECT_LT(values[0].min, values[0].max);
+ EXPECT_GE(4000, values[0].max);
+ EXPECT_GT(values[0].mean, values[0].min);
+ EXPECT_LE(values[0].mean, values[0].max);
+ EXPECT_EQ(3, values[0].count);
+
+ EXPECT_LT(values[1].min, values[1].max);
+ EXPECT_GE(3000, values[1].max);
+ EXPECT_GT(values[1].mean, values[1].min);
+ EXPECT_LE(values[1].mean, values[1].max);
+ EXPECT_EQ(2, values[1].count);
+}
+
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
index e27631f..73ed8c3 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -55,7 +55,7 @@
status_t ClearKeyCasFactory::createPlugin(
int32_t CA_system_id,
- uint64_t appData,
+ void *appData,
CasPluginCallback callback,
CasPlugin **plugin) {
if (!isSystemIdSupported(CA_system_id)) {
@@ -83,7 +83,7 @@
///////////////////////////////////////////////////////////////////////////////
ClearKeyCasPlugin::ClearKeyCasPlugin(
- uint64_t appData, CasPluginCallback callback)
+ void *appData, CasPluginCallback callback)
: mCallback(callback), mAppData(appData) {
ALOGV("CTOR");
}
@@ -347,6 +347,9 @@
return ERROR_CAS_CANNOT_HANDLE;
}
+ scramblingControl = (DescramblerPlugin::ScramblingControl)
+ (scramblingControl & DescramblerPlugin::kScrambling_Mask_Key);
+
AES_KEY contentKey;
if (scramblingControl != DescramblerPlugin::kScrambling_Unscrambled) {
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
index b7134e4..42cfb8f 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
@@ -44,7 +44,7 @@
std::vector<CasPluginDescriptor> *descriptors) const override;
virtual status_t createPlugin(
int32_t CA_system_id,
- uint64_t appData,
+ void *appData,
CasPluginCallback callback,
CasPlugin **plugin) override;
};
@@ -62,7 +62,7 @@
class ClearKeyCasPlugin : public CasPlugin {
public:
- ClearKeyCasPlugin(uint64_t appData, CasPluginCallback callback);
+ ClearKeyCasPlugin(void *appData, CasPluginCallback callback);
virtual ~ClearKeyCasPlugin();
virtual status_t setPrivateData(
@@ -94,7 +94,7 @@
Mutex mKeyFetcherLock;
std::unique_ptr<KeyFetcher> mKeyFetcher;
CasPluginCallback mCallback;
- uint64_t mAppData;
+ void* mAppData;
};
class ClearKeyDescramblerPlugin : public DescramblerPlugin {
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.cpp b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
index 06516b5..8404a83 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.cpp
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
@@ -49,7 +49,7 @@
status_t MockCasFactory::createPlugin(
int32_t CA_system_id,
- uint64_t /*appData*/,
+ void* /*appData*/,
CasPluginCallback /*callback*/,
CasPlugin **plugin) {
if (!isSystemIdSupported(CA_system_id)) {
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.h b/drm/mediacas/plugins/mock/MockCasPlugin.h
index 9632492..8106990 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.h
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.h
@@ -39,7 +39,7 @@
std::vector<CasPluginDescriptor> *descriptors) const override;
virtual status_t createPlugin(
int32_t CA_system_id,
- uint64_t appData,
+ void *appData,
CasPluginCallback callback,
CasPlugin **plugin) override;
};
diff --git a/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.cpp b/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.cpp
deleted file mode 100644
index 01f8d65..0000000
--- a/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ClearKeyCryptoPlugin"
-#include <utils/Log.h>
-
-#include <openssl/aes.h>
-
-#include "AesCtrDecryptor.h"
-
-namespace clearkeydrm {
-
-static const size_t kBlockBitCount = kBlockSize * 8;
-
-android::status_t AesCtrDecryptor::decrypt(const android::Vector<uint8_t>& key,
- const Iv iv, const uint8_t* source,
- uint8_t* destination,
- const SubSample* subSamples,
- size_t numSubSamples,
- size_t* bytesDecryptedOut) {
- uint32_t blockOffset = 0;
- uint8_t previousEncryptedCounter[kBlockSize];
- memset(previousEncryptedCounter, 0, kBlockSize);
-
- size_t offset = 0;
- AES_KEY opensslKey;
- AES_set_encrypt_key(key.array(), kBlockBitCount, &opensslKey);
- Iv opensslIv;
- memcpy(opensslIv, iv, sizeof(opensslIv));
-
- for (size_t i = 0; i < numSubSamples; ++i) {
- const SubSample& subSample = subSamples[i];
-
- if (subSample.mNumBytesOfClearData > 0) {
- memcpy(destination + offset, source + offset,
- subSample.mNumBytesOfClearData);
- offset += subSample.mNumBytesOfClearData;
- }
-
- if (subSample.mNumBytesOfEncryptedData > 0) {
- AES_ctr128_encrypt(source + offset, destination + offset,
- subSample.mNumBytesOfEncryptedData, &opensslKey,
- opensslIv, previousEncryptedCounter,
- &blockOffset);
- offset += subSample.mNumBytesOfEncryptedData;
- }
- }
-
- *bytesDecryptedOut = offset;
- return android::OK;
-}
-
-} // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.h b/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.h
deleted file mode 100644
index b416266..0000000
--- a/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CLEARKEY_AES_CTR_DECRYPTOR_H_
-#define CLEARKEY_AES_CTR_DECRYPTOR_H_
-
-#include <media/stagefright/foundation/ABase.h>
-#include <Utils.h>
-#include <utils/Errors.h>
-#include <utils/Vector.h>
-
-#include "ClearKeyTypes.h"
-
-namespace clearkeydrm {
-
-class AesCtrDecryptor {
-public:
- AesCtrDecryptor() {}
-
- android::status_t decrypt(const android::Vector<uint8_t>& key, const Iv iv,
- const uint8_t* source, uint8_t* destination,
- const SubSample* subSamples, size_t numSubSamples,
- size_t* bytesDecryptedOut);
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(AesCtrDecryptor);
-};
-
-} // namespace clearkeydrm
-
-#endif // CLEARKEY_AES_CTR_DECRYPTOR_H_
diff --git a/drm/mediadrm/plugins/clearkey/Android.bp b/drm/mediadrm/plugins/clearkey/Android.bp
deleted file mode 100644
index 385815c..0000000
--- a/drm/mediadrm/plugins/clearkey/Android.bp
+++ /dev/null
@@ -1,61 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-cc_library_shared {
- name: "libdrmclearkeyplugin",
-
- srcs: [
- "AesCtrDecryptor.cpp",
- "ClearKeyUUID.cpp",
- "CreatePluginFactories.cpp",
- "CryptoFactory.cpp",
- "CryptoPlugin.cpp",
- "DrmFactory.cpp",
- "DrmPlugin.cpp",
- "InitDataParser.cpp",
- "JsonWebKey.cpp",
- "Session.cpp",
- "SessionLibrary.cpp",
- "Utils.cpp",
- ],
-
- vendor: true,
- relative_install_path: "mediadrm",
-
- cflags: ["-Wall", "-Werror"],
-
- shared_libs: [
- "libcrypto",
- "liblog",
- "libstagefright_foundation",
- "libutils",
- ],
-
- static_libs: ["libjsmn"],
-
- include_dirs: [
- "frameworks/native/include",
- "frameworks/av/include",
- ],
-
- export_include_dirs: ["."],
- export_static_lib_headers: ["libjsmn"],
-}
-
-//########################################################################
-// Build unit tests
-
-subdirs = ["tests"]
diff --git a/drm/mediadrm/plugins/clearkey/CryptoFactory.cpp b/drm/mediadrm/plugins/clearkey/CryptoFactory.cpp
deleted file mode 100644
index eeb64c3..0000000
--- a/drm/mediadrm/plugins/clearkey/CryptoFactory.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ClearKeyCryptoPlugin"
-#include <utils/Log.h>
-
-#include <utils/Errors.h>
-#include <utils/StrongPointer.h>
-
-#include "CryptoFactory.h"
-
-#include "ClearKeyUUID.h"
-#include "CryptoPlugin.h"
-#include "Session.h"
-#include "SessionLibrary.h"
-
-namespace clearkeydrm {
-
-bool CryptoFactory::isCryptoSchemeSupported(const uint8_t uuid[16]) const {
- return isClearKeyUUID(uuid);
-}
-
-android::status_t CryptoFactory::createPlugin(
- const uint8_t uuid[16],
- const void* data, size_t size,
- android::CryptoPlugin** plugin) {
- if (!isCryptoSchemeSupported(uuid)) {
- *plugin = NULL;
- return android::BAD_VALUE;
- }
-
- android::Vector<uint8_t> sessionId;
- sessionId.appendArray(reinterpret_cast<const uint8_t*>(data), size);
-
- CryptoPlugin *clearKeyPlugin = new CryptoPlugin(sessionId);
- android::status_t result = clearKeyPlugin->getInitStatus();
- if (result == android::OK) {
- *plugin = clearKeyPlugin;
- } else {
- delete clearKeyPlugin;
- *plugin = NULL;
- }
- return result;
-}
-
-} // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
deleted file mode 100644
index c83321b..0000000
--- a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ClearKeyCryptoPlugin"
-#include <utils/Log.h>
-
-#include <utils/Errors.h>
-
-#include "DrmFactory.h"
-
-#include "DrmPlugin.h"
-#include "ClearKeyUUID.h"
-#include "MimeType.h"
-#include "SessionLibrary.h"
-
-namespace clearkeydrm {
-
-bool DrmFactory::isCryptoSchemeSupported(const uint8_t uuid[16]) {
- return isClearKeyUUID(uuid);
-}
-
-bool DrmFactory::isContentTypeSupported(const android::String8 &type) {
- // This should match the types handed by InitDataParser.
- return type == kIsoBmffVideoMimeType ||
- type == kIsoBmffAudioMimeType ||
- type == kCencInitDataFormat ||
- type == kWebmVideoMimeType ||
- type == kWebmAudioMimeType ||
- type == kWebmInitDataFormat;
-}
-
-android::status_t DrmFactory::createDrmPlugin(
- const uint8_t uuid[16],
- android::DrmPlugin** plugin) {
- if (!isCryptoSchemeSupported(uuid)) {
- *plugin = NULL;
- return android::BAD_VALUE;
- }
-
- *plugin = new DrmPlugin(SessionLibrary::get());
- return android::OK;
-}
-
-} // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
deleted file mode 100644
index ec07d87..0000000
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ClearKeyCryptoPlugin"
-#include <utils/Log.h>
-
-#include <media/stagefright/MediaErrors.h>
-#include <utils/StrongPointer.h>
-
-#include "DrmPlugin.h"
-
-#include "Session.h"
-
-namespace {
-const android::String8 kStreaming("Streaming");
-const android::String8 kOffline("Offline");
-const android::String8 kTrue("True");
-
-const android::String8 kQueryKeyLicenseType("LicenseType");
- // Value: "Streaming" or "Offline"
-const android::String8 kQueryKeyPlayAllowed("PlayAllowed");
- // Value: "True" or "False"
-const android::String8 kQueryKeyRenewAllowed("RenewAllowed");
- // Value: "True" or "False"
-};
-
-namespace clearkeydrm {
-
-using android::sp;
-
-DrmPlugin::DrmPlugin(SessionLibrary* sessionLibrary)
- : mSessionLibrary(sessionLibrary) {
- mPlayPolicy.clear();
-}
-
-status_t DrmPlugin::openSession(Vector<uint8_t>& sessionId) {
- sp<Session> session = mSessionLibrary->createSession();
- sessionId = session->sessionId();
- return android::OK;
-}
-
-status_t DrmPlugin::closeSession(const Vector<uint8_t>& sessionId) {
- sp<Session> session = mSessionLibrary->findSession(sessionId);
- if (sessionId.size() == 0) {
- return android::BAD_VALUE;
- }
- if (session.get()) {
- mSessionLibrary->destroySession(session);
- return android::OK;
- }
- return android::ERROR_DRM_SESSION_NOT_OPENED;
-}
-
-status_t DrmPlugin::getKeyRequest(
- const Vector<uint8_t>& scope,
- const Vector<uint8_t>& initData,
- const String8& mimeType,
- KeyType keyType,
- const KeyedVector<String8, String8>& optionalParameters,
- Vector<uint8_t>& request,
- String8& defaultUrl,
- DrmPlugin::KeyRequestType *keyRequestType) {
- UNUSED(optionalParameters);
- if (scope.size() == 0) {
- return android::BAD_VALUE;
- }
-
- if (keyType != kKeyType_Streaming) {
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- *keyRequestType = DrmPlugin::kKeyRequestType_Initial;
- defaultUrl.clear();
- sp<Session> session = mSessionLibrary->findSession(scope);
- if (!session.get()) {
- return android::ERROR_DRM_SESSION_NOT_OPENED;
- }
-
- return session->getKeyRequest(initData, mimeType, &request);
-}
-
-void DrmPlugin::setPlayPolicy() {
- mPlayPolicy.clear();
- mPlayPolicy.add(kQueryKeyLicenseType, kStreaming);
- mPlayPolicy.add(kQueryKeyPlayAllowed, kTrue);
- mPlayPolicy.add(kQueryKeyRenewAllowed, kTrue);
-}
-
-status_t DrmPlugin::provideKeyResponse(
- const Vector<uint8_t>& scope,
- const Vector<uint8_t>& response,
- Vector<uint8_t>& keySetId) {
- if (scope.size() == 0 || response.size() == 0) {
- return android::BAD_VALUE;
- }
- sp<Session> session = mSessionLibrary->findSession(scope);
- if (!session.get()) {
- return android::ERROR_DRM_SESSION_NOT_OPENED;
- }
-
- setPlayPolicy();
- status_t res = session->provideKeyResponse(response);
- if (res == android::OK) {
- // This is for testing AMediaDrm_setOnEventListener only.
- sendEvent(kDrmPluginEventVendorDefined, 0, &scope, NULL);
- keySetId.clear();
- }
- return res;
-}
-
-status_t DrmPlugin::getPropertyString(
- const String8& name, String8& value) const {
- if (name == "vendor") {
- value = "Google";
- } else if (name == "version") {
- value = "1.0";
- } else if (name == "description") {
- value = "ClearKey CDM";
- } else if (name == "algorithms") {
- value = "";
- } else if (name == "listenerTestSupport") {
- value = "true";
- } else {
- ALOGE("App requested unknown string property %s", name.string());
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
- return android::OK;
-}
-
-status_t DrmPlugin::queryKeyStatus(
- const Vector<uint8_t>& sessionId,
- KeyedVector<String8, String8>& infoMap) const {
-
- if (sessionId.size() == 0) {
- return android::BAD_VALUE;
- }
-
- infoMap.clear();
- for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
- infoMap.add(mPlayPolicy.keyAt(i), mPlayPolicy.valueAt(i));
- }
- return android::OK;
-}
-} // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
deleted file mode 100644
index f37a706..0000000
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CLEARKEY_DRM_PLUGIN_H_
-#define CLEARKEY_DRM_PLUGIN_H_
-
-#include <media/drm/DrmAPI.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/MediaErrors.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/List.h>
-#include <utils/String8.h>
-#include <utils/Vector.h>
-
-#include "SessionLibrary.h"
-#include "Utils.h"
-
-namespace clearkeydrm {
-
-using android::KeyedVector;
-using android::List;
-using android::status_t;
-using android::String8;
-using android::Vector;
-
-class DrmPlugin : public android::DrmPlugin {
-public:
- explicit DrmPlugin(SessionLibrary* sessionLibrary);
-
- virtual ~DrmPlugin() {}
-
- virtual status_t openSession(Vector<uint8_t>& sessionId);
-
- virtual status_t closeSession(const Vector<uint8_t>& sessionId);
-
- virtual status_t getKeyRequest(
- const Vector<uint8_t>& scope,
- const Vector<uint8_t>& mimeType,
- const String8& initDataType,
- KeyType keyType,
- const KeyedVector<String8, String8>& optionalParameters,
- Vector<uint8_t>& request,
- String8& defaultUrl,
- DrmPlugin::KeyRequestType *keyRequestType);
-
- virtual status_t provideKeyResponse(
- const Vector<uint8_t>& scope,
- const Vector<uint8_t>& response,
- Vector<uint8_t>& keySetId);
-
- virtual status_t removeKeys(const Vector<uint8_t>& sessionId) {
- if (sessionId.size() == 0) {
- return android::BAD_VALUE;
- }
-
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t restoreKeys(
- const Vector<uint8_t>& sessionId,
- const Vector<uint8_t>& keySetId) {
- if (sessionId.size() == 0 || keySetId.size() == 0) {
- return android::BAD_VALUE;
- }
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t queryKeyStatus(
- const Vector<uint8_t>& sessionId,
- KeyedVector<String8, String8>& infoMap) const;
-
- virtual status_t getProvisionRequest(
- const String8& cert_type,
- const String8& cert_authority,
- Vector<uint8_t>& request,
- String8& defaultUrl) {
- UNUSED(cert_type);
- UNUSED(cert_authority);
- UNUSED(request);
- UNUSED(defaultUrl);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t provideProvisionResponse(
- const Vector<uint8_t>& response,
- Vector<uint8_t>& certificate,
- Vector<uint8_t>& wrappedKey) {
- UNUSED(certificate);
- UNUSED(wrappedKey);
- if (response.size() == 0) {
- // empty response
- return android::BAD_VALUE;
- }
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t getSecureStops(List<Vector<uint8_t> >& secureStops) {
- UNUSED(secureStops);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
- if (ssid.size() == 0) {
- return android::BAD_VALUE;
- }
-
- UNUSED(secureStop);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t releaseSecureStops(const Vector<uint8_t>& ssRelease) {
- if (ssRelease.size() == 0) {
- return android::BAD_VALUE;
- }
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t releaseAllSecureStops() {
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t getPropertyString(
- const String8& name, String8& value) const;
-
- virtual status_t getPropertyByteArray(
- const String8& name, Vector<uint8_t>& value) const {
- UNUSED(name);
- UNUSED(value);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t setPropertyString(
- const String8& name, const String8& value) {
- UNUSED(name);
- UNUSED(value);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t setPropertyByteArray(
- const String8& name, const Vector<uint8_t>& value) {
- UNUSED(name);
- UNUSED(value);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t setCipherAlgorithm(
- const Vector<uint8_t>& sessionId, const String8& algorithm) {
- if (sessionId.size() == 0 || algorithm.size() == 0) {
- return android::BAD_VALUE;
- }
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t setMacAlgorithm(
- const Vector<uint8_t>& sessionId, const String8& algorithm) {
- if (sessionId.size() == 0 || algorithm.size() == 0) {
- return android::BAD_VALUE;
- }
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t encrypt(
- const Vector<uint8_t>& sessionId,
- const Vector<uint8_t>& keyId,
- const Vector<uint8_t>& input,
- const Vector<uint8_t>& iv,
- Vector<uint8_t>& output) {
- if (sessionId.size() == 0 || keyId.size() == 0 ||
- input.size() == 0 || iv.size() == 0) {
- return android::BAD_VALUE;
- }
- UNUSED(output);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t decrypt(
- const Vector<uint8_t>& sessionId,
- const Vector<uint8_t>& keyId,
- const Vector<uint8_t>& input,
- const Vector<uint8_t>& iv,
- Vector<uint8_t>& output) {
- if (sessionId.size() == 0 || keyId.size() == 0 ||
- input.size() == 0 || iv.size() == 0) {
- return android::BAD_VALUE;
- }
- UNUSED(output);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t sign(
- const Vector<uint8_t>& sessionId,
- const Vector<uint8_t>& keyId,
- const Vector<uint8_t>& message,
- Vector<uint8_t>& signature) {
- if (sessionId.size() == 0 || keyId.size() == 0 ||
- message.size() == 0) {
- return android::BAD_VALUE;
- }
- UNUSED(signature);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t verify(
- const Vector<uint8_t>& sessionId,
- const Vector<uint8_t>& keyId,
- const Vector<uint8_t>& message,
- const Vector<uint8_t>& signature, bool& match) {
- if (sessionId.size() == 0 || keyId.size() == 0 ||
- message.size() == 0 || signature.size() == 0) {
- return android::BAD_VALUE;
- }
- UNUSED(match);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
- virtual status_t signRSA(
- const Vector<uint8_t>& sessionId,
- const String8& algorithm,
- const Vector<uint8_t>& message,
- const Vector<uint8_t>& wrappedKey,
- Vector<uint8_t>& signature) {
- if (sessionId.size() == 0 || algorithm.size() == 0 ||
- message.size() == 0 || wrappedKey.size() == 0) {
- return android::BAD_VALUE;
- }
- UNUSED(signature);
- return android::ERROR_DRM_CANNOT_HANDLE;
- }
-
-private:
- void setPlayPolicy();
-
- android::KeyedVector<android::String8, android::String8> mPlayPolicy;
- SessionLibrary* mSessionLibrary;
-
- DISALLOW_EVIL_CONSTRUCTORS(DrmPlugin);
-};
-
-} // namespace clearkeydrm
-
-#endif // CLEARKEY_DRM_PLUGIN_H_
diff --git a/drm/mediadrm/plugins/clearkey/Session.cpp b/drm/mediadrm/plugins/clearkey/Session.cpp
deleted file mode 100644
index d210f5e..0000000
--- a/drm/mediadrm/plugins/clearkey/Session.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ClearKeyCryptoPlugin"
-#include <utils/Log.h>
-
-#include <media/stagefright/MediaErrors.h>
-#include <utils/String8.h>
-
-#include "Session.h"
-
-#include "AesCtrDecryptor.h"
-#include "InitDataParser.h"
-#include "JsonWebKey.h"
-
-namespace clearkeydrm {
-
-using android::Mutex;
-using android::String8;
-using android::Vector;
-using android::status_t;
-
-status_t Session::getKeyRequest(
- const Vector<uint8_t>& initData,
- const String8& mimeType,
- Vector<uint8_t>* keyRequest) const {
- InitDataParser parser;
- return parser.parse(initData, mimeType, keyRequest);
-}
-
-status_t Session::provideKeyResponse(const Vector<uint8_t>& response) {
- String8 responseString(
- reinterpret_cast<const char*>(response.array()), response.size());
- KeyMap keys;
-
- Mutex::Autolock lock(mMapLock);
- JsonWebKey parser;
- if (parser.extractKeysFromJsonWebKeySet(responseString, &keys)) {
- for (size_t i = 0; i < keys.size(); ++i) {
- const KeyMap::key_type& keyId = keys.keyAt(i);
- const KeyMap::value_type& key = keys.valueAt(i);
- mKeyMap.add(keyId, key);
- }
- return android::OK;
- } else {
- return android::ERROR_DRM_UNKNOWN;
- }
-}
-
-status_t Session::decrypt(
- const KeyId keyId, const Iv iv, const void* source,
- void* destination, const SubSample* subSamples,
- size_t numSubSamples, size_t* bytesDecryptedOut) {
- Mutex::Autolock lock(mMapLock);
-
- Vector<uint8_t> keyIdVector;
- keyIdVector.appendArray(keyId, kBlockSize);
- if (mKeyMap.indexOfKey(keyIdVector) < 0) {
- return android::ERROR_DRM_NO_LICENSE;
- }
-
- const Vector<uint8_t>& key = mKeyMap.valueFor(keyIdVector);
- AesCtrDecryptor decryptor;
- return decryptor.decrypt(
- key, iv,
- reinterpret_cast<const uint8_t*>(source),
- reinterpret_cast<uint8_t*>(destination), subSamples,
- numSubSamples, bytesDecryptedOut);
-}
-
-} // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp b/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
deleted file mode 100644
index 0419f97..0000000
--- a/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ClearKeyCryptoPlugin"
-#include <utils/Log.h>
-
-#include <utils/String8.h>
-
-#include "SessionLibrary.h"
-
-namespace clearkeydrm {
-
-using android::Mutex;
-using android::sp;
-using android::String8;
-using android::Vector;
-
-Mutex SessionLibrary::sSingletonLock;
-SessionLibrary* SessionLibrary::sSingleton = NULL;
-
-SessionLibrary* SessionLibrary::get() {
- Mutex::Autolock lock(sSingletonLock);
-
- if (sSingleton == NULL) {
- ALOGD("Instantiating Session Library Singleton.");
- sSingleton = new SessionLibrary();
- }
-
- return sSingleton;
-}
-
-sp<Session> SessionLibrary::createSession() {
- Mutex::Autolock lock(mSessionsLock);
-
- String8 sessionIdString = String8::format("%u", mNextSessionId);
- mNextSessionId += 1;
- Vector<uint8_t> sessionId;
- sessionId.appendArray(
- reinterpret_cast<const uint8_t*>(sessionIdString.string()),
- sessionIdString.size());
-
- mSessions.add(sessionId, new Session(sessionId));
- return mSessions.valueFor(sessionId);
-}
-
-sp<Session> SessionLibrary::findSession(
- const Vector<uint8_t>& sessionId) {
- Mutex::Autolock lock(mSessionsLock);
- if (mSessions.indexOfKey(sessionId) < 0) {
- return sp<Session>(NULL);
- }
- return mSessions.valueFor(sessionId);
-}
-
-void SessionLibrary::destroySession(const sp<Session>& session) {
- Mutex::Autolock lock(mSessionsLock);\
- mSessions.removeItem(session->sessionId());
-}
-
-} // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/common/Android.bp b/drm/mediadrm/plugins/clearkey/common/Android.bp
new file mode 100644
index 0000000..2c674e1
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/common/Android.bp
@@ -0,0 +1,38 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library_static {
+ name: "libclearkeycommon",
+ vendor: true,
+
+ srcs: [
+ "ClearKeyUUID.cpp",
+ "Utils.cpp",
+ ],
+
+ cflags: ["-Wall", "-Werror"],
+
+ include_dirs: ["frameworks/av/include"],
+
+ shared_libs: ["libutils"],
+
+ export_include_dirs: ["include"],
+
+ sanitize: {
+ integer_overflow: true,
+ },
+}
+
diff --git a/drm/mediadrm/plugins/clearkey/ClearKeyUUID.cpp b/drm/mediadrm/plugins/clearkey/common/ClearKeyUUID.cpp
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/ClearKeyUUID.cpp
rename to drm/mediadrm/plugins/clearkey/common/ClearKeyUUID.cpp
diff --git a/drm/mediadrm/plugins/clearkey/Utils.cpp b/drm/mediadrm/plugins/clearkey/common/Utils.cpp
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/Utils.cpp
rename to drm/mediadrm/plugins/clearkey/common/Utils.cpp
diff --git a/drm/mediadrm/plugins/clearkey/ClearKeyUUID.h b/drm/mediadrm/plugins/clearkey/common/include/ClearKeyUUID.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/ClearKeyUUID.h
rename to drm/mediadrm/plugins/clearkey/common/include/ClearKeyUUID.h
diff --git a/drm/mediadrm/plugins/clearkey/MimeType.h b/drm/mediadrm/plugins/clearkey/common/include/MimeType.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/MimeType.h
rename to drm/mediadrm/plugins/clearkey/common/include/MimeType.h
diff --git a/drm/mediadrm/plugins/clearkey/Utils.h b/drm/mediadrm/plugins/clearkey/common/include/Utils.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/Utils.h
rename to drm/mediadrm/plugins/clearkey/common/include/Utils.h
diff --git a/drm/mediadrm/plugins/clearkey/default/AesCtrDecryptor.cpp b/drm/mediadrm/plugins/clearkey/default/AesCtrDecryptor.cpp
new file mode 100644
index 0000000..f7106b2
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/AesCtrDecryptor.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyCryptoPlugin"
+#include <utils/Log.h>
+
+#include <openssl/aes.h>
+
+#include "AesCtrDecryptor.h"
+
+namespace clearkeydrm {
+
+static const size_t kBlockBitCount = kBlockSize * 8;
+
+android::status_t AesCtrDecryptor::decrypt(const android::Vector<uint8_t>& key,
+ const Iv iv, const uint8_t* source,
+ uint8_t* destination,
+ const SubSample* subSamples,
+ size_t numSubSamples,
+ size_t* bytesDecryptedOut) {
+ uint32_t blockOffset = 0;
+ uint8_t previousEncryptedCounter[kBlockSize];
+ memset(previousEncryptedCounter, 0, kBlockSize);
+
+ if (key.size() != kBlockSize || (sizeof(Iv) / sizeof(uint8_t)) != kBlockSize) {
+ android_errorWriteLog(0x534e4554, "63982768");
+ return android::ERROR_DRM_DECRYPT;
+ }
+
+ size_t offset = 0;
+ AES_KEY opensslKey;
+ AES_set_encrypt_key(key.array(), kBlockBitCount, &opensslKey);
+ Iv opensslIv;
+ memcpy(opensslIv, iv, sizeof(opensslIv));
+
+ for (size_t i = 0; i < numSubSamples; ++i) {
+ const SubSample& subSample = subSamples[i];
+
+ if (subSample.mNumBytesOfClearData > 0) {
+ memcpy(destination + offset, source + offset,
+ subSample.mNumBytesOfClearData);
+ offset += subSample.mNumBytesOfClearData;
+ }
+
+ if (subSample.mNumBytesOfEncryptedData > 0) {
+ AES_ctr128_encrypt(source + offset, destination + offset,
+ subSample.mNumBytesOfEncryptedData, &opensslKey,
+ opensslIv, previousEncryptedCounter,
+ &blockOffset);
+ offset += subSample.mNumBytesOfEncryptedData;
+ }
+ }
+
+ *bytesDecryptedOut = offset;
+ return android::OK;
+}
+
+} // namespace clearkeydrm
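
Note (illustrative, not part of the patch): the decryptor above copies the clear bytes of each SubSample straight through and runs AES-CTR only over the encrypted bytes, keeping one continuous counter stream across subsamples. A minimal caller might look like the sketch below; decryptOneSubSample is a hypothetical helper and assumes the caller builds against the plugin sources in this change.

    #include <utils/Errors.h>
    #include <utils/Vector.h>

    #include "AesCtrDecryptor.h"

    // Hypothetical helper: decrypt one buffer laid out as clearBytes of
    // plaintext followed by encryptedBytes of AES-CTR ciphertext, using a
    // raw kBlockSize (16-byte) content key.
    android::status_t decryptOneSubSample(
            const uint8_t key[clearkeydrm::kBlockSize], const clearkeydrm::Iv iv,
            const uint8_t* source, uint8_t* destination,
            uint32_t clearBytes, uint32_t encryptedBytes) {
        android::Vector<uint8_t> keyVector;
        keyVector.appendArray(key, clearkeydrm::kBlockSize);

        clearkeydrm::SubSample subSample;
        subSample.mNumBytesOfClearData = clearBytes;
        subSample.mNumBytesOfEncryptedData = encryptedBytes;

        size_t bytesDecrypted = 0;
        clearkeydrm::AesCtrDecryptor decryptor;
        return decryptor.decrypt(keyVector, iv, source, destination,
                                 &subSample, 1 /* numSubSamples */,
                                 &bytesDecrypted);
    }
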
diff --git a/drm/mediadrm/plugins/clearkey/default/Android.bp b/drm/mediadrm/plugins/clearkey/default/Android.bp
new file mode 100644
index 0000000..7ba5708
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/Android.bp
@@ -0,0 +1,67 @@
+//
+// Copyright (C) 2014 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library_shared {
+ name: "libdrmclearkeyplugin",
+ vendor: true,
+
+ srcs: [
+ "AesCtrDecryptor.cpp",
+ "CreatePluginFactories.cpp",
+ "CryptoFactory.cpp",
+ "CryptoPlugin.cpp",
+ "DrmFactory.cpp",
+ "DrmPlugin.cpp",
+ "InitDataParser.cpp",
+ "JsonWebKey.cpp",
+ "Session.cpp",
+ "SessionLibrary.cpp",
+ ],
+
+ relative_install_path: "mediadrm",
+
+ cflags: ["-Wall", "-Werror"],
+
+ shared_libs: [
+ "libcrypto",
+ "liblog",
+ "libstagefright_foundation",
+ "libutils",
+ ],
+
+ static_libs: [
+ "libclearkeycommon",
+ "libjsmn"
+ ],
+
+ local_include_dirs: ["include"],
+ export_include_dirs: ["include"],
+ export_static_lib_headers: ["libjsmn"],
+
+ include_dirs: [
+ "frameworks/native/include",
+ "frameworks/av/include",
+ ],
+
+ sanitize: {
+ integer_overflow: true,
+ },
+}
+
+//########################################################################
+// Build unit tests
+
+subdirs = ["tests"]
diff --git a/drm/mediadrm/plugins/clearkey/CreatePluginFactories.cpp b/drm/mediadrm/plugins/clearkey/default/CreatePluginFactories.cpp
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/CreatePluginFactories.cpp
rename to drm/mediadrm/plugins/clearkey/default/CreatePluginFactories.cpp
diff --git a/drm/mediadrm/plugins/clearkey/default/CryptoFactory.cpp b/drm/mediadrm/plugins/clearkey/default/CryptoFactory.cpp
new file mode 100644
index 0000000..f15f92b
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/CryptoFactory.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyCryptoFactory"
+#include <utils/Log.h>
+
+#include <utils/Errors.h>
+#include <utils/StrongPointer.h>
+
+#include "CryptoFactory.h"
+
+#include "ClearKeyUUID.h"
+#include "CryptoPlugin.h"
+#include "Session.h"
+#include "SessionLibrary.h"
+
+namespace clearkeydrm {
+
+bool CryptoFactory::isCryptoSchemeSupported(const uint8_t uuid[16]) const {
+ return isClearKeyUUID(uuid);
+}
+
+android::status_t CryptoFactory::createPlugin(
+ const uint8_t uuid[16],
+ const void* data, size_t size,
+ android::CryptoPlugin** plugin) {
+ if (!isCryptoSchemeSupported(uuid)) {
+ *plugin = NULL;
+ return android::BAD_VALUE;
+ }
+
+ android::Vector<uint8_t> sessionId;
+ sessionId.appendArray(reinterpret_cast<const uint8_t*>(data), size);
+
+ CryptoPlugin *clearKeyPlugin = new CryptoPlugin(sessionId);
+ android::status_t result = clearKeyPlugin->getInitStatus();
+ if (result == android::OK) {
+ *plugin = clearKeyPlugin;
+ } else {
+ delete clearKeyPlugin;
+ *plugin = NULL;
+ }
+ return result;
+}
+
+} // namespace clearkeydrm
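
Note (illustrative, not part of the patch): as the appendArray() call above shows, the opaque data/size pair handed to createPlugin() is interpreted as a session id, so callers typically pass the id of a session previously opened through the DRM plugin. makeClearKeyCryptoPlugin below is a hypothetical wrapper around that flow.

    #include <utils/Errors.h>
    #include <utils/Vector.h>

    #include "CryptoFactory.h"

    // Hypothetical wrapper: create a CryptoPlugin bound to an existing DRM
    // session, returning NULL if the scheme is unsupported or the plugin
    // fails to initialize for that session id.
    android::CryptoPlugin* makeClearKeyCryptoPlugin(
            const uint8_t uuid[16], const android::Vector<uint8_t>& sessionId) {
        clearkeydrm::CryptoFactory factory;
        android::CryptoPlugin* plugin = NULL;
        android::status_t res = factory.createPlugin(
                uuid, sessionId.array(), sessionId.size(), &plugin);
        // createPlugin() deletes the plugin and nulls *plugin on failure.
        return (res == android::OK) ? plugin : NULL;  // caller owns the plugin
    }
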
diff --git a/drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/default/CryptoPlugin.cpp
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp
rename to drm/mediadrm/plugins/clearkey/default/CryptoPlugin.cpp
diff --git a/drm/mediadrm/plugins/clearkey/default/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/default/DrmFactory.cpp
new file mode 100644
index 0000000..8301e40
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/DrmFactory.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyDrmFactory"
+#include <utils/Log.h>
+
+#include <utils/Errors.h>
+
+#include "DrmFactory.h"
+
+#include "DrmPlugin.h"
+#include "ClearKeyUUID.h"
+#include "MimeType.h"
+#include "SessionLibrary.h"
+
+namespace clearkeydrm {
+
+bool DrmFactory::isCryptoSchemeSupported(const uint8_t uuid[16]) {
+ return isClearKeyUUID(uuid);
+}
+
+bool DrmFactory::isContentTypeSupported(const android::String8 &type) {
+ // This should match the types handled by InitDataParser.
+ return type == kIsoBmffVideoMimeType ||
+ type == kIsoBmffAudioMimeType ||
+ type == kCencInitDataFormat ||
+ type == kWebmVideoMimeType ||
+ type == kWebmAudioMimeType ||
+ type == kWebmInitDataFormat;
+}
+
+android::status_t DrmFactory::createDrmPlugin(
+ const uint8_t uuid[16],
+ android::DrmPlugin** plugin) {
+ if (!isCryptoSchemeSupported(uuid)) {
+ *plugin = NULL;
+ return android::BAD_VALUE;
+ }
+
+ *plugin = new DrmPlugin(SessionLibrary::get());
+ return android::OK;
+}
+
+} // namespace clearkeydrm
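
Note (illustrative, not part of the patch): a sketch of the factory flow above. makeClearKeyDrmPlugin is a hypothetical helper, and uuid must be one of the values accepted by isClearKeyUUID() in ClearKeyUUID.h.

    #include <utils/Errors.h>

    #include "DrmFactory.h"
    #include "DrmPlugin.h"

    // Hypothetical helper: probe for ClearKey support, then create a plugin
    // backed by the shared SessionLibrary singleton.
    android::DrmPlugin* makeClearKeyDrmPlugin(const uint8_t uuid[16]) {
        clearkeydrm::DrmFactory factory;
        if (!factory.isCryptoSchemeSupported(uuid)) {
            return NULL;
        }
        android::DrmPlugin* plugin = NULL;
        if (factory.createDrmPlugin(uuid, &plugin) != android::OK) {
            return NULL;  // createDrmPlugin() nulls *plugin on failure
        }
        return plugin;  // caller owns the plugin
    }
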
diff --git a/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
new file mode 100644
index 0000000..1b8b8c1
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/DrmPlugin.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyDrmPlugin"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaErrors.h>
+#include <utils/StrongPointer.h>
+
+#include "DrmPlugin.h"
+#include "ClearKeyDrmProperties.h"
+#include "Session.h"
+
+namespace {
+const android::String8 kStreaming("Streaming");
+const android::String8 kOffline("Offline");
+const android::String8 kTrue("True");
+
+const android::String8 kQueryKeyLicenseType("LicenseType");
+ // Value: "Streaming" or "Offline"
+const android::String8 kQueryKeyPlayAllowed("PlayAllowed");
+ // Value: "True" or "False"
+const android::String8 kQueryKeyRenewAllowed("RenewAllowed");
+ // Value: "True" or "False"
+};
+
+namespace clearkeydrm {
+
+using android::sp;
+
+DrmPlugin::DrmPlugin(SessionLibrary* sessionLibrary)
+ : mSessionLibrary(sessionLibrary) {
+
+ mPlayPolicy.clear();
+ initProperties();
+}
+
+void DrmPlugin::initProperties() {
+ mStringProperties.clear();
+ mStringProperties.add(kVendorKey, kVendorValue);
+ mStringProperties.add(kVersionKey, kVersionValue);
+ mStringProperties.add(kPluginDescriptionKey, kPluginDescriptionValue);
+ mStringProperties.add(kAlgorithmsKey, kAlgorithmsValue);
+ mStringProperties.add(kListenerTestSupportKey, kListenerTestSupportValue);
+
+ Vector<uint8_t> testDeviceId;
+ testDeviceId.appendArray(kTestDeviceIdData, sizeof(kTestDeviceIdData) / sizeof(uint8_t));
+ mByteArrayProperties.add(kDeviceIdKey, testDeviceId);
+}
+
+status_t DrmPlugin::openSession(Vector<uint8_t>& sessionId) {
+ sp<Session> session = mSessionLibrary->createSession();
+ sessionId = session->sessionId();
+ return android::OK;
+}
+
+status_t DrmPlugin::closeSession(const Vector<uint8_t>& sessionId) {
+ sp<Session> session = mSessionLibrary->findSession(sessionId);
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ if (session.get()) {
+ mSessionLibrary->destroySession(session);
+ return android::OK;
+ }
+ return android::ERROR_DRM_SESSION_NOT_OPENED;
+}
+
+status_t DrmPlugin::getKeyRequest(
+ const Vector<uint8_t>& scope,
+ const Vector<uint8_t>& initData,
+ const String8& mimeType,
+ KeyType keyType,
+ const KeyedVector<String8, String8>& optionalParameters,
+ Vector<uint8_t>& request,
+ String8& defaultUrl,
+ DrmPlugin::KeyRequestType *keyRequestType) {
+ UNUSED(optionalParameters);
+ if (scope.size() == 0) {
+ return android::BAD_VALUE;
+ }
+
+ if (keyType != kKeyType_Streaming) {
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ *keyRequestType = DrmPlugin::kKeyRequestType_Initial;
+ defaultUrl.clear();
+ sp<Session> session = mSessionLibrary->findSession(scope);
+ if (!session.get()) {
+ return android::ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ return session->getKeyRequest(initData, mimeType, &request);
+}
+
+void DrmPlugin::setPlayPolicy() {
+ mPlayPolicy.clear();
+ mPlayPolicy.add(kQueryKeyLicenseType, kStreaming);
+ mPlayPolicy.add(kQueryKeyPlayAllowed, kTrue);
+ mPlayPolicy.add(kQueryKeyRenewAllowed, kTrue);
+}
+
+status_t DrmPlugin::provideKeyResponse(
+ const Vector<uint8_t>& scope,
+ const Vector<uint8_t>& response,
+ Vector<uint8_t>& keySetId) {
+ if (scope.size() == 0 || response.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ sp<Session> session = mSessionLibrary->findSession(scope);
+ if (!session.get()) {
+ return android::ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ setPlayPolicy();
+ status_t res = session->provideKeyResponse(response);
+ if (res == android::OK) {
+ // This is for testing AMediaDrm_setOnEventListener only.
+ sendEvent(kDrmPluginEventVendorDefined, 0, &scope, NULL);
+ keySetId.clear();
+ }
+ return res;
+}
+
+status_t DrmPlugin::getPropertyByteArray(
+ const String8& name, Vector<uint8_t>& value) const {
+ ssize_t index = mByteArrayProperties.indexOfKey(name);
+ if (index < 0) {
+ ALOGE("App requested unknown property: %s", name.string());
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+ value = mByteArrayProperties.valueAt(index);
+ return android::OK;
+}
+
+status_t DrmPlugin::setPropertyByteArray(
+ const String8& name, const Vector<uint8_t>& value)
+{
+ UNUSED(value);
+ if (0 == name.compare(kDeviceIdKey)) {
+ ALOGD("Cannot set immutable property: %s", name.string());
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ // Setting of undefined properties is not supported
+ ALOGE("Failed to set property byte array, key=%s", name.string());
+ return android::ERROR_DRM_CANNOT_HANDLE;
+}
+
+status_t DrmPlugin::getPropertyString(
+ const String8& name, String8& value) const {
+ ssize_t index = mStringProperties.indexOfKey(name);
+ if (index < 0) {
+ ALOGE("App requested unknown property: %s", name.string());
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+ value = mStringProperties.valueAt(index);
+ return android::OK;
+}
+
+status_t DrmPlugin::setPropertyString(
+ const String8& name, const String8& value) {
+ String8 immutableKeys;
+ immutableKeys.appendFormat("%s,%s,%s,%s",
+ kAlgorithmsKey.string(), kPluginDescriptionKey.string(),
+ kVendorKey.string(), kVersionKey.string());
+ if (immutableKeys.contains(name.string())) {
+ ALOGD("Cannot set immutable property: %s", name.string());
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ ssize_t index = mStringProperties.indexOfKey(name);
+ if (index < 0) {
+ ALOGE("Cannot set undefined property string, key=%s", name.string());
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ if (mStringProperties.add(name, value) < 0) {
+ ALOGE("Failed to set property string, key=%s", name.string());
+ return android::ERROR_DRM_UNKNOWN;
+ }
+ return android::OK;
+}
+
+status_t DrmPlugin::queryKeyStatus(
+ const Vector<uint8_t>& sessionId,
+ KeyedVector<String8, String8>& infoMap) const {
+
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
+
+ infoMap.clear();
+ for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
+ infoMap.add(mPlayPolicy.keyAt(i), mPlayPolicy.valueAt(i));
+ }
+ return android::OK;
+}
+} // namespace clearkeydrm
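
Note (illustrative, not part of the patch): the new initProperties() table replaces the hard-coded if/else chain in the deleted DrmPlugin.cpp's getPropertyString(). The sketch below shows the resulting behavior; the property names come from ClearKeyDrmProperties.h in this change, and demonstrateProperties is a hypothetical function.

    #include <utils/String8.h>
    #include <utils/Vector.h>

    #include "DrmPlugin.h"

    // Hypothetical usage of the property tables: string properties come from
    // mStringProperties, byte arrays from mByteArrayProperties, and the
    // built-in keys are immutable.
    void demonstrateProperties() {
        clearkeydrm::DrmPlugin plugin(clearkeydrm::SessionLibrary::get());

        android::String8 vendor;
        plugin.getPropertyString(android::String8("vendor"), vendor);          // -> "Google"

        android::Vector<uint8_t> deviceId;
        plugin.getPropertyByteArray(android::String8("deviceId"), deviceId);   // -> kTestDeviceIdData

        // Rejected with ERROR_DRM_CANNOT_HANDLE: "vendor" is in the
        // immutable-key list built by setPropertyString().
        plugin.setPropertyString(android::String8("vendor"), android::String8("SomeVendor"));
    }
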
diff --git a/drm/mediadrm/plugins/clearkey/InitDataParser.cpp b/drm/mediadrm/plugins/clearkey/default/InitDataParser.cpp
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/InitDataParser.cpp
rename to drm/mediadrm/plugins/clearkey/default/InitDataParser.cpp
diff --git a/drm/mediadrm/plugins/clearkey/JsonWebKey.cpp b/drm/mediadrm/plugins/clearkey/default/JsonWebKey.cpp
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/JsonWebKey.cpp
rename to drm/mediadrm/plugins/clearkey/default/JsonWebKey.cpp
diff --git a/drm/mediadrm/plugins/clearkey/default/Session.cpp b/drm/mediadrm/plugins/clearkey/default/Session.cpp
new file mode 100644
index 0000000..b3ceaec
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/Session.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeySession"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaErrors.h>
+#include <utils/String8.h>
+
+#include "Session.h"
+
+#include "AesCtrDecryptor.h"
+#include "InitDataParser.h"
+#include "JsonWebKey.h"
+
+namespace clearkeydrm {
+
+using android::Mutex;
+using android::String8;
+using android::Vector;
+using android::status_t;
+
+status_t Session::getKeyRequest(
+ const Vector<uint8_t>& initData,
+ const String8& mimeType,
+ Vector<uint8_t>* keyRequest) const {
+ InitDataParser parser;
+ return parser.parse(initData, mimeType, keyRequest);
+}
+
+status_t Session::provideKeyResponse(const Vector<uint8_t>& response) {
+ String8 responseString(
+ reinterpret_cast<const char*>(response.array()), response.size());
+ KeyMap keys;
+
+ Mutex::Autolock lock(mMapLock);
+ JsonWebKey parser;
+ if (parser.extractKeysFromJsonWebKeySet(responseString, &keys)) {
+ for (size_t i = 0; i < keys.size(); ++i) {
+ const KeyMap::key_type& keyId = keys.keyAt(i);
+ const KeyMap::value_type& key = keys.valueAt(i);
+ mKeyMap.add(keyId, key);
+ }
+ return android::OK;
+ } else {
+ return android::ERROR_DRM_UNKNOWN;
+ }
+}
+
+status_t Session::decrypt(
+ const KeyId keyId, const Iv iv, const void* source,
+ void* destination, const SubSample* subSamples,
+ size_t numSubSamples, size_t* bytesDecryptedOut) {
+ Mutex::Autolock lock(mMapLock);
+
+ Vector<uint8_t> keyIdVector;
+ keyIdVector.appendArray(keyId, kBlockSize);
+ if (mKeyMap.indexOfKey(keyIdVector) < 0) {
+ return android::ERROR_DRM_NO_LICENSE;
+ }
+
+ const Vector<uint8_t>& key = mKeyMap.valueFor(keyIdVector);
+ AesCtrDecryptor decryptor;
+ return decryptor.decrypt(
+ key, iv,
+ reinterpret_cast<const uint8_t*>(source),
+ reinterpret_cast<uint8_t*>(destination), subSamples,
+ numSubSamples, bytesDecryptedOut);
+}
+
+} // namespace clearkeydrm
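
Note (illustrative, not part of the patch): a sketch of the session key flow above. loadKeys is a hypothetical helper; the "cenc" init-data type and the exact JSON Web Key Set shape are assumptions based on the W3C ClearKey format that InitDataParser and JsonWebKey are expected to accept, and the "kid"/"k" fields are made-up base64url placeholders, not real keys.

    #include <string.h>

    #include <utils/String8.h>
    #include <utils/StrongPointer.h>
    #include <utils/Vector.h>

    #include "Session.h"
    #include "SessionLibrary.h"

    // Hypothetical flow: build a key request from init data, then load a
    // JSON Web Key Set response so Session::decrypt() can find the key.
    android::status_t loadKeys(const android::Vector<uint8_t>& initData) {
        android::sp<clearkeydrm::Session> session =
                clearkeydrm::SessionLibrary::get()->createSession();

        android::Vector<uint8_t> request;
        android::status_t res = session->getKeyRequest(
                initData, android::String8("cenc"), &request);
        if (res != android::OK) {
            return res;
        }

        // Illustrative response only; "kid" and "k" are placeholder values.
        const char* jwks =
                "{\"keys\":[{\"kty\":\"oct\","
                "\"kid\":\"YWJjZGVmZ2hpamtsbW5vcA\","
                "\"k\":\"MTIzNDU2Nzg5MDEyMzQ1Ng\"}]}";
        android::Vector<uint8_t> response;
        response.appendArray(reinterpret_cast<const uint8_t*>(jwks), strlen(jwks));
        return session->provideKeyResponse(response);
    }
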
diff --git a/drm/mediadrm/plugins/clearkey/default/SessionLibrary.cpp b/drm/mediadrm/plugins/clearkey/default/SessionLibrary.cpp
new file mode 100644
index 0000000..529230e
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/SessionLibrary.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeySessionLibrary"
+#include <utils/Log.h>
+
+#include <utils/String8.h>
+
+#include "SessionLibrary.h"
+
+namespace clearkeydrm {
+
+using android::Mutex;
+using android::sp;
+using android::String8;
+using android::Vector;
+
+Mutex SessionLibrary::sSingletonLock;
+SessionLibrary* SessionLibrary::sSingleton = NULL;
+
+SessionLibrary* SessionLibrary::get() {
+ Mutex::Autolock lock(sSingletonLock);
+
+ if (sSingleton == NULL) {
+ ALOGD("Instantiating Session Library Singleton.");
+ sSingleton = new SessionLibrary();
+ }
+
+ return sSingleton;
+}
+
+sp<Session> SessionLibrary::createSession() {
+ Mutex::Autolock lock(mSessionsLock);
+
+ String8 sessionIdString = String8::format("%u", mNextSessionId);
+ mNextSessionId += 1;
+ Vector<uint8_t> sessionId;
+ sessionId.appendArray(
+ reinterpret_cast<const uint8_t*>(sessionIdString.string()),
+ sessionIdString.size());
+
+ mSessions.add(sessionId, new Session(sessionId));
+ return mSessions.valueFor(sessionId);
+}
+
+sp<Session> SessionLibrary::findSession(
+ const Vector<uint8_t>& sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+ if (mSessions.indexOfKey(sessionId) < 0) {
+ return sp<Session>(NULL);
+ }
+ return mSessions.valueFor(sessionId);
+}
+
+void SessionLibrary::destroySession(const sp<Session>& session) {
+ Mutex::Autolock lock(mSessionsLock);
+ mSessions.removeItem(session->sessionId());
+}
+
+} // namespace clearkeydrm
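
Note (illustrative, not part of the patch): sessions are keyed by their ASCII session-id blob, so an id round-trips through findSession() until destroySession() drops it. sessionLifecycle below is a hypothetical walk-through of that contract.

    #include <utils/StrongPointer.h>
    #include <utils/Vector.h>

    #include "SessionLibrary.h"

    // Hypothetical walk-through of the SessionLibrary contract.
    void sessionLifecycle() {
        clearkeydrm::SessionLibrary* library = clearkeydrm::SessionLibrary::get();

        android::sp<clearkeydrm::Session> session = library->createSession();
        android::Vector<uint8_t> sessionId = session->sessionId();

        // Unknown ids return a NULL sp<> rather than creating a session.
        android::sp<clearkeydrm::Session> found = library->findSession(sessionId);
        if (found.get()) {
            library->destroySession(found);
        }
    }
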
diff --git a/drm/mediadrm/plugins/clearkey/default/include/AesCtrDecryptor.h b/drm/mediadrm/plugins/clearkey/default/include/AesCtrDecryptor.h
new file mode 100644
index 0000000..edb8445
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/include/AesCtrDecryptor.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_AES_CTR_DECRYPTOR_H_
+#define CLEARKEY_AES_CTR_DECRYPTOR_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/MediaErrors.h>
+#include <Utils.h>
+#include <utils/Errors.h>
+#include <utils/Vector.h>
+
+#include "ClearKeyTypes.h"
+
+namespace clearkeydrm {
+
+class AesCtrDecryptor {
+public:
+ AesCtrDecryptor() {}
+
+ android::status_t decrypt(const android::Vector<uint8_t>& key, const Iv iv,
+ const uint8_t* source, uint8_t* destination,
+ const SubSample* subSamples, size_t numSubSamples,
+ size_t* bytesDecryptedOut);
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(AesCtrDecryptor);
+};
+
+} // namespace clearkeydrm
+
+#endif // CLEARKEY_AES_CTR_DECRYPTOR_H_
diff --git a/drm/mediadrm/plugins/clearkey/default/include/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/default/include/ClearKeyDrmProperties.h
new file mode 100644
index 0000000..a99e174
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/include/ClearKeyDrmProperties.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_DRM_PROPERTIES_H_
+#define CLEARKEY_DRM_PROPERTIES_H_
+
+#include <utils/String8.h>
+
+namespace clearkeydrm {
+
+static const android::String8 kVendorKey("vendor");
+static const android::String8 kVendorValue("Google");
+static const android::String8 kVersionKey("version");
+static const android::String8 kVersionValue("1.0");
+static const android::String8 kPluginDescriptionKey("description");
+static const android::String8 kPluginDescriptionValue("ClearKey CDM");
+static const android::String8 kAlgorithmsKey("algorithms");
+static const android::String8 kAlgorithmsValue("");
+static const android::String8 kListenerTestSupportKey("listenerTestSupport");
+static const android::String8 kListenerTestSupportValue("true");
+
+static const android::String8 kDeviceIdKey("deviceId");
+static const uint8_t kTestDeviceIdData[] =
+ {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+} // namespace clearkeydrm
+
+#endif // CLEARKEY_DRM_PROPERTIES_H_
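
Note (illustrative, not part of the patch): each constant pair in this header is registered by DrmPlugin::initProperties(), so adding a default property means declaring the key/value here and adding a matching mStringProperties.add() (or mByteArrayProperties.add()) call there. A hypothetical example, with made-up key and value:

    #include <utils/String8.h>

    // Hypothetical extension of the table above; names are examples only.
    static const android::String8 kExamplePropertyKey("exampleProperty");
    static const android::String8 kExamplePropertyValue("exampleValue");
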
diff --git a/drm/mediadrm/plugins/clearkey/ClearKeyTypes.h b/drm/mediadrm/plugins/clearkey/default/include/ClearKeyTypes.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/ClearKeyTypes.h
rename to drm/mediadrm/plugins/clearkey/default/include/ClearKeyTypes.h
diff --git a/drm/mediadrm/plugins/clearkey/CreatePluginFactories.h b/drm/mediadrm/plugins/clearkey/default/include/CreatePluginFactories.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/CreatePluginFactories.h
rename to drm/mediadrm/plugins/clearkey/default/include/CreatePluginFactories.h
diff --git a/drm/mediadrm/plugins/clearkey/CryptoFactory.h b/drm/mediadrm/plugins/clearkey/default/include/CryptoFactory.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/CryptoFactory.h
rename to drm/mediadrm/plugins/clearkey/default/include/CryptoFactory.h
diff --git a/drm/mediadrm/plugins/clearkey/CryptoPlugin.h b/drm/mediadrm/plugins/clearkey/default/include/CryptoPlugin.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/CryptoPlugin.h
rename to drm/mediadrm/plugins/clearkey/default/include/CryptoPlugin.h
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.h b/drm/mediadrm/plugins/clearkey/default/include/DrmFactory.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/DrmFactory.h
rename to drm/mediadrm/plugins/clearkey/default/include/DrmFactory.h
diff --git a/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
new file mode 100644
index 0000000..4fa42e5
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/include/DrmPlugin.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_DRM_PLUGIN_H_
+#define CLEARKEY_DRM_PLUGIN_H_
+
+#include <media/drm/DrmAPI.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/List.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+#include "SessionLibrary.h"
+#include "Utils.h"
+
+namespace clearkeydrm {
+
+using android::KeyedVector;
+using android::List;
+using android::status_t;
+using android::String8;
+using android::Vector;
+
+class DrmPlugin : public android::DrmPlugin {
+public:
+ explicit DrmPlugin(SessionLibrary* sessionLibrary);
+
+ virtual ~DrmPlugin() {}
+
+ virtual status_t openSession(Vector<uint8_t>& sessionId);
+
+ virtual status_t closeSession(const Vector<uint8_t>& sessionId);
+
+ virtual status_t getKeyRequest(
+ const Vector<uint8_t>& scope,
+ const Vector<uint8_t>& mimeType,
+ const String8& initDataType,
+ KeyType keyType,
+ const KeyedVector<String8, String8>& optionalParameters,
+ Vector<uint8_t>& request,
+ String8& defaultUrl,
+ DrmPlugin::KeyRequestType *keyRequestType);
+
+ virtual status_t provideKeyResponse(
+ const Vector<uint8_t>& scope,
+ const Vector<uint8_t>& response,
+ Vector<uint8_t>& keySetId);
+
+ virtual status_t removeKeys(const Vector<uint8_t>& sessionId) {
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
+
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t restoreKeys(
+ const Vector<uint8_t>& sessionId,
+ const Vector<uint8_t>& keySetId) {
+ if (sessionId.size() == 0 || keySetId.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t queryKeyStatus(
+ const Vector<uint8_t>& sessionId,
+ KeyedVector<String8, String8>& infoMap) const;
+
+ virtual status_t getProvisionRequest(
+ const String8& cert_type,
+ const String8& cert_authority,
+ Vector<uint8_t>& request,
+ String8& defaultUrl) {
+ UNUSED(cert_type);
+ UNUSED(cert_authority);
+ UNUSED(request);
+ UNUSED(defaultUrl);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t provideProvisionResponse(
+ const Vector<uint8_t>& response,
+ Vector<uint8_t>& certificate,
+ Vector<uint8_t>& wrappedKey) {
+ UNUSED(certificate);
+ UNUSED(wrappedKey);
+ if (response.size() == 0) {
+ // empty response
+ return android::BAD_VALUE;
+ }
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t getSecureStops(List<Vector<uint8_t> >& secureStops) {
+ UNUSED(secureStops);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
+ if (ssid.size() == 0) {
+ return android::BAD_VALUE;
+ }
+
+ UNUSED(secureStop);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t releaseSecureStops(const Vector<uint8_t>& ssRelease) {
+ if (ssRelease.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t releaseAllSecureStops() {
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t getHdcpLevels(HdcpLevel *connectedLevel,
+ HdcpLevel *maxLevel) const {
+ UNUSED(connectedLevel);
+ UNUSED(maxLevel);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+
+ virtual status_t getNumberOfSessions(uint32_t *currentSessions,
+ uint32_t *maxSessions) const {
+ UNUSED(currentSessions);
+ UNUSED(maxSessions);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t getSecurityLevel(Vector<uint8_t> const &sessionId,
+ SecurityLevel *level) const {
+ UNUSED(sessionId);
+ UNUSED(level);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t setSecurityLevel(Vector<uint8_t> const &sessionId,
+ const SecurityLevel& level) {
+ UNUSED(sessionId);
+ UNUSED(level);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t getPropertyString(
+ const String8& name, String8& value) const;
+
+ virtual status_t getPropertyByteArray(
+ const String8& name, Vector<uint8_t>& value) const;
+
+ virtual status_t setPropertyString(
+ const String8& name, const String8& value);
+
+ virtual status_t setPropertyByteArray(
+ const String8& name, const Vector<uint8_t>& value);
+
+ virtual status_t setCipherAlgorithm(
+ const Vector<uint8_t>& sessionId, const String8& algorithm) {
+ if (sessionId.size() == 0 || algorithm.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t setMacAlgorithm(
+ const Vector<uint8_t>& sessionId, const String8& algorithm) {
+ if (sessionId.size() == 0 || algorithm.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t encrypt(
+ const Vector<uint8_t>& sessionId,
+ const Vector<uint8_t>& keyId,
+ const Vector<uint8_t>& input,
+ const Vector<uint8_t>& iv,
+ Vector<uint8_t>& output) {
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ input.size() == 0 || iv.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ UNUSED(output);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t decrypt(
+ const Vector<uint8_t>& sessionId,
+ const Vector<uint8_t>& keyId,
+ const Vector<uint8_t>& input,
+ const Vector<uint8_t>& iv,
+ Vector<uint8_t>& output) {
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ input.size() == 0 || iv.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ UNUSED(output);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t sign(
+ const Vector<uint8_t>& sessionId,
+ const Vector<uint8_t>& keyId,
+ const Vector<uint8_t>& message,
+ Vector<uint8_t>& signature) {
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ message.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ UNUSED(signature);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t verify(
+ const Vector<uint8_t>& sessionId,
+ const Vector<uint8_t>& keyId,
+ const Vector<uint8_t>& message,
+ const Vector<uint8_t>& signature, bool& match) {
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ message.size() == 0 || signature.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ UNUSED(match);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ virtual status_t signRSA(
+ const Vector<uint8_t>& sessionId,
+ const String8& algorithm,
+ const Vector<uint8_t>& message,
+ const Vector<uint8_t>& wrappedKey,
+ Vector<uint8_t>& signature) {
+ if (sessionId.size() == 0 || algorithm.size() == 0 ||
+ message.size() == 0 || wrappedKey.size() == 0) {
+ return android::BAD_VALUE;
+ }
+ UNUSED(signature);
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+private:
+ void initProperties();
+ void setPlayPolicy();
+
+ android::KeyedVector<String8, String8> mPlayPolicy;
+ android::KeyedVector<String8, String8> mStringProperties;
+ android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
+
+ SessionLibrary* mSessionLibrary;
+
+ DISALLOW_EVIL_CONSTRUCTORS(DrmPlugin);
+};
+
+} // namespace clearkeydrm
+
+#endif // CLEARKEY_DRM_PLUGIN_H_
diff --git a/drm/mediadrm/plugins/clearkey/InitDataParser.h b/drm/mediadrm/plugins/clearkey/default/include/InitDataParser.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/InitDataParser.h
rename to drm/mediadrm/plugins/clearkey/default/include/InitDataParser.h
diff --git a/drm/mediadrm/plugins/clearkey/JsonWebKey.h b/drm/mediadrm/plugins/clearkey/default/include/JsonWebKey.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/JsonWebKey.h
rename to drm/mediadrm/plugins/clearkey/default/include/JsonWebKey.h
diff --git a/drm/mediadrm/plugins/clearkey/Session.h b/drm/mediadrm/plugins/clearkey/default/include/Session.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/Session.h
rename to drm/mediadrm/plugins/clearkey/default/include/Session.h
diff --git a/drm/mediadrm/plugins/clearkey/SessionLibrary.h b/drm/mediadrm/plugins/clearkey/default/include/SessionLibrary.h
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/SessionLibrary.h
rename to drm/mediadrm/plugins/clearkey/default/include/SessionLibrary.h
diff --git a/drm/mediadrm/plugins/clearkey/default/tests/AesCtrDecryptorUnittest.cpp b/drm/mediadrm/plugins/clearkey/default/tests/AesCtrDecryptorUnittest.cpp
new file mode 100644
index 0000000..5db8290
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/tests/AesCtrDecryptorUnittest.cpp
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <string.h>
+
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+#include "AesCtrDecryptor.h"
+
+namespace clearkeydrm {
+
+using namespace android;
+
+class AesCtrDecryptorTest : public ::testing::Test {
+ protected:
+ typedef uint8_t Key[kBlockSize];
+
+ status_t attemptDecrypt(const Key& key, const Iv& iv, const uint8_t* source,
+ uint8_t* destination, const SubSample* subSamples,
+ size_t numSubSamples, size_t* bytesDecryptedOut) {
+ Vector<uint8_t> keyVector;
+ keyVector.appendArray(key, sizeof(key) / sizeof(uint8_t));
+
+ AesCtrDecryptor decryptor;
+ return decryptor.decrypt(keyVector, iv, source, destination, subSamples,
+ numSubSamples, bytesDecryptedOut);
+ }
+
+ template <size_t totalSize>
+ void attemptDecryptExpectingSuccess(const Key& key, const Iv& iv,
+ const uint8_t* encrypted,
+ const uint8_t* decrypted,
+ const SubSample* subSamples,
+ size_t numSubSamples) {
+ uint8_t outputBuffer[totalSize] = {};
+ size_t bytesDecrypted = 0;
+ ASSERT_EQ(android::OK, attemptDecrypt(key, iv, encrypted, outputBuffer,
+ subSamples, numSubSamples,
+ &bytesDecrypted));
+ EXPECT_EQ(totalSize, bytesDecrypted);
+ EXPECT_EQ(0, memcmp(outputBuffer, decrypted, totalSize));
+ }
+};
+
+TEST_F(AesCtrDecryptorTest, DecryptsWithEmptyKey) {
+ const size_t kTotalSize = 64;
+ const size_t kNumSubsamples = 1;
+
+ // Test vectors from NIST-800-38A
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t source[kTotalSize] = { 0 };
+ uint8_t destination[kTotalSize] = { 0 };
+ SubSample subSamples[kNumSubsamples] = {
+ {0, 64}
+ };
+
+ size_t bytesDecrypted = 0;
+ Vector<uint8_t> keyVector;
+ keyVector.clear();
+
+ AesCtrDecryptor decryptor;
+ ASSERT_EQ(android::ERROR_DRM_DECRYPT, decryptor.decrypt(keyVector, iv,
+ &source[0], &destination[0],
+ &subSamples[0], kNumSubsamples, &bytesDecrypted));
+ ASSERT_EQ(0u, bytesDecrypted);
+}
+
+TEST_F(AesCtrDecryptorTest, DecryptsWithKeyTooLong) {
+ const size_t kTotalSize = 64;
+ const size_t kNumSubsamples = 1;
+
+ // Test vectors from NIST-800-38A
+ uint8_t key[kBlockSize * 2] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+ };
+
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t source[kTotalSize] = { 0 };
+ uint8_t destination[kTotalSize] = { 0 };
+ SubSample subSamples[kNumSubsamples] = {
+ {0, 64}
+ };
+
+ size_t bytesDecrypted = 0;
+ Vector<uint8_t> keyVector;
+ keyVector.appendArray(key, sizeof(key) / sizeof(uint8_t));
+
+ AesCtrDecryptor decryptor;
+ ASSERT_EQ(android::ERROR_DRM_DECRYPT, decryptor.decrypt(keyVector, iv,
+ &source[0], &destination[0],
+ &subSamples[0], kNumSubsamples, &bytesDecrypted));
+ ASSERT_EQ(0u, bytesDecrypted);
+}
+
+TEST_F(AesCtrDecryptorTest, DecryptsContiguousEncryptedBlock) {
+ const size_t kTotalSize = 64;
+ const size_t kNumSubsamples = 1;
+
+ // Test vectors from NIST-800-38A
+ Key key = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+ };
+
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t encrypted[kTotalSize] = {
+ 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
+ 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
+ 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
+ 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
+ 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
+ 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
+ 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
+ 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
+ };
+
+ uint8_t decrypted[kTotalSize] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+ };
+
+ SubSample subSamples[kNumSubsamples] = {
+ {0, 64}
+ };
+
+ attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
+ subSamples, kNumSubsamples);
+}
+
+TEST_F(AesCtrDecryptorTest, DecryptsAlignedBifurcatedEncryptedBlock) {
+ const size_t kTotalSize = 64;
+ const size_t kNumSubsamples = 2;
+
+ // Test vectors from NIST-800-38A
+ Key key = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+ };
+
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t encrypted[kTotalSize] = {
+ 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
+ 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
+ 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
+ 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
+ 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
+ 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
+ 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
+ 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
+ };
+
+ uint8_t decrypted[kTotalSize] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+ };
+
+ SubSample subSamples[kNumSubsamples] = {
+ {0, 32},
+ {0, 32}
+ };
+
+ attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
+ subSamples, kNumSubsamples);
+}
+
+TEST_F(AesCtrDecryptorTest, DecryptsUnalignedBifurcatedEncryptedBlock) {
+ const size_t kTotalSize = 64;
+ const size_t kNumSubsamples = 2;
+
+ // Test vectors from NIST-800-38A
+ Key key = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+ };
+
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t encrypted[kTotalSize] = {
+ 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
+ 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
+ 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
+ 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
+ 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
+ 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
+ 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
+ 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
+ };
+
+ uint8_t decrypted[kTotalSize] = {
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+ };
+
+ SubSample subSamples[kNumSubsamples] = {
+ {0, 29},
+ {0, 35}
+ };
+
+ attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
+ subSamples, kNumSubsamples);
+}
+
+TEST_F(AesCtrDecryptorTest, DecryptsOneMixedSubSample) {
+ const size_t kTotalSize = 72;
+ const size_t kNumSubsamples = 1;
+
+ // Based on test vectors from NIST-800-38A
+ Key key = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+ };
+
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t encrypted[kTotalSize] = {
+ // 8 clear bytes
+ 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
+ // 64 encrypted bytes
+ 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
+ 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
+ 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
+ 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
+ 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
+ 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
+ 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
+ 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
+ };
+
+ uint8_t decrypted[kTotalSize] = {
+ 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+ };
+
+ SubSample subSamples[kNumSubsamples] = {
+ {8, 64}
+ };
+
+ attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
+ subSamples, kNumSubsamples);
+}
+
+TEST_F(AesCtrDecryptorTest, DecryptsAlignedMixedSubSamples) {
+ const size_t kTotalSize = 80;
+ const size_t kNumSubsamples = 2;
+
+ // Based on test vectors from NIST-800-38A
+ Key key = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+ };
+
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t encrypted[kTotalSize] = {
+ // 8 clear bytes
+ 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
+ // 32 encrypted bytes
+ 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
+ 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
+ 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
+ 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
+ // 8 clear bytes
+ 0x94, 0xba, 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55,
+ // 32 encrypted bytes
+ 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
+ 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
+ 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
+ 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
+ };
+
+ uint8_t decrypted[kTotalSize] = {
+ 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
+ 0x94, 0xba, 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+ };
+
+ SubSample subSamples[kNumSubsamples] = {
+ {8, 32},
+ {8, 32}
+ };
+
+ attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
+ subSamples, kNumSubsamples);
+}
+
+TEST_F(AesCtrDecryptorTest, DecryptsUnalignedMixedSubSamples) {
+ const size_t kTotalSize = 80;
+ const size_t kNumSubsamples = 2;
+
+ // Based on test vectors from NIST-800-38A
+ Key key = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+ };
+
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t encrypted[kTotalSize] = {
+ // 8 clear bytes
+ 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
+ // 30 encrypted bytes
+ 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
+ 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
+ 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
+ 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff,
+ // 8 clear bytes
+ 0x94, 0xba, 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55,
+ // 34 encrypted bytes
+ 0xfd, 0xff, 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5,
+ 0xd3, 0x5e, 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0,
+ 0x3e, 0xab, 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe,
+ 0x03, 0xd1, 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00,
+ 0x9c, 0xee
+ };
+
+ uint8_t decrypted[kTotalSize] = {
+ 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
+ 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
+ 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
+ 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
+ 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x94, 0xba,
+ 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55, 0x8e, 0x51,
+ 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
+ 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
+ 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
+ 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
+ };
+
+ SubSample subSamples[kNumSubsamples] = {
+ {8, 30},
+ {8, 34}
+ };
+
+ attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
+ subSamples, kNumSubsamples);
+}
+
+TEST_F(AesCtrDecryptorTest, DecryptsComplexMixedSubSamples) {
+ const size_t kTotalSize = 72;
+ const size_t kNumSubsamples = 6;
+
+ // Based on test vectors from NIST-800-38A
+ Key key = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+ };
+
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t encrypted[kTotalSize] = {
+ // 4 clear bytes
+ 0xf0, 0x13, 0xca, 0xc7,
+ // 1 encrypted byte
+ 0x87,
+ // 9 encrypted bytes
+ 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26, 0x1b,
+ 0xef,
+ // 11 clear bytes
+ 0x81, 0x4f, 0x24, 0x87, 0x0e, 0xde, 0xba, 0xad,
+ 0x11, 0x9b, 0x46,
+ // 20 encrypted bytes
+ 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
+ 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
+ 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff,
+ // 8 clear bytes
+ 0x94, 0xba, 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55,
+ // 3 clear bytes
+ 0x10, 0xf5, 0x22,
+ // 14 encrypted bytes
+ 0xfd, 0xff, 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5,
+ 0xd3, 0x5e, 0x5b, 0x4f, 0x09, 0x02,
+ // 2 clear bytes
+ 0x02, 0x01
+ };
+
+ uint8_t decrypted[kTotalSize] = {
+ 0xf0, 0x13, 0xca, 0xc7, 0x6b, 0xc1, 0xbe, 0xe2,
+ 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x81, 0x4f,
+ 0x24, 0x87, 0x0e, 0xde, 0xba, 0xad, 0x11, 0x9b,
+ 0x46, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, 0xae,
+ 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e,
+ 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x94, 0xba, 0x88,
+ 0x2e, 0x0e, 0x12, 0x11, 0x55, 0x10, 0xf5, 0x22,
+ 0x8e, 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c,
+ 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x02, 0x01
+ };
+
+ SubSample subSamples[kNumSubsamples] = {
+ {4, 1},
+ {0, 9},
+ {11, 20},
+ {8, 0},
+ {3, 14},
+ {2, 0}
+ };
+
+ attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
+ subSamples, kNumSubsamples);
+}
+
+} // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/default/tests/Android.bp b/drm/mediadrm/plugins/clearkey/default/tests/Android.bp
new file mode 100644
index 0000000..4419865
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/default/tests/Android.bp
@@ -0,0 +1,42 @@
+//
+// Copyright (C) 2014 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// ----------------------------------------------------------------
+// Builds ClearKey Drm Tests
+//
+
+cc_test {
+ name: "ClearKeyDrmUnitTest",
+ vendor: true,
+
+ cflags: ["-Wall", "-Werror"],
+
+ srcs: [
+ "AesCtrDecryptorUnittest.cpp",
+ "InitDataParserUnittest.cpp",
+ "JsonWebKeyUnittest.cpp",
+ ],
+
+ static_libs: ["libclearkeycommon"],
+
+ shared_libs: [
+ "libcrypto",
+ "libdrmclearkeyplugin",
+ "liblog",
+ "libstagefright_foundation",
+ "libutils",
+ ],
+ header_libs: ["media_plugin_headers"],
+}
diff --git a/drm/mediadrm/plugins/clearkey/tests/InitDataParserUnittest.cpp b/drm/mediadrm/plugins/clearkey/default/tests/InitDataParserUnittest.cpp
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/tests/InitDataParserUnittest.cpp
rename to drm/mediadrm/plugins/clearkey/default/tests/InitDataParserUnittest.cpp
diff --git a/drm/mediadrm/plugins/clearkey/tests/JsonWebKeyUnittest.cpp b/drm/mediadrm/plugins/clearkey/default/tests/JsonWebKeyUnittest.cpp
similarity index 100%
rename from drm/mediadrm/plugins/clearkey/tests/JsonWebKeyUnittest.cpp
rename to drm/mediadrm/plugins/clearkey/default/tests/JsonWebKeyUnittest.cpp
diff --git a/drm/mediadrm/plugins/clearkey/hidl/AesCtrDecryptor.cpp b/drm/mediadrm/plugins/clearkey/hidl/AesCtrDecryptor.cpp
new file mode 100644
index 0000000..2fce0790
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/AesCtrDecryptor.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "hidl_ClearkeyDecryptor"
+#include <utils/Log.h>
+
+#include <openssl/aes.h>
+
+#include "AesCtrDecryptor.h"
+#include "ClearKeyTypes.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::SubSample;
+using ::android::hardware::drm::V1_0::Status;
+
+static const size_t kBlockBitCount = kBlockSize * 8;
+
+Status AesCtrDecryptor::decrypt(
+ const std::vector<uint8_t>& key,
+ const Iv iv, const uint8_t* source,
+ uint8_t* destination,
+ const std::vector<SubSample> subSamples,
+ size_t numSubSamples,
+ size_t* bytesDecryptedOut) {
+ uint32_t blockOffset = 0;
+ uint8_t previousEncryptedCounter[kBlockSize];
+ memset(previousEncryptedCounter, 0, kBlockSize);
+
+ if (key.size() != kBlockSize || (sizeof(Iv) / sizeof(uint8_t)) != kBlockSize) {
+ android_errorWriteLog(0x534e4554, "63982768");
+ return Status::ERROR_DRM_DECRYPT;
+ }
+
+ size_t offset = 0;
+ AES_KEY opensslKey;
+ AES_set_encrypt_key(key.data(), kBlockBitCount, &opensslKey);
+ Iv opensslIv;
+ memcpy(opensslIv, iv, sizeof(opensslIv));
+
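+ // The counter state (opensslIv, previousEncryptedCounter, blockOffset) persists
+ // across subsamples, so all encrypted regions are decrypted as one continuous
+ // CTR keystream regardless of how the clear regions are interleaved.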
+ for (size_t i = 0; i < numSubSamples; ++i) {
+ const SubSample& subSample = subSamples[i];
+
+ if (subSample.numBytesOfClearData > 0) {
+ memcpy(destination + offset, source + offset,
+ subSample.numBytesOfClearData);
+ offset += subSample.numBytesOfClearData;
+ }
+
+ if (subSample.numBytesOfEncryptedData > 0) {
+ AES_ctr128_encrypt(source + offset, destination + offset,
+ subSample.numBytesOfEncryptedData, &opensslKey,
+ opensslIv, previousEncryptedCounter,
+ &blockOffset);
+ offset += subSample.numBytesOfEncryptedData;
+ }
+ }
+
+ *bytesDecryptedOut = offset;
+ return Status::OK;
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
new file mode 100644
index 0000000..341d4f6
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_binary {
+ name: "android.hardware.drm@1.1-service.clearkey",
+ vendor: true,
+
+ srcs: [
+ "AesCtrDecryptor.cpp",
+ "Base64.cpp",
+ "Buffer.cpp",
+ "CreatePluginFactories.cpp",
+ "CryptoFactory.cpp",
+ "CryptoPlugin.cpp",
+ "DrmFactory.cpp",
+ "DrmPlugin.cpp",
+ "InitDataParser.cpp",
+ "JsonWebKey.cpp",
+ "Session.cpp",
+ "SessionLibrary.cpp",
+ "service.cpp",
+ ],
+
+ relative_install_path: "hw",
+
+ cflags: ["-Wall", "-Werror"],
+ init_rc: ["android.hardware.drm@1.1-service.clearkey.rc"],
+
+ shared_libs: [
+ "android.hardware.drm@1.0",
+ "android.hardware.drm@1.1",
+ "libbase",
+ "libbinder",
+ "libcrypto",
+ "libhidlbase",
+ "libhidlmemory",
+ "libhidltransport",
+ "liblog",
+ "libutils",
+ ],
+
+ static_libs: [
+ "libclearkeycommon",
+ "libjsmn",
+ ],
+
+ local_include_dirs: ["include"],
+
+ export_static_lib_headers: ["libjsmn"],
+
+ sanitize: {
+ integer_overflow: true,
+ },
+}
+
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Base64.cpp b/drm/mediadrm/plugins/clearkey/hidl/Base64.cpp
new file mode 100644
index 0000000..c2ed751
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/Base64.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Base64.h"
+
+#include <string>
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+sp<Buffer> decodeBase64(const std::string &s) {
+ size_t n = s.size();
+
+ if ((n % 4) != 0) {
+ return nullptr;
+ }
+
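+ // Count trailing '=' padding characters; up to three are tolerated here,
+ // although valid base64 uses at most two.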
+ size_t padding = 0;
+ if (n >= 1 && s.c_str()[n - 1] == '=') {
+ padding = 1;
+
+ if (n >= 2 && s.c_str()[n - 2] == '=') {
+ padding = 2;
+
+ if (n >= 3 && s.c_str()[n - 3] == '=') {
+ padding = 3;
+ }
+ }
+ }
+
+ // We divide first to avoid overflow. It's OK to do this because we
+ // already made sure that n % 4 == 0.
+ size_t outLen = (n / 4) * 3 - padding;
+
+ sp<Buffer> buffer = new Buffer(outLen);
+ uint8_t *out = buffer->data();
+ if (out == nullptr || buffer->size() < outLen) {
+ return nullptr;
+ }
+
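+ // Decode each group of four input characters into 24 bits and emit up to three
+ // output bytes; '-' and '_' are accepted as URL-safe aliases for '+' and '/'.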
+ size_t j = 0;
+ uint32_t accum = 0;
+ for (size_t i = 0; i < n; ++i) {
+ char c = s.c_str()[i];
+ unsigned value;
+ if (c >= 'A' && c <= 'Z') {
+ value = c - 'A';
+ } else if (c >= 'a' && c <= 'z') {
+ value = 26 + c - 'a';
+ } else if (c >= '0' && c <= '9') {
+ value = 52 + c - '0';
+ } else if (c == '+' || c == '-') {
+ value = 62;
+ } else if (c == '/' || c == '_') {
+ value = 63;
+ } else if (c != '=') {
+ return nullptr;
+ } else {
+ if (i < n - padding) {
+ return nullptr;
+ }
+
+ value = 0;
+ }
+
+ accum = (accum << 6) | value;
+
+ if (((i + 1) % 4) == 0) {
+ if (j < outLen) { out[j++] = (accum >> 16); }
+ if (j < outLen) { out[j++] = (accum >> 8) & 0xff; }
+ if (j < outLen) { out[j++] = accum & 0xff; }
+
+ accum = 0;
+ }
+ }
+
+ return buffer;
+}
+
+static char encode6Bit(unsigned x) {
+ if (x <= 25) {
+ return 'A' + x;
+ } else if (x <= 51) {
+ return 'a' + x - 26;
+ } else if (x <= 61) {
+ return '0' + x - 52;
+ } else if (x == 62) {
+ return '+';
+ } else {
+ return '/';
+ }
+}
+
+void encodeBase64(const void *_data, size_t size, std::string *out) {
+ out->clear();
+
+ const uint8_t *data = (const uint8_t *)_data;
+
+ size_t i;
+ for (i = 0; i < (size / 3) * 3; i += 3) {
+ uint8_t x1 = data[i];
+ uint8_t x2 = data[i + 1];
+ uint8_t x3 = data[i + 2];
+
+ out->push_back(encode6Bit(x1 >> 2));
+ out->push_back(encode6Bit((x1 << 4 | x2 >> 4) & 0x3f));
+ out->push_back(encode6Bit((x2 << 2 | x3 >> 6) & 0x3f));
+ out->push_back(encode6Bit(x3 & 0x3f));
+ }
+ switch (size % 3) {
+ case 0:
+ break;
+ case 2:
+ {
+ uint8_t x1 = data[i];
+ uint8_t x2 = data[i + 1];
+ out->push_back(encode6Bit(x1 >> 2));
+ out->push_back(encode6Bit((x1 << 4 | x2 >> 4) & 0x3f));
+ out->push_back(encode6Bit((x2 << 2) & 0x3f));
+ out->push_back('=');
+ break;
+ }
+ default:
+ {
+ uint8_t x1 = data[i];
+ out->push_back(encode6Bit(x1 >> 2));
+ out->push_back(encode6Bit((x1 << 4) & 0x3f));
+ out->append("==");
+ break;
+ }
+ }
+}
+
+void encodeBase64Url(const void *_data, size_t size, std::string *out) {
+ encodeBase64(_data, size, out);
+
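+ // Rewrite the standard-alphabet output into the URL-safe alphabet
+ // ('+' becomes '-', '/' becomes '_').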
+ if ((std::string::npos != out->find("+")) ||
+ (std::string::npos != out->find("/"))) {
+ size_t outLen = out->size();
+ char *base64url = new char[outLen];
+ for (size_t i = 0; i < outLen; ++i) {
+ if (out->c_str()[i] == '+')
+ base64url[i] = '-';
+ else if (out->c_str()[i] == '/')
+ base64url[i] = '_';
+ else
+ base64url[i] = out->c_str()[i];
+ }
+
+ out->assign(base64url, outLen);
+ delete[] base64url;
+ }
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Buffer.cpp b/drm/mediadrm/plugins/clearkey/hidl/Buffer.cpp
new file mode 100644
index 0000000..e58f58a
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/Buffer.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Buffer.h"
+
+#include <android/hardware/drm/1.0/types.h>
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+Buffer::Buffer(size_t capacity)
+ : mRangeOffset(0),
+ mOwnsData(true) {
+ mData = malloc(capacity);
+ if (mData == nullptr) {
+ mCapacity = 0;
+ mRangeLength = 0;
+ } else {
+ mCapacity = capacity;
+ mRangeLength = capacity;
+ }
+}
+
+Buffer::~Buffer() {
+ if (mOwnsData) {
+ if (mData != nullptr) {
+ free(mData);
+ mData = nullptr;
+ }
+ }
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CreatePluginFactories.cpp b/drm/mediadrm/plugins/clearkey/hidl/CreatePluginFactories.cpp
new file mode 100644
index 0000000..1ba5c6a
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/CreatePluginFactories.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "CreatePluginFactories.h"
+
+#include "CryptoFactory.h"
+#include "DrmFactory.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+extern "C" {
+
+IDrmFactory* createDrmFactory() {
+ return new DrmFactory();
+}
+
+ICryptoFactory* createCryptoFactory() {
+ return new CryptoFactory();
+}
+
+} // extern "C"
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoFactory.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoFactory.cpp
new file mode 100644
index 0000000..0848cef
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoFactory.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "hidl_ClearKeyCryptoFactory"
+#include <utils/Log.h>
+
+#include "CryptoFactory.h"
+
+#include "ClearKeyUUID.h"
+#include "CryptoPlugin.h"
+#include "TypeConvert.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+Return<bool> CryptoFactory::isCryptoSchemeSupported(
+ const hidl_array<uint8_t, 16> &uuid)
+{
+ return clearkeydrm::isClearKeyUUID(uuid.data());
+}
+
+Return<void> CryptoFactory::createPlugin(
+ const hidl_array<uint8_t, 16> &uuid,
+ const hidl_vec<uint8_t> &initData,
+ createPlugin_cb _hidl_cb) {
+
+ if (!isCryptoSchemeSupported(uuid.data())) {
+ ALOGE("Clearkey Drm HAL: failed to create clearkey plugin, " \
+ "invalid crypto scheme");
+ _hidl_cb(Status::BAD_VALUE, nullptr);
+ return Void();
+ }
+
+ CryptoPlugin *cryptoPlugin = new CryptoPlugin(initData);
+ Status status = cryptoPlugin->getInitStatus();
+ if (status == Status::OK) {
+ _hidl_cb(Status::OK, cryptoPlugin);
+ } else {
+ delete cryptoPlugin;
+ _hidl_cb(status, nullptr);
+ }
+ return Void();
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
new file mode 100644
index 0000000..f33f94e
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "hidl_ClearKeyCryptoPlugin"
+#include <utils/Log.h>
+
+#include "CryptoPlugin.h"
+#include "SessionLibrary.h"
+#include "TypeConvert.h"
+
+#include <hidlmemory/mapping.h>
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::BufferType;
+
+Return<void> CryptoPlugin::setSharedBufferBase(
+ const hidl_memory& base, uint32_t bufferId) {
+ sp<IMemory> hidlMemory = mapMemory(base);
+ ALOGE_IF(hidlMemory == nullptr, "mapMemory returns nullptr");
+
+ // allow mapMemory to return nullptr
+ mSharedBufferMap[bufferId] = hidlMemory;
+ return Void();
+}
+
+// Returns negative values for error codes and positive values for the size of
+// the decrypted data. In theory, the output size can be larger than the input
+// size, but in practice this will never happen for AES-CTR.
+Return<void> CryptoPlugin::decrypt(
+ bool secure,
+ const hidl_array<uint8_t, KEY_ID_SIZE>& keyId,
+ const hidl_array<uint8_t, KEY_IV_SIZE>& iv,
+ Mode mode,
+ const Pattern& pattern,
+ const hidl_vec<SubSample>& subSamples,
+ const SharedBuffer& source,
+ uint64_t offset,
+ const DestinationBuffer& destination,
+ decrypt_cb _hidl_cb) {
+ UNUSED(pattern);
+
+ if (secure) {
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, 0,
+ "Secure decryption is not supported with ClearKey.");
+ return Void();
+ }
+
+ if (mSharedBufferMap.find(source.bufferId) == mSharedBufferMap.end()) {
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, 0,
+ "source decrypt buffer base not set");
+ return Void();
+ }
+
+ if (destination.type == BufferType::SHARED_MEMORY) {
+ const SharedBuffer& dest = destination.nonsecureMemory;
+ if (mSharedBufferMap.find(dest.bufferId) == mSharedBufferMap.end()) {
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, 0,
+ "destination decrypt buffer base not set");
+ return Void();
+ }
+ }
+
+ sp<IMemory> sourceBase = mSharedBufferMap[source.bufferId];
+ if (sourceBase == nullptr) {
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, 0, "source is a nullptr");
+ return Void();
+ }
+
+ if (source.offset + offset + source.size > sourceBase->getSize()) {
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, 0, "invalid buffer size");
+ return Void();
+ }
+
+ uint8_t *base = static_cast<uint8_t *>
+ (static_cast<void *>(sourceBase->getPointer()));
+ uint8_t* srcPtr = static_cast<uint8_t *>(base + source.offset + offset);
+ void* destPtr = NULL;
+ if (destination.type == BufferType::SHARED_MEMORY) {
+ const SharedBuffer& destBuffer = destination.nonsecureMemory;
+ sp<IMemory> destBase = mSharedBufferMap[destBuffer.bufferId];
+ if (destBase == nullptr) {
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, 0, "destination is a nullptr");
+ return Void();
+ }
+
+ if (destBuffer.offset + destBuffer.size > destBase->getSize()) {
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, 0, "invalid buffer size");
+ return Void();
+ }
+ base = static_cast<uint8_t *>(static_cast<void *>(destBase->getPointer()));
+ destPtr = static_cast<void *>(base + destination.nonsecureMemory.offset);
+ } else if (destination.type == BufferType::NATIVE_HANDLE) {
+ native_handle_t *handle = const_cast<native_handle_t *>(
+ destination.secureMemory.getNativeHandle());
+ destPtr = static_cast<void *>(handle);
+ }
+
+ // Calculate the output buffer size and determine if any subsamples are
+ // encrypted.
+ size_t destSize = 0;
+ bool haveEncryptedSubsamples = false;
+ for (size_t i = 0; i < subSamples.size(); i++) {
+ const SubSample &subSample = subSamples[i];
+ destSize += subSample.numBytesOfClearData;
+ destSize += subSample.numBytesOfEncryptedData;
+ if (subSample.numBytesOfEncryptedData > 0) {
+ haveEncryptedSubsamples = true;
+ }
+ }
+
+ if (mode == Mode::UNENCRYPTED) {
+ if (haveEncryptedSubsamples) {
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, 0,
+ "Encrypted subsamples found in allegedly unencrypted data.");
+ return Void();
+ }
+
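+ // Copy the clear bytes of each subsample straight through; the encrypted
+ // byte counts were verified to be zero above.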
+ size_t offset = 0;
+ for (size_t i = 0; i < subSamples.size(); ++i) {
+ const SubSample& subSample = subSamples[i];
+ if (subSample.numBytesOfClearData != 0) {
+ memcpy(reinterpret_cast<uint8_t*>(destPtr) + offset,
+ reinterpret_cast<const uint8_t*>(srcPtr) + offset,
+ subSample.numBytesOfClearData);
+ offset += subSample.numBytesOfClearData;
+ }
+ }
+
+ _hidl_cb(Status::OK, static_cast<ssize_t>(offset), "");
+ return Void();
+ } else if (mode == Mode::AES_CTR) {
+ size_t bytesDecrypted;
+ Status res = mSession->decrypt(keyId.data(), iv.data(), srcPtr,
+ static_cast<uint8_t*>(destPtr), toVector(subSamples), &bytesDecrypted);
+ if (res == Status::OK) {
+ _hidl_cb(Status::OK, static_cast<ssize_t>(bytesDecrypted), "");
+ return Void();
+ } else {
+ _hidl_cb(Status::ERROR_DRM_DECRYPT, static_cast<ssize_t>(res),
+ "Decryption Error");
+ return Void();
+ }
+ } else {
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, 0,
+ "Selected encryption mode is not supported by the ClearKey DRM Plugin.");
+ return Void();
+ }
+}
+
+Return<Status> CryptoPlugin::setMediaDrmSession(
+ const hidl_vec<uint8_t>& sessionId) {
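+ // An empty session id detaches the plugin from any session.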
+ if (!sessionId.size()) {
+ mSession = nullptr;
+ } else {
+ mSession = SessionLibrary::get()->findSession(sessionId);
+ if (!mSession.get()) {
+ return Status::ERROR_DRM_SESSION_NOT_OPENED;
+ }
+ }
+ return Status::OK;
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
new file mode 100644
index 0000000..77557f9
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "hidl_ClearKeyDrmFactory"
+#include <utils/Log.h>
+
+#include <utils/Errors.h>
+
+#include "DrmFactory.h"
+
+#include "DrmPlugin.h"
+#include "ClearKeyUUID.h"
+#include "MimeType.h"
+#include "SessionLibrary.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::Void;
+
+Return<bool> DrmFactory::isCryptoSchemeSupported(
+ const hidl_array<uint8_t, 16>& uuid) {
+ return clearkeydrm::isClearKeyUUID(uuid.data());
+}
+
+Return<bool> DrmFactory::isContentTypeSupported(const hidl_string &mimeType) {
+ // This should match the mimeTypes handed by InitDataParser.
+ return mimeType == kIsoBmffVideoMimeType ||
+ mimeType == kIsoBmffAudioMimeType ||
+ mimeType == kCencInitDataFormat ||
+ mimeType == kWebmVideoMimeType ||
+ mimeType == kWebmAudioMimeType ||
+ mimeType == kWebmInitDataFormat;
+}
+
+Return<void> DrmFactory::createPlugin(
+ const hidl_array<uint8_t, 16>& uuid,
+ const hidl_string& appPackageName,
+ createPlugin_cb _hidl_cb) {
+ UNUSED(appPackageName);
+
+ DrmPlugin *plugin = NULL;
+ if (!isCryptoSchemeSupported(uuid.data())) {
+ ALOGE("Clear key Drm HAL: failed to create drm plugin, " \
+ "invalid crypto scheme");
+ _hidl_cb(Status::BAD_VALUE, plugin);
+ return Void();
+ }
+
+ plugin = new DrmPlugin(SessionLibrary::get());
+ _hidl_cb(Status::OK, plugin);
+ return Void();
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
new file mode 100644
index 0000000..d51e29d
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -0,0 +1,573 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "hidl_ClearKeyPlugin"
+#include <utils/Log.h>
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include "DrmPlugin.h"
+#include "ClearKeyDrmProperties.h"
+#include "Session.h"
+#include "TypeConvert.h"
+
+namespace {
+const int kSecureStopIdStart = 100;
+const std::string kStreaming("Streaming");
+const std::string kOffline("Offline");
+const std::string kTrue("True");
+
+const std::string kQueryKeyLicenseType("LicenseType");
+ // Value: "Streaming" or "Offline"
+const std::string kQueryKeyPlayAllowed("PlayAllowed");
+ // Value: "True" or "False"
+const std::string kQueryKeyRenewAllowed("RenewAllowed");
+ // Value: "True" or "False"
+
+const int kSecureStopIdSize = 10;
+
+std::vector<uint8_t> uint32ToVector(uint32_t value) {
+ // kSecureStopIdSize bytes hold up to nine decimal digits plus the
+ // null terminator appended by snprintf.
+ char buffer[kSecureStopIdSize];
+ memset(buffer, 0, kSecureStopIdSize);
+ snprintf(buffer, kSecureStopIdSize, "%" PRIu32, value);
+ return std::vector<uint8_t>(buffer, buffer + sizeof(buffer));
+}
+
+} // unnamed namespace
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+DrmPlugin::DrmPlugin(SessionLibrary* sessionLibrary)
+ : mSessionLibrary(sessionLibrary),
+ mOpenSessionOkCount(0),
+ mCloseSessionOkCount(0),
+ mCloseSessionNotOpenedCount(0),
+ mNextSecureStopId(kSecureStopIdStart) {
+ mPlayPolicy.clear();
+ initProperties();
+ mSecureStops.clear();
+}
+
+void DrmPlugin::initProperties() {
+ mStringProperties.clear();
+ mStringProperties[kVendorKey] = kVendorValue;
+ mStringProperties[kVersionKey] = kVersionValue;
+ mStringProperties[kPluginDescriptionKey] = kPluginDescriptionValue;
+ mStringProperties[kAlgorithmsKey] = kAlgorithmsValue;
+ mStringProperties[kListenerTestSupportKey] = kListenerTestSupportValue;
+
+ std::vector<uint8_t> valueVector;
+ valueVector.clear();
+ valueVector.insert(valueVector.end(),
+ kTestDeviceIdData, kTestDeviceIdData + sizeof(kTestDeviceIdData) / sizeof(uint8_t));
+ mByteArrayProperties[kDeviceIdKey] = valueVector;
+
+ valueVector.clear();
+ valueVector.insert(valueVector.end(),
+ kMetricsData, kMetricsData + sizeof(kMetricsData) / sizeof(uint8_t));
+ mByteArrayProperties[kMetricsKey] = valueVector;
+}
+
+// The secure stop in the ClearKey implementation is not installed securely.
+// This function merely creates a test environment for exercising the secure stop APIs.
+// The content of this secure stop is implementation dependent; the clearkey
+// secure stop does not serve as a reference implementation.
+void DrmPlugin::installSecureStop(const hidl_vec<uint8_t>& sessionId) {
+ ClearkeySecureStop clearkeySecureStop;
+ clearkeySecureStop.id = uint32ToVector(++mNextSecureStopId);
+ clearkeySecureStop.data.assign(sessionId.begin(), sessionId.end());
+
+ mSecureStops.insert(std::pair<std::vector<uint8_t>, ClearkeySecureStop>(
+ clearkeySecureStop.id, clearkeySecureStop));
+}
+
+Return<void> DrmPlugin::openSession(openSession_cb _hidl_cb) {
+ sp<Session> session = mSessionLibrary->createSession();
+ std::vector<uint8_t> sessionId = session->sessionId();
+
+ Status status = setSecurityLevel(sessionId, SecurityLevel::SW_SECURE_CRYPTO);
+ _hidl_cb(status, toHidlVec(sessionId));
+ mOpenSessionOkCount++;
+ return Void();
+}
+
+Return<void> DrmPlugin::openSession_1_1(SecurityLevel securityLevel,
+ openSession_1_1_cb _hidl_cb) {
+ sp<Session> session = mSessionLibrary->createSession();
+ std::vector<uint8_t> sessionId = session->sessionId();
+
+ Status status = setSecurityLevel(sessionId, securityLevel);
+ _hidl_cb(status, toHidlVec(sessionId));
+ mOpenSessionOkCount++;
+ return Void();
+}
+
+Return<Status> DrmPlugin::closeSession(const hidl_vec<uint8_t>& sessionId) {
+ if (sessionId.size() == 0) {
+ return Status::BAD_VALUE;
+ }
+
+ sp<Session> session = mSessionLibrary->findSession(toVector(sessionId));
+ if (session.get()) {
+ mCloseSessionOkCount++;
+ mSessionLibrary->destroySession(session);
+ return Status::OK;
+ }
+ mCloseSessionNotOpenedCount++;
+ return Status::ERROR_DRM_SESSION_NOT_OPENED;
+}
+
+Status DrmPlugin::getKeyRequestCommon(const hidl_vec<uint8_t>& scope,
+ const hidl_vec<uint8_t>& initData,
+ const hidl_string& mimeType,
+ KeyType keyType,
+ const hidl_vec<KeyValue>& optionalParameters,
+ std::vector<uint8_t> *request,
+ KeyRequestType *keyRequestType,
+ std::string *defaultUrl) {
+ UNUSED(optionalParameters);
+
+ *defaultUrl = "";
+ *keyRequestType = KeyRequestType::UNKNOWN;
+ *request = std::vector<uint8_t>();
+
+ if (scope.size() == 0) {
+ return Status::BAD_VALUE;
+ }
+
+ if (keyType != KeyType::STREAMING) {
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ sp<Session> session = mSessionLibrary->findSession(toVector(scope));
+ if (!session.get()) {
+ return Status::ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ Status status = session->getKeyRequest(initData, mimeType, request);
+ *keyRequestType = KeyRequestType::INITIAL;
+ return status;
+}
+
+Return<void> DrmPlugin::getKeyRequest(
+ const hidl_vec<uint8_t>& scope,
+ const hidl_vec<uint8_t>& initData,
+ const hidl_string& mimeType,
+ KeyType keyType,
+ const hidl_vec<KeyValue>& optionalParameters,
+ getKeyRequest_cb _hidl_cb) {
+ UNUSED(optionalParameters);
+
+ KeyRequestType keyRequestType = KeyRequestType::UNKNOWN;
+ std::string defaultUrl("");
+ std::vector<uint8_t> request;
+ Status status = getKeyRequestCommon(
+ scope, initData, mimeType, keyType, optionalParameters,
+ &request, &keyRequestType, &defaultUrl);
+
+ _hidl_cb(status, toHidlVec(request),
+ static_cast<drm::V1_0::KeyRequestType>(keyRequestType),
+ hidl_string(defaultUrl));
+ return Void();
+}
+
+Return<void> DrmPlugin::getKeyRequest_1_1(
+ const hidl_vec<uint8_t>& scope,
+ const hidl_vec<uint8_t>& initData,
+ const hidl_string& mimeType,
+ KeyType keyType,
+ const hidl_vec<KeyValue>& optionalParameters,
+ getKeyRequest_1_1_cb _hidl_cb) {
+ UNUSED(optionalParameters);
+
+ KeyRequestType keyRequestType = KeyRequestType::UNKNOWN;
+ std::string defaultUrl("");
+ std::vector<uint8_t> request;
+ Status status = getKeyRequestCommon(
+ scope, initData, mimeType, keyType, optionalParameters,
+ &request, &keyRequestType, &defaultUrl);
+
+ _hidl_cb(status, toHidlVec(request), keyRequestType, hidl_string(defaultUrl));
+ return Void();
+}
+
+void DrmPlugin::setPlayPolicy() {
+ mPlayPolicy.clear();
+
+ KeyValue policy;
+ policy.key = kQueryKeyLicenseType;
+ policy.value = kStreaming;
+ mPlayPolicy.push_back(policy);
+
+ policy.key = kQueryKeyPlayAllowed;
+ policy.value = kTrue;
+ mPlayPolicy.push_back(policy);
+
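+ // policy.value still holds kTrue from the previous assignment.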
+ policy.key = kQueryKeyRenewAllowed;
+ mPlayPolicy.push_back(policy);
+}
+
+Return<void> DrmPlugin::provideKeyResponse(
+ const hidl_vec<uint8_t>& scope,
+ const hidl_vec<uint8_t>& response,
+ provideKeyResponse_cb _hidl_cb) {
+ if (scope.size() == 0 || response.size() == 0) {
+ // Returns empty keySetId
+ _hidl_cb(Status::BAD_VALUE, hidl_vec<uint8_t>());
+ return Void();
+ }
+
+ sp<Session> session = mSessionLibrary->findSession(toVector(scope));
+ if (!session.get()) {
+ _hidl_cb(Status::ERROR_DRM_SESSION_NOT_OPENED, hidl_vec<uint8_t>());
+ return Void();
+ }
+
+ setPlayPolicy();
+ std::vector<uint8_t> keySetId;
+ Status status = session->provideKeyResponse(response);
+ if (status == Status::OK) {
+ // This is for testing AMediaDrm_setOnEventListener only.
+ sendEvent(EventType::VENDOR_DEFINED, 0, scope);
+ keySetId.clear();
+ }
+
+ installSecureStop(scope);
+
+ // Returns status and empty keySetId
+ _hidl_cb(status, toHidlVec(keySetId));
+ return Void();
+}
+
+Return<void> DrmPlugin::getPropertyString(
+ const hidl_string& propertyName, getPropertyString_cb _hidl_cb) {
+ std::string name(propertyName.c_str());
+ std::string value;
+
+ if (name == kVendorKey) {
+ value = mStringProperties[kVendorKey];
+ } else if (name == kVersionKey) {
+ value = mStringProperties[kVersionKey];
+ } else if (name == kPluginDescriptionKey) {
+ value = mStringProperties[kPluginDescriptionKey];
+ } else if (name == kAlgorithmsKey) {
+ value = mStringProperties[kAlgorithmsKey];
+ } else if (name == kListenerTestSupportKey) {
+ value = mStringProperties[kListenerTestSupportKey];
+ } else {
+ ALOGE("App requested unknown string property %s", name.c_str());
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, "");
+ return Void();
+ }
+ _hidl_cb(Status::OK, value.c_str());
+ return Void();
+}
+
+Return<void> DrmPlugin::getPropertyByteArray(
+ const hidl_string& propertyName, getPropertyByteArray_cb _hidl_cb) {
+ std::map<std::string, std::vector<uint8_t> >::iterator itr =
+ mByteArrayProperties.find(std::string(propertyName.c_str()));
+ if (itr == mByteArrayProperties.end()) {
+ ALOGE("App requested unknown property: %s", propertyName.c_str());
+ _hidl_cb(Status::BAD_VALUE, std::vector<uint8_t>());
+ return Void();
+ }
+ _hidl_cb(Status::OK, itr->second);
+ return Void();
+
+}
+
+Return<Status> DrmPlugin::setPropertyString(
+ const hidl_string& name, const hidl_string& value) {
+ std::string immutableKeys;
+ immutableKeys.append(kAlgorithmsKey + ",");
+ immutableKeys.append(kPluginDescriptionKey + ",");
+ immutableKeys.append(kVendorKey + ",");
+ immutableKeys.append(kVersionKey + ",");
+
+ std::string key = std::string(name.c_str());
+ if (immutableKeys.find(key) != std::string::npos) {
+ ALOGD("Cannot set immutable property: %s", key.c_str());
+ return Status::BAD_VALUE;
+ }
+
+ std::map<std::string, std::string>::iterator itr =
+ mStringProperties.find(key);
+ if (itr == mStringProperties.end()) {
+ ALOGE("Cannot set undefined property string, key=%s", key.c_str());
+ return Status::BAD_VALUE;
+ }
+
+ mStringProperties[key] = std::string(value.c_str());
+ return Status::OK;
+}
+
+Return<Status> DrmPlugin::setPropertyByteArray(
+ const hidl_string& name, const hidl_vec<uint8_t>& value) {
+ UNUSED(value);
+ if (name == kDeviceIdKey) {
+ ALOGD("Cannot set immutable property: %s", name.c_str());
+ return Status::BAD_VALUE;
+ }
+
+ // Setting of undefined properties is not supported
+ ALOGE("Failed to set property byte array, key=%s", name.c_str());
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+}
+
+Return<void> DrmPlugin::queryKeyStatus(
+ const hidl_vec<uint8_t>& sessionId,
+ queryKeyStatus_cb _hidl_cb) {
+
+ if (sessionId.size() == 0) {
+ // Returns empty key status KeyValue pair
+ _hidl_cb(Status::BAD_VALUE, hidl_vec<KeyValue>());
+ return Void();
+ }
+
+ std::vector<KeyValue> infoMapVec;
+ infoMapVec.clear();
+
+ KeyValue keyValuePair;
+ for (size_t i = 0; i < mPlayPolicy.size(); ++i) {
+ keyValuePair.key = mPlayPolicy[i].key;
+ keyValuePair.value = mPlayPolicy[i].value;
+ infoMapVec.push_back(keyValuePair);
+ }
+ _hidl_cb(Status::OK, toHidlVec(infoMapVec));
+ return Void();
+}
+
+Return<void> DrmPlugin::getNumberOfSessions(getNumberOfSessions_cb _hidl_cb) {
+ uint32_t currentSessions = mSessionLibrary->numOpenSessions();
+ uint32_t maxSessions = 10;
+ _hidl_cb(Status::OK, currentSessions, maxSessions);
+ return Void();
+}
+
+Return<void> DrmPlugin::getSecurityLevel(const hidl_vec<uint8_t>& sessionId,
+ getSecurityLevel_cb _hidl_cb) {
+ if (sessionId.size() == 0) {
+ _hidl_cb(Status::BAD_VALUE, SecurityLevel::UNKNOWN);
+ return Void();
+ }
+
+ std::vector<uint8_t> sid = toVector(sessionId);
+ sp<Session> session = mSessionLibrary->findSession(sid);
+ if (!session.get()) {
+ _hidl_cb(Status::ERROR_DRM_SESSION_NOT_OPENED, SecurityLevel::UNKNOWN);
+ return Void();
+ }
+
+ std::map<std::vector<uint8_t>, SecurityLevel>::iterator itr =
+ mSecurityLevel.find(sid);
+ if (itr == mSecurityLevel.end()) {
+ ALOGE("Session id not found");
+ _hidl_cb(Status::ERROR_DRM_INVALID_STATE, SecurityLevel::UNKNOWN);
+ return Void();
+ }
+
+ _hidl_cb(Status::OK, itr->second);
+ return Void();
+}
+
+Return<Status> DrmPlugin::setSecurityLevel(const hidl_vec<uint8_t>& sessionId,
+ SecurityLevel level) {
+ if (sessionId.size() == 0) {
+ ALOGE("Invalid empty session id");
+ return Status::BAD_VALUE;
+ }
+
+ if (level > SecurityLevel::SW_SECURE_CRYPTO) {
+ ALOGE("Cannot set security level > max");
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ std::vector<uint8_t> sid = toVector(sessionId);
+ sp<Session> session = mSessionLibrary->findSession(sid);
+ if (!session.get()) {
+ return Status::ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ std::map<std::vector<uint8_t>, SecurityLevel>::iterator itr =
+ mSecurityLevel.find(sid);
+ if (itr != mSecurityLevel.end()) {
+ mSecurityLevel[sid] = level;
+ } else {
+ if (!mSecurityLevel.insert(
+ std::pair<std::vector<uint8_t>, SecurityLevel>(sid, level)).second) {
+ ALOGE("Failed to set security level");
+ return Status::ERROR_DRM_INVALID_STATE;
+ }
+ }
+ return Status::OK;
+}
+
+Return<void> DrmPlugin::getMetrics(getMetrics_cb _hidl_cb) {
+ // Set the open session count metric.
+ DrmMetricGroup::Attribute openSessionOkAttribute = {
+ "status", DrmMetricGroup::ValueType::INT64_TYPE, (int64_t) Status::OK, 0.0, ""
+ };
+ DrmMetricGroup::Value openSessionMetricValue = {
+ "count", DrmMetricGroup::ValueType::INT64_TYPE, mOpenSessionOkCount, 0.0, ""
+ };
+ DrmMetricGroup::Metric openSessionMetric = {
+ "open_session", { openSessionOkAttribute }, { openSessionMetricValue }
+ };
+
+ // Set the close session count metric.
+ DrmMetricGroup::Attribute closeSessionOkAttribute = {
+ "status", DrmMetricGroup::ValueType::INT64_TYPE, (int64_t) Status::OK, 0.0, ""
+ };
+ DrmMetricGroup::Value closeSessionMetricValue = {
+ "count", DrmMetricGroup::ValueType::INT64_TYPE, mCloseSessionOkCount, 0.0, ""
+ };
+ DrmMetricGroup::Metric closeSessionMetric = {
+ "close_session", { closeSessionOkAttribute }, { closeSessionMetricValue }
+ };
+
+ // Set the close session, not opened metric.
+ DrmMetricGroup::Attribute closeSessionNotOpenedAttribute = {
+ "status", DrmMetricGroup::ValueType::INT64_TYPE,
+ (int64_t) Status::ERROR_DRM_SESSION_NOT_OPENED, 0.0, ""
+ };
+ DrmMetricGroup::Value closeSessionNotOpenedMetricValue = {
+ "count", DrmMetricGroup::ValueType::INT64_TYPE, mCloseSessionNotOpenedCount, 0.0, ""
+ };
+ DrmMetricGroup::Metric closeSessionNotOpenedMetric = {
+ "close_session", { closeSessionNotOpenedAttribute }, { closeSessionNotOpenedMetricValue }
+ };
+
+ DrmMetricGroup metrics = { { openSessionMetric, closeSessionMetric,
+ closeSessionNotOpenedMetric } };
+
+ _hidl_cb(Status::OK, hidl_vec<DrmMetricGroup>({metrics}));
+ return Void();
+}
+
+Return<void> DrmPlugin::getSecureStops(getSecureStops_cb _hidl_cb) {
+ std::vector<SecureStop> stops;
+ for (auto itr = mSecureStops.begin(); itr != mSecureStops.end(); ++itr) {
+ ClearkeySecureStop clearkeyStop = itr->second;
+ std::vector<uint8_t> stopVec;
+ stopVec.insert(stopVec.end(), clearkeyStop.id.begin(), clearkeyStop.id.end());
+ stopVec.insert(stopVec.end(), clearkeyStop.data.begin(), clearkeyStop.data.end());
+
+ SecureStop stop;
+ stop.opaqueData = toHidlVec(stopVec);
+ stops.push_back(stop);
+ }
+ _hidl_cb(Status::OK, stops);
+ return Void();
+}
+
+Return<void> DrmPlugin::getSecureStop(const hidl_vec<uint8_t>& secureStopId,
+ getSecureStop_cb _hidl_cb) {
+ SecureStop stop;
+ auto itr = mSecureStops.find(toVector(secureStopId));
+ if (itr != mSecureStops.end()) {
+ ClearkeySecureStop clearkeyStop = itr->second;
+ std::vector<uint8_t> stopVec;
+ stopVec.insert(stopVec.end(), clearkeyStop.id.begin(), clearkeyStop.id.end());
+ stopVec.insert(stopVec.end(), clearkeyStop.data.begin(), clearkeyStop.data.end());
+
+ stop.opaqueData = toHidlVec(stopVec);
+ _hidl_cb(Status::OK, stop);
+ } else {
+ _hidl_cb(Status::BAD_VALUE, stop);
+ }
+
+ return Void();
+}
+
+Return<Status> DrmPlugin::releaseSecureStop(const hidl_vec<uint8_t>& secureStopId) {
+ return removeSecureStop(secureStopId);
+}
+
+Return<Status> DrmPlugin::releaseAllSecureStops() {
+ return removeAllSecureStops();
+}
+
+Return<void> DrmPlugin::getSecureStopIds(getSecureStopIds_cb _hidl_cb) {
+ std::vector<SecureStopId> ids;
+ for (auto itr = mSecureStops.begin(); itr != mSecureStops.end(); ++itr) {
+ ids.push_back(itr->first);
+ }
+
+ _hidl_cb(Status::OK, toHidlVec(ids));
+ return Void();
+}
+
+Return<Status> DrmPlugin::releaseSecureStops(const SecureStopRelease& ssRelease) {
+ if (ssRelease.opaqueData.size() == 0) {
+ return Status::BAD_VALUE;
+ }
+
+ Status status = Status::OK;
+ std::vector<uint8_t> input = toVector(ssRelease.opaqueData);
+
+ // The format of opaqueData is shared between the server
+ // and the drm service. The clearkey implementation consists of:
+ // count - number of secure stops
+ // list of fixed length secure stops
+ size_t countBufferSize = sizeof(uint32_t);
+ uint32_t count = 0;
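+ // The count is encoded as ASCII decimal digits (at most four are read here)
+ // at the start of opaqueData.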
+ sscanf(reinterpret_cast<char*>(input.data()), "%04" PRIu32, &count);
+
+ // Avoid divide by 0 below.
+ if (count == 0) {
+ return Status::BAD_VALUE;
+ }
+
+ size_t secureStopSize = (input.size() - countBufferSize) / count;
+ uint8_t buffer[secureStopSize];
+ size_t offset = countBufferSize; // skip the count
+ for (size_t i = 0; i < count; ++i, offset += secureStopSize) {
+ memcpy(buffer, input.data() + offset, secureStopSize);
+ std::vector<uint8_t> id(buffer, buffer + kSecureStopIdSize);
+
+ status = removeSecureStop(toHidlVec(id));
+ if (Status::OK != status) break;
+ }
+
+ return status;
+}
+
+Return<Status> DrmPlugin::removeSecureStop(const hidl_vec<uint8_t>& secureStopId) {
+ if (1 != mSecureStops.erase(toVector(secureStopId))) {
+ return Status::BAD_VALUE;
+ }
+ return Status::OK;
+}
+
+Return<Status> DrmPlugin::removeAllSecureStops() {
+ mSecureStops.clear();
+ mNextSecureStopId = kSecureStopIdStart;
+ return Status::OK;
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
diff --git a/drm/mediadrm/plugins/clearkey/hidl/InitDataParser.cpp b/drm/mediadrm/plugins/clearkey/hidl/InitDataParser.cpp
new file mode 100644
index 0000000..e2bb651
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/InitDataParser.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "hidl_InitDataParser"
+
+#include <algorithm>
+#include <utils/Log.h>
+
+#include "InitDataParser.h"
+
+#include "Base64.h"
+
+#include "ClearKeyUUID.h"
+#include "MimeType.h"
+#include "Utils.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+namespace {
+ const size_t kKeyIdSize = 16;
+ const size_t kSystemIdSize = 16;
+}
+
+std::vector<uint8_t> StrToVector(const std::string& str) {
+ std::vector<uint8_t> vec(str.begin(), str.end());
+ return vec;
+}
+
+Status InitDataParser::parse(const std::vector<uint8_t>& initData,
+ const std::string& type,
+ std::vector<uint8_t>* licenseRequest) {
+ // Build a list of the key IDs
+ std::vector<const uint8_t*> keyIds;
+
+ if (type == kIsoBmffVideoMimeType ||
+ type == kIsoBmffAudioMimeType ||
+ type == kCencInitDataFormat) {
+ Status res = parsePssh(initData, &keyIds);
+ if (res != Status::OK) {
+ return res;
+ }
+ } else if (type == kWebmVideoMimeType ||
+ type == kWebmAudioMimeType ||
+ type == kWebmInitDataFormat) {
+ // WebM "init data" is just a single key ID
+ if (initData.size() != kKeyIdSize) {
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+ keyIds.push_back(initData.data());
+ } else {
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ // Build the request
+ std::string requestJson = generateRequest(keyIds);
+ std::vector<uint8_t> requestJsonVec = StrToVector(requestJson);
+
+ licenseRequest->clear();
+ licenseRequest->insert(licenseRequest->end(), requestJsonVec.begin(), requestJsonVec.end());
+ return Status::OK;
+}
+
+Status InitDataParser::parsePssh(const std::vector<uint8_t>& initData,
+ std::vector<const uint8_t*>* keyIds) {
+ size_t readPosition = 0;
+
+ // Validate size field
+ uint32_t expectedSize = initData.size();
+ expectedSize = htonl(expectedSize);
+ if (memcmp(&initData[readPosition], &expectedSize,
+ sizeof(expectedSize)) != 0) {
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+ readPosition += sizeof(expectedSize);
+
+ // Validate PSSH box identifier
+ const char psshIdentifier[4] = {'p', 's', 's', 'h'};
+ if (memcmp(&initData[readPosition], psshIdentifier,
+ sizeof(psshIdentifier)) != 0) {
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+ readPosition += sizeof(psshIdentifier);
+
+ // Validate EME version number
+ const uint8_t psshVersion1[4] = {1, 0, 0, 0};
+ if (memcmp(&initData[readPosition], psshVersion1,
+ sizeof(psshVersion1)) != 0) {
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+ readPosition += sizeof(psshVersion1);
+
+ // Validate system ID
+ if (!clearkeydrm::isClearKeyUUID(&initData[readPosition])) {
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+ readPosition += kSystemIdSize;
+
+ // Read key ID count
+ uint32_t keyIdCount;
+ memcpy(&keyIdCount, &initData[readPosition], sizeof(keyIdCount));
+ keyIdCount = ntohl(keyIdCount);
+ readPosition += sizeof(keyIdCount);
+ if (readPosition + ((uint64_t)keyIdCount * kKeyIdSize) !=
+ initData.size() - sizeof(uint32_t)) {
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ // Calculate the key ID offsets
+ for (uint32_t i = 0; i < keyIdCount; ++i) {
+ size_t keyIdPosition = readPosition + (i * kKeyIdSize);
+ keyIds->push_back(&initData[keyIdPosition]);
+ }
+ return Status::OK;
+}
+
+std::string InitDataParser::generateRequest(const std::vector<const uint8_t*>& keyIds) {
+ const std::string kRequestPrefix("{\"kids\":[");
+ const std::string kRequestSuffix("],\"type\":\"temporary\"}");
+
+ std::string request(kRequestPrefix);
+ std::string encodedId;
+ for (size_t i = 0; i < keyIds.size(); ++i) {
+ encodedId.clear();
+ encodeBase64Url(keyIds[i], kKeyIdSize, &encodedId);
+ if (i != 0) {
+ request.append(",");
+ }
+ request.push_back('\"');
+ request.append(encodedId);
+ request.push_back('\"');
+ }
+ request.append(kRequestSuffix);
+
+ // Android's Base64 encoder produces padding. EME forbids padding.
+ const char kBase64Padding = '=';
+ request.erase(std::remove(request.begin(), request.end(), kBase64Padding), request.end());
+
+ return request;
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
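
parsePssh() above accepts a version-1 PSSH box: a 4-byte big-endian size covering the whole box, the 'pssh' fourcc, version/flags bytes {1,0,0,0}, a 16-byte system id, a big-endian key-id count, the 16-byte key ids, and a trailing 4-byte data-size field. generateRequest() then emits {"kids":[...],"type":"temporary"} with base64url padding stripped. The following is a hedged sketch of a builder for such a box — not part of this change, the helper names are invented, and the caller must supply a system id that isClearKeyUUID() accepts.

    #include <arpa/inet.h>  // htonl
    #include <cstdint>
    #include <vector>

    static void appendU32BE(std::vector<uint8_t>* out, uint32_t value) {
        uint32_t be = htonl(value);
        const uint8_t* p = reinterpret_cast<const uint8_t*>(&be);
        out->insert(out->end(), p, p + sizeof(be));
    }

    // systemId must be 16 bytes; every key id must be exactly 16 bytes.
    std::vector<uint8_t> makePsshV1(
            const std::vector<uint8_t>& systemId,
            const std::vector<std::vector<uint8_t>>& keyIds) {
        std::vector<uint8_t> box;
        // 36 fixed bytes: size(4) + 'pssh'(4) + version/flags(4) + system id(16)
        // + key count(4) + trailing DataSize(4), plus 16 bytes per key id.
        appendU32BE(&box, static_cast<uint32_t>(36 + 16 * keyIds.size()));
        const char fourcc[4] = {'p', 's', 's', 'h'};
        box.insert(box.end(), fourcc, fourcc + 4);
        const uint8_t versionAndFlags[4] = {1, 0, 0, 0};
        box.insert(box.end(), versionAndFlags, versionAndFlags + 4);
        box.insert(box.end(), systemId.begin(), systemId.end());
        appendU32BE(&box, static_cast<uint32_t>(keyIds.size()));
        for (const auto& id : keyIds) {
            box.insert(box.end(), id.begin(), id.end());
        }
        appendU32BE(&box, 0);  // empty DataSize field expected by the size check
        return box;
    }
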
diff --git a/drm/mediadrm/plugins/clearkey/hidl/JsonWebKey.cpp b/drm/mediadrm/plugins/clearkey/hidl/JsonWebKey.cpp
new file mode 100644
index 0000000..cccb41e
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/JsonWebKey.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "hidl_JsonWebKey"
+
+#include <utils/Log.h>
+
+#include "JsonWebKey.h"
+
+#include "Base64.h"
+
+namespace {
+const std::string kKeysTag("keys");
+const std::string kKeyTypeTag("kty");
+const std::string kSymmetricKeyValue("oct");
+const std::string kKeyTag("k");
+const std::string kKeyIdTag("kid");
+const std::string kBase64Padding("=");
+}
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+JsonWebKey::JsonWebKey() {
+}
+
+JsonWebKey::~JsonWebKey() {
+}
+
+/*
+ * Parses a JSON Web Key Set string and initializes a KeyMap with the
+ * key id:key pairs it contains. Both key ids and keys are base64url
+ * encoded; the resulting KeyMap holds the base64url-decoded pairs.
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonWebKey::extractKeysFromJsonWebKeySet(const std::string& jsonWebKeySet,
+ KeyMap* keys) {
+
+ keys->clear();
+
+ if (!parseJsonWebKeySet(jsonWebKeySet, &mJsonObjects)) {
+ return false;
+ }
+
+ // mJsonObjects[0] contains the entire JSON Web Key Set, including
+ // all the base64 encoded keys. Each key is also stored separately as
+ // a JSON object in mJsonObjects[1..n] where n is the total
+ // number of keys in the set.
+ if (!isJsonWebKeySet(mJsonObjects[0])) {
+ return false;
+ }
+
+ std::string encodedKey, encodedKeyId;
+ std::vector<uint8_t> decodedKey, decodedKeyId;
+
+ // mJsonObjects[1] contains the first JSON Web Key in the set
+ for (size_t i = 1; i < mJsonObjects.size(); ++i) {
+ encodedKeyId.clear();
+ encodedKey.clear();
+
+ if (!parseJsonObject(mJsonObjects[i], &mTokens))
+ return false;
+
+ if (findKey(mJsonObjects[i], &encodedKeyId, &encodedKey)) {
+ if (encodedKeyId.empty() || encodedKey.empty()) {
+ ALOGE("Must have both key id and key in the JsonWebKey set.");
+ continue;
+ }
+
+ if (!decodeBase64String(encodedKeyId, &decodedKeyId)) {
+ ALOGE("Failed to decode key id(%s)", encodedKeyId.c_str());
+ continue;
+ }
+
+ if (!decodeBase64String(encodedKey, &decodedKey)) {
+ ALOGE("Failed to decode key(%s)", encodedKey.c_str());
+ continue;
+ }
+
+ keys->insert(std::pair<std::vector<uint8_t>,
+ std::vector<uint8_t> >(decodedKeyId, decodedKey));
+ }
+ }
+ return true;
+}
+
+bool JsonWebKey::decodeBase64String(const std::string& encodedText,
+ std::vector<uint8_t>* decodedText) {
+
+ decodedText->clear();
+
+ // encodedText should not contain padding characters as per EME spec.
+ if (encodedText.find(kBase64Padding) != std::string::npos) {
+ return false;
+ }
+
+    // Since decodeBase64() requires padding characters,
+    // add them so the length of encodedText is an exact multiple of 4.
+ int remainder = encodedText.length() % 4;
+ std::string paddedText(encodedText);
+ if (remainder > 0) {
+ for (int i = 0; i < 4 - remainder; ++i) {
+ paddedText.append(kBase64Padding);
+ }
+ }
+
+ sp<Buffer> buffer = decodeBase64(paddedText);
+ if (buffer == nullptr) {
+ ALOGE("Malformed base64 encoded content found.");
+ return false;
+ }
+
+ decodedText->insert(decodedText->end(), buffer->base(), buffer->base() + buffer->size());
+ return true;
+}
+
+bool JsonWebKey::findKey(const std::string& jsonObject, std::string* keyId,
+ std::string* encodedKey) {
+
+ std::string key, value;
+
+ // Only allow symmetric key, i.e. "kty":"oct" pair.
+ if (jsonObject.find(kKeyTypeTag) != std::string::npos) {
+ findValue(kKeyTypeTag, &value);
+ if (0 != value.compare(kSymmetricKeyValue))
+ return false;
+ }
+
+ if (jsonObject.find(kKeyIdTag) != std::string::npos) {
+ findValue(kKeyIdTag, keyId);
+ }
+
+ if (jsonObject.find(kKeyTag) != std::string::npos) {
+ findValue(kKeyTag, encodedKey);
+ }
+ return true;
+}
+
+void JsonWebKey::findValue(const std::string &key, std::string* value) {
+ value->clear();
+ const char* valueToken;
+ for (std::vector<std::string>::const_iterator nextToken = mTokens.begin();
+ nextToken != mTokens.end(); ++nextToken) {
+ if (0 == (*nextToken).compare(key)) {
+ if (nextToken + 1 == mTokens.end())
+ break;
+ valueToken = (*(nextToken + 1)).c_str();
+ value->assign(valueToken);
+ nextToken++;
+ break;
+ }
+ }
+}
+
+bool JsonWebKey::isJsonWebKeySet(const std::string& jsonObject) const {
+ if (jsonObject.find(kKeysTag) == std::string::npos) {
+ ALOGE("JSON Web Key does not contain keys.");
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Parses a JSON object string and initializes a vector of tokens.
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonWebKey::parseJsonObject(const std::string& jsonObject,
+ std::vector<std::string>* tokens) {
+ jsmn_parser parser;
+
+ jsmn_init(&parser);
+ int numTokens = jsmn_parse(&parser,
+ jsonObject.c_str(), jsonObject.size(), nullptr, 0);
+ if (numTokens < 0) {
+ ALOGE("Parser returns error code=%d", numTokens);
+ return false;
+ }
+
+    mJsmnTokens.clear();
+    mJsmnTokens.resize(numTokens);  // jsmn_parse needs one jsmntok_t per token
+
+ jsmn_init(&parser);
+ int status = jsmn_parse(&parser, jsonObject.c_str(),
+ jsonObject.size(), mJsmnTokens.data(), numTokens);
+ if (status < 0) {
+ ALOGE("Parser returns error code=%d", status);
+ return false;
+ }
+
+ tokens->clear();
+ std::string token;
+ const char *pjs;
+ for (int j = 0; j < numTokens; ++j) {
+ pjs = jsonObject.c_str() + mJsmnTokens[j].start;
+ if (mJsmnTokens[j].type == JSMN_STRING ||
+ mJsmnTokens[j].type == JSMN_PRIMITIVE) {
+ token.assign(pjs, mJsmnTokens[j].end - mJsmnTokens[j].start);
+ tokens->push_back(token);
+ }
+ }
+ return true;
+}
+
+/*
+ * Parses JSON Web Key Set string and initializes a vector of JSON objects.
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonWebKey::parseJsonWebKeySet(const std::string& jsonWebKeySet,
+ std::vector<std::string>* jsonObjects) {
+ if (jsonWebKeySet.empty()) {
+ ALOGE("Empty JSON Web Key");
+ return false;
+ }
+
+ // The jsmn parser only supports unicode encoding.
+ jsmn_parser parser;
+
+    // Compute the number of tokens. A token records the type and the
+    // offset of a value in the original string.
+ jsmn_init(&parser);
+ int numTokens = jsmn_parse(&parser,
+ jsonWebKeySet.c_str(), jsonWebKeySet.size(), nullptr, 0);
+ if (numTokens < 0) {
+ ALOGE("Parser returns error code=%d", numTokens);
+ return false;
+ }
+
+    mJsmnTokens.resize(numTokens);  // jsmn_parse needs one jsmntok_t per token
+
+ jsmn_init(&parser);
+ int status = jsmn_parse(&parser, jsonWebKeySet.c_str(),
+ jsonWebKeySet.size(), mJsmnTokens.data(), numTokens);
+ if (status < 0) {
+ ALOGE("Parser returns error code=%d", status);
+ return false;
+ }
+
+ std::string token;
+ const char *pjs;
+ for (int i = 0; i < numTokens; ++i) {
+ pjs = jsonWebKeySet.c_str() + mJsmnTokens[i].start;
+ if (mJsmnTokens[i].type == JSMN_OBJECT) {
+ token.assign(pjs, mJsmnTokens[i].end - mJsmnTokens[i].start);
+ jsonObjects->push_back(token);
+ }
+ }
+ return true;
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
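
For illustration, this is the kind of JSON Web Key Set that extractKeysFromJsonWebKeySet() above accepts (not part of this change): the "kid" and "k" fields are base64url encoded without '=' padding, and only "kty":"oct" entries are kept. The values below encode the made-up 16-byte strings "0123456789abcdef" and "fedcba9876543210", not real test vectors.

    #include <string>

    const std::string kExampleJwkSet =
            "{\"keys\":["
            "{\"kty\":\"oct\","
            "\"kid\":\"MDEyMzQ1Njc4OWFiY2RlZg\","
            "\"k\":\"ZmVkY2JhOTg3NjU0MzIxMA\"}"
            "]}";
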
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Session.cpp b/drm/mediadrm/plugins/clearkey/hidl/Session.cpp
new file mode 100644
index 0000000..07c9269
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/Session.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "hidl_ClearKeySession"
+#include <utils/Log.h>
+
+#include "Session.h"
+#include "Utils.h"
+
+#include "AesCtrDecryptor.h"
+#include "InitDataParser.h"
+#include "JsonWebKey.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::KeyValue;
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_0::SubSample;
+using ::android::hardware::Return;
+using ::android::sp;
+
+using android::Mutex;
+
+Status Session::getKeyRequest(
+ const std::vector<uint8_t>& initData,
+ const std::string& mimeType,
+ std::vector<uint8_t>* keyRequest) const {
+ InitDataParser parser;
+ return parser.parse(initData, mimeType, keyRequest);
+}
+
+Status Session::provideKeyResponse(const std::vector<uint8_t>& response) {
+ std::string responseString(
+ reinterpret_cast<const char*>(response.data()), response.size());
+ KeyMap keys;
+
+ Mutex::Autolock lock(mMapLock);
+ JsonWebKey parser;
+ if (parser.extractKeysFromJsonWebKeySet(responseString, &keys)) {
+ for (auto &key : keys) {
+ std::string first(key.first.begin(), key.first.end());
+ std::string second(key.second.begin(), key.second.end());
+ mKeyMap.insert(std::pair<std::vector<uint8_t>,
+ std::vector<uint8_t> >(key.first, key.second));
+ }
+ return Status::OK;
+ } else {
+ return Status::ERROR_DRM_UNKNOWN;
+ }
+}
+
+Status Session::decrypt(
+ const KeyId keyId, const Iv iv, const uint8_t* srcPtr,
+ uint8_t* destPtr, const std::vector<SubSample> subSamples,
+ size_t* bytesDecryptedOut) {
+ Mutex::Autolock lock(mMapLock);
+
+ std::vector<uint8_t> keyIdVector;
+ keyIdVector.clear();
+ keyIdVector.insert(keyIdVector.end(), keyId, keyId + kBlockSize);
+ std::map<std::vector<uint8_t>, std::vector<uint8_t> >::iterator itr;
+ itr = mKeyMap.find(keyIdVector);
+ if (itr == mKeyMap.end()) {
+ return Status::ERROR_DRM_NO_LICENSE;
+ }
+
+ AesCtrDecryptor decryptor;
+ return decryptor.decrypt(
+ itr->second /*key*/, iv, srcPtr, destPtr, subSamples,
+ subSamples.size(), bytesDecryptedOut);
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
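
A rough usage sketch for the Session class above (illustrative, not part of this change). It assumes Session.h from this patch is on the include path; the helper name is hypothetical, and the initDataType string would typically be one of the format constants from MimeType.h, which are not shown here.

    #include <string>
    #include <vector>

    #include "Session.h"

    using ::android::hardware::drm::V1_0::Status;
    using ::android::hardware::drm::V1_1::clearkey::Session;
    using ::android::sp;

    // Runs the request/response half of the exchange; decrypt() can then be
    // called with key ids delivered in the JSON Web Key Set response.
    Status runLicenseExchange(const sp<Session>& session,
                              const std::vector<uint8_t>& initData,
                              const std::string& initDataType,
                              const std::vector<uint8_t>& jsonResponse,
                              std::vector<uint8_t>* request) {
        Status status = session->getKeyRequest(initData, initDataType, request);
        if (status != Status::OK) {
            return status;
        }
        return session->provideKeyResponse(jsonResponse);
    }
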
diff --git a/drm/mediadrm/plugins/clearkey/hidl/SessionLibrary.cpp b/drm/mediadrm/plugins/clearkey/hidl/SessionLibrary.cpp
new file mode 100644
index 0000000..b4319e6
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/SessionLibrary.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "hidl_ClearKeySessionLibrary"
+#include <utils/Log.h>
+
+#include "SessionLibrary.h"
+#include "Utils.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::sp;
+
+Mutex SessionLibrary::sSingletonLock;
+SessionLibrary* SessionLibrary::sSingleton = NULL;
+
+SessionLibrary* SessionLibrary::get() {
+ Mutex::Autolock lock(sSingletonLock);
+
+ if (sSingleton == NULL) {
+ ALOGD("Instantiating Session Library Singleton.");
+ sSingleton = new SessionLibrary();
+ }
+
+ return sSingleton;
+}
+
+sp<Session> SessionLibrary::createSession() {
+ Mutex::Autolock lock(mSessionsLock);
+
+ char sessionIdRaw[16];
+ snprintf(sessionIdRaw, sizeof(sessionIdRaw), "%u", mNextSessionId);
+
+ mNextSessionId += 1;
+
+ std::vector<uint8_t> sessionId;
+ sessionId.insert(sessionId.end(), sessionIdRaw,
+ sessionIdRaw + sizeof(sessionIdRaw) / sizeof(uint8_t));
+
+ mSessions.insert(std::pair<std::vector<uint8_t>,
+ sp<Session> >(sessionId, new Session(sessionId)));
+ std::map<std::vector<uint8_t>, sp<Session> >::iterator itr = mSessions.find(sessionId);
+ if (itr != mSessions.end()) {
+ return itr->second;
+ } else {
+ return nullptr;
+ }
+}
+
+sp<Session> SessionLibrary::findSession(
+ const std::vector<uint8_t>& sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+ std::map<std::vector<uint8_t>, sp<Session> >::iterator itr = mSessions.find(sessionId);
+ if (itr != mSessions.end()) {
+ return itr->second;
+ } else {
+ return nullptr;
+ }
+}
+
+void SessionLibrary::destroySession(const sp<Session>& session) {
+ Mutex::Autolock lock(mSessionsLock);
+ mSessions.erase(session->sessionId());
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
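
A minimal sketch of how a caller drives the library above (illustrative only; the function name is invented): sessions are created through the process-wide singleton, looked up by the id handed back, and removed when closed.

    #include "SessionLibrary.h"

    using ::android::hardware::drm::V1_1::clearkey::Session;
    using ::android::hardware::drm::V1_1::clearkey::SessionLibrary;
    using ::android::sp;

    void exerciseOneSession() {
        SessionLibrary* library = SessionLibrary::get();
        sp<Session> session = library->createSession();   // id comes from a counter
        sp<Session> found = library->findSession(session->sessionId());
        // found == session here; destroySession() drops it from the map.
        library->destroySession(session);
    }
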
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.1-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.1-service.clearkey.rc
new file mode 100644
index 0000000..ffe856a
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.1-service.clearkey.rc
@@ -0,0 +1,6 @@
+service vendor.drm-clearkey-hal-1-1 /vendor/bin/hw/android.hardware.drm@1.1-service.clearkey
+ class hal
+ user media
+ group media mediadrm
+ ioprio rt 4
+ writepid /dev/cpuset/foreground/tasks
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/AesCtrDecryptor.h b/drm/mediadrm/plugins/clearkey/hidl/include/AesCtrDecryptor.h
new file mode 100644
index 0000000..0c7ef20
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/AesCtrDecryptor.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_AES_CTR_DECRYPTOR_H_
+#define CLEARKEY_AES_CTR_DECRYPTOR_H_
+
+#include "ClearKeyTypes.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_0::SubSample;
+
+class AesCtrDecryptor {
+public:
+ AesCtrDecryptor() {}
+
+ Status decrypt(const std::vector<uint8_t>& key, const Iv iv,
+ const uint8_t* source, uint8_t* destination,
+ const std::vector<SubSample> subSamples, size_t numSubSamples,
+ size_t* bytesDecryptedOut);
+
+private:
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN(AesCtrDecryptor);
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_AES_CTR_DECRYPTOR_H_
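
To illustrate how the decryptor above is driven: each SubSample describes a run of clear bytes followed by a run of AES-CTR-encrypted bytes. This is a hedged sketch, not part of the change; the helper name is invented, the SubSample field names are assumed from the drm@1.0 types, and the caller supplies 16-byte key and iv buffers.

    #include <vector>

    #include "AesCtrDecryptor.h"

    using ::android::hardware::drm::V1_0::Status;
    using ::android::hardware::drm::V1_0::SubSample;
    using ::android::hardware::drm::V1_1::clearkey::AesCtrDecryptor;
    using ::android::hardware::drm::V1_1::clearkey::Iv;

    // source and destination must each hold at least 16 + 32 = 48 bytes here.
    Status decryptOneSample(const std::vector<uint8_t>& key,  // 16-byte AES key
                            const Iv iv,
                            const uint8_t* source, uint8_t* destination) {
        std::vector<SubSample> subSamples(1);
        subSamples[0].numBytesOfClearData = 16;      // e.g. a header left in the clear
        subSamples[0].numBytesOfEncryptedData = 32;  // AES-CTR payload

        size_t bytesDecrypted = 0;
        AesCtrDecryptor decryptor;
        return decryptor.decrypt(key, iv, source, destination, subSamples,
                                 subSamples.size(), &bytesDecrypted);
    }
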
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/Base64.h b/drm/mediadrm/plugins/clearkey/hidl/include/Base64.h
new file mode 100644
index 0000000..4a385bd
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/Base64.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BASE_64_H_
+
+#define BASE_64_H_
+
+#include <android/hardware/drm/1.0/types.h>
+
+#include "Buffer.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::sp;
+
+struct Buffer;
+
+sp<Buffer> decodeBase64(const std::string &s);
+void encodeBase64(const void *data, size_t size, std::string *out);
+
+void encodeBase64Url(const void *data, size_t size, std::string *out);
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // BASE_64_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/Buffer.h b/drm/mediadrm/plugins/clearkey/hidl/include/Buffer.h
new file mode 100644
index 0000000..5bbb28a
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/Buffer.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BUFFER_H_
+#define BUFFER_H_
+
+#include <android/hardware/drm/1.0/types.h>
+#include <utils/RefBase.h>
+
+#include "ClearKeyTypes.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::sp;
+
+struct Buffer : public RefBase {
+ explicit Buffer(size_t capacity);
+
+ uint8_t *base() { return reinterpret_cast<uint8_t *>(mData); }
+ uint8_t *data() { return reinterpret_cast<uint8_t *>(mData) + mRangeOffset; }
+ size_t capacity() const { return mCapacity; }
+ size_t size() const { return mRangeLength; }
+ size_t offset() const { return mRangeOffset; }
+
+protected:
+ virtual ~Buffer();
+
+private:
+ void *mData;
+ size_t mCapacity;
+ size_t mRangeOffset;
+ size_t mRangeLength;
+
+ bool mOwnsData;
+
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN(Buffer);
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // BUFFER_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h
new file mode 100644
index 0000000..d65b25c
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_DRM_PROPERTIES_H_
+#define CLEARKEY_DRM_PROPERTIES_H_
+
+#include <string.h>
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+static const std::string kVendorKey("vendor");
+static const std::string kVendorValue("Google");
+static const std::string kVersionKey("version");
+static const std::string kVersionValue("1.1");
+static const std::string kPluginDescriptionKey("description");
+static const std::string kPluginDescriptionValue("ClearKey CDM");
+static const std::string kAlgorithmsKey("algorithms");
+static const std::string kAlgorithmsValue("");
+static const std::string kListenerTestSupportKey("listenerTestSupport");
+static const std::string kListenerTestSupportValue("true");
+
+static const std::string kDeviceIdKey("deviceId");
+static const uint8_t kTestDeviceIdData[] =
+ {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+// TODO: stub out metrics for now
+static const std::string kMetricsKey("metrics");
+static const uint8_t kMetricsData[] = { 0 };
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_DRM_PROPERTIES_H_
+
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
new file mode 100644
index 0000000..46cb5e4
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_MACROS_H_
+#define CLEARKEY_MACROS_H_
+
+#include <android/hardware/drm/1.0/types.h>
+
+#include <map>
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::KeyValue;
+using ::android::hardware::hidl_vec;
+
+const uint8_t kBlockSize = 16; //AES_BLOCK_SIZE;
+typedef uint8_t KeyId[kBlockSize];
+typedef uint8_t Iv[kBlockSize];
+
+typedef ::android::hardware::drm::V1_0::SubSample SubSample;
+typedef std::map<std::vector<uint8_t>, std::vector<uint8_t> > KeyMap;
+
+#define CLEARKEY_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&) = delete; \
+ void operator=(const TypeName&) = delete;
+
+#define CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(TypeName) \
+ TypeName() = delete; \
+ TypeName(const TypeName&) = delete; \
+ void operator=(const TypeName&) = delete;
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_MACROS_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/CreatePluginFactories.h b/drm/mediadrm/plugins/clearkey/hidl/include/CreatePluginFactories.h
new file mode 100644
index 0000000..9952027
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/CreatePluginFactories.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_CREATE_PLUGIN_FACTORIES_H_
+#define CLEARKEY_CREATE_PLUGIN_FACTORIES_H_
+
+#include <android/hardware/drm/1.1/ICryptoFactory.h>
+#include <android/hardware/drm/1.1/IDrmFactory.h>
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_1::ICryptoFactory;
+using ::android::hardware::drm::V1_1::IDrmFactory;
+
+extern "C" {
+ IDrmFactory* createDrmFactory();
+ ICryptoFactory* createCryptoFactory();
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+#endif // CLEARKEY_CREATE_PLUGIN_FACTORIES_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/CryptoFactory.h b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoFactory.h
new file mode 100644
index 0000000..175ab76
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoFactory.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_CRYPTO_FACTORY_H_
+#define CLEARKEY_CRYPTO_FACTORY_H_
+
+#include <android/hardware/drm/1.0/ICryptoPlugin.h>
+#include <android/hardware/drm/1.1/ICryptoFactory.h>
+
+#include "ClearKeyTypes.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_1::ICryptoFactory;
+using ::android::hardware::drm::V1_0::ICryptoPlugin;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::Return;
+
+struct CryptoFactory : public ICryptoFactory {
+ CryptoFactory() {}
+ virtual ~CryptoFactory() {}
+
+ Return<bool> isCryptoSchemeSupported(const hidl_array<uint8_t, 16>& uuid)
+ override;
+
+ Return<void> createPlugin(
+ const hidl_array<uint8_t, 16>& uuid,
+ const hidl_vec<uint8_t>& initData,
+ createPlugin_cb _hidl_cb) override;
+
+private:
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN(CryptoFactory);
+
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_CRYPTO_FACTORY_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
new file mode 100644
index 0000000..6a73806
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/CryptoPlugin.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_CRYPTO_PLUGIN_H_
+#define CLEARKEY_CRYPTO_PLUGIN_H_
+
+#include <android/hardware/drm/1.0/ICryptoPlugin.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+
+#include "ClearKeyTypes.h"
+#include "Session.h"
+#include "Utils.h"
+
+namespace {
+ static const size_t KEY_ID_SIZE = 16;
+ static const size_t KEY_IV_SIZE = 16;
+}
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::DestinationBuffer;
+using ::android::hardware::drm::V1_0::ICryptoPlugin;
+using ::android::hardware::drm::V1_0::Mode;
+using ::android::hardware::drm::V1_0::Pattern;
+using ::android::hardware::drm::V1_0::SharedBuffer;
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_0::SubSample;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hidl::memory::V1_0::IMemory;
+using ::android::sp;
+
+struct CryptoPlugin : public ICryptoPlugin {
+ explicit CryptoPlugin(const hidl_vec<uint8_t>& sessionId) {
+ mInitStatus = setMediaDrmSession(sessionId);
+ }
+ virtual ~CryptoPlugin() {}
+
+ Return<bool> requiresSecureDecoderComponent(const hidl_string& mime) {
+ UNUSED(mime);
+ return false;
+ }
+
+ Return<void> notifyResolution(uint32_t width, uint32_t height) {
+ UNUSED(width);
+ UNUSED(height);
+ return Void();
+ }
+
+ Return<void> decrypt(
+ bool secure,
+ const hidl_array<uint8_t, KEY_ID_SIZE>& keyId,
+ const hidl_array<uint8_t, KEY_IV_SIZE>& iv,
+ Mode mode,
+ const Pattern& pattern,
+ const hidl_vec<SubSample>& subSamples,
+ const SharedBuffer& source,
+ uint64_t offset,
+ const DestinationBuffer& destination,
+ decrypt_cb _hidl_cb);
+
+ Return<void> setSharedBufferBase(const hidl_memory& base,
+ uint32_t bufferId);
+
+ Return<Status> setMediaDrmSession(const hidl_vec<uint8_t>& sessionId);
+
+ Return<Status> getInitStatus() const { return mInitStatus; }
+
+private:
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN(CryptoPlugin);
+
+ std::map<uint32_t, sp<IMemory> > mSharedBufferMap;
+ sp<Session> mSession;
+ Status mInitStatus;
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_CRYPTO_PLUGIN_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
new file mode 100644
index 0000000..6f58195
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_DRM_FACTORY_H_
+#define CLEARKEY_DRM_FACTORY_H_
+
+#include <android/hardware/drm/1.1/IDrmPlugin.h>
+#include <android/hardware/drm/1.1/IDrmFactory.h>
+
+#include "ClearKeyTypes.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::Return;
+
+struct DrmFactory : public IDrmFactory {
+ DrmFactory() {}
+ virtual ~DrmFactory() {}
+
+ Return<bool> isCryptoSchemeSupported(const hidl_array<uint8_t, 16>& uuid)
+ override;
+
+ Return<bool> isContentTypeSupported(const hidl_string &mimeType)
+ override;
+
+ Return<void> createPlugin(
+ const hidl_array<uint8_t, 16>& uuid,
+ const hidl_string& appPackageName,
+ createPlugin_cb _hidl_cb) override;
+
+private:
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN(DrmFactory);
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_DRM_FACTORY_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
new file mode 100644
index 0000000..fb0695a
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_DRM_PLUGIN_H_
+#define CLEARKEY_DRM_PLUGIN_H_
+
+#include <android/hardware/drm/1.1/IDrmPlugin.h>
+
+#include <stdio.h>
+#include <map>
+
+#include "SessionLibrary.h"
+#include "Utils.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::EventType;
+using ::android::hardware::drm::V1_0::IDrmPluginListener;
+using ::android::hardware::drm::V1_0::KeyStatus;
+using ::android::hardware::drm::V1_0::KeyType;
+using ::android::hardware::drm::V1_0::KeyValue;
+using ::android::hardware::drm::V1_0::SecureStop;
+using ::android::hardware::drm::V1_0::SecureStopId;
+using ::android::hardware::drm::V1_0::SessionId;
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_1::DrmMetricGroup;
+using ::android::hardware::drm::V1_1::IDrmPlugin;
+using ::android::hardware::drm::V1_1::KeyRequestType;
+
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+struct DrmPlugin : public IDrmPlugin {
+ explicit DrmPlugin(SessionLibrary* sessionLibrary);
+
+ virtual ~DrmPlugin() {}
+
+ Return<void> openSession(openSession_cb _hidl_cb) override;
+ Return<void> openSession_1_1(SecurityLevel securityLevel,
+ openSession_cb _hidl_cb) override;
+
+ Return<Status> closeSession(const hidl_vec<uint8_t>& sessionId) override;
+
+ Return<void> getKeyRequest(
+ const hidl_vec<uint8_t>& scope,
+ const hidl_vec<uint8_t>& initData,
+ const hidl_string& mimeType,
+ KeyType keyType,
+ const hidl_vec<KeyValue>& optionalParameters,
+ getKeyRequest_cb _hidl_cb) override;
+
+ Return<void> getKeyRequest_1_1(
+ const hidl_vec<uint8_t>& scope,
+ const hidl_vec<uint8_t>& initData,
+ const hidl_string& mimeType,
+ KeyType keyType,
+ const hidl_vec<KeyValue>& optionalParameters,
+ getKeyRequest_1_1_cb _hidl_cb) override;
+
+ Return<void> provideKeyResponse(
+ const hidl_vec<uint8_t>& scope,
+ const hidl_vec<uint8_t>& response,
+ provideKeyResponse_cb _hidl_cb) override;
+
+ Return<Status> removeKeys(const hidl_vec<uint8_t>& sessionId) {
+ if (sessionId.size() == 0) {
+ return Status::BAD_VALUE;
+ }
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ Return<Status> restoreKeys(
+ const hidl_vec<uint8_t>& sessionId,
+ const hidl_vec<uint8_t>& keySetId) {
+
+ if (sessionId.size() == 0 || keySetId.size() == 0) {
+ return Status::BAD_VALUE;
+ }
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ Return<void> queryKeyStatus(
+ const hidl_vec<uint8_t>& sessionId,
+ queryKeyStatus_cb _hidl_cb) override;
+
+ Return<void> getProvisionRequest(
+ const hidl_string& certificateType,
+ const hidl_string& certificateAuthority,
+ getProvisionRequest_cb _hidl_cb) {
+ UNUSED(certificateType);
+ UNUSED(certificateAuthority);
+
+ hidl_string defaultUrl;
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, hidl_vec<uint8_t>(), defaultUrl);
+ return Void();
+ }
+
+ Return<void> provideProvisionResponse(
+ const hidl_vec<uint8_t>& response,
+ provideProvisionResponse_cb _hidl_cb) {
+
+ if (response.size() == 0) {
+ _hidl_cb(Status::BAD_VALUE, hidl_vec<uint8_t>(), hidl_vec<uint8_t>());
+ return Void();
+ }
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, hidl_vec<uint8_t>(), hidl_vec<uint8_t>());
+ return Void();
+ }
+
+ Return<void> getHdcpLevels(getHdcpLevels_cb _hidl_cb) {
+ HdcpLevel connectedLevel = HdcpLevel::HDCP_NONE;
+ HdcpLevel maxLevel = HdcpLevel::HDCP_NO_OUTPUT;
+ _hidl_cb(Status::OK, connectedLevel, maxLevel);
+ return Void();
+ }
+
+ Return<void> getNumberOfSessions(getNumberOfSessions_cb _hidl_cb) override;
+
+ Return<void> getSecurityLevel(const hidl_vec<uint8_t>& sessionId,
+ getSecurityLevel_cb _hidl_cb) override;
+
+ Return<void> getMetrics(getMetrics_cb _hidl_cb) override;
+
+ Return<void> getPropertyString(
+ const hidl_string& name,
+ getPropertyString_cb _hidl_cb) override;
+
+ Return<void> getPropertyByteArray(
+ const hidl_string& name,
+ getPropertyByteArray_cb _hidl_cb) override;
+
+ Return<Status> setPropertyString(
+ const hidl_string& name, const hidl_string& value) override;
+
+ Return<Status> setPropertyByteArray(
+ const hidl_string& name, const hidl_vec<uint8_t>& value) override;
+
+ Return<Status> setCipherAlgorithm(
+ const hidl_vec<uint8_t>& sessionId, const hidl_string& algorithm) {
+ if (sessionId.size() == 0 || algorithm.size() == 0) {
+ return Status::BAD_VALUE;
+ }
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ Return<Status> setMacAlgorithm(
+ const hidl_vec<uint8_t>& sessionId, const hidl_string& algorithm) {
+ if (sessionId.size() == 0 || algorithm.size() == 0) {
+ return Status::BAD_VALUE;
+ }
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ Return<void> encrypt(
+ const hidl_vec<uint8_t>& sessionId,
+ const hidl_vec<uint8_t>& keyId,
+ const hidl_vec<uint8_t>& input,
+ const hidl_vec<uint8_t>& iv,
+ encrypt_cb _hidl_cb) {
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ input.size() == 0 || iv.size() == 0) {
+ _hidl_cb(Status::BAD_VALUE, hidl_vec<uint8_t>());
+ return Void();
+ }
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, hidl_vec<uint8_t>());
+ return Void();
+ }
+
+ Return<void> decrypt(
+ const hidl_vec<uint8_t>& sessionId,
+ const hidl_vec<uint8_t>& keyId,
+ const hidl_vec<uint8_t>& input,
+ const hidl_vec<uint8_t>& iv,
+ decrypt_cb _hidl_cb) {
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ input.size() == 0 || iv.size() == 0) {
+ _hidl_cb(Status::BAD_VALUE, hidl_vec<uint8_t>());
+ return Void();
+ }
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, hidl_vec<uint8_t>());
+ return Void();
+ }
+
+ Return<void> sign(
+ const hidl_vec<uint8_t>& sessionId,
+ const hidl_vec<uint8_t>& keyId,
+ const hidl_vec<uint8_t>& message,
+ sign_cb _hidl_cb) {
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ message.size() == 0) {
+ _hidl_cb(Status::BAD_VALUE, hidl_vec<uint8_t>());
+ return Void();
+ }
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, hidl_vec<uint8_t>());
+ return Void();
+ }
+
+ Return<void> verify(
+ const hidl_vec<uint8_t>& sessionId,
+ const hidl_vec<uint8_t>& keyId,
+ const hidl_vec<uint8_t>& message,
+ const hidl_vec<uint8_t>& signature,
+ verify_cb _hidl_cb) {
+
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ message.size() == 0 || signature.size() == 0) {
+ _hidl_cb(Status::BAD_VALUE, false);
+ return Void();
+ }
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, false);
+ return Void();
+ }
+
+ Return<void> signRSA(
+ const hidl_vec<uint8_t>& sessionId,
+ const hidl_string& algorithm,
+ const hidl_vec<uint8_t>& message,
+ const hidl_vec<uint8_t>& wrappedKey,
+ signRSA_cb _hidl_cb) {
+ if (sessionId.size() == 0 || algorithm.size() == 0 ||
+ message.size() == 0 || wrappedKey.size() == 0) {
+ _hidl_cb(Status::BAD_VALUE, hidl_vec<uint8_t>());
+ return Void();
+ }
+ _hidl_cb(Status::ERROR_DRM_CANNOT_HANDLE, hidl_vec<uint8_t>());
+ return Void();
+ }
+
+ Return<void> setListener(const sp<IDrmPluginListener>& listener) {
+ mListener = listener;
+ return Void();
+    }
+
+ Return<void> sendEvent(EventType eventType, const hidl_vec<uint8_t>& sessionId,
+ const hidl_vec<uint8_t>& data) {
+ if (mListener != NULL) {
+ mListener->sendEvent(eventType, sessionId, data);
+ } else {
+ ALOGE("Null event listener, event not sent");
+ }
+ return Void();
+ }
+
+ Return<void> sendExpirationUpdate(const hidl_vec<uint8_t>& sessionId, int64_t expiryTimeInMS) {
+ if (mListener != NULL) {
+ mListener->sendExpirationUpdate(sessionId, expiryTimeInMS);
+ } else {
+ ALOGE("Null event listener, event not sent");
+ }
+ return Void();
+ }
+
+ Return<void> sendKeysChange(const hidl_vec<uint8_t>& sessionId,
+ const hidl_vec<KeyStatus>& keyStatusList, bool hasNewUsableKey) {
+ if (mListener != NULL) {
+ mListener->sendKeysChange(sessionId, keyStatusList, hasNewUsableKey);
+ } else {
+ ALOGE("Null event listener, event not sent");
+ }
+ return Void();
+ }
+
+ Return<void> getSecureStops(getSecureStops_cb _hidl_cb);
+
+ Return<void> getSecureStop(const hidl_vec<uint8_t>& secureStopId,
+ getSecureStop_cb _hidl_cb);
+
+ Return<Status> releaseSecureStop(const hidl_vec<uint8_t>& ssRelease);
+
+ Return<Status> releaseAllSecureStops();
+
+ Return<void> getSecureStopIds(getSecureStopIds_cb _hidl_cb);
+
+ Return<Status> releaseSecureStops(const SecureStopRelease& ssRelease);
+
+ Return<Status> removeSecureStop(const hidl_vec<uint8_t>& secureStopId);
+
+ Return<Status> removeAllSecureStops();
+
+private:
+ void initProperties();
+ void installSecureStop(const hidl_vec<uint8_t>& sessionId);
+ void setPlayPolicy();
+
+ Return<Status> setSecurityLevel(const hidl_vec<uint8_t>& sessionId,
+ SecurityLevel level);
+
+ Status getKeyRequestCommon(const hidl_vec<uint8_t>& scope,
+ const hidl_vec<uint8_t>& initData,
+ const hidl_string& mimeType,
+ KeyType keyType,
+ const hidl_vec<KeyValue>& optionalParameters,
+ std::vector<uint8_t> *request,
+ KeyRequestType *getKeyRequestType,
+ std::string *defaultUrl);
+
+ struct ClearkeySecureStop {
+ std::vector<uint8_t> id;
+ std::vector<uint8_t> data;
+ };
+
+ std::map<std::vector<uint8_t>, ClearkeySecureStop> mSecureStops;
+ std::vector<KeyValue> mPlayPolicy;
+ std::map<std::string, std::string> mStringProperties;
+ std::map<std::string, std::vector<uint8_t> > mByteArrayProperties;
+ std::map<std::vector<uint8_t>, SecurityLevel> mSecurityLevel;
+ sp<IDrmPluginListener> mListener;
+ SessionLibrary *mSessionLibrary;
+ int64_t mOpenSessionOkCount;
+ int64_t mCloseSessionOkCount;
+ int64_t mCloseSessionNotOpenedCount;
+ uint32_t mNextSecureStopId;
+
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_DRM_PLUGIN_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/InitDataParser.h b/drm/mediadrm/plugins/clearkey/hidl/include/InitDataParser.h
new file mode 100644
index 0000000..3189c4a
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/InitDataParser.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_INIT_DATA_PARSER_H_
+#define CLEARKEY_INIT_DATA_PARSER_H_
+
+#include <android/hardware/drm/1.0/types.h>
+
+#include "ClearKeyTypes.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::Status;
+
+class InitDataParser {
+public:
+ InitDataParser() {}
+
+ Status parse(const std::vector<uint8_t>& initData,
+ const std::string& type,
+ std::vector<uint8_t>* licenseRequest);
+
+private:
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN(InitDataParser);
+
+ Status parsePssh(const std::vector<uint8_t>& initData,
+ std::vector<const uint8_t*>* keyIds);
+
+ std::string generateRequest(
+ const std::vector<const uint8_t*>& keyIds);
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_INIT_DATA_PARSER_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/JsonWebKey.h b/drm/mediadrm/plugins/clearkey/hidl/include/JsonWebKey.h
new file mode 100644
index 0000000..4ab034c
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/JsonWebKey.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef CLEARKEY_JSON_WEB_KEY_H_
+#define CLEARKEY_JSON_WEB_KEY_H_
+
+#include "jsmn.h"
+#include "Utils.h"
+#include "ClearKeyTypes.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+class JsonWebKey {
+ public:
+ JsonWebKey();
+ virtual ~JsonWebKey();
+
+ bool extractKeysFromJsonWebKeySet(const std::string& jsonWebKeySet,
+ KeyMap* keys);
+
+ private:
+ std::vector<jsmntok_t> mJsmnTokens;
+ std::vector<std::string> mJsonObjects;
+ std::vector<std::string> mTokens;
+
+ bool decodeBase64String(const std::string& encodedText,
+ std::vector<uint8_t>* decodedText);
+ bool findKey(const std::string& jsonObject, std::string* keyId,
+ std::string* encodedKey);
+ void findValue(const std::string &key, std::string* value);
+ bool isJsonWebKeySet(const std::string& jsonObject) const;
+ bool parseJsonObject(const std::string& jsonObject,
+ std::vector<std::string>* tokens);
+ bool parseJsonWebKeySet(const std::string& jsonWebKeySet,
+ std::vector<std::string>* jsonObjects);
+
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN(JsonWebKey);
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_JSON_WEB_KEY_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/Session.h b/drm/mediadrm/plugins/clearkey/hidl/include/Session.h
new file mode 100644
index 0000000..cddfca5
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/Session.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_SESSION_H_
+#define CLEARKEY_SESSION_H_
+
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+#include <vector>
+
+#include "ClearKeyTypes.h"
+
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_0::SubSample;
+
+class Session : public RefBase {
+public:
+ explicit Session(const std::vector<uint8_t>& sessionId)
+ : mSessionId(sessionId) {}
+ virtual ~Session() {}
+
+ const std::vector<uint8_t>& sessionId() const { return mSessionId; }
+
+    Status getKeyRequest(
+            const std::vector<uint8_t>& initData,
+            const std::string& mimeType,
+            std::vector<uint8_t>* keyRequest) const;
+
+ Status provideKeyResponse(
+ const std::vector<uint8_t>& response);
+
+ Status decrypt(
+ const KeyId keyId, const Iv iv, const uint8_t* srcPtr,
+ uint8_t* dstPtr, const std::vector<SubSample> subSamples,
+ size_t* bytesDecryptedOut);
+
+private:
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN(Session);
+
+ const std::vector<uint8_t> mSessionId;
+ KeyMap mKeyMap;
+ Mutex mMapLock;
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_SESSION_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/SessionLibrary.h b/drm/mediadrm/plugins/clearkey/hidl/include/SessionLibrary.h
new file mode 100644
index 0000000..326a0c1
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/SessionLibrary.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_SESSION_LIBRARY_H_
+#define CLEARKEY_SESSION_LIBRARY_H_
+
+#include <utils/RefBase.h>
+#include <utils/Mutex.h>
+
+#include "ClearKeyTypes.h"
+#include "Session.h"
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::sp;
+
+class SessionLibrary : public RefBase {
+public:
+ static SessionLibrary* get();
+
+ sp<Session> createSession();
+
+ sp<Session> findSession(
+ const std::vector<uint8_t>& sessionId);
+
+ void destroySession(const sp<Session>& session);
+
+ size_t numOpenSessions() const { return mSessions.size(); }
+
+private:
+ CLEARKEY_DISALLOW_COPY_AND_ASSIGN(SessionLibrary);
+
+ SessionLibrary() : mNextSessionId(1) {}
+
+ static Mutex sSingletonLock;
+ static SessionLibrary* sSingleton;
+
+ Mutex mSessionsLock;
+ uint32_t mNextSessionId;
+ std::map<std::vector<uint8_t>, sp<Session> > mSessions;
+};
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_SESSION_LIBRARY_H_
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/TypeConvert.h b/drm/mediadrm/plugins/clearkey/hidl/include/TypeConvert.h
new file mode 100644
index 0000000..cc06329
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/TypeConvert.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_ANDROID_HARDWARE_DRM_V1_1_TYPECONVERT
+#define CLEARKEY_ANDROID_HARDWARE_DRM_V1_1_TYPECONVERT
+
+#include <vector>
+
+#include <android/hardware/drm/1.0/types.h>
+
+namespace android {
+namespace hardware {
+namespace drm {
+namespace V1_1 {
+namespace clearkey {
+
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_vec;
+
+template<typename T> const hidl_vec<T> toHidlVec(const std::vector<T> &vec) {
+ hidl_vec<T> hVec;
+ hVec.setToExternal(const_cast<T *>(vec.data()), vec.size());
+ return hVec;
+}
+
+template<typename T> hidl_vec<T> toHidlVec(std::vector<T> &vec) {
+ hidl_vec<T> hVec;
+ hVec.setToExternal(vec.data(), vec.size());
+ return hVec;
+}
+
+template<typename T> const std::vector<T> toVector(const hidl_vec<T> &hVec) {
+ std::vector<T> vec;
+ vec.assign(hVec.data(), hVec.data() + hVec.size());
+ return *const_cast<const std::vector<T> *>(&vec);
+}
+
+template<typename T> std::vector<T> toVector(hidl_vec<T> &hVec) {
+ std::vector<T> vec;
+ vec.assign(hVec.data(), hVec.data() + hVec.size());
+ return vec;
+}
+
+template<typename T, size_t SIZE> const std::vector<T> toVector(
+ const hidl_array<T, SIZE> &hArray) {
+ std::vector<T> vec;
+ vec.assign(hArray.data(), hArray.data() + hArray.size());
+ return vec;
+}
+
+template<typename T, size_t SIZE> std::vector<T> toVector(
+ hidl_array<T, SIZE> &hArray) {
+ std::vector<T> vec;
+ vec.assign(hArray.data(), hArray.data() + hArray.size());
+ return vec;
+}
+
+} // namespace clearkey
+} // namespace V1_1
+} // namespace drm
+} // namespace hardware
+} // namespace android
+
+#endif // CLEARKEY_ANDROID_HARDWARE_DRM_V1_1_TYPECONVERT
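
One point worth keeping in mind about the helpers above: toHidlVec() uses setToExternal(), so the returned hidl_vec aliases the std::vector's storage rather than copying it, while toVector() copies. A short illustrative round trip follows (not part of this change; the function name is invented).

    #include <cstdint>
    #include <vector>

    #include "TypeConvert.h"

    using ::android::hardware::drm::V1_1::clearkey::toHidlVec;
    using ::android::hardware::drm::V1_1::clearkey::toVector;
    using ::android::hardware::hidl_vec;

    void roundTrip() {
        std::vector<uint8_t> id = {0x01, 0x02, 0x03};
        hidl_vec<uint8_t> wrapped = toHidlVec(id);      // aliases id's buffer, no copy
        std::vector<uint8_t> copy = toVector(wrapped);  // copies back out
        // id must stay alive and unmodified for as long as wrapped is in use.
    }
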
diff --git a/drm/mediadrm/plugins/clearkey/hidl/service.cpp b/drm/mediadrm/plugins/clearkey/hidl/service.cpp
new file mode 100644
index 0000000..6a97b72
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/service.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "android.hardware.drm@1.1-service.clearkey"
+
+#include <CryptoFactory.h>
+#include <DrmFactory.h>
+
+#include <android-base/logging.h>
+#include <binder/ProcessState.h>
+#include <hidl/HidlTransportSupport.h>
+
+using ::android::hardware::configureRpcThreadpool;
+using ::android::hardware::joinRpcThreadpool;
+using ::android::sp;
+
+using android::hardware::drm::V1_1::ICryptoFactory;
+using android::hardware::drm::V1_1::IDrmFactory;
+using android::hardware::drm::V1_1::clearkey::CryptoFactory;
+using android::hardware::drm::V1_1::clearkey::DrmFactory;
+
+
+int main(int /* argc */, char** /* argv */) {
+ ALOGD("android.hardware.drm@1.1-service.clearkey starting...");
+
+ // The DRM HAL may communicate with other vendor components via
+ // /dev/vndbinder
+ android::ProcessState::initWithDriver("/dev/vndbinder");
+
+ sp<IDrmFactory> drmFactory = new DrmFactory;
+ sp<ICryptoFactory> cryptoFactory = new CryptoFactory;
+
+ configureRpcThreadpool(8, true /* callerWillJoin */);
+
+ // Set up the hwbinder services
+ CHECK_EQ(drmFactory->registerAsService("clearkey"), android::NO_ERROR)
+ << "Failed to register Clearkey Factory HAL";
+ CHECK_EQ(cryptoFactory->registerAsService("clearkey"), android::NO_ERROR)
+ << "Failed to register Clearkey Crypto HAL";
+
+ joinRpcThreadpool();
+}
diff --git a/drm/mediadrm/plugins/clearkey/tests/AesCtrDecryptorUnittest.cpp b/drm/mediadrm/plugins/clearkey/tests/AesCtrDecryptorUnittest.cpp
deleted file mode 100644
index 039e402..0000000
--- a/drm/mediadrm/plugins/clearkey/tests/AesCtrDecryptorUnittest.cpp
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-#include <string.h>
-
-#include <utils/String8.h>
-#include <utils/Vector.h>
-
-#include "AesCtrDecryptor.h"
-
-namespace clearkeydrm {
-
-using namespace android;
-
-class AesCtrDecryptorTest : public ::testing::Test {
- protected:
- typedef uint8_t Key[kBlockSize];
-
- status_t attemptDecrypt(const Key& key, const Iv& iv, const uint8_t* source,
- uint8_t* destination, const SubSample* subSamples,
- size_t numSubSamples, size_t* bytesDecryptedOut) {
- Vector<uint8_t> keyVector;
- keyVector.appendArray(key, kBlockSize);
-
- AesCtrDecryptor decryptor;
- return decryptor.decrypt(keyVector, iv, source, destination, subSamples,
- numSubSamples, bytesDecryptedOut);
- }
-
- template <size_t totalSize>
- void attemptDecryptExpectingSuccess(const Key& key, const Iv& iv,
- const uint8_t* encrypted,
- const uint8_t* decrypted,
- const SubSample* subSamples,
- size_t numSubSamples) {
- uint8_t outputBuffer[totalSize] = {};
- size_t bytesDecrypted = 0;
- ASSERT_EQ(android::OK, attemptDecrypt(key, iv, encrypted, outputBuffer,
- subSamples, numSubSamples,
- &bytesDecrypted));
- EXPECT_EQ(totalSize, bytesDecrypted);
- EXPECT_EQ(0, memcmp(outputBuffer, decrypted, totalSize));
- }
-};
-
-TEST_F(AesCtrDecryptorTest, DecryptsContiguousEncryptedBlock) {
- const size_t kTotalSize = 64;
- const size_t kNumSubsamples = 1;
-
- // Test vectors from NIST-800-38A
- Key key = {
- 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
- 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
- };
-
- Iv iv = {
- 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
- 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
- };
-
- uint8_t encrypted[kTotalSize] = {
- 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
- 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
- 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
- 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
- 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
- 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
- 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
- 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
- };
-
- uint8_t decrypted[kTotalSize] = {
- 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
- 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
- 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
- 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
- 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
- 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
- 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
- 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
- };
-
- SubSample subSamples[kNumSubsamples] = {
- {0, 64}
- };
-
- attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
- subSamples, kNumSubsamples);
-}
-
-TEST_F(AesCtrDecryptorTest, DecryptsAlignedBifurcatedEncryptedBlock) {
- const size_t kTotalSize = 64;
- const size_t kNumSubsamples = 2;
-
- // Test vectors from NIST-800-38A
- Key key = {
- 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
- 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
- };
-
- Iv iv = {
- 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
- 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
- };
-
- uint8_t encrypted[kTotalSize] = {
- 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
- 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
- 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
- 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
- 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
- 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
- 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
- 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
- };
-
- uint8_t decrypted[kTotalSize] = {
- 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
- 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
- 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
- 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
- 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
- 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
- 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
- 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
- };
-
- SubSample subSamples[kNumSubsamples] = {
- {0, 32},
- {0, 32}
- };
-
- attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
- subSamples, kNumSubsamples);
-}
-
-TEST_F(AesCtrDecryptorTest, DecryptsUnalignedBifurcatedEncryptedBlock) {
- const size_t kTotalSize = 64;
- const size_t kNumSubsamples = 2;
-
- // Test vectors from NIST-800-38A
- Key key = {
- 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
- 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
- };
-
- Iv iv = {
- 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
- 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
- };
-
- uint8_t encrypted[kTotalSize] = {
- 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
- 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
- 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
- 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
- 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
- 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
- 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
- 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
- };
-
- uint8_t decrypted[kTotalSize] = {
- 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
- 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
- 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
- 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
- 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
- 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
- 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
- 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
- };
-
- SubSample subSamples[kNumSubsamples] = {
- {0, 29},
- {0, 35}
- };
-
- attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
- subSamples, kNumSubsamples);
-}
-
-TEST_F(AesCtrDecryptorTest, DecryptsOneMixedSubSample) {
- const size_t kTotalSize = 72;
- const size_t kNumSubsamples = 1;
-
- // Based on test vectors from NIST-800-38A
- Key key = {
- 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
- 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
- };
-
- Iv iv = {
- 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
- 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
- };
-
- uint8_t encrypted[kTotalSize] = {
- // 8 clear bytes
- 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
- // 64 encrypted bytes
- 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
- 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
- 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
- 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
- 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
- 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
- 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
- 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
- };
-
- uint8_t decrypted[kTotalSize] = {
- 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
- 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
- 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
- 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
- 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
- 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
- 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
- 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
- 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
- };
-
- SubSample subSamples[kNumSubsamples] = {
- {8, 64}
- };
-
- attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
- subSamples, kNumSubsamples);
-}
-
-TEST_F(AesCtrDecryptorTest, DecryptsAlignedMixedSubSamples) {
- const size_t kTotalSize = 80;
- const size_t kNumSubsamples = 2;
-
- // Based on test vectors from NIST-800-38A
- Key key = {
- 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
- 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
- };
-
- Iv iv = {
- 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
- 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
- };
-
- uint8_t encrypted[kTotalSize] = {
- // 8 clear bytes
- 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
- // 32 encrypted bytes
- 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
- 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
- 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
- 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff, 0xfd, 0xff,
- // 8 clear bytes
- 0x94, 0xba, 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55,
- // 32 encrypted bytes
- 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5, 0xd3, 0x5e,
- 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0, 0x3e, 0xab,
- 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe, 0x03, 0xd1,
- 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00, 0x9c, 0xee
- };
-
- uint8_t decrypted[kTotalSize] = {
- 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
- 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
- 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
- 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
- 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51,
- 0x94, 0xba, 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55,
- 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
- 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
- 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
- 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
- };
-
- SubSample subSamples[kNumSubsamples] = {
- {8, 32},
- {8, 32}
- };
-
- attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
- subSamples, kNumSubsamples);
-}
-
-TEST_F(AesCtrDecryptorTest, DecryptsUnalignedMixedSubSamples) {
- const size_t kTotalSize = 80;
- const size_t kNumSubsamples = 2;
-
- // Based on test vectors from NIST-800-38A
- Key key = {
- 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
- 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
- };
-
- Iv iv = {
- 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
- 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
- };
-
- uint8_t encrypted[kTotalSize] = {
- // 8 clear bytes
- 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
- // 30 encrypted bytes
- 0x87, 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26,
- 0x1b, 0xef, 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
- 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
- 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff,
- // 8 clear bytes
- 0x94, 0xba, 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55,
- // 34 encrypted bytes
- 0xfd, 0xff, 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5,
- 0xd3, 0x5e, 0x5b, 0x4f, 0x09, 0x02, 0x0d, 0xb0,
- 0x3e, 0xab, 0x1e, 0x03, 0x1d, 0xda, 0x2f, 0xbe,
- 0x03, 0xd1, 0x79, 0x21, 0x70, 0xa0, 0xf3, 0x00,
- 0x9c, 0xee
- };
-
- uint8_t decrypted[kTotalSize] = {
- 0xf0, 0x13, 0xca, 0xc7, 0x00, 0x64, 0x0b, 0xbb,
- 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
- 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
- 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
- 0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x94, 0xba,
- 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55, 0x8e, 0x51,
- 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
- 0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef,
- 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
- 0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10
- };
-
- SubSample subSamples[kNumSubsamples] = {
- {8, 30},
- {8, 34}
- };
-
- attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
- subSamples, kNumSubsamples);
-}
-
-TEST_F(AesCtrDecryptorTest, DecryptsComplexMixedSubSamples) {
- const size_t kTotalSize = 72;
- const size_t kNumSubsamples = 6;
-
- // Based on test vectors from NIST-800-38A
- Key key = {
- 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
- 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
- };
-
- Iv iv = {
- 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
- 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
- };
-
- uint8_t encrypted[kTotalSize] = {
- // 4 clear bytes
- 0xf0, 0x13, 0xca, 0xc7,
- // 1 encrypted bytes
- 0x87,
- // 9 encrypted bytes
- 0x4d, 0x61, 0x91, 0xb6, 0x20, 0xe3, 0x26, 0x1b,
- 0xef,
- // 11 clear bytes
- 0x81, 0x4f, 0x24, 0x87, 0x0e, 0xde, 0xba, 0xad,
- 0x11, 0x9b, 0x46,
- // 20 encrypted bytes
- 0x68, 0x64, 0x99, 0x0d, 0xb6, 0xce,
- 0x98, 0x06, 0xf6, 0x6b, 0x79, 0x70, 0xfd, 0xff,
- 0x86, 0x17, 0x18, 0x7b, 0xb9, 0xff,
- // 8 clear bytes
- 0x94, 0xba, 0x88, 0x2e, 0x0e, 0x12, 0x11, 0x55,
- // 3 clear bytes
- 0x10, 0xf5, 0x22,
- // 14 encrypted bytes
- 0xfd, 0xff, 0x5a, 0xe4, 0xdf, 0x3e, 0xdb, 0xd5,
- 0xd3, 0x5e, 0x5b, 0x4f, 0x09, 0x02,
- // 2 clear bytes
- 0x02, 0x01
- };
-
- uint8_t decrypted[kTotalSize] = {
- 0xf0, 0x13, 0xca, 0xc7, 0x6b, 0xc1, 0xbe, 0xe2,
- 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x81, 0x4f,
- 0x24, 0x87, 0x0e, 0xde, 0xba, 0xad, 0x11, 0x9b,
- 0x46, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a, 0xae,
- 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c, 0x9e,
- 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x94, 0xba, 0x88,
- 0x2e, 0x0e, 0x12, 0x11, 0x55, 0x10, 0xf5, 0x22,
- 0x8e, 0x51, 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c,
- 0xe4, 0x11, 0xe5, 0xfb, 0xc1, 0x19, 0x02, 0x01
- };
-
- SubSample subSamples[kNumSubsamples] = {
- {4, 1},
- {0, 9},
- {11, 20},
- {8, 0},
- {3, 14},
- {2, 0}
- };
-
- attemptDecryptExpectingSuccess<kTotalSize>(key, iv, encrypted, decrypted,
- subSamples, kNumSubsamples);
-}
-
-} // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/tests/Android.bp b/drm/mediadrm/plugins/clearkey/tests/Android.bp
deleted file mode 100644
index ea17bbb..0000000
--- a/drm/mediadrm/plugins/clearkey/tests/Android.bp
+++ /dev/null
@@ -1,40 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// ----------------------------------------------------------------
-// Builds ClearKey Drm Tests
-//
-
-cc_test {
- name: "ClearKeyDrmUnitTest",
- vendor: true,
-
- cflags: ["-Wall", "-Werror"],
-
- srcs: [
- "AesCtrDecryptorUnittest.cpp",
- "InitDataParserUnittest.cpp",
- "JsonWebKeyUnittest.cpp",
- ],
-
- shared_libs: [
- "libcrypto",
- "libdrmclearkeyplugin",
- "liblog",
- "libstagefright_foundation",
- "libutils",
- ],
- header_libs: ["media_plugin_headers"],
-}
diff --git a/include/OWNERS b/include/OWNERS
index 3cb6d9c..d6bd998 100644
--- a/include/OWNERS
+++ b/include/OWNERS
@@ -1,5 +1,5 @@
elaurent@google.com
-gkasten@android.com
+gkasten@google.com
hunga@google.com
jtinker@google.com
lajos@google.com
diff --git a/include/common_time/OWNERS b/include/common_time/OWNERS
new file mode 100644
index 0000000..f9cb567
--- /dev/null
+++ b/include/common_time/OWNERS
@@ -0,0 +1 @@
+gkasten@google.com
diff --git a/include/media/AudioClient.h b/include/media/AudioClient.h
deleted file mode 100644
index 9efd76d..0000000
--- a/include/media/AudioClient.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef ANDROID_AUDIO_CLIENT_H
-#define ANDROID_AUDIO_CLIENT_H
-
-#include <system/audio.h>
-#include <utils/String16.h>
-
-namespace android {
-
-class AudioClient {
- public:
- AudioClient() :
- clientUid(-1), clientPid(-1), packageName("") {}
-
- uid_t clientUid;
- pid_t clientPid;
- String16 packageName;
-};
-
-}; // namespace android
-
-#endif // ANDROID_AUDIO_CLIENT_H
diff --git a/include/media/AudioClient.h b/include/media/AudioClient.h
new file mode 120000
index 0000000..a0530e4
--- /dev/null
+++ b/include/media/AudioClient.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioClient.h
\ No newline at end of file
diff --git a/include/media/AudioPresentationInfo.h b/include/media/AudioPresentationInfo.h
new file mode 100644
index 0000000..e91a992
--- /dev/null
+++ b/include/media/AudioPresentationInfo.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUDIO_PRESENTATION_INFO_H_
+#define AUDIO_PRESENTATION_INFO_H_
+
+#include <sstream>
+#include <stdint.h>
+
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+enum mastering_indication {
+ MASTERING_NOT_INDICATED,
+ MASTERED_FOR_STEREO,
+ MASTERED_FOR_SURROUND,
+ MASTERED_FOR_3D,
+ MASTERED_FOR_HEADPHONE,
+};
+
+struct AudioPresentation : public RefBase {
+ int32_t mPresentationId;
+ int32_t mProgramId;
+ KeyedVector<String8, String8> mLabels;
+ String8 mLanguage;
+ int32_t mMasteringIndication;
+ bool mAudioDescriptionAvailable;
+ bool mSpokenSubtitlesAvailable;
+ bool mDialogueEnhancementAvailable;
+
+ AudioPresentation() {
+ mPresentationId = -1;
+ mProgramId = -1;
+ mLanguage = "";
+ mMasteringIndication = MASTERING_NOT_INDICATED;
+ mAudioDescriptionAvailable = false;
+ mSpokenSubtitlesAvailable = false;
+ mDialogueEnhancementAvailable = false;
+ }
+};
+
+typedef Vector<sp<AudioPresentation>> AudioPresentations;
+
+class AudioPresentationInfo : public RefBase {
+ public:
+ AudioPresentationInfo();
+
+ ~AudioPresentationInfo();
+
+ void addPresentation(sp<AudioPresentation> presentation);
+
+ size_t countPresentations() const;
+
+ const sp<AudioPresentation> getPresentation(size_t index) const;
+
+ private:
+ AudioPresentations mPresentations;
+};
+
+} // namespace android
+
+#endif // AUDIO_PRESENTATION_INFO_H_
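
A brief sketch (illustrative only; the field values are made up) of how a stream parser might populate the container declared above:

    sp<AudioPresentation> presentation = new AudioPresentation();
    presentation->mPresentationId = 10;
    presentation->mProgramId = 1;
    presentation->mLanguage = String8("eng");
    presentation->mLabels.add(String8("eng"), String8("Director commentary"));
    presentation->mAudioDescriptionAvailable = true;

    sp<AudioPresentationInfo> info = new AudioPresentationInfo();
    info->addPresentation(presentation);
    // info->countPresentations() is now 1; info->getPresentation(0) returns the entry.
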
diff --git a/include/media/CounterMetric.h b/include/media/CounterMetric.h
new file mode 120000
index 0000000..baba043
--- /dev/null
+++ b/include/media/CounterMetric.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/CounterMetric.h
\ No newline at end of file
diff --git a/include/media/DataSource.h b/include/media/DataSource.h
new file mode 120000
index 0000000..905bec1
--- /dev/null
+++ b/include/media/DataSource.h
@@ -0,0 +1 @@
+../../media/libmediaextractor/include/media/DataSource.h
\ No newline at end of file
diff --git a/include/media/DataSourceBase.h b/include/media/DataSourceBase.h
new file mode 120000
index 0000000..54c8047
--- /dev/null
+++ b/include/media/DataSourceBase.h
@@ -0,0 +1 @@
+../../media/libmediaextractor/include/media/DataSourceBase.h
\ No newline at end of file
diff --git a/include/media/EventMetric.h b/include/media/EventMetric.h
new file mode 120000
index 0000000..5707d9a
--- /dev/null
+++ b/include/media/EventMetric.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/EventMetric.h
\ No newline at end of file
diff --git a/include/media/ExtractorUtils.h b/include/media/ExtractorUtils.h
new file mode 120000
index 0000000..e2dd082
--- /dev/null
+++ b/include/media/ExtractorUtils.h
@@ -0,0 +1 @@
+../../media/libmediaextractor/include/media/ExtractorUtils.h
\ No newline at end of file
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
deleted file mode 120000
index 7fbf8f2..0000000
--- a/include/media/IAudioRecord.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/IAudioRecord.h
\ No newline at end of file
diff --git a/include/media/IHDCP.h b/include/media/IHDCP.h
deleted file mode 120000
index 9d4568e..0000000
--- a/include/media/IHDCP.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IHDCP.h
\ No newline at end of file
diff --git a/include/media/IMediaCodecService.h b/include/media/IMediaCodecService.h
deleted file mode 120000
index 37f6822..0000000
--- a/include/media/IMediaCodecService.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaCodecService.h
\ No newline at end of file
diff --git a/include/media/MediaDefs.h b/include/media/MediaDefs.h
deleted file mode 120000
index 9850603..0000000
--- a/include/media/MediaDefs.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MediaDefs.h
\ No newline at end of file
diff --git a/include/media/MediaExtractor.h b/include/media/MediaExtractor.h
new file mode 120000
index 0000000..4b35fe1
--- /dev/null
+++ b/include/media/MediaExtractor.h
@@ -0,0 +1 @@
+../../media/libmediaextractor/include/media/MediaExtractor.h
\ No newline at end of file
diff --git a/include/media/MediaSource.h b/include/media/MediaSource.h
new file mode 120000
index 0000000..2e147c4
--- /dev/null
+++ b/include/media/MediaSource.h
@@ -0,0 +1 @@
+../../media/libmediaextractor/include/media/MediaSource.h
\ No newline at end of file
diff --git a/include/media/MediaTrack.h b/include/media/MediaTrack.h
new file mode 120000
index 0000000..5a63287a
--- /dev/null
+++ b/include/media/MediaTrack.h
@@ -0,0 +1 @@
+../../media/libmediaextractor/include/media/MediaTrack.h
\ No newline at end of file
diff --git a/include/media/MicrophoneInfo.h b/include/media/MicrophoneInfo.h
new file mode 100644
index 0000000..2287aca
--- /dev/null
+++ b/include/media/MicrophoneInfo.h
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MICROPHONE_INFO_H
+#define ANDROID_MICROPHONE_INFO_H
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <system/audio.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+namespace android {
+namespace media {
+
+#define RETURN_IF_FAILED(calledOnce) \
+ { \
+ status_t returnStatus = calledOnce; \
+ if (returnStatus) { \
+ ALOGE("Failed at %s:%d (%s)", __FILE__, __LINE__, __func__); \
+ return returnStatus; \
+ } \
+ }
+
+class MicrophoneInfo : public Parcelable {
+public:
+ MicrophoneInfo() = default;
+ MicrophoneInfo(const MicrophoneInfo& microphoneInfo) = default;
+ MicrophoneInfo(audio_microphone_characteristic_t& characteristic) {
+ mDeviceId = String16(&characteristic.device_id[0]);
+ mPortId = characteristic.id;
+ mType = characteristic.device;
+ mAddress = String16(&characteristic.address[0]);
+ mDeviceLocation = characteristic.location;
+ mDeviceGroup = characteristic.group;
+ mIndexInTheGroup = characteristic.index_in_the_group;
+ mGeometricLocation.push_back(characteristic.geometric_location.x);
+ mGeometricLocation.push_back(characteristic.geometric_location.y);
+ mGeometricLocation.push_back(characteristic.geometric_location.z);
+ mOrientation.push_back(characteristic.orientation.x);
+ mOrientation.push_back(characteristic.orientation.y);
+ mOrientation.push_back(characteristic.orientation.z);
+ Vector<float> frequencies;
+ Vector<float> responses;
+ for (size_t i = 0; i < characteristic.num_frequency_responses; i++) {
+ frequencies.push_back(characteristic.frequency_responses[0][i]);
+ responses.push_back(characteristic.frequency_responses[1][i]);
+ }
+ mFrequencyResponses.push_back(frequencies);
+ mFrequencyResponses.push_back(responses);
+ for (size_t i = 0; i < AUDIO_CHANNEL_COUNT_MAX; i++) {
+ mChannelMapping.push_back(characteristic.channel_mapping[i]);
+ }
+ mSensitivity = characteristic.sensitivity;
+ mMaxSpl = characteristic.max_spl;
+ mMinSpl = characteristic.min_spl;
+ mDirectionality = characteristic.directionality;
+ }
+
+ virtual ~MicrophoneInfo() = default;
+
+ virtual status_t writeToParcel(Parcel* parcel) const {
+ RETURN_IF_FAILED(parcel->writeString16(mDeviceId));
+ RETURN_IF_FAILED(parcel->writeInt32(mPortId));
+ RETURN_IF_FAILED(parcel->writeUint32(mType));
+ RETURN_IF_FAILED(parcel->writeString16(mAddress));
+ RETURN_IF_FAILED(parcel->writeInt32(mDeviceLocation));
+ RETURN_IF_FAILED(parcel->writeInt32(mDeviceGroup));
+ RETURN_IF_FAILED(parcel->writeInt32(mIndexInTheGroup));
+ RETURN_IF_FAILED(writeFloatVector(parcel, mGeometricLocation));
+ RETURN_IF_FAILED(writeFloatVector(parcel, mOrientation));
+ if (mFrequencyResponses.size() != 2) {
+ return BAD_VALUE;
+ }
+ for (size_t i = 0; i < mFrequencyResponses.size(); i++) {
+ RETURN_IF_FAILED(parcel->writeInt32(mFrequencyResponses[i].size()));
+ RETURN_IF_FAILED(writeFloatVector(parcel, mFrequencyResponses[i]));
+ }
+ std::vector<int> channelMapping;
+ for (size_t i = 0; i < mChannelMapping.size(); ++i) {
+ channelMapping.push_back(mChannelMapping[i]);
+ }
+ RETURN_IF_FAILED(parcel->writeInt32Vector(channelMapping));
+ RETURN_IF_FAILED(parcel->writeFloat(mSensitivity));
+ RETURN_IF_FAILED(parcel->writeFloat(mMaxSpl));
+ RETURN_IF_FAILED(parcel->writeFloat(mMinSpl));
+ RETURN_IF_FAILED(parcel->writeInt32(mDirectionality));
+ return OK;
+ }
+
+ virtual status_t readFromParcel(const Parcel* parcel) {
+ RETURN_IF_FAILED(parcel->readString16(&mDeviceId));
+ RETURN_IF_FAILED(parcel->readInt32(&mPortId));
+ RETURN_IF_FAILED(parcel->readUint32(&mType));
+ RETURN_IF_FAILED(parcel->readString16(&mAddress));
+ RETURN_IF_FAILED(parcel->readInt32(&mDeviceLocation));
+ RETURN_IF_FAILED(parcel->readInt32(&mDeviceGroup));
+ RETURN_IF_FAILED(parcel->readInt32(&mIndexInTheGroup));
+ RETURN_IF_FAILED(readFloatVector(parcel, &mGeometricLocation, 3));
+ RETURN_IF_FAILED(readFloatVector(parcel, &mOrientation, 3));
+ int32_t frequenciesNum;
+ RETURN_IF_FAILED(parcel->readInt32(&frequenciesNum));
+ Vector<float> frequencies;
+ RETURN_IF_FAILED(readFloatVector(parcel, &frequencies, frequenciesNum));
+ int32_t responsesNum;
+ RETURN_IF_FAILED(parcel->readInt32(&responsesNum));
+ Vector<float> responses;
+ RETURN_IF_FAILED(readFloatVector(parcel, &responses, responsesNum));
+ if (frequencies.size() != responses.size()) {
+ return BAD_VALUE;
+ }
+ mFrequencyResponses.push_back(frequencies);
+ mFrequencyResponses.push_back(responses);
+ std::vector<int> channelMapping;
+ status_t result = parcel->readInt32Vector(&channelMapping);
+ if (result != OK) {
+ return result;
+ }
+ if (channelMapping.size() != AUDIO_CHANNEL_COUNT_MAX) {
+ return BAD_VALUE;
+ }
+ for (size_t i = 0; i < channelMapping.size(); i++) {
+ mChannelMapping.push_back(channelMapping[i]);
+ }
+ RETURN_IF_FAILED(parcel->readFloat(&mSensitivity));
+ RETURN_IF_FAILED(parcel->readFloat(&mMaxSpl));
+ RETURN_IF_FAILED(parcel->readFloat(&mMinSpl));
+ RETURN_IF_FAILED(parcel->readInt32(&mDirectionality));
+ return OK;
+ }
+
+ String16 getDeviceId() const {
+ return mDeviceId;
+ }
+
+ int getPortId() const {
+ return mPortId;
+ }
+
+ unsigned int getType() const {
+ return mType;
+ }
+
+ String16 getAddress() const {
+ return mAddress;
+ }
+
+ int getDeviceLocation() const {
+ return mDeviceLocation;
+ }
+
+ int getDeviceGroup() const {
+ return mDeviceGroup;
+ }
+
+ int getIndexInTheGroup() const {
+ return mIndexInTheGroup;
+ }
+
+ const Vector<float>& getGeometricLocation() const {
+ return mGeometricLocation;
+ }
+
+ const Vector<float>& getOrientation() const {
+ return mOrientation;
+ }
+
+ const Vector<Vector<float>>& getFrequencyResponses() const {
+ return mFrequencyResponses;
+ }
+
+ const Vector<int>& getChannelMapping() const {
+ return mChannelMapping;
+ }
+
+ float getSensitivity() const {
+ return mSensitivity;
+ }
+
+ float getMaxSpl() const {
+ return mMaxSpl;
+ }
+
+ float getMinSpl() const {
+ return mMinSpl;
+ }
+
+ int getDirectionality() const {
+ return mDirectionality;
+ }
+
+private:
+ status_t readFloatVector(
+ const Parcel* parcel, Vector<float> *vectorPtr, size_t defaultLength) {
+ std::unique_ptr<std::vector<float>> v;
+ status_t result = parcel->readFloatVector(&v);
+ if (result != OK) return result;
+ vectorPtr->clear();
+ if (v.get() != nullptr) {
+ for (const auto& iter : *v) {
+ vectorPtr->push_back(iter);
+ }
+ } else {
+ vectorPtr->resize(defaultLength);
+ }
+ return OK;
+ }
+ status_t writeFloatVector(Parcel* parcel, const Vector<float>& vector) const {
+ std::vector<float> v;
+ for (size_t i = 0; i < vector.size(); i++) {
+ v.push_back(vector[i]);
+ }
+ return parcel->writeFloatVector(v);
+ }
+
+ String16 mDeviceId;
+ int32_t mPortId;
+ uint32_t mType;
+ String16 mAddress;
+ int32_t mDeviceLocation;
+ int32_t mDeviceGroup;
+ int32_t mIndexInTheGroup;
+ Vector<float> mGeometricLocation;
+ Vector<float> mOrientation;
+ Vector<Vector<float>> mFrequencyResponses;
+ Vector<int> mChannelMapping;
+ float mSensitivity;
+ float mMaxSpl;
+ float mMinSpl;
+ int32_t mDirectionality;
+};
+
+} // namespace media
+} // namespace android
+
+#endif
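
Since MicrophoneInfo is a Parcelable, a round trip through a Parcel looks roughly like the sketch below (illustrative only; the characteristic struct would normally be filled in by the audio HAL):

    audio_microphone_characteristic_t characteristic{};   // assume populated by the HAL
    media::MicrophoneInfo info(characteristic);

    Parcel parcel;
    if (info.writeToParcel(&parcel) == OK) {               // serialize
        parcel.setDataPosition(0);                         // rewind before reading back
        media::MicrophoneInfo copy;
        status_t status = copy.readFromParcel(&parcel);    // OK on success
    }
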
diff --git a/include/media/MmapStreamCallback.h b/include/media/MmapStreamCallback.h
index 8098e79..31b8eb5 100644
--- a/include/media/MmapStreamCallback.h
+++ b/include/media/MmapStreamCallback.h
@@ -31,8 +31,9 @@
* The mmap stream should be torn down because conditions that permitted its creation with
* the requested parameters have changed and do not allow it to operate with the requested
* constraints any more.
+ * \param[in] handle handle for the client stream to tear down.
*/
- virtual void onTearDown() = 0;
+ virtual void onTearDown(audio_port_handle_t handle) = 0;
/**
* The volume to be applied to the use case specified when opening the stream has changed
diff --git a/include/media/MmapStreamInterface.h b/include/media/MmapStreamInterface.h
index d689e25..0196a0c 100644
--- a/include/media/MmapStreamInterface.h
+++ b/include/media/MmapStreamInterface.h
@@ -52,6 +52,9 @@
* \param[in,out] deviceId audio device the stream should preferably be routed to/from
* Requested as input,
* Actual as output
+ * \param[in,out] sessionId audio sessionId for the stream
+ * Requested as input, may be AUDIO_SESSION_ALLOCATE
+ * Actual as output
* \param[in] callback the MmapStreamCallback interface used by AudioFlinger to notify
* condition changes affecting the stream operation
* \param[out] interface the MmapStreamInterface interface controlling the created stream
@@ -66,6 +69,7 @@
audio_config_base_t *config,
const AudioClient& client,
audio_port_handle_t *deviceId,
+ audio_session_t *sessionId,
const sp<MmapStreamCallback>& callback,
sp<MmapStreamInterface>& interface,
audio_port_handle_t *handle);
diff --git a/include/media/TimeCheck.h b/include/media/TimeCheck.h
new file mode 120000
index 0000000..e3ef134
--- /dev/null
+++ b/include/media/TimeCheck.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/TimeCheck.h
\ No newline at end of file
diff --git a/include/media/VolumeShaper.h b/include/media/VolumeShaper.h
index 302641f..a3aaece 100644
--- a/include/media/VolumeShaper.h
+++ b/include/media/VolumeShaper.h
@@ -37,6 +37,8 @@
namespace android {
+namespace media {
+
// The native VolumeShaper class mirrors the java VolumeShaper class;
// in addition, the native class contains implementation for actual operation.
//
@@ -101,7 +103,7 @@
* See "frameworks/base/media/java/android/media/VolumeShaper.java" for
* details on the Java implementation.
*/
- class Configuration : public Interpolator<S, T>, public RefBase {
+ class Configuration : public Interpolator<S, T>, public RefBase, public Parcelable {
public:
// Must match with VolumeShaper.java in frameworks/base.
enum Type : int32_t {
@@ -283,7 +285,7 @@
}
// The parcel layout must match VolumeShaper.java
- status_t writeToParcel(Parcel *parcel) const {
+ status_t writeToParcel(Parcel *parcel) const override {
if (parcel == nullptr) return BAD_VALUE;
return parcel->writeInt32((int32_t)mType)
?: parcel->writeInt32(mId)
@@ -294,17 +296,17 @@
?: Interpolator<S, T>::writeToParcel(parcel);
}
- status_t readFromParcel(const Parcel &parcel) {
+ status_t readFromParcel(const Parcel *parcel) override {
int32_t type, optionFlags;
- return parcel.readInt32(&type)
+ return parcel->readInt32(&type)
?: setType((Type)type)
- ?: parcel.readInt32(&mId)
+ ?: parcel->readInt32(&mId)
?: mType == TYPE_ID
? NO_ERROR
- : parcel.readInt32(&optionFlags)
+ : parcel->readInt32(&optionFlags)
?: setOptionFlags((OptionFlag)optionFlags)
- ?: parcel.readDouble(&mDurationMs)
- ?: Interpolator<S, T>::readFromParcel(parcel)
+ ?: parcel->readDouble(&mDurationMs)
+ ?: Interpolator<S, T>::readFromParcel(*parcel)
?: checkCurve();
}
@@ -336,7 +338,7 @@
* See "frameworks/base/media/java/android/media/VolumeShaper.java" for
* details on the Java implementation.
*/
- class Operation : public RefBase {
+ class Operation : public RefBase, public Parcelable {
public:
// Must match with VolumeShaper.java.
enum Flag : int32_t {
@@ -418,18 +420,18 @@
return NO_ERROR;
}
- status_t writeToParcel(Parcel *parcel) const {
+ status_t writeToParcel(Parcel *parcel) const override {
if (parcel == nullptr) return BAD_VALUE;
return parcel->writeInt32((int32_t)mFlags)
?: parcel->writeInt32(mReplaceId)
?: parcel->writeFloat(mXOffset);
}
- status_t readFromParcel(const Parcel &parcel) {
+ status_t readFromParcel(const Parcel *parcel) override {
int32_t flags;
- return parcel.readInt32(&flags)
- ?: parcel.readInt32(&mReplaceId)
- ?: parcel.readFloat(&mXOffset)
+ return parcel->readInt32(&flags)
+ ?: parcel->readInt32(&mReplaceId)
+ ?: parcel->readFloat(&mXOffset)
?: setFlags((Flag)flags);
}
@@ -455,7 +457,7 @@
* See "frameworks/base/media/java/android/media/VolumeShaper.java" for
* details on the Java implementation.
*/
- class State : public RefBase {
+ class State : public RefBase, public Parcelable {
public:
State(T volume, S xOffset)
: mVolume(volume)
@@ -481,15 +483,15 @@
mXOffset = xOffset;
}
- status_t writeToParcel(Parcel *parcel) const {
+ status_t writeToParcel(Parcel *parcel) const override {
if (parcel == nullptr) return BAD_VALUE;
return parcel->writeFloat(mVolume)
?: parcel->writeFloat(mXOffset);
}
- status_t readFromParcel(const Parcel &parcel) {
- return parcel.readFloat(&mVolume)
- ?: parcel.readFloat(&mXOffset);
+ status_t readFromParcel(const Parcel *parcel) override {
+ return parcel->readFloat(&mVolume)
+ ?: parcel->readFloat(&mXOffset);
}
std::string toString() const {
@@ -1020,6 +1022,8 @@
std::list<VolumeShaper> mVolumeShapers; // list provides stable iterators on erase
}; // VolumeHandler
+} // namespace media
+
} // namespace android
#pragma pop_macro("LOG_TAG")
diff --git a/include/media/VorbisComment.h b/include/media/VorbisComment.h
new file mode 120000
index 0000000..adaa489
--- /dev/null
+++ b/include/media/VorbisComment.h
@@ -0,0 +1 @@
+../../media/libmediaextractor/include/media/VorbisComment.h
\ No newline at end of file
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
deleted file mode 120000
index c35401e..0000000
--- a/include/media/nbaio/NBLog.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include/media/nbaio/NBLog.h
\ No newline at end of file
diff --git a/include/media/nbaio/PerformanceAnalysis.h b/include/media/nbaio/PerformanceAnalysis.h
deleted file mode 120000
index 7acfc90..0000000
--- a/include/media/nbaio/PerformanceAnalysis.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include/media/nbaio/PerformanceAnalysis.h
\ No newline at end of file
diff --git a/include/media/nblog/NBLog.h b/include/media/nblog/NBLog.h
new file mode 120000
index 0000000..3cc366c
--- /dev/null
+++ b/include/media/nblog/NBLog.h
@@ -0,0 +1 @@
+../../../media/libnblog/include/media/nblog/NBLog.h
\ No newline at end of file
diff --git a/include/media/nblog/PerformanceAnalysis.h b/include/media/nblog/PerformanceAnalysis.h
new file mode 120000
index 0000000..6ead3bc
--- /dev/null
+++ b/include/media/nblog/PerformanceAnalysis.h
@@ -0,0 +1 @@
+../../../media/libnblog/include/media/nblog/PerformanceAnalysis.h
\ No newline at end of file
diff --git a/include/media/nblog/ReportPerformance.h b/include/media/nblog/ReportPerformance.h
new file mode 120000
index 0000000..e9b8e80
--- /dev/null
+++ b/include/media/nblog/ReportPerformance.h
@@ -0,0 +1 @@
+../../../media/libnblog/include/media/nblog/ReportPerformance.h
\ No newline at end of file
diff --git a/include/media/Crypto.h b/include/mediadrm/Crypto.h
similarity index 100%
rename from include/media/Crypto.h
rename to include/mediadrm/Crypto.h
diff --git a/include/media/CryptoHal.h b/include/mediadrm/CryptoHal.h
similarity index 100%
rename from include/media/CryptoHal.h
rename to include/mediadrm/CryptoHal.h
diff --git a/include/media/Drm.h b/include/mediadrm/Drm.h
similarity index 100%
rename from include/media/Drm.h
rename to include/mediadrm/Drm.h
diff --git a/include/media/DrmHal.h b/include/mediadrm/DrmHal.h
similarity index 100%
rename from include/media/DrmHal.h
rename to include/mediadrm/DrmHal.h
diff --git a/include/mediadrm/DrmMetrics.h b/include/mediadrm/DrmMetrics.h
new file mode 120000
index 0000000..abc966b
--- /dev/null
+++ b/include/mediadrm/DrmMetrics.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/DrmMetrics.h
\ No newline at end of file
diff --git a/include/media/DrmPluginPath.h b/include/mediadrm/DrmPluginPath.h
similarity index 100%
rename from include/media/DrmPluginPath.h
rename to include/mediadrm/DrmPluginPath.h
diff --git a/include/media/DrmSessionClientInterface.h b/include/mediadrm/DrmSessionClientInterface.h
similarity index 100%
rename from include/media/DrmSessionClientInterface.h
rename to include/mediadrm/DrmSessionClientInterface.h
diff --git a/include/media/DrmSessionManager.h b/include/mediadrm/DrmSessionManager.h
similarity index 100%
rename from include/media/DrmSessionManager.h
rename to include/mediadrm/DrmSessionManager.h
diff --git a/include/media/ICrypto.h b/include/mediadrm/ICrypto.h
similarity index 100%
rename from include/media/ICrypto.h
rename to include/mediadrm/ICrypto.h
diff --git a/include/media/IDrm.h b/include/mediadrm/IDrm.h
similarity index 100%
rename from include/media/IDrm.h
rename to include/mediadrm/IDrm.h
diff --git a/include/media/IDrmClient.h b/include/mediadrm/IDrmClient.h
similarity index 100%
rename from include/media/IDrmClient.h
rename to include/mediadrm/IDrmClient.h
diff --git a/include/media/IMediaDrmService.h b/include/mediadrm/IMediaDrmService.h
similarity index 100%
rename from include/media/IMediaDrmService.h
rename to include/mediadrm/IMediaDrmService.h
diff --git a/include/mediadrm/OWNERS b/include/mediadrm/OWNERS
new file mode 100644
index 0000000..e788754
--- /dev/null
+++ b/include/mediadrm/OWNERS
@@ -0,0 +1 @@
+jtinker@google.com
diff --git a/include/media/SharedLibrary.h b/include/mediadrm/SharedLibrary.h
similarity index 100%
rename from include/media/SharedLibrary.h
rename to include/mediadrm/SharedLibrary.h
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ff440bc..ca119d5 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -60,6 +60,8 @@
volatile int32_t mRear; // written by producer (output: client, input: server)
volatile int32_t mFlush; // incremented by client to indicate a request to flush;
// server notices and discards all data between mFront and mRear
+ volatile int32_t mStop; // set by client to indicate a stop frame position; server
+ // will not read beyond this position until start is called.
volatile uint32_t mUnderrunFrames; // server increments for each unavailable but desired frame
volatile uint32_t mUnderrunCount; // server increments for each underrun occurrence
};
@@ -335,6 +337,8 @@
mTimestamp.clear();
}
+ virtual void stop() { }; // called by client in AudioTrack::stop()
+
private:
// This is a copy of mCblk->mBufferSizeInFrames
uint32_t mBufferSizeInFrames; // effective size of the buffer
@@ -383,8 +387,14 @@
mPlaybackRateMutator.push(playbackRate);
}
+ // Sends flush and stop position information from the client to the server,
+ // used by streaming AudioTrack flush() or stop().
+ void sendStreamingFlushStop(bool flush);
+
virtual void flush();
+ void stop() override;
+
virtual uint32_t getUnderrunFrames() const {
return mCblk->u.mStreaming.mUnderrunFrames;
}
@@ -410,6 +420,8 @@
virtual void flush();
+ void stop() override;
+
#define MIN_LOOP 16 // minimum length of each loop iteration in frames
// setLoop(), setBufferPosition(), and setBufferPositionAndLoop() set the
@@ -438,7 +450,11 @@
return 0;
}
- virtual uint32_t getUnderrunFrames() const {
+ virtual uint32_t getUnderrunFrames() const override {
+ return 0;
+ }
+
+ virtual uint32_t getUnderrunCount() const override {
return 0;
}
@@ -528,6 +544,10 @@
// client will be notified via Futex
virtual void flushBufferIfNeeded();
+ // Returns the rear position of the AudioTrack shared ring buffer, limited by
+ // the stop frame position level.
+ virtual int32_t getRear() const = 0;
+
// Total count of the number of flushed frames since creation (never reset).
virtual int64_t framesFlushed() const { return mFlushed; }
@@ -603,10 +623,18 @@
return mDrained.load();
}
+ int32_t getRear() const override;
+
+ // Called on server side track start().
+ virtual void start();
+
private:
AudioPlaybackRate mPlaybackRate; // last observed playback rate
PlaybackRateQueue::Observer mPlaybackRateObserver;
+ // Last client stop-at position when start() was called. Used for streaming AudioTracks.
+ std::atomic<int32_t> mStopLast{0};
+
// The server keeps a copy here where it is safe from the client.
uint32_t mUnderrunCount; // echoed to mCblk
bool mUnderrunning; // used to detect edge of underrun
@@ -630,6 +658,10 @@
virtual void tallyUnderrunFrames(uint32_t frameCount);
virtual uint32_t getUnderrunFrames() const { return 0; }
+ int32_t getRear() const override;
+
+ void start() override { } // ignore for static tracks
+
private:
status_t updateStateWithLoop(StaticAudioTrackState *localState,
const StaticAudioTrackState &update) const;
@@ -657,6 +689,10 @@
size_t frameSize, bool clientInServer)
: ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/, clientInServer) { }
+ int32_t getRear() const override {
+ return mCblk->u.mStreaming.mRear; // For completeness only; mRear written by server.
+ }
+
protected:
virtual ~AudioRecordServerProxy() { }
};
diff --git a/include/private/media/OWNERS b/include/private/media/OWNERS
new file mode 100644
index 0000000..21723ba
--- /dev/null
+++ b/include/private/media/OWNERS
@@ -0,0 +1,3 @@
+elaurent@google.com
+gkasten@google.com
+hunga@google.com
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index a9d4dd1..712f118 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -25,94 +25,34 @@
namespace android {
-// Represents a color converted (RGB-based) video frame
-// with bitmap pixels stored in FrameBuffer
+// Represents a color converted (RGB-based) video frame with bitmap
+// pixels stored in FrameBuffer.
+// In a VideoFrame struct stored in IMemory, frame data and ICC data
+// come after the VideoFrame structure. Their locations can be retrieved
+// by getFlattenedData() and getFlattenedIccData().
class VideoFrame
{
public:
// Construct a VideoFrame object with the specified parameters,
- // will allocate frame buffer if |allocate| is set to true, will
- // allocate buffer to hold ICC data if |iccData| and |iccSize|
- // indicate its presence.
+ // will calculate frame buffer size if |hasData| is set to true.
VideoFrame(uint32_t width, uint32_t height,
uint32_t displayWidth, uint32_t displayHeight,
- uint32_t angle, uint32_t bpp, bool allocate,
- const void *iccData, size_t iccSize):
+ uint32_t tileWidth, uint32_t tileHeight,
+ uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
mWidth(width), mHeight(height),
mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
+ mTileWidth(tileWidth), mTileHeight(tileHeight),
mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
- mSize(0), mIccSize(0), mReserved(0), mData(0), mIccData(0) {
- if (allocate) {
- mSize = mRowBytes * mHeight;
- mData = new uint8_t[mSize];
- if (mData == NULL) {
- mSize = 0;
- }
- }
-
- if (iccData != NULL && iccSize > 0) {
- mIccSize = iccSize;
- mIccData = new uint8_t[iccSize];
- if (mIccData != NULL) {
- memcpy(mIccData, iccData, iccSize);
- } else {
- mIccSize = 0;
- }
- }
+ mSize(hasData ? (bpp * width * height) : 0),
+ mIccSize(iccSize), mReserved(0) {
}
- // Deep copy of both the information fields and the frame data
- VideoFrame(const VideoFrame& copy) {
- copyInfoOnly(copy);
-
- mSize = copy.mSize;
- mData = NULL; // initialize it first
- if (mSize > 0 && copy.mData != NULL) {
- mData = new uint8_t[mSize];
- if (mData != NULL) {
- memcpy(mData, copy.mData, mSize);
- } else {
- mSize = 0;
- }
- }
-
- mIccSize = copy.mIccSize;
- mIccData = NULL; // initialize it first
- if (mIccSize > 0 && copy.mIccData != NULL) {
- mIccData = new uint8_t[mIccSize];
- if (mIccData != NULL) {
- memcpy(mIccData, copy.mIccData, mIccSize);
- } else {
- mIccSize = 0;
- }
- }
- }
-
- ~VideoFrame() {
- if (mData != 0) {
- delete[] mData;
- }
- if (mIccData != 0) {
- delete[] mIccData;
- }
- }
-
- // Copy |copy| to a flattened VideoFrame in IMemory, 'this' must point to
- // a chunk of memory back by IMemory of size at least getFlattenedSize()
- // of |copy|.
- void copyFlattened(const VideoFrame& copy) {
- copyInfoOnly(copy);
-
- mSize = copy.mSize;
- mData = NULL; // initialize it first
- if (copy.mSize > 0 && copy.mData != NULL) {
- memcpy(getFlattenedData(), copy.mData, copy.mSize);
- }
-
- mIccSize = copy.mIccSize;
- mIccData = NULL; // initialize it first
- if (copy.mIccSize > 0 && copy.mIccData != NULL) {
- memcpy(getFlattenedIccData(), copy.mIccData, copy.mIccSize);
+ void init(const VideoFrame& copy, const void* iccData, size_t iccSize) {
+ *this = copy;
+ if (mIccSize == iccSize && iccSize > 0 && iccData != NULL) {
+ memcpy(getFlattenedIccData(), iccData, iccSize);
+ } else {
+ mIccSize = 0;
}
}
@@ -136,38 +76,14 @@
uint32_t mHeight; // Decoded image height before rotation
uint32_t mDisplayWidth; // Display width before rotation
uint32_t mDisplayHeight; // Display height before rotation
+ uint32_t mTileWidth; // Tile width (0 if image doesn't have grid)
+ uint32_t mTileHeight; // Tile height (0 if image doesn't have grid)
int32_t mRotationAngle; // Rotation angle, clockwise, should be multiple of 90
uint32_t mBytesPerPixel; // Number of bytes per pixel
uint32_t mRowBytes; // Number of bytes per row before rotation
- uint32_t mSize; // Number of bytes in mData
- uint32_t mIccSize; // Number of bytes in mIccData
+ uint32_t mSize; // Number of bytes of frame data
+ uint32_t mIccSize; // Number of bytes of ICC data
uint32_t mReserved; // (padding to make mData 64-bit aligned)
-
- // mData should be 64-bit aligned to prevent additional padding
- uint8_t* mData; // Actual binary data
- // pad structure so it's the same size on 64-bit and 32-bit
- char mPadding[8 - sizeof(mData)];
-
- // mIccData should be 64-bit aligned to prevent additional padding
- uint8_t* mIccData; // Actual binary data
- // pad structure so it's the same size on 64-bit and 32-bit
- char mIccPadding[8 - sizeof(mIccData)];
-
-private:
- //
- // Utility methods used only within VideoFrame struct
- //
-
- // Copy the information fields only
- void copyInfoOnly(const VideoFrame& copy) {
- mWidth = copy.mWidth;
- mHeight = copy.mHeight;
- mDisplayWidth = copy.mDisplayWidth;
- mDisplayHeight = copy.mDisplayHeight;
- mRotationAngle = copy.mRotationAngle;
- mBytesPerPixel = copy.mBytesPerPixel;
- mRowBytes = copy.mRowBytes;
- }
};
}; // namespace android
diff --git a/include/soundtrigger/OWNERS b/include/soundtrigger/OWNERS
new file mode 100644
index 0000000..e83f6b9
--- /dev/null
+++ b/include/soundtrigger/OWNERS
@@ -0,0 +1,2 @@
+elaurent@google.com
+thorntonc@google.com
diff --git a/media/OWNERS b/media/OWNERS
index 1605efd..1f687a2 100644
--- a/media/OWNERS
+++ b/media/OWNERS
@@ -2,6 +2,7 @@
dwkang@google.com
elaurent@google.com
essick@google.com
+hkuang@google.com
hunga@google.com
jmtrivi@google.com
krocard@google.com
@@ -14,5 +15,6 @@
rachad@google.com
rago@google.com
robertshih@google.com
+taklee@google.com
wjia@google.com
wonsik@google.com
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 3ee7494..70c281a 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -3,7 +3,8 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
- main_audioserver.cpp
+ main_audioserver.cpp \
+ ../libaudioclient/aidl/android/media/IAudioRecord.aidl
LOCAL_SHARED_LIBRARIES := \
libaaudioservice \
@@ -12,11 +13,13 @@
libbinder \
libcutils \
liblog \
+ libhidltransport \
+ libhwbinder \
+ libmedia \
libmedialogservice \
libnbaio \
libsoundtriggerservice \
- libutils \
- libhwbinder
+ libutils
# TODO oboeservice is the old folder name for aaudioservice. It will be changed.
LOCAL_C_INCLUDES := \
@@ -33,9 +36,13 @@
frameworks/av/media/libaaudio/include \
frameworks/av/media/libaaudio/src \
frameworks/av/media/libaaudio/src/binding \
+ frameworks/av/media/libmedia \
$(call include-path-for, audio-utils) \
external/sonic \
+LOCAL_AIDL_INCLUDES := \
+ frameworks/av/media/libaudioclient/aidl
+
# If AUDIOSERVER_MULTILIB in device.mk is non-empty then it is used to control
# the LOCAL_MULTILIB for all audioserver exclusive libraries.
# This is relevant for 64 bit architectures where either or both
diff --git a/media/audioserver/OWNERS b/media/audioserver/OWNERS
new file mode 100644
index 0000000..f9cb567
--- /dev/null
+++ b/media/audioserver/OWNERS
@@ -0,0 +1 @@
+gkasten@google.com
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index 366f52a..1f2e82f 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -1,11 +1,14 @@
service audioserver /system/bin/audioserver
- class main
+ class core
user audioserver
# media gid needed for /dev/fm (radio) and for /data/misc/media (tee)
group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct wakelock
capabilities BLOCK_SUSPEND
ioprio rt 4
writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
+ onrestart restart vendor.audio-hal-2-0
+ # Keep the original service name for backward compatibility when upgrading
+ # O-MR1 devices with a framework-only update.
onrestart restart audio-hal-2-0
on property:vts.native_server.on=1
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 474ef97..db57248 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -25,12 +25,9 @@
#include <binder/IPCThreadState.h>
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
+#include <hidl/HidlTransportSupport.h>
#include <utils/Log.h>
-// FIXME: remove when BUG 31748996 is fixed
-#include <hwbinder/IPCThreadState.h>
-#include <hwbinder/ProcessState.h>
-
// from LOCAL_C_INCLUDES
#include "aaudio/AAudioTesting.h"
#include "AudioFlinger.h"
@@ -38,12 +35,19 @@
#include "AAudioService.h"
#include "utility/AAudioUtilities.h"
#include "MediaLogService.h"
+#include "MediaUtils.h"
#include "SoundTriggerHwService.h"
using namespace android;
int main(int argc __unused, char **argv)
{
+ // TODO: update with refined parameters
+ limitProcessMemory(
+ "audio.maxmem", /* "ro.audio.maxmem", property that defines limit */
+ (size_t)512 * (1 << 20), /* SIZE_MAX, upper limit in bytes */
+ 20 /* upper limit as percentage of physical RAM */);
+
signal(SIGPIPE, SIG_IGN);
bool doLog = (bool) property_get_bool("ro.test_harness", 0);
@@ -128,6 +132,7 @@
prctl(PR_SET_PDEATHSIG, SIGKILL); // if parent media.log dies before me, kill me also
setpgid(0, 0); // but if I die first, don't kill my parent
}
+ android::hardware::configureRpcThreadpool(4, false /*callerWillJoin*/);
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
@@ -145,10 +150,6 @@
SoundTriggerHwService::instantiate();
ProcessState::self()->startThreadPool();
-
-// FIXME: remove when BUG 31748996 is fixed
- android::hardware::ProcessState::self()->startThreadPool();
-
IPCThreadState::self()->joinThreadPool();
}
}
diff --git a/media/common_time/OWNERS b/media/common_time/OWNERS
new file mode 100644
index 0000000..f9cb567
--- /dev/null
+++ b/media/common_time/OWNERS
@@ -0,0 +1 @@
+gkasten@google.com
diff --git a/media/extractors/Android.bp b/media/extractors/Android.bp
new file mode 100644
index 0000000..e8176cf
--- /dev/null
+++ b/media/extractors/Android.bp
@@ -0,0 +1,3 @@
+subdirs = [
+ "*",
+]
diff --git a/media/extractors/aac/AACExtractor.cpp b/media/extractors/aac/AACExtractor.cpp
new file mode 100644
index 0000000..9fc5a76
--- /dev/null
+++ b/media/extractors/aac/AACExtractor.cpp
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AACExtractor"
+#include <utils/Log.h>
+
+#include "AACExtractor.h"
+#include <media/DataSourceBase.h>
+#include <media/MediaTrack.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MetaDataUtils.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class AACSource : public MediaTrack {
+public:
+ AACSource(
+ DataSourceBase *source,
+ MetaDataBase &meta,
+ const Vector<uint64_t> &offset_vector,
+ int64_t frame_duration_us);
+
+ virtual status_t start(MetaDataBase *params = NULL);
+ virtual status_t stop();
+
+ virtual status_t getFormat(MetaDataBase&);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+protected:
+ virtual ~AACSource();
+
+private:
+ static const size_t kMaxFrameSize;
+ DataSourceBase *mDataSource;
+ MetaDataBase mMeta;
+
+ off64_t mOffset;
+ int64_t mCurrentTimeUs;
+ bool mStarted;
+ MediaBufferGroup *mGroup;
+
+ Vector<uint64_t> mOffsetVector;
+ int64_t mFrameDurationUs;
+
+ AACSource(const AACSource &);
+ AACSource &operator=(const AACSource &);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Returns the sample rate based on the sampling frequency index
+uint32_t get_sample_rate(const uint8_t sf_index)
+{
+ static const uint32_t sample_rates[] =
+ {
+ 96000, 88200, 64000, 48000, 44100, 32000,
+ 24000, 22050, 16000, 12000, 11025, 8000
+ };
+
+ if (sf_index < sizeof(sample_rates) / sizeof(sample_rates[0])) {
+ return sample_rates[sf_index];
+ }
+
+ return 0;
+}
+
+// Returns the frame length in bytes as described in an ADTS header starting at the given offset,
+// or 0 if the size can't be read due to an error in the header or a read failure.
+// The returned value is the AAC frame size including the ADTS header length
+// (regardless of whether the optional CRC is present).
+// If headerSize is non-NULL, it will be used to return the size of the header of this ADTS frame.
+static size_t getAdtsFrameLength(DataSourceBase *source, off64_t offset, size_t* headerSize) {
+
+ const size_t kAdtsHeaderLengthNoCrc = 7;
+ const size_t kAdtsHeaderLengthWithCrc = 9;
+
+ size_t frameSize = 0;
+
+ uint8_t syncword[2];
+ if (source->readAt(offset, &syncword, 2) != 2) {
+ return 0;
+ }
+ if ((syncword[0] != 0xff) || ((syncword[1] & 0xf6) != 0xf0)) {
+ return 0;
+ }
+
+ uint8_t protectionAbsent;
+ if (source->readAt(offset + 1, &protectionAbsent, 1) < 1) {
+ return 0;
+ }
+ protectionAbsent &= 0x1;
+
+ uint8_t header[3];
+ if (source->readAt(offset + 3, &header, 3) < 3) {
+ return 0;
+ }
+
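+ // aac_frame_length is a 13-bit field spanning ADTS bytes 3..5: the low 2 bits of
+ // byte 3, all of byte 4, and the top 3 bits of byte 5.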
+ frameSize = (header[0] & 0x3) << 11 | header[1] << 3 | header[2] >> 5;
+
+ // protectionAbsent is 0 if there is CRC
+ size_t headSize = protectionAbsent ? kAdtsHeaderLengthNoCrc : kAdtsHeaderLengthWithCrc;
+ if (headSize > frameSize) {
+ return 0;
+ }
+ if (headerSize != NULL) {
+ *headerSize = headSize;
+ }
+
+ return frameSize;
+}
+
+AACExtractor::AACExtractor(
+ DataSourceBase *source, off64_t offset)
+ : mDataSource(source),
+ mInitCheck(NO_INIT),
+ mFrameDurationUs(0) {
+
+ uint8_t profile, sf_index, channel, header[2];
+ if (mDataSource->readAt(offset + 2, &header, 2) < 2) {
+ return;
+ }
+
+ profile = (header[0] >> 6) & 0x3;
+ sf_index = (header[0] >> 2) & 0xf;
+ uint32_t sr = get_sample_rate(sf_index);
+ if (sr == 0) {
+ return;
+ }
+ channel = (header[0] & 0x1) << 2 | (header[1] >> 6);
+
+ MakeAACCodecSpecificData(mMeta, profile, sf_index, channel);
+
+ off64_t streamSize, numFrames = 0;
+ size_t frameSize = 0;
+ int64_t duration = 0;
+
+ if (mDataSource->getSize(&streamSize) == OK) {
+ while (offset < streamSize) {
+ if ((frameSize = getAdtsFrameLength(source, offset, NULL)) == 0) {
+ ALOGW("prematured AAC stream (%lld vs %lld)",
+ (long long)offset, (long long)streamSize);
+ break;
+ }
+
+ mOffsetVector.push(offset);
+
+ offset += frameSize;
+ numFrames ++;
+ }
+
+ // Round up and get the duration
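+ // (each AAC frame decodes to 1024 PCM samples, so the per-frame duration is
+ // ceil(1024 * 1e6 / sampleRate) microseconds)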
+ mFrameDurationUs = (1024 * 1000000ll + (sr - 1)) / sr;
+ duration = numFrames * mFrameDurationUs;
+ mMeta.setInt64(kKeyDuration, duration);
+ }
+
+ mInitCheck = OK;
+}
+
+AACExtractor::~AACExtractor() {
+}
+
+status_t AACExtractor::getMetaData(MetaDataBase &meta) {
+ meta.clear();
+ if (mInitCheck == OK) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC_ADTS);
+ }
+
+ return OK;
+}
+
+size_t AACExtractor::countTracks() {
+ return mInitCheck == OK ? 1 : 0;
+}
+
+MediaTrack *AACExtractor::getTrack(size_t index) {
+ if (mInitCheck != OK || index != 0) {
+ return NULL;
+ }
+
+ return new AACSource(mDataSource, mMeta, mOffsetVector, mFrameDurationUs);
+}
+
+status_t AACExtractor::getTrackMetaData(MetaDataBase &meta, size_t index, uint32_t /* flags */) {
+ if (mInitCheck != OK || index != 0) {
+ return UNKNOWN_ERROR;
+ }
+
+ meta = mMeta;
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// 8192 = 2^13, an upper bound for the 13-bit ADTS frame size (in bytes)
+const size_t AACSource::kMaxFrameSize = 8192;
+
+AACSource::AACSource(
+ DataSourceBase *source,
+ MetaDataBase &meta,
+ const Vector<uint64_t> &offset_vector,
+ int64_t frame_duration_us)
+ : mDataSource(source),
+ mMeta(meta),
+ mOffset(0),
+ mCurrentTimeUs(0),
+ mStarted(false),
+ mGroup(NULL),
+ mOffsetVector(offset_vector),
+ mFrameDurationUs(frame_duration_us) {
+}
+
+AACSource::~AACSource() {
+ if (mStarted) {
+ stop();
+ }
+}
+
+status_t AACSource::start(MetaDataBase * /* params */) {
+ CHECK(!mStarted);
+
+ if (mOffsetVector.empty()) {
+ mOffset = 0;
+ } else {
+ mOffset = mOffsetVector.itemAt(0);
+ }
+
+ mCurrentTimeUs = 0;
+ mGroup = new MediaBufferGroup;
+ mGroup->add_buffer(MediaBufferBase::Create(kMaxFrameSize));
+ mStarted = true;
+
+ return OK;
+}
+
+status_t AACSource::stop() {
+ CHECK(mStarted);
+
+ delete mGroup;
+ mGroup = NULL;
+
+ mStarted = false;
+ return OK;
+}
+
+status_t AACSource::getFormat(MetaDataBase &meta) {
+ meta = mMeta;
+ return OK;
+}
+
+status_t AACSource::read(
+ MediaBufferBase **out, const ReadOptions *options) {
+ *out = NULL;
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ if (mFrameDurationUs > 0) {
+ int64_t seekFrame = seekTimeUs / mFrameDurationUs;
+ if (seekFrame < 0 || seekFrame >= (int64_t)mOffsetVector.size()) {
+ android_errorWriteLog(0x534e4554, "70239507");
+ return ERROR_MALFORMED;
+ }
+ mCurrentTimeUs = seekFrame * mFrameDurationUs;
+
+ mOffset = mOffsetVector.itemAt(seekFrame);
+ }
+ }
+
+ size_t frameSize, frameSizeWithoutHeader, headerSize;
+ if ((frameSize = getAdtsFrameLength(mDataSource, mOffset, &headerSize)) == 0) {
+ return ERROR_END_OF_STREAM;
+ }
+
+ MediaBufferBase *buffer;
+ status_t err = mGroup->acquire_buffer(&buffer);
+ if (err != OK) {
+ return err;
+ }
+
+ frameSizeWithoutHeader = frameSize - headerSize;
+ if (mDataSource->readAt(mOffset + headerSize, buffer->data(),
+ frameSizeWithoutHeader) != (ssize_t)frameSizeWithoutHeader) {
+ buffer->release();
+ buffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ buffer->set_range(0, frameSizeWithoutHeader);
+ buffer->meta_data().setInt64(kKeyTime, mCurrentTimeUs);
+ buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+
+ mOffset += frameSize;
+ mCurrentTimeUs += mFrameDurationUs;
+
+ *out = buffer;
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static MediaExtractor* CreateExtractor(
+ DataSourceBase *source,
+ void *meta) {
+ off64_t offset = *static_cast<off64_t*>(meta);
+ return new AACExtractor(source, offset);
+}
+
+static MediaExtractor::CreatorFunc Sniff(
+ DataSourceBase *source, float *confidence, void **meta,
+ MediaExtractor::FreeMetaFunc *freeMeta) {
+ off64_t pos = 0;
+
+ for (;;) {
+ uint8_t id3header[10];
+ if (source->readAt(pos, id3header, sizeof(id3header))
+ < (ssize_t)sizeof(id3header)) {
+ return NULL;
+ }
+
+ if (memcmp("ID3", id3header, 3)) {
+ break;
+ }
+
+ // Skip the ID3v2 header.
+
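+ // The ID3v2 tag size is a 28-bit "syncsafe" integer (7 significant bits per byte)
+ // and does not include the 10-byte tag header, hence the "+ 10" below.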
+ size_t len =
+ ((id3header[6] & 0x7f) << 21)
+ | ((id3header[7] & 0x7f) << 14)
+ | ((id3header[8] & 0x7f) << 7)
+ | (id3header[9] & 0x7f);
+
+ len += 10;
+
+ pos += len;
+
+ ALOGV("skipped ID3 tag, new starting offset is %lld (0x%016llx)",
+ (long long)pos, (long long)pos);
+ }
+
+ uint8_t header[2];
+
+ if (source->readAt(pos, &header, 2) != 2) {
+ return NULL;
+ }
+
+ // ADTS syncword
+ if ((header[0] == 0xff) && ((header[1] & 0xf6) == 0xf0)) {
+ *confidence = 0.2;
+
+ off64_t *offPtr = (off64_t*) malloc(sizeof(off64_t));
+ *offPtr = pos;
+ *meta = offPtr;
+ *freeMeta = ::free;
+
+ return CreateExtractor;
+ }
+
+ return NULL;
+}
+
+
+extern "C" {
+// This is the only symbol that needs to be exported
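+// It returns the extractor's registration record: the extractor API version, a UUID,
+// this plugin's version, a human-readable name, and the Sniff entry point.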
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("4fd80eae-03d2-4d72-9eb9-48fa6bb54613"),
+ 1, // version
+ "AAC Extractor",
+ Sniff
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/extractors/aac/AACExtractor.h b/media/extractors/aac/AACExtractor.h
new file mode 100644
index 0000000..9dadbed
--- /dev/null
+++ b/media/extractors/aac/AACExtractor.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAC_EXTRACTOR_H_
+
+#define AAC_EXTRACTOR_H_
+
+#include <media/MediaExtractor.h>
+#include <media/stagefright/MetaDataBase.h>
+
+#include <utils/Vector.h>
+
+namespace android {
+
+struct AMessage;
+class String8;
+
+class AACExtractor : public MediaExtractor {
+public:
+ AACExtractor(DataSourceBase *source, off64_t offset);
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+ virtual const char * name() { return "AACExtractor"; }
+
+protected:
+ virtual ~AACExtractor();
+
+private:
+ DataSourceBase *mDataSource;
+ MetaDataBase mMeta;
+ status_t mInitCheck;
+
+ Vector<uint64_t> mOffsetVector;
+ int64_t mFrameDurationUs;
+
+ AACExtractor(const AACExtractor &);
+ AACExtractor &operator=(const AACExtractor &);
+};
+
+bool SniffAAC(
+ DataSourceBase *source, String8 *mimeType, float *confidence, off64_t *offset);
+
+} // namespace android
+
+#endif // AAC_EXTRACTOR_H_
diff --git a/media/extractors/aac/Android.bp b/media/extractors/aac/Android.bp
new file mode 100644
index 0000000..5f05b42
--- /dev/null
+++ b/media/extractors/aac/Android.bp
@@ -0,0 +1,43 @@
+cc_library_shared {
+
+ srcs: ["AACExtractor.cpp"],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmediaextractor",
+ ],
+
+ static_libs: [
+ "libstagefright_foundation",
+ "libstagefright_metadatautils",
+ "libutils",
+ ],
+
+ name: "libaacextractor",
+ relative_install_path: "extractors",
+
+ compile_multilib: "first",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/extractors/aac/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/extractors/aac/MODULE_LICENSE_APACHE2
diff --git a/media/libstagefright/matroska/NOTICE b/media/extractors/aac/NOTICE
similarity index 100%
copy from media/libstagefright/matroska/NOTICE
copy to media/extractors/aac/NOTICE
diff --git a/media/extractors/aac/exports.lds b/media/extractors/aac/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/aac/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
diff --git a/media/extractors/amr/AMRExtractor.cpp b/media/extractors/amr/AMRExtractor.cpp
new file mode 100644
index 0000000..f56d5ef
--- /dev/null
+++ b/media/extractors/amr/AMRExtractor.cpp
@@ -0,0 +1,392 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AMRExtractor"
+#include <utils/Log.h>
+
+#include "AMRExtractor.h"
+
+#include <media/DataSourceBase.h>
+#include <media/MediaTrack.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class AMRSource : public MediaTrack {
+public:
+ AMRSource(
+ DataSourceBase *source,
+ MetaDataBase &meta,
+ bool isWide,
+ const off64_t *offset_table,
+ size_t offset_table_length);
+
+ virtual status_t start(MetaDataBase *params = NULL);
+ virtual status_t stop();
+
+ virtual status_t getFormat(MetaDataBase &);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+protected:
+ virtual ~AMRSource();
+
+private:
+ DataSourceBase *mDataSource;
+ MetaDataBase mMeta;
+ bool mIsWide;
+
+ off64_t mOffset;
+ int64_t mCurrentTimeUs;
+ bool mStarted;
+ MediaBufferGroup *mGroup;
+
+ off64_t mOffsetTable[OFFSET_TABLE_LEN];
+ size_t mOffsetTableLength;
+
+ AMRSource(const AMRSource &);
+ AMRSource &operator=(const AMRSource &);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+static size_t getFrameSize(bool isWide, unsigned FT) {
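+ // The tables below list payload sizes in bits per 20 ms frame for each frame type;
+ // they are converted to whole bytes (plus the 1-byte frame header) further down.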
+ static const size_t kFrameSizeNB[16] = {
+ 95, 103, 118, 134, 148, 159, 204, 244,
+ 39, 43, 38, 37, // SID
+ 0, 0, 0, // future use
+ 0 // no data
+ };
+ static const size_t kFrameSizeWB[16] = {
+ 132, 177, 253, 285, 317, 365, 397, 461, 477,
+ 40, // SID
+ 0, 0, 0, 0, // future use
+ 0, // speech lost
+ 0 // no data
+ };
+
+ if (FT > 15 || (isWide && FT > 9 && FT < 14) || (!isWide && FT > 11 && FT < 15)) {
+ ALOGE("illegal AMR frame type %d", FT);
+ return 0;
+ }
+
+ size_t frameSize = isWide ? kFrameSizeWB[FT] : kFrameSizeNB[FT];
+
+ // Round up bits to bytes and add 1 for the header byte.
+ frameSize = (frameSize + 7) / 8 + 1;
+
+ return frameSize;
+}
+
+static status_t getFrameSizeByOffset(DataSourceBase *source,
+ off64_t offset, bool isWide, size_t *frameSize) {
+ uint8_t header;
+ ssize_t count = source->readAt(offset, &header, 1);
+ if (count == 0) {
+ return ERROR_END_OF_STREAM;
+ } else if (count < 0) {
+ return ERROR_IO;
+ }
+
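+ // The frame type (FT) occupies bits 6..3 of the AMR frame header (ToC) byte.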
+ unsigned FT = (header >> 3) & 0x0f;
+
+ *frameSize = getFrameSize(isWide, FT);
+ if (*frameSize == 0) {
+ return ERROR_MALFORMED;
+ }
+ return OK;
+}
+
+static bool SniffAMR(
+ DataSourceBase *source, bool *isWide, float *confidence) {
+ char header[9];
+
+ if (source->readAt(0, header, sizeof(header)) != sizeof(header)) {
+ return false;
+ }
+
+ if (!memcmp(header, "#!AMR\n", 6)) {
+ if (isWide != nullptr) {
+ *isWide = false;
+ }
+ *confidence = 0.5;
+
+ return true;
+ } else if (!memcmp(header, "#!AMR-WB\n", 9)) {
+ if (isWide != nullptr) {
+ *isWide = true;
+ }
+ *confidence = 0.5;
+
+ return true;
+ }
+
+ return false;
+}
+
+AMRExtractor::AMRExtractor(DataSourceBase *source)
+ : mDataSource(source),
+ mInitCheck(NO_INIT),
+ mOffsetTableLength(0) {
+ float confidence;
+ if (!SniffAMR(mDataSource, &mIsWide, &confidence)) {
+ return;
+ }
+
+ mMeta.setCString(
+ kKeyMIMEType, mIsWide ? MEDIA_MIMETYPE_AUDIO_AMR_WB
+ : MEDIA_MIMETYPE_AUDIO_AMR_NB);
+
+ mMeta.setInt32(kKeyChannelCount, 1);
+ mMeta.setInt32(kKeySampleRate, mIsWide ? 16000 : 8000);
+
+ off64_t offset = mIsWide ? 9 : 6;
+ off64_t streamSize;
+ size_t frameSize, numFrames = 0;
+ int64_t duration = 0;
+
+ if (mDataSource->getSize(&streamSize) == OK) {
+ while (offset < streamSize) {
+ status_t status = getFrameSizeByOffset(source, offset, mIsWide, &frameSize);
+ if (status == ERROR_END_OF_STREAM) {
+ break;
+ } else if (status != OK) {
+ return;
+ }
+
+ if ((numFrames % 50 == 0) && (numFrames / 50 < OFFSET_TABLE_LEN)) {
+ CHECK_EQ(mOffsetTableLength, numFrames / 50);
+ mOffsetTable[mOffsetTableLength] = offset - (mIsWide ? 9: 6);
+ mOffsetTableLength ++;
+ }
+
+ offset += frameSize;
+ duration += 20000; // Each frame is 20ms
+ numFrames ++;
+ }
+
+ mMeta.setInt64(kKeyDuration, duration);
+ }
+
+ mInitCheck = OK;
+}
+
+AMRExtractor::~AMRExtractor() {
+}
+
+status_t AMRExtractor::getMetaData(MetaDataBase &meta) {
+ meta.clear();
+
+ if (mInitCheck == OK) {
+ meta.setCString(kKeyMIMEType, mIsWide ? "audio/amr-wb" : "audio/amr");
+ }
+
+ return OK;
+}
+
+size_t AMRExtractor::countTracks() {
+ return mInitCheck == OK ? 1 : 0;
+}
+
+MediaTrack *AMRExtractor::getTrack(size_t index) {
+ if (mInitCheck != OK || index != 0) {
+ return NULL;
+ }
+
+ return new AMRSource(mDataSource, mMeta, mIsWide,
+ mOffsetTable, mOffsetTableLength);
+}
+
+status_t AMRExtractor::getTrackMetaData(MetaDataBase &meta, size_t index, uint32_t /* flags */) {
+ if (mInitCheck != OK || index != 0) {
+ return UNKNOWN_ERROR;
+ }
+
+ meta = mMeta;
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+AMRSource::AMRSource(
+ DataSourceBase *source, MetaDataBase &meta,
+ bool isWide, const off64_t *offset_table, size_t offset_table_length)
+ : mDataSource(source),
+ mMeta(meta),
+ mIsWide(isWide),
+ mOffset(mIsWide ? 9 : 6),
+ mCurrentTimeUs(0),
+ mStarted(false),
+ mGroup(NULL),
+ mOffsetTableLength(offset_table_length) {
+ if (mOffsetTableLength > 0 && mOffsetTableLength <= OFFSET_TABLE_LEN) {
+ memcpy ((char*)mOffsetTable, (char*)offset_table, sizeof(off64_t) * mOffsetTableLength);
+ }
+}
+
+AMRSource::~AMRSource() {
+ if (mStarted) {
+ stop();
+ }
+}
+
+status_t AMRSource::start(MetaDataBase * /* params */) {
+ CHECK(!mStarted);
+
+ mOffset = mIsWide ? 9 : 6;
+ mCurrentTimeUs = 0;
+ mGroup = new MediaBufferGroup;
+ mGroup->add_buffer(MediaBufferBase::Create(128));
+ mStarted = true;
+
+ return OK;
+}
+
+status_t AMRSource::stop() {
+ CHECK(mStarted);
+
+ delete mGroup;
+ mGroup = NULL;
+
+ mStarted = false;
+ return OK;
+}
+
+status_t AMRSource::getFormat(MetaDataBase &meta) {
+ meta = mMeta;
+ return OK;
+}
+
+status_t AMRSource::read(
+ MediaBufferBase **out, const ReadOptions *options) {
+ *out = NULL;
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if (mOffsetTableLength > 0 && options && options->getSeekTo(&seekTimeUs, &mode)) {
+ size_t size;
+ int64_t seekFrame = seekTimeUs / 20000ll; // 20ms per frame.
+ mCurrentTimeUs = seekFrame * 20000ll;
+
+ size_t index = seekFrame < 0 ? 0 : seekFrame / 50;
+ if (index >= mOffsetTableLength) {
+ index = mOffsetTableLength - 1;
+ }
+
+ mOffset = mOffsetTable[index] + (mIsWide ? 9 : 6);
+
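+ // The offset table holds one entry per 50 frames (one second of audio), so walk
+ // forward frame by frame from the nearest stored offset to reach the target frame.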
+ for (size_t i = 0; i < seekFrame - index * 50; i++) {
+ status_t err;
+ if ((err = getFrameSizeByOffset(mDataSource, mOffset,
+ mIsWide, &size)) != OK) {
+ return err;
+ }
+ mOffset += size;
+ }
+ }
+
+ uint8_t header;
+ ssize_t n = mDataSource->readAt(mOffset, &header, 1);
+
+ if (n < 1) {
+ return ERROR_END_OF_STREAM;
+ }
+
+ if (header & 0x83) {
+ // Padding bits must be 0.
+
+ ALOGE("padding bits must be 0, header is 0x%02x", header);
+
+ return ERROR_MALFORMED;
+ }
+
+ unsigned FT = (header >> 3) & 0x0f;
+
+ size_t frameSize = getFrameSize(mIsWide, FT);
+ if (frameSize == 0) {
+ return ERROR_MALFORMED;
+ }
+
+ MediaBufferBase *buffer;
+ status_t err = mGroup->acquire_buffer(&buffer);
+ if (err != OK) {
+ return err;
+ }
+
+ n = mDataSource->readAt(mOffset, buffer->data(), frameSize);
+
+ if (n != (ssize_t)frameSize) {
+ buffer->release();
+ buffer = NULL;
+
+ if (n < 0) {
+ return ERROR_IO;
+ } else {
+ // only a partial frame is available; treat it as EOS.
+ mOffset += n;
+ return ERROR_END_OF_STREAM;
+ }
+ }
+
+ buffer->set_range(0, frameSize);
+ buffer->meta_data().setInt64(kKeyTime, mCurrentTimeUs);
+ buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+
+ mOffset += frameSize;
+ mCurrentTimeUs += 20000; // Each frame is 20ms
+
+ *out = buffer;
+
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("c86639c9-2f31-40ac-a715-fa01b4493aaf"),
+ 1,
+ "AMR Extractor",
+ [](
+ DataSourceBase *source,
+ float *confidence,
+ void **,
+ MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
+ if (SniffAMR(source, nullptr, confidence)) {
+ return [](
+ DataSourceBase *source,
+ void *) -> MediaExtractor* {
+ return new AMRExtractor(source);};
+ }
+ return NULL;
+ }
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/extractors/amr/AMRExtractor.h b/media/extractors/amr/AMRExtractor.h
new file mode 100644
index 0000000..c90b325
--- /dev/null
+++ b/media/extractors/amr/AMRExtractor.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AMR_EXTRACTOR_H_
+
+#define AMR_EXTRACTOR_H_
+
+#include <utils/Errors.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/MetaDataBase.h>
+
+namespace android {
+
+struct AMessage;
+class String8;
+#define OFFSET_TABLE_LEN 300
+
+class AMRExtractor : public MediaExtractor {
+public:
+ explicit AMRExtractor(DataSourceBase *source);
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+ virtual const char * name() { return "AMRExtractor"; }
+
+protected:
+ virtual ~AMRExtractor();
+
+private:
+ DataSourceBase *mDataSource;
+ MetaDataBase mMeta;
+ status_t mInitCheck;
+ bool mIsWide;
+
+ off64_t mOffsetTable[OFFSET_TABLE_LEN]; // one entry per second of audio, up to 5 min
+ size_t mOffsetTableLength;
+
+ AMRExtractor(const AMRExtractor &);
+ AMRExtractor &operator=(const AMRExtractor &);
+};
+
+} // namespace android
+
+#endif // AMR_EXTRACTOR_H_
diff --git a/media/extractors/amr/Android.bp b/media/extractors/amr/Android.bp
new file mode 100644
index 0000000..d962b93
--- /dev/null
+++ b/media/extractors/amr/Android.bp
@@ -0,0 +1,41 @@
+cc_library_shared {
+
+ srcs: ["AMRExtractor.cpp"],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/include",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmediaextractor",
+ ],
+
+ static_libs: [
+ "libstagefright_foundation",
+ ],
+
+ name: "libamrextractor",
+ relative_install_path: "extractors",
+
+ compile_multilib: "first",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/extractors/amr/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/extractors/amr/MODULE_LICENSE_APACHE2
diff --git a/media/libstagefright/matroska/NOTICE b/media/extractors/amr/NOTICE
similarity index 100%
copy from media/libstagefright/matroska/NOTICE
copy to media/extractors/amr/NOTICE
diff --git a/media/extractors/amr/exports.lds b/media/extractors/amr/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/amr/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
new file mode 100644
index 0000000..6282793
--- /dev/null
+++ b/media/extractors/flac/Android.bp
@@ -0,0 +1,43 @@
+cc_library_shared {
+
+ srcs: ["FLACExtractor.cpp"],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/include",
+ "external/flac/include",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmediaextractor",
+ ],
+
+ static_libs: [
+ "libFLAC",
+ "libstagefright_foundation",
+ ],
+
+ name: "libflacextractor",
+ relative_install_path: "extractors",
+
+ compile_multilib: "first",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/extractors/flac/FLACExtractor.cpp b/media/extractors/flac/FLACExtractor.cpp
new file mode 100644
index 0000000..e3da259
--- /dev/null
+++ b/media/extractors/flac/FLACExtractor.cpp
@@ -0,0 +1,884 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FLACExtractor"
+#include <utils/Log.h>
+
+#include "FLACExtractor.h"
+// libFLAC parser
+#include "FLAC/stream_decoder.h"
+
+#include <media/DataSourceBase.h>
+#include <media/MediaTrack.h>
+#include <media/VorbisComment.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/base64.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaBufferBase.h>
+
+namespace android {
+
+class FLACParser;
+
+class FLACSource : public MediaTrack {
+
+public:
+ FLACSource(
+ DataSourceBase *dataSource,
+ MetaDataBase &meta);
+
+ virtual status_t start(MetaDataBase *params);
+ virtual status_t stop();
+ virtual status_t getFormat(MetaDataBase &meta);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+protected:
+ virtual ~FLACSource();
+
+private:
+ DataSourceBase *mDataSource;
+ MetaDataBase mTrackMetadata;
+ FLACParser *mParser;
+ bool mInitCheck;
+ bool mStarted;
+
+ // no copy constructor or assignment
+ FLACSource(const FLACSource &);
+ FLACSource &operator=(const FLACSource &);
+
+};
+
+// FLACParser wraps a C libFLAC parser aka stream decoder
+
+class FLACParser {
+
+public:
+ enum {
+ kMaxChannels = 8,
+ };
+
+ explicit FLACParser(
+ DataSourceBase *dataSource,
+ // If metadata pointers aren't provided, we don't fill them
+ MetaDataBase *fileMetadata = 0,
+ MetaDataBase *trackMetadata = 0);
+
+ virtual ~FLACParser();
+
+ status_t initCheck() const {
+ return mInitCheck;
+ }
+
+ // stream properties
+ unsigned getMaxBlockSize() const {
+ return mStreamInfo.max_blocksize;
+ }
+ unsigned getSampleRate() const {
+ return mStreamInfo.sample_rate;
+ }
+ unsigned getChannels() const {
+ return mStreamInfo.channels;
+ }
+ unsigned getBitsPerSample() const {
+ return mStreamInfo.bits_per_sample;
+ }
+ FLAC__uint64 getTotalSamples() const {
+ return mStreamInfo.total_samples;
+ }
+
+ // media buffers
+ void allocateBuffers();
+ void releaseBuffers();
+ MediaBufferBase *readBuffer() {
+ return readBuffer(false, 0LL);
+ }
+ MediaBufferBase *readBuffer(FLAC__uint64 sample) {
+ return readBuffer(true, sample);
+ }
+
+private:
+ DataSourceBase *mDataSource;
+ MetaDataBase *mFileMetadata;
+ MetaDataBase *mTrackMetadata;
+ bool mInitCheck;
+
+ // media buffers
+ size_t mMaxBufferSize;
+ MediaBufferGroup *mGroup;
+ void (*mCopy)(short *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
+
+ // handle to underlying libFLAC parser
+ FLAC__StreamDecoder *mDecoder;
+
+ // current position within the data source
+ off64_t mCurrentPos;
+ bool mEOF;
+
+ // cached when the STREAMINFO metadata is parsed by libFLAC
+ FLAC__StreamMetadata_StreamInfo mStreamInfo;
+ bool mStreamInfoValid;
+
+ // cached when a decoded PCM block is "written" by libFLAC parser
+ bool mWriteRequested;
+ bool mWriteCompleted;
+ FLAC__FrameHeader mWriteHeader;
+ FLAC__int32 const * mWriteBuffer[kMaxChannels];
+
+ // most recent error reported by libFLAC parser
+ FLAC__StreamDecoderErrorStatus mErrorStatus;
+
+ status_t init();
+ MediaBufferBase *readBuffer(bool doSeek, FLAC__uint64 sample);
+
+ // no copy constructor or assignment
+ FLACParser(const FLACParser &);
+ FLACParser &operator=(const FLACParser &);
+
+ // FLAC parser callbacks as C++ instance methods
+ FLAC__StreamDecoderReadStatus readCallback(
+ FLAC__byte buffer[], size_t *bytes);
+ FLAC__StreamDecoderSeekStatus seekCallback(
+ FLAC__uint64 absolute_byte_offset);
+ FLAC__StreamDecoderTellStatus tellCallback(
+ FLAC__uint64 *absolute_byte_offset);
+ FLAC__StreamDecoderLengthStatus lengthCallback(
+ FLAC__uint64 *stream_length);
+ FLAC__bool eofCallback();
+ FLAC__StreamDecoderWriteStatus writeCallback(
+ const FLAC__Frame *frame, const FLAC__int32 * const buffer[]);
+ void metadataCallback(const FLAC__StreamMetadata *metadata);
+ void errorCallback(FLAC__StreamDecoderErrorStatus status);
+
+ // FLAC parser callbacks as C-callable functions
+ static FLAC__StreamDecoderReadStatus read_callback(
+ const FLAC__StreamDecoder *decoder,
+ FLAC__byte buffer[], size_t *bytes,
+ void *client_data);
+ static FLAC__StreamDecoderSeekStatus seek_callback(
+ const FLAC__StreamDecoder *decoder,
+ FLAC__uint64 absolute_byte_offset,
+ void *client_data);
+ static FLAC__StreamDecoderTellStatus tell_callback(
+ const FLAC__StreamDecoder *decoder,
+ FLAC__uint64 *absolute_byte_offset,
+ void *client_data);
+ static FLAC__StreamDecoderLengthStatus length_callback(
+ const FLAC__StreamDecoder *decoder,
+ FLAC__uint64 *stream_length,
+ void *client_data);
+ static FLAC__bool eof_callback(
+ const FLAC__StreamDecoder *decoder,
+ void *client_data);
+ static FLAC__StreamDecoderWriteStatus write_callback(
+ const FLAC__StreamDecoder *decoder,
+ const FLAC__Frame *frame, const FLAC__int32 * const buffer[],
+ void *client_data);
+ static void metadata_callback(
+ const FLAC__StreamDecoder *decoder,
+ const FLAC__StreamMetadata *metadata,
+ void *client_data);
+ static void error_callback(
+ const FLAC__StreamDecoder *decoder,
+ FLAC__StreamDecoderErrorStatus status,
+ void *client_data);
+
+};
+
+// The FLAC parser calls our C++ static callbacks using C calling conventions,
+// inside FLAC__stream_decoder_process_until_end_of_metadata
+// and FLAC__stream_decoder_process_single.
+// We immediately then call our corresponding C++ instance methods
+// with the same parameter list, but discard redundant information.
+
+FLAC__StreamDecoderReadStatus FLACParser::read_callback(
+ const FLAC__StreamDecoder * /* decoder */, FLAC__byte buffer[],
+ size_t *bytes, void *client_data)
+{
+ return ((FLACParser *) client_data)->readCallback(buffer, bytes);
+}
+
+FLAC__StreamDecoderSeekStatus FLACParser::seek_callback(
+ const FLAC__StreamDecoder * /* decoder */,
+ FLAC__uint64 absolute_byte_offset, void *client_data)
+{
+ return ((FLACParser *) client_data)->seekCallback(absolute_byte_offset);
+}
+
+FLAC__StreamDecoderTellStatus FLACParser::tell_callback(
+ const FLAC__StreamDecoder * /* decoder */,
+ FLAC__uint64 *absolute_byte_offset, void *client_data)
+{
+ return ((FLACParser *) client_data)->tellCallback(absolute_byte_offset);
+}
+
+FLAC__StreamDecoderLengthStatus FLACParser::length_callback(
+ const FLAC__StreamDecoder * /* decoder */,
+ FLAC__uint64 *stream_length, void *client_data)
+{
+ return ((FLACParser *) client_data)->lengthCallback(stream_length);
+}
+
+FLAC__bool FLACParser::eof_callback(
+ const FLAC__StreamDecoder * /* decoder */, void *client_data)
+{
+ return ((FLACParser *) client_data)->eofCallback();
+}
+
+FLAC__StreamDecoderWriteStatus FLACParser::write_callback(
+ const FLAC__StreamDecoder * /* decoder */, const FLAC__Frame *frame,
+ const FLAC__int32 * const buffer[], void *client_data)
+{
+ return ((FLACParser *) client_data)->writeCallback(frame, buffer);
+}
+
+void FLACParser::metadata_callback(
+ const FLAC__StreamDecoder * /* decoder */,
+ const FLAC__StreamMetadata *metadata, void *client_data)
+{
+ ((FLACParser *) client_data)->metadataCallback(metadata);
+}
+
+void FLACParser::error_callback(
+ const FLAC__StreamDecoder * /* decoder */,
+ FLAC__StreamDecoderErrorStatus status, void *client_data)
+{
+ ((FLACParser *) client_data)->errorCallback(status);
+}
+
+// These are the corresponding callbacks with C++ calling conventions
+
+FLAC__StreamDecoderReadStatus FLACParser::readCallback(
+ FLAC__byte buffer[], size_t *bytes)
+{
+ size_t requested = *bytes;
+ ssize_t actual = mDataSource->readAt(mCurrentPos, buffer, requested);
+ if (0 > actual) {
+ *bytes = 0;
+ return FLAC__STREAM_DECODER_READ_STATUS_ABORT;
+ } else if (0 == actual) {
+ *bytes = 0;
+ mEOF = true;
+ return FLAC__STREAM_DECODER_READ_STATUS_END_OF_STREAM;
+ } else {
+ assert(actual <= requested);
+ *bytes = actual;
+ mCurrentPos += actual;
+ return FLAC__STREAM_DECODER_READ_STATUS_CONTINUE;
+ }
+}
+
+FLAC__StreamDecoderSeekStatus FLACParser::seekCallback(
+ FLAC__uint64 absolute_byte_offset)
+{
+ mCurrentPos = absolute_byte_offset;
+ mEOF = false;
+ return FLAC__STREAM_DECODER_SEEK_STATUS_OK;
+}
+
+FLAC__StreamDecoderTellStatus FLACParser::tellCallback(
+ FLAC__uint64 *absolute_byte_offset)
+{
+ *absolute_byte_offset = mCurrentPos;
+ return FLAC__STREAM_DECODER_TELL_STATUS_OK;
+}
+
+FLAC__StreamDecoderLengthStatus FLACParser::lengthCallback(
+ FLAC__uint64 *stream_length)
+{
+ off64_t size;
+ if (OK == mDataSource->getSize(&size)) {
+ *stream_length = size;
+ return FLAC__STREAM_DECODER_LENGTH_STATUS_OK;
+ } else {
+ return FLAC__STREAM_DECODER_LENGTH_STATUS_UNSUPPORTED;
+ }
+}
+
+FLAC__bool FLACParser::eofCallback()
+{
+ return mEOF;
+}
+
+FLAC__StreamDecoderWriteStatus FLACParser::writeCallback(
+ const FLAC__Frame *frame, const FLAC__int32 * const buffer[])
+{
+ if (mWriteRequested) {
+ mWriteRequested = false;
+ // FLAC parser doesn't free or realloc buffer until next frame or finish
+ mWriteHeader = frame->header;
+ memmove(mWriteBuffer, buffer, sizeof(const FLAC__int32 * const) * getChannels());
+ mWriteCompleted = true;
+ return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
+ } else {
+ ALOGE("FLACParser::writeCallback unexpected");
+ return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
+ }
+}
+
+void FLACParser::metadataCallback(const FLAC__StreamMetadata *metadata)
+{
+ switch (metadata->type) {
+ case FLAC__METADATA_TYPE_STREAMINFO:
+ if (!mStreamInfoValid) {
+ mStreamInfo = metadata->data.stream_info;
+ mStreamInfoValid = true;
+ } else {
+ ALOGE("FLACParser::metadataCallback unexpected STREAMINFO");
+ }
+ break;
+ case FLAC__METADATA_TYPE_VORBIS_COMMENT:
+ {
+ const FLAC__StreamMetadata_VorbisComment *vc;
+ vc = &metadata->data.vorbis_comment;
+ for (FLAC__uint32 i = 0; i < vc->num_comments; ++i) {
+ FLAC__StreamMetadata_VorbisComment_Entry *vce;
+ vce = &vc->comments[i];
+ if (mFileMetadata != 0 && vce->entry != NULL) {
+ parseVorbisComment(mFileMetadata, (const char *) vce->entry,
+ vce->length);
+ }
+ }
+ }
+ break;
+ case FLAC__METADATA_TYPE_PICTURE:
+ if (mFileMetadata != 0) {
+ const FLAC__StreamMetadata_Picture *p = &metadata->data.picture;
+ mFileMetadata->setData(kKeyAlbumArt,
+ MetaData::TYPE_NONE, p->data, p->data_length);
+ mFileMetadata->setCString(kKeyAlbumArtMIME, p->mime_type);
+ }
+ break;
+ default:
+ ALOGW("FLACParser::metadataCallback unexpected type %u", metadata->type);
+ break;
+ }
+}
+
+void FLACParser::errorCallback(FLAC__StreamDecoderErrorStatus status)
+{
+ ALOGE("FLACParser::errorCallback status=%d", status);
+ mErrorStatus = status;
+}
+
+// Copy samples from FLAC native 32-bit non-interleaved to 16-bit interleaved.
+// These are candidates for optimization if needed.
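+// 8-bit sources are shifted up by 8 bits, 16-bit sources are copied as-is, and
+// 24-bit sources are shifted down by 8 bits (see the dithering note further below).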
+
+static void copyMono8(
+ short *dst,
+ const int * src[FLACParser::kMaxChannels],
+ unsigned nSamples,
+ unsigned /* nChannels */) {
+ for (unsigned i = 0; i < nSamples; ++i) {
+ *dst++ = src[0][i] << 8;
+ }
+}
+
+static void copyStereo8(
+ short *dst,
+ const int * src[FLACParser::kMaxChannels],
+ unsigned nSamples,
+ unsigned /* nChannels */) {
+ for (unsigned i = 0; i < nSamples; ++i) {
+ *dst++ = src[0][i] << 8;
+ *dst++ = src[1][i] << 8;
+ }
+}
+
+static void copyMultiCh8(short *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
+{
+ for (unsigned i = 0; i < nSamples; ++i) {
+ for (unsigned c = 0; c < nChannels; ++c) {
+ *dst++ = src[c][i] << 8;
+ }
+ }
+}
+
+static void copyMono16(
+ short *dst,
+ const int * src[FLACParser::kMaxChannels],
+ unsigned nSamples,
+ unsigned /* nChannels */) {
+ for (unsigned i = 0; i < nSamples; ++i) {
+ *dst++ = src[0][i];
+ }
+}
+
+static void copyStereo16(
+ short *dst,
+ const int * src[FLACParser::kMaxChannels],
+ unsigned nSamples,
+ unsigned /* nChannels */) {
+ for (unsigned i = 0; i < nSamples; ++i) {
+ *dst++ = src[0][i];
+ *dst++ = src[1][i];
+ }
+}
+
+static void copyMultiCh16(short *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
+{
+ for (unsigned i = 0; i < nSamples; ++i) {
+ for (unsigned c = 0; c < nChannels; ++c) {
+ *dst++ = src[c][i];
+ }
+ }
+}
+
+// 24-bit versions should do dithering or noise-shaping, here or in AudioFlinger
+
+static void copyMono24(
+ short *dst,
+ const int * src[FLACParser::kMaxChannels],
+ unsigned nSamples,
+ unsigned /* nChannels */) {
+ for (unsigned i = 0; i < nSamples; ++i) {
+ *dst++ = src[0][i] >> 8;
+ }
+}
+
+static void copyStereo24(
+ short *dst,
+ const int * src[FLACParser::kMaxChannels],
+ unsigned nSamples,
+ unsigned /* nChannels */) {
+ for (unsigned i = 0; i < nSamples; ++i) {
+ *dst++ = src[0][i] >> 8;
+ *dst++ = src[1][i] >> 8;
+ }
+}
+
+static void copyMultiCh24(short *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
+{
+ for (unsigned i = 0; i < nSamples; ++i) {
+ for (unsigned c = 0; c < nChannels; ++c) {
+ *dst++ = src[c][i] >> 8;
+ }
+ }
+}
+
+static void copyTrespass(
+ short * /* dst */,
+ const int *[FLACParser::kMaxChannels] /* src */,
+ unsigned /* nSamples */,
+ unsigned /* nChannels */) {
+ TRESPASS();
+}
+
+// FLACParser
+
+FLACParser::FLACParser(
+ DataSourceBase *dataSource,
+ MetaDataBase *fileMetadata,
+ MetaDataBase *trackMetadata)
+ : mDataSource(dataSource),
+ mFileMetadata(fileMetadata),
+ mTrackMetadata(trackMetadata),
+ mInitCheck(false),
+ mMaxBufferSize(0),
+ mGroup(NULL),
+ mCopy(copyTrespass),
+ mDecoder(NULL),
+ mCurrentPos(0LL),
+ mEOF(false),
+ mStreamInfoValid(false),
+ mWriteRequested(false),
+ mWriteCompleted(false),
+ mErrorStatus((FLAC__StreamDecoderErrorStatus) -1)
+{
+ ALOGV("FLACParser::FLACParser");
+ memset(&mStreamInfo, 0, sizeof(mStreamInfo));
+ memset(&mWriteHeader, 0, sizeof(mWriteHeader));
+ mInitCheck = init();
+}
+
+FLACParser::~FLACParser()
+{
+ ALOGV("FLACParser::~FLACParser");
+ if (mDecoder != NULL) {
+ FLAC__stream_decoder_delete(mDecoder);
+ mDecoder = NULL;
+ }
+}
+
+status_t FLACParser::init()
+{
+ // setup libFLAC parser
+ mDecoder = FLAC__stream_decoder_new();
+ if (mDecoder == NULL) {
+ // The allocation should succeed, since all it probably does is a malloc,
+ // which effectively always succeeds on Android. But to avoid depending on
+ // libFLAC internals, we check and log here.
+ ALOGE("new failed");
+ return NO_INIT;
+ }
+ FLAC__stream_decoder_set_md5_checking(mDecoder, false);
+ FLAC__stream_decoder_set_metadata_ignore_all(mDecoder);
+ FLAC__stream_decoder_set_metadata_respond(
+ mDecoder, FLAC__METADATA_TYPE_STREAMINFO);
+ FLAC__stream_decoder_set_metadata_respond(
+ mDecoder, FLAC__METADATA_TYPE_PICTURE);
+ FLAC__stream_decoder_set_metadata_respond(
+ mDecoder, FLAC__METADATA_TYPE_VORBIS_COMMENT);
+ FLAC__StreamDecoderInitStatus initStatus;
+ initStatus = FLAC__stream_decoder_init_stream(
+ mDecoder,
+ read_callback, seek_callback, tell_callback,
+ length_callback, eof_callback, write_callback,
+ metadata_callback, error_callback, (void *) this);
+ if (initStatus != FLAC__STREAM_DECODER_INIT_STATUS_OK) {
+ // A failure here probably indicates a programming error and so is
+ // unlikely to happen. But we check and log here similarly to above.
+ ALOGE("init_stream failed %d", initStatus);
+ return NO_INIT;
+ }
+ // parse all metadata
+ if (!FLAC__stream_decoder_process_until_end_of_metadata(mDecoder)) {
+ ALOGE("end_of_metadata failed");
+ return NO_INIT;
+ }
+ if (mStreamInfoValid) {
+ // check channel count
+ if (getChannels() == 0 || getChannels() > kMaxChannels) {
+ ALOGE("unsupported channel count %u", getChannels());
+ return NO_INIT;
+ }
+ // check bit depth
+ switch (getBitsPerSample()) {
+ case 8:
+ case 16:
+ case 24:
+ break;
+ default:
+ ALOGE("unsupported bits per sample %u", getBitsPerSample());
+ return NO_INIT;
+ }
+ // check sample rate
+ switch (getSampleRate()) {
+ case 8000:
+ case 11025:
+ case 12000:
+ case 16000:
+ case 22050:
+ case 24000:
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ break;
+ default:
+ ALOGE("unsupported sample rate %u", getSampleRate());
+ return NO_INIT;
+ }
+ // configure the appropriate copy function, defaulting to trespass
+ static const struct {
+ unsigned mChannels;
+ unsigned mBitsPerSample;
+ void (*mCopy)(short *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
+ } table[] = {
+ { 1, 8, copyMono8 },
+ { 2, 8, copyStereo8 },
+ { 8, 8, copyMultiCh8 },
+ { 1, 16, copyMono16 },
+ { 2, 16, copyStereo16 },
+ { 8, 16, copyMultiCh16 },
+ { 1, 24, copyMono24 },
+ { 2, 24, copyStereo24 },
+ { 8, 24, copyMultiCh24 },
+ };
+ for (unsigned i = 0; i < sizeof(table)/sizeof(table[0]); ++i) {
+ if (table[i].mChannels >= getChannels() &&
+ table[i].mBitsPerSample == getBitsPerSample()) {
+ mCopy = table[i].mCopy;
+ break;
+ }
+ }
+ // populate track metadata
+ if (mTrackMetadata != 0) {
+ mTrackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+ mTrackMetadata->setInt32(kKeyChannelCount, getChannels());
+ mTrackMetadata->setInt32(kKeySampleRate, getSampleRate());
+ mTrackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
+ // sample rate is non-zero, so division by zero not possible
+ mTrackMetadata->setInt64(kKeyDuration,
+ (getTotalSamples() * 1000000LL) / getSampleRate());
+ }
+ } else {
+ ALOGE("missing STREAMINFO");
+ return NO_INIT;
+ }
+ if (mFileMetadata != 0) {
+ mFileMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_FLAC);
+ }
+ return OK;
+}
+
+void FLACParser::allocateBuffers()
+{
+ CHECK(mGroup == NULL);
+ mGroup = new MediaBufferGroup;
+ mMaxBufferSize = getMaxBlockSize() * getChannels() * sizeof(short);
+ mGroup->add_buffer(MediaBufferBase::Create(mMaxBufferSize));
+}
+
+void FLACParser::releaseBuffers()
+{
+ CHECK(mGroup != NULL);
+ delete mGroup;
+ mGroup = NULL;
+}
+
+MediaBufferBase *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
+{
+ mWriteRequested = true;
+ mWriteCompleted = false;
+ if (doSeek) {
+ // We implement the seek callback, so this works without explicit flush
+ if (!FLAC__stream_decoder_seek_absolute(mDecoder, sample)) {
+ ALOGE("FLACParser::readBuffer seek to sample %lld failed", (long long)sample);
+ return NULL;
+ }
+ ALOGV("FLACParser::readBuffer seek to sample %lld succeeded", (long long)sample);
+ } else {
+ if (!FLAC__stream_decoder_process_single(mDecoder)) {
+ ALOGE("FLACParser::readBuffer process_single failed");
+ return NULL;
+ }
+ }
+ if (!mWriteCompleted) {
+ ALOGV("FLACParser::readBuffer write did not complete");
+ return NULL;
+ }
+ // verify that block header keeps the promises made by STREAMINFO
+ unsigned blocksize = mWriteHeader.blocksize;
+ if (blocksize == 0 || blocksize > getMaxBlockSize()) {
+ ALOGE("FLACParser::readBuffer write invalid blocksize %u", blocksize);
+ return NULL;
+ }
+ if (mWriteHeader.sample_rate != getSampleRate() ||
+ mWriteHeader.channels != getChannels() ||
+ mWriteHeader.bits_per_sample != getBitsPerSample()) {
+ ALOGE("FLACParser::readBuffer write changed parameters mid-stream: %d/%d/%d -> %d/%d/%d",
+ getSampleRate(), getChannels(), getBitsPerSample(),
+ mWriteHeader.sample_rate, mWriteHeader.channels, mWriteHeader.bits_per_sample);
+ return NULL;
+ }
+ // acquire a media buffer
+ CHECK(mGroup != NULL);
+ MediaBufferBase *buffer;
+ status_t err = mGroup->acquire_buffer(&buffer);
+ if (err != OK) {
+ return NULL;
+ }
+ size_t bufferSize = blocksize * getChannels() * sizeof(short);
+ CHECK(bufferSize <= mMaxBufferSize);
+ short *data = (short *) buffer->data();
+ buffer->set_range(0, bufferSize);
+ // copy PCM from FLAC write buffer to our media buffer, with interleaving
+ (*mCopy)(data, mWriteBuffer, blocksize, getChannels());
+ // fill in buffer metadata
+ CHECK(mWriteHeader.number_type == FLAC__FRAME_NUMBER_TYPE_SAMPLE_NUMBER);
+ FLAC__uint64 sampleNumber = mWriteHeader.number.sample_number;
+ int64_t timeUs = (1000000LL * sampleNumber) / getSampleRate();
+ buffer->meta_data().setInt64(kKeyTime, timeUs);
+ buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ return buffer;
+}
+
+// FLACSource
+
+FLACSource::FLACSource(
+ DataSourceBase *dataSource,
+ MetaDataBase &trackMetadata)
+ : mDataSource(dataSource),
+ mTrackMetadata(trackMetadata),
+ mParser(0),
+ mInitCheck(false),
+ mStarted(false)
+{
+ ALOGV("FLACSource::FLACSource");
+ // re-use the same track metadata passed into constructor from FLACExtractor
+ mParser = new FLACParser(mDataSource);
+ mInitCheck = mParser->initCheck();
+}
+
+FLACSource::~FLACSource()
+{
+ ALOGV("~FLACSource::FLACSource");
+ if (mStarted) {
+ stop();
+ }
+ delete mParser;
+}
+
+status_t FLACSource::start(MetaDataBase * /* params */)
+{
+ ALOGV("FLACSource::start");
+
+ CHECK(!mStarted);
+ mParser->allocateBuffers();
+ mStarted = true;
+
+ return OK;
+}
+
+status_t FLACSource::stop()
+{
+ ALOGV("FLACSource::stop");
+
+ CHECK(mStarted);
+ mParser->releaseBuffers();
+ mStarted = false;
+
+ return OK;
+}
+
+status_t FLACSource::getFormat(MetaDataBase &meta)
+{
+ meta = mTrackMetadata;
+ return OK;
+}
+
+status_t FLACSource::read(
+ MediaBufferBase **outBuffer, const ReadOptions *options)
+{
+ MediaBufferBase *buffer;
+ // process an optional seek request
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if ((NULL != options) && options->getSeekTo(&seekTimeUs, &mode)) {
+ FLAC__uint64 sample;
+ if (seekTimeUs <= 0LL) {
+ sample = 0LL;
+ } else {
+ // sample and total samples are both zero-based, and seek to EOF ok
+ sample = (seekTimeUs * mParser->getSampleRate()) / 1000000LL;
+ if (sample >= mParser->getTotalSamples()) {
+ sample = mParser->getTotalSamples();
+ }
+ }
+ buffer = mParser->readBuffer(sample);
+ // otherwise read sequentially
+ } else {
+ buffer = mParser->readBuffer();
+ }
+ *outBuffer = buffer;
+ return buffer != NULL ? (status_t) OK : (status_t) ERROR_END_OF_STREAM;
+}
+
+// FLACExtractor
+
+FLACExtractor::FLACExtractor(
+ DataSourceBase *dataSource)
+ : mDataSource(dataSource),
+ mParser(nullptr),
+ mInitCheck(false)
+{
+ ALOGV("FLACExtractor::FLACExtractor");
+ // FLACParser will fill in the metadata for us
+ mParser = new FLACParser(mDataSource, &mFileMetadata, &mTrackMetadata);
+ mInitCheck = mParser->initCheck();
+}
+
+FLACExtractor::~FLACExtractor()
+{
+ ALOGV("~FLACExtractor::FLACExtractor");
+ delete mParser;
+}
+
+size_t FLACExtractor::countTracks()
+{
+ return mInitCheck == OK ? 1 : 0;
+}
+
+MediaTrack *FLACExtractor::getTrack(size_t index)
+{
+ if (mInitCheck != OK || index > 0) {
+ return NULL;
+ }
+ return new FLACSource(mDataSource, mTrackMetadata);
+}
+
+status_t FLACExtractor::getTrackMetaData(
+ MetaDataBase &meta,
+ size_t index, uint32_t /* flags */) {
+ if (mInitCheck != OK || index > 0) {
+ return UNKNOWN_ERROR;
+ }
+ meta = mTrackMetadata;
+ return OK;
+}
+
+status_t FLACExtractor::getMetaData(MetaDataBase &meta)
+{
+ meta = mFileMetadata;
+ return OK;
+}
+
+// Sniffer
+
+bool SniffFLAC(DataSourceBase *source, float *confidence)
+{
+ // The first 4 bytes are the "fLaC" stream marker; the next 4 are the header of
+ // the mandatory STREAMINFO block (type 0, 24-bit length 042 == 34 bytes).
+ // No need to read the rest of the header, as a premature EOF will be caught later.
+ uint8_t header[4+4];
+ if (source->readAt(0, header, sizeof(header)) != sizeof(header)
+ || memcmp("fLaC\0\0\0\042", header, 4+4))
+ {
+ return false;
+ }
+
+ *confidence = 0.5;
+
+ return true;
+}
+
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("1364b048-cc45-4fda-9934-327d0ebf9829"),
+ 1,
+ "FLAC Extractor",
+ [](
+ DataSourceBase *source,
+ float *confidence,
+ void **,
+ MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
+ if (SniffFLAC(source, confidence)) {
+ return [](
+ DataSourceBase *source,
+ void *) -> MediaExtractor* {
+ return new FLACExtractor(source);};
+ }
+ return NULL;
+ }
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/FLACExtractor.h
new file mode 100644
index 0000000..7fb6ec6
--- /dev/null
+++ b/media/extractors/flac/FLACExtractor.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLAC_EXTRACTOR_H_
+#define FLAC_EXTRACTOR_H_
+
+#include <media/DataSourceBase.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/MetaDataBase.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class FLACParser;
+
+class FLACExtractor : public MediaExtractor {
+
+public:
+ explicit FLACExtractor(DataSourceBase *source);
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+ virtual const char * name() { return "FLACExtractor"; }
+
+protected:
+ virtual ~FLACExtractor();
+
+private:
+ DataSourceBase *mDataSource;
+ FLACParser *mParser;
+ status_t mInitCheck;
+ MetaDataBase mFileMetadata;
+
+ // There is only one track
+ MetaDataBase mTrackMetadata;
+
+ FLACExtractor(const FLACExtractor &);
+ FLACExtractor &operator=(const FLACExtractor &);
+
+};
+
+bool SniffFLAC(DataSourceBase *source, float *confidence);
+
+} // namespace android
+
+#endif // FLAC_EXTRACTOR_H_
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/extractors/flac/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/extractors/flac/MODULE_LICENSE_APACHE2
diff --git a/media/libstagefright/matroska/NOTICE b/media/extractors/flac/NOTICE
similarity index 100%
copy from media/libstagefright/matroska/NOTICE
copy to media/extractors/flac/NOTICE
diff --git a/media/extractors/flac/exports.lds b/media/extractors/flac/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/flac/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
new file mode 100644
index 0000000..fde09df18
--- /dev/null
+++ b/media/extractors/midi/Android.bp
@@ -0,0 +1,42 @@
+cc_library_shared {
+
+ srcs: ["MidiExtractor.cpp"],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/include",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmediaextractor",
+ ],
+
+ static_libs: [
+ "libmedia_midiiowrapper",
+ "libsonivox",
+ "libstagefright_foundation"
+ ],
+ name: "libmidiextractor",
+ relative_install_path: "extractors",
+
+ compile_multilib: "first",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/extractors/midi/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/extractors/midi/MODULE_LICENSE_APACHE2
diff --git a/media/extractors/midi/MidiExtractor.cpp b/media/extractors/midi/MidiExtractor.cpp
new file mode 100644
index 0000000..949fbe0
--- /dev/null
+++ b/media/extractors/midi/MidiExtractor.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MidiExtractor"
+#include <utils/Log.h>
+
+#include "MidiExtractor.h"
+
+#include <media/MidiIoWrapper.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/MediaTrack.h>
+#include <libsonivox/eas_reverb.h>
+
+namespace android {
+
+// how many Sonivox output buffers to aggregate into one MediaBufferBase
+static const int NUM_COMBINE_BUFFERS = 4;
+
+class MidiSource : public MediaTrack {
+
+public:
+ MidiSource(
+ MidiEngine &engine,
+ MetaDataBase &trackMetadata);
+
+ virtual status_t start(MetaDataBase *params);
+ virtual status_t stop();
+ virtual status_t getFormat(MetaDataBase&);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+protected:
+ virtual ~MidiSource();
+
+private:
+ MidiEngine &mEngine;
+ MetaDataBase &mTrackMetadata;
+ bool mInitCheck;
+ bool mStarted;
+
+ status_t init();
+
+ // no copy constructor or assignment
+ MidiSource(const MidiSource &);
+ MidiSource &operator=(const MidiSource &);
+
+};
+
+
+// MidiSource
+
+MidiSource::MidiSource(
+ MidiEngine &engine,
+ MetaDataBase &trackMetadata)
+ : mEngine(engine),
+ mTrackMetadata(trackMetadata),
+ mInitCheck(false),
+ mStarted(false)
+{
+ ALOGV("MidiSource ctor");
+ mInitCheck = init();
+}
+
+MidiSource::~MidiSource()
+{
+ ALOGV("MidiSource dtor");
+ if (mStarted) {
+ stop();
+ }
+}
+
+status_t MidiSource::start(MetaDataBase * /* params */)
+{
+ ALOGV("MidiSource::start");
+
+ CHECK(!mStarted);
+ mStarted = true;
+ mEngine.allocateBuffers();
+ return OK;
+}
+
+status_t MidiSource::stop()
+{
+ ALOGV("MidiSource::stop");
+
+ CHECK(mStarted);
+ mStarted = false;
+ mEngine.releaseBuffers();
+
+ return OK;
+}
+
+status_t MidiSource::getFormat(MetaDataBase &meta)
+{
+ meta = mTrackMetadata;
+ return OK;
+}
+
+status_t MidiSource::read(
+ MediaBufferBase **outBuffer, const ReadOptions *options)
+{
+ ALOGV("MidiSource::read");
+ MediaBufferBase *buffer;
+ // process an optional seek request
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if ((NULL != options) && options->getSeekTo(&seekTimeUs, &mode)) {
+ if (seekTimeUs <= 0LL) {
+ seekTimeUs = 0LL;
+ }
+ mEngine.seekTo(seekTimeUs);
+ }
+ buffer = mEngine.readBuffer();
+ *outBuffer = buffer;
+ ALOGV("MidiSource::read %p done", this);
+ return buffer != NULL ? (status_t) OK : (status_t) ERROR_END_OF_STREAM;
+}
+
+status_t MidiSource::init()
+{
+ ALOGV("MidiSource::init");
+ return OK;
+}
+
+// MidiEngine
+
+MidiEngine::MidiEngine(DataSourceBase *dataSource,
+ MetaDataBase *fileMetadata,
+ MetaDataBase *trackMetadata) :
+ mGroup(NULL),
+ mEasData(NULL),
+ mEasHandle(NULL),
+ mEasConfig(NULL),
+ mIsInitialized(false) {
+ mIoWrapper = new MidiIoWrapper(dataSource);
+ // spin up a new EAS engine
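+ // 'temp' receives the file's play length in milliseconds from EAS_ParseMetaData().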
+ EAS_I32 temp;
+ EAS_RESULT result = EAS_Init(&mEasData);
+
+ if (result == EAS_SUCCESS) {
+ result = EAS_OpenFile(mEasData, mIoWrapper->getLocator(), &mEasHandle);
+ }
+ if (result == EAS_SUCCESS) {
+ result = EAS_Prepare(mEasData, mEasHandle);
+ }
+ if (result == EAS_SUCCESS) {
+ result = EAS_ParseMetaData(mEasData, mEasHandle, &temp);
+ }
+
+ if (result != EAS_SUCCESS) {
+ return;
+ }
+
+ if (fileMetadata != NULL) {
+ fileMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MIDI);
+ }
+
+ if (trackMetadata != NULL) {
+ trackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+ trackMetadata->setInt64(kKeyDuration, 1000ll * temp); // milli->micro
+ mEasConfig = EAS_Config();
+ trackMetadata->setInt32(kKeySampleRate, mEasConfig->sampleRate);
+ trackMetadata->setInt32(kKeyChannelCount, mEasConfig->numChannels);
+ trackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
+ }
+ mIsInitialized = true;
+}
+
+MidiEngine::~MidiEngine() {
+ if (mEasHandle) {
+ EAS_CloseFile(mEasData, mEasHandle);
+ }
+ if (mEasData) {
+ EAS_Shutdown(mEasData);
+ }
+ delete mGroup;
+ delete mIoWrapper;
+}
+
+status_t MidiEngine::initCheck() {
+ return mIsInitialized ? OK : UNKNOWN_ERROR;
+}
+
+status_t MidiEngine::allocateBuffers() {
+ // select reverb preset and enable
+ EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_PRESET, EAS_PARAM_REVERB_CHAMBER);
+ EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_BYPASS, EAS_FALSE);
+
+ mGroup = new MediaBufferGroup;
+ int bufsize = sizeof(EAS_PCM)
+ * mEasConfig->mixBufferSize * mEasConfig->numChannels * NUM_COMBINE_BUFFERS;
+ ALOGV("using %d byte buffer", bufsize);
+ mGroup->add_buffer(MediaBufferBase::Create(bufsize));
+ return OK;
+}
+
+status_t MidiEngine::releaseBuffers() {
+ delete mGroup;
+ mGroup = NULL;
+ return OK;
+}
+
+status_t MidiEngine::seekTo(int64_t positionUs) {
+ ALOGV("seekTo %lld", (long long)positionUs);
+ EAS_RESULT result = EAS_Locate(mEasData, mEasHandle, positionUs / 1000, false);
+ return result == EAS_SUCCESS ? OK : UNKNOWN_ERROR;
+}
+
+MediaBufferBase* MidiEngine::readBuffer() {
+ EAS_STATE state;
+ EAS_State(mEasData, mEasHandle, &state);
+ if ((state == EAS_STATE_STOPPED) || (state == EAS_STATE_ERROR)) {
+ return NULL;
+ }
+ MediaBufferBase *buffer;
+ status_t err = mGroup->acquire_buffer(&buffer);
+ if (err != OK) {
+ ALOGE("readBuffer: no buffer");
+ return NULL;
+ }
+ EAS_I32 timeMs;
+ EAS_GetLocation(mEasData, mEasHandle, &timeMs);
+ int64_t timeUs = 1000ll * timeMs;
+ buffer->meta_data().setInt64(kKeyTime, timeUs);
+
+ EAS_PCM* p = (EAS_PCM*) buffer->data();
+ int numBytesOutput = 0;
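+ // Render NUM_COMBINE_BUFFERS Sonivox mix buffers back to back into the single output buffer.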
+ for (int i = 0; i < NUM_COMBINE_BUFFERS; i++) {
+ EAS_I32 numRendered;
+ EAS_RESULT result = EAS_Render(mEasData, p, mEasConfig->mixBufferSize, &numRendered);
+ if (result != EAS_SUCCESS) {
+ ALOGE("EAS_Render returned %ld", result);
+ break;
+ }
+ p += numRendered * mEasConfig->numChannels;
+ numBytesOutput += numRendered * mEasConfig->numChannels * sizeof(EAS_PCM);
+ }
+ buffer->set_range(0, numBytesOutput);
+ ALOGV("readBuffer: returning %zd in buffer %p", buffer->range_length(), buffer);
+ return buffer;
+}
+
+
+// MidiExtractor
+
+MidiExtractor::MidiExtractor(
+ DataSourceBase *dataSource)
+ : mDataSource(dataSource),
+ mInitCheck(false)
+{
+ ALOGV("MidiExtractor ctor");
+ mEngine = new MidiEngine(mDataSource, &mFileMetadata, &mTrackMetadata);
+ mInitCheck = mEngine->initCheck();
+}
+
+MidiExtractor::~MidiExtractor()
+{
+ ALOGV("MidiExtractor dtor");
+}
+
+size_t MidiExtractor::countTracks()
+{
+ return mInitCheck == OK ? 1 : 0;
+}
+
+MediaTrack *MidiExtractor::getTrack(size_t index)
+{
+ if (mInitCheck != OK || index > 0) {
+ return NULL;
+ }
+ return new MidiSource(*mEngine, mTrackMetadata);
+}
+
+status_t MidiExtractor::getTrackMetaData(
+ MetaDataBase &meta,
+ size_t index, uint32_t /* flags */) {
+ ALOGV("MidiExtractor::getTrackMetaData");
+ if (mInitCheck != OK || index > 0) {
+ return UNKNOWN_ERROR;
+ }
+ meta = mTrackMetadata;
+ return OK;
+}
+
+status_t MidiExtractor::getMetaData(MetaDataBase &meta)
+{
+ ALOGV("MidiExtractor::getMetaData");
+ meta = mFileMetadata;
+ return OK;
+}
+
+// Sniffer
+
+bool SniffMidi(DataSourceBase *source, float *confidence)
+{
+ MidiEngine p(source, NULL, NULL);
+ if (p.initCheck() == OK) {
+ *confidence = 0.8;
+ ALOGV("SniffMidi: yes");
+ return true;
+ }
+ ALOGV("SniffMidi: no");
+ return false;
+
+}
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("ef6cca0a-f8a2-43e6-ba5f-dfcd7c9a7ef2"),
+ 1,
+ "MIDI Extractor",
+ [](
+ DataSourceBase *source,
+ float *confidence,
+ void **,
+ MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
+ if (SniffMidi(source, confidence)) {
+ return [](
+ DataSourceBase *source,
+ void *) -> MediaExtractor* {
+ return new MidiExtractor(source);};
+ }
+ return NULL;
+ }
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/MidiExtractor.h
new file mode 100644
index 0000000..244dd0f
--- /dev/null
+++ b/media/extractors/midi/MidiExtractor.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MIDI_EXTRACTOR_H_
+#define MIDI_EXTRACTOR_H_
+
+#include <media/DataSourceBase.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/MediaBufferBase.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MetaDataBase.h>
+#include <media/MidiIoWrapper.h>
+#include <utils/String8.h>
+#include <libsonivox/eas.h>
+
+namespace android {
+
+class MidiEngine {
+public:
+ explicit MidiEngine(DataSourceBase *dataSource,
+ MetaDataBase *fileMetadata,
+ MetaDataBase *trackMetadata);
+ ~MidiEngine();
+
+ status_t initCheck();
+
+ status_t allocateBuffers();
+ status_t releaseBuffers();
+ status_t seekTo(int64_t positionUs);
+ MediaBufferBase* readBuffer();
+private:
+ MidiIoWrapper *mIoWrapper;
+ MediaBufferGroup *mGroup;
+ EAS_DATA_HANDLE mEasData;
+ EAS_HANDLE mEasHandle;
+ const S_EAS_LIB_CONFIG* mEasConfig;
+ bool mIsInitialized;
+};
+
+class MidiExtractor : public MediaExtractor {
+
+public:
+ explicit MidiExtractor(DataSourceBase *source);
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+ virtual const char * name() { return "MidiExtractor"; }
+
+protected:
+ virtual ~MidiExtractor();
+
+private:
+ DataSourceBase *mDataSource;
+ status_t mInitCheck;
+ MetaDataBase mFileMetadata;
+
+ // There is only one track
+ MetaDataBase mTrackMetadata;
+
+ MidiEngine *mEngine;
+
+ EAS_DATA_HANDLE mEasData;
+ EAS_HANDLE mEasHandle;
+ EAS_PCM* mAudioBuffer;
+ EAS_I32 mPlayTime;
+ EAS_I32 mDuration;
+ EAS_STATE mState;
+ EAS_FILE mFileLocator;
+
+ MidiExtractor(const MidiExtractor &);
+ MidiExtractor &operator=(const MidiExtractor &);
+
+};
+
+bool SniffMidi(DataSourceBase *source, float *confidence);
+
+} // namespace android
+
+#endif // MIDI_EXTRACTOR_H_
diff --git a/media/libstagefright/matroska/NOTICE b/media/extractors/midi/NOTICE
similarity index 100%
copy from media/libstagefright/matroska/NOTICE
copy to media/extractors/midi/NOTICE
diff --git a/media/extractors/midi/exports.lds b/media/extractors/midi/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/midi/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
new file mode 100644
index 0000000..681fd35
--- /dev/null
+++ b/media/extractors/mkv/Android.bp
@@ -0,0 +1,48 @@
+cc_library_shared {
+
+ srcs: ["MatroskaExtractor.cpp"],
+
+ include_dirs: [
+ "external/flac/include",
+ "external/libvpx/libwebm",
+ "frameworks/av/media/libstagefright/flac/dec",
+ "frameworks/av/media/libstagefright/include",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmediaextractor",
+ ],
+
+ static_libs: [
+ "libstagefright_flacdec",
+ "libstagefright_foundation",
+ "libstagefright_metadatautils",
+ "libwebm",
+ "libutils",
+ ],
+
+ name: "libmkvextractor",
+ relative_install_path: "extractors",
+
+ compile_multilib: "first",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/extractors/mkv/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/extractors/mkv/MODULE_LICENSE_APACHE2
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
new file mode 100644
index 0000000..d657582
--- /dev/null
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -0,0 +1,1665 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MatroskaExtractor"
+#include <utils/Log.h>
+
+#include "FLACDecoder.h"
+#include "MatroskaExtractor.h"
+
+#include <media/DataSourceBase.h>
+#include <media/ExtractorUtils.h>
+#include <media/MediaTrack.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaBufferBase.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MetaDataUtils.h>
+#include <utils/String8.h>
+
+#include <arpa/inet.h>
+#include <inttypes.h>
+#include <vector>
+
+namespace android {
+
+struct DataSourceBaseReader : public mkvparser::IMkvReader {
+ explicit DataSourceBaseReader(DataSourceBase *source)
+ : mSource(source) {
+ }
+
+ virtual int Read(long long position, long length, unsigned char* buffer) {
+ CHECK(position >= 0);
+ CHECK(length >= 0);
+
+ if (length == 0) {
+ return 0;
+ }
+
+ ssize_t n = mSource->readAt(position, buffer, length);
+
+ if (n <= 0) {
+ return -1;
+ }
+
+ return 0;
+ }
+
+ virtual int Length(long long* total, long long* available) {
+ off64_t size;
+ if (mSource->getSize(&size) != OK) {
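+ // Size unknown (e.g. a streaming source): report no total and an effectively
+ // unbounded 'available' so parsing can continue incrementally.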
+ *total = -1;
+ *available = (long long)((1ull << 63) - 1);
+
+ return 0;
+ }
+
+ if (total) {
+ *total = size;
+ }
+
+ if (available) {
+ *available = size;
+ }
+
+ return 0;
+ }
+
+private:
+ DataSourceBase *mSource;
+
+ DataSourceBaseReader(const DataSourceBaseReader &);
+ DataSourceBaseReader &operator=(const DataSourceBaseReader &);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+struct BlockIterator {
+ BlockIterator(MatroskaExtractor *extractor, unsigned long trackNum, unsigned long index);
+
+ bool eos() const;
+
+ void advance();
+ void reset();
+
+ void seek(
+ int64_t seekTimeUs, bool isAudio,
+ int64_t *actualFrameTimeUs);
+
+ const mkvparser::Block *block() const;
+ int64_t blockTimeUs() const;
+
+private:
+ MatroskaExtractor *mExtractor;
+ long long mTrackNum;
+ unsigned long mIndex;
+
+ const mkvparser::Cluster *mCluster;
+ const mkvparser::BlockEntry *mBlockEntry;
+ long mBlockEntryIndex;
+
+ void advance_l();
+
+ BlockIterator(const BlockIterator &);
+ BlockIterator &operator=(const BlockIterator &);
+};
+
+struct MatroskaSource : public MediaTrack {
+ MatroskaSource(MatroskaExtractor *extractor, size_t index);
+
+ virtual status_t start(MetaDataBase *params);
+ virtual status_t stop();
+
+ virtual status_t getFormat(MetaDataBase &);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options);
+
+protected:
+ virtual ~MatroskaSource();
+
+private:
+ enum Type {
+ AVC,
+ AAC,
+ HEVC,
+ OTHER
+ };
+
+ MatroskaExtractor *mExtractor;
+ size_t mTrackIndex;
+ Type mType;
+ bool mIsAudio;
+ BlockIterator mBlockIter;
+ ssize_t mNALSizeLen; // for type AVC or HEVC
+
+ List<MediaBufferBase *> mPendingFrames;
+
+ status_t advance();
+
+ status_t setWebmBlockCryptoInfo(MediaBufferBase *mbuf);
+ status_t readBlock();
+ void clearPendingFrames();
+
+ MatroskaSource(const MatroskaSource &);
+ MatroskaSource &operator=(const MatroskaSource &);
+};
+
+const mkvparser::Track* MatroskaExtractor::TrackInfo::getTrack() const {
+ return mExtractor->mSegment->GetTracks()->GetTrackByNumber(mTrackNum);
+}
+
+// This function does exactly the same as mkvparser::Cues::Find, except that it
+// searches in our own track based vectors. We should not need this once mkvparser
+// adds the same functionality.
+const mkvparser::CuePoint::TrackPosition *MatroskaExtractor::TrackInfo::find(
+ long long timeNs) const {
+ ALOGV("mCuePoints.size %zu", mCuePoints.size());
+ if (mCuePoints.empty()) {
+ return NULL;
+ }
+
+ const mkvparser::CuePoint* cp = mCuePoints.itemAt(0);
+ const mkvparser::Track* track = getTrack();
+ if (timeNs <= cp->GetTime(mExtractor->mSegment)) {
+ return cp->Find(track);
+ }
+
+ // Binary searches through relevant cues; assumes cues are ordered by timecode.
+ // If we do detect out-of-order cues, return NULL.
+ size_t lo = 0;
+ size_t hi = mCuePoints.size();
+ while (lo < hi) {
+ const size_t mid = lo + (hi - lo) / 2;
+ const mkvparser::CuePoint* const midCp = mCuePoints.itemAt(mid);
+ const long long cueTimeNs = midCp->GetTime(mExtractor->mSegment);
+ if (cueTimeNs <= timeNs) {
+ lo = mid + 1;
+ } else {
+ hi = mid;
+ }
+ }
+
+ if (lo == 0) {
+ return NULL;
+ }
+
+ cp = mCuePoints.itemAt(lo - 1);
+ if (cp->GetTime(mExtractor->mSegment) > timeNs) {
+ return NULL;
+ }
+
+ return cp->Find(track);
+}
+
+MatroskaSource::MatroskaSource(
+ MatroskaExtractor *extractor, size_t index)
+ : mExtractor(extractor),
+ mTrackIndex(index),
+ mType(OTHER),
+ mIsAudio(false),
+ mBlockIter(mExtractor,
+ mExtractor->mTracks.itemAt(index).mTrackNum,
+ index),
+ mNALSizeLen(-1) {
+ MetaDataBase &meta = mExtractor->mTracks.editItemAt(index).mMeta;
+
+ const char *mime;
+ CHECK(meta.findCString(kKeyMIMEType, &mime));
+
+ mIsAudio = !strncasecmp("audio/", mime, 6);
+
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ mType = AVC;
+
+ uint32_t dummy;
+ const uint8_t *avcc;
+ size_t avccSize;
+ int32_t nalSizeLen = 0;
+ if (meta.findInt32(kKeyNalLengthSize, &nalSizeLen)) {
+ if (nalSizeLen >= 0 && nalSizeLen <= 4) {
+ mNALSizeLen = nalSizeLen;
+ }
+ } else if (meta.findData(kKeyAVCC, &dummy, (const void **)&avcc, &avccSize)
+ && avccSize >= 5u) {
+ mNALSizeLen = 1 + (avcc[4] & 3);
+ ALOGV("mNALSizeLen = %zd", mNALSizeLen);
+ } else {
+ ALOGE("No mNALSizeLen");
+ }
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ mType = HEVC;
+
+ uint32_t dummy;
+ const uint8_t *hvcc;
+ size_t hvccSize;
+ if (meta.findData(kKeyHVCC, &dummy, (const void **)&hvcc, &hvccSize)
+ && hvccSize >= 22u) {
+ mNALSizeLen = 1 + (hvcc[14+7] & 3);
+ ALOGV("mNALSizeLen = %zu", mNALSizeLen);
+ } else {
+ ALOGE("No mNALSizeLen");
+ }
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
+ mType = AAC;
+ }
+}
+
+MatroskaSource::~MatroskaSource() {
+ clearPendingFrames();
+}
+
+status_t MatroskaSource::start(MetaDataBase * /* params */) {
+ if (mType == AVC && mNALSizeLen < 0) {
+ return ERROR_MALFORMED;
+ }
+
+ mBlockIter.reset();
+
+ return OK;
+}
+
+status_t MatroskaSource::stop() {
+ clearPendingFrames();
+
+ return OK;
+}
+
+status_t MatroskaSource::getFormat(MetaDataBase &meta) {
+ meta = mExtractor->mTracks.itemAt(mTrackIndex).mMeta;
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+BlockIterator::BlockIterator(
+ MatroskaExtractor *extractor, unsigned long trackNum, unsigned long index)
+ : mExtractor(extractor),
+ mTrackNum(trackNum),
+ mIndex(index),
+ mCluster(NULL),
+ mBlockEntry(NULL),
+ mBlockEntryIndex(0) {
+ reset();
+}
+
+bool BlockIterator::eos() const {
+ return mCluster == NULL || mCluster->EOS();
+}
+
+void BlockIterator::advance() {
+ Mutex::Autolock autoLock(mExtractor->mLock);
+ advance_l();
+}
+
+void BlockIterator::advance_l() {
+ for (;;) {
+ long res = mCluster->GetEntry(mBlockEntryIndex, mBlockEntry);
+ ALOGV("GetEntry returned %ld", res);
+
+ long long pos;
+ long len;
+ if (res < 0) {
+ // Need to parse this cluster some more
+
+ CHECK_EQ(res, mkvparser::E_BUFFER_NOT_FULL);
+
+ res = mCluster->Parse(pos, len);
+ ALOGV("Parse returned %ld", res);
+
+ if (res < 0) {
+ // I/O error
+
+ ALOGE("Cluster::Parse returned result %ld", res);
+
+ mCluster = NULL;
+ break;
+ }
+
+ continue;
+ } else if (res == 0) {
+ // We're done with this cluster
+
+ const mkvparser::Cluster *nextCluster;
+ res = mExtractor->mSegment->ParseNext(
+ mCluster, nextCluster, pos, len);
+ ALOGV("ParseNext returned %ld", res);
+
+ if (res != 0) {
+ // EOF or error
+
+ mCluster = NULL;
+ break;
+ }
+
+ CHECK_EQ(res, 0);
+ CHECK(nextCluster != NULL);
+ CHECK(!nextCluster->EOS());
+
+ mCluster = nextCluster;
+
+ res = mCluster->Parse(pos, len);
+ ALOGV("Parse (2) returned %ld", res);
+
+ if (res < 0) {
+ // I/O error
+
+ ALOGE("Cluster::Parse returned result %ld", res);
+
+ mCluster = NULL;
+ break;
+ }
+
+ mBlockEntryIndex = 0;
+ continue;
+ }
+
+ CHECK(mBlockEntry != NULL);
+ CHECK(mBlockEntry->GetBlock() != NULL);
+ ++mBlockEntryIndex;
+
+ if (mBlockEntry->GetBlock()->GetTrackNumber() == mTrackNum) {
+ break;
+ }
+ }
+}
+
+void BlockIterator::reset() {
+ Mutex::Autolock autoLock(mExtractor->mLock);
+
+ mCluster = mExtractor->mSegment->GetFirst();
+ mBlockEntry = NULL;
+ mBlockEntryIndex = 0;
+
+ do {
+ advance_l();
+ } while (!eos() && block()->GetTrackNumber() != mTrackNum);
+}
+
+void BlockIterator::seek(
+ int64_t seekTimeUs, bool isAudio,
+ int64_t *actualFrameTimeUs) {
+ Mutex::Autolock autoLock(mExtractor->mLock);
+
+ *actualFrameTimeUs = -1ll;
+
+ if (seekTimeUs > INT64_MAX / 1000ll ||
+ seekTimeUs < INT64_MIN / 1000ll ||
+ (mExtractor->mSeekPreRollNs > 0 &&
+ (seekTimeUs * 1000ll) < INT64_MIN + mExtractor->mSeekPreRollNs) ||
+ (mExtractor->mSeekPreRollNs < 0 &&
+ (seekTimeUs * 1000ll) > INT64_MAX + mExtractor->mSeekPreRollNs)) {
+ ALOGE("cannot seek to %lld", (long long) seekTimeUs);
+ return;
+ }
+
+ const int64_t seekTimeNs = seekTimeUs * 1000ll - mExtractor->mSeekPreRollNs;
+
+ mkvparser::Segment* const pSegment = mExtractor->mSegment;
+
+ // Special case the 0 seek to avoid loading Cues when the application
+ // extraneously seeks to 0 before playing.
+ if (seekTimeNs <= 0) {
+ ALOGV("Seek to beginning: %" PRId64, seekTimeUs);
+ mCluster = pSegment->GetFirst();
+ mBlockEntryIndex = 0;
+ do {
+ advance_l();
+ } while (!eos() && block()->GetTrackNumber() != mTrackNum);
+ return;
+ }
+
+ ALOGV("Seeking to: %" PRId64, seekTimeUs);
+
+ // If the Cues have not been located then find them.
+ const mkvparser::Cues* pCues = pSegment->GetCues();
+ const mkvparser::SeekHead* pSH = pSegment->GetSeekHead();
+ if (!pCues && pSH) {
+ const size_t count = pSH->GetCount();
+ const mkvparser::SeekHead::Entry* pEntry;
+ ALOGV("No Cues yet");
+
+ for (size_t index = 0; index < count; index++) {
+ pEntry = pSH->GetEntry(index);
+
+ if (pEntry->id == 0x0C53BB6B) { // Cues ID
+ long len; long long pos;
+ pSegment->ParseCues(pEntry->pos, pos, len);
+ pCues = pSegment->GetCues();
+ ALOGV("Cues found");
+ break;
+ }
+ }
+
+ if (!pCues) {
+ ALOGE("No Cues in file");
+ return;
+ }
+ }
+ else if (!pSH) {
+ ALOGE("No SeekHead");
+ return;
+ }
+
+ const mkvparser::CuePoint* pCP;
+ mkvparser::Tracks const *pTracks = pSegment->GetTracks();
+ while (!pCues->DoneParsing()) {
+ pCues->LoadCuePoint();
+ pCP = pCues->GetLast();
+ CHECK(pCP);
+
+ size_t trackCount = mExtractor->mTracks.size();
+ for (size_t index = 0; index < trackCount; ++index) {
+ MatroskaExtractor::TrackInfo& track = mExtractor->mTracks.editItemAt(index);
+ const mkvparser::Track *pTrack = pTracks->GetTrackByNumber(track.mTrackNum);
+ if (pTrack && pTrack->GetType() == 1 && pCP->Find(pTrack)) { // VIDEO_TRACK
+ track.mCuePoints.push_back(pCP);
+ }
+ }
+
+ if (pCP->GetTime(pSegment) >= seekTimeNs) {
+ ALOGV("Parsed past relevant Cue");
+ break;
+ }
+ }
+
+ const mkvparser::CuePoint::TrackPosition *pTP = NULL;
+ const mkvparser::Track *thisTrack = pTracks->GetTrackByNumber(mTrackNum);
+ if (thisTrack->GetType() == 1) { // video
+ MatroskaExtractor::TrackInfo& track = mExtractor->mTracks.editItemAt(mIndex);
+ pTP = track.find(seekTimeNs);
+ } else {
+ // The Cue index is built around video keyframes
+ unsigned long int trackCount = pTracks->GetTracksCount();
+ for (size_t index = 0; index < trackCount; ++index) {
+ const mkvparser::Track *pTrack = pTracks->GetTrackByIndex(index);
+ if (pTrack && pTrack->GetType() == 1 && pCues->Find(seekTimeNs, pTrack, pCP, pTP)) {
+ ALOGV("Video track located at %zu", index);
+ break;
+ }
+ }
+ }
+
+
+ // Always *search* based on the video track, but finalize based on mTrackNum
+ if (!pTP) {
+ ALOGE("Did not locate the video track for seeking");
+ return;
+ }
+
+ mCluster = pSegment->FindOrPreloadCluster(pTP->m_pos);
+
+ CHECK(mCluster);
+ CHECK(!mCluster->EOS());
+
+ // mBlockEntryIndex starts at 0 but m_block starts at 1
+ CHECK_GT(pTP->m_block, 0);
+ mBlockEntryIndex = pTP->m_block - 1;
+
+ for (;;) {
+ advance_l();
+
+ if (eos()) break;
+
+ if (isAudio || block()->IsKey()) {
+ // Accept the first key frame
+ int64_t frameTimeUs = (block()->GetTime(mCluster) + 500LL) / 1000LL;
+ if (thisTrack->GetType() == 1 || frameTimeUs >= seekTimeUs) {
+ *actualFrameTimeUs = frameTimeUs;
+ ALOGV("Requested seek point: %" PRId64 " actual: %" PRId64,
+ seekTimeUs, *actualFrameTimeUs);
+ break;
+ }
+ }
+ }
+}
+
+const mkvparser::Block *BlockIterator::block() const {
+ CHECK(!eos());
+
+ return mBlockEntry->GetBlock();
+}
+
+int64_t BlockIterator::blockTimeUs() const {
+ if (mCluster == NULL || mBlockEntry == NULL) {
+ return -1;
+ }
+ return (mBlockEntry->GetBlock()->GetTime(mCluster) + 500ll) / 1000ll;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static unsigned U24_AT(const uint8_t *ptr) {
+ return ptr[0] << 16 | ptr[1] << 8 | ptr[2];
+}
+
+static AString uriDebugString(const char *uri) {
+ // find scheme
+ AString scheme;
+ for (size_t i = 0; i < strlen(uri); i++) {
+ const char c = uri[i];
+ if (!isascii(c)) {
+ break;
+ } else if (isalpha(c)) {
+ continue;
+ } else if (i == 0) {
+ // first character must be a letter
+ break;
+ } else if (isdigit(c) || c == '+' || c == '.' || c =='-') {
+ continue;
+ } else if (c != ':') {
+ break;
+ }
+ scheme = AString(uri, 0, i);
+ scheme.append("://<suppressed>");
+ return scheme;
+ }
+ return AString("<no-scheme URI suppressed>");
+}
+
+void MatroskaSource::clearPendingFrames() {
+ while (!mPendingFrames.empty()) {
+ MediaBufferBase *frame = *mPendingFrames.begin();
+ mPendingFrames.erase(mPendingFrames.begin());
+
+ frame->release();
+ frame = NULL;
+ }
+}
+
+status_t MatroskaSource::setWebmBlockCryptoInfo(MediaBufferBase *mbuf) {
+ if (mbuf->range_length() < 1 || mbuf->range_length() - 1 > INT32_MAX) {
+ // 1-byte signal
+ return ERROR_MALFORMED;
+ }
+
+ const uint8_t *data = (const uint8_t *)mbuf->data() + mbuf->range_offset();
+ bool encrypted = data[0] & 0x1;
+ bool partitioned = data[0] & 0x2;
+ if (encrypted && mbuf->range_length() < 9) {
+ // 1-byte signal + 8-byte IV
+ return ERROR_MALFORMED;
+ }
+
+ MetaDataBase &meta = mbuf->meta_data();
+ if (encrypted) {
+ uint8_t ctrCounter[16] = { 0 };
+ uint32_t type;
+ const uint8_t *keyId;
+ size_t keyIdSize;
+ const MetaDataBase &trackMeta = mExtractor->mTracks.itemAt(mTrackIndex).mMeta;
+ CHECK(trackMeta.findData(kKeyCryptoKey, &type, (const void **)&keyId, &keyIdSize));
+ meta.setData(kKeyCryptoKey, 0, keyId, keyIdSize);
+ memcpy(ctrCounter, data + 1, 8);
+ meta.setData(kKeyCryptoIV, 0, ctrCounter, 16);
+ if (partitioned) {
+ /* 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Signal Byte | |
+ * +-+-+-+-+-+-+-+-+ IV |
+ * | |
+ * | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | num_partition | Partition 0 offset -> |
+ * |-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-|
+ * | -> Partition 0 offset | ... |
+ * |-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-|
+ * | ... | Partition n-1 offset -> |
+ * |-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-|
+ * | -> Partition n-1 offset | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+ * | Clear/encrypted sample data |
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ if (mbuf->range_length() < 10) {
+ return ERROR_MALFORMED;
+ }
+ uint8_t numPartitions = data[9];
+ if (mbuf->range_length() - 10 < numPartitions * sizeof(uint32_t)) {
+ return ERROR_MALFORMED;
+ }
+ std::vector<uint32_t> plainSizes, encryptedSizes;
+ uint32_t prev = 0;
+ uint32_t frameOffset = 10 + numPartitions * sizeof(uint32_t);
+ const uint32_t *partitions = reinterpret_cast<const uint32_t*>(data + 10);
+ for (uint32_t i = 0; i <= numPartitions; ++i) {
+ uint32_t p_i = i < numPartitions
+ ? ntohl(partitions[i])
+ : (mbuf->range_length() - frameOffset);
+ if (p_i < prev) {
+ return ERROR_MALFORMED;
+ }
+ uint32_t size = p_i - prev;
+ prev = p_i;
+ if (i % 2) {
+ encryptedSizes.push_back(size);
+ } else {
+ plainSizes.push_back(size);
+ }
+ }
+ if (plainSizes.size() > encryptedSizes.size()) {
+ encryptedSizes.push_back(0);
+ }
+ uint32_t sizeofPlainSizes = sizeof(uint32_t) * plainSizes.size();
+ uint32_t sizeofEncryptedSizes = sizeof(uint32_t) * encryptedSizes.size();
+ meta.setData(kKeyPlainSizes, 0, plainSizes.data(), sizeofPlainSizes);
+ meta.setData(kKeyEncryptedSizes, 0, encryptedSizes.data(), sizeofEncryptedSizes);
+ mbuf->set_range(frameOffset, mbuf->range_length() - frameOffset);
+ } else {
+ /*
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Signal Byte | |
+ * +-+-+-+-+-+-+-+-+ IV |
+ * | |
+ * | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | |
+ * |-+-+-+-+-+-+-+-+ |
+ * : Bytes 1..N of encrypted frame :
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ int32_t plainSizes[] = { 0 };
+ int32_t encryptedSizes[] = { static_cast<int32_t>(mbuf->range_length() - 9) };
+ meta.setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
+ meta.setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
+ mbuf->set_range(9, mbuf->range_length() - 9);
+ }
+ } else {
+ /*
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Signal Byte | |
+ * +-+-+-+-+-+-+-+-+ |
+ * : Bytes 1..N of unencrypted frame :
+ * | |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ int32_t plainSizes[] = { static_cast<int32_t>(mbuf->range_length() - 1) };
+ int32_t encryptedSizes[] = { 0 };
+ meta.setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
+ meta.setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
+ mbuf->set_range(1, mbuf->range_length() - 1);
+ }
+
+ return OK;
+}
+
+status_t MatroskaSource::readBlock() {
+ CHECK(mPendingFrames.empty());
+
+ if (mBlockIter.eos()) {
+ return ERROR_END_OF_STREAM;
+ }
+
+ const mkvparser::Block *block = mBlockIter.block();
+
+ int64_t timeUs = mBlockIter.blockTimeUs();
+
+ for (int i = 0; i < block->GetFrameCount(); ++i) {
+ MatroskaExtractor::TrackInfo *trackInfo = &mExtractor->mTracks.editItemAt(mTrackIndex);
+ const mkvparser::Block::Frame &frame = block->GetFrame(i);
+ size_t len = frame.len;
+ if (SIZE_MAX - len < trackInfo->mHeaderLen) {
+ return ERROR_MALFORMED;
+ }
+
+ len += trackInfo->mHeaderLen;
+ MediaBufferBase *mbuf = MediaBufferBase::Create(len);
+ uint8_t *data = static_cast<uint8_t *>(mbuf->data());
+ if (trackInfo->mHeader) {
+ memcpy(data, trackInfo->mHeader, trackInfo->mHeaderLen);
+ }
+
+ mbuf->meta_data().setInt64(kKeyTime, timeUs);
+ mbuf->meta_data().setInt32(kKeyIsSyncFrame, block->IsKey());
+
+ status_t err = frame.Read(mExtractor->mReader, data + trackInfo->mHeaderLen);
+ if (err == OK
+ && mExtractor->mIsWebm
+ && trackInfo->mEncrypted) {
+ err = setWebmBlockCryptoInfo(mbuf);
+ }
+
+ if (err != OK) {
+ clearPendingFrames(); // release any frames already queued for this block
+
+ mBlockIter.advance();
+ mbuf->release();
+ return err;
+ }
+
+ mPendingFrames.push_back(mbuf);
+ }
+
+ mBlockIter.advance();
+
+ return OK;
+}
+
+status_t MatroskaSource::read(
+ MediaBufferBase **out, const ReadOptions *options) {
+ *out = NULL;
+
+ int64_t targetSampleTimeUs = -1ll;
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ if (mode == ReadOptions::SEEK_FRAME_INDEX) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ if (!mExtractor->isLiveStreaming()) {
+ clearPendingFrames();
+
+ // Audio seeks use the Cues on the video stream to locate the
+ // target Cluster, then iterate within it to find the matching
+ // audio frame.
+ int64_t actualFrameTimeUs;
+ mBlockIter.seek(seekTimeUs, mIsAudio, &actualFrameTimeUs);
+ if (mode == ReadOptions::SEEK_CLOSEST) {
+ targetSampleTimeUs = actualFrameTimeUs;
+ }
+ }
+ }
+
+ while (mPendingFrames.empty()) {
+ status_t err = readBlock();
+
+ if (err != OK) {
+ clearPendingFrames();
+
+ return err;
+ }
+ }
+
+ MediaBufferBase *frame = *mPendingFrames.begin();
+ mPendingFrames.erase(mPendingFrames.begin());
+
+ if ((mType != AVC && mType != HEVC) || mNALSizeLen == 0) {
+ if (targetSampleTimeUs >= 0ll) {
+ frame->meta_data().setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
+ *out = frame;
+
+ return OK;
+ }
+
+ // Each input frame contains one or more NAL fragments, each fragment
+ // is prefixed by mNALSizeLen bytes giving the fragment length,
+ // followed by a corresponding number of bytes containing the fragment.
+ // We output all these fragments into a single large buffer separated
+ // by startcodes (0x00 0x00 0x00 0x01).
+ //
+ // When mNALSizeLen is 0, we assume the data is already in the format
+ // desired.
+
+ const uint8_t *srcPtr =
+ (const uint8_t *)frame->data() + frame->range_offset();
+
+ size_t srcSize = frame->range_length();
+
+ size_t dstSize = 0;
+ MediaBufferBase *buffer = NULL;
+ uint8_t *dstPtr = NULL;
+
+ for (int32_t pass = 0; pass < 2; ++pass) {
+ size_t srcOffset = 0;
+ size_t dstOffset = 0;
+ while (srcOffset + mNALSizeLen <= srcSize) {
+ size_t NALsize;
+ switch (mNALSizeLen) {
+ case 1: NALsize = srcPtr[srcOffset]; break;
+ case 2: NALsize = U16_AT(srcPtr + srcOffset); break;
+ case 3: NALsize = U24_AT(srcPtr + srcOffset); break;
+ case 4: NALsize = U32_AT(srcPtr + srcOffset); break;
+ default:
+ TRESPASS();
+ }
+
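+ // Reject zero-length NAL units and catch size_t wraparound from a corrupt NALsize.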
+ if (srcOffset + mNALSizeLen + NALsize <= srcOffset + mNALSizeLen) {
+ frame->release();
+ frame = NULL;
+
+ return ERROR_MALFORMED;
+ } else if (srcOffset + mNALSizeLen + NALsize > srcSize) {
+ break;
+ }
+
+ if (pass == 1) {
+ memcpy(&dstPtr[dstOffset], "\x00\x00\x00\x01", 4);
+
+ if (frame != buffer) {
+ memcpy(&dstPtr[dstOffset + 4],
+ &srcPtr[srcOffset + mNALSizeLen],
+ NALsize);
+ }
+ }
+
+ dstOffset += 4; // 0x00 00 00 01
+ dstOffset += NALsize;
+
+ srcOffset += mNALSizeLen + NALsize;
+ }
+
+ if (srcOffset < srcSize) {
+ // There were trailing bytes or not enough data to complete
+ // a fragment.
+
+ frame->release();
+ frame = NULL;
+
+ return ERROR_MALFORMED;
+ }
+
+ if (pass == 0) {
+ dstSize = dstOffset;
+
+ if (dstSize == srcSize && mNALSizeLen == 4) {
+ // In this special case we can re-use the input buffer by substituting
+ // each 4-byte nal size with a 4-byte start code
+ buffer = frame;
+ } else {
+ buffer = MediaBufferBase::Create(dstSize);
+ }
+
+ int64_t timeUs;
+ CHECK(frame->meta_data().findInt64(kKeyTime, &timeUs));
+ int32_t isSync;
+ CHECK(frame->meta_data().findInt32(kKeyIsSyncFrame, &isSync));
+
+ buffer->meta_data().setInt64(kKeyTime, timeUs);
+ buffer->meta_data().setInt32(kKeyIsSyncFrame, isSync);
+
+ dstPtr = (uint8_t *)buffer->data();
+ }
+ }
+
+ if (frame != buffer) {
+ frame->release();
+ frame = NULL;
+ }
+
+ if (targetSampleTimeUs >= 0ll) {
+ buffer->meta_data().setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
+ *out = buffer;
+
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+MatroskaExtractor::MatroskaExtractor(DataSourceBase *source)
+ : mDataSource(source),
+ mReader(new DataSourceBaseReader(mDataSource)),
+ mSegment(NULL),
+ mExtractedThumbnails(false),
+ mIsWebm(false),
+ mSeekPreRollNs(0) {
+ off64_t size;
+ mIsLiveStreaming =
+ (mDataSource->flags()
+ & (DataSourceBase::kWantsPrefetching
+ | DataSourceBase::kIsCachingDataSource))
+ && mDataSource->getSize(&size) != OK;
+
+ mkvparser::EBMLHeader ebmlHeader;
+ long long pos;
+ if (ebmlHeader.Parse(mReader, pos) < 0) {
+ return;
+ }
+
+ if (ebmlHeader.m_docType && !strcmp("webm", ebmlHeader.m_docType)) {
+ mIsWebm = true;
+ }
+
+ long long ret =
+ mkvparser::Segment::CreateInstance(mReader, pos, mSegment);
+
+ if (ret) {
+ CHECK(mSegment == NULL);
+ return;
+ }
+
+ // from mkvparser::Segment::Load(), but stop at first cluster
+ ret = mSegment->ParseHeaders();
+ if (ret == 0) {
+ long len;
+ ret = mSegment->LoadCluster(pos, len);
+ if (ret >= 1) {
+ // no more clusters
+ ret = 0;
+ }
+ } else if (ret > 0) {
+ ret = mkvparser::E_BUFFER_NOT_FULL;
+ }
+
+ if (ret < 0) {
+ char uri[1024];
+ if(!mDataSource->getUri(uri, sizeof(uri))) {
+ uri[0] = '\0';
+ }
+ ALOGW("Corrupt %s source: %s", mIsWebm ? "webm" : "matroska",
+ uriDebugString(uri).c_str());
+ delete mSegment;
+ mSegment = NULL;
+ return;
+ }
+
+#if 0
+ const mkvparser::SegmentInfo *info = mSegment->GetInfo();
+ ALOGI("muxing app: %s, writing app: %s",
+ info->GetMuxingAppAsUTF8(),
+ info->GetWritingAppAsUTF8());
+#endif
+
+ addTracks();
+}
+
+MatroskaExtractor::~MatroskaExtractor() {
+ delete mSegment;
+ mSegment = NULL;
+
+ delete mReader;
+ mReader = NULL;
+}
+
+size_t MatroskaExtractor::countTracks() {
+ return mTracks.size();
+}
+
+MediaTrack *MatroskaExtractor::getTrack(size_t index) {
+ if (index >= mTracks.size()) {
+ return NULL;
+ }
+
+ return new MatroskaSource(this, index);
+}
+
+status_t MatroskaExtractor::getTrackMetaData(
+ MetaDataBase &meta,
+ size_t index, uint32_t flags) {
+ if (index >= mTracks.size()) {
+ return UNKNOWN_ERROR;
+ }
+
+ if ((flags & kIncludeExtensiveMetaData) && !mExtractedThumbnails
+ && !isLiveStreaming()) {
+ findThumbnails();
+ mExtractedThumbnails = true;
+ }
+
+ meta = mTracks.itemAt(index).mMeta;
+ return OK;
+}
+
+bool MatroskaExtractor::isLiveStreaming() const {
+ return mIsLiveStreaming;
+}
+
+static int bytesForSize(size_t size) {
+ // use at most 28 bits (4 times 7)
+ CHECK(size <= 0xfffffff);
+
+ if (size > 0x1fffff) {
+ return 4;
+ } else if (size > 0x3fff) {
+ return 3;
+ } else if (size > 0x7f) {
+ return 2;
+ }
+ return 1;
+}
+
+static void storeSize(uint8_t *data, size_t &idx, size_t size) {
+ int numBytes = bytesForSize(size);
+ idx += numBytes;
+
+ data += idx;
+ size_t next = 0;
+ while (numBytes--) {
+ *--data = (size & 0x7f) | next;
+ size >>= 7;
+ next = 0x80;
+ }
+}
+
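+// Wrap the raw codec private data in a synthesized ESDS (MPEG-4 ES_Descriptor)
+// blob so it can be passed along as codec specific data.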
+static void addESDSFromCodecPrivate(
+ MetaDataBase &meta,
+ bool isAudio, const void *priv, size_t privSize) {
+
+ int privSizeBytesRequired = bytesForSize(privSize);
+ int esdsSize2 = 14 + privSizeBytesRequired + privSize;
+ int esdsSize2BytesRequired = bytesForSize(esdsSize2);
+ int esdsSize1 = 4 + esdsSize2BytesRequired + esdsSize2;
+ int esdsSize1BytesRequired = bytesForSize(esdsSize1);
+ size_t esdsSize = 1 + esdsSize1BytesRequired + esdsSize1;
+ uint8_t *esds = new uint8_t[esdsSize];
+
+ size_t idx = 0;
+ esds[idx++] = 0x03;
+ storeSize(esds, idx, esdsSize1);
+ esds[idx++] = 0x00; // ES_ID
+ esds[idx++] = 0x00; // ES_ID
+ esds[idx++] = 0x00; // streamDependenceFlag, URL_Flag, OCRstreamFlag
+ esds[idx++] = 0x04;
+ storeSize(esds, idx, esdsSize2);
+ esds[idx++] = isAudio ? 0x40 // Audio ISO/IEC 14496-3
+ : 0x20; // Visual ISO/IEC 14496-2
+ for (int i = 0; i < 12; i++) {
+ esds[idx++] = 0x00;
+ }
+ esds[idx++] = 0x05;
+ storeSize(esds, idx, privSize);
+ memcpy(esds + idx, priv, privSize);
+
+ meta.setData(kKeyESDS, 0, esds, esdsSize);
+
+ delete[] esds;
+ esds = NULL;
+}
+
+status_t addVorbisCodecInfo(
+ MetaDataBase &meta,
+ const void *_codecPrivate, size_t codecPrivateSize) {
+ // hexdump(_codecPrivate, codecPrivateSize);
+
+ if (codecPrivateSize < 1) {
+ return ERROR_MALFORMED;
+ }
+
+ const uint8_t *codecPrivate = (const uint8_t *)_codecPrivate;
+
+ if (codecPrivate[0] != 0x02) {
+ return ERROR_MALFORMED;
+ }
+
+ // codecInfo starts with two lengths, len1 and len2, that are
+ // "Xiph-style-lacing encoded"...
+
+ size_t offset = 1;
+ size_t len1 = 0;
+ while (offset < codecPrivateSize && codecPrivate[offset] == 0xff) {
+ if (len1 > (SIZE_MAX - 0xff)) {
+ return ERROR_MALFORMED; // would overflow
+ }
+ len1 += 0xff;
+ ++offset;
+ }
+ if (offset >= codecPrivateSize) {
+ return ERROR_MALFORMED;
+ }
+ if (len1 > (SIZE_MAX - codecPrivate[offset])) {
+ return ERROR_MALFORMED; // would overflow
+ }
+ len1 += codecPrivate[offset++];
+
+ size_t len2 = 0;
+ while (offset < codecPrivateSize && codecPrivate[offset] == 0xff) {
+ if (len2 > (SIZE_MAX - 0xff)) {
+ return ERROR_MALFORMED; // would overflow
+ }
+ len2 += 0xff;
+ ++offset;
+ }
+ if (offset >= codecPrivateSize) {
+ return ERROR_MALFORMED;
+ }
+ if (len2 > (SIZE_MAX - codecPrivate[offset])) {
+ return ERROR_MALFORMED; // would overflow
+ }
+ len2 += codecPrivate[offset++];
+
+ if (len1 > SIZE_MAX - len2 || offset > SIZE_MAX - (len1 + len2) ||
+ codecPrivateSize < offset + len1 + len2) {
+ return ERROR_MALFORMED;
+ }
+
+ if (codecPrivate[offset] != 0x01) {
+ return ERROR_MALFORMED;
+ }
+ meta.setData(kKeyVorbisInfo, 0, &codecPrivate[offset], len1);
+
+ offset += len1;
+ if (codecPrivate[offset] != 0x03) {
+ return ERROR_MALFORMED;
+ }
+
+ offset += len2;
+ if (codecPrivate[offset] != 0x05) {
+ return ERROR_MALFORMED;
+ }
+
+ meta.setData(
+ kKeyVorbisBooks, 0, &codecPrivate[offset],
+ codecPrivateSize - offset);
+
+ return OK;
+}
+
+static status_t addFlacMetadata(
+ MetaDataBase &meta,
+ const void *codecPrivate, size_t codecPrivateSize) {
+ // hexdump(codecPrivate, codecPrivateSize);
+
+ meta.setData(kKeyFlacMetadata, 0, codecPrivate, codecPrivateSize);
+
+ int32_t maxInputSize = 64 << 10;
+ FLACDecoder *flacDecoder = FLACDecoder::Create();
+ if (flacDecoder != NULL
+ && flacDecoder->parseMetadata((const uint8_t*)codecPrivate, codecPrivateSize) == OK) {
+ FLAC__StreamMetadata_StreamInfo streamInfo = flacDecoder->getStreamInfo();
+ maxInputSize = streamInfo.max_framesize;
+ if (maxInputSize == 0) {
+ // In case max framesize is not available, use raw data size as max framesize,
+ // assuming there is no expansion.
+ if (streamInfo.max_blocksize != 0
+ && streamInfo.channels != 0
+ && ((streamInfo.bits_per_sample + 7) / 8) >
+ INT32_MAX / streamInfo.max_blocksize / streamInfo.channels) {
+ delete flacDecoder;
+ return ERROR_MALFORMED;
+ }
+ maxInputSize = ((streamInfo.bits_per_sample + 7) / 8)
+ * streamInfo.max_blocksize * streamInfo.channels;
+ }
+ }
+ meta.setInt32(kKeyMaxInputSize, maxInputSize);
+
+ delete flacDecoder;
+ return OK;
+}
+
+status_t MatroskaExtractor::synthesizeAVCC(TrackInfo *trackInfo, size_t index) {
+ BlockIterator iter(this, trackInfo->mTrackNum, index);
+ if (iter.eos()) {
+ return ERROR_MALFORMED;
+ }
+
+ const mkvparser::Block *block = iter.block();
+ if (block->GetFrameCount() <= 0) {
+ return ERROR_MALFORMED;
+ }
+
+ const mkvparser::Block::Frame &frame = block->GetFrame(0);
+ auto tmpData = heapbuffer<unsigned char>(frame.len);
+ long n = frame.Read(mReader, tmpData.get());
+ if (n != 0) {
+ return ERROR_MALFORMED;
+ }
+
+ if (!MakeAVCCodecSpecificData(trackInfo->mMeta, tmpData.get(), frame.len)) {
+ return ERROR_MALFORMED;
+ }
+
+ // Override the synthesized nal length size, which is arbitrary
+ trackInfo->mMeta.setInt32(kKeyNalLengthSize, 0);
+ return OK;
+}
+
+static inline bool isValidInt32ColourValue(long long value) {
+ return value != mkvparser::Colour::kValueNotPresent
+ && value >= INT32_MIN
+ && value <= INT32_MAX;
+}
+
+static inline bool isValidUint16ColourValue(long long value) {
+ return value != mkvparser::Colour::kValueNotPresent
+ && value >= 0
+ && value <= UINT16_MAX;
+}
+
+static inline bool isValidPrimary(const mkvparser::PrimaryChromaticity *primary) {
+ return primary != NULL && primary->x >= 0 && primary->x <= 1
+ && primary->y >= 0 && primary->y <= 1;
+}
+
+void MatroskaExtractor::getColorInformation(
+ const mkvparser::VideoTrack *vtrack, MetaDataBase &meta) {
+ const mkvparser::Colour *color = vtrack->GetColour();
+ if (color == NULL) {
+ return;
+ }
+
+ // Color Aspects
+ {
+ int32_t primaries = 2; // ISO unspecified
+ int32_t transfer = 2; // ISO unspecified
+ int32_t coeffs = 2; // ISO unspecified
+ bool fullRange = false; // default
+ bool rangeSpecified = false;
+
+ if (isValidInt32ColourValue(color->primaries)) {
+ primaries = color->primaries;
+ }
+ if (isValidInt32ColourValue(color->transfer_characteristics)) {
+ transfer = color->transfer_characteristics;
+ }
+ if (isValidInt32ColourValue(color->matrix_coefficients)) {
+ coeffs = color->matrix_coefficients;
+ }
+ if (color->range != mkvparser::Colour::kValueNotPresent
+ && color->range != 0 /* MKV unspecified */) {
+ // We only support MKV broadcast range (== limited) and full range.
+ // We treat all other values as the default limited range.
+ fullRange = color->range == 2 /* MKV fullRange */;
+ rangeSpecified = true;
+ }
+
+ ColorAspects aspects;
+ ColorUtils::convertIsoColorAspectsToCodecAspects(
+ primaries, transfer, coeffs, fullRange, aspects);
+ meta.setInt32(kKeyColorPrimaries, aspects.mPrimaries);
+ meta.setInt32(kKeyTransferFunction, aspects.mTransfer);
+ meta.setInt32(kKeyColorMatrix, aspects.mMatrixCoeffs);
+ meta.setInt32(
+ kKeyColorRange, rangeSpecified ? aspects.mRange : ColorAspects::RangeUnspecified);
+ }
+
+ // HDR Static Info
+ {
+ HDRStaticInfo info, nullInfo; // nullInfo is a fully unspecified static info
+ memset(&info, 0, sizeof(info));
+ memset(&nullInfo, 0, sizeof(nullInfo));
+ if (isValidUint16ColourValue(color->max_cll)) {
+ info.sType1.mMaxContentLightLevel = color->max_cll;
+ }
+ if (isValidUint16ColourValue(color->max_fall)) {
+ info.sType1.mMaxFrameAverageLightLevel = color->max_fall;
+ }
+ const mkvparser::MasteringMetadata *mastering = color->mastering_metadata;
+ if (mastering != NULL) {
+ // Convert matroska values to HDRStaticInfo equivalent values for each fully specified
+ // group. See CTA-861.3 section 3.2.1 for more info.
+ if (mastering->luminance_max >= 0.5 && mastering->luminance_max < 65535.5) {
+ info.sType1.mMaxDisplayLuminance = (uint16_t)(mastering->luminance_max + 0.5);
+ }
+ if (mastering->luminance_min >= 0.00005 && mastering->luminance_min < 6.55355) {
+ // HDRStaticInfo Type1 stores min luminance scaled 10000:1
+ info.sType1.mMinDisplayLuminance =
+ (uint16_t)(10000 * mastering->luminance_min + 0.5);
+ }
+ // HDRStaticInfo Type1 stores primaries scaled 50000:1
+ if (isValidPrimary(mastering->white_point)) {
+ info.sType1.mW.x = (uint16_t)(50000 * mastering->white_point->x + 0.5);
+ info.sType1.mW.y = (uint16_t)(50000 * mastering->white_point->y + 0.5);
+ }
+ if (isValidPrimary(mastering->r) && isValidPrimary(mastering->g)
+ && isValidPrimary(mastering->b)) {
+ info.sType1.mR.x = (uint16_t)(50000 * mastering->r->x + 0.5);
+ info.sType1.mR.y = (uint16_t)(50000 * mastering->r->y + 0.5);
+ info.sType1.mG.x = (uint16_t)(50000 * mastering->g->x + 0.5);
+ info.sType1.mG.y = (uint16_t)(50000 * mastering->g->y + 0.5);
+ info.sType1.mB.x = (uint16_t)(50000 * mastering->b->x + 0.5);
+ info.sType1.mB.y = (uint16_t)(50000 * mastering->b->y + 0.5);
+ }
+ }
+ // Only advertise static info if at least one of the groups have been specified.
+ if (memcmp(&info, &nullInfo, sizeof(info)) != 0) {
+ info.mID = HDRStaticInfo::kType1;
+ meta.setData(kKeyHdrStaticInfo, 'hdrS', &info, sizeof(info));
+ }
+ }
+}
+
+status_t MatroskaExtractor::initTrackInfo(
+ const mkvparser::Track *track, MetaDataBase &meta, TrackInfo *trackInfo) {
+ trackInfo->mTrackNum = track->GetNumber();
+ trackInfo->mMeta = meta;
+ trackInfo->mExtractor = this;
+ trackInfo->mEncrypted = false;
+ trackInfo->mHeader = NULL;
+ trackInfo->mHeaderLen = 0;
+
+ for(size_t i = 0; i < track->GetContentEncodingCount(); i++) {
+ const mkvparser::ContentEncoding *encoding = track->GetContentEncodingByIndex(i);
+ for(size_t j = 0; j < encoding->GetEncryptionCount(); j++) {
+ const mkvparser::ContentEncoding::ContentEncryption *encryption;
+ encryption = encoding->GetEncryptionByIndex(j);
+ trackInfo->mMeta.setData(kKeyCryptoKey, 0, encryption->key_id, encryption->key_id_len);
+ trackInfo->mEncrypted = true;
+ break;
+ }
+
+ for(size_t j = 0; j < encoding->GetCompressionCount(); j++) {
+ const mkvparser::ContentEncoding::ContentCompression *compression;
+ compression = encoding->GetCompressionByIndex(j);
+ ALOGV("compression algo %llu settings_len %lld",
+ compression->algo, compression->settings_len);
+ if (compression->algo == 3
+ && compression->settings
+ && compression->settings_len > 0) {
+ trackInfo->mHeader = compression->settings;
+ trackInfo->mHeaderLen = compression->settings_len;
+ }
+ }
+ }
+
+ return OK;
+}
+
+void MatroskaExtractor::addTracks() {
+ const mkvparser::Tracks *tracks = mSegment->GetTracks();
+
+ for (size_t index = 0; index < tracks->GetTracksCount(); ++index) {
+ const mkvparser::Track *track = tracks->GetTrackByIndex(index);
+
+ if (track == NULL) {
+ // Apparently this is currently valid (if unexpected) behaviour
+ // of the mkv parser lib.
+ continue;
+ }
+
+ const char *const codecID = track->GetCodecId();
+ ALOGV("codec id = %s", codecID);
+ ALOGV("codec name = %s", track->GetCodecNameAsUTF8());
+
+ if (codecID == NULL) {
+ ALOGW("unknown codecID is not supported.");
+ continue;
+ }
+
+ size_t codecPrivateSize;
+ const unsigned char *codecPrivate =
+ track->GetCodecPrivate(codecPrivateSize);
+
+ enum { VIDEO_TRACK = 1, AUDIO_TRACK = 2 };
+
+ MetaDataBase meta;
+
+ status_t err = OK;
+
+ switch (track->GetType()) {
+ case VIDEO_TRACK:
+ {
+ const mkvparser::VideoTrack *vtrack =
+ static_cast<const mkvparser::VideoTrack *>(track);
+
+ if (!strcmp("V_MPEG4/ISO/AVC", codecID)) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+ meta.setData(kKeyAVCC, 0, codecPrivate, codecPrivateSize);
+ } else if (!strcmp("V_MPEGH/ISO/HEVC", codecID)) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_HEVC);
+ if (codecPrivateSize > 0) {
+ meta.setData(kKeyHVCC, kTypeHVCC, codecPrivate, codecPrivateSize);
+ } else {
+ ALOGW("HEVC is detected, but does not have configuration.");
+ continue;
+ }
+ } else if (!strcmp("V_MPEG4/ISO/ASP", codecID)) {
+ if (codecPrivateSize > 0) {
+ meta.setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
+ addESDSFromCodecPrivate(
+ meta, false, codecPrivate, codecPrivateSize);
+ } else {
+ ALOGW("%s is detected, but does not have configuration.",
+ codecID);
+ continue;
+ }
+ } else if (!strcmp("V_VP8", codecID)) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP8);
+ } else if (!strcmp("V_VP9", codecID)) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP9);
+ if (codecPrivateSize > 0) {
+ // 'csd-0' for VP9 is the Blob of Codec Private data as
+ // specified in http://www.webmproject.org/vp9/profiles/.
+ meta.setData(
+ kKeyVp9CodecPrivate, 0, codecPrivate,
+ codecPrivateSize);
+ }
+ } else {
+ ALOGW("%s is not supported.", codecID);
+ continue;
+ }
+
+ const long long width = vtrack->GetWidth();
+ const long long height = vtrack->GetHeight();
+ if (width <= 0 || width > INT32_MAX) {
+ ALOGW("track width exceeds int32_t, %lld", width);
+ continue;
+ }
+ if (height <= 0 || height > INT32_MAX) {
+ ALOGW("track height exceeds int32_t, %lld", height);
+ continue;
+ }
+ meta.setInt32(kKeyWidth, (int32_t)width);
+ meta.setInt32(kKeyHeight, (int32_t)height);
+
+ // setting display width/height is optional
+ const long long displayUnit = vtrack->GetDisplayUnit();
+ const long long displayWidth = vtrack->GetDisplayWidth();
+ const long long displayHeight = vtrack->GetDisplayHeight();
+ if (displayWidth > 0 && displayWidth <= INT32_MAX
+ && displayHeight > 0 && displayHeight <= INT32_MAX) {
+ switch (displayUnit) {
+ case 0: // pixels
+ meta.setInt32(kKeyDisplayWidth, (int32_t)displayWidth);
+ meta.setInt32(kKeyDisplayHeight, (int32_t)displayHeight);
+ break;
+ case 1: // centimeters
+ case 2: // inches
+ case 3: // aspect ratio
+ {
+ // Physical layout size is treated the same as aspect ratio.
+ // Note: displayWidth and displayHeight are never zero as they are
+ // checked in the if above.
+ const long long computedWidth =
+ std::max(width, height * displayWidth / displayHeight);
+ const long long computedHeight =
+ std::max(height, width * displayHeight / displayWidth);
+ if (computedWidth <= INT32_MAX && computedHeight <= INT32_MAX) {
+ meta.setInt32(kKeyDisplayWidth, (int32_t)computedWidth);
+ meta.setInt32(kKeyDisplayHeight, (int32_t)computedHeight);
+ }
+ break;
+ }
+ default: // unknown display units, perhaps future version of spec.
+ break;
+ }
+ }
+
+ getColorInformation(vtrack, meta);
+
+ break;
+ }
+
+ case AUDIO_TRACK:
+ {
+ const mkvparser::AudioTrack *atrack =
+ static_cast<const mkvparser::AudioTrack *>(track);
+
+ if (!strcmp("A_AAC", codecID)) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
+ CHECK(codecPrivateSize >= 2);
+
+ addESDSFromCodecPrivate(
+ meta, true, codecPrivate, codecPrivateSize);
+ } else if (!strcmp("A_VORBIS", codecID)) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_VORBIS);
+
+ err = addVorbisCodecInfo(
+ meta, codecPrivate, codecPrivateSize);
+ } else if (!strcmp("A_OPUS", codecID)) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_OPUS);
+ meta.setData(kKeyOpusHeader, 0, codecPrivate, codecPrivateSize);
+ meta.setInt64(kKeyOpusCodecDelay, track->GetCodecDelay());
+ meta.setInt64(kKeyOpusSeekPreRoll, track->GetSeekPreRoll());
+ mSeekPreRollNs = track->GetSeekPreRoll();
+ } else if (!strcmp("A_MPEG/L3", codecID)) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
+ } else if (!strcmp("A_FLAC", codecID)) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_FLAC);
+ err = addFlacMetadata(meta, codecPrivate, codecPrivateSize);
+ } else {
+ ALOGW("%s is not supported.", codecID);
+ continue;
+ }
+
+ meta.setInt32(kKeySampleRate, atrack->GetSamplingRate());
+ meta.setInt32(kKeyChannelCount, atrack->GetChannels());
+ break;
+ }
+
+ default:
+ continue;
+ }
+
+ const char *language = track->GetLanguage();
+ if (language != NULL) {
+ char lang[4];
+ strncpy(lang, language, 3);
+ lang[3] = '\0';
+ meta.setCString(kKeyMediaLanguage, lang);
+ }
+
+ if (err != OK) {
+ ALOGE("skipping track, codec specific data was malformed.");
+ continue;
+ }
+
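+ // Segment duration is reported in ns; convert to us with rounding.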
+ long long durationNs = mSegment->GetDuration();
+ meta.setInt64(kKeyDuration, (durationNs + 500) / 1000);
+
+ mTracks.push();
+ size_t n = mTracks.size() - 1;
+ TrackInfo *trackInfo = &mTracks.editItemAt(n);
+ initTrackInfo(track, meta, trackInfo);
+
+ if (!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) {
+ // Attempt to recover from AVC track without codec private data
+ err = synthesizeAVCC(trackInfo, n);
+ if (err != OK) {
+ mTracks.pop();
+ }
+ }
+ }
+}
+
+void MatroskaExtractor::findThumbnails() {
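+ // For each video track, scan up to the first 20 keyframes and use the
+ // time of the largest one as the thumbnail position.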
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ TrackInfo *info = &mTracks.editItemAt(i);
+
+ const char *mime;
+ CHECK(info->mMeta.findCString(kKeyMIMEType, &mime));
+
+ if (strncasecmp(mime, "video/", 6)) {
+ continue;
+ }
+
+ BlockIterator iter(this, info->mTrackNum, i);
+ int32_t j = 0;
+ int64_t thumbnailTimeUs = 0;
+ size_t maxBlockSize = 0;
+ while (!iter.eos() && j < 20) {
+ if (iter.block()->IsKey()) {
+ ++j;
+
+ size_t blockSize = 0;
+ for (int k = 0; k < iter.block()->GetFrameCount(); ++k) {
+ blockSize += iter.block()->GetFrame(k).len;
+ }
+
+ if (blockSize > maxBlockSize) {
+ maxBlockSize = blockSize;
+ thumbnailTimeUs = iter.blockTimeUs();
+ }
+ }
+ iter.advance();
+ }
+ info->mMeta.setInt64(kKeyThumbnailTime, thumbnailTimeUs);
+ }
+}
+
+status_t MatroskaExtractor::getMetaData(MetaDataBase &meta) {
+ meta.setCString(
+ kKeyMIMEType,
+ mIsWebm ? "video/webm" : MEDIA_MIMETYPE_CONTAINER_MATROSKA);
+
+ return OK;
+}
+
+uint32_t MatroskaExtractor::flags() const {
+ uint32_t x = CAN_PAUSE;
+ if (!isLiveStreaming()) {
+ x |= CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK;
+ }
+
+ return x;
+}
+
+bool SniffMatroska(
+ DataSourceBase *source, float *confidence) {
+ DataSourceBaseReader reader(source);
+ mkvparser::EBMLHeader ebmlHeader;
+ long long pos;
+ if (ebmlHeader.Parse(&reader, pos) < 0) {
+ return false;
+ }
+
+ *confidence = 0.6;
+
+ return true;
+}
+
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("abbedd92-38c4-4904-a4c1-b3f45f899980"),
+ 1,
+ "Matroska Extractor",
+ [](
+ DataSourceBase *source,
+ float *confidence,
+ void **,
+ MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
+ if (SniffMatroska(source, confidence)) {
+ return [](
+ DataSourceBase *source,
+ void *) -> MediaExtractor* {
+ return new MatroskaExtractor(source);};
+ }
+ return NULL;
+ }
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/MatroskaExtractor.h
new file mode 100644
index 0000000..3568ea1
--- /dev/null
+++ b/media/extractors/mkv/MatroskaExtractor.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MATROSKA_EXTRACTOR_H_
+
+#define MATROSKA_EXTRACTOR_H_
+
+#include "mkvparser/mkvparser.h"
+
+#include <media/MediaExtractor.h>
+#include <media/stagefright/MetaDataBase.h>
+#include <utils/Vector.h>
+#include <utils/threads.h>
+
+namespace android {
+
+struct AMessage;
+class String8;
+
+class MetaData;
+struct DataSourceBaseReader;
+struct MatroskaSource;
+
+struct MatroskaExtractor : public MediaExtractor {
+ explicit MatroskaExtractor(DataSourceBase *source);
+
+ virtual size_t countTracks();
+
+ virtual MediaTrack *getTrack(size_t index);
+
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+
+ virtual uint32_t flags() const;
+
+ virtual const char * name() { return "MatroskaExtractor"; }
+
+protected:
+ virtual ~MatroskaExtractor();
+
+private:
+ friend struct MatroskaSource;
+ friend struct BlockIterator;
+
+ struct TrackInfo {
+ unsigned long mTrackNum;
+ bool mEncrypted;
+ MetaDataBase mMeta;
+ const MatroskaExtractor *mExtractor;
+ Vector<const mkvparser::CuePoint*> mCuePoints;
+
+ // mHeader points to memory managed by mkvparser;
+ // mHeader would be deleted when mSegment is deleted
+ // in ~MatroskaExtractor.
+ unsigned char *mHeader;
+ size_t mHeaderLen;
+
+ const mkvparser::Track* getTrack() const;
+ const mkvparser::CuePoint::TrackPosition *find(long long timeNs) const;
+ };
+
+ Mutex mLock;
+ Vector<TrackInfo> mTracks;
+
+ DataSourceBase *mDataSource;
+ DataSourceBaseReader *mReader;
+ mkvparser::Segment *mSegment;
+ bool mExtractedThumbnails;
+ bool mIsLiveStreaming;
+ bool mIsWebm;
+ int64_t mSeekPreRollNs;
+
+ status_t synthesizeAVCC(TrackInfo *trackInfo, size_t index);
+ status_t initTrackInfo(
+ const mkvparser::Track *track,
+ MetaDataBase &meta,
+ TrackInfo *trackInfo);
+ void addTracks();
+ void findThumbnails();
+ void getColorInformation(
+ const mkvparser::VideoTrack *vtrack,
+ MetaDataBase &meta);
+ bool isLiveStreaming() const;
+
+ MatroskaExtractor(const MatroskaExtractor &);
+ MatroskaExtractor &operator=(const MatroskaExtractor &);
+};
+
+} // namespace android
+
+#endif // MATROSKA_EXTRACTOR_H_
diff --git a/media/libstagefright/matroska/NOTICE b/media/extractors/mkv/NOTICE
similarity index 100%
copy from media/libstagefright/matroska/NOTICE
copy to media/extractors/mkv/NOTICE
diff --git a/media/extractors/mkv/exports.lds b/media/extractors/mkv/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/mkv/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
diff --git a/media/extractors/mp3/Android.bp b/media/extractors/mp3/Android.bp
new file mode 100644
index 0000000..a3aeaca
--- /dev/null
+++ b/media/extractors/mp3/Android.bp
@@ -0,0 +1,47 @@
+cc_library_shared {
+
+ srcs: [
+ "MP3Extractor.cpp",
+ "VBRISeeker.cpp",
+ "XINGSeeker.cpp",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/include",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmediaextractor",
+ "libstagefright_foundation",
+ ],
+
+ static_libs: [
+ "libutils",
+ "libstagefright_id3",
+ ],
+
+ name: "libmp3extractor",
+ relative_install_path: "extractors",
+
+ compile_multilib: "first",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
new file mode 100644
index 0000000..33cff96
--- /dev/null
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -0,0 +1,725 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MP3Extractor"
+#include <utils/Log.h>
+
+#include "MP3Extractor.h"
+
+#include "ID3.h"
+#include "VBRISeeker.h"
+#include "XINGSeeker.h"
+
+#include <media/DataSourceBase.h>
+#include <media/MediaTrack.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/MediaBufferBase.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// Everything must match except for
+// protection, bitrate, padding, private bits, mode, mode extension,
+// copyright bit, original bit and emphasis.
+// Yes ... there are things that must indeed match...
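+// In other words, the mask 0xfffe0c00 keeps the 11-bit sync word, the 2-bit
+// version, the 2-bit layer and the 2-bit sampling rate index of the header.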
+static const uint32_t kMask = 0xfffe0c00;
+
+static bool Resync(
+ DataSourceBase *source, uint32_t match_header,
+ off64_t *inout_pos, off64_t *post_id3_pos, uint32_t *out_header) {
+ if (post_id3_pos != NULL) {
+ *post_id3_pos = 0;
+ }
+
+ if (*inout_pos == 0) {
+ // Skip an optional ID3 header if syncing at the very beginning
+ // of the datasource.
+
+ for (;;) {
+ uint8_t id3header[10];
+ if (source->readAt(*inout_pos, id3header, sizeof(id3header))
+ < (ssize_t)sizeof(id3header)) {
+ // If we can't even read these 10 bytes, we might as well bail
+ // out, even if there _were_ 10 bytes of valid mp3 audio data...
+ return false;
+ }
+
+ if (memcmp("ID3", id3header, 3)) {
+ break;
+ }
+
+ // Skip the ID3v2 header.
+
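+            // ID3v2 stores the tag size as four "synchsafe" bytes (only the
+            // low 7 bits of each byte are significant); the size excludes the
+            // 10-byte header, hence the +10 below.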
+ size_t len =
+ ((id3header[6] & 0x7f) << 21)
+ | ((id3header[7] & 0x7f) << 14)
+ | ((id3header[8] & 0x7f) << 7)
+ | (id3header[9] & 0x7f);
+
+ len += 10;
+
+ *inout_pos += len;
+
+ ALOGV("skipped ID3 tag, new starting offset is %lld (0x%016llx)",
+ (long long)*inout_pos, (long long)*inout_pos);
+ }
+
+ if (post_id3_pos != NULL) {
+ *post_id3_pos = *inout_pos;
+ }
+ }
+
+ off64_t pos = *inout_pos;
+ bool valid = false;
+
+ const size_t kMaxReadBytes = 1024;
+ const size_t kMaxBytesChecked = 128 * 1024;
+ uint8_t buf[kMaxReadBytes];
+ ssize_t bytesToRead = kMaxReadBytes;
+ ssize_t totalBytesRead = 0;
+ ssize_t remainingBytes = 0;
+ bool reachEOS = false;
+ uint8_t *tmp = buf;
+
+ do {
+ if (pos >= (off64_t)(*inout_pos + kMaxBytesChecked)) {
+ // Don't scan forever.
+ ALOGV("giving up at offset %lld", (long long)pos);
+ break;
+ }
+
+ if (remainingBytes < 4) {
+ if (reachEOS) {
+ break;
+ } else {
+ memcpy(buf, tmp, remainingBytes);
+ bytesToRead = kMaxReadBytes - remainingBytes;
+
+ /*
+ * The next read position should start from the end of
+ * the last buffer, and thus should include the remaining
+ * bytes in the buffer.
+ */
+ totalBytesRead = source->readAt(pos + remainingBytes,
+ buf + remainingBytes,
+ bytesToRead);
+ if (totalBytesRead <= 0) {
+ break;
+ }
+ reachEOS = (totalBytesRead != bytesToRead);
+ totalBytesRead += remainingBytes;
+ remainingBytes = totalBytesRead;
+ tmp = buf;
+ continue;
+ }
+ }
+
+ uint32_t header = U32_AT(tmp);
+
+ if (match_header != 0 && (header & kMask) != (match_header & kMask)) {
+ ++pos;
+ ++tmp;
+ --remainingBytes;
+ continue;
+ }
+
+ size_t frame_size;
+ int sample_rate, num_channels, bitrate;
+ if (!GetMPEGAudioFrameSize(
+ header, &frame_size,
+ &sample_rate, &num_channels, &bitrate)) {
+ ++pos;
+ ++tmp;
+ --remainingBytes;
+ continue;
+ }
+
+ ALOGV("found possible 1st frame at %lld (header = 0x%08x)", (long long)pos, header);
+
+ // We found what looks like a valid frame,
+ // now find its successors.
+
+ off64_t test_pos = pos + frame_size;
+
+ valid = true;
+ for (int j = 0; j < 3; ++j) {
+ uint8_t tmp[4];
+ if (source->readAt(test_pos, tmp, 4) < 4) {
+ valid = false;
+ break;
+ }
+
+ uint32_t test_header = U32_AT(tmp);
+
+ ALOGV("subsequent header is %08x", test_header);
+
+ if ((test_header & kMask) != (header & kMask)) {
+ valid = false;
+ break;
+ }
+
+ size_t test_frame_size;
+ if (!GetMPEGAudioFrameSize(
+ test_header, &test_frame_size)) {
+ valid = false;
+ break;
+ }
+
+ ALOGV("found subsequent frame #%d at %lld", j + 2, (long long)test_pos);
+
+ test_pos += test_frame_size;
+ }
+
+ if (valid) {
+ *inout_pos = pos;
+
+ if (out_header != NULL) {
+ *out_header = header;
+ }
+ } else {
+ ALOGV("no dice, no valid sequence of frames found.");
+ }
+
+ ++pos;
+ ++tmp;
+ --remainingBytes;
+ } while (!valid);
+
+ return valid;
+}
+
+class MP3Source : public MediaTrack {
+public:
+ MP3Source(
+ MetaDataBase &meta, DataSourceBase *source,
+ off64_t first_frame_pos, uint32_t fixed_header,
+ MP3Seeker *seeker);
+
+ virtual status_t start(MetaDataBase *params = NULL);
+ virtual status_t stop();
+
+ virtual status_t getFormat(MetaDataBase &meta);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+protected:
+ virtual ~MP3Source();
+
+private:
+ static const size_t kMaxFrameSize;
+ MetaDataBase &mMeta;
+ DataSourceBase *mDataSource;
+ off64_t mFirstFramePos;
+ uint32_t mFixedHeader;
+ off64_t mCurrentPos;
+ int64_t mCurrentTimeUs;
+ bool mStarted;
+ MP3Seeker *mSeeker;
+ MediaBufferGroup *mGroup;
+
+ int64_t mBasisTimeUs;
+ int64_t mSamplesRead;
+
+ MP3Source(const MP3Source &);
+ MP3Source &operator=(const MP3Source &);
+};
+
+struct Mp3Meta {
+ off64_t pos;
+ off64_t post_id3_pos;
+ uint32_t header;
+};
+
+MP3Extractor::MP3Extractor(
+ DataSourceBase *source, Mp3Meta *meta)
+ : mInitCheck(NO_INIT),
+ mDataSource(source),
+ mFirstFramePos(-1),
+ mFixedHeader(0),
+ mSeeker(NULL) {
+
+ off64_t pos = 0;
+ off64_t post_id3_pos;
+ uint32_t header;
+ bool success;
+
+ if (meta != NULL) {
+ // The sniffer has already done all the hard work for us, simply
+ // accept its judgement.
+ pos = meta->pos;
+ header = meta->header;
+ post_id3_pos = meta->post_id3_pos;
+ success = true;
+ } else {
+ success = Resync(mDataSource, 0, &pos, &post_id3_pos, &header);
+ }
+
+ if (!success) {
+ // mInitCheck will remain NO_INIT
+ return;
+ }
+
+ mFirstFramePos = pos;
+ mFixedHeader = header;
+ XINGSeeker *seeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);
+
+ if (seeker == NULL) {
+ mSeeker = VBRISeeker::CreateFromSource(mDataSource, post_id3_pos);
+ } else {
+ mSeeker = seeker;
+ int encd = seeker->getEncoderDelay();
+ int encp = seeker->getEncoderPadding();
+ if (encd != 0 || encp != 0) {
+ mMeta.setInt32(kKeyEncoderDelay, encd);
+ mMeta.setInt32(kKeyEncoderPadding, encp);
+ }
+ }
+
+ if (mSeeker != NULL) {
+ // While it is safe to send the XING/VBRI frame to the decoder, this will
+ // result in an extra 1152 samples being output. In addition, the bitrate
+ // of the Xing header might not match the rest of the file, which could
+ // lead to problems when seeking. The real first frame to decode is after
+ // the XING/VBRI frame, so skip there.
+ size_t frame_size;
+ int sample_rate;
+ int num_channels;
+ int bitrate;
+ GetMPEGAudioFrameSize(
+ header, &frame_size, &sample_rate, &num_channels, &bitrate);
+ pos += frame_size;
+ if (!Resync(mDataSource, 0, &pos, &post_id3_pos, &header)) {
+ // mInitCheck will remain NO_INIT
+ return;
+ }
+ mFirstFramePos = pos;
+ mFixedHeader = header;
+ }
+
+ size_t frame_size;
+ int sample_rate;
+ int num_channels;
+ int bitrate;
+ GetMPEGAudioFrameSize(
+ header, &frame_size, &sample_rate, &num_channels, &bitrate);
+
+ unsigned layer = 4 - ((header >> 17) & 3);
+
+ switch (layer) {
+ case 1:
+ mMeta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I);
+ break;
+ case 2:
+ mMeta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II);
+ break;
+ case 3:
+ mMeta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
+ break;
+ default:
+ TRESPASS();
+ }
+
+ mMeta.setInt32(kKeySampleRate, sample_rate);
+ mMeta.setInt32(kKeyBitRate, bitrate * 1000);
+ mMeta.setInt32(kKeyChannelCount, num_channels);
+
+ int64_t durationUs;
+
+ if (mSeeker == NULL || !mSeeker->getDuration(&durationUs)) {
+ off64_t fileSize;
+ if (mDataSource->getSize(&fileSize) == OK) {
+ off64_t dataLength = fileSize - mFirstFramePos;
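+            // 'bitrate' is in kbps here, so
+            // duration (us) = dataLength (bytes) * 8000 / bitrate.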
+ if (dataLength > INT64_MAX / 8000LL) {
+ // duration would overflow
+ durationUs = INT64_MAX;
+ } else {
+ durationUs = 8000LL * dataLength / bitrate;
+ }
+ } else {
+ durationUs = -1;
+ }
+ }
+
+ if (durationUs >= 0) {
+ mMeta.setInt64(kKeyDuration, durationUs);
+ }
+
+ mInitCheck = OK;
+
+ // Get iTunes-style gapless info if present.
+ // When getting the id3 tag, skip the V1 tags to prevent the source cache
+ // from being iterated to the end of the file.
+ ID3 id3(mDataSource, true);
+ if (id3.isValid()) {
+ ID3::Iterator *com = new ID3::Iterator(id3, "COM");
+ if (com->done()) {
+ delete com;
+ com = new ID3::Iterator(id3, "COMM");
+ }
+ while(!com->done()) {
+ String8 commentdesc;
+ String8 commentvalue;
+ com->getString(&commentdesc, &commentvalue);
+ const char * desc = commentdesc.string();
+ const char * value = commentvalue.string();
+
+ // first 3 characters are the language, which we don't care about
+ if(strlen(desc) > 3 && strcmp(desc + 3, "iTunSMPB") == 0) {
+
+ int32_t delay, padding;
+ if (sscanf(value, " %*x %x %x %*x", &delay, &padding) == 2) {
+ mMeta.setInt32(kKeyEncoderDelay, delay);
+ mMeta.setInt32(kKeyEncoderPadding, padding);
+ }
+ break;
+ }
+ com->next();
+ }
+ delete com;
+ com = NULL;
+ }
+}
+
+MP3Extractor::~MP3Extractor() {
+ delete mSeeker;
+}
+
+size_t MP3Extractor::countTracks() {
+ return mInitCheck != OK ? 0 : 1;
+}
+
+MediaTrack *MP3Extractor::getTrack(size_t index) {
+ if (mInitCheck != OK || index != 0) {
+ return NULL;
+ }
+
+ return new MP3Source(
+ mMeta, mDataSource, mFirstFramePos, mFixedHeader,
+ mSeeker);
+}
+
+status_t MP3Extractor::getTrackMetaData(
+ MetaDataBase &meta,
+ size_t index, uint32_t /* flags */) {
+ if (mInitCheck != OK || index != 0) {
+ return UNKNOWN_ERROR;
+ }
+ meta = mMeta;
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// The theoretical maximum frame size for an MPEG audio stream should occur
+// while playing a Layer 2, MPEGv2.5 audio stream at 160kbps (with padding).
+// The size of this frame should be...
+// ((1152 samples/frame * 160000 bits/sec) /
+// (8000 samples/sec * 8 bits/byte)) + 1 padding byte/frame = 2881 bytes/frame.
+// Set our max frame size to the nearest power of 2 above this size (aka, 4kB)
+const size_t MP3Source::kMaxFrameSize = (1 << 12); /* 4096 bytes */
+MP3Source::MP3Source(
+ MetaDataBase &meta, DataSourceBase *source,
+ off64_t first_frame_pos, uint32_t fixed_header,
+ MP3Seeker *seeker)
+ : mMeta(meta),
+ mDataSource(source),
+ mFirstFramePos(first_frame_pos),
+ mFixedHeader(fixed_header),
+ mCurrentPos(0),
+ mCurrentTimeUs(0),
+ mStarted(false),
+ mSeeker(seeker),
+ mGroup(NULL),
+ mBasisTimeUs(0),
+ mSamplesRead(0) {
+}
+
+MP3Source::~MP3Source() {
+ if (mStarted) {
+ stop();
+ }
+}
+
+status_t MP3Source::start(MetaDataBase *) {
+ CHECK(!mStarted);
+
+ mGroup = new MediaBufferGroup;
+
+ mGroup->add_buffer(MediaBufferBase::Create(kMaxFrameSize));
+
+ mCurrentPos = mFirstFramePos;
+ mCurrentTimeUs = 0;
+
+ mBasisTimeUs = mCurrentTimeUs;
+ mSamplesRead = 0;
+
+ mStarted = true;
+
+ return OK;
+}
+
+status_t MP3Source::stop() {
+ CHECK(mStarted);
+
+ delete mGroup;
+ mGroup = NULL;
+
+ mStarted = false;
+
+ return OK;
+}
+
+status_t MP3Source::getFormat(MetaDataBase &meta) {
+ meta = mMeta;
+ return OK;
+}
+
+status_t MP3Source::read(
+ MediaBufferBase **out, const ReadOptions *options) {
+ *out = NULL;
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ bool seekCBR = false;
+
+ if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
+ int64_t actualSeekTimeUs = seekTimeUs;
+ if (mSeeker == NULL
+ || !mSeeker->getOffsetForTime(&actualSeekTimeUs, &mCurrentPos)) {
+ int32_t bitrate;
+ if (!mMeta.findInt32(kKeyBitRate, &bitrate)) {
+ // bitrate is in bits/sec.
+ ALOGI("no bitrate");
+
+ return ERROR_UNSUPPORTED;
+ }
+
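+            // No seek table available: assume a constant bitrate, so the byte
+            // offset is time (us) * bitrate (bits/s) / 8,000,000.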
+ mCurrentTimeUs = seekTimeUs;
+ mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;
+ seekCBR = true;
+ } else {
+ mCurrentTimeUs = actualSeekTimeUs;
+ }
+
+ mBasisTimeUs = mCurrentTimeUs;
+ mSamplesRead = 0;
+ }
+
+ MediaBufferBase *buffer;
+ status_t err = mGroup->acquire_buffer(&buffer);
+ if (err != OK) {
+ return err;
+ }
+
+ size_t frame_size;
+ int bitrate;
+ int num_samples;
+ int sample_rate;
+ for (;;) {
+ ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), 4);
+ if (n < 4) {
+ buffer->release();
+ buffer = NULL;
+
+ return (n < 0 ? n : ERROR_END_OF_STREAM);
+ }
+
+ uint32_t header = U32_AT((const uint8_t *)buffer->data());
+
+ if ((header & kMask) == (mFixedHeader & kMask)
+ && GetMPEGAudioFrameSize(
+ header, &frame_size, &sample_rate, NULL,
+ &bitrate, &num_samples)) {
+
+ // re-calculate mCurrentTimeUs because we might have called Resync()
+ if (seekCBR) {
+ mCurrentTimeUs = (mCurrentPos - mFirstFramePos) * 8000 / bitrate;
+ mBasisTimeUs = mCurrentTimeUs;
+ }
+
+ break;
+ }
+
+ // Lost sync.
+ ALOGV("lost sync! header = 0x%08x, old header = 0x%08x\n", header, mFixedHeader);
+
+ off64_t pos = mCurrentPos;
+ if (!Resync(mDataSource, mFixedHeader, &pos, NULL, NULL)) {
+ ALOGE("Unable to resync. Signalling end of stream.");
+
+ buffer->release();
+ buffer = NULL;
+
+ return ERROR_END_OF_STREAM;
+ }
+
+ mCurrentPos = pos;
+
+ // Try again with the new position.
+ }
+
+ CHECK(frame_size <= buffer->size());
+
+ ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);
+ if (n < (ssize_t)frame_size) {
+ buffer->release();
+ buffer = NULL;
+
+ return (n < 0 ? n : ERROR_END_OF_STREAM);
+ }
+
+ buffer->set_range(0, frame_size);
+
+ buffer->meta_data().setInt64(kKeyTime, mCurrentTimeUs);
+ buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+
+ mCurrentPos += frame_size;
+
+ mSamplesRead += num_samples;
+ mCurrentTimeUs = mBasisTimeUs + ((mSamplesRead * 1000000) / sample_rate);
+
+ *out = buffer;
+
+ return OK;
+}
+
+status_t MP3Extractor::getMetaData(MetaDataBase &meta) {
+ meta.clear();
+ if (mInitCheck != OK) {
+ return UNKNOWN_ERROR;
+ }
+ meta.setCString(kKeyMIMEType, "audio/mpeg");
+
+ ID3 id3(mDataSource);
+
+ if (!id3.isValid()) {
+ return OK;
+ }
+
+ struct Map {
+ int key;
+ const char *tag1;
+ const char *tag2;
+ };
+ static const Map kMap[] = {
+ { kKeyAlbum, "TALB", "TAL" },
+ { kKeyArtist, "TPE1", "TP1" },
+ { kKeyAlbumArtist, "TPE2", "TP2" },
+ { kKeyComposer, "TCOM", "TCM" },
+ { kKeyGenre, "TCON", "TCO" },
+ { kKeyTitle, "TIT2", "TT2" },
+ { kKeyYear, "TYE", "TYER" },
+ { kKeyAuthor, "TXT", "TEXT" },
+ { kKeyCDTrackNumber, "TRK", "TRCK" },
+ { kKeyDiscNumber, "TPA", "TPOS" },
+ { kKeyCompilation, "TCP", "TCMP" },
+ };
+ static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
+
+ for (size_t i = 0; i < kNumMapEntries; ++i) {
+ ID3::Iterator *it = new ID3::Iterator(id3, kMap[i].tag1);
+ if (it->done()) {
+ delete it;
+ it = new ID3::Iterator(id3, kMap[i].tag2);
+ }
+
+ if (it->done()) {
+ delete it;
+ continue;
+ }
+
+ String8 s;
+ it->getString(&s);
+ delete it;
+
+ meta.setCString(kMap[i].key, s);
+ }
+
+ size_t dataSize;
+ String8 mime;
+ const void *data = id3.getAlbumArt(&dataSize, &mime);
+
+ if (data) {
+ meta.setData(kKeyAlbumArt, MetaData::TYPE_NONE, data, dataSize);
+ meta.setCString(kKeyAlbumArtMIME, mime.string());
+ }
+
+ return OK;
+}
+
+static MediaExtractor* CreateExtractor(
+ DataSourceBase *source,
+ void *meta) {
+ Mp3Meta *metaData = static_cast<Mp3Meta *>(meta);
+ return new MP3Extractor(source, metaData);
+}
+
+static MediaExtractor::CreatorFunc Sniff(
+ DataSourceBase *source, float *confidence, void **meta,
+ MediaExtractor::FreeMetaFunc *freeMeta) {
+ off64_t pos = 0;
+ off64_t post_id3_pos;
+ uint32_t header;
+ uint8_t mpeg_header[5];
+ if (source->readAt(0, mpeg_header, sizeof(mpeg_header)) < (ssize_t)sizeof(mpeg_header)) {
+ return NULL;
+ }
+
+ if (!memcmp("\x00\x00\x01\xba", mpeg_header, 4) && (mpeg_header[4] >> 4) == 2) {
+ ALOGV("MPEG1PS container is not supported!");
+ return NULL;
+ }
+ if (!Resync(source, 0, &pos, &post_id3_pos, &header)) {
+ return NULL;
+ }
+
+ Mp3Meta *mp3Meta = new Mp3Meta;
+ mp3Meta->pos = pos;
+ mp3Meta->header = header;
+ mp3Meta->post_id3_pos = post_id3_pos;
+ *meta = mp3Meta;
+ *freeMeta = ::free;
+
+ *confidence = 0.2f;
+
+ return CreateExtractor;
+}
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("812a3f6c-c8cf-46de-b529-3774b14103d4"),
+ 1, // version
+ "MP3 Extractor",
+ Sniff
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/extractors/mp3/MP3Extractor.h b/media/extractors/mp3/MP3Extractor.h
new file mode 100644
index 0000000..485b0ca
--- /dev/null
+++ b/media/extractors/mp3/MP3Extractor.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MP3_EXTRACTOR_H_
+
+#define MP3_EXTRACTOR_H_
+
+#include <utils/Errors.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/MetaDataBase.h>
+
+namespace android {
+
+struct AMessage;
+class DataSourceBase;
+struct MP3Seeker;
+class String8;
+struct Mp3Meta;
+
+class MP3Extractor : public MediaExtractor {
+public:
+ MP3Extractor(DataSourceBase *source, Mp3Meta *meta);
+ ~MP3Extractor();
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+ virtual const char * name() { return "MP3Extractor"; }
+
+private:
+ status_t mInitCheck;
+
+ DataSourceBase *mDataSource;
+ off64_t mFirstFramePos;
+ MetaDataBase mMeta;
+ uint32_t mFixedHeader;
+ MP3Seeker *mSeeker;
+
+ MP3Extractor(const MP3Extractor &);
+ MP3Extractor &operator=(const MP3Extractor &);
+};
+
+} // namespace android
+
+#endif // MP3_EXTRACTOR_H_
diff --git a/media/extractors/mp3/MP3Seeker.h b/media/extractors/mp3/MP3Seeker.h
new file mode 100644
index 0000000..0e3af25
--- /dev/null
+++ b/media/extractors/mp3/MP3Seeker.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MP3_SEEKER_H_
+
+#define MP3_SEEKER_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct MP3Seeker {
+ MP3Seeker() {}
+
+ virtual bool getDuration(int64_t *durationUs) = 0;
+
+ // Given a request seek time in "*timeUs", find the byte offset closest
+ // to that position and return it in "*pos". Update "*timeUs" to reflect
+ // the actual time that seekpoint represents.
+ virtual bool getOffsetForTime(int64_t *timeUs, off64_t *pos) = 0;
+
+ virtual ~MP3Seeker() {}
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(MP3Seeker);
+};
+
+} // namespace android
+
+#endif // MP3_SEEKER_H_
+
diff --git a/media/extractors/mp3/VBRISeeker.cpp b/media/extractors/mp3/VBRISeeker.cpp
new file mode 100644
index 0000000..523f14c
--- /dev/null
+++ b/media/extractors/mp3/VBRISeeker.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VBRISeeker"
+
+#include <inttypes.h>
+
+#include <utils/Log.h>
+
+#include "VBRISeeker.h"
+
+#include <media/stagefright/foundation/avc_utils.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/DataSourceBase.h>
+
+namespace android {
+
+static uint32_t U24_AT(const uint8_t *ptr) {
+ return ptr[0] << 16 | ptr[1] << 8 | ptr[2];
+}
+
+// static
+VBRISeeker *VBRISeeker::CreateFromSource(
+ DataSourceBase *source, off64_t post_id3_pos) {
+ off64_t pos = post_id3_pos;
+
+ uint8_t header[4];
+ ssize_t n = source->readAt(pos, header, sizeof(header));
+ if (n < (ssize_t)sizeof(header)) {
+ return NULL;
+ }
+
+ uint32_t tmp = U32_AT(&header[0]);
+ size_t frameSize;
+ int sampleRate;
+ if (!GetMPEGAudioFrameSize(tmp, &frameSize, &sampleRate)) {
+ return NULL;
+ }
+
+ // VBRI header follows 32 bytes after the header _ends_.
+ pos += sizeof(header) + 32;
+
+ uint8_t vbriHeader[26];
+ n = source->readAt(pos, vbriHeader, sizeof(vbriHeader));
+ if (n < (ssize_t)sizeof(vbriHeader)) {
+ return NULL;
+ }
+
+ if (memcmp(vbriHeader, "VBRI", 4)) {
+ return NULL;
+ }
+
+ size_t numFrames = U32_AT(&vbriHeader[14]);
+
+ int64_t durationUs =
+ numFrames * 1000000ll * (sampleRate >= 32000 ? 1152 : 576) / sampleRate;
+
+ ALOGV("duration = %.2f secs", durationUs / 1E6);
+
+ size_t numEntries = U16_AT(&vbriHeader[18]);
+ size_t entrySize = U16_AT(&vbriHeader[22]);
+ size_t scale = U16_AT(&vbriHeader[20]);
+
+ ALOGV("%zu entries, scale=%zu, size_per_entry=%zu",
+ numEntries,
+ scale,
+ entrySize);
+
+ if (entrySize > 4) {
+ ALOGE("invalid VBRI entry size: %zu", entrySize);
+ return NULL;
+ }
+
+ VBRISeeker *seeker = new (std::nothrow) VBRISeeker;
+ if (seeker == NULL) {
+ ALOGW("Couldn't allocate VBRISeeker");
+ return NULL;
+ }
+
+ size_t totalEntrySize = numEntries * entrySize;
+ uint8_t *buffer = new (std::nothrow) uint8_t[totalEntrySize];
+ if (!buffer) {
+ ALOGW("Couldn't allocate %zu bytes", totalEntrySize);
+ delete seeker;
+ return NULL;
+ }
+
+ n = source->readAt(pos + sizeof(vbriHeader), buffer, totalEntrySize);
+ if (n < (ssize_t)totalEntrySize) {
+ delete[] buffer;
+ buffer = NULL;
+ delete seeker;
+ return NULL;
+ }
+
+ seeker->mBasePos = post_id3_pos + frameSize;
+ // only update mDurationUs if the calculated duration is valid (non zero)
+ // otherwise, leave duration at -1 so that getDuration() and getOffsetForTime()
+ // return false when called, to indicate that this vbri tag does not have the
+ // requested information
+ if (durationUs) {
+ seeker->mDurationUs = durationUs;
+ }
+
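+    // Each table entry, multiplied by 'scale', is the byte length of one
+    // segment; getOffsetForTime() assumes every segment covers an equal
+    // slice of the total duration.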
+ off64_t offset = post_id3_pos;
+ for (size_t i = 0; i < numEntries; ++i) {
+ uint32_t numBytes;
+ switch (entrySize) {
+ case 1: numBytes = buffer[i]; break;
+ case 2: numBytes = U16_AT(buffer + 2 * i); break;
+ case 3: numBytes = U24_AT(buffer + 3 * i); break;
+ default:
+ {
+ CHECK_EQ(entrySize, 4u);
+ numBytes = U32_AT(buffer + 4 * i); break;
+ }
+ }
+
+ numBytes *= scale;
+
+ seeker->mSegments.push(numBytes);
+
+ ALOGV("entry #%zu: %u offset %#016llx", i, numBytes, (long long)offset);
+ offset += numBytes;
+ }
+
+ delete[] buffer;
+ buffer = NULL;
+
+ ALOGI("Found VBRI header.");
+
+ return seeker;
+}
+
+VBRISeeker::VBRISeeker()
+ : mDurationUs(-1) {
+}
+
+bool VBRISeeker::getDuration(int64_t *durationUs) {
+ if (mDurationUs < 0) {
+ return false;
+ }
+
+ *durationUs = mDurationUs;
+
+ return true;
+}
+
+bool VBRISeeker::getOffsetForTime(int64_t *timeUs, off64_t *pos) {
+ if (mDurationUs < 0 || mSegments.size() == 0) {
+ return false;
+ }
+
+ int64_t segmentDurationUs = mDurationUs / mSegments.size();
+
+ int64_t nowUs = 0;
+ *pos = mBasePos;
+ size_t segmentIndex = 0;
+ while (segmentIndex < mSegments.size() && nowUs < *timeUs) {
+ nowUs += segmentDurationUs;
+ *pos += mSegments.itemAt(segmentIndex++);
+ }
+
+ ALOGV("getOffsetForTime %lld us => 0x%016llx", (long long)*timeUs, (long long)*pos);
+
+ *timeUs = nowUs;
+
+ return true;
+}
+
+} // namespace android
+
diff --git a/media/extractors/mp3/VBRISeeker.h b/media/extractors/mp3/VBRISeeker.h
new file mode 100644
index 0000000..9213f6e
--- /dev/null
+++ b/media/extractors/mp3/VBRISeeker.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VBRI_SEEKER_H_
+
+#define VBRI_SEEKER_H_
+
+#include "MP3Seeker.h"
+
+#include <utils/Vector.h>
+
+namespace android {
+
+class DataSourceBase;
+
+struct VBRISeeker : public MP3Seeker {
+ static VBRISeeker *CreateFromSource(
+ DataSourceBase *source, off64_t post_id3_pos);
+
+ virtual bool getDuration(int64_t *durationUs);
+ virtual bool getOffsetForTime(int64_t *timeUs, off64_t *pos);
+
+private:
+ off64_t mBasePos;
+ int64_t mDurationUs;
+ Vector<uint32_t> mSegments;
+
+ VBRISeeker();
+
+ DISALLOW_EVIL_CONSTRUCTORS(VBRISeeker);
+};
+
+} // namespace android
+
+#endif // VBRI_SEEKER_H_
+
+
diff --git a/media/extractors/mp3/XINGSeeker.cpp b/media/extractors/mp3/XINGSeeker.cpp
new file mode 100644
index 0000000..95ca556
--- /dev/null
+++ b/media/extractors/mp3/XINGSeeker.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "XINGSEEKER"
+#include <utils/Log.h>
+
+#include "XINGSeeker.h"
+#include <media/stagefright/foundation/avc_utils.h>
+
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/DataSourceBase.h>
+
+namespace android {
+
+XINGSeeker::XINGSeeker()
+ : mDurationUs(-1),
+ mSizeBytes(0),
+ mEncoderDelay(0),
+ mEncoderPadding(0),
+ mTOCValid(false) {
+}
+
+bool XINGSeeker::getDuration(int64_t *durationUs) {
+ if (mDurationUs < 0) {
+ return false;
+ }
+
+ *durationUs = mDurationUs;
+
+ return true;
+}
+
+bool XINGSeeker::getOffsetForTime(int64_t *timeUs, off64_t *pos) {
+ if (mSizeBytes == 0 || !mTOCValid || mDurationUs < 0) {
+ return false;
+ }
+
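+    // The Xing TOC maps integer playback percentages to byte positions
+    // expressed in 1/256ths of the total stream size; interpolate linearly
+    // between the two nearest entries.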
+ float percent = (float)(*timeUs) * 100 / mDurationUs;
+ float fx;
+ if( percent <= 0.0f ) {
+ fx = 0.0f;
+ } else if( percent >= 100.0f ) {
+ fx = 256.0f;
+ } else {
+ int a = (int)percent;
+ float fa, fb;
+ if ( a == 0 ) {
+ fa = 0.0f;
+ } else {
+ fa = (float)mTOC[a-1];
+ }
+ if ( a < 99 ) {
+ fb = (float)mTOC[a];
+ } else {
+ fb = 256.0f;
+ }
+ fx = fa + (fb-fa)*(percent-a);
+ }
+
+ *pos = (int)((1.0f/256.0f)*fx*mSizeBytes) + mFirstFramePos;
+
+ return true;
+}
+
+// static
+XINGSeeker *XINGSeeker::CreateFromSource(
+ DataSourceBase *source, off64_t first_frame_pos) {
+
+ uint8_t buffer[4];
+ int offset = first_frame_pos;
+ if (source->readAt(offset, &buffer, 4) < 4) { // get header
+ return NULL;
+ }
+ offset += 4;
+
+    int header = U32_AT(buffer);
+ size_t xingframesize = 0;
+ int sampling_rate = 0;
+ int num_channels;
+ int samples_per_frame = 0;
+ if (!GetMPEGAudioFrameSize(header, &xingframesize, &sampling_rate, &num_channels,
+ NULL, &samples_per_frame)) {
+ return NULL;
+ }
+ uint8_t version = (buffer[1] >> 3) & 3;
+
+ // determine offset of XING header
+ if(version & 1) { // mpeg1
+ if (num_channels != 1) offset += 32;
+ else offset += 17;
+ } else { // mpeg 2 or 2.5
+ if (num_channels != 1) offset += 17;
+ else offset += 9;
+ }
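+    // The skipped bytes are the per-frame side information, whose size
+    // depends on MPEG version and channel count (32/17 bytes for MPEG-1
+    // stereo/mono, 17/9 for MPEG-2 and 2.5).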
+
+ int xingbase = offset;
+
+ if (source->readAt(offset, &buffer, 4) < 4) { // XING header ID
+ return NULL;
+ }
+ offset += 4;
+ // Check XING ID
+ if ((buffer[0] != 'X') || (buffer[1] != 'i')
+ || (buffer[2] != 'n') || (buffer[3] != 'g')) {
+ if ((buffer[0] != 'I') || (buffer[1] != 'n')
+ || (buffer[2] != 'f') || (buffer[3] != 'o')) {
+ return NULL;
+ }
+ }
+
+ if (source->readAt(offset, &buffer, 4) < 4) { // flags
+ return NULL;
+ }
+ offset += 4;
+ uint32_t flags = U32_AT(buffer);
+
+ XINGSeeker *seeker = new XINGSeeker;
+ seeker->mFirstFramePos = first_frame_pos + xingframesize;
+
+ if (flags & 0x0001) { // Frames field is present
+ if (source->readAt(offset, buffer, 4) < 4) {
+ delete seeker;
+ return NULL;
+ }
+ int32_t frames = U32_AT(buffer);
+ // only update mDurationUs if the calculated duration is valid (non zero)
+ // otherwise, leave duration at -1 so that getDuration() and getOffsetForTime()
+ // return false when called, to indicate that this xing tag does not have the
+ // requested information
+ if (frames) {
+ seeker->mDurationUs = (int64_t)frames * samples_per_frame * 1000000LL / sampling_rate;
+ }
+ offset += 4;
+ }
+ if (flags & 0x0002) { // Bytes field is present
+ if (source->readAt(offset, buffer, 4) < 4) {
+ delete seeker;
+ return NULL;
+ }
+ seeker->mSizeBytes = U32_AT(buffer);
+ offset += 4;
+ }
+ if (flags & 0x0004) { // TOC field is present
+ if (source->readAt(offset + 1, seeker->mTOC, 99) < 99) {
+ delete seeker;
+ return NULL;
+ }
+ seeker->mTOCValid = true;
+ offset += 100;
+ }
+
+#if 0
+ if (flags & 0x0008) { // Quality indicator field is present
+ if (source->readAt(offset, buffer, 4) < 4) {
+ delete seeker;
+ return NULL;
+ }
+ // do something with the quality indicator
+ offset += 4;
+ }
+
+ if (source->readAt(xingbase + 0xaf - 0x24, &buffer, 1) < 1) { // encoding flags
+ delete seeker;
+ return false;
+ }
+
+ ALOGV("nogap preceding: %s, nogap continued in next: %s",
+ (buffer[0] & 0x80) ? "true" : "false",
+ (buffer[0] & 0x40) ? "true" : "false");
+#endif
+
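+    // These three bytes (part of the LAME extension data) pack the encoder
+    // delay and padding as two consecutive 12-bit values.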
+ if (source->readAt(xingbase + 0xb1 - 0x24, &buffer, 3) == 3) {
+ seeker->mEncoderDelay = (buffer[0] << 4) + (buffer[1] >> 4);
+ seeker->mEncoderPadding = ((buffer[1] & 0xf) << 8) + buffer[2];
+ }
+
+ return seeker;
+}
+
+int32_t XINGSeeker::getEncoderDelay() {
+ return mEncoderDelay;
+}
+
+int32_t XINGSeeker::getEncoderPadding() {
+ return mEncoderPadding;
+}
+
+} // namespace android
+
diff --git a/media/extractors/mp3/XINGSeeker.h b/media/extractors/mp3/XINGSeeker.h
new file mode 100644
index 0000000..5867eae
--- /dev/null
+++ b/media/extractors/mp3/XINGSeeker.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef XING_SEEKER_H_
+
+#define XING_SEEKER_H_
+
+#include "MP3Seeker.h"
+
+namespace android {
+
+class DataSourceBase;
+
+struct XINGSeeker : public MP3Seeker {
+ static XINGSeeker *CreateFromSource(
+ DataSourceBase *source, off64_t first_frame_pos);
+
+ virtual bool getDuration(int64_t *durationUs);
+ virtual bool getOffsetForTime(int64_t *timeUs, off64_t *pos);
+
+ virtual int32_t getEncoderDelay();
+ virtual int32_t getEncoderPadding();
+
+private:
+ int64_t mFirstFramePos;
+ int64_t mDurationUs;
+ int32_t mSizeBytes;
+ int32_t mEncoderDelay;
+ int32_t mEncoderPadding;
+
+ // TOC entries in XING header. Skip the first one since it's always 0.
+ unsigned char mTOC[99];
+ bool mTOCValid;
+
+ XINGSeeker();
+
+ DISALLOW_EVIL_CONSTRUCTORS(XINGSeeker);
+};
+
+} // namespace android
+
+#endif // XING_SEEKER_H_
+
diff --git a/media/extractors/mp3/exports.lds b/media/extractors/mp3/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/mp3/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
diff --git a/media/extractors/mp4/Android.bp b/media/extractors/mp4/Android.bp
new file mode 100644
index 0000000..fa739e8
--- /dev/null
+++ b/media/extractors/mp4/Android.bp
@@ -0,0 +1,60 @@
+cc_defaults {
+ name: "libmp4extractor_defaults",
+
+ srcs: [
+ "ItemTable.cpp",
+ "MPEG4Extractor.cpp",
+ "SampleIterator.cpp",
+ "SampleTable.cpp",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmediaextractor",
+ ],
+
+ static_libs: [
+ "libstagefright_esds",
+ "libstagefright_foundation",
+ "libstagefright_id3",
+ "libutils",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+ relative_install_path: "extractors",
+ compile_multilib: "first",
+}
+
+cc_library_shared {
+
+
+ name: "libmp4extractor",
+ defaults: ["libmp4extractor_defaults"],
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
+
+cc_library_static {
+ name: "libmp4extractor_fuzzing",
+
+ defaults: ["libmp4extractor_defaults"],
+}
diff --git a/media/extractors/mp4/ItemTable.cpp b/media/extractors/mp4/ItemTable.cpp
new file mode 100644
index 0000000..ca9deab
--- /dev/null
+++ b/media/extractors/mp4/ItemTable.cpp
@@ -0,0 +1,1681 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ItemTable"
+
+#include <ItemTable.h>
+#include <media/DataSourceBase.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <utils/Log.h>
+
+namespace android {
+
+namespace heif {
+
+/////////////////////////////////////////////////////////////////////
+//
+// struct to keep track of one image item
+//
+
+struct ImageItem {
+ friend struct ItemReference;
+ friend struct ItemProperty;
+
+ ImageItem() : ImageItem(0, 0, false) {}
+ ImageItem(uint32_t _type, uint32_t _id, bool _hidden) :
+ type(_type), itemId(_id), hidden(_hidden),
+ rows(0), columns(0), width(0), height(0), rotation(0),
+ offset(0), size(0), nextTileIndex(0) {}
+
+ bool isGrid() const {
+ return type == FOURCC('g', 'r', 'i', 'd');
+ }
+
+ status_t getNextTileItemId(uint32_t *nextTileItemId, bool reset) {
+ if (reset) {
+ nextTileIndex = 0;
+ }
+ if (nextTileIndex >= dimgRefs.size()) {
+ return ERROR_END_OF_STREAM;
+ }
+ *nextTileItemId = dimgRefs[nextTileIndex++];
+ return OK;
+ }
+
+ uint32_t type;
+ uint32_t itemId;
+ bool hidden;
+ int32_t rows;
+ int32_t columns;
+ int32_t width;
+ int32_t height;
+ int32_t rotation;
+ off64_t offset;
+ size_t size;
+ sp<ABuffer> hvcc;
+ sp<ABuffer> icc;
+
+ Vector<uint32_t> thumbnails;
+ Vector<uint32_t> dimgRefs;
+ Vector<uint32_t> cdscRefs;
+ size_t nextTileIndex;
+};
+
+struct ExifItem {
+ off64_t offset;
+ size_t size;
+};
+
+/////////////////////////////////////////////////////////////////////
+//
+// ISO boxes
+//
+
+struct Box {
+protected:
+ Box(DataSourceBase *source, uint32_t type) :
+ mDataSource(source), mType(type) {}
+
+ virtual ~Box() {}
+
+ virtual status_t onChunkData(
+ uint32_t /*type*/, off64_t /*offset*/, size_t /*size*/) {
+ return OK;
+ }
+
+ inline uint32_t type() const { return mType; }
+
+ inline DataSourceBase *source() const { return mDataSource; }
+
+ status_t parseChunk(off64_t *offset);
+
+ status_t parseChunks(off64_t offset, size_t size);
+
+private:
+ DataSourceBase *mDataSource;
+ uint32_t mType;
+};
+
+status_t Box::parseChunk(off64_t *offset) {
+ if (*offset < 0) {
+ ALOGE("b/23540914");
+ return ERROR_MALFORMED;
+ }
+ uint32_t hdr[2];
+ if (mDataSource->readAt(*offset, hdr, 8) < 8) {
+ return ERROR_IO;
+ }
+ uint64_t chunk_size = ntohl(hdr[0]);
+ int32_t chunk_type = ntohl(hdr[1]);
+ off64_t data_offset = *offset + 8;
+
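+    // A 32-bit box size of 1 means the real size follows the box type as a
+    // 64-bit field.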
+ if (chunk_size == 1) {
+ if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
+ return ERROR_IO;
+ }
+ chunk_size = ntoh64(chunk_size);
+ data_offset += 8;
+
+ if (chunk_size < 16) {
+ // The smallest valid chunk is 16 bytes long in this case.
+ return ERROR_MALFORMED;
+ }
+ } else if (chunk_size == 0) {
+        // This shouldn't happen since we should never be at the top level
+ ALOGE("invalid chunk size 0 for non-top level box");
+ return ERROR_MALFORMED;
+ } else if (chunk_size < 8) {
+ // The smallest valid chunk is 8 bytes long.
+ ALOGE("invalid chunk size: %lld", (long long)chunk_size);
+ return ERROR_MALFORMED;
+ }
+
+ char chunk[5];
+ MakeFourCCString(chunk_type, chunk);
+ ALOGV("chunk: %s @ %lld", chunk, (long long)*offset);
+
+ off64_t chunk_data_size = chunk_size - (data_offset - *offset);
+ if (chunk_data_size < 0) {
+ ALOGE("b/23540914");
+ return ERROR_MALFORMED;
+ }
+
+ status_t err = onChunkData(chunk_type, data_offset, chunk_data_size);
+
+ if (err != OK) {
+ return err;
+ }
+ *offset += chunk_size;
+ return OK;
+}
+
+status_t Box::parseChunks(off64_t offset, size_t size) {
+ off64_t stopOffset = offset + size;
+ while (offset < stopOffset) {
+ status_t err = parseChunk(&offset);
+ if (err != OK) {
+ return err;
+ }
+ }
+ if (offset != stopOffset) {
+ return ERROR_MALFORMED;
+ }
+ return OK;
+}
+
+///////////////////////////////////////////////////////////////////////
+
+struct FullBox : public Box {
+protected:
+ FullBox(DataSourceBase *source, uint32_t type) :
+ Box(source, type), mVersion(0), mFlags(0) {}
+
+ inline uint8_t version() const { return mVersion; }
+
+ inline uint32_t flags() const { return mFlags; }
+
+ status_t parseFullBoxHeader(off64_t *offset, size_t *size);
+
+private:
+ uint8_t mVersion;
+ uint32_t mFlags;
+};
+
+status_t FullBox::parseFullBoxHeader(off64_t *offset, size_t *size) {
+ if (*size < 4) {
+ return ERROR_MALFORMED;
+ }
+ if (!source()->readAt(*offset, &mVersion, 1)) {
+ return ERROR_IO;
+ }
+ if (!source()->getUInt24(*offset + 1, &mFlags)) {
+ return ERROR_IO;
+ }
+ *offset += 4;
+ *size -= 4;
+ return OK;
+}
+
+/////////////////////////////////////////////////////////////////////
+//
+// PrimaryImage box
+//
+
+struct PitmBox : public FullBox {
+ PitmBox(DataSourceBase *source) :
+ FullBox(source, FOURCC('p', 'i', 't', 'm')) {}
+
+ status_t parse(off64_t offset, size_t size, uint32_t *primaryItemId);
+};
+
+status_t PitmBox::parse(off64_t offset, size_t size, uint32_t *primaryItemId) {
+ status_t err = parseFullBoxHeader(&offset, &size);
+ if (err != OK) {
+ return err;
+ }
+
+ size_t itemIdSize = (version() == 0) ? 2 : 4;
+ if (size < itemIdSize) {
+ return ERROR_MALFORMED;
+ }
+ uint32_t itemId;
+ if (!source()->getUInt32Var(offset, &itemId, itemIdSize)) {
+ return ERROR_IO;
+ }
+
+ ALOGV("primary id %d", itemId);
+ *primaryItemId = itemId;
+
+ return OK;
+}
+
+/////////////////////////////////////////////////////////////////////
+//
+// ItemLocation related boxes
+//
+
+struct ExtentEntry {
+ uint64_t extentIndex;
+ uint64_t extentOffset;
+ uint64_t extentLength;
+};
+
+struct ItemLoc {
+ ItemLoc() : ItemLoc(0, 0, 0, 0) {}
+ ItemLoc(uint32_t item_id, uint16_t construction_method,
+ uint16_t data_reference_index, uint64_t base_offset) :
+ itemId(item_id),
+ constructionMethod(construction_method),
+ dataReferenceIndex(data_reference_index),
+ baseOffset(base_offset) {}
+
+ void addExtent(const ExtentEntry& extent) {
+ extents.push_back(extent);
+ }
+
+ status_t getLoc(off64_t *offset, size_t *size,
+ off64_t idatOffset, size_t idatSize) const {
+ // TODO: fix extent handling, fix constructionMethod = 2
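+        // construction_method 0 places extents at absolute file offsets,
+        // method 1 places them inside the 'idat' box; method 2 (item offsets)
+        // is not handled here.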
+ CHECK(extents.size() == 1);
+ if (constructionMethod == 0) {
+ *offset = baseOffset + extents[0].extentOffset;
+ *size = extents[0].extentLength;
+ return OK;
+ } else if (constructionMethod == 1) {
+ if (baseOffset + extents[0].extentOffset + extents[0].extentLength
+ > idatSize) {
+ return ERROR_MALFORMED;
+ }
+ *offset = baseOffset + extents[0].extentOffset + idatOffset;
+ *size = extents[0].extentLength;
+ return OK;
+ }
+ return ERROR_UNSUPPORTED;
+ }
+
+ // parsed info
+ uint32_t itemId;
+ uint16_t constructionMethod;
+ uint16_t dataReferenceIndex;
+ off64_t baseOffset;
+ Vector<ExtentEntry> extents;
+};
+
+struct IlocBox : public FullBox {
+ IlocBox(DataSourceBase *source, KeyedVector<uint32_t, ItemLoc> *itemLocs) :
+ FullBox(source, FOURCC('i', 'l', 'o', 'c')),
+ mItemLocs(itemLocs), mHasConstructMethod1(false) {}
+
+ status_t parse(off64_t offset, size_t size);
+
+ bool hasConstructMethod1() { return mHasConstructMethod1; }
+
+private:
+ static bool isSizeFieldValid(uint32_t offset_size) {
+ return offset_size == 0 || offset_size == 4 || offset_size == 8;
+ }
+ KeyedVector<uint32_t, ItemLoc> *mItemLocs;
+ bool mHasConstructMethod1;
+};
+
+status_t IlocBox::parse(off64_t offset, size_t size) {
+ status_t err = parseFullBoxHeader(&offset, &size);
+ if (err != OK) {
+ return err;
+ }
+ if (version() > 2) {
+ ALOGE("%s: invalid version %d", __FUNCTION__, version());
+ return ERROR_MALFORMED;
+ }
+
+ if (size < 2) {
+ return ERROR_MALFORMED;
+ }
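+    // The next two bytes pack four 4-bit size fields: offset_size,
+    // length_size, base_offset_size and (in versions 1 and 2) index_size.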
+ uint8_t offset_size;
+ if (!source()->readAt(offset++, &offset_size, 1)) {
+ return ERROR_IO;
+ }
+ uint8_t length_size = (offset_size & 0xF);
+ offset_size >>= 4;
+
+ uint8_t base_offset_size;
+ if (!source()->readAt(offset++, &base_offset_size, 1)) {
+ return ERROR_IO;
+ }
+ uint8_t index_size = 0;
+ if (version() == 1 || version() == 2) {
+ index_size = (base_offset_size & 0xF);
+ }
+ base_offset_size >>= 4;
+ size -= 2;
+
+ if (!isSizeFieldValid(offset_size)
+ || !isSizeFieldValid(length_size)
+ || !isSizeFieldValid(base_offset_size)
+ || !isSizeFieldValid((index_size))) {
+ ALOGE("%s: offset size not valid: %d, %d, %d, %d", __FUNCTION__,
+ offset_size, length_size, base_offset_size, index_size);
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t item_count;
+ size_t itemFieldSize = version() < 2 ? 2 : 4;
+ if (size < itemFieldSize) {
+ return ERROR_MALFORMED;
+ }
+ if (!source()->getUInt32Var(offset, &item_count, itemFieldSize)) {
+ return ERROR_IO;
+ }
+
+ ALOGV("item_count %lld", (long long) item_count);
+ offset += itemFieldSize;
+ size -= itemFieldSize;
+
+ for (size_t i = 0; i < item_count; i++) {
+ uint32_t item_id;
+ if (!source()->getUInt32Var(offset, &item_id, itemFieldSize)) {
+ return ERROR_IO;
+ }
+ ALOGV("item[%zu]: id %lld", i, (long long)item_id);
+ offset += itemFieldSize;
+
+ uint8_t construction_method = 0;
+ if (version() == 1 || version() == 2) {
+ uint8_t buf[2];
+ if (!source()->readAt(offset, buf, 2)) {
+ return ERROR_IO;
+ }
+ construction_method = (buf[1] & 0xF);
+ ALOGV("construction_method %d", construction_method);
+ if (construction_method == 1) {
+ mHasConstructMethod1 = true;
+ }
+
+ offset += 2;
+ }
+
+ uint16_t data_reference_index;
+ if (!source()->getUInt16(offset, &data_reference_index)) {
+ return ERROR_IO;
+ }
+ ALOGV("data_reference_index %d", data_reference_index);
+ if (data_reference_index != 0) {
+ // we don't support reference to other files
+ return ERROR_UNSUPPORTED;
+ }
+ offset += 2;
+
+ uint64_t base_offset = 0;
+ if (base_offset_size != 0) {
+ if (!source()->getUInt64Var(offset, &base_offset, base_offset_size)) {
+ return ERROR_IO;
+ }
+ offset += base_offset_size;
+ }
+ ALOGV("base_offset %lld", (long long) base_offset);
+
+ ssize_t index = mItemLocs->add(item_id, ItemLoc(
+ item_id, construction_method, data_reference_index, base_offset));
+ ItemLoc &item = mItemLocs->editValueAt(index);
+
+ uint16_t extent_count;
+ if (!source()->getUInt16(offset, &extent_count)) {
+ return ERROR_IO;
+ }
+ ALOGV("extent_count %d", extent_count);
+
+ if (extent_count > 1 && (offset_size == 0 || length_size == 0)) {
+            // if the item is divided into more than one extent, offset and
+            // length must be present.
+ return ERROR_MALFORMED;
+ }
+ offset += 2;
+
+ for (size_t j = 0; j < extent_count; j++) {
+ uint64_t extent_index = 1; // default=1
+ if ((version() == 1 || version() == 2) && (index_size > 0)) {
+ if (!source()->getUInt64Var(offset, &extent_index, index_size)) {
+ return ERROR_IO;
+ }
+ // TODO: add support for this mode
+ offset += index_size;
+ ALOGV("extent_index %lld", (long long)extent_index);
+ }
+
+ uint64_t extent_offset = 0; // default=0
+ if (offset_size > 0) {
+ if (!source()->getUInt64Var(offset, &extent_offset, offset_size)) {
+ return ERROR_IO;
+ }
+ offset += offset_size;
+ }
+ ALOGV("extent_offset %lld", (long long)extent_offset);
+
+ uint64_t extent_length = 0; // this indicates full length of file
+ if (length_size > 0) {
+ if (!source()->getUInt64Var(offset, &extent_length, length_size)) {
+ return ERROR_IO;
+ }
+ offset += length_size;
+ }
+ ALOGV("extent_length %lld", (long long)extent_length);
+
+ item.addExtent({ extent_index, extent_offset, extent_length });
+ }
+ }
+ return OK;
+}
+
+/////////////////////////////////////////////////////////////////////
+//
+// ItemReference related boxes
+//
+
+struct ItemReference : public Box, public RefBase {
+ ItemReference(DataSourceBase *source, uint32_t type, uint32_t itemIdSize) :
+ Box(source, type), mItemId(0), mRefIdSize(itemIdSize) {}
+
+ status_t parse(off64_t offset, size_t size);
+
+ uint32_t itemId() { return mItemId; }
+
+ void apply(
+ KeyedVector<uint32_t, ImageItem> &itemIdToItemMap,
+ KeyedVector<uint32_t, ExifItem> &itemIdToExifMap) const;
+
+private:
+ uint32_t mItemId;
+ uint32_t mRefIdSize;
+ Vector<uint32_t> mRefs;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ItemReference);
+};
+
+void ItemReference::apply(
+ KeyedVector<uint32_t, ImageItem> &itemIdToItemMap,
+ KeyedVector<uint32_t, ExifItem> &itemIdToExifMap) const {
+ ALOGV("attach reference type 0x%x to item id %d)", type(), mItemId);
+
+ switch(type()) {
+ case FOURCC('d', 'i', 'm', 'g'): {
+ ssize_t itemIndex = itemIdToItemMap.indexOfKey(mItemId);
+
+ // ignore non-image items
+ if (itemIndex < 0) {
+ return;
+ }
+
+ ImageItem &derivedImage = itemIdToItemMap.editValueAt(itemIndex);
+ if (!derivedImage.dimgRefs.empty()) {
+ ALOGW("dimgRefs not clean!");
+ }
+ derivedImage.dimgRefs.appendVector(mRefs);
+
+ for (size_t i = 0; i < mRefs.size(); i++) {
+ itemIndex = itemIdToItemMap.indexOfKey(mRefs[i]);
+
+ // ignore non-image items
+ if (itemIndex < 0) {
+ continue;
+ }
+ ImageItem &sourceImage = itemIdToItemMap.editValueAt(itemIndex);
+
+ // mark the source image of the derivation as hidden
+ sourceImage.hidden = true;
+ }
+ break;
+ }
+ case FOURCC('t', 'h', 'm', 'b'): {
+ ssize_t itemIndex = itemIdToItemMap.indexOfKey(mItemId);
+
+ // ignore non-image items
+ if (itemIndex < 0) {
+ return;
+ }
+
+        // mark the thumbnail image as hidden; it can be retrieved if the client
+        // requests thumbnails explicitly, but won't be exposed as a displayable.
+ ImageItem &thumbImage = itemIdToItemMap.editValueAt(itemIndex);
+ thumbImage.hidden = true;
+
+ for (size_t i = 0; i < mRefs.size(); i++) {
+ itemIndex = itemIdToItemMap.indexOfKey(mRefs[i]);
+
+ // ignore non-image items
+ if (itemIndex < 0) {
+ continue;
+ }
+ ALOGV("Image item id %d uses thumbnail item id %d", mRefs[i], mItemId);
+ ImageItem &masterImage = itemIdToItemMap.editValueAt(itemIndex);
+ if (!masterImage.thumbnails.empty()) {
+ ALOGW("already has thumbnails!");
+ }
+ masterImage.thumbnails.push_back(mItemId);
+ }
+ break;
+ }
+ case FOURCC('c', 'd', 's', 'c'): {
+ ssize_t itemIndex = itemIdToExifMap.indexOfKey(mItemId);
+
+ // ignore non-exif block items
+ if (itemIndex < 0) {
+ return;
+ }
+
+ for (size_t i = 0; i < mRefs.size(); i++) {
+ itemIndex = itemIdToItemMap.indexOfKey(mRefs[i]);
+
+ // ignore non-image items
+ if (itemIndex < 0) {
+ continue;
+ }
+ ALOGV("Image item id %d uses metadata item id %d", mRefs[i], mItemId);
+ ImageItem &image = itemIdToItemMap.editValueAt(itemIndex);
+ image.cdscRefs.push_back(mItemId);
+ }
+ break;
+ }
+ case FOURCC('a', 'u', 'x', 'l'): {
+ ssize_t itemIndex = itemIdToItemMap.indexOfKey(mItemId);
+
+ // ignore non-image items
+ if (itemIndex < 0) {
+ return;
+ }
+
+ // mark auxiliary image as hidden
+ ImageItem &auxImage = itemIdToItemMap.editValueAt(itemIndex);
+ auxImage.hidden = true;
+ break;
+ }
+ default:
+ ALOGW("ignoring unsupported ref type 0x%x", type());
+ }
+}
+
+status_t ItemReference::parse(off64_t offset, size_t size) {
+ if (size < mRefIdSize + 2) {
+ return ERROR_MALFORMED;
+ }
+ if (!source()->getUInt32Var(offset, &mItemId, mRefIdSize)) {
+ return ERROR_IO;
+ }
+ offset += mRefIdSize;
+
+ uint16_t count;
+ if (!source()->getUInt16(offset, &count)) {
+ return ERROR_IO;
+ }
+ offset += 2;
+ size -= (mRefIdSize + 2);
+
+ if (size < count * mRefIdSize) {
+ return ERROR_MALFORMED;
+ }
+
+ for (size_t i = 0; i < count; i++) {
+ uint32_t refItemId;
+ if (!source()->getUInt32Var(offset, &refItemId, mRefIdSize)) {
+ return ERROR_IO;
+ }
+ offset += mRefIdSize;
+ mRefs.push_back(refItemId);
+ ALOGV("item id %d: referencing item id %d", mItemId, refItemId);
+ }
+
+ return OK;
+}
+
+struct IrefBox : public FullBox {
+ IrefBox(DataSourceBase *source, Vector<sp<ItemReference> > *itemRefs) :
+ FullBox(source, FOURCC('i', 'r', 'e', 'f')), mRefIdSize(0), mItemRefs(itemRefs) {}
+
+ status_t parse(off64_t offset, size_t size);
+
+protected:
+ status_t onChunkData(uint32_t type, off64_t offset, size_t size) override;
+
+private:
+ uint32_t mRefIdSize;
+ Vector<sp<ItemReference> > *mItemRefs;
+};
+
+status_t IrefBox::parse(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+ status_t err = parseFullBoxHeader(&offset, &size);
+ if (err != OK) {
+ return err;
+ }
+
+ mRefIdSize = (version() == 0) ? 2 : 4;
+ return parseChunks(offset, size);
+}
+
+status_t IrefBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
+ sp<ItemReference> itemRef = new ItemReference(source(), type, mRefIdSize);
+
+ status_t err = itemRef->parse(offset, size);
+ if (err != OK) {
+ return err;
+ }
+ mItemRefs->push_back(itemRef);
+ return OK;
+}
+
+/////////////////////////////////////////////////////////////////////
+//
+// ItemProperty related boxes
+//
+
+struct AssociationEntry {
+ uint32_t itemId;
+ bool essential;
+ uint16_t index;
+};
+
+struct ItemProperty : public RefBase {
+ ItemProperty() {}
+
+ virtual void attachTo(ImageItem &/*image*/) const {
+ ALOGW("Unrecognized property");
+ }
+ virtual status_t parse(off64_t /*offset*/, size_t /*size*/) {
+ ALOGW("Unrecognized property");
+ return OK;
+ }
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(ItemProperty);
+};
+
+struct IspeBox : public FullBox, public ItemProperty {
+ IspeBox(DataSourceBase *source) :
+ FullBox(source, FOURCC('i', 's', 'p', 'e')), mWidth(0), mHeight(0) {}
+
+ status_t parse(off64_t offset, size_t size) override;
+
+ void attachTo(ImageItem &image) const override {
+ image.width = mWidth;
+ image.height = mHeight;
+ }
+
+private:
+ uint32_t mWidth;
+ uint32_t mHeight;
+};
+
+status_t IspeBox::parse(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ status_t err = parseFullBoxHeader(&offset, &size);
+ if (err != OK) {
+ return err;
+ }
+
+ if (size < 8) {
+ return ERROR_MALFORMED;
+ }
+ if (!source()->getUInt32(offset, &mWidth)
+ || !source()->getUInt32(offset + 4, &mHeight)) {
+ return ERROR_IO;
+ }
+ ALOGV("property ispe: %dx%d", mWidth, mHeight);
+
+ return OK;
+}
+
+struct HvccBox : public Box, public ItemProperty {
+ HvccBox(DataSourceBase *source) :
+ Box(source, FOURCC('h', 'v', 'c', 'C')) {}
+
+ status_t parse(off64_t offset, size_t size) override;
+
+ void attachTo(ImageItem &image) const override {
+ image.hvcc = mHVCC;
+ }
+
+private:
+ sp<ABuffer> mHVCC;
+};
+
+status_t HvccBox::parse(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ mHVCC = new ABuffer(size);
+
+ if (mHVCC->data() == NULL) {
+ ALOGE("b/28471206");
+ return NO_MEMORY;
+ }
+
+ if (source()->readAt(offset, mHVCC->data(), size) < (ssize_t)size) {
+ return ERROR_IO;
+ }
+
+ ALOGV("property hvcC");
+
+ return OK;
+}
+
+struct IrotBox : public Box, public ItemProperty {
+ IrotBox(DataSourceBase *source) :
+ Box(source, FOURCC('i', 'r', 'o', 't')), mAngle(0) {}
+
+ status_t parse(off64_t offset, size_t size) override;
+
+ void attachTo(ImageItem &image) const override {
+ image.rotation = mAngle * 90;
+ }
+
+private:
+ uint8_t mAngle;
+};
+
+status_t IrotBox::parse(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ if (size < 1) {
+ return ERROR_MALFORMED;
+ }
+ if (source()->readAt(offset, &mAngle, 1) != 1) {
+ return ERROR_IO;
+ }
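+ // Only the low two bits are meaningful: the angle is expressed in units of
+ // 90-degree counter-clockwise rotation (0..3).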
+ mAngle &= 0x3;
+ ALOGV("property irot: %d", mAngle);
+
+ return OK;
+}
+
+struct ColrBox : public Box, public ItemProperty {
+ ColrBox(DataSourceBase *source) :
+ Box(source, FOURCC('c', 'o', 'l', 'r')) {}
+
+ status_t parse(off64_t offset, size_t size) override;
+
+ void attachTo(ImageItem &image) const override {
+ image.icc = mICCData;
+ }
+
+private:
+ sp<ABuffer> mICCData;
+};
+
+status_t ColrBox::parse(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ if (size < 4) {
+ return ERROR_MALFORMED;
+ }
+ uint32_t colour_type;
+ if (!source()->getUInt32(offset, &colour_type)) {
+ return ERROR_IO;
+ }
+ offset += 4;
+ size -= 4;
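+ // 'nclx' entries carry enumerated colour primaries rather than an embedded
+ // ICC profile, so there is nothing to copy; only 'rICC' and 'prof' payloads
+ // are kept as ICC data.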
+ if (colour_type == FOURCC('n', 'c', 'l', 'x')) {
+ return OK;
+ }
+ if ((colour_type != FOURCC('r', 'I', 'C', 'C')) &&
+ (colour_type != FOURCC('p', 'r', 'o', 'f'))) {
+ return ERROR_MALFORMED;
+ }
+
+ mICCData = new ABuffer(size);
+ if (mICCData->data() == NULL) {
+ ALOGE("b/28471206");
+ return NO_MEMORY;
+ }
+
+ if (source()->readAt(offset, mICCData->data(), size) != (ssize_t)size) {
+ return ERROR_IO;
+ }
+
+ ALOGV("property Colr: size %zd", size);
+ return OK;
+}
+
+struct IpmaBox : public FullBox {
+ IpmaBox(DataSourceBase *source, Vector<AssociationEntry> *associations) :
+ FullBox(source, FOURCC('i', 'p', 'm', 'a')), mAssociations(associations) {}
+
+ status_t parse(off64_t offset, size_t size);
+private:
+ Vector<AssociationEntry> *mAssociations;
+};
+
+status_t IpmaBox::parse(off64_t offset, size_t size) {
+ status_t err = parseFullBoxHeader(&offset, &size);
+ if (err != OK) {
+ return err;
+ }
+
+ if (size < 4) {
+ return ERROR_MALFORMED;
+ }
+ uint32_t entryCount;
+ if (!source()->getUInt32(offset, &entryCount)) {
+ return ERROR_IO;
+ }
+ offset += 4;
+ size -= 4;
+
+ for (size_t k = 0; k < entryCount; ++k) {
+ uint32_t itemId = 0;
+ size_t itemIdSize = (version() < 1) ? 2 : 4;
+
+ if (size < itemIdSize + 1) {
+ return ERROR_MALFORMED;
+ }
+
+ if (!source()->getUInt32Var(offset, &itemId, itemIdSize)) {
+ return ERROR_IO;
+ }
+ offset += itemIdSize;
+ size -= itemIdSize;
+
+ uint8_t associationCount;
+ if (!source()->readAt(offset, &associationCount, 1)) {
+ return ERROR_IO;
+ }
+ offset++;
+ size--;
+
+ for (size_t i = 0; i < associationCount; ++i) {
+ size_t propIndexSize = (flags() & 1) ? 2 : 1;
+ if (size < propIndexSize) {
+ return ERROR_MALFORMED;
+ }
+ uint16_t propIndex;
+ if (!source()->getUInt16Var(offset, &propIndex, propIndexSize)) {
+ return ERROR_IO;
+ }
+ offset += propIndexSize;
+ size -= propIndexSize;
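+ // The most significant bit of the association entry is the 'essential'
+ // flag; the remaining bits form the 1-based index into the 'ipco'
+ // property array.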
+ uint16_t bitmask = (1 << (8 * propIndexSize - 1));
+ AssociationEntry entry = {
+ .itemId = itemId,
+ .essential = !!(propIndex & bitmask),
+ .index = (uint16_t) (propIndex & ~bitmask)
+ };
+
+ ALOGV("item id %d associated to property %d (essential %d)",
+ itemId, entry.index, entry.essential);
+
+ mAssociations->push_back(entry);
+ }
+ }
+
+ return OK;
+}
+
+struct IpcoBox : public Box {
+ IpcoBox(DataSourceBase *source, Vector<sp<ItemProperty> > *properties) :
+ Box(source, FOURCC('i', 'p', 'c', 'o')), mItemProperties(properties) {}
+
+ status_t parse(off64_t offset, size_t size);
+protected:
+ status_t onChunkData(uint32_t type, off64_t offset, size_t size) override;
+
+private:
+ Vector<sp<ItemProperty> > *mItemProperties;
+};
+
+status_t IpcoBox::parse(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+ // push dummy as the index is 1-based
+ mItemProperties->push_back(new ItemProperty());
+ return parseChunks(offset, size);
+}
+
+status_t IpcoBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
+ sp<ItemProperty> itemProperty;
+ switch(type) {
+ case FOURCC('h', 'v', 'c', 'C'):
+ {
+ itemProperty = new HvccBox(source());
+ break;
+ }
+ case FOURCC('i', 's', 'p', 'e'):
+ {
+ itemProperty = new IspeBox(source());
+ break;
+ }
+ case FOURCC('i', 'r', 'o', 't'):
+ {
+ itemProperty = new IrotBox(source());
+ break;
+ }
+ case FOURCC('c', 'o', 'l', 'r'):
+ {
+ itemProperty = new ColrBox(source());
+ break;
+ }
+ default:
+ {
+ // push dummy to maintain correct item property index
+ itemProperty = new ItemProperty();
+ break;
+ }
+ }
+ status_t err = itemProperty->parse(offset, size);
+ if (err != OK) {
+ return err;
+ }
+ mItemProperties->push_back(itemProperty);
+ return OK;
+}
+
+struct IprpBox : public Box {
+ IprpBox(DataSourceBase *source,
+ Vector<sp<ItemProperty> > *properties,
+ Vector<AssociationEntry> *associations) :
+ Box(source, FOURCC('i', 'p', 'r', 'p')),
+ mProperties(properties), mAssociations(associations) {}
+
+ status_t parse(off64_t offset, size_t size);
+protected:
+ status_t onChunkData(uint32_t type, off64_t offset, size_t size) override;
+
+private:
+ Vector<sp<ItemProperty> > *mProperties;
+ Vector<AssociationEntry> *mAssociations;
+};
+
+status_t IprpBox::parse(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ status_t err = parseChunks(offset, size);
+ if (err != OK) {
+ return err;
+ }
+ return OK;
+}
+
+status_t IprpBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
+ switch(type) {
+ case FOURCC('i', 'p', 'c', 'o'):
+ {
+ IpcoBox ipcoBox(source(), mProperties);
+ return ipcoBox.parse(offset, size);
+ }
+ case FOURCC('i', 'p', 'm', 'a'):
+ {
+ IpmaBox ipmaBox(source(), mAssociations);
+ return ipmaBox.parse(offset, size);
+ }
+ default:
+ {
+ ALOGW("Unrecognized box.");
+ break;
+ }
+ }
+ return OK;
+}
+
+/////////////////////////////////////////////////////////////////////
+//
+// ItemInfo related boxes
+//
+struct ItemInfo {
+ uint32_t itemId;
+ uint32_t itemType;
+ bool hidden;
+};
+
+struct InfeBox : public FullBox {
+ InfeBox(DataSourceBase *source) :
+ FullBox(source, FOURCC('i', 'n', 'f', 'e')) {}
+
+ status_t parse(off64_t offset, size_t size, ItemInfo *itemInfo);
+
+private:
+ bool parseNullTerminatedString(off64_t *offset, size_t *size, String8 *out);
+};
+
+bool InfeBox::parseNullTerminatedString(
+ off64_t *offset, size_t *size, String8 *out) {
+ char tmp;
+ Vector<char> buf;
+ buf.setCapacity(256);
+ off64_t newOffset = *offset;
+ off64_t stopOffset = *offset + *size;
+ while (newOffset < stopOffset) {
+ if (!source()->readAt(newOffset++, &tmp, 1)) {
+ return false;
+ }
+ buf.push_back(tmp);
+ if (tmp == 0) {
+ out->setTo(buf.array());
+
+ *offset = newOffset;
+ *size = stopOffset - newOffset;
+
+ return true;
+ }
+ }
+ return false;
+}
+
+status_t InfeBox::parse(off64_t offset, size_t size, ItemInfo *itemInfo) {
+ status_t err = parseFullBoxHeader(&offset, &size);
+ if (err != OK) {
+ return err;
+ }
+
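+ // Versions 0 and 1 of 'infe' use a legacy layout that this parser does not
+ // support; HEIF files use version 2 or later.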
+ if (version() == 0 || version() == 1) {
+ return ERROR_UNSUPPORTED;
+ } else { // version >= 2
+ uint32_t item_id;
+ size_t itemIdSize = (version() == 2) ? 2 : 4;
+ if (size < itemIdSize + 6) {
+ return ERROR_MALFORMED;
+ }
+ if (!source()->getUInt32Var(offset, &item_id, itemIdSize)) {
+ return ERROR_IO;
+ }
+ ALOGV("item_id %d", item_id);
+ offset += itemIdSize;
+ uint16_t item_protection_index;
+ if (!source()->getUInt16(offset, &item_protection_index)) {
+ return ERROR_IO;
+ }
+ ALOGV("item_protection_index %d", item_protection_index);
+ offset += 2;
+ uint32_t item_type;
+ if (!source()->getUInt32(offset, &item_type)) {
+ return ERROR_IO;
+ }
+
+ itemInfo->itemId = item_id;
+ itemInfo->itemType = item_type;
+ // According to HEIF spec, (flags & 1) indicates the image is hidden
+ // and not supposed to be displayed.
+ itemInfo->hidden = (flags() & 1);
+
+ char itemTypeString[5];
+ MakeFourCCString(item_type, itemTypeString);
+ ALOGV("item_type %s", itemTypeString);
+ offset += 4;
+ size -= itemIdSize + 6;
+
+ String8 item_name;
+ if (!parseNullTerminatedString(&offset, &size, &item_name)) {
+ return ERROR_MALFORMED;
+ }
+ ALOGV("item_name %s", item_name.c_str());
+
+ if (item_type == FOURCC('m', 'i', 'm', 'e')) {
+ String8 content_type;
+ if (!parseNullTerminatedString(&offset, &size, &content_type)) {
+ return ERROR_MALFORMED;
+ }
+
+ // content_encoding is optional; it can be omitted if it would be empty
+ if (size > 0) {
+ String8 content_encoding;
+ if (!parseNullTerminatedString(&offset, &size, &content_encoding)) {
+ return ERROR_MALFORMED;
+ }
+ }
+ } else if (item_type == FOURCC('u', 'r', 'i', ' ')) {
+ String8 item_uri_type;
+ if (!parseNullTerminatedString(&offset, &size, &item_uri_type)) {
+ return ERROR_MALFORMED;
+ }
+ }
+ }
+ return OK;
+}
+
+struct IinfBox : public FullBox {
+ IinfBox(DataSourceBase *source, Vector<ItemInfo> *itemInfos) :
+ FullBox(source, FOURCC('i', 'i', 'n', 'f')),
+ mItemInfos(itemInfos), mHasGrids(false) {}
+
+ status_t parse(off64_t offset, size_t size);
+
+ bool hasGrids() { return mHasGrids; }
+
+protected:
+ status_t onChunkData(uint32_t type, off64_t offset, size_t size) override;
+
+private:
+ Vector<ItemInfo> *mItemInfos;
+ bool mHasGrids;
+};
+
+status_t IinfBox::parse(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ status_t err = parseFullBoxHeader(&offset, &size);
+ if (err != OK) {
+ return err;
+ }
+
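+ // Version 0 'iinf' boxes use a 16-bit entry_count; later versions use 32 bits.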
+ size_t entryCountSize = version() == 0 ? 2 : 4;
+ if (size < entryCountSize) {
+ return ERROR_MALFORMED;
+ }
+ uint32_t entry_count;
+ if (!source()->getUInt32Var(offset, &entry_count, entryCountSize)) {
+ return ERROR_IO;
+ }
+ ALOGV("entry_count %d", entry_count);
+
+ off64_t stopOffset = offset + size;
+ offset += entryCountSize;
+ for (size_t i = 0; i < entry_count && offset < stopOffset; i++) {
+ ALOGV("entry %zu", i);
+ status_t err = parseChunk(&offset);
+ if (err != OK) {
+ return err;
+ }
+ }
+ if (offset != stopOffset) {
+ return ERROR_MALFORMED;
+ }
+
+ return OK;
+}
+
+status_t IinfBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
+ if (type != FOURCC('i', 'n', 'f', 'e')) {
+ return OK;
+ }
+
+ InfeBox infeBox(source());
+ ItemInfo itemInfo;
+ status_t err = infeBox.parse(offset, size, &itemInfo);
+ if (err == OK) {
+ mItemInfos->push_back(itemInfo);
+ mHasGrids |= (itemInfo.itemType == FOURCC('g', 'r', 'i', 'd'));
+ }
+ // InfeBox::parse returns ERROR_UNSUPPORTED if the box is of an unsupported
+ // version. Ignore this error as it's not fatal.
+ return (err == ERROR_UNSUPPORTED) ? OK : err;
+}
+
+//////////////////////////////////////////////////////////////////
+
+ItemTable::ItemTable(DataSourceBase *source)
+ : mDataSource(source),
+ mPrimaryItemId(0),
+ mIdatOffset(0),
+ mIdatSize(0),
+ mImageItemsValid(false),
+ mCurrentItemIndex(0) {
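+ // These boxes must all be parsed before the image list can be built;
+ // 'idat' and 'iref' are added to the required set on demand (see
+ // parseIlocBox() and parseIinfBox()).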
+ mRequiredBoxes.insert('iprp');
+ mRequiredBoxes.insert('iloc');
+ mRequiredBoxes.insert('pitm');
+ mRequiredBoxes.insert('iinf');
+}
+
+ItemTable::~ItemTable() {}
+
+status_t ItemTable::parse(uint32_t type, off64_t data_offset, size_t chunk_data_size) {
+ switch(type) {
+ case FOURCC('i', 'l', 'o', 'c'):
+ {
+ return parseIlocBox(data_offset, chunk_data_size);
+ }
+ case FOURCC('i', 'i', 'n', 'f'):
+ {
+ return parseIinfBox(data_offset, chunk_data_size);
+ }
+ case FOURCC('i', 'p', 'r', 'p'):
+ {
+ return parseIprpBox(data_offset, chunk_data_size);
+ }
+ case FOURCC('p', 'i', 't', 'm'):
+ {
+ return parsePitmBox(data_offset, chunk_data_size);
+ }
+ case FOURCC('i', 'd', 'a', 't'):
+ {
+ return parseIdatBox(data_offset, chunk_data_size);
+ }
+ case FOURCC('i', 'r', 'e', 'f'):
+ {
+ return parseIrefBox(data_offset, chunk_data_size);
+ }
+ case FOURCC('i', 'p', 'r', 'o'):
+ {
+ ALOGW("ipro box not supported!");
+ break;
+ }
+ default:
+ {
+ ALOGW("unrecognized box type: 0x%x", type);
+ break;
+ }
+ }
+ return ERROR_UNSUPPORTED;
+}
+
+status_t ItemTable::parseIlocBox(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ IlocBox ilocBox(mDataSource, &mItemLocs);
+ status_t err = ilocBox.parse(offset, size);
+ if (err != OK) {
+ return err;
+ }
+
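+ // construction_method 1 means the item data is stored inside the 'idat'
+ // box, so 'idat' becomes a required box before items can be resolved.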
+ if (ilocBox.hasConstructMethod1()) {
+ mRequiredBoxes.insert('idat');
+ }
+
+ return buildImageItemsIfPossible('iloc');
+}
+
+status_t ItemTable::parseIinfBox(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ IinfBox iinfBox(mDataSource, &mItemInfos);
+ status_t err = iinfBox.parse(offset, size);
+ if (err != OK) {
+ return err;
+ }
+
+ if (iinfBox.hasGrids()) {
+ mRequiredBoxes.insert('iref');
+ }
+
+ return buildImageItemsIfPossible('iinf');
+}
+
+status_t ItemTable::parsePitmBox(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ PitmBox pitmBox(mDataSource);
+ status_t err = pitmBox.parse(offset, size, &mPrimaryItemId);
+ if (err != OK) {
+ return err;
+ }
+
+ return buildImageItemsIfPossible('pitm');
+}
+
+status_t ItemTable::parseIprpBox(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ IprpBox iprpBox(mDataSource, &mItemProperties, &mAssociations);
+ status_t err = iprpBox.parse(offset, size);
+ if (err != OK) {
+ return err;
+ }
+
+ return buildImageItemsIfPossible('iprp');
+}
+
+status_t ItemTable::parseIdatBox(off64_t offset, size_t size) {
+ ALOGV("%s: idat offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ // only remember the offset and size of idat box for later use
+ mIdatOffset = offset;
+ mIdatSize = size;
+
+ return buildImageItemsIfPossible('idat');
+}
+
+status_t ItemTable::parseIrefBox(off64_t offset, size_t size) {
+ ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
+
+ IrefBox irefBox(mDataSource, &mItemReferences);
+ status_t err = irefBox.parse(offset, size);
+ if (err != OK) {
+ return err;
+ }
+
+ return buildImageItemsIfPossible('iref');
+}
+
+status_t ItemTable::buildImageItemsIfPossible(uint32_t type) {
+ if (mImageItemsValid) {
+ return OK;
+ }
+
+ mBoxesSeen.insert(type);
+
+ // need at least 'iprp', 'iloc', 'pitm', 'iinf';
+ // need 'idat' if any items use construction_method 1 (item data stored in 'idat');
+ // need 'iref' if there are grids.
+ if (!std::includes(
+ mBoxesSeen.begin(), mBoxesSeen.end(),
+ mRequiredBoxes.begin(), mRequiredBoxes.end())) {
+ return OK;
+ }
+
+ ALOGV("building image table...");
+
+ for (size_t i = 0; i < mItemInfos.size(); i++) {
+ const ItemInfo &info = mItemInfos[i];
+
+ // Only handle 3 types of items, all others are ignored:
+ // 'grid': derived image from tiles
+ // 'hvc1': coded image (or tile)
+ // 'Exif': EXIF metadata
+ if (info.itemType != FOURCC('g', 'r', 'i', 'd') &&
+ info.itemType != FOURCC('h', 'v', 'c', '1') &&
+ info.itemType != FOURCC('E', 'x', 'i', 'f')) {
+ continue;
+ }
+
+ ssize_t itemIndex = mItemIdToItemMap.indexOfKey(info.itemId);
+ if (itemIndex >= 0) {
+ ALOGW("ignoring duplicate image item id %d", info.itemId);
+ continue;
+ }
+
+ ssize_t ilocIndex = mItemLocs.indexOfKey(info.itemId);
+ if (ilocIndex < 0) {
+ ALOGE("iloc missing for image item id %d", info.itemId);
+ continue;
+ }
+ const ItemLoc &iloc = mItemLocs[ilocIndex];
+
+ off64_t offset;
+ size_t size;
+ if (iloc.getLoc(&offset, &size, mIdatOffset, mIdatSize) != OK) {
+ return ERROR_MALFORMED;
+ }
+
+ if (info.itemType == FOURCC('E', 'x', 'i', 'f')) {
+ // Only add if the Exif data is non-empty. The first 4 bytes contain
+ // the offset to TIFF header, which the Exif parser doesn't use.
+ if (size > 4) {
+ ExifItem exifItem = {
+ .offset = offset,
+ .size = size,
+ };
+ mItemIdToExifMap.add(info.itemId, exifItem);
+ }
+ continue;
+ }
+
+ ImageItem image(info.itemType, info.itemId, info.hidden);
+
+ ALOGV("adding %s: itemId %d", image.isGrid() ? "grid" : "image", info.itemId);
+
+ if (image.isGrid()) {
+ // ImageGrid struct is at least 8-byte, at most 12-byte (if flags&1)
+ if (size < 8 || size > 12) {
+ return ERROR_MALFORMED;
+ }
+ uint8_t buf[12];
+ if (!mDataSource->readAt(offset, buf, size)) {
+ return ERROR_IO;
+ }
+
+ image.rows = buf[2] + 1;
+ image.columns = buf[3] + 1;
+
+ ALOGV("rows %d, columns %d", image.rows, image.columns);
+ } else {
+ image.offset = offset;
+ image.size = size;
+ }
+ mItemIdToItemMap.add(info.itemId, image);
+ }
+
+ for (size_t i = 0; i < mAssociations.size(); i++) {
+ attachProperty(mAssociations[i]);
+ }
+
+ for (size_t i = 0; i < mItemReferences.size(); i++) {
+ mItemReferences[i]->apply(mItemIdToItemMap, mItemIdToExifMap);
+ }
+
+ bool foundPrimary = false;
+ for (size_t i = 0; i < mItemIdToItemMap.size(); i++) {
+ // Add all non-hidden images; also add the primary even if it's marked
+ // hidden, in case the primary is set to a thumbnail.
+ bool isPrimary = (mItemIdToItemMap[i].itemId == mPrimaryItemId);
+ if (!mItemIdToItemMap[i].hidden || isPrimary) {
+ mDisplayables.push_back(i);
+ }
+ foundPrimary |= isPrimary;
+ }
+
+ ALOGV("found %zu displayables", mDisplayables.size());
+
+ // fail if no displayables are found
+ if (mDisplayables.empty()) {
+ return ERROR_MALFORMED;
+ }
+
+ // if the primary item id is invalid, set primary to the first displayable
+ if (!foundPrimary) {
+ mPrimaryItemId = mItemIdToItemMap[mDisplayables[0]].itemId;
+ }
+
+ mImageItemsValid = true;
+ return OK;
+}
+
+void ItemTable::attachProperty(const AssociationEntry &association) {
+ ssize_t itemIndex = mItemIdToItemMap.indexOfKey(association.itemId);
+
+ // ignore non-image items
+ if (itemIndex < 0) {
+ return;
+ }
+
+ uint16_t propertyIndex = association.index;
+ if (propertyIndex >= mItemProperties.size()) {
+ ALOGW("Ignoring invalid property index %d", propertyIndex);
+ return;
+ }
+
+ ALOGV("attach property %d to item id %d",
+ propertyIndex, association.itemId);
+
+ mItemProperties[propertyIndex]->attachTo(mItemIdToItemMap.editValueAt(itemIndex));
+}
+
+uint32_t ItemTable::countImages() const {
+ return mImageItemsValid ? mDisplayables.size() : 0;
+}
+
+sp<MetaData> ItemTable::getImageMeta(const uint32_t imageIndex) {
+ if (!mImageItemsValid) {
+ return NULL;
+ }
+
+ if (imageIndex >= mDisplayables.size()) {
+ ALOGE("%s: invalid image index %u", __FUNCTION__, imageIndex);
+ return NULL;
+ }
+ const uint32_t itemIndex = mDisplayables[imageIndex];
+ ALOGV("image[%u]: item index %u", imageIndex, itemIndex);
+
+ const ImageItem *image = &mItemIdToItemMap[itemIndex];
+
+ ssize_t tileItemIndex = -1;
+ if (image->isGrid()) {
+ if (image->dimgRefs.empty()) {
+ return NULL;
+ }
+ tileItemIndex = mItemIdToItemMap.indexOfKey(image->dimgRefs[0]);
+ if (tileItemIndex < 0) {
+ return NULL;
+ }
+ }
+
+ sp<MetaData> meta = new MetaData;
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+
+ if (image->itemId == mPrimaryItemId) {
+ meta->setInt32(kKeyTrackIsDefault, 1);
+ }
+
+ ALOGV("image[%u]: size %dx%d", imageIndex, image->width, image->height);
+
+ meta->setInt32(kKeyWidth, image->width);
+ meta->setInt32(kKeyHeight, image->height);
+ if (image->rotation != 0) {
+ // Rotation angle in HEIF is CCW, convert to CW here to be
+ // consistent with the other media formats.
+ switch(image->rotation) {
+ case 90: meta->setInt32(kKeyRotation, 270); break;
+ case 180: meta->setInt32(kKeyRotation, 180); break;
+ case 270: meta->setInt32(kKeyRotation, 90); break;
+ default: break; // don't set if invalid
+ }
+ }
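+ // Use roughly the raw YUV 4:2:0 frame size (1.5 bytes per pixel) as a
+ // conservative max input buffer size hint.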
+ meta->setInt32(kKeyMaxInputSize, image->width * image->height * 1.5);
+
+ if (!image->thumbnails.empty()) {
+ ssize_t thumbItemIndex = mItemIdToItemMap.indexOfKey(image->thumbnails[0]);
+ if (thumbItemIndex >= 0) {
+ const ImageItem &thumbnail = mItemIdToItemMap[thumbItemIndex];
+
+ meta->setInt32(kKeyThumbnailWidth, thumbnail.width);
+ meta->setInt32(kKeyThumbnailHeight, thumbnail.height);
+ meta->setData(kKeyThumbnailHVCC, kTypeHVCC,
+ thumbnail.hvcc->data(), thumbnail.hvcc->size());
+ ALOGV("image[%u]: thumbnail: size %dx%d, item index %zd",
+ imageIndex, thumbnail.width, thumbnail.height, thumbItemIndex);
+ } else {
+ ALOGW("%s: Referenced thumbnail does not exist!", __FUNCTION__);
+ }
+ }
+
+ if (image->isGrid()) {
+ meta->setInt32(kKeyGridRows, image->rows);
+ meta->setInt32(kKeyGridCols, image->columns);
+
+ // point image to the first tile for grid size and HVCC
+ image = &mItemIdToItemMap.editValueAt(tileItemIndex);
+ meta->setInt32(kKeyTileWidth, image->width);
+ meta->setInt32(kKeyTileHeight, image->height);
+ meta->setInt32(kKeyMaxInputSize, image->width * image->height * 1.5);
+ }
+
+ if (image->hvcc == NULL) {
+ ALOGE("%s: hvcc is missing for image[%u]!", __FUNCTION__, imageIndex);
+ return NULL;
+ }
+ meta->setData(kKeyHVCC, kTypeHVCC, image->hvcc->data(), image->hvcc->size());
+
+ if (image->icc != NULL) {
+ meta->setData(kKeyIccProfile, 0, image->icc->data(), image->icc->size());
+ }
+ return meta;
+}
+
+status_t ItemTable::findImageItem(const uint32_t imageIndex, uint32_t *itemIndex) {
+ if (!mImageItemsValid) {
+ return INVALID_OPERATION;
+ }
+
+ if (imageIndex >= mDisplayables.size()) {
+ ALOGE("%s: invalid image index %d", __FUNCTION__, imageIndex);
+ return BAD_VALUE;
+ }
+
+ *itemIndex = mDisplayables[imageIndex];
+
+ ALOGV("image[%u]: item index %u", imageIndex, *itemIndex);
+ return OK;
+}
+
+status_t ItemTable::findThumbnailItem(const uint32_t imageIndex, uint32_t *itemIndex) {
+ if (!mImageItemsValid) {
+ return INVALID_OPERATION;
+ }
+
+ if (imageIndex >= mDisplayables.size()) {
+ ALOGE("%s: invalid image index %d", __FUNCTION__, imageIndex);
+ return BAD_VALUE;
+ }
+
+ uint32_t masterItemIndex = mDisplayables[imageIndex];
+
+ const ImageItem &masterImage = mItemIdToItemMap[masterItemIndex];
+ if (masterImage.thumbnails.empty()) {
+ *itemIndex = masterItemIndex;
+ return OK;
+ }
+
+ ssize_t thumbItemIndex = mItemIdToItemMap.indexOfKey(masterImage.thumbnails[0]);
+ if (thumbItemIndex < 0) {
+ // Do not return the master image in this case, fail it so that the
+ // thumbnail extraction code knows we really don't have it.
+ return INVALID_OPERATION;
+ }
+
+ *itemIndex = thumbItemIndex;
+ return OK;
+}
+
+status_t ItemTable::getImageOffsetAndSize(
+ uint32_t *itemIndex, off64_t *offset, size_t *size) {
+ if (!mImageItemsValid) {
+ return INVALID_OPERATION;
+ }
+
+ if (itemIndex != NULL) {
+ if (*itemIndex >= mItemIdToItemMap.size()) {
+ ALOGE("%s: Bad item index!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ mCurrentItemIndex = *itemIndex;
+ }
+
+ ImageItem &image = mItemIdToItemMap.editValueAt(mCurrentItemIndex);
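+ // Grid images are read tile by tile via their 'dimg' references; passing a
+ // new itemIndex restarts the tile iteration.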
+ if (image.isGrid()) {
+ uint32_t tileItemId;
+ status_t err = image.getNextTileItemId(&tileItemId, itemIndex != NULL);
+ if (err != OK) {
+ return err;
+ }
+ ssize_t tileItemIndex = mItemIdToItemMap.indexOfKey(tileItemId);
+ if (tileItemIndex < 0) {
+ return ERROR_END_OF_STREAM;
+ }
+ *offset = mItemIdToItemMap[tileItemIndex].offset;
+ *size = mItemIdToItemMap[tileItemIndex].size;
+ } else {
+ if (itemIndex == NULL) {
+ // For single images, we only allow it to be read once, after that
+ // it's EOS. New item index must be requested each time.
+ return ERROR_END_OF_STREAM;
+ }
+ *offset = mItemIdToItemMap[mCurrentItemIndex].offset;
+ *size = mItemIdToItemMap[mCurrentItemIndex].size;
+ }
+
+ return OK;
+}
+
+status_t ItemTable::getExifOffsetAndSize(off64_t *offset, size_t *size) {
+ if (!mImageItemsValid) {
+ return INVALID_OPERATION;
+ }
+
+ ssize_t itemIndex = mItemIdToItemMap.indexOfKey(mPrimaryItemId);
+
+ // this should not happen, something's seriously wrong.
+ if (itemIndex < 0) {
+ return INVALID_OPERATION;
+ }
+
+ const ImageItem &image = mItemIdToItemMap[itemIndex];
+ if (image.cdscRefs.size() == 0) {
+ return NAME_NOT_FOUND;
+ }
+
+ ssize_t exifIndex = mItemIdToExifMap.indexOfKey(image.cdscRefs[0]);
+ if (exifIndex < 0) {
+ return NAME_NOT_FOUND;
+ }
+
+ // skip the first 4-byte of the offset to TIFF header
+ *offset = mItemIdToExifMap[exifIndex].offset + 4;
+ *size = mItemIdToExifMap[exifIndex].size - 4;
+ return OK;
+}
+
+} // namespace heif
+
+} // namespace android
diff --git a/media/extractors/mp4/ItemTable.h b/media/extractors/mp4/ItemTable.h
new file mode 100644
index 0000000..536dcb0
--- /dev/null
+++ b/media/extractors/mp4/ItemTable.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ITEM_TABLE_H_
+#define ITEM_TABLE_H_
+
+#include <set>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class DataSourceBase;
+class MetaData;
+
+namespace heif {
+
+struct AssociationEntry;
+struct ImageItem;
+struct ExifItem;
+struct ItemLoc;
+struct ItemInfo;
+struct ItemProperty;
+struct ItemReference;
+
+/*
+ * ItemTable keeps track of all image items (including coded images, grids and
+ * tiles) inside a HEIF still image (ISO/IEC FDIS 23008-12.2:2017(E)).
+ */
+
+class ItemTable : public RefBase {
+public:
+ explicit ItemTable(DataSourceBase *source);
+
+ status_t parse(uint32_t type, off64_t offset, size_t size);
+
+ bool isValid() { return mImageItemsValid; }
+ uint32_t countImages() const;
+ sp<MetaData> getImageMeta(const uint32_t imageIndex);
+ status_t findImageItem(const uint32_t imageIndex, uint32_t *itemIndex);
+ status_t findThumbnailItem(const uint32_t imageIndex, uint32_t *itemIndex);
+ status_t getImageOffsetAndSize(
+ uint32_t *itemIndex, off64_t *offset, size_t *size);
+ status_t getExifOffsetAndSize(off64_t *offset, size_t *size);
+
+protected:
+ ~ItemTable();
+
+private:
+ DataSourceBase *mDataSource;
+
+ KeyedVector<uint32_t, ItemLoc> mItemLocs;
+ Vector<ItemInfo> mItemInfos;
+ Vector<AssociationEntry> mAssociations;
+ Vector<sp<ItemProperty> > mItemProperties;
+ Vector<sp<ItemReference> > mItemReferences;
+
+ uint32_t mPrimaryItemId;
+ off64_t mIdatOffset;
+ size_t mIdatSize;
+
+ std::set<uint32_t> mRequiredBoxes;
+ std::set<uint32_t> mBoxesSeen;
+
+ bool mImageItemsValid;
+ uint32_t mCurrentItemIndex;
+ KeyedVector<uint32_t, ImageItem> mItemIdToItemMap;
+ KeyedVector<uint32_t, ExifItem> mItemIdToExifMap;
+ Vector<uint32_t> mDisplayables;
+
+ status_t parseIlocBox(off64_t offset, size_t size);
+ status_t parseIinfBox(off64_t offset, size_t size);
+ status_t parsePitmBox(off64_t offset, size_t size);
+ status_t parseIprpBox(off64_t offset, size_t size);
+ status_t parseIdatBox(off64_t offset, size_t size);
+ status_t parseIrefBox(off64_t offset, size_t size);
+
+ void attachProperty(const AssociationEntry &association);
+ status_t buildImageItemsIfPossible(uint32_t type);
+
+ DISALLOW_EVIL_CONSTRUCTORS(ItemTable);
+};
+
+} // namespace heif
+} // namespace android
+
+#endif // ITEM_TABLE_H_
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/extractors/mp4/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/extractors/mp4/MODULE_LICENSE_APACHE2
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
new file mode 100644
index 0000000..7b3b81d
--- /dev/null
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -0,0 +1,5585 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MPEG4Extractor"
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <memory>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <utils/Log.h>
+
+#include "MPEG4Extractor.h"
+#include "SampleTable.h"
+#include "ItemTable.h"
+#include "include/ESDS.h"
+
+#include <media/ExtractorUtils.h>
+#include <media/MediaTrack.h>
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaBufferBase.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/String8.h>
+
+#include <byteswap.h>
+#include "include/ID3.h"
+
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+namespace android {
+
+enum {
+ // max track header chunk to return
+ kMaxTrackHeaderSize = 32,
+
+ // maximum size of an atom. Some atoms can be bigger according to the spec,
+ // but we only allow up to this size.
+ kMaxAtomSize = 64 * 1024 * 1024,
+};
+
+class MPEG4Source : public MediaTrack {
+public:
+ // Caller retains ownership of both "dataSource" and "sampleTable".
+ MPEG4Source(MetaDataBase &format,
+ DataSourceBase *dataSource,
+ int32_t timeScale,
+ const sp<SampleTable> &sampleTable,
+ Vector<SidxEntry> &sidx,
+ const Trex *trex,
+ off64_t firstMoofOffset,
+ const sp<ItemTable> &itemTable);
+ virtual status_t init();
+
+ virtual status_t start(MetaDataBase *params = NULL);
+ virtual status_t stop();
+
+ virtual status_t getFormat(MetaDataBase &);
+
+ virtual status_t read(MediaBufferBase **buffer, const ReadOptions *options = NULL);
+ virtual bool supportNonblockingRead() { return true; }
+ virtual status_t fragmentedRead(MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+ virtual ~MPEG4Source();
+
+private:
+ Mutex mLock;
+
+ MetaDataBase &mFormat;
+ DataSourceBase *mDataSource;
+ int32_t mTimescale;
+ sp<SampleTable> mSampleTable;
+ uint32_t mCurrentSampleIndex;
+ uint32_t mCurrentFragmentIndex;
+ Vector<SidxEntry> &mSegments;
+ const Trex *mTrex;
+ off64_t mFirstMoofOffset;
+ off64_t mCurrentMoofOffset;
+ off64_t mNextMoofOffset;
+ uint32_t mCurrentTime;
+ int32_t mLastParsedTrackId;
+ int32_t mTrackId;
+
+ int32_t mCryptoMode; // passed in from extractor
+ int32_t mDefaultIVSize; // passed in from extractor
+ uint8_t mCryptoKey[16]; // passed in from extractor
+ int32_t mDefaultEncryptedByteBlock;
+ int32_t mDefaultSkipByteBlock;
+ uint32_t mCurrentAuxInfoType;
+ uint32_t mCurrentAuxInfoTypeParameter;
+ int32_t mCurrentDefaultSampleInfoSize;
+ uint32_t mCurrentSampleInfoCount;
+ uint32_t mCurrentSampleInfoAllocSize;
+ uint8_t* mCurrentSampleInfoSizes;
+ uint32_t mCurrentSampleInfoOffsetCount;
+ uint32_t mCurrentSampleInfoOffsetsAllocSize;
+ uint64_t* mCurrentSampleInfoOffsets;
+
+ bool mIsAVC;
+ bool mIsHEVC;
+ size_t mNALLengthSize;
+
+ bool mStarted;
+
+ MediaBufferGroup *mGroup;
+
+ MediaBufferBase *mBuffer;
+
+ bool mWantsNALFragments;
+
+ uint8_t *mSrcBuffer;
+
+ bool mIsHeif;
+ sp<ItemTable> mItemTable;
+
+ size_t parseNALSize(const uint8_t *data) const;
+ status_t parseChunk(off64_t *offset);
+ status_t parseTrackFragmentHeader(off64_t offset, off64_t size);
+ status_t parseTrackFragmentRun(off64_t offset, off64_t size);
+ status_t parseSampleAuxiliaryInformationSizes(off64_t offset, off64_t size);
+ status_t parseSampleAuxiliaryInformationOffsets(off64_t offset, off64_t size);
+ status_t parseClearEncryptedSizes(off64_t offset, bool isSubsampleEncryption, uint32_t flags);
+ status_t parseSampleEncryption(off64_t offset);
+
+ struct TrackFragmentHeaderInfo {
+ enum Flags {
+ kBaseDataOffsetPresent = 0x01,
+ kSampleDescriptionIndexPresent = 0x02,
+ kDefaultSampleDurationPresent = 0x08,
+ kDefaultSampleSizePresent = 0x10,
+ kDefaultSampleFlagsPresent = 0x20,
+ kDurationIsEmpty = 0x10000,
+ };
+
+ uint32_t mTrackID;
+ uint32_t mFlags;
+ uint64_t mBaseDataOffset;
+ uint32_t mSampleDescriptionIndex;
+ uint32_t mDefaultSampleDuration;
+ uint32_t mDefaultSampleSize;
+ uint32_t mDefaultSampleFlags;
+
+ uint64_t mDataOffset;
+ };
+ TrackFragmentHeaderInfo mTrackFragmentHeaderInfo;
+
+ struct Sample {
+ off64_t offset;
+ size_t size;
+ uint32_t duration;
+ int32_t compositionOffset;
+ uint8_t iv[16];
+ Vector<size_t> clearsizes;
+ Vector<size_t> encryptedsizes;
+ };
+ Vector<Sample> mCurrentSamples;
+
+ MPEG4Source(const MPEG4Source &);
+ MPEG4Source &operator=(const MPEG4Source &);
+};
+
+// This custom data source wraps an existing one and serves requests that fall
+// entirely within a cached range from the cache, forwarding all other requests
+// to the wrapped data source.
+// It is used to cache the full sample table metadata for one track at a time;
+// sources may be wrapped repeatedly so that every track gets its own
+// CachedRangedDataSource holding that track's sample table metadata.
+
+struct CachedRangedDataSource : public DataSourceBase {
+ explicit CachedRangedDataSource(DataSourceBase *source);
+ virtual ~CachedRangedDataSource();
+
+ virtual status_t initCheck() const;
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+ virtual status_t getSize(off64_t *size);
+ virtual uint32_t flags();
+
+ status_t setCachedRange(off64_t offset, size_t size, bool assumeSourceOwnershipOnSuccess);
+
+
+private:
+ Mutex mLock;
+
+ DataSourceBase *mSource;
+ bool mOwnsDataSource;
+ off64_t mCachedOffset;
+ size_t mCachedSize;
+ uint8_t *mCache;
+
+ void clearCache();
+
+ CachedRangedDataSource(const CachedRangedDataSource &);
+ CachedRangedDataSource &operator=(const CachedRangedDataSource &);
+};
+
+CachedRangedDataSource::CachedRangedDataSource(DataSourceBase *source)
+ : mSource(source),
+ mOwnsDataSource(false),
+ mCachedOffset(0),
+ mCachedSize(0),
+ mCache(NULL) {
+}
+
+CachedRangedDataSource::~CachedRangedDataSource() {
+ clearCache();
+ if (mOwnsDataSource) {
+ delete (CachedRangedDataSource*)mSource;
+ }
+}
+
+void CachedRangedDataSource::clearCache() {
+ if (mCache) {
+ free(mCache);
+ mCache = NULL;
+ }
+
+ mCachedOffset = 0;
+ mCachedSize = 0;
+}
+
+status_t CachedRangedDataSource::initCheck() const {
+ return mSource->initCheck();
+}
+
+ssize_t CachedRangedDataSource::readAt(off64_t offset, void *data, size_t size) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (isInRange(mCachedOffset, mCachedSize, offset, size)) {
+ memcpy(data, &mCache[offset - mCachedOffset], size);
+ return size;
+ }
+
+ return mSource->readAt(offset, data, size);
+}
+
+status_t CachedRangedDataSource::getSize(off64_t *size) {
+ return mSource->getSize(size);
+}
+
+uint32_t CachedRangedDataSource::flags() {
+ return mSource->flags();
+}
+
+status_t CachedRangedDataSource::setCachedRange(off64_t offset,
+ size_t size,
+ bool assumeSourceOwnershipOnSuccess) {
+ Mutex::Autolock autoLock(mLock);
+
+ clearCache();
+
+ mCache = (uint8_t *)malloc(size);
+
+ if (mCache == NULL) {
+ return -ENOMEM;
+ }
+
+ mCachedOffset = offset;
+ mCachedSize = size;
+
+ ssize_t err = mSource->readAt(mCachedOffset, mCache, mCachedSize);
+
+ if (err < (ssize_t)size) {
+ clearCache();
+
+ return ERROR_IO;
+ }
+ mOwnsDataSource = assumeSourceOwnershipOnSuccess;
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static const bool kUseHexDump = false;
+
+static const char *FourCC2MIME(uint32_t fourcc) {
+ switch (fourcc) {
+ case FOURCC('m', 'p', '4', 'a'):
+ return MEDIA_MIMETYPE_AUDIO_AAC;
+
+ case FOURCC('s', 'a', 'm', 'r'):
+ return MEDIA_MIMETYPE_AUDIO_AMR_NB;
+
+ case FOURCC('s', 'a', 'w', 'b'):
+ return MEDIA_MIMETYPE_AUDIO_AMR_WB;
+
+ case FOURCC('m', 'p', '4', 'v'):
+ return MEDIA_MIMETYPE_VIDEO_MPEG4;
+
+ case FOURCC('s', '2', '6', '3'):
+ case FOURCC('h', '2', '6', '3'):
+ case FOURCC('H', '2', '6', '3'):
+ return MEDIA_MIMETYPE_VIDEO_H263;
+
+ case FOURCC('a', 'v', 'c', '1'):
+ return MEDIA_MIMETYPE_VIDEO_AVC;
+
+ case FOURCC('h', 'v', 'c', '1'):
+ case FOURCC('h', 'e', 'v', '1'):
+ return MEDIA_MIMETYPE_VIDEO_HEVC;
+ default:
+ ALOGW("Unknown fourcc: %c%c%c%c",
+ (fourcc >> 24) & 0xff,
+ (fourcc >> 16) & 0xff,
+ (fourcc >> 8) & 0xff,
+ fourcc & 0xff
+ );
+ return "application/octet-stream";
+ }
+}
+
+static bool AdjustChannelsAndRate(uint32_t fourcc, uint32_t *channels, uint32_t *rate) {
+ if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, FourCC2MIME(fourcc))) {
+ // AMR NB audio is always mono, 8kHz
+ *channels = 1;
+ *rate = 8000;
+ return true;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, FourCC2MIME(fourcc))) {
+ // AMR WB audio is always mono, 16kHz
+ *channels = 1;
+ *rate = 16000;
+ return true;
+ }
+ return false;
+}
+
+MPEG4Extractor::MPEG4Extractor(DataSourceBase *source, const char *mime)
+ : mMoofOffset(0),
+ mMoofFound(false),
+ mMdatFound(false),
+ mDataSource(source),
+ mCachedSource(NULL),
+ mInitCheck(NO_INIT),
+ mHeaderTimescale(0),
+ mIsQT(false),
+ mIsHeif(false),
+ mHasMoovBox(false),
+ mPreferHeif(mime != NULL && !strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_HEIF)),
+ mFirstTrack(NULL),
+ mLastTrack(NULL) {
+ ALOGV("mime=%s, mPreferHeif=%d", mime, mPreferHeif);
+}
+
+MPEG4Extractor::~MPEG4Extractor() {
+ Track *track = mFirstTrack;
+ while (track) {
+ Track *next = track->next;
+
+ delete track;
+ track = next;
+ }
+ mFirstTrack = mLastTrack = NULL;
+
+ for (size_t i = 0; i < mPssh.size(); i++) {
+ delete [] mPssh[i].data;
+ }
+ mPssh.clear();
+
+ delete mCachedSource;
+}
+
+uint32_t MPEG4Extractor::flags() const {
+ return CAN_PAUSE |
+ ((mMoofOffset == 0 || mSidxEntries.size() != 0) ?
+ (CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK) : 0);
+}
+
+status_t MPEG4Extractor::getMetaData(MetaDataBase &meta) {
+ status_t err;
+ if ((err = readMetaData()) != OK) {
+ return UNKNOWN_ERROR;
+ }
+ meta = mFileMetaData;
+ return OK;
+}
+
+size_t MPEG4Extractor::countTracks() {
+ status_t err;
+ if ((err = readMetaData()) != OK) {
+ ALOGV("MPEG4Extractor::countTracks: no tracks");
+ return 0;
+ }
+
+ size_t n = 0;
+ Track *track = mFirstTrack;
+ while (track) {
+ ++n;
+ track = track->next;
+ }
+
+ ALOGV("MPEG4Extractor::countTracks: %zu tracks", n);
+ return n;
+}
+
+status_t MPEG4Extractor::getTrackMetaData(
+ MetaDataBase &meta,
+ size_t index, uint32_t flags) {
+ status_t err;
+ if ((err = readMetaData()) != OK) {
+ return UNKNOWN_ERROR;
+ }
+
+ Track *track = mFirstTrack;
+ while (index > 0) {
+ if (track == NULL) {
+ return UNKNOWN_ERROR;
+ }
+
+ track = track->next;
+ --index;
+ }
+
+ if (track == NULL) {
+ return UNKNOWN_ERROR;
+ }
+
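+ // Derive the gapless-playback encoder delay and padding from the edit list
+ // ('elst') entry saved earlier, guarding each arithmetic step against overflow.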
+ [=] {
+ int64_t duration;
+ int32_t samplerate;
+ if (track->has_elst && mHeaderTimescale != 0 &&
+ track->meta.findInt64(kKeyDuration, &duration) &&
+ track->meta.findInt32(kKeySampleRate, &samplerate)) {
+
+ track->has_elst = false;
+
+ if (track->elst_segment_duration > INT64_MAX) {
+ return;
+ }
+ int64_t segment_duration = track->elst_segment_duration;
+ int64_t media_time = track->elst_media_time;
+ int64_t halfscale = mHeaderTimescale / 2;
+ ALOGV("segment_duration = %" PRId64 ", media_time = %" PRId64
+ ", halfscale = %" PRId64 ", timescale = %d",
+ segment_duration,
+ media_time,
+ halfscale,
+ mHeaderTimescale);
+
+ int64_t delay;
+ // delay = ((media_time * samplerate) + halfscale) / mHeaderTimescale;
+ if (__builtin_mul_overflow(media_time, samplerate, &delay) ||
+ __builtin_add_overflow(delay, halfscale, &delay) ||
+ (delay /= mHeaderTimescale, false) ||
+ delay > INT32_MAX ||
+ delay < INT32_MIN) {
+ return;
+ }
+ ALOGV("delay = %" PRId64, delay);
+ track->meta.setInt32(kKeyEncoderDelay, delay);
+
+ int64_t scaled_duration;
+ // scaled_duration = duration * mHeaderTimescale;
+ if (__builtin_mul_overflow(duration, mHeaderTimescale, &scaled_duration)) {
+ return;
+ }
+ ALOGV("scaled_duration = %" PRId64, scaled_duration);
+
+ int64_t segment_end;
+ int64_t padding;
+ // padding = scaled_duration - ((segment_duration + media_time) * 1000000);
+ if (__builtin_add_overflow(segment_duration, media_time, &segment_end) ||
+ __builtin_mul_overflow(segment_end, 1000000, &segment_end) ||
+ __builtin_sub_overflow(scaled_duration, segment_end, &padding)) {
+ return;
+ }
+ ALOGV("segment_end = %" PRId64 ", padding = %" PRId64, segment_end, padding);
+
+ if (padding < 0) {
+ // track duration from media header (which is what kKeyDuration is) might
+ // be slightly shorter than the segment duration, which would make the
+ // padding negative. Clamp to zero.
+ padding = 0;
+ }
+
+ int64_t paddingsamples;
+ int64_t halfscale_e6;
+ int64_t timescale_e6;
+ // paddingsamples = ((padding * samplerate) + (halfscale * 1000000))
+ // / (mHeaderTimescale * 1000000);
+ if (__builtin_mul_overflow(padding, samplerate, &paddingsamples) ||
+ __builtin_mul_overflow(halfscale, 1000000, &halfscale_e6) ||
+ __builtin_mul_overflow(mHeaderTimescale, 1000000, ×cale_e6) ||
+ __builtin_add_overflow(paddingsamples, halfscale_e6, &paddingsamples) ||
+ (paddingsamples /= timescale_e6, false) ||
+ paddingsamples > INT32_MAX) {
+ return;
+ }
+ ALOGV("paddingsamples = %" PRId64, paddingsamples);
+ track->meta.setInt32(kKeyEncoderPadding, paddingsamples);
+ }
+ }();
+
+ if ((flags & kIncludeExtensiveMetaData)
+ && !track->includes_expensive_metadata) {
+ track->includes_expensive_metadata = true;
+
+ const char *mime;
+ CHECK(track->meta.findCString(kKeyMIMEType, &mime));
+ if (!strncasecmp("video/", mime, 6)) {
+ // MPEG2 tracks do not provide CSD, so read the stream header
+ if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)) {
+ off64_t offset;
+ size_t size;
+ if (track->sampleTable->getMetaDataForSample(
+ 0 /* sampleIndex */, &offset, &size, NULL /* sampleTime */) == OK) {
+ if (size > kMaxTrackHeaderSize) {
+ size = kMaxTrackHeaderSize;
+ }
+ uint8_t header[kMaxTrackHeaderSize];
+ if (mDataSource->readAt(offset, &header, size) == (ssize_t)size) {
+ track->meta.setData(kKeyStreamHeader, 'mdat', header, size);
+ }
+ }
+ }
+
+ if (mMoofOffset > 0) {
+ int64_t duration;
+ if (track->meta.findInt64(kKeyDuration, &duration)) {
+ // nothing fancy, just pick a frame near 1/4th of the duration
+ track->meta.setInt64(
+ kKeyThumbnailTime, duration / 4);
+ }
+ } else {
+ uint32_t sampleIndex;
+ uint32_t sampleTime;
+ if (track->timescale != 0 &&
+ track->sampleTable->findThumbnailSample(&sampleIndex) == OK
+ && track->sampleTable->getMetaDataForSample(
+ sampleIndex, NULL /* offset */, NULL /* size */,
+ &sampleTime) == OK) {
+ track->meta.setInt64(
+ kKeyThumbnailTime,
+ ((int64_t)sampleTime * 1000000) / track->timescale);
+ }
+ }
+ }
+ }
+
+ meta = track->meta;
+ return OK;
+}
+
+status_t MPEG4Extractor::readMetaData() {
+ if (mInitCheck != NO_INIT) {
+ return mInitCheck;
+ }
+
+ off64_t offset = 0;
+ status_t err;
+ bool sawMoovOrSidx = false;
+
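+ // Parse top-level boxes until we have either a usable 'moov' (plus 'mdat'
+ // or 'moof'), or, for HEIF, a fully built item table.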
+ while (!((mHasMoovBox && sawMoovOrSidx && (mMdatFound || mMoofFound)) ||
+ (mIsHeif && (mPreferHeif || !mHasMoovBox) &&
+ (mItemTable != NULL) && mItemTable->isValid()))) {
+ off64_t orig_offset = offset;
+ err = parseChunk(&offset, 0);
+
+ if (err != OK && err != UNKNOWN_ERROR) {
+ break;
+ } else if (offset <= orig_offset) {
+ // only continue parsing if the offset was advanced,
+ // otherwise we might end up in an infinite loop
+ ALOGE("did not advance: %lld->%lld", (long long)orig_offset, (long long)offset);
+ err = ERROR_MALFORMED;
+ break;
+ } else if (err == UNKNOWN_ERROR) {
+ sawMoovOrSidx = true;
+ }
+ }
+
+ if (mIsHeif && (mItemTable != NULL) && (mItemTable->countImages() > 0)) {
+ off64_t exifOffset;
+ size_t exifSize;
+ if (mItemTable->getExifOffsetAndSize(&exifOffset, &exifSize) == OK) {
+ mFileMetaData.setInt64(kKeyExifOffset, (int64_t)exifOffset);
+ mFileMetaData.setInt64(kKeyExifSize, (int64_t)exifSize);
+ }
+ for (uint32_t imageIndex = 0;
+ imageIndex < mItemTable->countImages(); imageIndex++) {
+ sp<MetaData> meta = mItemTable->getImageMeta(imageIndex);
+ if (meta == NULL) {
+ ALOGE("heif image %u has no meta!", imageIndex);
+ continue;
+ }
+ // Some HEIF files advertise image sequence brands (e.g. 'hevc') in
+ // ftyp box, but don't have any valid tracks in them. Instead of
+ // reporting the entire file as malformed, we override the error
+ // to allow still images to be extracted.
+ if (err != OK) {
+ ALOGW("Extracting still images only");
+ err = OK;
+ }
+ mInitCheck = OK;
+
+ ALOGV("adding HEIF image track %u", imageIndex);
+ Track *track = new Track;
+ track->next = NULL;
+ if (mLastTrack != NULL) {
+ mLastTrack->next = track;
+ } else {
+ mFirstTrack = track;
+ }
+ mLastTrack = track;
+
+ track->meta = *(meta.get());
+ track->meta.setInt32(kKeyTrackID, imageIndex);
+ track->includes_expensive_metadata = false;
+ track->skipTrack = false;
+ track->timescale = 1000000;
+ }
+ }
+
+ if (mInitCheck == OK) {
+ if (findTrackByMimePrefix("video/") != NULL) {
+ mFileMetaData.setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
+ } else if (findTrackByMimePrefix("audio/") != NULL) {
+ mFileMetaData.setCString(kKeyMIMEType, "audio/mp4");
+ } else if (findTrackByMimePrefix(
+ MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC) != NULL) {
+ mFileMetaData.setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_HEIF);
+ } else {
+ mFileMetaData.setCString(kKeyMIMEType, "application/octet-stream");
+ }
+ } else {
+ mInitCheck = err;
+ }
+
+ CHECK_NE(err, (status_t)NO_INIT);
+
+ // copy pssh data into file metadata
+ uint64_t psshsize = 0;
+ for (size_t i = 0; i < mPssh.size(); i++) {
+ psshsize += 20 + mPssh[i].datalen;
+ }
+ if (psshsize > 0 && psshsize <= UINT32_MAX) {
+ char *buf = (char*)malloc(psshsize);
+ if (!buf) {
+ ALOGE("b/28471206");
+ return NO_MEMORY;
+ }
+ char *ptr = buf;
+ for (size_t i = 0; i < mPssh.size(); i++) {
+ memcpy(ptr, mPssh[i].uuid, 20); // uuid + length
+ memcpy(ptr + 20, mPssh[i].data, mPssh[i].datalen);
+ ptr += (20 + mPssh[i].datalen);
+ }
+ mFileMetaData.setData(kKeyPssh, 'pssh', buf, psshsize);
+ free(buf);
+ }
+
+ return mInitCheck;
+}
+
+struct PathAdder {
+ PathAdder(Vector<uint32_t> *path, uint32_t chunkType)
+ : mPath(path) {
+ mPath->push(chunkType);
+ }
+
+ ~PathAdder() {
+ mPath->pop();
+ }
+
+private:
+ Vector<uint32_t> *mPath;
+
+ PathAdder(const PathAdder &);
+ PathAdder &operator=(const PathAdder &);
+};
+
+static bool underMetaDataPath(const Vector<uint32_t> &path) {
+ return path.size() >= 5
+ && path[0] == FOURCC('m', 'o', 'o', 'v')
+ && path[1] == FOURCC('u', 'd', 't', 'a')
+ && path[2] == FOURCC('m', 'e', 't', 'a')
+ && path[3] == FOURCC('i', 'l', 's', 't');
+}
+
+static bool underQTMetaPath(const Vector<uint32_t> &path, int32_t depth) {
+ return path.size() >= 2
+ && path[0] == FOURCC('m', 'o', 'o', 'v')
+ && path[1] == FOURCC('m', 'e', 't', 'a')
+ && (depth == 2
+ || (depth == 3
+ && (path[2] == FOURCC('h', 'd', 'l', 'r')
+ || path[2] == FOURCC('i', 'l', 's', 't')
+ || path[2] == FOURCC('k', 'e', 'y', 's'))));
+}
+
+// Given a time in seconds since Jan 1 1904, produce a human-readable string.
+static bool convertTimeToDate(int64_t time_1904, String8 *s) {
+ // delta between mpeg4 time and unix epoch time
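+ // (66 years plus 17 leap days between Jan 1 1904 and Jan 1 1970, in seconds)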
+ static const int64_t delta = (((66 * 365 + 17) * 24) * 3600);
+ if (time_1904 < INT64_MIN + delta) {
+ return false;
+ }
+ time_t time_1970 = time_1904 - delta;
+
+ char tmp[32];
+ struct tm* tm = gmtime(&time_1970);
+ if (tm != NULL &&
+ strftime(tmp, sizeof(tmp), "%Y%m%dT%H%M%S.000Z", tm) > 0) {
+ s->setTo(tmp);
+ return true;
+ }
+ return false;
+}
+
+status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
+ ALOGV("entering parseChunk %lld/%d", (long long)*offset, depth);
+
+ if (*offset < 0) {
+ ALOGE("b/23540914");
+ return ERROR_MALFORMED;
+ }
+ if (depth > 100) {
+ ALOGE("b/27456299");
+ return ERROR_MALFORMED;
+ }
+ uint32_t hdr[2];
+ if (mDataSource->readAt(*offset, hdr, 8) < 8) {
+ return ERROR_IO;
+ }
+ uint64_t chunk_size = ntohl(hdr[0]);
+ int32_t chunk_type = ntohl(hdr[1]);
+ off64_t data_offset = *offset + 8;
+
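+ // A 32-bit size of 1 indicates that a 64-bit 'largesize' field follows the
+ // box type; a size of 0 means the box extends to the end of the file.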
+ if (chunk_size == 1) {
+ if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
+ return ERROR_IO;
+ }
+ chunk_size = ntoh64(chunk_size);
+ data_offset += 8;
+
+ if (chunk_size < 16) {
+ // The smallest valid chunk is 16 bytes long in this case.
+ return ERROR_MALFORMED;
+ }
+ } else if (chunk_size == 0) {
+ if (depth == 0) {
+ // atom extends to end of file
+ off64_t sourceSize;
+ if (mDataSource->getSize(&sourceSize) == OK) {
+ chunk_size = (sourceSize - *offset);
+ } else {
+ // XXX could we just pick a "sufficiently large" value here?
+ ALOGE("atom size is 0, and data source has no size");
+ return ERROR_MALFORMED;
+ }
+ } else {
+ // not allowed for non-toplevel atoms, skip it
+ *offset += 4;
+ return OK;
+ }
+ } else if (chunk_size < 8) {
+ // The smallest valid chunk is 8 bytes long.
+ ALOGE("invalid chunk size: %" PRIu64, chunk_size);
+ return ERROR_MALFORMED;
+ }
+
+ char chunk[5];
+ MakeFourCCString(chunk_type, chunk);
+ ALOGV("chunk: %s @ %lld, %d", chunk, (long long)*offset, depth);
+
+ if (kUseHexDump) {
+ static const char kWhitespace[] = " ";
+ const char *indent = &kWhitespace[sizeof(kWhitespace) - 1 - 2 * depth];
+ printf("%sfound chunk '%s' of size %" PRIu64 "\n", indent, chunk, chunk_size);
+
+ char buffer[256];
+ size_t n = chunk_size;
+ if (n > sizeof(buffer)) {
+ n = sizeof(buffer);
+ }
+ if (mDataSource->readAt(*offset, buffer, n)
+ < (ssize_t)n) {
+ return ERROR_IO;
+ }
+
+ hexdump(buffer, n);
+ }
+
+ PathAdder autoAdder(&mPath, chunk_type);
+
+ // (data_offset - *offset) is either 8 or 16
+ off64_t chunk_data_size = chunk_size - (data_offset - *offset);
+ if (chunk_data_size < 0) {
+ ALOGE("b/23540914");
+ return ERROR_MALFORMED;
+ }
+ if (chunk_type != FOURCC('m', 'd', 'a', 't') && chunk_data_size > kMaxAtomSize) {
+ char errMsg[100];
+ sprintf(errMsg, "%s atom has size %" PRId64, chunk, chunk_data_size);
+ ALOGE("%s (b/28615448)", errMsg);
+ android_errorWriteWithInfoLog(0x534e4554, "28615448", -1, errMsg, strlen(errMsg));
+ return ERROR_MALFORMED;
+ }
+
+ if (chunk_type != FOURCC('c', 'p', 'r', 't')
+ && chunk_type != FOURCC('c', 'o', 'v', 'r')
+ && mPath.size() == 5 && underMetaDataPath(mPath)) {
+ off64_t stop_offset = *offset + chunk_size;
+ *offset = data_offset;
+ while (*offset < stop_offset) {
+ status_t err = parseChunk(offset, depth + 1);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (*offset != stop_offset) {
+ return ERROR_MALFORMED;
+ }
+
+ return OK;
+ }
+
+ switch(chunk_type) {
+ case FOURCC('m', 'o', 'o', 'v'):
+ case FOURCC('t', 'r', 'a', 'k'):
+ case FOURCC('m', 'd', 'i', 'a'):
+ case FOURCC('m', 'i', 'n', 'f'):
+ case FOURCC('d', 'i', 'n', 'f'):
+ case FOURCC('s', 't', 'b', 'l'):
+ case FOURCC('m', 'v', 'e', 'x'):
+ case FOURCC('m', 'o', 'o', 'f'):
+ case FOURCC('t', 'r', 'a', 'f'):
+ case FOURCC('m', 'f', 'r', 'a'):
+ case FOURCC('u', 'd', 't', 'a'):
+ case FOURCC('i', 'l', 's', 't'):
+ case FOURCC('s', 'i', 'n', 'f'):
+ case FOURCC('s', 'c', 'h', 'i'):
+ case FOURCC('e', 'd', 't', 's'):
+ case FOURCC('w', 'a', 'v', 'e'):
+ {
+ if (chunk_type == FOURCC('m', 'o', 'o', 'v') && depth != 0) {
+ ALOGE("moov: depth %d", depth);
+ return ERROR_MALFORMED;
+ }
+
+ if (chunk_type == FOURCC('m', 'o', 'o', 'v') && mInitCheck == OK) {
+ ALOGE("duplicate moov");
+ return ERROR_MALFORMED;
+ }
+
+ if (chunk_type == FOURCC('m', 'o', 'o', 'f') && !mMoofFound) {
+ // store the offset of the first segment
+ mMoofFound = true;
+ mMoofOffset = *offset;
+ }
+
+ if (chunk_type == FOURCC('s', 't', 'b', 'l')) {
+ ALOGV("sampleTable chunk is %" PRIu64 " bytes long.", chunk_size);
+
+ if (mDataSource->flags()
+ & (DataSourceBase::kWantsPrefetching
+ | DataSourceBase::kIsCachingDataSource)) {
+ CachedRangedDataSource *cachedSource =
+ new CachedRangedDataSource(mDataSource);
+
+ if (cachedSource->setCachedRange(
+ *offset, chunk_size,
+ mCachedSource != NULL /* assume ownership on success */) == OK) {
+ mDataSource = mCachedSource = cachedSource;
+ } else {
+ delete cachedSource;
+ }
+ }
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ mLastTrack->sampleTable = new SampleTable(mDataSource);
+ }
+
+ bool isTrack = false;
+ if (chunk_type == FOURCC('t', 'r', 'a', 'k')) {
+ if (depth != 1) {
+ ALOGE("trak: depth %d", depth);
+ return ERROR_MALFORMED;
+ }
+ isTrack = true;
+
+ ALOGV("adding new track");
+ Track *track = new Track;
+ track->next = NULL;
+ if (mLastTrack) {
+ mLastTrack->next = track;
+ } else {
+ mFirstTrack = track;
+ }
+ mLastTrack = track;
+
+ track->includes_expensive_metadata = false;
+ track->skipTrack = false;
+ track->timescale = 0;
+ track->meta.setCString(kKeyMIMEType, "application/octet-stream");
+ track->has_elst = false;
+ track->subsample_encryption = false;
+ }
+
+ off64_t stop_offset = *offset + chunk_size;
+ *offset = data_offset;
+ while (*offset < stop_offset) {
+ status_t err = parseChunk(offset, depth + 1);
+ if (err != OK) {
+ if (isTrack) {
+ mLastTrack->skipTrack = true;
+ break;
+ }
+ return err;
+ }
+ }
+
+ if (*offset != stop_offset) {
+ return ERROR_MALFORMED;
+ }
+
+ if (isTrack) {
+ int32_t trackId;
+ // There must be exactly one track header per track.
+ if (!mLastTrack->meta.findInt32(kKeyTrackID, &trackId)) {
+ mLastTrack->skipTrack = true;
+ }
+
+ status_t err = verifyTrack(mLastTrack);
+ if (err != OK) {
+ mLastTrack->skipTrack = true;
+ }
+
+ if (mLastTrack->skipTrack) {
+ ALOGV("skipping this track...");
+ Track *cur = mFirstTrack;
+
+ if (cur == mLastTrack) {
+ delete cur;
+ mFirstTrack = mLastTrack = NULL;
+ } else {
+ while (cur && cur->next != mLastTrack) {
+ cur = cur->next;
+ }
+ if (cur) {
+ cur->next = NULL;
+ }
+ delete mLastTrack;
+ mLastTrack = cur;
+ }
+
+ return OK;
+ }
+ } else if (chunk_type == FOURCC('m', 'o', 'o', 'v')) {
+ mInitCheck = OK;
+
+ return UNKNOWN_ERROR; // Return a dummy error.
+ }
+ break;
+ }
+
+ case FOURCC('s', 'c', 'h', 'm'):
+ {
+
+ *offset += chunk_size;
+ if (!mLastTrack) {
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t scheme_type;
+ if (mDataSource->readAt(data_offset + 4, &scheme_type, 4) < 4) {
+ return ERROR_IO;
+ }
+ scheme_type = ntohl(scheme_type);
+ int32_t mode = kCryptoModeUnencrypted;
+ switch(scheme_type) {
+ case FOURCC('c', 'b', 'c', '1'):
+ {
+ mode = kCryptoModeAesCbc;
+ break;
+ }
+ case FOURCC('c', 'b', 'c', 's'):
+ {
+ mode = kCryptoModeAesCbc;
+ mLastTrack->subsample_encryption = true;
+ break;
+ }
+ case FOURCC('c', 'e', 'n', 'c'):
+ {
+ mode = kCryptoModeAesCtr;
+ break;
+ }
+ case FOURCC('c', 'e', 'n', 's'):
+ {
+ mode = kCryptoModeAesCtr;
+ mLastTrack->subsample_encryption = true;
+ break;
+ }
+ }
+ if (mode != kCryptoModeUnencrypted) {
+ mLastTrack->meta.setInt32(kKeyCryptoMode, mode);
+ }
+ break;
+ }
+
+
+ case FOURCC('e', 'l', 's', 't'):
+ {
+ *offset += chunk_size;
+
+ if (!mLastTrack) {
+ return ERROR_MALFORMED;
+ }
+
+ // See 14496-12 8.6.6
+ uint8_t version;
+ if (mDataSource->readAt(data_offset, &version, 1) < 1) {
+ return ERROR_IO;
+ }
+
+ uint32_t entry_count;
+ if (!mDataSource->getUInt32(data_offset + 4, &entry_count)) {
+ return ERROR_IO;
+ }
+
+ if (entry_count != 1) {
+ // we only support a single entry at the moment, for gapless playback
+ ALOGW("ignoring edit list with %d entries", entry_count);
+ } else {
+ off64_t entriesoffset = data_offset + 8;
+ uint64_t segment_duration;
+ int64_t media_time;
+
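+ // Version 1 entries carry 64-bit segment_duration/media_time; version 0 carries 32-bit values.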
+ if (version == 1) {
+ if (!mDataSource->getUInt64(entriesoffset, &segment_duration) ||
+ !mDataSource->getUInt64(entriesoffset + 8, (uint64_t*)&media_time)) {
+ return ERROR_IO;
+ }
+ } else if (version == 0) {
+ uint32_t sd;
+ int32_t mt;
+ if (!mDataSource->getUInt32(entriesoffset, &sd) ||
+ !mDataSource->getUInt32(entriesoffset + 4, (uint32_t*)&mt)) {
+ return ERROR_IO;
+ }
+ segment_duration = sd;
+ media_time = mt;
+ } else {
+ return ERROR_IO;
+ }
+
+ // save these for later, because the elst atom might precede
+ // the atoms that actually give us the duration and sample rate
+ // needed to calculate the padding and delay values
+ mLastTrack->has_elst = true;
+ mLastTrack->elst_media_time = media_time;
+ mLastTrack->elst_segment_duration = segment_duration;
+ }
+ break;
+ }
+
+ case FOURCC('f', 'r', 'm', 'a'):
+ {
+ *offset += chunk_size;
+
+ uint32_t original_fourcc;
+ if (mDataSource->readAt(data_offset, &original_fourcc, 4) < 4) {
+ return ERROR_IO;
+ }
+ original_fourcc = ntohl(original_fourcc);
+ ALOGV("read original format: %d", original_fourcc);
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ mLastTrack->meta.setCString(kKeyMIMEType, FourCC2MIME(original_fourcc));
+ uint32_t num_channels = 0;
+ uint32_t sample_rate = 0;
+ if (AdjustChannelsAndRate(original_fourcc, &num_channels, &sample_rate)) {
+ mLastTrack->meta.setInt32(kKeyChannelCount, num_channels);
+ mLastTrack->meta.setInt32(kKeySampleRate, sample_rate);
+ }
+ break;
+ }
+
+ case FOURCC('t', 'e', 'n', 'c'):
+ {
+ *offset += chunk_size;
+
+ if (chunk_size < 32) {
+ return ERROR_MALFORMED;
+ }
+
+ // tenc box contains 1 byte version, 3 byte flags, 3 byte default algorithm id, one byte
+ // default IV size, 16 bytes default KeyID
+ // (ISO 23001-7)
+
+ uint8_t version;
+ if (mDataSource->readAt(data_offset, &version, sizeof(version))
+ < (ssize_t)sizeof(version)) {
+ return ERROR_IO;
+ }
+
+ uint8_t buf[4];
+ memset(buf, 0, 4);
+ if (mDataSource->readAt(data_offset + 4, buf + 1, 3) < 3) {
+ return ERROR_IO;
+ }
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t defaultEncryptedByteBlock = 0;
+ uint8_t defaultSkipByteBlock = 0;
+ uint32_t defaultAlgorithmId = ntohl(*((int32_t*)buf));
+ if (version == 1) {
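+ // The version 1 pattern byte packs the default crypt byte block count (high nibble)
+ // and the default skip byte block count (low nibble).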
+ uint32_t pattern = buf[2];
+ defaultEncryptedByteBlock = pattern >> 4;
+ defaultSkipByteBlock = pattern & 0xf;
+ if (defaultEncryptedByteBlock == 0 && defaultSkipByteBlock == 0) {
+ // use (1,0) to mean "encrypt everything"
+ defaultEncryptedByteBlock = 1;
+ }
+ } else if (mLastTrack->subsample_encryption) {
+ ALOGW("subsample_encryption should be version 1");
+ } else if (defaultAlgorithmId > 1) {
+ // only 0 (clear) and 1 (AES-128) are valid
+ ALOGW("defaultAlgorithmId: %u is a reserved value", defaultAlgorithmId);
+ defaultAlgorithmId = 1;
+ }
+
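+ // Read the single default IV size byte into the last position of buf so ntohl() below yields its value.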
+ memset(buf, 0, 4);
+ if (mDataSource->readAt(data_offset + 7, buf + 3, 1) < 1) {
+ return ERROR_IO;
+ }
+ uint32_t defaultIVSize = ntohl(*((int32_t*)buf));
+
+ if (defaultAlgorithmId == 0 && defaultIVSize != 0) {
+ // unencrypted (algorithm id 0) data must have a 0 IV size
+ return ERROR_MALFORMED;
+ } else if (defaultIVSize != 0 &&
+ defaultIVSize != 8 &&
+ defaultIVSize != 16) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t defaultKeyId[16];
+
+ if (mDataSource->readAt(data_offset + 8, &defaultKeyId, 16) < 16) {
+ return ERROR_IO;
+ }
+
+ sp<ABuffer> defaultConstantIv;
+ if (defaultAlgorithmId != 0 && defaultIVSize == 0) {
+
+ uint8_t ivlength;
+ if (mDataSource->readAt(data_offset + 24, &ivlength, sizeof(ivlength))
+ < (ssize_t)sizeof(ivlength)) {
+ return ERROR_IO;
+ }
+
+ if (ivlength != 8 && ivlength != 16) {
+ ALOGW("unsupported IV length: %u", ivlength);
+ return ERROR_MALFORMED;
+ }
+
+ defaultConstantIv = new ABuffer(ivlength);
+ if (mDataSource->readAt(data_offset + 25, defaultConstantIv->data(), ivlength)
+ < (ssize_t)ivlength) {
+ return ERROR_IO;
+ }
+
+ defaultConstantIv->setRange(0, ivlength);
+ }
+
+ int32_t tmpAlgorithmId;
+ if (!mLastTrack->meta.findInt32(kKeyCryptoMode, &tmpAlgorithmId)) {
+ mLastTrack->meta.setInt32(kKeyCryptoMode, defaultAlgorithmId);
+ }
+
+ mLastTrack->meta.setInt32(kKeyCryptoDefaultIVSize, defaultIVSize);
+ mLastTrack->meta.setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16);
+ mLastTrack->meta.setInt32(kKeyEncryptedByteBlock, defaultEncryptedByteBlock);
+ mLastTrack->meta.setInt32(kKeySkipByteBlock, defaultSkipByteBlock);
+ if (defaultConstantIv != NULL) {
+ mLastTrack->meta.setData(kKeyCryptoIV, 'dciv', defaultConstantIv->data(), defaultConstantIv->size());
+ }
+ break;
+ }
+
+ case FOURCC('t', 'k', 'h', 'd'):
+ {
+ *offset += chunk_size;
+
+ status_t err;
+ if ((err = parseTrackHeader(data_offset, chunk_data_size)) != OK) {
+ return err;
+ }
+
+ break;
+ }
+
+ case FOURCC('t', 'r', 'e', 'f'):
+ {
+ off64_t stop_offset = *offset + chunk_size;
+ *offset = data_offset;
+ while (*offset < stop_offset) {
+ status_t err = parseChunk(offset, depth + 1);
+ if (err != OK) {
+ return err;
+ }
+ }
+ if (*offset != stop_offset) {
+ return ERROR_MALFORMED;
+ }
+ break;
+ }
+
+ case FOURCC('t', 'h', 'm', 'b'):
+ {
+ *offset += chunk_size;
+
+ if (mLastTrack != NULL) {
+ // Skip thumbnail track for now since we don't have an
+ // API to retrieve it yet.
+ // The thumbnail track can't be accessed by negative index or time,
+ // because each timed sample has its own corresponding thumbnail
+ // in the thumbnail track. We'll need a dedicated API to retrieve
+ // thumbnail at time instead.
+ mLastTrack->skipTrack = true;
+ }
+
+ break;
+ }
+
+ case FOURCC('p', 's', 's', 'h'):
+ {
+ *offset += chunk_size;
+
+ PsshInfo pssh;
+
+ if (mDataSource->readAt(data_offset + 4, &pssh.uuid, 16) < 16) {
+ return ERROR_IO;
+ }
+
+ uint32_t psshdatalen = 0;
+ if (mDataSource->readAt(data_offset + 20, &psshdatalen, 4) < 4) {
+ return ERROR_IO;
+ }
+ pssh.datalen = ntohl(psshdatalen);
+ ALOGV("pssh data size: %d", pssh.datalen);
+ if (chunk_size < 20 || pssh.datalen > chunk_size - 20) {
+ // pssh data length exceeds size of containing box
+ return ERROR_MALFORMED;
+ }
+
+ pssh.data = new (std::nothrow) uint8_t[pssh.datalen];
+ if (pssh.data == NULL) {
+ return ERROR_MALFORMED;
+ }
+ ALOGV("allocated pssh @ %p", pssh.data);
+ ssize_t requested = (ssize_t) pssh.datalen;
+ if (mDataSource->readAt(data_offset + 24, pssh.data, requested) < requested) {
+ delete[] pssh.data;
+ return ERROR_IO;
+ }
+ mPssh.push_back(pssh);
+
+ break;
+ }
+
+ case FOURCC('m', 'd', 'h', 'd'):
+ {
+ *offset += chunk_size;
+
+ if (chunk_data_size < 4 || mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t version;
+ if (mDataSource->readAt(
+ data_offset, &version, sizeof(version))
+ < (ssize_t)sizeof(version)) {
+ return ERROR_IO;
+ }
+
+ off64_t timescale_offset;
+
+ if (version == 1) {
+ timescale_offset = data_offset + 4 + 16;
+ } else if (version == 0) {
+ timescale_offset = data_offset + 4 + 8;
+ } else {
+ return ERROR_IO;
+ }
+
+ uint32_t timescale;
+ if (mDataSource->readAt(
+ timescale_offset, &timescale, sizeof(timescale))
+ < (ssize_t)sizeof(timescale)) {
+ return ERROR_IO;
+ }
+
+ if (!timescale) {
+ ALOGE("timescale should not be ZERO.");
+ return ERROR_MALFORMED;
+ }
+
+ mLastTrack->timescale = ntohl(timescale);
+
+ // 14496-12 says all ones means indeterminate, but some files seem to use
+ // 0 instead. We treat both the same.
+ int64_t duration = 0;
+ if (version == 1) {
+ if (mDataSource->readAt(
+ timescale_offset + 4, &duration, sizeof(duration))
+ < (ssize_t)sizeof(duration)) {
+ return ERROR_IO;
+ }
+ if (duration != -1) {
+ duration = ntoh64(duration);
+ }
+ } else {
+ uint32_t duration32;
+ if (mDataSource->readAt(
+ timescale_offset + 4, &duration32, sizeof(duration32))
+ < (ssize_t)sizeof(duration32)) {
+ return ERROR_IO;
+ }
+ if (duration32 != 0xffffffff) {
+ duration = ntohl(duration32);
+ }
+ }
+ if (duration != 0 && mLastTrack->timescale != 0) {
+ mLastTrack->meta.setInt64(
+ kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
+ }
+
+ uint8_t lang[2];
+ off64_t lang_offset;
+ if (version == 1) {
+ lang_offset = timescale_offset + 4 + 8;
+ } else if (version == 0) {
+ lang_offset = timescale_offset + 4 + 4;
+ } else {
+ return ERROR_IO;
+ }
+
+ if (mDataSource->readAt(lang_offset, &lang, sizeof(lang))
+ < (ssize_t)sizeof(lang)) {
+ return ERROR_IO;
+ }
+
+ // To get the ISO-639-2/T three character language code
+ // 1 bit pad followed by 3 5-bits characters. Each character
+ // is packed as the difference between its ASCII value and 0x60.
+ char lang_code[4];
+ lang_code[0] = ((lang[0] >> 2) & 0x1f) + 0x60;
+ lang_code[1] = ((lang[0] & 0x3) << 3 | (lang[1] >> 5)) + 0x60;
+ lang_code[2] = (lang[1] & 0x1f) + 0x60;
+ lang_code[3] = '\0';
+
+ mLastTrack->meta.setCString(
+ kKeyMediaLanguage, lang_code);
+
+ break;
+ }
+
+ case FOURCC('s', 't', 's', 'd'):
+ {
+ uint8_t buffer[8];
+ if (chunk_data_size < (off64_t)sizeof(buffer)) {
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer, 8) < 8) {
+ return ERROR_IO;
+ }
+
+ if (U32_AT(buffer) != 0) {
+ // Should be version 0, flags 0.
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t entry_count = U32_AT(&buffer[4]);
+
+ if (entry_count > 1) {
+ // For 3GPP timed text, there could be multiple tx3g boxes containing
+ // multiple text display formats. These formats will be used to
+ // display the timed text.
+ // For encrypted files, there may also be more than one entry.
+ const char *mime;
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ CHECK(mLastTrack->meta.findCString(kKeyMIMEType, &mime));
+ if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) &&
+ strcasecmp(mime, "application/octet-stream")) {
+ // For now we only support a single type of media per track.
+ mLastTrack->skipTrack = true;
+ *offset += chunk_size;
+ break;
+ }
+ }
+ off64_t stop_offset = *offset + chunk_size;
+ *offset = data_offset + 8;
+ for (uint32_t i = 0; i < entry_count; ++i) {
+ status_t err = parseChunk(offset, depth + 1);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (*offset != stop_offset) {
+ return ERROR_MALFORMED;
+ }
+ break;
+ }
+ case FOURCC('m', 'e', 't', 't'):
+ {
+ *offset += chunk_size;
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ auto buffer = heapbuffer<uint8_t>(chunk_data_size);
+ if (buffer.get() == NULL) {
+ return NO_MEMORY;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer.get(), chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ String8 mimeFormat((const char *)(buffer.get()), chunk_data_size);
+ mLastTrack->meta.setCString(kKeyMIMEType, mimeFormat.string());
+
+ break;
+ }
+
+ case FOURCC('m', 'p', '4', 'a'):
+ case FOURCC('e', 'n', 'c', 'a'):
+ case FOURCC('s', 'a', 'm', 'r'):
+ case FOURCC('s', 'a', 'w', 'b'):
+ {
+ if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')
+ && depth >= 1 && mPath[depth - 1] == FOURCC('w', 'a', 'v', 'e')) {
+ // Ignore mp4a embedded in QT wave atom
+ *offset += chunk_size;
+ break;
+ }
+
+ uint8_t buffer[8 + 20];
+ if (chunk_data_size < (ssize_t)sizeof(buffer)) {
+ // Basic AudioSampleEntry size.
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer, sizeof(buffer)) < (ssize_t)sizeof(buffer)) {
+ return ERROR_IO;
+ }
+
+ uint16_t data_ref_index __unused = U16_AT(&buffer[6]);
+ uint16_t version = U16_AT(&buffer[8]);
+ uint32_t num_channels = U16_AT(&buffer[16]);
+
+ uint16_t sample_size = U16_AT(&buffer[18]);
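+ // The sample rate field in the AudioSampleEntry is 16.16 fixed point; keep the integer part.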
+ uint32_t sample_rate = U32_AT(&buffer[24]) >> 16;
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ off64_t stop_offset = *offset + chunk_size;
+ *offset = data_offset + sizeof(buffer);
+
+ if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')) {
+ if (version == 1) {
+ if (mDataSource->readAt(*offset, buffer, 16) < 16) {
+ return ERROR_IO;
+ }
+
+#if 0
+ U32_AT(buffer); // samples per packet
+ U32_AT(&buffer[4]); // bytes per packet
+ U32_AT(&buffer[8]); // bytes per frame
+ U32_AT(&buffer[12]); // bytes per sample
+#endif
+ *offset += 16;
+ } else if (version == 2) {
+ uint8_t v2buffer[36];
+ if (mDataSource->readAt(*offset, v2buffer, 36) < 36) {
+ return ERROR_IO;
+ }
+
+#if 0
+ U32_AT(v2buffer); // size of struct only
+ sample_rate = (uint32_t)U64_AT(&v2buffer[4]); // audio sample rate
+ num_channels = U32_AT(&v2buffer[12]); // num audio channels
+ U32_AT(&v2buffer[16]); // always 0x7f000000
+ sample_size = (uint16_t)U32_AT(&v2buffer[20]); // const bits per channel
+ U32_AT(&v2buffer[24]); // format specific flags
+ U32_AT(&v2buffer[28]); // const bytes per audio packet
+ U32_AT(&v2buffer[32]); // const LPCM frames per audio packet
+#endif
+ *offset += 36;
+ }
+ }
+
+ if (chunk_type != FOURCC('e', 'n', 'c', 'a')) {
+ // if the chunk type is enca, we'll get the type from the frma box later
+ mLastTrack->meta.setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
+ AdjustChannelsAndRate(chunk_type, &num_channels, &sample_rate);
+ }
+ ALOGV("*** coding='%s' %d channels, size %d, rate %d\n",
+ chunk, num_channels, sample_size, sample_rate);
+ mLastTrack->meta.setInt32(kKeyChannelCount, num_channels);
+ mLastTrack->meta.setInt32(kKeySampleRate, sample_rate);
+
+ while (*offset < stop_offset) {
+ status_t err = parseChunk(offset, depth + 1);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (*offset != stop_offset) {
+ return ERROR_MALFORMED;
+ }
+ break;
+ }
+
+ case FOURCC('m', 'p', '4', 'v'):
+ case FOURCC('e', 'n', 'c', 'v'):
+ case FOURCC('s', '2', '6', '3'):
+ case FOURCC('H', '2', '6', '3'):
+ case FOURCC('h', '2', '6', '3'):
+ case FOURCC('a', 'v', 'c', '1'):
+ case FOURCC('h', 'v', 'c', '1'):
+ case FOURCC('h', 'e', 'v', '1'):
+ {
+ uint8_t buffer[78];
+ if (chunk_data_size < (ssize_t)sizeof(buffer)) {
+ // Basic VideoSampleEntry size.
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer, sizeof(buffer)) < (ssize_t)sizeof(buffer)) {
+ return ERROR_IO;
+ }
+
+ uint16_t data_ref_index __unused = U16_AT(&buffer[6]);
+ uint16_t width = U16_AT(&buffer[6 + 18]);
+ uint16_t height = U16_AT(&buffer[6 + 20]);
+
+ // The video sample is not standard-compliant if it has invalid dimension.
+ // Use some default width and height value, and
+ // let the decoder figure out the actual width and height (and thus
+ // be prepared for INFO_FORMAT_CHANGED event).
+ if (width == 0) width = 352;
+ if (height == 0) height = 288;
+
+ // printf("*** coding='%s' width=%d height=%d\n",
+ // chunk, width, height);
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ if (chunk_type != FOURCC('e', 'n', 'c', 'v')) {
+ // if the chunk type is encv, we'll get the type from the frma box later
+ mLastTrack->meta.setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
+ }
+ mLastTrack->meta.setInt32(kKeyWidth, width);
+ mLastTrack->meta.setInt32(kKeyHeight, height);
+
+ off64_t stop_offset = *offset + chunk_size;
+ *offset = data_offset + sizeof(buffer);
+ while (*offset < stop_offset) {
+ status_t err = parseChunk(offset, depth + 1);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (*offset != stop_offset) {
+ return ERROR_MALFORMED;
+ }
+ break;
+ }
+
+ case FOURCC('s', 't', 'c', 'o'):
+ case FOURCC('c', 'o', '6', '4'):
+ {
+ if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL)) {
+ return ERROR_MALFORMED;
+ }
+
+ status_t err =
+ mLastTrack->sampleTable->setChunkOffsetParams(
+ chunk_type, data_offset, chunk_data_size);
+
+ *offset += chunk_size;
+
+ if (err != OK) {
+ return err;
+ }
+
+ break;
+ }
+
+ case FOURCC('s', 't', 's', 'c'):
+ {
+ if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+ return ERROR_MALFORMED;
+
+ status_t err =
+ mLastTrack->sampleTable->setSampleToChunkParams(
+ data_offset, chunk_data_size);
+
+ *offset += chunk_size;
+
+ if (err != OK) {
+ return err;
+ }
+
+ break;
+ }
+
+ case FOURCC('s', 't', 's', 'z'):
+ case FOURCC('s', 't', 'z', '2'):
+ {
+ if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL)) {
+ return ERROR_MALFORMED;
+ }
+
+ status_t err =
+ mLastTrack->sampleTable->setSampleSizeParams(
+ chunk_type, data_offset, chunk_data_size);
+
+ *offset += chunk_size;
+
+ if (err != OK) {
+ return err;
+ }
+
+ size_t max_size;
+ err = mLastTrack->sampleTable->getMaxSampleSize(&max_size);
+
+ if (err != OK) {
+ return err;
+ }
+
+ if (max_size != 0) {
+ // Assume that a given buffer only contains at most 10 chunks;
+ // each chunk, originally prefixed with a 2-byte length, will
+ // have a 4-byte header (0x00 0x00 0x00 0x01) after conversion,
+ // and thus will grow by 2 bytes per chunk.
+ if (max_size > SIZE_MAX - 10 * 2) {
+ ALOGE("max sample size too big: %zu", max_size);
+ return ERROR_MALFORMED;
+ }
+ mLastTrack->meta.setInt32(kKeyMaxInputSize, max_size + 10 * 2);
+ } else {
+ // No size was specified. Pick a conservatively large size.
+ uint32_t width, height;
+ if (!mLastTrack->meta.findInt32(kKeyWidth, (int32_t*)&width) ||
+ !mLastTrack->meta.findInt32(kKeyHeight,(int32_t*) &height)) {
+ ALOGE("No width or height, assuming worst case 1080p");
+ width = 1920;
+ height = 1080;
+ } else {
+ // A resolution was specified; check that it's not too big. The values below
+ // were chosen so that the calculations below don't cause overflows; they're
+ // not indicating that resolutions up to 32kx32k are actually supported.
+ if (width > 32768 || height > 32768) {
+ ALOGE("can't support %u x %u video", width, height);
+ return ERROR_MALFORMED;
+ }
+ }
+
+ const char *mime;
+ CHECK(mLastTrack->meta.findCString(kKeyMIMEType, &mime));
+ if (!strncmp(mime, "audio/", 6)) {
+ // for audio, use 128KB
+ max_size = 1024 * 128;
+ } else if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+ || !strcmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ // AVC & HEVC requires compression ratio of at least 2, and uses
+ // macroblocks
+ max_size = ((width + 15) / 16) * ((height + 15) / 16) * 192;
+ } else {
+ // For all other formats there is no minimum compression
+ // ratio. Use compression ratio of 1.
+ max_size = width * height * 3 / 2;
+ }
+ // HACK: allow 10% overhead
+ // TODO: read sample size from traf atom for fragmented MPEG4.
+ max_size += max_size / 10;
+ mLastTrack->meta.setInt32(kKeyMaxInputSize, max_size);
+ }
+
+ // NOTE: setting another piece of metadata invalidates any pointers (such as the
+ // mimetype) previously obtained, so don't cache them.
+ const char *mime;
+ CHECK(mLastTrack->meta.findCString(kKeyMIMEType, &mime));
+ // Calculate average frame rate.
+ if (!strncasecmp("video/", mime, 6)) {
+ size_t nSamples = mLastTrack->sampleTable->countSamples();
+ if (nSamples == 0) {
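+ // No entries in the sample table (e.g. fragmented MP4): fall back to the
+ // trex default sample duration for a nominal frame rate.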
+ int32_t trackId;
+ if (mLastTrack->meta.findInt32(kKeyTrackID, &trackId)) {
+ for (size_t i = 0; i < mTrex.size(); i++) {
+ Trex *t = &mTrex.editItemAt(i);
+ if (t->track_ID == (uint32_t) trackId) {
+ if (t->default_sample_duration > 0) {
+ int32_t frameRate =
+ mLastTrack->timescale / t->default_sample_duration;
+ mLastTrack->meta.setInt32(kKeyFrameRate, frameRate);
+ }
+ break;
+ }
+ }
+ }
+ } else {
+ int64_t durationUs;
+ if (mLastTrack->meta.findInt64(kKeyDuration, &durationUs)) {
+ if (durationUs > 0) {
+ int32_t frameRate = (nSamples * 1000000LL +
+ (durationUs >> 1)) / durationUs;
+ mLastTrack->meta.setInt32(kKeyFrameRate, frameRate);
+ }
+ }
+ ALOGV("setting frame count %zu", nSamples);
+ mLastTrack->meta.setInt32(kKeyFrameCount, nSamples);
+ }
+ }
+
+ break;
+ }
+
+ case FOURCC('s', 't', 't', 's'):
+ {
+ if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+ return ERROR_MALFORMED;
+
+ *offset += chunk_size;
+
+ status_t err =
+ mLastTrack->sampleTable->setTimeToSampleParams(
+ data_offset, chunk_data_size);
+
+ if (err != OK) {
+ return err;
+ }
+
+ break;
+ }
+
+ case FOURCC('c', 't', 't', 's'):
+ {
+ if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+ return ERROR_MALFORMED;
+
+ *offset += chunk_size;
+
+ status_t err =
+ mLastTrack->sampleTable->setCompositionTimeToSampleParams(
+ data_offset, chunk_data_size);
+
+ if (err != OK) {
+ return err;
+ }
+
+ break;
+ }
+
+ case FOURCC('s', 't', 's', 's'):
+ {
+ if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+ return ERROR_MALFORMED;
+
+ *offset += chunk_size;
+
+ status_t err =
+ mLastTrack->sampleTable->setSyncSampleParams(
+ data_offset, chunk_data_size);
+
+ if (err != OK) {
+ return err;
+ }
+
+ break;
+ }
+
+ // \xA9xyz
+ case FOURCC(0xA9, 'x', 'y', 'z'):
+ {
+ *offset += chunk_size;
+
+ // Best case the total data length inside "\xA9xyz" box would
+ // be 9, for instance "\xA9xyz" + "\x00\x05\x15\xc7" + "+0+0/",
+ // where "\x00\x05" is the text string length with value = 5,
+ // "\0x15\xc7" is the language code = en, and "+0+0/" is a
+ // location (string) value with longitude = 0 and latitude = 0.
+ // Since some devices encountered in the wild omit the trailing
+ // slash, we'll allow that.
+ if (chunk_data_size < 8) { // 8 instead of 9 to allow for missing /
+ return ERROR_MALFORMED;
+ }
+
+ uint16_t len;
+ if (!mDataSource->getUInt16(data_offset, &len)) {
+ return ERROR_IO;
+ }
+
+ // allow "+0+0" without trailing slash
+ if (len < 4 || len > chunk_data_size - 4) {
+ return ERROR_MALFORMED;
+ }
+ // The location string following the language code is formatted
+ // according to ISO 6709:2008 (https://en.wikipedia.org/wiki/ISO_6709).
+ // Allocate 2 extra bytes, in case we need to add a trailing slash,
+ // and to add a terminating 0.
+ std::unique_ptr<char[]> buffer(new (std::nothrow) char[len+2]());
+ if (!buffer) {
+ return NO_MEMORY;
+ }
+
+ if (mDataSource->readAt(
+ data_offset + 4, &buffer[0], len) < len) {
+ return ERROR_IO;
+ }
+
+ len = strlen(&buffer[0]);
+ if (len < 4) {
+ return ERROR_MALFORMED;
+ }
+ // Add a trailing slash if there wasn't one.
+ if (buffer[len - 1] != '/') {
+ buffer[len] = '/';
+ }
+ mFileMetaData.setCString(kKeyLocation, &buffer[0]);
+ break;
+ }
+
+ case FOURCC('e', 's', 'd', 's'):
+ {
+ *offset += chunk_size;
+
+ if (chunk_data_size < 4) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t buffer[256];
+ if (chunk_data_size > (off64_t)sizeof(buffer)) {
+ return ERROR_BUFFER_TOO_SMALL;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer, chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ if (U32_AT(buffer) != 0) {
+ // Should be version 0, flags 0.
+ return ERROR_MALFORMED;
+ }
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ mLastTrack->meta.setData(
+ kKeyESDS, kTypeESDS, &buffer[4], chunk_data_size - 4);
+
+ if (mPath.size() >= 2
+ && mPath[mPath.size() - 2] == FOURCC('m', 'p', '4', 'a')) {
+ // Information from the ESDS must be relied on for proper
+ // setup of sample rate and channel count for MPEG4 Audio.
+ // The generic header appears to only contain generic
+ // information...
+
+ status_t err = updateAudioTrackInfoFromESDS_MPEG4Audio(
+ &buffer[4], chunk_data_size - 4);
+
+ if (err != OK) {
+ return err;
+ }
+ }
+ if (mPath.size() >= 2
+ && mPath[mPath.size() - 2] == FOURCC('m', 'p', '4', 'v')) {
+ // Check if the video is MPEG2
+ ESDS esds(&buffer[4], chunk_data_size - 4);
+
+ uint8_t objectTypeIndication;
+ if (esds.getObjectTypeIndication(&objectTypeIndication) == OK) {
+ if (objectTypeIndication >= 0x60 && objectTypeIndication <= 0x65) {
+ mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
+ }
+ }
+ }
+ break;
+ }
+
+ case FOURCC('b', 't', 'r', 't'):
+ {
+ *offset += chunk_size;
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t buffer[12];
+ if (chunk_data_size != sizeof(buffer)) {
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer, chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ uint32_t maxBitrate = U32_AT(&buffer[4]);
+ uint32_t avgBitrate = U32_AT(&buffer[8]);
+ if (maxBitrate > 0 && maxBitrate < INT32_MAX) {
+ mLastTrack->meta.setInt32(kKeyMaxBitRate, (int32_t)maxBitrate);
+ }
+ if (avgBitrate > 0 && avgBitrate < INT32_MAX) {
+ mLastTrack->meta.setInt32(kKeyBitRate, (int32_t)avgBitrate);
+ }
+ break;
+ }
+
+ case FOURCC('a', 'v', 'c', 'C'):
+ {
+ *offset += chunk_size;
+
+ auto buffer = heapbuffer<uint8_t>(chunk_data_size);
+
+ if (buffer.get() == NULL) {
+ ALOGE("b/28471206");
+ return NO_MEMORY;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer.get(), chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ mLastTrack->meta.setData(
+ kKeyAVCC, kTypeAVCC, buffer.get(), chunk_data_size);
+
+ break;
+ }
+ case FOURCC('h', 'v', 'c', 'C'):
+ {
+ auto buffer = heapbuffer<uint8_t>(chunk_data_size);
+
+ if (buffer.get() == NULL) {
+ ALOGE("b/28471206");
+ return NO_MEMORY;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer.get(), chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ mLastTrack->meta.setData(
+ kKeyHVCC, kTypeHVCC, buffer.get(), chunk_data_size);
+
+ *offset += chunk_size;
+ break;
+ }
+
+ case FOURCC('d', '2', '6', '3'):
+ {
+ *offset += chunk_size;
+ /*
+ * d263 contains a fixed 7 bytes part:
+ * vendor - 4 bytes
+ * version - 1 byte
+ * level - 1 byte
+ * profile - 1 byte
+ * optionally, "d263" box itself may contain a 16-byte
+ * bit rate box (bitr)
+ * average bit rate - 4 bytes
+ * max bit rate - 4 bytes
+ */
+ char buffer[23];
+ if (chunk_data_size != 7 &&
+ chunk_data_size != 23) {
+ ALOGE("Incorrect D263 box size %lld", (long long)chunk_data_size);
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer, chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ mLastTrack->meta.setData(kKeyD263, kTypeD263, buffer, chunk_data_size);
+
+ break;
+ }
+
+ case FOURCC('m', 'e', 't', 'a'):
+ {
+ off64_t stop_offset = *offset + chunk_size;
+ *offset = data_offset;
+ bool isParsingMetaKeys = underQTMetaPath(mPath, 2);
+ if (!isParsingMetaKeys) {
+ uint8_t buffer[4];
+ if (chunk_data_size < (off64_t)sizeof(buffer)) {
+ *offset = stop_offset;
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer, 4) < 4) {
+ *offset = stop_offset;
+ return ERROR_IO;
+ }
+
+ if (U32_AT(buffer) != 0) {
+ // Should be version 0, flags 0.
+
+ // If it's not, let's assume this is one of those
+ // apparently malformed chunks that have no flags and
+ // completely different semantics from what's in the
+ // MPEG4 specs, and skip it.
+ *offset = stop_offset;
+ return OK;
+ }
+ *offset += sizeof(buffer);
+ }
+
+ while (*offset < stop_offset) {
+ status_t err = parseChunk(offset, depth + 1);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (*offset != stop_offset) {
+ return ERROR_MALFORMED;
+ }
+ break;
+ }
+
+ case FOURCC('i', 'l', 'o', 'c'):
+ case FOURCC('i', 'i', 'n', 'f'):
+ case FOURCC('i', 'p', 'r', 'p'):
+ case FOURCC('p', 'i', 't', 'm'):
+ case FOURCC('i', 'd', 'a', 't'):
+ case FOURCC('i', 'r', 'e', 'f'):
+ case FOURCC('i', 'p', 'r', 'o'):
+ {
+ if (mIsHeif) {
+ if (mItemTable == NULL) {
+ mItemTable = new ItemTable(mDataSource);
+ }
+ status_t err = mItemTable->parse(
+ chunk_type, data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
+ }
+ *offset += chunk_size;
+ break;
+ }
+
+ case FOURCC('m', 'e', 'a', 'n'):
+ case FOURCC('n', 'a', 'm', 'e'):
+ case FOURCC('d', 'a', 't', 'a'):
+ {
+ *offset += chunk_size;
+
+ if (mPath.size() == 6 && underMetaDataPath(mPath)) {
+ status_t err = parseITunesMetaData(data_offset, chunk_data_size);
+
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ break;
+ }
+
+ case FOURCC('m', 'v', 'h', 'd'):
+ {
+ *offset += chunk_size;
+
+ if (depth != 1) {
+ ALOGE("mvhd: depth %d", depth);
+ return ERROR_MALFORMED;
+ }
+ if (chunk_data_size < 32) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t header[32];
+ if (mDataSource->readAt(
+ data_offset, header, sizeof(header))
+ < (ssize_t)sizeof(header)) {
+ return ERROR_IO;
+ }
+
+ uint64_t creationTime;
+ uint64_t duration = 0;
+ if (header[0] == 1) {
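+ // version 1: 64-bit creation time and duration with a 32-bit timescale;
+ // the else branch below handles version 0's 32-bit fields.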
+ creationTime = U64_AT(&header[4]);
+ mHeaderTimescale = U32_AT(&header[20]);
+ duration = U64_AT(&header[24]);
+ if (duration == 0xffffffffffffffff) {
+ duration = 0;
+ }
+ } else if (header[0] != 0) {
+ return ERROR_MALFORMED;
+ } else {
+ creationTime = U32_AT(&header[4]);
+ mHeaderTimescale = U32_AT(&header[12]);
+ uint32_t d32 = U32_AT(&header[16]);
+ if (d32 == 0xffffffff) {
+ d32 = 0;
+ }
+ duration = d32;
+ }
+ if (duration != 0 && mHeaderTimescale != 0 && duration < UINT64_MAX / 1000000) {
+ mFileMetaData.setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
+ }
+
+ String8 s;
+ if (convertTimeToDate(creationTime, &s)) {
+ mFileMetaData.setCString(kKeyDate, s.string());
+ }
+
+
+ break;
+ }
+
+ case FOURCC('m', 'e', 'h', 'd'):
+ {
+ *offset += chunk_size;
+
+ if (chunk_data_size < 8) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t flags[4];
+ if (mDataSource->readAt(
+ data_offset, flags, sizeof(flags))
+ < (ssize_t)sizeof(flags)) {
+ return ERROR_IO;
+ }
+
+ uint64_t duration = 0;
+ if (flags[0] == 1) {
+ // 64 bit
+ if (chunk_data_size < 12) {
+ return ERROR_MALFORMED;
+ }
+ mDataSource->getUInt64(data_offset + 4, &duration);
+ if (duration == 0xffffffffffffffff) {
+ duration = 0;
+ }
+ } else if (flags[0] == 0) {
+ // 32 bit
+ uint32_t d32;
+ mDataSource->getUInt32(data_offset + 4, &d32);
+ if (d32 == 0xffffffff) {
+ d32 = 0;
+ }
+ duration = d32;
+ } else {
+ return ERROR_MALFORMED;
+ }
+
+ if (duration != 0 && mHeaderTimescale != 0) {
+ mFileMetaData.setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
+ }
+
+ break;
+ }
+
+ case FOURCC('m', 'd', 'a', 't'):
+ {
+ mMdatFound = true;
+
+ *offset += chunk_size;
+ break;
+ }
+
+ case FOURCC('h', 'd', 'l', 'r'):
+ {
+ *offset += chunk_size;
+
+ if (underQTMetaPath(mPath, 3)) {
+ break;
+ }
+
+ uint32_t buffer;
+ if (mDataSource->readAt(
+ data_offset + 8, &buffer, 4) < 4) {
+ return ERROR_IO;
+ }
+
+ uint32_t type = ntohl(buffer);
+ // For the 3GPP file format, the handler-type within the 'hdlr' box
+ // shall be 'text'. We also want to support 'sbtl' handler type
+ // for a practical reason as various MPEG4 containers use it.
+ if (type == FOURCC('t', 'e', 'x', 't') || type == FOURCC('s', 'b', 't', 'l')) {
+ if (mLastTrack != NULL) {
+ mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_TEXT_3GPP);
+ }
+ }
+
+ break;
+ }
+
+ case FOURCC('k', 'e', 'y', 's'):
+ {
+ *offset += chunk_size;
+
+ if (underQTMetaPath(mPath, 3)) {
+ status_t err = parseQTMetaKey(data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
+ }
+ break;
+ }
+
+ case FOURCC('t', 'r', 'e', 'x'):
+ {
+ *offset += chunk_size;
+
+ if (chunk_data_size < 24) {
+ return ERROR_IO;
+ }
+ Trex trex;
+ if (!mDataSource->getUInt32(data_offset + 4, &trex.track_ID) ||
+ !mDataSource->getUInt32(data_offset + 8, &trex.default_sample_description_index) ||
+ !mDataSource->getUInt32(data_offset + 12, &trex.default_sample_duration) ||
+ !mDataSource->getUInt32(data_offset + 16, &trex.default_sample_size) ||
+ !mDataSource->getUInt32(data_offset + 20, &trex.default_sample_flags)) {
+ return ERROR_IO;
+ }
+ mTrex.add(trex);
+ break;
+ }
+
+ case FOURCC('t', 'x', '3', 'g'):
+ {
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ uint32_t type;
+ const void *data;
+ size_t size = 0;
+ if (!mLastTrack->meta.findData(
+ kKeyTextFormatData, &type, &data, &size)) {
+ size = 0;
+ }
+
+ if ((chunk_size > SIZE_MAX) || (SIZE_MAX - chunk_size <= size)) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t *buffer = new (std::nothrow) uint8_t[size + chunk_size];
+ if (buffer == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ if (size > 0) {
+ memcpy(buffer, data, size);
+ }
+
+ if ((size_t)(mDataSource->readAt(*offset, buffer + size, chunk_size))
+ < chunk_size) {
+ delete[] buffer;
+ buffer = NULL;
+
+ // advance read pointer so we don't end up reading this again
+ *offset += chunk_size;
+ return ERROR_IO;
+ }
+
+ mLastTrack->meta.setData(
+ kKeyTextFormatData, 0, buffer, size + chunk_size);
+
+ delete[] buffer;
+
+ *offset += chunk_size;
+ break;
+ }
+
+ case FOURCC('c', 'o', 'v', 'r'):
+ {
+ *offset += chunk_size;
+
+ ALOGV("chunk_data_size = %" PRId64 " and data_offset = %" PRId64,
+ chunk_data_size, data_offset);
+
+ if (chunk_data_size < 0 || static_cast<uint64_t>(chunk_data_size) >= SIZE_MAX - 1) {
+ return ERROR_MALFORMED;
+ }
+ auto buffer = heapbuffer<uint8_t>(chunk_data_size);
+ if (buffer.get() == NULL) {
+ ALOGE("b/28471206");
+ return NO_MEMORY;
+ }
+ if (mDataSource->readAt(
+ data_offset, buffer.get(), chunk_data_size) != (ssize_t)chunk_data_size) {
+ return ERROR_IO;
+ }
+ const int kSkipBytesOfDataBox = 16;
+ if (chunk_data_size <= kSkipBytesOfDataBox) {
+ return ERROR_MALFORMED;
+ }
+
+ mFileMetaData.setData(
+ kKeyAlbumArt, MetaData::TYPE_NONE,
+ buffer.get() + kSkipBytesOfDataBox, chunk_data_size - kSkipBytesOfDataBox);
+
+ break;
+ }
+
+ case FOURCC('c', 'o', 'l', 'r'):
+ {
+ *offset += chunk_size;
+ // this must be in a VisualSampleEntry box under the Sample Description Box ('stsd')
+ // ignore otherwise
+ if (depth >= 2 && mPath[depth - 2] == FOURCC('s', 't', 's', 'd')) {
+ status_t err = parseColorInfo(data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ break;
+ }
+
+ case FOURCC('t', 'i', 't', 'l'):
+ case FOURCC('p', 'e', 'r', 'f'):
+ case FOURCC('a', 'u', 't', 'h'):
+ case FOURCC('g', 'n', 'r', 'e'):
+ case FOURCC('a', 'l', 'b', 'm'):
+ case FOURCC('y', 'r', 'r', 'c'):
+ {
+ *offset += chunk_size;
+
+ status_t err = parse3GPPMetaData(data_offset, chunk_data_size, depth);
+
+ if (err != OK) {
+ return err;
+ }
+
+ break;
+ }
+
+ case FOURCC('I', 'D', '3', '2'):
+ {
+ *offset += chunk_size;
+
+ if (chunk_data_size < 6) {
+ return ERROR_MALFORMED;
+ }
+
+ parseID3v2MetaData(data_offset + 6);
+
+ break;
+ }
+
+ case FOURCC('-', '-', '-', '-'):
+ {
+ mLastCommentMean.clear();
+ mLastCommentName.clear();
+ mLastCommentData.clear();
+ *offset += chunk_size;
+ break;
+ }
+
+ case FOURCC('s', 'i', 'd', 'x'):
+ {
+ status_t err = parseSegmentIndex(data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
+ *offset += chunk_size;
+ return UNKNOWN_ERROR; // stop parsing after sidx
+ }
+
+ case FOURCC('a', 'c', '-', '3'):
+ {
+ *offset += chunk_size;
+ return parseAC3SampleEntry(data_offset);
+ }
+
+ case FOURCC('f', 't', 'y', 'p'):
+ {
+ if (chunk_data_size < 8 || depth != 0) {
+ return ERROR_MALFORMED;
+ }
+
+ off64_t stop_offset = *offset + chunk_size;
+ uint32_t numCompatibleBrands = (chunk_data_size - 8) / 4;
+ std::set<uint32_t> brandSet;
+ for (size_t i = 0; i < numCompatibleBrands + 2; ++i) {
+ if (i == 1) {
+ // Skip this index, it refers to the minorVersion,
+ // not a brand.
+ continue;
+ }
+
+ uint32_t brand;
+ if (mDataSource->readAt(data_offset + 4 * i, &brand, 4) < 4) {
+ return ERROR_MALFORMED;
+ }
+
+ brand = ntohl(brand);
+ brandSet.insert(brand);
+ }
+
+ if (brandSet.count(FOURCC('q', 't', ' ', ' ')) > 0) {
+ mIsQT = true;
+ } else {
+ if (brandSet.count(FOURCC('m', 'i', 'f', '1')) > 0
+ && brandSet.count(FOURCC('h', 'e', 'i', 'c')) > 0) {
+ ALOGV("identified HEIF image");
+
+ mIsHeif = true;
+ brandSet.erase(FOURCC('m', 'i', 'f', '1'));
+ brandSet.erase(FOURCC('h', 'e', 'i', 'c'));
+ }
+
+ if (!brandSet.empty()) {
+ // This means that the file should have moov box.
+ // It could be any iso files (mp4, heifs, etc.)
+ mHasMoovBox = true;
+ if (mIsHeif) {
+ ALOGV("identified HEIF image with other tracks");
+ }
+ }
+ }
+
+ *offset = stop_offset;
+
+ break;
+ }
+
+ default:
+ {
+ // check if we're parsing 'ilst' for meta keys
+ // if so, treat type as a number (key-id).
+ if (underQTMetaPath(mPath, 3)) {
+ status_t err = parseQTMetaVal(chunk_type, data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ *offset += chunk_size;
+ break;
+ }
+ }
+
+ return OK;
+}
+
+status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
+ // skip 16 bytes:
+ // + 6-byte reserved,
+ // + 2-byte data reference index,
+ // + 8-byte reserved
+ offset += 16;
+ uint16_t channelCount;
+ if (!mDataSource->getUInt16(offset, &channelCount)) {
+ return ERROR_MALFORMED;
+ }
+ // skip 8 bytes:
+ // + 2-byte channelCount,
+ // + 2-byte sample size,
+ // + 4-byte reserved
+ offset += 8;
+ uint16_t sampleRate;
+ if (!mDataSource->getUInt16(offset, &sampleRate)) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read sample rate");
+ return ERROR_MALFORMED;
+ }
+
+ // skip 4 bytes:
+ // + 2-byte sampleRate,
+ // + 2-byte reserved
+ offset += 4;
+ return parseAC3SpecificBox(offset, sampleRate);
+}
+
+status_t MPEG4Extractor::parseAC3SpecificBox(
+ off64_t offset, uint16_t sampleRate) {
+ uint32_t size;
+ // + 4-byte size
+ // + 4-byte type
+ // + 3-byte payload
+ const uint32_t kAC3SpecificBoxSize = 11;
+ if (!mDataSource->getUInt32(offset, &size) || size < kAC3SpecificBoxSize) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read specific box size");
+ return ERROR_MALFORMED;
+ }
+
+ offset += 4;
+ uint32_t type;
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '3')) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 specific block: header not dac3");
+ return ERROR_MALFORMED;
+ }
+
+ offset += 4;
+ const uint32_t kAC3SpecificBoxPayloadSize = 3;
+ uint8_t chunk[kAC3SpecificBoxPayloadSize];
+ if (mDataSource->readAt(offset, chunk, sizeof(chunk)) != sizeof(chunk)) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 specific block: bitstream fields");
+ return ERROR_MALFORMED;
+ }
+
+ ABitReader br(chunk, sizeof(chunk));
+ static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+ static const unsigned sampleRateTable[] = {48000, 44100, 32000};
+
+ unsigned fscod = br.getBits(2);
+ if (fscod == 3) {
+ ALOGE("Incorrect fscod (3) in AC3 header");
+ return ERROR_MALFORMED;
+ }
+ unsigned boxSampleRate = sampleRateTable[fscod];
+ if (boxSampleRate != sampleRate) {
+ ALOGE("sample rate mismatch: boxSampleRate = %d, sampleRate = %d",
+ boxSampleRate, sampleRate);
+ return ERROR_MALFORMED;
+ }
+
+ unsigned bsid = br.getBits(5);
+ if (bsid > 8) {
+ ALOGW("Incorrect bsid in AC3 header. Possibly E-AC-3?");
+ return ERROR_MALFORMED;
+ }
+
+ // skip
+ unsigned bsmod __unused = br.getBits(3);
+
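+ // acmod selects the base channel configuration; lfeon adds one channel when the LFE track is present.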
+ unsigned acmod = br.getBits(3);
+ unsigned lfeon = br.getBits(1);
+ unsigned channelCount = channelCountTable[acmod] + lfeon;
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+ mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
+ mLastTrack->meta.setInt32(kKeyChannelCount, channelCount);
+ mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+ return OK;
+}
+
+status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
+ ALOGV("MPEG4Extractor::parseSegmentIndex");
+
+ if (size < 12) {
+ return -EINVAL;
+ }
+
+ uint32_t flags;
+ if (!mDataSource->getUInt32(offset, &flags)) {
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t version = flags >> 24;
+ flags &= 0xffffff;
+
+ ALOGV("sidx version %d", version);
+
+ uint32_t referenceId;
+ if (!mDataSource->getUInt32(offset + 4, &referenceId)) {
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t timeScale;
+ if (!mDataSource->getUInt32(offset + 8, &timeScale)) {
+ return ERROR_MALFORMED;
+ }
+ ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
+ if (timeScale == 0)
+ return ERROR_MALFORMED;
+
+ uint64_t earliestPresentationTime;
+ uint64_t firstOffset;
+
+ offset += 12;
+ size -= 12;
+
+ if (version == 0) {
+ if (size < 8) {
+ return -EINVAL;
+ }
+ uint32_t tmp;
+ if (!mDataSource->getUInt32(offset, &tmp)) {
+ return ERROR_MALFORMED;
+ }
+ earliestPresentationTime = tmp;
+ if (!mDataSource->getUInt32(offset + 4, &tmp)) {
+ return ERROR_MALFORMED;
+ }
+ firstOffset = tmp;
+ offset += 8;
+ size -= 8;
+ } else {
+ if (size < 16) {
+ return -EINVAL;
+ }
+ if (!mDataSource->getUInt64(offset, &earliestPresentationTime)) {
+ return ERROR_MALFORMED;
+ }
+ if (!mDataSource->getUInt64(offset + 8, &firstOffset)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 16;
+ size -= 16;
+ }
+ ALOGV("sidx pres/off: %" PRIu64 "/%" PRIu64, earliestPresentationTime, firstOffset);
+
+ if (size < 4) {
+ return -EINVAL;
+ }
+
+ uint16_t referenceCount;
+ if (!mDataSource->getUInt16(offset + 2, &referenceCount)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ size -= 4;
+ ALOGV("refcount: %d", referenceCount);
+
+ if (size < referenceCount * 12) {
+ return -EINVAL;
+ }
+
+ uint64_t total_duration = 0;
+ for (unsigned int i = 0; i < referenceCount; i++) {
+ uint32_t d1, d2, d3;
+
+ if (!mDataSource->getUInt32(offset, &d1) || // size
+ !mDataSource->getUInt32(offset + 4, &d2) || // duration
+ !mDataSource->getUInt32(offset + 8, &d3)) { // flags
+ return ERROR_MALFORMED;
+ }
+
+ if (d1 & 0x80000000) {
+ ALOGW("sub-sidx boxes not supported yet");
+ }
+ bool sap = d3 & 0x80000000;
+ uint32_t saptype = (d3 >> 28) & 7;
+ if (!sap || (saptype != 1 && saptype != 2)) {
+ // type 1 and 2 are sync samples
+ ALOGW("not a stream access point, or unsupported type: %08x", d3);
+ }
+ total_duration += d2;
+ offset += 12;
+ ALOGV(" item %d, %08x %08x %08x", i, d1, d2, d3);
+ SidxEntry se;
+ se.mSize = d1 & 0x7fffffff;
+ se.mDurationUs = 1000000LL * d2 / timeScale;
+ mSidxEntries.add(se);
+ }
+
+ uint64_t sidxDuration = total_duration * 1000000 / timeScale;
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ int64_t metaDuration;
+ if (!mLastTrack->meta.findInt64(kKeyDuration, &metaDuration) || metaDuration == 0) {
+ mLastTrack->meta.setInt64(kKeyDuration, sidxDuration);
+ }
+ return OK;
+}
+
+status_t MPEG4Extractor::parseQTMetaKey(off64_t offset, size_t size) {
+ if (size < 8) {
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t count;
+ if (!mDataSource->getUInt32(offset + 4, &count)) {
+ return ERROR_MALFORMED;
+ }
+
+ if (mMetaKeyMap.size() > 0) {
+ ALOGW("'keys' atom seen again, discarding existing entries");
+ mMetaKeyMap.clear();
+ }
+
+ off64_t keyOffset = offset + 8;
+ off64_t stopOffset = offset + size;
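+ // Each key entry is a 32-bit size, the fourcc 'mdta', then the key string; key indices are 1-based.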
+ for (size_t i = 1; i <= count; i++) {
+ if (keyOffset + 8 > stopOffset) {
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t keySize;
+ if (!mDataSource->getUInt32(keyOffset, &keySize)
+ || keySize < 8
+ || keyOffset + keySize > stopOffset) {
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t type;
+ if (!mDataSource->getUInt32(keyOffset + 4, &type)
+ || type != FOURCC('m', 'd', 't', 'a')) {
+ return ERROR_MALFORMED;
+ }
+
+ keySize -= 8;
+ keyOffset += 8;
+
+ auto keyData = heapbuffer<uint8_t>(keySize);
+ if (keyData.get() == NULL) {
+ return ERROR_MALFORMED;
+ }
+ if (mDataSource->readAt(
+ keyOffset, keyData.get(), keySize) < (ssize_t) keySize) {
+ return ERROR_MALFORMED;
+ }
+
+ AString key((const char *)keyData.get(), keySize);
+ mMetaKeyMap.add(i, key);
+
+ keyOffset += keySize;
+ }
+ return OK;
+}
+
+status_t MPEG4Extractor::parseQTMetaVal(
+ int32_t keyId, off64_t offset, size_t size) {
+ ssize_t index = mMetaKeyMap.indexOfKey(keyId);
+ if (index < 0) {
+ // the corresponding key is not present; treat as malformed
+ return ERROR_MALFORMED;
+ }
+
+ if (size <= 16) {
+ return ERROR_MALFORMED;
+ }
+ uint32_t dataSize;
+ if (!mDataSource->getUInt32(offset, &dataSize)
+ || dataSize > size || dataSize <= 16) {
+ return ERROR_MALFORMED;
+ }
+ uint32_t atomFourCC;
+ if (!mDataSource->getUInt32(offset + 4, &atomFourCC)
+ || atomFourCC != FOURCC('d', 'a', 't', 'a')) {
+ return ERROR_MALFORMED;
+ }
+ uint32_t dataType;
+ if (!mDataSource->getUInt32(offset + 8, &dataType)
+ || ((dataType & 0xff000000) != 0)) {
+ // not well-known type
+ return ERROR_MALFORMED;
+ }
+
+ dataSize -= 16;
+ offset += 16;
+
+ if (dataType == 23 && dataSize >= 4) {
+ // BE Float32
+ uint32_t val;
+ if (!mDataSource->getUInt32(offset, &val)) {
+ return ERROR_MALFORMED;
+ }
+ if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.capture.fps")) {
+ mFileMetaData.setFloat(kKeyCaptureFramerate, *(float *)&val);
+ }
+ } else if (dataType == 67 && dataSize >= 4) {
+ // BE signed int32
+ uint32_t val;
+ if (!mDataSource->getUInt32(offset, &val)) {
+ return ERROR_MALFORMED;
+ }
+ if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.video.temporal_layers_count")) {
+ mFileMetaData.setInt32(kKeyTemporalLayerCount, val);
+ }
+ } else {
+ // add more keys if needed
+ ALOGV("ignoring key: type %d, size %d", dataType, dataSize);
+ }
+
+ return OK;
+}
+
+status_t MPEG4Extractor::parseTrackHeader(
+ off64_t data_offset, off64_t data_size) {
+ if (data_size < 4) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t version;
+ if (mDataSource->readAt(data_offset, &version, 1) < 1) {
+ return ERROR_IO;
+ }
+
+ size_t dynSize = (version == 1) ? 36 : 24;
+
+ uint8_t buffer[36 + 60];
+
+ if (data_size != (off64_t)dynSize + 60) {
+ return ERROR_MALFORMED;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer, data_size) < (ssize_t)data_size) {
+ return ERROR_IO;
+ }
+
+ uint64_t ctime __unused, mtime __unused, duration __unused;
+ int32_t id;
+
+ if (version == 1) {
+ ctime = U64_AT(&buffer[4]);
+ mtime = U64_AT(&buffer[12]);
+ id = U32_AT(&buffer[20]);
+ duration = U64_AT(&buffer[28]);
+ } else if (version == 0) {
+ ctime = U32_AT(&buffer[4]);
+ mtime = U32_AT(&buffer[8]);
+ id = U32_AT(&buffer[12]);
+ duration = U32_AT(&buffer[20]);
+ } else {
+ return ERROR_UNSUPPORTED;
+ }
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ mLastTrack->meta.setInt32(kKeyTrackID, id);
+
+ size_t matrixOffset = dynSize + 16;
+ int32_t a00 = U32_AT(&buffer[matrixOffset]);
+ int32_t a01 = U32_AT(&buffer[matrixOffset + 4]);
+ int32_t a10 = U32_AT(&buffer[matrixOffset + 12]);
+ int32_t a11 = U32_AT(&buffer[matrixOffset + 16]);
+
+#if 0
+ int32_t dx = U32_AT(&buffer[matrixOffset + 8]);
+ int32_t dy = U32_AT(&buffer[matrixOffset + 20]);
+
+ ALOGI("x' = %.2f * x + %.2f * y + %.2f",
+ a00 / 65536.0f, a01 / 65536.0f, dx / 65536.0f);
+ ALOGI("y' = %.2f * x + %.2f * y + %.2f",
+ a10 / 65536.0f, a11 / 65536.0f, dy / 65536.0f);
+#endif
+
+ uint32_t rotationDegrees;
+
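+ // The tkhd transformation matrix entries are 16.16 fixed point; 0x10000 represents 1.0.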
+ static const int32_t kFixedOne = 0x10000;
+ if (a00 == kFixedOne && a01 == 0 && a10 == 0 && a11 == kFixedOne) {
+ // Identity, no rotation
+ rotationDegrees = 0;
+ } else if (a00 == 0 && a01 == kFixedOne && a10 == -kFixedOne && a11 == 0) {
+ rotationDegrees = 90;
+ } else if (a00 == 0 && a01 == -kFixedOne && a10 == kFixedOne && a11 == 0) {
+ rotationDegrees = 270;
+ } else if (a00 == -kFixedOne && a01 == 0 && a10 == 0 && a11 == -kFixedOne) {
+ rotationDegrees = 180;
+ } else {
+ ALOGW("We only support 0,90,180,270 degree rotation matrices");
+ rotationDegrees = 0;
+ }
+
+ if (rotationDegrees != 0) {
+ mLastTrack->meta.setInt32(kKeyRotation, rotationDegrees);
+ }
+
+ // Handle presentation display size, which could be different
+ // from the image size indicated by kKeyWidth and kKeyHeight.
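+ // These fields are 16.16 fixed point; keep only the integer part.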
+ uint32_t width = U32_AT(&buffer[dynSize + 52]);
+ uint32_t height = U32_AT(&buffer[dynSize + 56]);
+ mLastTrack->meta.setInt32(kKeyDisplayWidth, width >> 16);
+ mLastTrack->meta.setInt32(kKeyDisplayHeight, height >> 16);
+
+ return OK;
+}
+
+status_t MPEG4Extractor::parseITunesMetaData(off64_t offset, size_t size) {
+ if (size == 0) {
+ return OK;
+ }
+
+ if (size < 4 || size == SIZE_MAX) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
+ if (buffer == NULL) {
+ return ERROR_MALFORMED;
+ }
+ if (mDataSource->readAt(
+ offset, buffer, size) != (ssize_t)size) {
+ delete[] buffer;
+ buffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ uint32_t flags = U32_AT(buffer);
+
+ uint32_t metadataKey = 0;
+ char chunk[5];
+ MakeFourCCString(mPath[4], chunk);
+ ALOGV("meta: %s @ %lld", chunk, (long long)offset);
+ switch ((int32_t)mPath[4]) {
+ case FOURCC(0xa9, 'a', 'l', 'b'):
+ {
+ metadataKey = kKeyAlbum;
+ break;
+ }
+ case FOURCC(0xa9, 'A', 'R', 'T'):
+ {
+ metadataKey = kKeyArtist;
+ break;
+ }
+ case FOURCC('a', 'A', 'R', 'T'):
+ {
+ metadataKey = kKeyAlbumArtist;
+ break;
+ }
+ case FOURCC(0xa9, 'd', 'a', 'y'):
+ {
+ metadataKey = kKeyYear;
+ break;
+ }
+ case FOURCC(0xa9, 'n', 'a', 'm'):
+ {
+ metadataKey = kKeyTitle;
+ break;
+ }
+ case FOURCC(0xa9, 'w', 'r', 't'):
+ {
+ metadataKey = kKeyWriter;
+ break;
+ }
+ case FOURCC('c', 'o', 'v', 'r'):
+ {
+ metadataKey = kKeyAlbumArt;
+ break;
+ }
+ case FOURCC('g', 'n', 'r', 'e'):
+ {
+ metadataKey = kKeyGenre;
+ break;
+ }
+ case FOURCC(0xa9, 'g', 'e', 'n'):
+ {
+ metadataKey = kKeyGenre;
+ break;
+ }
+ case FOURCC('c', 'p', 'i', 'l'):
+ {
+ if (size == 9 && flags == 21) {
+ char tmp[16];
+ sprintf(tmp, "%d",
+ (int)buffer[size - 1]);
+
+ mFileMetaData.setCString(kKeyCompilation, tmp);
+ }
+ break;
+ }
+ case FOURCC('t', 'r', 'k', 'n'):
+ {
+ if (size == 16 && flags == 0) {
+ char tmp[16];
+ uint16_t* pTrack = (uint16_t*)&buffer[10];
+ uint16_t* pTotalTracks = (uint16_t*)&buffer[12];
+ sprintf(tmp, "%d/%d", ntohs(*pTrack), ntohs(*pTotalTracks));
+
+ mFileMetaData.setCString(kKeyCDTrackNumber, tmp);
+ }
+ break;
+ }
+ case FOURCC('d', 'i', 's', 'k'):
+ {
+ if ((size == 14 || size == 16) && flags == 0) {
+ char tmp[16];
+ uint16_t* pDisc = (uint16_t*)&buffer[10];
+ uint16_t* pTotalDiscs = (uint16_t*)&buffer[12];
+ sprintf(tmp, "%d/%d", ntohs(*pDisc), ntohs(*pTotalDiscs));
+
+ mFileMetaData.setCString(kKeyDiscNumber, tmp);
+ }
+ break;
+ }
+ case FOURCC('-', '-', '-', '-'):
+ {
+ buffer[size] = '\0';
+ switch (mPath[5]) {
+ case FOURCC('m', 'e', 'a', 'n'):
+ mLastCommentMean.setTo((const char *)buffer + 4);
+ break;
+ case FOURCC('n', 'a', 'm', 'e'):
+ mLastCommentName.setTo((const char *)buffer + 4);
+ break;
+ case FOURCC('d', 'a', 't', 'a'):
+ if (size < 8) {
+ delete[] buffer;
+ buffer = NULL;
+ ALOGE("b/24346430");
+ return ERROR_MALFORMED;
+ }
+ mLastCommentData.setTo((const char *)buffer + 8);
+ break;
+ }
+
+ // Once we have a set of mean/name/data info, go ahead and process
+ // it to see if it's something we are interested in. Whether or not
+ // we are interested in the specific tag, make sure to clear out
+ // the set so we can be ready to process another tuple should one
+ // show up later in the file.
+ if ((mLastCommentMean.length() != 0) &&
+ (mLastCommentName.length() != 0) &&
+ (mLastCommentData.length() != 0)) {
+
+ if (mLastCommentMean == "com.apple.iTunes"
+ && mLastCommentName == "iTunSMPB") {
+ int32_t delay, padding;
+ if (sscanf(mLastCommentData,
+ " %*x %x %x %*x", &delay, &padding) == 2) {
+ if (mLastTrack == NULL) {
+ delete[] buffer;
+ return ERROR_MALFORMED;
+ }
+
+ mLastTrack->meta.setInt32(kKeyEncoderDelay, delay);
+ mLastTrack->meta.setInt32(kKeyEncoderPadding, padding);
+ }
+ }
+
+ mLastCommentMean.clear();
+ mLastCommentName.clear();
+ mLastCommentData.clear();
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (size >= 8 && metadataKey && !mFileMetaData.hasData(metadataKey)) {
+ if (metadataKey == kKeyAlbumArt) {
+ mFileMetaData.setData(
+ kKeyAlbumArt, MetaData::TYPE_NONE,
+ buffer + 8, size - 8);
+ } else if (metadataKey == kKeyGenre) {
+ if (flags == 0) {
+ // uint8_t genre code, iTunes genre codes are
+ // the standard id3 codes, except they start
+ // at 1 instead of 0 (e.g. Pop is 14, not 13)
+ // We use standard id3 numbering, so subtract 1.
+ int genrecode = (int)buffer[size - 1];
+ genrecode--;
+ if (genrecode < 0) {
+ genrecode = 255; // reserved for 'unknown genre'
+ }
+ char genre[10];
+ sprintf(genre, "%d", genrecode);
+
+ mFileMetaData.setCString(metadataKey, genre);
+ } else if (flags == 1) {
+ // custom genre string
+ buffer[size] = '\0';
+
+ mFileMetaData.setCString(
+ metadataKey, (const char *)buffer + 8);
+ }
+ } else {
+ buffer[size] = '\0';
+
+ mFileMetaData.setCString(
+ metadataKey, (const char *)buffer + 8);
+ }
+ }
+
+ delete[] buffer;
+ buffer = NULL;
+
+ return OK;
+}
+
+status_t MPEG4Extractor::parseColorInfo(off64_t offset, size_t size) {
+ if (size < 4 || size == SIZE_MAX || mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
+ if (buffer == NULL) {
+ return ERROR_MALFORMED;
+ }
+ if (mDataSource->readAt(offset, buffer, size) != (ssize_t)size) {
+ delete[] buffer;
+ buffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ int32_t type = U32_AT(&buffer[0]);
+ if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
+ || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
+ int32_t primaries = U16_AT(&buffer[4]);
+ int32_t transfer = U16_AT(&buffer[6]);
+ int32_t coeffs = U16_AT(&buffer[8]);
+ bool fullRange = (type == FOURCC('n', 'c', 'l', 'x')) && (buffer[10] & 128);
+
+ ColorAspects aspects;
+ ColorUtils::convertIsoColorAspectsToCodecAspects(
+ primaries, transfer, coeffs, fullRange, aspects);
+
+ // only store the first color specification
+ if (!mLastTrack->meta.hasData(kKeyColorPrimaries)) {
+ mLastTrack->meta.setInt32(kKeyColorPrimaries, aspects.mPrimaries);
+ mLastTrack->meta.setInt32(kKeyTransferFunction, aspects.mTransfer);
+ mLastTrack->meta.setInt32(kKeyColorMatrix, aspects.mMatrixCoeffs);
+ mLastTrack->meta.setInt32(kKeyColorRange, aspects.mRange);
+ }
+ }
+
+ delete[] buffer;
+ buffer = NULL;
+
+ return OK;
+}
+
+status_t MPEG4Extractor::parse3GPPMetaData(off64_t offset, size_t size, int depth) {
+ if (size < 4 || size == SIZE_MAX) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
+ if (buffer == NULL) {
+ return ERROR_MALFORMED;
+ }
+ if (mDataSource->readAt(
+ offset, buffer, size) != (ssize_t)size) {
+ delete[] buffer;
+ buffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ uint32_t metadataKey = 0;
+ switch (mPath[depth]) {
+ case FOURCC('t', 'i', 't', 'l'):
+ {
+ metadataKey = kKeyTitle;
+ break;
+ }
+ case FOURCC('p', 'e', 'r', 'f'):
+ {
+ metadataKey = kKeyArtist;
+ break;
+ }
+ case FOURCC('a', 'u', 't', 'h'):
+ {
+ metadataKey = kKeyWriter;
+ break;
+ }
+ case FOURCC('g', 'n', 'r', 'e'):
+ {
+ metadataKey = kKeyGenre;
+ break;
+ }
+ case FOURCC('a', 'l', 'b', 'm'):
+ {
+ if (buffer[size - 1] != '\0') {
+ char tmp[4];
+ sprintf(tmp, "%u", buffer[size - 1]);
+
+ mFileMetaData.setCString(kKeyCDTrackNumber, tmp);
+ }
+
+ metadataKey = kKeyAlbum;
+ break;
+ }
+ case FOURCC('y', 'r', 'r', 'c'):
+ {
+ if (size < 6) {
+ delete[] buffer;
+ buffer = NULL;
+ ALOGE("b/62133227");
+ android_errorWriteLog(0x534e4554, "62133227");
+ return ERROR_MALFORMED;
+ }
+ char tmp[5];
+ uint16_t year = U16_AT(&buffer[4]);
+
+ if (year < 10000) {
+ sprintf(tmp, "%u", year);
+
+ mFileMetaData.setCString(kKeyYear, tmp);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (metadataKey > 0) {
+ bool isUTF8 = true; // Common case
+ char16_t *framedata = NULL;
+ int len16 = 0; // Number of UTF-16 characters
+
+ // smallest possible valid UTF-16 string with BOM: 0xfe 0xff 0x00 0x00
+ if (size < 6) {
+ delete[] buffer;
+ buffer = NULL;
+ return ERROR_MALFORMED;
+ }
+
+ if (size - 6 >= 4) {
+ len16 = ((size - 6) / 2) - 1; // don't include 0x0000 terminator
+ framedata = (char16_t *)(buffer + 6);
+ if (0xfffe == *framedata) {
+ // endianness marker (BOM) doesn't match host endianness
+ for (int i = 0; i < len16; i++) {
+ framedata[i] = bswap_16(framedata[i]);
+ }
+ // BOM is now swapped to 0xfeff, we will execute next block too
+ }
+
+ if (0xfeff == *framedata) {
+ // Remove the BOM
+ framedata++;
+ len16--;
+ isUTF8 = false;
+ }
+ // else normal non-zero-length UTF-8 string
+ // we can't handle UTF-16 without BOM as there is no other
+ // indication of encoding.
+ }
+
+ if (isUTF8) {
+ buffer[size] = 0;
+ mFileMetaData.setCString(metadataKey, (const char *)buffer + 6);
+ } else {
+ // Convert from UTF-16 string to UTF-8 string.
+ String8 tmpUTF8str(framedata, len16);
+ mFileMetaData.setCString(metadataKey, tmpUTF8str.string());
+ }
+ }
+
+ delete[] buffer;
+ buffer = NULL;
+
+ return OK;
+}
+
+void MPEG4Extractor::parseID3v2MetaData(off64_t offset) {
+ ID3 id3(mDataSource, true /* ignorev1 */, offset);
+
+ if (id3.isValid()) {
+ struct Map {
+ int key;
+ const char *tag1;
+ const char *tag2;
+ };
+ static const Map kMap[] = {
+ { kKeyAlbum, "TALB", "TAL" },
+ { kKeyArtist, "TPE1", "TP1" },
+ { kKeyAlbumArtist, "TPE2", "TP2" },
+ { kKeyComposer, "TCOM", "TCM" },
+ { kKeyGenre, "TCON", "TCO" },
+ { kKeyTitle, "TIT2", "TT2" },
+ { kKeyYear, "TYE", "TYER" },
+ { kKeyAuthor, "TXT", "TEXT" },
+ { kKeyCDTrackNumber, "TRK", "TRCK" },
+ { kKeyDiscNumber, "TPA", "TPOS" },
+ { kKeyCompilation, "TCP", "TCMP" },
+ };
+ static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
+
+ for (size_t i = 0; i < kNumMapEntries; ++i) {
+ if (!mFileMetaData.hasData(kMap[i].key)) {
+ ID3::Iterator *it = new ID3::Iterator(id3, kMap[i].tag1);
+ if (it->done()) {
+ delete it;
+ it = new ID3::Iterator(id3, kMap[i].tag2);
+ }
+
+ if (it->done()) {
+ delete it;
+ continue;
+ }
+
+ String8 s;
+ it->getString(&s);
+ delete it;
+
+ mFileMetaData.setCString(kMap[i].key, s);
+ }
+ }
+
+ size_t dataSize;
+ String8 mime;
+ const void *data = id3.getAlbumArt(&dataSize, &mime);
+
+ if (data) {
+ mFileMetaData.setData(kKeyAlbumArt, MetaData::TYPE_NONE, data, dataSize);
+ mFileMetaData.setCString(kKeyAlbumArtMIME, mime.string());
+ }
+ }
+}
+
+MediaTrack *MPEG4Extractor::getTrack(size_t index) {
+ status_t err;
+ if ((err = readMetaData()) != OK) {
+ return NULL;
+ }
+
+ Track *track = mFirstTrack;
+ while (index > 0) {
+ if (track == NULL) {
+ return NULL;
+ }
+
+ track = track->next;
+ --index;
+ }
+
+ if (track == NULL) {
+ return NULL;
+ }
+
+ Trex *trex = NULL;
+ int32_t trackId;
+ if (track->meta.findInt32(kKeyTrackID, &trackId)) {
+ for (size_t i = 0; i < mTrex.size(); i++) {
+ Trex *t = &mTrex.editItemAt(i);
+ if (t->track_ID == (uint32_t) trackId) {
+ trex = t;
+ break;
+ }
+ }
+ } else {
+ ALOGE("b/21657957");
+ return NULL;
+ }
+
+ ALOGV("getTrack called, pssh: %zu", mPssh.size());
+
+ const char *mime;
+ if (!track->meta.findCString(kKeyMIMEType, &mime)) {
+ return NULL;
+ }
+
+ sp<ItemTable> itemTable;
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ uint32_t type;
+ const void *data;
+ size_t size;
+ if (!track->meta.findData(kKeyAVCC, &type, &data, &size)) {
+ return NULL;
+ }
+
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ if (size < 7 || ptr[0] != 1) { // configurationVersion == 1
+ return NULL;
+ }
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)
+ || !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
+ uint32_t type;
+ const void *data;
+ size_t size;
+ if (!track->meta.findData(kKeyHVCC, &type, &data, &size)) {
+ return NULL;
+ }
+
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ if (size < 22 || ptr[0] != 1) { // configurationVersion == 1
+ return NULL;
+ }
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
+ itemTable = mItemTable;
+ }
+ }
+
+ MPEG4Source *source = new MPEG4Source(
+ track->meta, mDataSource, track->timescale, track->sampleTable,
+ mSidxEntries, trex, mMoofOffset, itemTable);
+ if (source->init() != OK) {
+ delete source;
+ return NULL;
+ }
+ return source;
+}
+
+// static
+status_t MPEG4Extractor::verifyTrack(Track *track) {
+ const char *mime;
+ CHECK(track->meta.findCString(kKeyMIMEType, &mime));
+
+ uint32_t type;
+ const void *data;
+ size_t size;
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ if (!track->meta.findData(kKeyAVCC, &type, &data, &size)
+ || type != kTypeAVCC) {
+ return ERROR_MALFORMED;
+ }
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ if (!track->meta.findData(kKeyHVCC, &type, &data, &size)
+ || type != kTypeHVCC) {
+ return ERROR_MALFORMED;
+ }
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)
+ || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)
+ || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
+ if (!track->meta.findData(kKeyESDS, &type, &data, &size)
+ || type != kTypeESDS) {
+ return ERROR_MALFORMED;
+ }
+ }
+
+ if (track->sampleTable == NULL || !track->sampleTable->isValid()) {
+ // Make sure we have all the metadata we need.
+ ALOGE("stbl atom missing/invalid.");
+ return ERROR_MALFORMED;
+ }
+
+ if (track->timescale == 0) {
+ ALOGE("timescale invalid.");
+ return ERROR_MALFORMED;
+ }
+
+ return OK;
+}
+
+typedef enum {
+ //AOT_NONE = -1,
+ //AOT_NULL_OBJECT = 0,
+ //AOT_AAC_MAIN = 1, /**< Main profile */
+ AOT_AAC_LC = 2, /**< Low Complexity object */
+ //AOT_AAC_SSR = 3,
+ //AOT_AAC_LTP = 4,
+ AOT_SBR = 5,
+ //AOT_AAC_SCAL = 6,
+ //AOT_TWIN_VQ = 7,
+ //AOT_CELP = 8,
+ //AOT_HVXC = 9,
+ //AOT_RSVD_10 = 10, /**< (reserved) */
+ //AOT_RSVD_11 = 11, /**< (reserved) */
+ //AOT_TTSI = 12, /**< TTSI Object */
+ //AOT_MAIN_SYNTH = 13, /**< Main Synthetic object */
+ //AOT_WAV_TAB_SYNTH = 14, /**< Wavetable Synthesis object */
+ //AOT_GEN_MIDI = 15, /**< General MIDI object */
+ //AOT_ALG_SYNTH_AUD_FX = 16, /**< Algorithmic Synthesis and Audio FX object */
+ AOT_ER_AAC_LC = 17, /**< Error Resilient(ER) AAC Low Complexity */
+ //AOT_RSVD_18 = 18, /**< (reserved) */
+ //AOT_ER_AAC_LTP = 19, /**< Error Resilient(ER) AAC LTP object */
+ AOT_ER_AAC_SCAL = 20, /**< Error Resilient(ER) AAC Scalable object */
+ //AOT_ER_TWIN_VQ = 21, /**< Error Resilient(ER) TwinVQ object */
+ AOT_ER_BSAC = 22, /**< Error Resilient(ER) BSAC object */
+ AOT_ER_AAC_LD = 23, /**< Error Resilient(ER) AAC LowDelay object */
+ //AOT_ER_CELP = 24, /**< Error Resilient(ER) CELP object */
+ //AOT_ER_HVXC = 25, /**< Error Resilient(ER) HVXC object */
+ //AOT_ER_HILN = 26, /**< Error Resilient(ER) HILN object */
+ //AOT_ER_PARA = 27, /**< Error Resilient(ER) Parametric object */
+ //AOT_RSVD_28 = 28, /**< might become SSC */
+ AOT_PS = 29, /**< PS, Parametric Stereo (includes SBR) */
+ //AOT_MPEGS = 30, /**< MPEG Surround */
+
+ AOT_ESCAPE = 31, /**< Signal AOT uses more than 5 bits */
+
+ //AOT_MP3ONMP4_L1 = 32, /**< MPEG-Layer1 in mp4 */
+ //AOT_MP3ONMP4_L2 = 33, /**< MPEG-Layer2 in mp4 */
+ //AOT_MP3ONMP4_L3 = 34, /**< MPEG-Layer3 in mp4 */
+ //AOT_RSVD_35 = 35, /**< might become DST */
+ //AOT_RSVD_36 = 36, /**< might become ALS */
+ //AOT_AAC_SLS = 37, /**< AAC + SLS */
+ //AOT_SLS = 38, /**< SLS */
+ //AOT_ER_AAC_ELD = 39, /**< AAC Enhanced Low Delay */
+
+ //AOT_USAC = 42, /**< USAC */
+ //AOT_SAOC = 43, /**< SAOC */
+ //AOT_LD_MPEGS = 44, /**< Low Delay MPEG Surround */
+
+ //AOT_RSVD50 = 50, /**< Interim AOT for Rsvd50 */
+} AUDIO_OBJECT_TYPE;
+
+status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
+ const void *esds_data, size_t esds_size) {
+ ESDS esds(esds_data, esds_size);
+
+ uint8_t objectTypeIndication;
+ if (esds.getObjectTypeIndication(&objectTypeIndication) != OK) {
+ return ERROR_MALFORMED;
+ }
+
+ if (objectTypeIndication == 0xe1) {
+ // This isn't MPEG4 audio at all, it's QCELP 14k...
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ mLastTrack->meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_QCELP);
+ return OK;
+ }
+
+ if (objectTypeIndication == 0x6b) {
+ // The media subtype is MP3 audio
+ // Our software MP3 audio decoder may not be able to handle
+        // packetized MP3 audio; for now, let's just return ERROR_UNSUPPORTED
+ ALOGE("MP3 track in MP4/3GPP file is not supported");
+ return ERROR_UNSUPPORTED;
+ }
+
+ if (mLastTrack != NULL) {
+ uint32_t maxBitrate = 0;
+ uint32_t avgBitrate = 0;
+ esds.getBitRate(&maxBitrate, &avgBitrate);
+ if (maxBitrate > 0 && maxBitrate < INT32_MAX) {
+ mLastTrack->meta.setInt32(kKeyMaxBitRate, (int32_t)maxBitrate);
+ }
+ if (avgBitrate > 0 && avgBitrate < INT32_MAX) {
+ mLastTrack->meta.setInt32(kKeyBitRate, (int32_t)avgBitrate);
+ }
+ }
+
+ const uint8_t *csd;
+ size_t csd_size;
+ if (esds.getCodecSpecificInfo(
+ (const void **)&csd, &csd_size) != OK) {
+ return ERROR_MALFORMED;
+ }
+
+ if (kUseHexDump) {
+ printf("ESD of size %zu\n", csd_size);
+ hexdump(csd, csd_size);
+ }
+
+ if (csd_size == 0) {
+ // There's no further information, i.e. no codec specific data
+ // Let's assume that the information provided in the mpeg4 headers
+ // is accurate and hope for the best.
+
+ return OK;
+ }
+
+ if (csd_size < 2) {
+ return ERROR_MALFORMED;
+ }
+
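+    // Sampling rates indexed by AAC samplingFrequencyIndex (ISO/IEC 14496-3);
+    // indexes 13 and 14 are reserved and rejected below.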
+ static uint32_t kSamplingRate[] = {
+ 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
+ 16000, 12000, 11025, 8000, 7350
+ };
+
+ ABitReader br(csd, csd_size);
+ uint32_t objectType = br.getBits(5);
+
+    if (objectType == 31) {  // AOT_ESCAPE: extended object type follows in 6 more bits
+ objectType = 32 + br.getBits(6);
+ }
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ //keep AOT type
+ mLastTrack->meta.setInt32(kKeyAACAOT, objectType);
+
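+    // samplingFrequencyIndex: a value of 15 means an explicit 24-bit sample
+    // rate follows instead of a table lookup.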
+ uint32_t freqIndex = br.getBits(4);
+
+ int32_t sampleRate = 0;
+ int32_t numChannels = 0;
+ if (freqIndex == 15) {
+ if (br.numBitsLeft() < 28) return ERROR_MALFORMED;
+ sampleRate = br.getBits(24);
+ numChannels = br.getBits(4);
+ } else {
+ if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
+ numChannels = br.getBits(4);
+
+ if (freqIndex == 13 || freqIndex == 14) {
+ return ERROR_MALFORMED;
+ }
+
+ sampleRate = kSamplingRate[freqIndex];
+ }
+
+    if (objectType == AOT_SBR || objectType == AOT_PS) {  // SBR specific config per 14496-3 table 1.13
+ if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
+ uint32_t extFreqIndex = br.getBits(4);
+ int32_t extSampleRate __unused;
+ if (extFreqIndex == 15) {
+ if (csd_size < 8) {
+ return ERROR_MALFORMED;
+ }
+ if (br.numBitsLeft() < 24) return ERROR_MALFORMED;
+ extSampleRate = br.getBits(24);
+ } else {
+ if (extFreqIndex == 13 || extFreqIndex == 14) {
+ return ERROR_MALFORMED;
+ }
+ extSampleRate = kSamplingRate[extFreqIndex];
+ }
+ //TODO: save the extension sampling rate value in meta data =>
+ // mLastTrack->meta.setInt32(kKeyExtSampleRate, extSampleRate);
+ }
+
+ switch (numChannels) {
+ // values defined in 14496-3_2009 amendment-4 Table 1.19 - Channel Configuration
+ case 0:
+ case 1:// FC
+ case 2:// FL FR
+ case 3:// FC, FL FR
+ case 4:// FC, FL FR, RC
+ case 5:// FC, FL FR, SL SR
+ case 6:// FC, FL FR, SL SR, LFE
+ //numChannels already contains the right value
+ break;
+ case 11:// FC, FL FR, SL SR, RC, LFE
+ numChannels = 7;
+ break;
+ case 7: // FC, FCL FCR, FL FR, SL SR, LFE
+ case 12:// FC, FL FR, SL SR, RL RR, LFE
+ case 14:// FC, FL FR, SL SR, LFE, FHL FHR
+ numChannels = 8;
+ break;
+ default:
+ return ERROR_UNSUPPORTED;
+ }
+
+ {
+ if (objectType == AOT_SBR || objectType == AOT_PS) {
+ if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
+ objectType = br.getBits(5);
+
+ if (objectType == AOT_ESCAPE) {
+ if (br.numBitsLeft() < 6) return ERROR_MALFORMED;
+ objectType = 32 + br.getBits(6);
+ }
+ }
+ if (objectType == AOT_AAC_LC || objectType == AOT_ER_AAC_LC ||
+ objectType == AOT_ER_AAC_LD || objectType == AOT_ER_AAC_SCAL ||
+ objectType == AOT_ER_BSAC) {
+ if (br.numBitsLeft() < 2) return ERROR_MALFORMED;
+ const int32_t frameLengthFlag __unused = br.getBits(1);
+
+ const int32_t dependsOnCoreCoder = br.getBits(1);
+
+            if (dependsOnCoreCoder) {
+ if (br.numBitsLeft() < 14) return ERROR_MALFORMED;
+ const int32_t coreCoderDelay __unused = br.getBits(14);
+ }
+
+ int32_t extensionFlag = -1;
+ if (br.numBitsLeft() > 0) {
+ extensionFlag = br.getBits(1);
+ } else {
+ switch (objectType) {
+ // 14496-3 4.5.1.1 extensionFlag
+ case AOT_AAC_LC:
+ extensionFlag = 0;
+ break;
+ case AOT_ER_AAC_LC:
+ case AOT_ER_AAC_SCAL:
+ case AOT_ER_BSAC:
+ case AOT_ER_AAC_LD:
+ extensionFlag = 1;
+ break;
+ default:
+ return ERROR_MALFORMED;
+ break;
+ }
+ ALOGW("csd missing extension flag; assuming %d for object type %u.",
+ extensionFlag, objectType);
+ }
+
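+            // channelConfiguration == 0: the channel layout is carried in a
+            // program_config_element (PCE); walk its element lists to derive
+            // the channel count.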
+ if (numChannels == 0) {
+ int32_t channelsEffectiveNum = 0;
+ int32_t channelsNum = 0;
+ if (br.numBitsLeft() < 32) {
+ return ERROR_MALFORMED;
+ }
+ const int32_t ElementInstanceTag __unused = br.getBits(4);
+ const int32_t Profile __unused = br.getBits(2);
+ const int32_t SamplingFrequencyIndex __unused = br.getBits(4);
+ const int32_t NumFrontChannelElements = br.getBits(4);
+ const int32_t NumSideChannelElements = br.getBits(4);
+ const int32_t NumBackChannelElements = br.getBits(4);
+ const int32_t NumLfeChannelElements = br.getBits(2);
+ const int32_t NumAssocDataElements __unused = br.getBits(3);
+ const int32_t NumValidCcElements __unused = br.getBits(4);
+
+ const int32_t MonoMixdownPresent = br.getBits(1);
+
+ if (MonoMixdownPresent != 0) {
+ if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
+ const int32_t MonoMixdownElementNumber __unused = br.getBits(4);
+ }
+
+ if (br.numBitsLeft() < 1) return ERROR_MALFORMED;
+ const int32_t StereoMixdownPresent = br.getBits(1);
+ if (StereoMixdownPresent != 0) {
+ if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
+ const int32_t StereoMixdownElementNumber __unused = br.getBits(4);
+ }
+
+ if (br.numBitsLeft() < 1) return ERROR_MALFORMED;
+ const int32_t MatrixMixdownIndexPresent = br.getBits(1);
+ if (MatrixMixdownIndexPresent != 0) {
+ if (br.numBitsLeft() < 3) return ERROR_MALFORMED;
+ const int32_t MatrixMixdownIndex __unused = br.getBits(2);
+ const int32_t PseudoSurroundEnable __unused = br.getBits(1);
+ }
+
+ int i;
+ for (i=0; i < NumFrontChannelElements; i++) {
+ if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
+ const int32_t FrontElementIsCpe = br.getBits(1);
+ const int32_t FrontElementTagSelect __unused = br.getBits(4);
+ channelsNum += FrontElementIsCpe ? 2 : 1;
+ }
+
+ for (i=0; i < NumSideChannelElements; i++) {
+ if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
+ const int32_t SideElementIsCpe = br.getBits(1);
+ const int32_t SideElementTagSelect __unused = br.getBits(4);
+ channelsNum += SideElementIsCpe ? 2 : 1;
+ }
+
+ for (i=0; i < NumBackChannelElements; i++) {
+ if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
+ const int32_t BackElementIsCpe = br.getBits(1);
+ const int32_t BackElementTagSelect __unused = br.getBits(4);
+ channelsNum += BackElementIsCpe ? 2 : 1;
+ }
+ channelsEffectiveNum = channelsNum;
+
+ for (i=0; i < NumLfeChannelElements; i++) {
+ if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
+ const int32_t LfeElementTagSelect __unused = br.getBits(4);
+ channelsNum += 1;
+ }
+ ALOGV("mpeg4 audio channelsNum = %d", channelsNum);
+ ALOGV("mpeg4 audio channelsEffectiveNum = %d", channelsEffectiveNum);
+ numChannels = channelsNum;
+ }
+ }
+ }
+
+ if (numChannels == 0) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ int32_t prevSampleRate;
+ CHECK(mLastTrack->meta.findInt32(kKeySampleRate, &prevSampleRate));
+
+ if (prevSampleRate != sampleRate) {
+ ALOGV("mpeg4 audio sample rate different from previous setting. "
+ "was: %d, now: %d", prevSampleRate, sampleRate);
+ }
+
+ mLastTrack->meta.setInt32(kKeySampleRate, sampleRate);
+
+ int32_t prevChannelCount;
+ CHECK(mLastTrack->meta.findInt32(kKeyChannelCount, &prevChannelCount));
+
+ if (prevChannelCount != numChannels) {
+ ALOGV("mpeg4 audio channel count different from previous setting. "
+ "was: %d, now: %d", prevChannelCount, numChannels);
+ }
+
+ mLastTrack->meta.setInt32(kKeyChannelCount, numChannels);
+
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+MPEG4Source::MPEG4Source(
+ MetaDataBase &format,
+ DataSourceBase *dataSource,
+ int32_t timeScale,
+ const sp<SampleTable> &sampleTable,
+ Vector<SidxEntry> &sidx,
+ const Trex *trex,
+ off64_t firstMoofOffset,
+ const sp<ItemTable> &itemTable)
+ : mFormat(format),
+ mDataSource(dataSource),
+ mTimescale(timeScale),
+ mSampleTable(sampleTable),
+ mCurrentSampleIndex(0),
+ mCurrentFragmentIndex(0),
+ mSegments(sidx),
+ mTrex(trex),
+ mFirstMoofOffset(firstMoofOffset),
+ mCurrentMoofOffset(firstMoofOffset),
+ mNextMoofOffset(-1),
+ mCurrentTime(0),
+ mDefaultEncryptedByteBlock(0),
+ mDefaultSkipByteBlock(0),
+ mCurrentSampleInfoAllocSize(0),
+ mCurrentSampleInfoSizes(NULL),
+ mCurrentSampleInfoOffsetsAllocSize(0),
+ mCurrentSampleInfoOffsets(NULL),
+ mIsAVC(false),
+ mIsHEVC(false),
+ mNALLengthSize(0),
+ mStarted(false),
+ mGroup(NULL),
+ mBuffer(NULL),
+ mWantsNALFragments(false),
+ mSrcBuffer(NULL),
+ mIsHeif(itemTable != NULL),
+ mItemTable(itemTable) {
+
+ memset(&mTrackFragmentHeaderInfo, 0, sizeof(mTrackFragmentHeaderInfo));
+
+ mFormat.findInt32(kKeyCryptoMode, &mCryptoMode);
+ mDefaultIVSize = 0;
+ mFormat.findInt32(kKeyCryptoDefaultIVSize, &mDefaultIVSize);
+ uint32_t keytype;
+ const void *key;
+ size_t keysize;
+ if (mFormat.findData(kKeyCryptoKey, &keytype, &key, &keysize)) {
+ CHECK(keysize <= 16);
+ memset(mCryptoKey, 0, 16);
+ memcpy(mCryptoKey, key, keysize);
+ }
+
+ mFormat.findInt32(kKeyEncryptedByteBlock, &mDefaultEncryptedByteBlock);
+ mFormat.findInt32(kKeySkipByteBlock, &mDefaultSkipByteBlock);
+
+ const char *mime;
+ bool success = mFormat.findCString(kKeyMIMEType, &mime);
+ CHECK(success);
+
+ mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
+ mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
+ !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+
+ if (mIsAVC) {
+ uint32_t type;
+ const void *data;
+ size_t size;
+ CHECK(format.findData(kKeyAVCC, &type, &data, &size));
+
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ CHECK(size >= 7);
+ CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
+
+ // The number of bytes used to encode the length of a NAL unit.
+ mNALLengthSize = 1 + (ptr[4] & 3);
+ } else if (mIsHEVC) {
+ uint32_t type;
+ const void *data;
+ size_t size;
+ CHECK(format.findData(kKeyHVCC, &type, &data, &size));
+
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ CHECK(size >= 22);
+ CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
+
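+        // The number of bytes used to encode the length of a NAL unit:
+        // lengthSizeMinusOne sits in the low two bits of byte 21 of the
+        // HEVCDecoderConfigurationRecord.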
+ mNALLengthSize = 1 + (ptr[14 + 7] & 3);
+ }
+
+ CHECK(format.findInt32(kKeyTrackID, &mTrackId));
+
+}
+
+status_t MPEG4Source::init() {
+ if (mFirstMoofOffset != 0) {
+ off64_t offset = mFirstMoofOffset;
+ return parseChunk(&offset);
+ }
+ return OK;
+}
+
+MPEG4Source::~MPEG4Source() {
+ if (mStarted) {
+ stop();
+ }
+ free(mCurrentSampleInfoSizes);
+ free(mCurrentSampleInfoOffsets);
+}
+
+status_t MPEG4Source::start(MetaDataBase *params) {
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK(!mStarted);
+
+ int32_t val;
+ if (params && params->findInt32(kKeyWantsNALFragments, &val)
+ && val != 0) {
+ mWantsNALFragments = true;
+ } else {
+ mWantsNALFragments = false;
+ }
+
+ int32_t tmp;
+ CHECK(mFormat.findInt32(kKeyMaxInputSize, &tmp));
+ size_t max_size = tmp;
+
+ // A somewhat arbitrary limit that should be sufficient for 8k video frames
+ // If you see the message below for a valid input stream: increase the limit
+ const size_t kMaxBufferSize = 64 * 1024 * 1024;
+ if (max_size > kMaxBufferSize) {
+ ALOGE("bogus max input size: %zu > %zu", max_size, kMaxBufferSize);
+ return ERROR_MALFORMED;
+ }
+ if (max_size == 0) {
+ ALOGE("zero max input size");
+ return ERROR_MALFORMED;
+ }
+
+ // Allow up to kMaxBuffers, but not if the total exceeds kMaxBufferSize.
+ const size_t kInitialBuffers = 2;
+ const size_t kMaxBuffers = 8;
+ const size_t realMaxBuffers = min(kMaxBufferSize / max_size, kMaxBuffers);
+ mGroup = new MediaBufferGroup(kInitialBuffers, max_size, realMaxBuffers);
+ mSrcBuffer = new (std::nothrow) uint8_t[max_size];
+ if (mSrcBuffer == NULL) {
+ // file probably specified a bad max size
+ delete mGroup;
+ mGroup = NULL;
+ return ERROR_MALFORMED;
+ }
+
+ mStarted = true;
+
+ return OK;
+}
+
+status_t MPEG4Source::stop() {
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK(mStarted);
+
+ if (mBuffer != NULL) {
+ mBuffer->release();
+ mBuffer = NULL;
+ }
+
+ delete[] mSrcBuffer;
+ mSrcBuffer = NULL;
+
+ delete mGroup;
+ mGroup = NULL;
+
+ mStarted = false;
+ mCurrentSampleIndex = 0;
+
+ return OK;
+}
+
+status_t MPEG4Source::parseChunk(off64_t *offset) {
+ uint32_t hdr[2];
+ if (mDataSource->readAt(*offset, hdr, 8) < 8) {
+ return ERROR_IO;
+ }
+ uint64_t chunk_size = ntohl(hdr[0]);
+ uint32_t chunk_type = ntohl(hdr[1]);
+ off64_t data_offset = *offset + 8;
+
+ if (chunk_size == 1) {
+ if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
+ return ERROR_IO;
+ }
+ chunk_size = ntoh64(chunk_size);
+ data_offset += 8;
+
+ if (chunk_size < 16) {
+ // The smallest valid chunk is 16 bytes long in this case.
+ return ERROR_MALFORMED;
+ }
+ } else if (chunk_size < 8) {
+ // The smallest valid chunk is 8 bytes long.
+ return ERROR_MALFORMED;
+ }
+
+ char chunk[5];
+ MakeFourCCString(chunk_type, chunk);
+ ALOGV("MPEG4Source chunk %s @ %#llx", chunk, (long long)*offset);
+
+ off64_t chunk_data_size = *offset + chunk_size - data_offset;
+
+ switch(chunk_type) {
+
+ case FOURCC('t', 'r', 'a', 'f'):
+ case FOURCC('m', 'o', 'o', 'f'): {
+ off64_t stop_offset = *offset + chunk_size;
+ *offset = data_offset;
+ while (*offset < stop_offset) {
+ status_t err = parseChunk(offset);
+ if (err != OK) {
+ return err;
+ }
+ }
+ if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+ // *offset points to the box following this moof. Find the next moof from there.
+
+ while (true) {
+ if (mDataSource->readAt(*offset, hdr, 8) < 8) {
+ // no more box to the end of file.
+ break;
+ }
+ chunk_size = ntohl(hdr[0]);
+ chunk_type = ntohl(hdr[1]);
+ if (chunk_size == 1) {
+ // ISO/IEC 14496-12:2012, 8.8.4 Movie Fragment Box, moof is a Box
+ // which is defined in 4.2 Object Structure.
+ // When chunk_size==1, 8 bytes follows as "largesize".
+ if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
+ return ERROR_IO;
+ }
+ chunk_size = ntoh64(chunk_size);
+ if (chunk_size < 16) {
+ // The smallest valid chunk is 16 bytes long in this case.
+ return ERROR_MALFORMED;
+ }
+ } else if (chunk_size == 0) {
+ // next box extends to end of file.
+ } else if (chunk_size < 8) {
+ // The smallest valid chunk is 8 bytes long in this case.
+ return ERROR_MALFORMED;
+ }
+
+ if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+ mNextMoofOffset = *offset;
+ break;
+ } else if (chunk_size == 0) {
+ break;
+ }
+ *offset += chunk_size;
+ }
+ }
+ break;
+ }
+
+ case FOURCC('t', 'f', 'h', 'd'): {
+ status_t err;
+ if ((err = parseTrackFragmentHeader(data_offset, chunk_data_size)) != OK) {
+ return err;
+ }
+ *offset += chunk_size;
+ break;
+ }
+
+ case FOURCC('t', 'r', 'u', 'n'): {
+ status_t err;
+ if (mLastParsedTrackId == mTrackId) {
+ if ((err = parseTrackFragmentRun(data_offset, chunk_data_size)) != OK) {
+ return err;
+ }
+ }
+
+ *offset += chunk_size;
+ break;
+ }
+
+ case FOURCC('s', 'a', 'i', 'z'): {
+ status_t err;
+ if ((err = parseSampleAuxiliaryInformationSizes(data_offset, chunk_data_size)) != OK) {
+ return err;
+ }
+ *offset += chunk_size;
+ break;
+ }
+ case FOURCC('s', 'a', 'i', 'o'): {
+ status_t err;
+ if ((err = parseSampleAuxiliaryInformationOffsets(data_offset, chunk_data_size)) != OK) {
+ return err;
+ }
+ *offset += chunk_size;
+ break;
+ }
+
+ case FOURCC('s', 'e', 'n', 'c'): {
+ status_t err;
+ if ((err = parseSampleEncryption(data_offset)) != OK) {
+ return err;
+ }
+ *offset += chunk_size;
+ break;
+ }
+
+ case FOURCC('m', 'd', 'a', 't'): {
+ // parse DRM info if present
+ ALOGV("MPEG4Source::parseChunk mdat");
+ // if saiz/saoi was previously observed, do something with the sampleinfos
+ *offset += chunk_size;
+ break;
+ }
+
+ default: {
+ *offset += chunk_size;
+ break;
+ }
+ }
+ return OK;
+}
+
+status_t MPEG4Source::parseSampleAuxiliaryInformationSizes(
+ off64_t offset, off64_t /* size */) {
+ ALOGV("parseSampleAuxiliaryInformationSizes");
+ // 14496-12 8.7.12
+ uint8_t version;
+ if (mDataSource->readAt(
+ offset, &version, sizeof(version))
+ < (ssize_t)sizeof(version)) {
+ return ERROR_IO;
+ }
+
+ if (version != 0) {
+ return ERROR_UNSUPPORTED;
+ }
+ offset++;
+
+ uint32_t flags;
+ if (!mDataSource->getUInt24(offset, &flags)) {
+ return ERROR_IO;
+ }
+ offset += 3;
+
+ if (flags & 1) {
+ uint32_t tmp;
+ if (!mDataSource->getUInt32(offset, &tmp)) {
+ return ERROR_MALFORMED;
+ }
+ mCurrentAuxInfoType = tmp;
+ offset += 4;
+ if (!mDataSource->getUInt32(offset, &tmp)) {
+ return ERROR_MALFORMED;
+ }
+ mCurrentAuxInfoTypeParameter = tmp;
+ offset += 4;
+ }
+
+ uint8_t defsize;
+ if (mDataSource->readAt(offset, &defsize, 1) != 1) {
+ return ERROR_MALFORMED;
+ }
+ mCurrentDefaultSampleInfoSize = defsize;
+ offset++;
+
+ uint32_t smplcnt;
+ if (!mDataSource->getUInt32(offset, &smplcnt)) {
+ return ERROR_MALFORMED;
+ }
+ mCurrentSampleInfoCount = smplcnt;
+ offset += 4;
+
+ if (mCurrentDefaultSampleInfoSize != 0) {
+ ALOGV("@@@@ using default sample info size of %d", mCurrentDefaultSampleInfoSize);
+ return OK;
+ }
+ if (smplcnt > mCurrentSampleInfoAllocSize) {
+ uint8_t * newPtr = (uint8_t*) realloc(mCurrentSampleInfoSizes, smplcnt);
+ if (newPtr == NULL) {
+ ALOGE("failed to realloc %u -> %u", mCurrentSampleInfoAllocSize, smplcnt);
+ return NO_MEMORY;
+ }
+ mCurrentSampleInfoSizes = newPtr;
+ mCurrentSampleInfoAllocSize = smplcnt;
+ }
+
+ mDataSource->readAt(offset, mCurrentSampleInfoSizes, smplcnt);
+ return OK;
+}
+
+status_t MPEG4Source::parseSampleAuxiliaryInformationOffsets(
+ off64_t offset, off64_t /* size */) {
+ ALOGV("parseSampleAuxiliaryInformationOffsets");
+ // 14496-12 8.7.13
+ uint8_t version;
+ if (mDataSource->readAt(offset, &version, sizeof(version)) != 1) {
+ return ERROR_IO;
+ }
+ offset++;
+
+ uint32_t flags;
+ if (!mDataSource->getUInt24(offset, &flags)) {
+ return ERROR_IO;
+ }
+ offset += 3;
+
+ uint32_t entrycount;
+ if (!mDataSource->getUInt32(offset, &entrycount)) {
+ return ERROR_IO;
+ }
+ offset += 4;
+ if (entrycount == 0) {
+ return OK;
+ }
+ if (entrycount > UINT32_MAX / 8) {
+ return ERROR_MALFORMED;
+ }
+
+ if (entrycount > mCurrentSampleInfoOffsetsAllocSize) {
+ uint64_t *newPtr = (uint64_t *)realloc(mCurrentSampleInfoOffsets, entrycount * 8);
+ if (newPtr == NULL) {
+ ALOGE("failed to realloc %u -> %u", mCurrentSampleInfoOffsetsAllocSize, entrycount * 8);
+ return NO_MEMORY;
+ }
+ mCurrentSampleInfoOffsets = newPtr;
+ mCurrentSampleInfoOffsetsAllocSize = entrycount;
+ }
+ mCurrentSampleInfoOffsetCount = entrycount;
+
+ if (mCurrentSampleInfoOffsets == NULL) {
+ return OK;
+ }
+
+ for (size_t i = 0; i < entrycount; i++) {
+ if (version == 0) {
+ uint32_t tmp;
+ if (!mDataSource->getUInt32(offset, &tmp)) {
+ return ERROR_IO;
+ }
+ mCurrentSampleInfoOffsets[i] = tmp;
+ offset += 4;
+ } else {
+ uint64_t tmp;
+ if (!mDataSource->getUInt64(offset, &tmp)) {
+ return ERROR_IO;
+ }
+ mCurrentSampleInfoOffsets[i] = tmp;
+ offset += 8;
+ }
+ }
+
+ // parse clear/encrypted data
+
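+    // The first saio entry is interpreted as being relative to the start of
+    // the current moof.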
+ off64_t drmoffset = mCurrentSampleInfoOffsets[0]; // from moof
+
+ drmoffset += mCurrentMoofOffset;
+
+ return parseClearEncryptedSizes(drmoffset, false, 0);
+}
+
+status_t MPEG4Source::parseClearEncryptedSizes(off64_t offset, bool isSubsampleEncryption, uint32_t flags) {
+
+ int ivlength;
+ CHECK(mFormat.findInt32(kKeyCryptoDefaultIVSize, &ivlength));
+
+ // only 0, 8 and 16 byte initialization vectors are supported
+ if (ivlength != 0 && ivlength != 8 && ivlength != 16) {
+ ALOGW("unsupported IV length: %d", ivlength);
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t sampleCount = mCurrentSampleInfoCount;
+ if (isSubsampleEncryption) {
+ if (!mDataSource->getUInt32(offset, &sampleCount)) {
+ return ERROR_IO;
+ }
+ offset += 4;
+ }
+
+ // read CencSampleAuxiliaryDataFormats
+ for (size_t i = 0; i < sampleCount; i++) {
+ if (i >= mCurrentSamples.size()) {
+ ALOGW("too few samples");
+ break;
+ }
+ Sample *smpl = &mCurrentSamples.editItemAt(i);
+ if (!smpl->clearsizes.isEmpty()) {
+ continue;
+ }
+
+ memset(smpl->iv, 0, 16);
+ if (mDataSource->readAt(offset, smpl->iv, ivlength) != ivlength) {
+ return ERROR_IO;
+ }
+
+ offset += ivlength;
+
+ bool readSubsamples;
+ if (isSubsampleEncryption) {
+ readSubsamples = flags & 2;
+ } else {
+ int32_t smplinfosize = mCurrentDefaultSampleInfoSize;
+ if (smplinfosize == 0) {
+ smplinfosize = mCurrentSampleInfoSizes[i];
+ }
+ readSubsamples = smplinfosize > ivlength;
+ }
+
+ if (readSubsamples) {
+ uint16_t numsubsamples;
+ if (!mDataSource->getUInt16(offset, &numsubsamples)) {
+ return ERROR_IO;
+ }
+ offset += 2;
+ for (size_t j = 0; j < numsubsamples; j++) {
+ uint16_t numclear;
+ uint32_t numencrypted;
+ if (!mDataSource->getUInt16(offset, &numclear)) {
+ return ERROR_IO;
+ }
+ offset += 2;
+ if (!mDataSource->getUInt32(offset, &numencrypted)) {
+ return ERROR_IO;
+ }
+ offset += 4;
+ smpl->clearsizes.add(numclear);
+ smpl->encryptedsizes.add(numencrypted);
+ }
+ } else {
+ smpl->clearsizes.add(0);
+ smpl->encryptedsizes.add(smpl->size);
+ }
+ }
+
+ return OK;
+}
+
+status_t MPEG4Source::parseSampleEncryption(off64_t offset) {
+ uint32_t flags;
+ if (!mDataSource->getUInt32(offset, &flags)) { // actually version + flags
+ return ERROR_MALFORMED;
+ }
+ return parseClearEncryptedSizes(offset + 4, true, flags);
+}
+
+status_t MPEG4Source::parseTrackFragmentHeader(off64_t offset, off64_t size) {
+
+ if (size < 8) {
+ return -EINVAL;
+ }
+
+ uint32_t flags;
+ if (!mDataSource->getUInt32(offset, &flags)) { // actually version + flags
+ return ERROR_MALFORMED;
+ }
+
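+    // The top byte of the fullbox version/flags word is the version; only
+    // version 0 is accepted here.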
+ if (flags & 0xff000000) {
+ return -EINVAL;
+ }
+
+ if (!mDataSource->getUInt32(offset + 4, (uint32_t*)&mLastParsedTrackId)) {
+ return ERROR_MALFORMED;
+ }
+
+ if (mLastParsedTrackId != mTrackId) {
+ // this is not the right track, skip it
+ return OK;
+ }
+
+ mTrackFragmentHeaderInfo.mFlags = flags;
+ mTrackFragmentHeaderInfo.mTrackID = mLastParsedTrackId;
+ offset += 8;
+ size -= 8;
+
+ ALOGV("fragment header: %08x %08x", flags, mTrackFragmentHeaderInfo.mTrackID);
+
+ if (flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent) {
+ if (size < 8) {
+ return -EINVAL;
+ }
+
+ if (!mDataSource->getUInt64(offset, &mTrackFragmentHeaderInfo.mBaseDataOffset)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 8;
+ size -= 8;
+ }
+
+ if (flags & TrackFragmentHeaderInfo::kSampleDescriptionIndexPresent) {
+ if (size < 4) {
+ return -EINVAL;
+ }
+
+ if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mSampleDescriptionIndex)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ size -= 4;
+ }
+
+ if (flags & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
+ if (size < 4) {
+ return -EINVAL;
+ }
+
+ if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleDuration)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ size -= 4;
+ }
+
+ if (flags & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
+ if (size < 4) {
+ return -EINVAL;
+ }
+
+ if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleSize)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ size -= 4;
+ }
+
+ if (flags & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
+ if (size < 4) {
+ return -EINVAL;
+ }
+
+ if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleFlags)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ size -= 4;
+ }
+
+ if (!(flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent)) {
+ mTrackFragmentHeaderInfo.mBaseDataOffset = mCurrentMoofOffset;
+ }
+
+ mTrackFragmentHeaderInfo.mDataOffset = 0;
+ return OK;
+}
+
+status_t MPEG4Source::parseTrackFragmentRun(off64_t offset, off64_t size) {
+
+ ALOGV("MPEG4Extractor::parseTrackFragmentRun");
+ if (size < 8) {
+ return -EINVAL;
+ }
+
+ enum {
+ kDataOffsetPresent = 0x01,
+ kFirstSampleFlagsPresent = 0x04,
+ kSampleDurationPresent = 0x100,
+ kSampleSizePresent = 0x200,
+ kSampleFlagsPresent = 0x400,
+ kSampleCompositionTimeOffsetPresent = 0x800,
+ };
+
+ uint32_t flags;
+ if (!mDataSource->getUInt32(offset, &flags)) {
+ return ERROR_MALFORMED;
+ }
+ // |version| only affects SampleCompositionTimeOffset field.
+ // If version == 0, SampleCompositionTimeOffset is uint32_t;
+ // Otherwise, SampleCompositionTimeOffset is int32_t.
+ // Sample.compositionOffset is defined as int32_t.
+ uint8_t version = flags >> 24;
+ flags &= 0xffffff;
+ ALOGV("fragment run version: 0x%02x, flags: 0x%06x", version, flags);
+
+ if ((flags & kFirstSampleFlagsPresent) && (flags & kSampleFlagsPresent)) {
+ // These two shall not be used together.
+ return -EINVAL;
+ }
+
+ uint32_t sampleCount;
+ if (!mDataSource->getUInt32(offset + 4, &sampleCount)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 8;
+ size -= 8;
+
+ uint64_t dataOffset = mTrackFragmentHeaderInfo.mDataOffset;
+
+ uint32_t firstSampleFlags = 0;
+
+ if (flags & kDataOffsetPresent) {
+ if (size < 4) {
+ return -EINVAL;
+ }
+
+ int32_t dataOffsetDelta;
+ if (!mDataSource->getUInt32(offset, (uint32_t*)&dataOffsetDelta)) {
+ return ERROR_MALFORMED;
+ }
+
+ dataOffset = mTrackFragmentHeaderInfo.mBaseDataOffset + dataOffsetDelta;
+
+ offset += 4;
+ size -= 4;
+ }
+
+ if (flags & kFirstSampleFlagsPresent) {
+ if (size < 4) {
+ return -EINVAL;
+ }
+
+ if (!mDataSource->getUInt32(offset, &firstSampleFlags)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ size -= 4;
+ }
+
+ uint32_t sampleDuration = 0, sampleSize = 0, sampleFlags = 0,
+ sampleCtsOffset = 0;
+
+ size_t bytesPerSample = 0;
+ if (flags & kSampleDurationPresent) {
+ bytesPerSample += 4;
+ } else if (mTrackFragmentHeaderInfo.mFlags
+ & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
+ sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
+ } else if (mTrex) {
+ sampleDuration = mTrex->default_sample_duration;
+ }
+
+ if (flags & kSampleSizePresent) {
+ bytesPerSample += 4;
+ } else if (mTrackFragmentHeaderInfo.mFlags
+ & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
+ sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
+    } else {
+        // Neither trun nor tfhd supplied a size; fall back to whatever
+        // default is currently stored (zero-initialized in the constructor).
+        // The same fallback applies to sampleFlags below.
+        sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
+    }
+
+ if (flags & kSampleFlagsPresent) {
+ bytesPerSample += 4;
+ } else if (mTrackFragmentHeaderInfo.mFlags
+ & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
+ sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
+ } else {
+ sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
+ }
+
+ if (flags & kSampleCompositionTimeOffsetPresent) {
+ bytesPerSample += 4;
+ } else {
+ sampleCtsOffset = 0;
+ }
+
+ if (size < (off64_t)(sampleCount * bytesPerSample)) {
+ return -EINVAL;
+ }
+
+ Sample tmp;
+ for (uint32_t i = 0; i < sampleCount; ++i) {
+ if (flags & kSampleDurationPresent) {
+ if (!mDataSource->getUInt32(offset, &sampleDuration)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ }
+
+ if (flags & kSampleSizePresent) {
+ if (!mDataSource->getUInt32(offset, &sampleSize)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ }
+
+ if (flags & kSampleFlagsPresent) {
+ if (!mDataSource->getUInt32(offset, &sampleFlags)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ }
+
+ if (flags & kSampleCompositionTimeOffsetPresent) {
+ if (!mDataSource->getUInt32(offset, &sampleCtsOffset)) {
+ return ERROR_MALFORMED;
+ }
+ offset += 4;
+ }
+
+ ALOGV("adding sample %d at offset 0x%08" PRIx64 ", size %u, duration %u, "
+ " flags 0x%08x", i + 1,
+ dataOffset, sampleSize, sampleDuration,
+ (flags & kFirstSampleFlagsPresent) && i == 0
+ ? firstSampleFlags : sampleFlags);
+ tmp.offset = dataOffset;
+ tmp.size = sampleSize;
+ tmp.duration = sampleDuration;
+ tmp.compositionOffset = sampleCtsOffset;
+ memset(tmp.iv, 0, sizeof(tmp.iv));
+ mCurrentSamples.add(tmp);
+
+ dataOffset += sampleSize;
+ }
+
+ mTrackFragmentHeaderInfo.mDataOffset = dataOffset;
+
+ return OK;
+}
+
+status_t MPEG4Source::getFormat(MetaDataBase &meta) {
+ Mutex::Autolock autoLock(mLock);
+ meta = mFormat;
+ return OK;
+}
+
+size_t MPEG4Source::parseNALSize(const uint8_t *data) const {
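+    // NAL units in MP4 samples are length-prefixed (big-endian), not
+    // start-code delimited; mNALLengthSize is the width of that prefix.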
+ switch (mNALLengthSize) {
+ case 1:
+ return *data;
+ case 2:
+ return U16_AT(data);
+ case 3:
+ return ((size_t)data[0] << 16) | U16_AT(&data[1]);
+ case 4:
+ return U32_AT(data);
+ }
+
+ // This cannot happen, mNALLengthSize springs to life by adding 1 to
+ // a 2-bit integer.
+ CHECK(!"Should not be here.");
+
+ return 0;
+}
+
+status_t MPEG4Source::read(
+ MediaBufferBase **out, const ReadOptions *options) {
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK(mStarted);
+
+ if (options != nullptr && options->getNonBlocking() && !mGroup->has_buffers()) {
+ *out = nullptr;
+ return WOULD_BLOCK;
+ }
+
+ if (mFirstMoofOffset > 0) {
+ return fragmentedRead(out, options);
+ }
+
+ *out = NULL;
+
+ int64_t targetSampleTimeUs = -1;
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ if (mIsHeif) {
+ CHECK(mSampleTable == NULL);
+ CHECK(mItemTable != NULL);
+ int32_t imageIndex;
+ if (!mFormat.findInt32(kKeyTrackID, &imageIndex)) {
+ return ERROR_MALFORMED;
+ }
+
+ status_t err;
+ if (seekTimeUs >= 0) {
+ err = mItemTable->findImageItem(imageIndex, &mCurrentSampleIndex);
+ } else {
+ err = mItemTable->findThumbnailItem(imageIndex, &mCurrentSampleIndex);
+ }
+ if (err != OK) {
+ return err;
+ }
+ } else {
+ uint32_t findFlags = 0;
+ switch (mode) {
+ case ReadOptions::SEEK_PREVIOUS_SYNC:
+ findFlags = SampleTable::kFlagBefore;
+ break;
+ case ReadOptions::SEEK_NEXT_SYNC:
+ findFlags = SampleTable::kFlagAfter;
+ break;
+ case ReadOptions::SEEK_CLOSEST_SYNC:
+ case ReadOptions::SEEK_CLOSEST:
+ findFlags = SampleTable::kFlagClosest;
+ break;
+ case ReadOptions::SEEK_FRAME_INDEX:
+ findFlags = SampleTable::kFlagFrameIndex;
+ break;
+ default:
+ CHECK(!"Should not be here.");
+ break;
+ }
+
+ uint32_t sampleIndex;
+ status_t err = mSampleTable->findSampleAtTime(
+ seekTimeUs, 1000000, mTimescale,
+ &sampleIndex, findFlags);
+
+ if (mode == ReadOptions::SEEK_CLOSEST
+ || mode == ReadOptions::SEEK_FRAME_INDEX) {
+ // We found the closest sample already, now we want the sync
+ // sample preceding it (or the sample itself of course), even
+ // if the subsequent sync sample is closer.
+ findFlags = SampleTable::kFlagBefore;
+ }
+
+ uint32_t syncSampleIndex;
+ if (err == OK) {
+ err = mSampleTable->findSyncSampleNear(
+ sampleIndex, &syncSampleIndex, findFlags);
+ }
+
+ uint32_t sampleTime;
+ if (err == OK) {
+ err = mSampleTable->getMetaDataForSample(
+ sampleIndex, NULL, NULL, &sampleTime);
+ }
+
+ if (err != OK) {
+ if (err == ERROR_OUT_OF_RANGE) {
+ // An attempt to seek past the end of the stream would
+ // normally cause this ERROR_OUT_OF_RANGE error. Propagating
+ // this all the way to the MediaPlayer would cause abnormal
+ // termination. Legacy behaviour appears to be to behave as if
+ // we had seeked to the end of stream, ending normally.
+ err = ERROR_END_OF_STREAM;
+ }
+ ALOGV("end of stream");
+ return err;
+ }
+
+ if (mode == ReadOptions::SEEK_CLOSEST
+ || mode == ReadOptions::SEEK_FRAME_INDEX) {
+ targetSampleTimeUs = (sampleTime * 1000000ll) / mTimescale;
+ }
+
+#if 0
+ uint32_t syncSampleTime;
+ CHECK_EQ(OK, mSampleTable->getMetaDataForSample(
+ syncSampleIndex, NULL, NULL, &syncSampleTime));
+
+ ALOGI("seek to time %lld us => sample at time %lld us, "
+ "sync sample at time %lld us",
+ seekTimeUs,
+ sampleTime * 1000000ll / mTimescale,
+ syncSampleTime * 1000000ll / mTimescale);
+#endif
+
+ mCurrentSampleIndex = syncSampleIndex;
+ }
+
+ if (mBuffer != NULL) {
+ mBuffer->release();
+ mBuffer = NULL;
+ }
+
+ // fall through
+ }
+
+ off64_t offset = 0;
+ size_t size = 0;
+ uint32_t cts, stts;
+ bool isSyncSample;
+ bool newBuffer = false;
+ if (mBuffer == NULL) {
+ newBuffer = true;
+
+ status_t err;
+ if (!mIsHeif) {
+ err = mSampleTable->getMetaDataForSample(
+ mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample, &stts);
+ } else {
+ err = mItemTable->getImageOffsetAndSize(
+ options && options->getSeekTo(&seekTimeUs, &mode) ?
+ &mCurrentSampleIndex : NULL, &offset, &size);
+
+ cts = stts = 0;
+ isSyncSample = 0;
+ ALOGV("image offset %lld, size %zu", (long long)offset, size);
+ }
+
+ if (err != OK) {
+ return err;
+ }
+
+ err = mGroup->acquire_buffer(&mBuffer);
+
+ if (err != OK) {
+ CHECK(mBuffer == NULL);
+ return err;
+ }
+ if (size > mBuffer->size()) {
+ ALOGE("buffer too small: %zu > %zu", size, mBuffer->size());
+ mBuffer->release();
+ mBuffer = NULL;
+ return ERROR_BUFFER_TOO_SMALL;
+ }
+ }
+
+ if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) {
+ if (newBuffer) {
+ ssize_t num_bytes_read =
+ mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
+
+ if (num_bytes_read < (ssize_t)size) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ CHECK(mBuffer != NULL);
+ mBuffer->set_range(0, size);
+ mBuffer->meta_data().clear();
+ mBuffer->meta_data().setInt64(
+ kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data().setInt64(
+ kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+
+ if (targetSampleTimeUs >= 0) {
+ mBuffer->meta_data().setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
+ if (isSyncSample) {
+ mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ }
+
+ ++mCurrentSampleIndex;
+ }
+
+ if (!mIsAVC && !mIsHEVC) {
+ *out = mBuffer;
+ mBuffer = NULL;
+
+ return OK;
+ }
+
+ // Each NAL unit is split up into its constituent fragments and
+ // each one of them returned in its own buffer.
+
+ CHECK(mBuffer->range_length() >= mNALLengthSize);
+
+ const uint8_t *src =
+ (const uint8_t *)mBuffer->data() + mBuffer->range_offset();
+
+ size_t nal_size = parseNALSize(src);
+ if (mNALLengthSize > SIZE_MAX - nal_size) {
+ ALOGE("b/24441553, b/24445122");
+ }
+ if (mBuffer->range_length() - mNALLengthSize < nal_size) {
+ ALOGE("incomplete NAL unit.");
+
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_MALFORMED;
+ }
+
+ MediaBufferBase *clone = mBuffer->clone();
+ CHECK(clone != NULL);
+ clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);
+
+ CHECK(mBuffer != NULL);
+ mBuffer->set_range(
+ mBuffer->range_offset() + mNALLengthSize + nal_size,
+ mBuffer->range_length() - mNALLengthSize - nal_size);
+
+ if (mBuffer->range_length() == 0) {
+ mBuffer->release();
+ mBuffer = NULL;
+ }
+
+ *out = clone;
+
+ return OK;
+ } else {
+ // Whole NAL units are returned but each fragment is prefixed by
+ // the start code (0x00 00 00 01).
+ ssize_t num_bytes_read = 0;
+ int32_t drm = 0;
+ bool usesDRM = (mFormat.findInt32(kKeyIsDRM, &drm) && drm != 0);
+ if (usesDRM) {
+ num_bytes_read =
+ mDataSource->readAt(offset, (uint8_t*)mBuffer->data(), size);
+ } else {
+ num_bytes_read = mDataSource->readAt(offset, mSrcBuffer, size);
+ }
+
+ if (num_bytes_read < (ssize_t)size) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ if (usesDRM) {
+ CHECK(mBuffer != NULL);
+ mBuffer->set_range(0, size);
+
+ } else {
+ uint8_t *dstData = (uint8_t *)mBuffer->data();
+ size_t srcOffset = 0;
+ size_t dstOffset = 0;
+
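+            // Replace each big-endian NAL length prefix with a 0x00000001
+            // start code while copying from mSrcBuffer into the output buffer.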
+ while (srcOffset < size) {
+ bool isMalFormed = !isInRange((size_t)0u, size, srcOffset, mNALLengthSize);
+ size_t nalLength = 0;
+ if (!isMalFormed) {
+ nalLength = parseNALSize(&mSrcBuffer[srcOffset]);
+ srcOffset += mNALLengthSize;
+ isMalFormed = !isInRange((size_t)0u, size, srcOffset, nalLength);
+ }
+
+ if (isMalFormed) {
+ ALOGE("Video is malformed");
+ mBuffer->release();
+ mBuffer = NULL;
+ return ERROR_MALFORMED;
+ }
+
+ if (nalLength == 0) {
+ continue;
+ }
+
+ if (dstOffset > SIZE_MAX - 4 ||
+ dstOffset + 4 > SIZE_MAX - nalLength ||
+ dstOffset + 4 + nalLength > mBuffer->size()) {
+ ALOGE("b/27208621 : %zu %zu", dstOffset, mBuffer->size());
+ android_errorWriteLog(0x534e4554, "27208621");
+ mBuffer->release();
+ mBuffer = NULL;
+ return ERROR_MALFORMED;
+ }
+
+ dstData[dstOffset++] = 0;
+ dstData[dstOffset++] = 0;
+ dstData[dstOffset++] = 0;
+ dstData[dstOffset++] = 1;
+ memcpy(&dstData[dstOffset], &mSrcBuffer[srcOffset], nalLength);
+ srcOffset += nalLength;
+ dstOffset += nalLength;
+ }
+ CHECK_EQ(srcOffset, size);
+ CHECK(mBuffer != NULL);
+ mBuffer->set_range(0, dstOffset);
+ }
+
+ mBuffer->meta_data().clear();
+ mBuffer->meta_data().setInt64(
+ kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data().setInt64(
+ kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
+
+ if (targetSampleTimeUs >= 0) {
+ mBuffer->meta_data().setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
+ if (mIsAVC) {
+ uint32_t layerId = FindAVCLayerId(
+ (const uint8_t *)mBuffer->data(), mBuffer->range_length());
+ mBuffer->meta_data().setInt32(kKeyTemporalLayerId, layerId);
+ }
+
+ if (isSyncSample) {
+ mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ }
+
+ ++mCurrentSampleIndex;
+
+ *out = mBuffer;
+ mBuffer = NULL;
+
+ return OK;
+ }
+}
+
+status_t MPEG4Source::fragmentedRead(
+ MediaBufferBase **out, const ReadOptions *options) {
+
+ ALOGV("MPEG4Source::fragmentedRead");
+
+ CHECK(mStarted);
+
+ *out = NULL;
+
+ int64_t targetSampleTimeUs = -1;
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+
+ int numSidxEntries = mSegments.size();
+ if (numSidxEntries != 0) {
+ int64_t totalTime = 0;
+ off64_t totalOffset = mFirstMoofOffset;
+ for (int i = 0; i < numSidxEntries; i++) {
+ const SidxEntry *se = &mSegments[i];
+ if (totalTime + se->mDurationUs > seekTimeUs) {
+ // The requested time is somewhere in this segment
+ if ((mode == ReadOptions::SEEK_NEXT_SYNC && seekTimeUs > totalTime) ||
+ (mode == ReadOptions::SEEK_CLOSEST_SYNC &&
+ (seekTimeUs - totalTime) > (totalTime + se->mDurationUs - seekTimeUs))) {
+ // requested next sync, or closest sync and it was closer to the end of
+ // this segment
+ totalTime += se->mDurationUs;
+ totalOffset += se->mSize;
+ }
+ break;
+ }
+ totalTime += se->mDurationUs;
+ totalOffset += se->mSize;
+ }
+ mCurrentMoofOffset = totalOffset;
+ mNextMoofOffset = -1;
+ mCurrentSamples.clear();
+ mCurrentSampleIndex = 0;
+ status_t err = parseChunk(&totalOffset);
+ if (err != OK) {
+ return err;
+ }
+ mCurrentTime = totalTime * mTimescale / 1000000ll;
+ } else {
+ // without sidx boxes, we can only seek to 0
+ mCurrentMoofOffset = mFirstMoofOffset;
+ mNextMoofOffset = -1;
+ mCurrentSamples.clear();
+ mCurrentSampleIndex = 0;
+ off64_t tmp = mCurrentMoofOffset;
+ status_t err = parseChunk(&tmp);
+ if (err != OK) {
+ return err;
+ }
+ mCurrentTime = 0;
+ }
+
+ if (mBuffer != NULL) {
+ mBuffer->release();
+ mBuffer = NULL;
+ }
+
+ // fall through
+ }
+
+ off64_t offset = 0;
+ size_t size = 0;
+ uint32_t cts = 0;
+ bool isSyncSample = false;
+ bool newBuffer = false;
+ if (mBuffer == NULL) {
+ newBuffer = true;
+
+ if (mCurrentSampleIndex >= mCurrentSamples.size()) {
+ // move to next fragment if there is one
+ if (mNextMoofOffset <= mCurrentMoofOffset) {
+ return ERROR_END_OF_STREAM;
+ }
+ off64_t nextMoof = mNextMoofOffset;
+ mCurrentMoofOffset = nextMoof;
+ mCurrentSamples.clear();
+ mCurrentSampleIndex = 0;
+ status_t err = parseChunk(&nextMoof);
+ if (err != OK) {
+ return err;
+ }
+ if (mCurrentSampleIndex >= mCurrentSamples.size()) {
+ return ERROR_END_OF_STREAM;
+ }
+ }
+
+ const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
+ offset = smpl->offset;
+ size = smpl->size;
+ cts = mCurrentTime + smpl->compositionOffset;
+ mCurrentTime += smpl->duration;
+ isSyncSample = (mCurrentSampleIndex == 0); // XXX
+
+ status_t err = mGroup->acquire_buffer(&mBuffer);
+
+ if (err != OK) {
+ CHECK(mBuffer == NULL);
+ ALOGV("acquire_buffer returned %d", err);
+ return err;
+ }
+ if (size > mBuffer->size()) {
+ ALOGE("buffer too small: %zu > %zu", size, mBuffer->size());
+ mBuffer->release();
+ mBuffer = NULL;
+ return ERROR_BUFFER_TOO_SMALL;
+ }
+ }
+
+ const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
+ MetaDataBase &bufmeta = mBuffer->meta_data();
+ bufmeta.clear();
+ if (smpl->encryptedsizes.size()) {
+ // store clear/encrypted lengths in metadata
+ bufmeta.setData(kKeyPlainSizes, 0,
+ smpl->clearsizes.array(), smpl->clearsizes.size() * 4);
+ bufmeta.setData(kKeyEncryptedSizes, 0,
+ smpl->encryptedsizes.array(), smpl->encryptedsizes.size() * 4);
+ bufmeta.setInt32(kKeyCryptoDefaultIVSize, mDefaultIVSize);
+ bufmeta.setInt32(kKeyCryptoMode, mCryptoMode);
+ bufmeta.setData(kKeyCryptoKey, 0, mCryptoKey, 16);
+ bufmeta.setInt32(kKeyEncryptedByteBlock, mDefaultEncryptedByteBlock);
+ bufmeta.setInt32(kKeySkipByteBlock, mDefaultSkipByteBlock);
+
+ uint32_t type = 0;
+ const void *iv = NULL;
+ size_t ivlength = 0;
+ if (!mFormat.findData(
+ kKeyCryptoIV, &type, &iv, &ivlength)) {
+ iv = smpl->iv;
+ ivlength = 16; // use 16 or the actual size?
+ }
+ bufmeta.setData(kKeyCryptoIV, 0, iv, ivlength);
+
+ }
+
+    if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) {
+ if (newBuffer) {
+ if (!isInRange((size_t)0u, mBuffer->size(), size)) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ ALOGE("fragmentedRead ERROR_MALFORMED size %zu", size);
+ return ERROR_MALFORMED;
+ }
+
+ ssize_t num_bytes_read =
+ mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
+
+ if (num_bytes_read < (ssize_t)size) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ ALOGE("i/o error");
+ return ERROR_IO;
+ }
+
+ CHECK(mBuffer != NULL);
+ mBuffer->set_range(0, size);
+ mBuffer->meta_data().setInt64(
+ kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data().setInt64(
+ kKeyDuration, ((int64_t)smpl->duration * 1000000) / mTimescale);
+
+ if (targetSampleTimeUs >= 0) {
+ mBuffer->meta_data().setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
+ if (mIsAVC) {
+ uint32_t layerId = FindAVCLayerId(
+ (const uint8_t *)mBuffer->data(), mBuffer->range_length());
+ mBuffer->meta_data().setInt32(kKeyTemporalLayerId, layerId);
+ }
+
+ if (isSyncSample) {
+ mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ }
+
+ ++mCurrentSampleIndex;
+ }
+
+ if (!mIsAVC && !mIsHEVC) {
+ *out = mBuffer;
+ mBuffer = NULL;
+
+ return OK;
+ }
+
+ // Each NAL unit is split up into its constituent fragments and
+ // each one of them returned in its own buffer.
+
+ CHECK(mBuffer->range_length() >= mNALLengthSize);
+
+ const uint8_t *src =
+ (const uint8_t *)mBuffer->data() + mBuffer->range_offset();
+
+ size_t nal_size = parseNALSize(src);
+ if (mNALLengthSize > SIZE_MAX - nal_size) {
+ ALOGE("b/24441553, b/24445122");
+ }
+
+ if (mBuffer->range_length() - mNALLengthSize < nal_size) {
+ ALOGE("incomplete NAL unit.");
+
+ mBuffer->release();
+ mBuffer = NULL;
+
+ return ERROR_MALFORMED;
+ }
+
+ MediaBufferBase *clone = mBuffer->clone();
+ CHECK(clone != NULL);
+ clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);
+
+ CHECK(mBuffer != NULL);
+ mBuffer->set_range(
+ mBuffer->range_offset() + mNALLengthSize + nal_size,
+ mBuffer->range_length() - mNALLengthSize - nal_size);
+
+ if (mBuffer->range_length() == 0) {
+ mBuffer->release();
+ mBuffer = NULL;
+ }
+
+ *out = clone;
+
+ return OK;
+ } else {
+ ALOGV("whole NAL");
+ // Whole NAL units are returned but each fragment is prefixed by
+ // the start code (0x00 00 00 01).
+ ssize_t num_bytes_read = 0;
+ int32_t drm = 0;
+ bool usesDRM = (mFormat.findInt32(kKeyIsDRM, &drm) && drm != 0);
+ void *data = NULL;
+ bool isMalFormed = false;
+ if (usesDRM) {
+ if (mBuffer == NULL || !isInRange((size_t)0u, mBuffer->size(), size)) {
+ isMalFormed = true;
+ } else {
+ data = mBuffer->data();
+ }
+ } else {
+ int32_t max_size;
+ if (!mFormat.findInt32(kKeyMaxInputSize, &max_size)
+ || !isInRange((size_t)0u, (size_t)max_size, size)) {
+ isMalFormed = true;
+ } else {
+ data = mSrcBuffer;
+ }
+ }
+
+ if (isMalFormed || data == NULL) {
+ ALOGE("isMalFormed size %zu", size);
+ if (mBuffer != NULL) {
+ mBuffer->release();
+ mBuffer = NULL;
+ }
+ return ERROR_MALFORMED;
+ }
+ num_bytes_read = mDataSource->readAt(offset, data, size);
+
+ if (num_bytes_read < (ssize_t)size) {
+ mBuffer->release();
+ mBuffer = NULL;
+
+ ALOGE("i/o error");
+ return ERROR_IO;
+ }
+
+ if (usesDRM) {
+ CHECK(mBuffer != NULL);
+ mBuffer->set_range(0, size);
+
+ } else {
+ uint8_t *dstData = (uint8_t *)mBuffer->data();
+ size_t srcOffset = 0;
+ size_t dstOffset = 0;
+
+ while (srcOffset < size) {
+ isMalFormed = !isInRange((size_t)0u, size, srcOffset, mNALLengthSize);
+ size_t nalLength = 0;
+ if (!isMalFormed) {
+ nalLength = parseNALSize(&mSrcBuffer[srcOffset]);
+ srcOffset += mNALLengthSize;
+ isMalFormed = !isInRange((size_t)0u, size, srcOffset, nalLength)
+ || !isInRange((size_t)0u, mBuffer->size(), dstOffset, (size_t)4u)
+ || !isInRange((size_t)0u, mBuffer->size(), dstOffset + 4, nalLength);
+ }
+
+ if (isMalFormed) {
+ ALOGE("Video is malformed; nalLength %zu", nalLength);
+ mBuffer->release();
+ mBuffer = NULL;
+ return ERROR_MALFORMED;
+ }
+
+ if (nalLength == 0) {
+ continue;
+ }
+
+ if (dstOffset > SIZE_MAX - 4 ||
+ dstOffset + 4 > SIZE_MAX - nalLength ||
+ dstOffset + 4 + nalLength > mBuffer->size()) {
+ ALOGE("b/26365349 : %zu %zu", dstOffset, mBuffer->size());
+ android_errorWriteLog(0x534e4554, "26365349");
+ mBuffer->release();
+ mBuffer = NULL;
+ return ERROR_MALFORMED;
+ }
+
+ dstData[dstOffset++] = 0;
+ dstData[dstOffset++] = 0;
+ dstData[dstOffset++] = 0;
+ dstData[dstOffset++] = 1;
+ memcpy(&dstData[dstOffset], &mSrcBuffer[srcOffset], nalLength);
+ srcOffset += nalLength;
+ dstOffset += nalLength;
+ }
+ CHECK_EQ(srcOffset, size);
+ CHECK(mBuffer != NULL);
+ mBuffer->set_range(0, dstOffset);
+ }
+
+ mBuffer->meta_data().setInt64(
+ kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
+ mBuffer->meta_data().setInt64(
+ kKeyDuration, ((int64_t)smpl->duration * 1000000) / mTimescale);
+
+ if (targetSampleTimeUs >= 0) {
+ mBuffer->meta_data().setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
+ if (isSyncSample) {
+ mBuffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ }
+
+ ++mCurrentSampleIndex;
+
+ *out = mBuffer;
+ mBuffer = NULL;
+
+ return OK;
+ }
+}
+
+MPEG4Extractor::Track *MPEG4Extractor::findTrackByMimePrefix(
+ const char *mimePrefix) {
+ for (Track *track = mFirstTrack; track != NULL; track = track->next) {
+ const char *mime;
+ if (track->meta.findCString(kKeyMIMEType, &mime)
+ && !strncasecmp(mime, mimePrefix, strlen(mimePrefix))) {
+ return track;
+ }
+ }
+
+ return NULL;
+}
+
+static bool LegacySniffMPEG4(DataSourceBase *source, float *confidence) {
+ uint8_t header[8];
+
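+    // Read from offset 4 so 'header' covers the box type ('ftyp') and the
+    // major brand.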
+ ssize_t n = source->readAt(4, header, sizeof(header));
+ if (n < (ssize_t)sizeof(header)) {
+ return false;
+ }
+
+ if (!memcmp(header, "ftyp3gp", 7) || !memcmp(header, "ftypmp42", 8)
+ || !memcmp(header, "ftyp3gr6", 8) || !memcmp(header, "ftyp3gs6", 8)
+ || !memcmp(header, "ftyp3ge6", 8) || !memcmp(header, "ftyp3gg6", 8)
+ || !memcmp(header, "ftypisom", 8) || !memcmp(header, "ftypM4V ", 8)
+ || !memcmp(header, "ftypM4A ", 8) || !memcmp(header, "ftypf4v ", 8)
+ || !memcmp(header, "ftypkddi", 8) || !memcmp(header, "ftypM4VP", 8)
+ || !memcmp(header, "ftypmif1", 8) || !memcmp(header, "ftypheic", 8)
+ || !memcmp(header, "ftypmsf1", 8) || !memcmp(header, "ftyphevc", 8)) {
+ *confidence = 0.4;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool isCompatibleBrand(uint32_t fourcc) {
+ static const uint32_t kCompatibleBrands[] = {
+ FOURCC('i', 's', 'o', 'm'),
+ FOURCC('i', 's', 'o', '2'),
+ FOURCC('a', 'v', 'c', '1'),
+ FOURCC('h', 'v', 'c', '1'),
+ FOURCC('h', 'e', 'v', '1'),
+ FOURCC('3', 'g', 'p', '4'),
+ FOURCC('m', 'p', '4', '1'),
+ FOURCC('m', 'p', '4', '2'),
+ FOURCC('d', 'a', 's', 'h'),
+
+ // Won't promise that the following file types can be played.
+ // Just give these file types a chance.
+ FOURCC('q', 't', ' ', ' '), // Apple's QuickTime
+ FOURCC('M', 'S', 'N', 'V'), // Sony's PSP
+
+ FOURCC('3', 'g', '2', 'a'), // 3GPP2
+ FOURCC('3', 'g', '2', 'b'),
+ FOURCC('m', 'i', 'f', '1'), // HEIF image
+ FOURCC('h', 'e', 'i', 'c'), // HEIF image
+ FOURCC('m', 's', 'f', '1'), // HEIF image sequence
+ FOURCC('h', 'e', 'v', 'c'), // HEIF image sequence
+ };
+
+ for (size_t i = 0;
+ i < sizeof(kCompatibleBrands) / sizeof(kCompatibleBrands[0]);
+ ++i) {
+ if (kCompatibleBrands[i] == fourcc) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Attempt to actually parse the 'ftyp' atom and determine if a suitable
+// compatible brand is present. Scanning stops once the 'moov' atom is
+// reached; its end offset is noted but not reported to the caller.
+static bool BetterSniffMPEG4(DataSourceBase *source, float *confidence) {
+ // We scan up to 128 bytes to identify this file as an MP4.
+ static const off64_t kMaxScanOffset = 128ll;
+
+ off64_t offset = 0ll;
+ bool foundGoodFileType = false;
+ off64_t moovAtomEndOffset = -1ll;
+ bool done = false;
+
+ while (!done && offset < kMaxScanOffset) {
+ uint32_t hdr[2];
+ if (source->readAt(offset, hdr, 8) < 8) {
+ return false;
+ }
+
+ uint64_t chunkSize = ntohl(hdr[0]);
+ uint32_t chunkType = ntohl(hdr[1]);
+ off64_t chunkDataOffset = offset + 8;
+
+ if (chunkSize == 1) {
+ if (source->readAt(offset + 8, &chunkSize, 8) < 8) {
+ return false;
+ }
+
+ chunkSize = ntoh64(chunkSize);
+ chunkDataOffset += 8;
+
+ if (chunkSize < 16) {
+ // The smallest valid chunk is 16 bytes long in this case.
+ return false;
+ }
+
+ } else if (chunkSize < 8) {
+ // The smallest valid chunk is 8 bytes long.
+ return false;
+ }
+
+        // (chunkDataOffset - offset) is either 8 or 16
+ off64_t chunkDataSize = chunkSize - (chunkDataOffset - offset);
+ if (chunkDataSize < 0) {
+ ALOGE("b/23540914");
+ return false;
+ }
+
+ char chunkstring[5];
+ MakeFourCCString(chunkType, chunkstring);
+ ALOGV("saw chunk type %s, size %" PRIu64 " @ %lld", chunkstring, chunkSize, (long long)offset);
+ switch (chunkType) {
+ case FOURCC('f', 't', 'y', 'p'):
+ {
+ if (chunkDataSize < 8) {
+ return false;
+ }
+
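+            // ftyp payload: major_brand (4 bytes), minor_version (4 bytes),
+            // then compatible_brands[]; the loop below checks the major brand
+            // (i == 0) and every compatible brand, skipping minor_version (i == 1).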
+ uint32_t numCompatibleBrands = (chunkDataSize - 8) / 4;
+ for (size_t i = 0; i < numCompatibleBrands + 2; ++i) {
+ if (i == 1) {
+ // Skip this index, it refers to the minorVersion,
+ // not a brand.
+ continue;
+ }
+
+ uint32_t brand;
+ if (source->readAt(
+ chunkDataOffset + 4 * i, &brand, 4) < 4) {
+ return false;
+ }
+
+ brand = ntohl(brand);
+
+ if (isCompatibleBrand(brand)) {
+ foundGoodFileType = true;
+ break;
+ }
+ }
+
+ if (!foundGoodFileType) {
+ return false;
+ }
+
+ break;
+ }
+
+ case FOURCC('m', 'o', 'o', 'v'):
+ {
+ moovAtomEndOffset = offset + chunkSize;
+
+ done = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ offset += chunkSize;
+ }
+
+ if (!foundGoodFileType) {
+ return false;
+ }
+
+ *confidence = 0.4f;
+
+ return true;
+}
+
+static MediaExtractor* CreateExtractor(DataSourceBase *source, void *) {
+ return new MPEG4Extractor(source);
+}
+
+static MediaExtractor::CreatorFunc Sniff(
+ DataSourceBase *source, float *confidence, void **,
+ MediaExtractor::FreeMetaFunc *) {
+ if (BetterSniffMPEG4(source, confidence)) {
+ return CreateExtractor;
+ }
+
+ if (LegacySniffMPEG4(source, confidence)) {
+ ALOGW("Identified supported mpeg4 through LegacySniffMPEG4.");
+ return CreateExtractor;
+ }
+
+ return NULL;
+}
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("27575c67-4417-4c54-8d3d-8e626985a164"),
+ 1, // version
+ "MP4 Extractor",
+ Sniff
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
new file mode 100644
index 0000000..3ea0963
--- /dev/null
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MPEG4_EXTRACTOR_H_
+
+#define MPEG4_EXTRACTOR_H_
+
+#include <arpa/inet.h>
+
+#include <media/DataSourceBase.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/MetaDataBase.h>
+#include <media/stagefright/foundation/AString.h>
+#include <utils/KeyedVector.h>
+#include <utils/List.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+namespace android {
+struct AMessage;
+class DataSourceBase;
+struct CachedRangedDataSource;
+class SampleTable;
+class String8;
+namespace heif {
+class ItemTable;
+}
+using heif::ItemTable;
+
+struct SidxEntry {
+ size_t mSize;
+ uint32_t mDurationUs;
+};
+
+struct Trex {
+ uint32_t track_ID;
+ uint32_t default_sample_description_index;
+ uint32_t default_sample_duration;
+ uint32_t default_sample_size;
+ uint32_t default_sample_flags;
+};
+
+class MPEG4Extractor : public MediaExtractor {
+public:
+ explicit MPEG4Extractor(DataSourceBase *source, const char *mime = NULL);
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+ virtual uint32_t flags() const;
+ virtual const char * name() { return "MPEG4Extractor"; }
+
+protected:
+ virtual ~MPEG4Extractor();
+
+private:
+
+ struct PsshInfo {
+ uint8_t uuid[16];
+ uint32_t datalen;
+ uint8_t *data;
+ };
+ struct Track {
+ Track *next;
+ MetaDataBase meta;
+ uint32_t timescale;
+ sp<SampleTable> sampleTable;
+ bool includes_expensive_metadata;
+ bool skipTrack;
+ bool has_elst;
+ int64_t elst_media_time;
+ uint64_t elst_segment_duration;
+ bool subsample_encryption;
+ };
+
+ Vector<SidxEntry> mSidxEntries;
+ off64_t mMoofOffset;
+ bool mMoofFound;
+ bool mMdatFound;
+
+ Vector<PsshInfo> mPssh;
+
+ Vector<Trex> mTrex;
+
+ DataSourceBase *mDataSource;
+ CachedRangedDataSource *mCachedSource;
+ status_t mInitCheck;
+ uint32_t mHeaderTimescale;
+ bool mIsQT;
+ bool mIsHeif;
+ bool mHasMoovBox;
+ bool mPreferHeif;
+
+ Track *mFirstTrack, *mLastTrack;
+
+ MetaDataBase mFileMetaData;
+
+ Vector<uint32_t> mPath;
+ String8 mLastCommentMean;
+ String8 mLastCommentName;
+ String8 mLastCommentData;
+
+ KeyedVector<uint32_t, AString> mMetaKeyMap;
+
+ status_t readMetaData();
+ status_t parseChunk(off64_t *offset, int depth);
+ status_t parseITunesMetaData(off64_t offset, size_t size);
+ status_t parseColorInfo(off64_t offset, size_t size);
+ status_t parse3GPPMetaData(off64_t offset, size_t size, int depth);
+ void parseID3v2MetaData(off64_t offset);
+ status_t parseQTMetaKey(off64_t data_offset, size_t data_size);
+ status_t parseQTMetaVal(int32_t keyId, off64_t data_offset, size_t data_size);
+
+ status_t updateAudioTrackInfoFromESDS_MPEG4Audio(
+ const void *esds_data, size_t esds_size);
+
+ static status_t verifyTrack(Track *track);
+
+ sp<ItemTable> mItemTable;
+
+ status_t parseTrackHeader(off64_t data_offset, off64_t data_size);
+
+ status_t parseSegmentIndex(off64_t data_offset, size_t data_size);
+
+ Track *findTrackByMimePrefix(const char *mimePrefix);
+
+ status_t parseAC3SampleEntry(off64_t offset);
+ status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+
+ MPEG4Extractor(const MPEG4Extractor &);
+ MPEG4Extractor &operator=(const MPEG4Extractor &);
+};
+
+bool SniffMPEG4(
+ DataSourceBase *source, String8 *mimeType, float *confidence,
+ sp<AMessage> *);
+
+} // namespace android
+
+#endif // MPEG4_EXTRACTOR_H_
diff --git a/media/libstagefright/matroska/NOTICE b/media/extractors/mp4/NOTICE
similarity index 100%
copy from media/libstagefright/matroska/NOTICE
copy to media/extractors/mp4/NOTICE
diff --git a/media/extractors/mp4/SampleIterator.cpp b/media/extractors/mp4/SampleIterator.cpp
new file mode 100644
index 0000000..93ee7c6
--- /dev/null
+++ b/media/extractors/mp4/SampleIterator.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleIterator"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "SampleIterator.h"
+
+#include <arpa/inet.h>
+
+#include <media/DataSourceBase.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+
+#include "SampleTable.h"
+
+namespace android {
+
+SampleIterator::SampleIterator(SampleTable *table)
+ : mTable(table),
+ mInitialized(false),
+ mTimeToSampleIndex(0),
+ mTTSSampleIndex(0),
+ mTTSSampleTime(0),
+ mTTSCount(0),
+ mTTSDuration(0) {
+ reset();
+}
+
+void SampleIterator::reset() {
+ mSampleToChunkIndex = 0;
+ mFirstChunk = 0;
+ mFirstChunkSampleIndex = 0;
+ mStopChunk = 0;
+ mStopChunkSampleIndex = 0;
+ mSamplesPerChunk = 0;
+ mChunkDesc = 0;
+}
+
+status_t SampleIterator::seekTo(uint32_t sampleIndex) {
+ ALOGV("seekTo(%d)", sampleIndex);
+
+ if (sampleIndex >= mTable->mNumSampleSizes) {
+ return ERROR_END_OF_STREAM;
+ }
+
+ if (mTable->mSampleToChunkOffset < 0
+ || mTable->mChunkOffsetOffset < 0
+ || mTable->mSampleSizeOffset < 0
+ || mTable->mTimeToSampleCount == 0) {
+
+ return ERROR_MALFORMED;
+ }
+
+ if (mInitialized && mCurrentSampleIndex == sampleIndex) {
+ return OK;
+ }
+
+ if (!mInitialized || sampleIndex < mFirstChunkSampleIndex) {
+ reset();
+ }
+
+ if (sampleIndex >= mStopChunkSampleIndex) {
+ status_t err;
+ if ((err = findChunkRange(sampleIndex)) != OK) {
+ ALOGE("findChunkRange failed");
+ return err;
+ }
+ }
+
+ CHECK(sampleIndex < mStopChunkSampleIndex);
+
+ if (mSamplesPerChunk == 0) {
+ ALOGE("b/22802344");
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t chunk =
+ (sampleIndex - mFirstChunkSampleIndex) / mSamplesPerChunk
+ + mFirstChunk;
+
+ if (!mInitialized || chunk != mCurrentChunkIndex) {
+ status_t err;
+ if ((err = getChunkOffset(chunk, &mCurrentChunkOffset)) != OK) {
+ ALOGE("getChunkOffset return error");
+ return err;
+ }
+
+ mCurrentChunkSampleSizes.clear();
+
+ uint32_t firstChunkSampleIndex =
+ mFirstChunkSampleIndex
+ + mSamplesPerChunk * (chunk - mFirstChunk);
+
+ for (uint32_t i = 0; i < mSamplesPerChunk; ++i) {
+ size_t sampleSize;
+ if ((err = getSampleSizeDirect(
+ firstChunkSampleIndex + i, &sampleSize)) != OK) {
+ ALOGE("getSampleSizeDirect return error");
+ mCurrentChunkSampleSizes.clear();
+ return err;
+ }
+
+ mCurrentChunkSampleSizes.push(sampleSize);
+ }
+
+ mCurrentChunkIndex = chunk;
+ }
+
+ uint32_t chunkRelativeSampleIndex =
+ (sampleIndex - mFirstChunkSampleIndex) % mSamplesPerChunk;
+
+ mCurrentSampleOffset = mCurrentChunkOffset;
+ for (uint32_t i = 0; i < chunkRelativeSampleIndex; ++i) {
+ mCurrentSampleOffset += mCurrentChunkSampleSizes[i];
+ }
+
+ mCurrentSampleSize = mCurrentChunkSampleSizes[chunkRelativeSampleIndex];
+ if (sampleIndex < mTTSSampleIndex) {
+ mTimeToSampleIndex = 0;
+ mTTSSampleIndex = 0;
+ mTTSSampleTime = 0;
+ mTTSCount = 0;
+ mTTSDuration = 0;
+ }
+
+ status_t err;
+ if ((err = findSampleTimeAndDuration(
+ sampleIndex, &mCurrentSampleTime, &mCurrentSampleDuration)) != OK) {
+ ALOGE("findSampleTime return error");
+ return err;
+ }
+
+ mCurrentSampleIndex = sampleIndex;
+
+ mInitialized = true;
+
+ return OK;
+}
+
+status_t SampleIterator::findChunkRange(uint32_t sampleIndex) {
+ CHECK(sampleIndex >= mFirstChunkSampleIndex);
+
+ while (sampleIndex >= mStopChunkSampleIndex) {
+ if (mSampleToChunkIndex == mTable->mNumSampleToChunkOffsets) {
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mFirstChunkSampleIndex = mStopChunkSampleIndex;
+
+ const SampleTable::SampleToChunkEntry *entry =
+ &mTable->mSampleToChunkEntries[mSampleToChunkIndex];
+
+ mFirstChunk = entry->startChunk;
+ mSamplesPerChunk = entry->samplesPerChunk;
+ mChunkDesc = entry->chunkDesc;
+
+ if (mSampleToChunkIndex + 1 < mTable->mNumSampleToChunkOffsets) {
+ mStopChunk = entry[1].startChunk;
+
+ if (mSamplesPerChunk == 0 || mStopChunk < mFirstChunk ||
+ (mStopChunk - mFirstChunk) > UINT32_MAX / mSamplesPerChunk ||
+ ((mStopChunk - mFirstChunk) * mSamplesPerChunk >
+ UINT32_MAX - mFirstChunkSampleIndex)) {
+
+ return ERROR_OUT_OF_RANGE;
+ }
+ mStopChunkSampleIndex =
+ mFirstChunkSampleIndex
+ + (mStopChunk - mFirstChunk) * mSamplesPerChunk;
+ } else {
+ mStopChunk = 0xffffffff;
+ mStopChunkSampleIndex = 0xffffffff;
+ }
+
+ ++mSampleToChunkIndex;
+ }
+
+ return OK;
+}
+
+status_t SampleIterator::getChunkOffset(uint32_t chunk, off64_t *offset) {
+ *offset = 0;
+
+ if (chunk >= mTable->mNumChunkOffsets) {
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ if (mTable->mChunkOffsetType == SampleTable::kChunkOffsetType32) {
+ uint32_t offset32;
+
+ if (mTable->mDataSource->readAt(
+ mTable->mChunkOffsetOffset + 8 + 4 * chunk,
+ &offset32,
+ sizeof(offset32)) < (ssize_t)sizeof(offset32)) {
+ return ERROR_IO;
+ }
+
+ *offset = ntohl(offset32);
+ } else {
+ CHECK_EQ(mTable->mChunkOffsetType, SampleTable::kChunkOffsetType64);
+
+ uint64_t offset64;
+ if (mTable->mDataSource->readAt(
+ mTable->mChunkOffsetOffset + 8 + 8 * chunk,
+ &offset64,
+ sizeof(offset64)) < (ssize_t)sizeof(offset64)) {
+ return ERROR_IO;
+ }
+
+ *offset = ntoh64(offset64);
+ }
+
+ return OK;
+}
+
+status_t SampleIterator::getSampleSizeDirect(
+ uint32_t sampleIndex, size_t *size) {
+ *size = 0;
+
+ if (sampleIndex >= mTable->mNumSampleSizes) {
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ if (mTable->mDefaultSampleSize > 0) {
+ *size = mTable->mDefaultSampleSize;
+ return OK;
+ }
+
+ switch (mTable->mSampleSizeFieldSize) {
+ case 32:
+ {
+ uint32_t x;
+ if (mTable->mDataSource->readAt(
+ mTable->mSampleSizeOffset + 12 + 4 * sampleIndex,
+ &x, sizeof(x)) < (ssize_t)sizeof(x)) {
+ return ERROR_IO;
+ }
+
+ *size = ntohl(x);
+ break;
+ }
+
+ case 16:
+ {
+ uint16_t x;
+ if (mTable->mDataSource->readAt(
+ mTable->mSampleSizeOffset + 12 + 2 * sampleIndex,
+ &x, sizeof(x)) < (ssize_t)sizeof(x)) {
+ return ERROR_IO;
+ }
+
+ *size = ntohs(x);
+ break;
+ }
+
+ case 8:
+ {
+ uint8_t x;
+ if (mTable->mDataSource->readAt(
+ mTable->mSampleSizeOffset + 12 + sampleIndex,
+ &x, sizeof(x)) < (ssize_t)sizeof(x)) {
+ return ERROR_IO;
+ }
+
+ *size = x;
+ break;
+ }
+
+ default:
+ {
+ CHECK_EQ(mTable->mSampleSizeFieldSize, 4u);
+
+ uint8_t x;
+ if (mTable->mDataSource->readAt(
+ mTable->mSampleSizeOffset + 12 + sampleIndex / 2,
+ &x, sizeof(x)) < (ssize_t)sizeof(x)) {
+ return ERROR_IO;
+ }
+
+ *size = (sampleIndex & 1) ? x & 0x0f : x >> 4;
+ break;
+ }
+ }
+
+ return OK;
+}
+
+status_t SampleIterator::findSampleTimeAndDuration(
+ uint32_t sampleIndex, uint32_t *time, uint32_t *duration) {
+ if (sampleIndex >= mTable->mNumSampleSizes) {
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ while (true) {
+ if (mTTSSampleIndex > UINT32_MAX - mTTSCount) {
+ return ERROR_OUT_OF_RANGE;
+ }
+ if(sampleIndex < mTTSSampleIndex + mTTSCount) {
+ break;
+ }
+ if (mTimeToSampleIndex == mTable->mTimeToSampleCount ||
+ (mTTSDuration != 0 && mTTSCount > UINT32_MAX / mTTSDuration) ||
+ mTTSSampleTime > UINT32_MAX - (mTTSCount * mTTSDuration)) {
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mTTSSampleIndex += mTTSCount;
+ mTTSSampleTime += mTTSCount * mTTSDuration;
+
+ mTTSCount = mTable->mTimeToSample[2 * mTimeToSampleIndex];
+ mTTSDuration = mTable->mTimeToSample[2 * mTimeToSampleIndex + 1];
+
+ ++mTimeToSampleIndex;
+ }
+
+ *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
+
+ int32_t offset = mTable->getCompositionTimeOffset(sampleIndex);
+ if ((offset < 0 && *time < (offset == INT32_MIN ?
+ INT32_MAX : uint32_t(-offset))) ||
+ (offset > 0 && *time > UINT32_MAX - offset)) {
+ ALOGE("%u + %d would overflow", *time, offset);
+ return ERROR_OUT_OF_RANGE;
+ }
+ if (offset > 0) {
+ *time += offset;
+ } else {
+ *time -= (offset == INT32_MIN ? INT32_MAX : (-offset));
+ }
+
+ *duration = mTTSDuration;
+
+ return OK;
+}
+
+} // namespace android
+
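Editor's note: most of SampleIterator::seekTo()/findChunkRange() above is the arithmetic that turns a flat sample index into a (chunk, index-within-chunk) pair using 'stsc' runs. Below is a hedged, self-contained sketch of that mapping, with std::vector standing in for the table SampleTable actually owns and totalChunks standing in for the open-ended last run.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-in for the entries the iterator reads via SampleTable.
struct StscRun {
    uint32_t firstChunk;       // 0-based first chunk this run applies to
    uint32_t samplesPerChunk;  // samples in every chunk of the run
};

// Walk the runs, counting how many samples each run covers, until the one that
// contains sampleIndex; then divide/modulo inside that run.
static bool mapSampleToChunk(const std::vector<StscRun> &runs,
                             uint32_t totalChunks,
                             uint32_t sampleIndex,
                             uint32_t *chunk,
                             uint32_t *indexInChunk) {
    uint32_t firstSampleOfRun = 0;
    for (size_t i = 0; i < runs.size(); ++i) {
        uint32_t stopChunk = (i + 1 < runs.size()) ? runs[i + 1].firstChunk
                                                   : totalChunks;
        if (runs[i].samplesPerChunk == 0 || stopChunk < runs[i].firstChunk) {
            return false;  // malformed, as the iterator also rejects
        }
        uint32_t samplesInRun =
                (stopChunk - runs[i].firstChunk) * runs[i].samplesPerChunk;
        if (sampleIndex < firstSampleOfRun + samplesInRun) {
            uint32_t rel = sampleIndex - firstSampleOfRun;
            *chunk = runs[i].firstChunk + rel / runs[i].samplesPerChunk;
            *indexInChunk = rel % runs[i].samplesPerChunk;
            return true;
        }
        firstSampleOfRun += samplesInRun;
    }
    return false;  // sample index beyond the table
}

The real code additionally guards every multiplication and addition against 32-bit overflow; the sketch omits those checks for brevity.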
diff --git a/media/libstagefright/include/SampleIterator.h b/media/extractors/mp4/SampleIterator.h
similarity index 100%
rename from media/libstagefright/include/SampleIterator.h
rename to media/extractors/mp4/SampleIterator.h
diff --git a/media/extractors/mp4/SampleTable.cpp b/media/extractors/mp4/SampleTable.cpp
new file mode 100644
index 0000000..81c353e
--- /dev/null
+++ b/media/extractors/mp4/SampleTable.cpp
@@ -0,0 +1,1008 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SampleTable"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <limits>
+
+#include "SampleTable.h"
+#include "SampleIterator.h"
+
+#include <arpa/inet.h>
+
+#include <media/DataSourceBase.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+
+/* TODO: remove after being merged into other branches */
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+namespace android {
+
+// static
+const uint32_t SampleTable::kChunkOffsetType32 = FOURCC('s', 't', 'c', 'o');
+// static
+const uint32_t SampleTable::kChunkOffsetType64 = FOURCC('c', 'o', '6', '4');
+// static
+const uint32_t SampleTable::kSampleSizeType32 = FOURCC('s', 't', 's', 'z');
+// static
+const uint32_t SampleTable::kSampleSizeTypeCompact = FOURCC('s', 't', 'z', '2');
+
+////////////////////////////////////////////////////////////////////////////////
+
+const off64_t kMaxOffset = std::numeric_limits<off64_t>::max();
+
+struct SampleTable::CompositionDeltaLookup {
+ CompositionDeltaLookup();
+
+ void setEntries(
+ const int32_t *deltaEntries, size_t numDeltaEntries);
+
+ int32_t getCompositionTimeOffset(uint32_t sampleIndex);
+
+private:
+ Mutex mLock;
+
+ const int32_t *mDeltaEntries;
+ size_t mNumDeltaEntries;
+
+ size_t mCurrentDeltaEntry;
+ size_t mCurrentEntrySampleIndex;
+
+ DISALLOW_EVIL_CONSTRUCTORS(CompositionDeltaLookup);
+};
+
+SampleTable::CompositionDeltaLookup::CompositionDeltaLookup()
+ : mDeltaEntries(NULL),
+ mNumDeltaEntries(0),
+ mCurrentDeltaEntry(0),
+ mCurrentEntrySampleIndex(0) {
+}
+
+void SampleTable::CompositionDeltaLookup::setEntries(
+ const int32_t *deltaEntries, size_t numDeltaEntries) {
+ Mutex::Autolock autolock(mLock);
+
+ mDeltaEntries = deltaEntries;
+ mNumDeltaEntries = numDeltaEntries;
+ mCurrentDeltaEntry = 0;
+ mCurrentEntrySampleIndex = 0;
+}
+
+int32_t SampleTable::CompositionDeltaLookup::getCompositionTimeOffset(
+ uint32_t sampleIndex) {
+ Mutex::Autolock autolock(mLock);
+
+ if (mDeltaEntries == NULL) {
+ return 0;
+ }
+
+ if (sampleIndex < mCurrentEntrySampleIndex) {
+ mCurrentDeltaEntry = 0;
+ mCurrentEntrySampleIndex = 0;
+ }
+
+ while (mCurrentDeltaEntry < mNumDeltaEntries) {
+ uint32_t sampleCount = mDeltaEntries[2 * mCurrentDeltaEntry];
+ if (sampleIndex < mCurrentEntrySampleIndex + sampleCount) {
+ return mDeltaEntries[2 * mCurrentDeltaEntry + 1];
+ }
+
+ mCurrentEntrySampleIndex += sampleCount;
+ ++mCurrentDeltaEntry;
+ }
+
+ return 0;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+SampleTable::SampleTable(DataSourceBase *source)
+ : mDataSource(source),
+ mChunkOffsetOffset(-1),
+ mChunkOffsetType(0),
+ mNumChunkOffsets(0),
+ mSampleToChunkOffset(-1),
+ mNumSampleToChunkOffsets(0),
+ mSampleSizeOffset(-1),
+ mSampleSizeFieldSize(0),
+ mDefaultSampleSize(0),
+ mNumSampleSizes(0),
+ mHasTimeToSample(false),
+ mTimeToSampleCount(0),
+ mTimeToSample(NULL),
+ mSampleTimeEntries(NULL),
+ mCompositionTimeDeltaEntries(NULL),
+ mNumCompositionTimeDeltaEntries(0),
+ mCompositionDeltaLookup(new CompositionDeltaLookup),
+ mSyncSampleOffset(-1),
+ mNumSyncSamples(0),
+ mSyncSamples(NULL),
+ mLastSyncSampleIndex(0),
+ mSampleToChunkEntries(NULL),
+ mTotalSize(0) {
+ mSampleIterator = new SampleIterator(this);
+}
+
+SampleTable::~SampleTable() {
+ delete[] mSampleToChunkEntries;
+ mSampleToChunkEntries = NULL;
+
+ delete[] mSyncSamples;
+ mSyncSamples = NULL;
+
+ delete[] mTimeToSample;
+ mTimeToSample = NULL;
+
+ delete mCompositionDeltaLookup;
+ mCompositionDeltaLookup = NULL;
+
+ delete[] mCompositionTimeDeltaEntries;
+ mCompositionTimeDeltaEntries = NULL;
+
+ delete[] mSampleTimeEntries;
+ mSampleTimeEntries = NULL;
+
+ delete mSampleIterator;
+ mSampleIterator = NULL;
+}
+
+bool SampleTable::isValid() const {
+ return mChunkOffsetOffset >= 0
+ && mSampleToChunkOffset >= 0
+ && mSampleSizeOffset >= 0
+ && mHasTimeToSample;
+}
+
+status_t SampleTable::setChunkOffsetParams(
+ uint32_t type, off64_t data_offset, size_t data_size) {
+ if (mChunkOffsetOffset >= 0) {
+ return ERROR_MALFORMED;
+ }
+
+ CHECK(type == kChunkOffsetType32 || type == kChunkOffsetType64);
+
+ mChunkOffsetOffset = data_offset;
+ mChunkOffsetType = type;
+
+ if (data_size < 8) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t header[8];
+ if (mDataSource->readAt(
+ data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+ return ERROR_IO;
+ }
+
+ if (U32_AT(header) != 0) {
+ // Expected version = 0, flags = 0.
+ return ERROR_MALFORMED;
+ }
+
+ mNumChunkOffsets = U32_AT(&header[4]);
+
+ if (mChunkOffsetType == kChunkOffsetType32) {
+ if ((data_size - 8) / 4 < mNumChunkOffsets) {
+ return ERROR_MALFORMED;
+ }
+ } else {
+ if ((data_size - 8) / 8 < mNumChunkOffsets) {
+ return ERROR_MALFORMED;
+ }
+ }
+
+ return OK;
+}
+
+status_t SampleTable::setSampleToChunkParams(
+ off64_t data_offset, size_t data_size) {
+ if (mSampleToChunkOffset >= 0) {
+ // already set
+ return ERROR_MALFORMED;
+ }
+
+ if (data_offset < 0) {
+ return ERROR_MALFORMED;
+ }
+
+ mSampleToChunkOffset = data_offset;
+
+ if (data_size < 8) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t header[8];
+ if (mDataSource->readAt(
+ data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+ return ERROR_IO;
+ }
+
+ if (U32_AT(header) != 0) {
+ // Expected version = 0, flags = 0.
+ return ERROR_MALFORMED;
+ }
+
+ mNumSampleToChunkOffsets = U32_AT(&header[4]);
+
+ if ((data_size - 8) / sizeof(SampleToChunkEntry) < mNumSampleToChunkOffsets) {
+ return ERROR_MALFORMED;
+ }
+
+ if ((uint64_t)kMaxTotalSize / sizeof(SampleToChunkEntry) <=
+ (uint64_t)mNumSampleToChunkOffsets) {
+ ALOGE("Sample-to-chunk table size too large.");
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mTotalSize += (uint64_t)mNumSampleToChunkOffsets *
+ sizeof(SampleToChunkEntry);
+ if (mTotalSize > kMaxTotalSize) {
+ ALOGE("Sample-to-chunk table size would make sample table too large.\n"
+ " Requested sample-to-chunk table size = %llu\n"
+ " Eventual sample table size >= %llu\n"
+ " Allowed sample table size = %llu\n",
+ (unsigned long long)mNumSampleToChunkOffsets *
+ sizeof(SampleToChunkEntry),
+ (unsigned long long)mTotalSize,
+ (unsigned long long)kMaxTotalSize);
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mSampleToChunkEntries =
+ new (std::nothrow) SampleToChunkEntry[mNumSampleToChunkOffsets];
+ if (!mSampleToChunkEntries) {
+ ALOGE("Cannot allocate sample-to-chunk table with %llu entries.",
+ (unsigned long long)mNumSampleToChunkOffsets);
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ if (mNumSampleToChunkOffsets == 0) {
+ return OK;
+ }
+
+ if ((off64_t)(kMaxOffset - 8 -
+ ((mNumSampleToChunkOffsets - 1) * sizeof(SampleToChunkEntry)))
+ < mSampleToChunkOffset) {
+ return ERROR_MALFORMED;
+ }
+
+ for (uint32_t i = 0; i < mNumSampleToChunkOffsets; ++i) {
+ uint8_t buffer[sizeof(SampleToChunkEntry)];
+
+ if (mDataSource->readAt(
+ mSampleToChunkOffset + 8 + i * sizeof(SampleToChunkEntry),
+ buffer,
+ sizeof(buffer))
+ != (ssize_t)sizeof(buffer)) {
+ return ERROR_IO;
+ }
+ // chunk index is 1 based in the spec.
+ if (U32_AT(buffer) < 1) {
+ ALOGE("b/23534160");
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ // We want the chunk index to be 0-based.
+ mSampleToChunkEntries[i].startChunk = U32_AT(buffer) - 1;
+ mSampleToChunkEntries[i].samplesPerChunk = U32_AT(&buffer[4]);
+ mSampleToChunkEntries[i].chunkDesc = U32_AT(&buffer[8]);
+ }
+
+ return OK;
+}
+
+status_t SampleTable::setSampleSizeParams(
+ uint32_t type, off64_t data_offset, size_t data_size) {
+ if (mSampleSizeOffset >= 0) {
+ return ERROR_MALFORMED;
+ }
+
+ CHECK(type == kSampleSizeType32 || type == kSampleSizeTypeCompact);
+
+ mSampleSizeOffset = data_offset;
+
+ if (data_size < 12) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t header[12];
+ if (mDataSource->readAt(
+ data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+ return ERROR_IO;
+ }
+
+ if (U32_AT(header) != 0) {
+ // Expected version = 0, flags = 0.
+ return ERROR_MALFORMED;
+ }
+
+ mDefaultSampleSize = U32_AT(&header[4]);
+ mNumSampleSizes = U32_AT(&header[8]);
+ if (mNumSampleSizes > (UINT32_MAX - 12) / 16) {
+ ALOGE("b/23247055, mNumSampleSizes(%u)", mNumSampleSizes);
+ return ERROR_MALFORMED;
+ }
+
+ if (type == kSampleSizeType32) {
+ mSampleSizeFieldSize = 32;
+
+ if (mDefaultSampleSize != 0) {
+ return OK;
+ }
+
+ if (data_size < 12 + mNumSampleSizes * 4) {
+ return ERROR_MALFORMED;
+ }
+ } else {
+ if ((mDefaultSampleSize & 0xffffff00) != 0) {
+ // The high 24 bits are reserved and must be 0.
+ return ERROR_MALFORMED;
+ }
+
+ mSampleSizeFieldSize = mDefaultSampleSize & 0xff;
+ mDefaultSampleSize = 0;
+
+ if (mSampleSizeFieldSize != 4 && mSampleSizeFieldSize != 8
+ && mSampleSizeFieldSize != 16) {
+ return ERROR_MALFORMED;
+ }
+
+ if (data_size < 12 + (mNumSampleSizes * mSampleSizeFieldSize + 4) / 8) {
+ return ERROR_MALFORMED;
+ }
+ }
+
+ return OK;
+}
+
+status_t SampleTable::setTimeToSampleParams(
+ off64_t data_offset, size_t data_size) {
+ if (mHasTimeToSample || data_size < 8) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t header[8];
+ if (mDataSource->readAt(
+ data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+ return ERROR_IO;
+ }
+
+ if (U32_AT(header) != 0) {
+ // Expected version = 0, flags = 0.
+ return ERROR_MALFORMED;
+ }
+
+ mTimeToSampleCount = U32_AT(&header[4]);
+ if (mTimeToSampleCount > UINT32_MAX / (2 * sizeof(uint32_t))) {
+ // Choose this bound because
+ // 1) 2 * sizeof(uint32_t) is the amount of memory needed for one
+ // time-to-sample entry in the time-to-sample table.
+ // 2) mTimeToSampleCount is the number of entries of the time-to-sample
+ // table.
+ // 3) We hope that the table size does not exceed UINT32_MAX.
+ ALOGE("Time-to-sample table size too large.");
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ // Note: At this point, we know that mTimeToSampleCount * 2 will not
+ // overflow because of the above condition.
+
+ uint64_t allocSize = (uint64_t)mTimeToSampleCount * 2 * sizeof(uint32_t);
+ mTotalSize += allocSize;
+ if (mTotalSize > kMaxTotalSize) {
+ ALOGE("Time-to-sample table size would make sample table too large.\n"
+ " Requested time-to-sample table size = %llu\n"
+ " Eventual sample table size >= %llu\n"
+ " Allowed sample table size = %llu\n",
+ (unsigned long long)allocSize,
+ (unsigned long long)mTotalSize,
+ (unsigned long long)kMaxTotalSize);
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mTimeToSample = new (std::nothrow) uint32_t[mTimeToSampleCount * 2];
+ if (!mTimeToSample) {
+ ALOGE("Cannot allocate time-to-sample table with %llu entries.",
+ (unsigned long long)mTimeToSampleCount);
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ if (mDataSource->readAt(data_offset + 8, mTimeToSample,
+ (size_t)allocSize) < (ssize_t)allocSize) {
+ ALOGE("Incomplete data read for time-to-sample table.");
+ return ERROR_IO;
+ }
+
+ for (size_t i = 0; i < mTimeToSampleCount * 2; ++i) {
+ mTimeToSample[i] = ntohl(mTimeToSample[i]);
+ }
+
+ mHasTimeToSample = true;
+ return OK;
+}
+
+// NOTE: per ISO/IEC 14496-12, a version 0 'ctts' box contains unsigned values,
+// while version 1 contains signed values. However, some software creates
+// version 0 files that contain signed values, so we always treat the values
+// as signed, regardless of version.
+status_t SampleTable::setCompositionTimeToSampleParams(
+ off64_t data_offset, size_t data_size) {
+ ALOGI("There are reordered frames present.");
+
+ if (mCompositionTimeDeltaEntries != NULL || data_size < 8) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t header[8];
+ if (mDataSource->readAt(
+ data_offset, header, sizeof(header))
+ < (ssize_t)sizeof(header)) {
+ return ERROR_IO;
+ }
+
+ uint32_t flags = U32_AT(header);
+ uint32_t version = flags >> 24;
+ flags &= 0xffffff;
+
+ if ((version != 0 && version != 1) || flags != 0) {
+ // Expected version = 0 or 1, flags = 0.
+ return ERROR_MALFORMED;
+ }
+
+ size_t numEntries = U32_AT(&header[4]);
+
+ if (((SIZE_MAX / 8) - 1 < numEntries) || (data_size != (numEntries + 1) * 8)) {
+ return ERROR_MALFORMED;
+ }
+
+ mNumCompositionTimeDeltaEntries = numEntries;
+ uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(int32_t);
+ if (allocSize > kMaxTotalSize) {
+ ALOGE("Composition-time-to-sample table size too large.");
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mTotalSize += allocSize;
+ if (mTotalSize > kMaxTotalSize) {
+ ALOGE("Composition-time-to-sample table would make sample table too large.\n"
+ " Requested composition-time-to-sample table size = %llu\n"
+ " Eventual sample table size >= %llu\n"
+ " Allowed sample table size = %llu\n",
+ (unsigned long long)allocSize,
+ (unsigned long long)mTotalSize,
+ (unsigned long long)kMaxTotalSize);
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mCompositionTimeDeltaEntries = new (std::nothrow) int32_t[2 * numEntries];
+ if (!mCompositionTimeDeltaEntries) {
+ ALOGE("Cannot allocate composition-time-to-sample table with %llu "
+ "entries.", (unsigned long long)numEntries);
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ if (mDataSource->readAt(data_offset + 8, mCompositionTimeDeltaEntries,
+ (size_t)allocSize) < (ssize_t)allocSize) {
+ delete[] mCompositionTimeDeltaEntries;
+ mCompositionTimeDeltaEntries = NULL;
+
+ return ERROR_IO;
+ }
+
+ for (size_t i = 0; i < 2 * numEntries; ++i) {
+ mCompositionTimeDeltaEntries[i] = ntohl(mCompositionTimeDeltaEntries[i]);
+ }
+
+ mCompositionDeltaLookup->setEntries(
+ mCompositionTimeDeltaEntries, mNumCompositionTimeDeltaEntries);
+
+ return OK;
+}
+
+status_t SampleTable::setSyncSampleParams(off64_t data_offset, size_t data_size) {
+ if (mSyncSampleOffset >= 0 || data_size < 8) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t header[8];
+ if (mDataSource->readAt(
+ data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+ return ERROR_IO;
+ }
+
+ if (U32_AT(header) != 0) {
+ // Expected version = 0, flags = 0.
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t numSyncSamples = U32_AT(&header[4]);
+
+ if (numSyncSamples < 2) {
+ ALOGV("Table of sync samples is empty or has only a single entry!");
+ }
+
+ uint64_t allocSize = (uint64_t)numSyncSamples * sizeof(uint32_t);
+ if (allocSize > kMaxTotalSize) {
+ ALOGE("Sync sample table size too large.");
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mTotalSize += allocSize;
+ if (mTotalSize > kMaxTotalSize) {
+ ALOGE("Sync sample table size would make sample table too large.\n"
+ " Requested sync sample table size = %llu\n"
+ " Eventual sample table size >= %llu\n"
+ " Allowed sample table size = %llu\n",
+ (unsigned long long)allocSize,
+ (unsigned long long)mTotalSize,
+ (unsigned long long)kMaxTotalSize);
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ mSyncSamples = new (std::nothrow) uint32_t[numSyncSamples];
+ if (!mSyncSamples) {
+ ALOGE("Cannot allocate sync sample table with %llu entries.",
+ (unsigned long long)numSyncSamples);
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ if (mDataSource->readAt(data_offset + 8, mSyncSamples,
+ (size_t)allocSize) != (ssize_t)allocSize) {
+ delete[] mSyncSamples;
+ mSyncSamples = NULL;
+ return ERROR_IO;
+ }
+
+ for (size_t i = 0; i < numSyncSamples; ++i) {
+ if (mSyncSamples[i] == 0) {
+ ALOGE("b/32423862, unexpected zero value in stss");
+ continue;
+ }
+ mSyncSamples[i] = ntohl(mSyncSamples[i]) - 1;
+ }
+
+ mSyncSampleOffset = data_offset;
+ mNumSyncSamples = numSyncSamples;
+
+ return OK;
+}
+
+uint32_t SampleTable::countChunkOffsets() const {
+ return mNumChunkOffsets;
+}
+
+uint32_t SampleTable::countSamples() const {
+ return mNumSampleSizes;
+}
+
+status_t SampleTable::getMaxSampleSize(size_t *max_size) {
+ Mutex::Autolock autoLock(mLock);
+
+ *max_size = 0;
+
+ for (uint32_t i = 0; i < mNumSampleSizes; ++i) {
+ size_t sample_size;
+ status_t err = getSampleSize_l(i, &sample_size);
+
+ if (err != OK) {
+ return err;
+ }
+
+ if (sample_size > *max_size) {
+ *max_size = sample_size;
+ }
+ }
+
+ return OK;
+}
+
+uint32_t abs_difference(uint32_t time1, uint32_t time2) {
+ return time1 > time2 ? time1 - time2 : time2 - time1;
+}
+
+// static
+int SampleTable::CompareIncreasingTime(const void *_a, const void *_b) {
+ const SampleTimeEntry *a = (const SampleTimeEntry *)_a;
+ const SampleTimeEntry *b = (const SampleTimeEntry *)_b;
+
+ if (a->mCompositionTime < b->mCompositionTime) {
+ return -1;
+ } else if (a->mCompositionTime > b->mCompositionTime) {
+ return 1;
+ }
+
+ return 0;
+}
+
+void SampleTable::buildSampleEntriesTable() {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mSampleTimeEntries != NULL || mNumSampleSizes == 0) {
+ if (mNumSampleSizes == 0) {
+ ALOGE("b/23247055, mNumSampleSizes(%u)", mNumSampleSizes);
+ }
+ return;
+ }
+
+ mTotalSize += (uint64_t)mNumSampleSizes * sizeof(SampleTimeEntry);
+ if (mTotalSize > kMaxTotalSize) {
+ ALOGE("Sample entry table size would make sample table too large.\n"
+ " Requested sample entry table size = %llu\n"
+ " Eventual sample table size >= %llu\n"
+ " Allowed sample table size = %llu\n",
+ (unsigned long long)mNumSampleSizes * sizeof(SampleTimeEntry),
+ (unsigned long long)mTotalSize,
+ (unsigned long long)kMaxTotalSize);
+ return;
+ }
+
+ mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
+ if (!mSampleTimeEntries) {
+ ALOGE("Cannot allocate sample entry table with %llu entries.",
+ (unsigned long long)mNumSampleSizes);
+ return;
+ }
+
+ uint32_t sampleIndex = 0;
+ uint32_t sampleTime = 0;
+
+ for (uint32_t i = 0; i < mTimeToSampleCount; ++i) {
+ uint32_t n = mTimeToSample[2 * i];
+ uint32_t delta = mTimeToSample[2 * i + 1];
+
+ for (uint32_t j = 0; j < n; ++j) {
+ if (sampleIndex < mNumSampleSizes) {
+ // Technically this should always be the case if the file
+ // is well-formed, but you know... there's (gasp) malformed
+ // content out there.
+
+ mSampleTimeEntries[sampleIndex].mSampleIndex = sampleIndex;
+
+ int32_t compTimeDelta =
+ mCompositionDeltaLookup->getCompositionTimeOffset(
+ sampleIndex);
+
+ if ((compTimeDelta < 0 && sampleTime <
+ (compTimeDelta == INT32_MIN ?
+ INT32_MAX : uint32_t(-compTimeDelta)))
+ || (compTimeDelta > 0 &&
+ sampleTime > UINT32_MAX - compTimeDelta)) {
+ ALOGE("%u + %d would overflow, clamping",
+ sampleTime, compTimeDelta);
+ if (compTimeDelta < 0) {
+ sampleTime = 0;
+ } else {
+ sampleTime = UINT32_MAX;
+ }
+ compTimeDelta = 0;
+ }
+
+ mSampleTimeEntries[sampleIndex].mCompositionTime =
+ compTimeDelta > 0 ? sampleTime + compTimeDelta:
+ sampleTime - (-compTimeDelta);
+ }
+
+ ++sampleIndex;
+ if (sampleTime > UINT32_MAX - delta) {
+ ALOGE("%u + %u would overflow, clamping",
+ sampleTime, delta);
+ sampleTime = UINT32_MAX;
+ } else {
+ sampleTime += delta;
+ }
+ }
+ }
+
+ qsort(mSampleTimeEntries, mNumSampleSizes, sizeof(SampleTimeEntry),
+ CompareIncreasingTime);
+}
+
+status_t SampleTable::findSampleAtTime(
+ uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+ uint32_t *sample_index, uint32_t flags) {
+ buildSampleEntriesTable();
+
+ if (mSampleTimeEntries == NULL) {
+ return ERROR_OUT_OF_RANGE;
+ }
+
+ if (flags == kFlagFrameIndex) {
+ if (req_time >= mNumSampleSizes) {
+ return ERROR_OUT_OF_RANGE;
+ }
+ *sample_index = mSampleTimeEntries[req_time].mSampleIndex;
+ return OK;
+ }
+
+ uint32_t left = 0;
+ uint32_t right_plus_one = mNumSampleSizes;
+ while (left < right_plus_one) {
+ uint32_t center = left + (right_plus_one - left) / 2;
+ uint64_t centerTime =
+ getSampleTime(center, scale_num, scale_den);
+
+ if (req_time < centerTime) {
+ right_plus_one = center;
+ } else if (req_time > centerTime) {
+ left = center + 1;
+ } else {
+ *sample_index = mSampleTimeEntries[center].mSampleIndex;
+ return OK;
+ }
+ }
+
+ uint32_t closestIndex = left;
+
+ if (closestIndex == mNumSampleSizes) {
+ if (flags == kFlagAfter) {
+ return ERROR_OUT_OF_RANGE;
+ }
+ flags = kFlagBefore;
+ } else if (closestIndex == 0) {
+ if (flags == kFlagBefore) {
+ // normally we should return out of range, but that is
+ // treated as end-of-stream. instead return first sample
+ //
+ // return ERROR_OUT_OF_RANGE;
+ }
+ flags = kFlagAfter;
+ }
+
+ switch (flags) {
+ case kFlagBefore:
+ {
+ --closestIndex;
+ break;
+ }
+
+ case kFlagAfter:
+ {
+ // nothing to do
+ break;
+ }
+
+ default:
+ {
+ CHECK(flags == kFlagClosest);
+ // pick closest based on timestamp. use abs_difference for safety
+ if (abs_difference(
+ getSampleTime(closestIndex, scale_num, scale_den), req_time) >
+ abs_difference(
+ req_time, getSampleTime(closestIndex - 1, scale_num, scale_den))) {
+ --closestIndex;
+ }
+ break;
+ }
+ }
+
+ *sample_index = mSampleTimeEntries[closestIndex].mSampleIndex;
+ return OK;
+}
+
+status_t SampleTable::findSyncSampleNear(
+ uint32_t start_sample_index, uint32_t *sample_index, uint32_t flags) {
+ Mutex::Autolock autoLock(mLock);
+
+ *sample_index = 0;
+
+ if (mSyncSampleOffset < 0) {
+ // All samples are sync-samples.
+ *sample_index = start_sample_index;
+ return OK;
+ }
+
+ if (mNumSyncSamples == 0) {
+ *sample_index = 0;
+ return OK;
+ }
+
+ uint32_t left = 0;
+ uint32_t right_plus_one = mNumSyncSamples;
+ while (left < right_plus_one) {
+ uint32_t center = left + (right_plus_one - left) / 2;
+ uint32_t x = mSyncSamples[center];
+
+ if (start_sample_index < x) {
+ right_plus_one = center;
+ } else if (start_sample_index > x) {
+ left = center + 1;
+ } else {
+ *sample_index = x;
+ return OK;
+ }
+ }
+
+ if (left == mNumSyncSamples) {
+ if (flags == kFlagAfter) {
+ ALOGE("tried to find a sync frame after the last one: %d", left);
+ return ERROR_OUT_OF_RANGE;
+ }
+ flags = kFlagBefore;
+ }
+ else if (left == 0) {
+ if (flags == kFlagBefore) {
+ ALOGE("tried to find a sync frame before the first one: %d", left);
+
+ // normally we should return out of range, but that is
+ // treated as end-of-stream. instead seek to first sync
+ //
+ // return ERROR_OUT_OF_RANGE;
+ }
+ flags = kFlagAfter;
+ }
+
+ // Now ssi[left - 1] <(=) start_sample_index <= ssi[left]
+ switch (flags) {
+ case kFlagBefore:
+ {
+ --left;
+ break;
+ }
+ case kFlagAfter:
+ {
+ // nothing to do
+ break;
+ }
+ default:
+ {
+ // this route is not used, but implement it nonetheless
+ CHECK(flags == kFlagClosest);
+
+ status_t err = mSampleIterator->seekTo(start_sample_index);
+ if (err != OK) {
+ return err;
+ }
+ uint32_t sample_time = mSampleIterator->getSampleTime();
+
+ err = mSampleIterator->seekTo(mSyncSamples[left]);
+ if (err != OK) {
+ return err;
+ }
+ uint32_t upper_time = mSampleIterator->getSampleTime();
+
+ err = mSampleIterator->seekTo(mSyncSamples[left - 1]);
+ if (err != OK) {
+ return err;
+ }
+ uint32_t lower_time = mSampleIterator->getSampleTime();
+
+ // use abs_difference for safety
+ if (abs_difference(upper_time, sample_time) >
+ abs_difference(sample_time, lower_time)) {
+ --left;
+ }
+ break;
+ }
+ }
+
+ *sample_index = mSyncSamples[left];
+ return OK;
+}
+
+status_t SampleTable::findThumbnailSample(uint32_t *sample_index) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mSyncSampleOffset < 0) {
+ // All samples are sync-samples.
+ *sample_index = 0;
+ return OK;
+ }
+
+ uint32_t bestSampleIndex = 0;
+ size_t maxSampleSize = 0;
+
+ static const size_t kMaxNumSyncSamplesToScan = 20;
+
+ // Consider the first kMaxNumSyncSamplesToScan sync samples and
+ // pick the one with the largest (compressed) size as the thumbnail.
+
+ size_t numSamplesToScan = mNumSyncSamples;
+ if (numSamplesToScan > kMaxNumSyncSamplesToScan) {
+ numSamplesToScan = kMaxNumSyncSamplesToScan;
+ }
+
+ for (size_t i = 0; i < numSamplesToScan; ++i) {
+ uint32_t x = mSyncSamples[i];
+
+ // Now x is a sample index.
+ size_t sampleSize;
+ status_t err = getSampleSize_l(x, &sampleSize);
+ if (err != OK) {
+ return err;
+ }
+
+ if (i == 0 || sampleSize > maxSampleSize) {
+ bestSampleIndex = x;
+ maxSampleSize = sampleSize;
+ }
+ }
+
+ *sample_index = bestSampleIndex;
+
+ return OK;
+}
+
+status_t SampleTable::getSampleSize_l(
+ uint32_t sampleIndex, size_t *sampleSize) {
+ return mSampleIterator->getSampleSizeDirect(
+ sampleIndex, sampleSize);
+}
+
+status_t SampleTable::getMetaDataForSample(
+ uint32_t sampleIndex,
+ off64_t *offset,
+ size_t *size,
+ uint32_t *compositionTime,
+ bool *isSyncSample,
+ uint32_t *sampleDuration) {
+ Mutex::Autolock autoLock(mLock);
+
+ status_t err;
+ if ((err = mSampleIterator->seekTo(sampleIndex)) != OK) {
+ return err;
+ }
+
+ if (offset) {
+ *offset = mSampleIterator->getSampleOffset();
+ }
+
+ if (size) {
+ *size = mSampleIterator->getSampleSize();
+ }
+
+ if (compositionTime) {
+ *compositionTime = mSampleIterator->getSampleTime();
+ }
+
+ if (isSyncSample) {
+ *isSyncSample = false;
+ if (mSyncSampleOffset < 0) {
+ // Every sample is a sync sample.
+ *isSyncSample = true;
+ } else {
+ size_t i = (mLastSyncSampleIndex < mNumSyncSamples)
+ && (mSyncSamples[mLastSyncSampleIndex] <= sampleIndex)
+ ? mLastSyncSampleIndex : 0;
+
+ while (i < mNumSyncSamples && mSyncSamples[i] < sampleIndex) {
+ ++i;
+ }
+
+ if (i < mNumSyncSamples && mSyncSamples[i] == sampleIndex) {
+ *isSyncSample = true;
+ }
+
+ mLastSyncSampleIndex = i;
+ }
+ }
+
+ if (sampleDuration) {
+ *sampleDuration = mSampleIterator->getSampleDuration();
+ }
+
+ return OK;
+}
+
+int32_t SampleTable::getCompositionTimeOffset(uint32_t sampleIndex) {
+ return mCompositionDeltaLookup->getCompositionTimeOffset(sampleIndex);
+}
+
+} // namespace android
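Editor's note: findSyncSampleNear() above resolves a seek with a lower-bound binary search over the sorted sync-sample list and then applies the before/after/closest policy. The following is a simplified standalone sketch of that resolution; note that for the "closest" case it compares sample indices, whereas the real code compares decode times obtained through SampleIterator.

#include <cstddef>
#include <cstdint>
#include <vector>

enum SeekFlag { kBefore, kAfter, kClosest };

// syncSamples stands in for mSyncSamples (sorted, 0-based sample indices).
static bool pickSyncSample(const std::vector<uint32_t> &syncSamples,
                           uint32_t startSample, SeekFlag flag,
                           uint32_t *result) {
    if (syncSamples.empty()) {
        return false;
    }

    uint32_t left = 0, rightPlusOne = (uint32_t)syncSamples.size();
    while (left < rightPlusOne) {
        uint32_t center = left + (rightPlusOne - left) / 2;
        if (startSample < syncSamples[center]) {
            rightPlusOne = center;
        } else if (startSample > syncSamples[center]) {
            left = center + 1;
        } else {
            *result = syncSamples[center];   // exact hit
            return true;
        }
    }

    // left now points at the first sync sample greater than startSample.
    if (left == syncSamples.size()) {
        if (flag == kAfter) {
            return false;                    // nothing after the last sync frame
        }
        flag = kBefore;
    } else if (left == 0) {
        flag = kAfter;                       // nothing strictly before the first
    }

    if (flag == kClosest) {
        // Pick whichever neighbour is nearer (by index here; by time in the
        // real implementation).
        flag = (syncSamples[left] - startSample <
                startSample - syncSamples[left - 1]) ? kAfter : kBefore;
    }

    *result = (flag == kBefore) ? syncSamples[left - 1] : syncSamples[left];
    return true;
}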
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/SampleTable.h
new file mode 100644
index 0000000..e4e974b
--- /dev/null
+++ b/media/extractors/mp4/SampleTable.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SAMPLE_TABLE_H_
+
+#define SAMPLE_TABLE_H_
+
+#include <sys/types.h>
+#include <stdint.h>
+
+#include <media/stagefright/MediaErrors.h>
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class DataSourceBase;
+struct SampleIterator;
+
+class SampleTable : public RefBase {
+public:
+ explicit SampleTable(DataSourceBase *source);
+
+ bool isValid() const;
+
+ // type can be 'stco' or 'co64'.
+ status_t setChunkOffsetParams(
+ uint32_t type, off64_t data_offset, size_t data_size);
+
+ status_t setSampleToChunkParams(off64_t data_offset, size_t data_size);
+
+ // type can be 'stsz' or 'stz2'.
+ status_t setSampleSizeParams(
+ uint32_t type, off64_t data_offset, size_t data_size);
+
+ status_t setTimeToSampleParams(off64_t data_offset, size_t data_size);
+
+ status_t setCompositionTimeToSampleParams(
+ off64_t data_offset, size_t data_size);
+
+ status_t setSyncSampleParams(off64_t data_offset, size_t data_size);
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ uint32_t countChunkOffsets() const;
+
+ uint32_t countSamples() const;
+
+ status_t getMaxSampleSize(size_t *size);
+
+ status_t getMetaDataForSample(
+ uint32_t sampleIndex,
+ off64_t *offset,
+ size_t *size,
+ uint32_t *compositionTime,
+ bool *isSyncSample = NULL,
+ uint32_t *sampleDuration = NULL);
+
+ enum {
+ kFlagBefore,
+ kFlagAfter,
+ kFlagClosest,
+ kFlagFrameIndex,
+ };
+ status_t findSampleAtTime(
+ uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
+ uint32_t *sample_index, uint32_t flags);
+
+ status_t findSyncSampleNear(
+ uint32_t start_sample_index, uint32_t *sample_index,
+ uint32_t flags);
+
+ status_t findThumbnailSample(uint32_t *sample_index);
+
+protected:
+ ~SampleTable();
+
+private:
+ struct CompositionDeltaLookup;
+
+ static const uint32_t kChunkOffsetType32;
+ static const uint32_t kChunkOffsetType64;
+ static const uint32_t kSampleSizeType32;
+ static const uint32_t kSampleSizeTypeCompact;
+
+ // Limit the total size of all internal tables to 200MiB.
+ static const size_t kMaxTotalSize = 200 * (1 << 20);
+
+ DataSourceBase *mDataSource;
+ Mutex mLock;
+
+ off64_t mChunkOffsetOffset;
+ uint32_t mChunkOffsetType;
+ uint32_t mNumChunkOffsets;
+
+ off64_t mSampleToChunkOffset;
+ uint32_t mNumSampleToChunkOffsets;
+
+ off64_t mSampleSizeOffset;
+ uint32_t mSampleSizeFieldSize;
+ uint32_t mDefaultSampleSize;
+ uint32_t mNumSampleSizes;
+
+ bool mHasTimeToSample;
+ uint32_t mTimeToSampleCount;
+ uint32_t* mTimeToSample;
+
+ struct SampleTimeEntry {
+ uint32_t mSampleIndex;
+ uint32_t mCompositionTime;
+ };
+ SampleTimeEntry *mSampleTimeEntries;
+
+ int32_t *mCompositionTimeDeltaEntries;
+ size_t mNumCompositionTimeDeltaEntries;
+ CompositionDeltaLookup *mCompositionDeltaLookup;
+
+ off64_t mSyncSampleOffset;
+ uint32_t mNumSyncSamples;
+ uint32_t *mSyncSamples;
+ size_t mLastSyncSampleIndex;
+
+ SampleIterator *mSampleIterator;
+
+ struct SampleToChunkEntry {
+ uint32_t startChunk;
+ uint32_t samplesPerChunk;
+ uint32_t chunkDesc;
+ };
+ SampleToChunkEntry *mSampleToChunkEntries;
+
+ // Approximate size of all tables combined.
+ uint64_t mTotalSize;
+
+ friend struct SampleIterator;
+
+ // normally we don't round
+ inline uint64_t getSampleTime(
+ size_t sample_index, uint64_t scale_num, uint64_t scale_den) const {
+ return (sample_index < (size_t)mNumSampleSizes && mSampleTimeEntries != NULL
+ && scale_den != 0)
+ ? (mSampleTimeEntries[sample_index].mCompositionTime * scale_num) / scale_den : 0;
+ }
+
+ status_t getSampleSize_l(uint32_t sample_index, size_t *sample_size);
+ int32_t getCompositionTimeOffset(uint32_t sampleIndex);
+
+ static int CompareIncreasingTime(const void *, const void *);
+
+ void buildSampleEntriesTable();
+
+ SampleTable(const SampleTable &);
+ SampleTable &operator=(const SampleTable &);
+};
+
+} // namespace android
+
+#endif // SAMPLE_TABLE_H_
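Editor's note: kMaxTotalSize above caps the combined size of all in-memory tables at 200 MiB; every setter grows mTotalSize and rejects the box if the budget would be exceeded. Below is a minimal sketch of that accounting pattern, assuming only that callers pass an entry count and a per-entry size; it is not the class's own code.

#include <cstdint>

// Mirrors the 200 MiB cap declared in SampleTable.h.
static const uint64_t kMaxTotalSizeSketch = 200ull * (1 << 20);

// Grows *totalSoFar by entryCount * entrySize, refusing the request if the
// multiplication would overflow or the running total would exceed the cap.
static bool reserveTable(uint64_t *totalSoFar,
                         uint64_t entryCount, uint64_t entrySize) {
    if (entrySize != 0 && entryCount > UINT64_MAX / entrySize) {
        return false;                       // entryCount * entrySize overflows
    }
    uint64_t want = entryCount * entrySize;
    if (want > kMaxTotalSizeSketch ||
            *totalSoFar > kMaxTotalSizeSketch - want) {
        return false;                       // would blow the overall budget
    }
    *totalSoFar += want;
    return true;
}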
diff --git a/media/extractors/mp4/exports.lds b/media/extractors/mp4/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/mp4/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
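Editor's note: this version script hides every symbol except GETEXTRACTORDEF. As a hedged illustration only (the framework's real plugin scanner lives elsewhere and may behave differently), a host process could check that the entry point is reachable like this:

#include <dlfcn.h>
#include <cstdio>

// Returns true if the library exposes the single expected entry point.
static bool hasExtractorEntryPoint(const char *libraryPath) {
    void *handle = dlopen(libraryPath, RTLD_NOW | RTLD_LOCAL);
    if (handle == nullptr) {
        fprintf(stderr, "dlopen(%s) failed: %s\n", libraryPath, dlerror());
        return false;
    }
    // Every other symbol is hidden by "local: *", so this is the whole ABI.
    void *entry = dlsym(handle, "GETEXTRACTORDEF");
    dlclose(handle);
    return entry != nullptr;
}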
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
new file mode 100644
index 0000000..5e4a592
--- /dev/null
+++ b/media/extractors/mpeg2/Android.bp
@@ -0,0 +1,56 @@
+cc_library_shared {
+
+ srcs: [
+ "ExtractorBundle.cpp",
+ "MPEG2PSExtractor.cpp",
+ "MPEG2TSExtractor.cpp",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright",
+ "frameworks/av/media/libstagefright/include",
+ ],
+
+ shared_libs: [
+ "android.hardware.cas@1.0",
+ "android.hardware.cas.native@1.0",
+ "android.hidl.token@1.0-utils",
+ "libbinder",
+ "libcrypto",
+ "libcutils",
+ "libhidlallocatorutils",
+ "libhidlbase",
+ "liblog",
+ "libmediaextractor",
+ "libstagefright_foundation",
+ ],
+
+ static_libs: [
+ "libstagefright_mpeg2support",
+ "libutils",
+ ],
+
+ name: "libmpeg2extractor",
+ relative_install_path: "extractors",
+
+ compile_multilib: "first",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/extractors/mpeg2/ExtractorBundle.cpp b/media/extractors/mpeg2/ExtractorBundle.cpp
new file mode 100644
index 0000000..8a0fa03
--- /dev/null
+++ b/media/extractors/mpeg2/ExtractorBundle.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MPEG2ExtractorBundle"
+#include <utils/Log.h>
+
+#include <media/MediaExtractor.h>
+#include "MPEG2PSExtractor.h"
+#include "MPEG2TSExtractor.h"
+
+namespace android {
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("3d1dcfeb-e40a-436d-a574-c2438a555e5f"),
+ 1,
+ "MPEG2-PS/TS Extractor",
+ [](
+ DataSourceBase *source,
+ float *confidence,
+ void **,
+ MediaExtractor::FreeMetaFunc *) -> MediaExtractor::CreatorFunc {
+ if (SniffMPEG2TS(source, confidence)) {
+ return [](
+ DataSourceBase *source,
+ void *) -> MediaExtractor* {
+ return new MPEG2TSExtractor(source);};
+ } else if (SniffMPEG2PS(source, confidence)) {
+ return [](
+ DataSourceBase *source,
+ void *) -> MediaExtractor* {
+ return new MPEG2PSExtractor(source);};
+ }
+ return NULL;
+ }
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/extractors/mpeg2/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/extractors/mpeg2/MODULE_LICENSE_APACHE2
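Editor's note: the MPEG2PSExtractor added below reassembles 33-bit PTS/DTS values from 3-, 15-, and 15-bit groups, each terminated by a marker bit that must read as 1 (and preceded by a 4-bit prefix not shown here). The following is a self-contained sketch of that assembly, with a tiny MSB-first bit reader standing in for ABitReader; names are illustrative only.

#include <cstddef>
#include <cstdint>

struct BitCursor {
    const uint8_t *data;
    size_t bitPos;
};

// Reads n bits MSB-first, like ABitReader::getBits.
static uint32_t readBits(BitCursor *c, unsigned n) {
    uint32_t v = 0;
    while (n--) {
        uint32_t bit = (c->data[c->bitPos / 8] >> (7 - (c->bitPos % 8))) & 1;
        v = (v << 1) | bit;
        ++c->bitPos;
    }
    return v;
}

// Returns false when a marker bit is not 1, mirroring the ERROR_MALFORMED
// paths in dequeuePES().
static bool readTimestamp33(BitCursor *c, uint64_t *ts) {
    *ts = (uint64_t)readBits(c, 3) << 30;
    if (readBits(c, 1) != 1) return false;
    *ts |= (uint64_t)readBits(c, 15) << 15;
    if (readBits(c, 1) != 1) return false;
    *ts |= readBits(c, 15);
    return readBits(c, 1) == 1;
}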
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.cpp b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
new file mode 100644
index 0000000..6980b82
--- /dev/null
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
@@ -0,0 +1,772 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MPEG2PSExtractor"
+#include <utils/Log.h>
+
+#include "MPEG2PSExtractor.h"
+
+#include "mpeg2ts/AnotherPacketSource.h"
+#include "mpeg2ts/ESQueue.h"
+
+#include <media/DataSourceBase.h>
+#include <media/MediaTrack.h>
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/String8.h>
+
+#include <inttypes.h>
+
+namespace android {
+
+struct MPEG2PSExtractor::Track : public MediaTrack, public RefBase {
+ Track(MPEG2PSExtractor *extractor,
+ unsigned stream_id, unsigned stream_type);
+
+ virtual status_t start(MetaDataBase *params);
+ virtual status_t stop();
+ virtual status_t getFormat(MetaDataBase &);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options);
+
+protected:
+ virtual ~Track();
+
+private:
+ friend struct MPEG2PSExtractor;
+
+ MPEG2PSExtractor *mExtractor;
+
+ unsigned mStreamID;
+ unsigned mStreamType;
+ ElementaryStreamQueue *mQueue;
+ sp<AnotherPacketSource> mSource;
+
+ status_t appendPESData(
+ unsigned PTS_DTS_flags,
+ uint64_t PTS, uint64_t DTS,
+ const uint8_t *data, size_t size);
+
+ DISALLOW_EVIL_CONSTRUCTORS(Track);
+};
+
+struct MPEG2PSExtractor::WrappedTrack : public MediaTrack {
+ WrappedTrack(MPEG2PSExtractor *extractor, const sp<Track> &track);
+
+ virtual status_t start(MetaDataBase *params);
+ virtual status_t stop();
+ virtual status_t getFormat(MetaDataBase &);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options);
+
+protected:
+ virtual ~WrappedTrack();
+
+private:
+ MPEG2PSExtractor *mExtractor;
+ sp<MPEG2PSExtractor::Track> mTrack;
+
+ DISALLOW_EVIL_CONSTRUCTORS(WrappedTrack);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+MPEG2PSExtractor::MPEG2PSExtractor(DataSourceBase *source)
+ : mDataSource(source),
+ mOffset(0),
+ mFinalResult(OK),
+ mBuffer(new ABuffer(0)),
+ mScanning(true),
+ mProgramStreamMapValid(false) {
+ for (size_t i = 0; i < 500; ++i) {
+ if (feedMore() != OK) {
+ break;
+ }
+ }
+
+ // Remove all tracks that were unable to determine their format.
+ MetaDataBase meta;
+ for (size_t i = mTracks.size(); i > 0;) {
+ i--;
+ if (mTracks.valueAt(i)->getFormat(meta) != OK) {
+ mTracks.removeItemsAt(i);
+ }
+ }
+
+ mScanning = false;
+}
+
+MPEG2PSExtractor::~MPEG2PSExtractor() {
+}
+
+size_t MPEG2PSExtractor::countTracks() {
+ return mTracks.size();
+}
+
+MediaTrack *MPEG2PSExtractor::getTrack(size_t index) {
+ if (index >= mTracks.size()) {
+ return NULL;
+ }
+
+ return new WrappedTrack(this, mTracks.valueAt(index));
+}
+
+status_t MPEG2PSExtractor::getTrackMetaData(
+ MetaDataBase &meta,
+ size_t index, uint32_t /* flags */) {
+ if (index >= mTracks.size()) {
+ return UNKNOWN_ERROR;
+ }
+
+ return mTracks.valueAt(index)->getFormat(meta);
+}
+
+status_t MPEG2PSExtractor::getMetaData(MetaDataBase &meta) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG2PS);
+
+ return OK;
+}
+
+uint32_t MPEG2PSExtractor::flags() const {
+ return CAN_PAUSE;
+}
+
+status_t MPEG2PSExtractor::feedMore() {
+ Mutex::Autolock autoLock(mLock);
+
+ // How much data we're reading at a time
+ static const size_t kChunkSize = 8192;
+
+ for (;;) {
+ status_t err = dequeueChunk();
+
+ if (err == -EAGAIN && mFinalResult == OK) {
+ memmove(mBuffer->base(), mBuffer->data(), mBuffer->size());
+ mBuffer->setRange(0, mBuffer->size());
+
+ if (mBuffer->size() + kChunkSize > mBuffer->capacity()) {
+ size_t newCapacity = mBuffer->capacity() + kChunkSize;
+ sp<ABuffer> newBuffer = new ABuffer(newCapacity);
+ memcpy(newBuffer->data(), mBuffer->data(), mBuffer->size());
+ newBuffer->setRange(0, mBuffer->size());
+ mBuffer = newBuffer;
+ }
+
+ ssize_t n = mDataSource->readAt(
+ mOffset, mBuffer->data() + mBuffer->size(), kChunkSize);
+
+ if (n < (ssize_t)kChunkSize) {
+ mFinalResult = (n < 0) ? (status_t)n : ERROR_END_OF_STREAM;
+ return mFinalResult;
+ }
+
+ mBuffer->setRange(mBuffer->offset(), mBuffer->size() + n);
+ mOffset += n;
+ } else if (err != OK) {
+ mFinalResult = err;
+ return err;
+ } else {
+ return OK;
+ }
+ }
+}
+
+status_t MPEG2PSExtractor::dequeueChunk() {
+ if (mBuffer->size() < 4) {
+ return -EAGAIN;
+ }
+
+ if (memcmp("\x00\x00\x01", mBuffer->data(), 3)) {
+ return ERROR_MALFORMED;
+ }
+
+ unsigned chunkType = mBuffer->data()[3];
+
+ ssize_t res;
+
+ switch (chunkType) {
+ case 0xba:
+ {
+ res = dequeuePack();
+ break;
+ }
+
+ case 0xbb:
+ {
+ res = dequeueSystemHeader();
+ break;
+ }
+
+ default:
+ {
+ res = dequeuePES();
+ break;
+ }
+ }
+
+ if (res > 0) {
+ if (mBuffer->size() < (size_t)res) {
+ return -EAGAIN;
+ }
+
+ mBuffer->setRange(mBuffer->offset() + res, mBuffer->size() - res);
+ res = OK;
+ }
+
+ return res;
+}
+
+ssize_t MPEG2PSExtractor::dequeuePack() {
+ // Fixed pack header field widths in bits:
+ // 32 + 2 + 3 + 1 + 15 + 1 + 15 + 1 + 9 + 1 + 22 + 1 + 1 = 104 (13 bytes),
+ // plus 5 reserved bits and the 3-bit pack_stuffing_length = 14 bytes total.
+
+ if (mBuffer->size() < 14) {
+ return -EAGAIN;
+ }
+
+ unsigned pack_stuffing_length = mBuffer->data()[13] & 7;
+
+ return pack_stuffing_length + 14;
+}
+
+ssize_t MPEG2PSExtractor::dequeueSystemHeader() {
+ if (mBuffer->size() < 6) {
+ return -EAGAIN;
+ }
+
+ unsigned header_length = U16_AT(mBuffer->data() + 4);
+
+ return header_length + 6;
+}
+
+ssize_t MPEG2PSExtractor::dequeuePES() {
+ if (mBuffer->size() < 6) {
+ return -EAGAIN;
+ }
+
+ unsigned PES_packet_length = U16_AT(mBuffer->data() + 4);
+ if (PES_packet_length == 0u) {
+ ALOGE("PES_packet_length is 0");
+ return -EAGAIN;
+ }
+
+ size_t n = PES_packet_length + 6;
+
+ if (mBuffer->size() < n) {
+ return -EAGAIN;
+ }
+
+ ABitReader br(mBuffer->data(), n);
+
+ unsigned packet_startcode_prefix = br.getBits(24);
+
+ ALOGV("packet_startcode_prefix = 0x%08x", packet_startcode_prefix);
+
+ if (packet_startcode_prefix != 1) {
+ ALOGV("Supposedly payload_unit_start=1 unit does not start "
+ "with startcode.");
+
+ return ERROR_MALFORMED;
+ }
+
+ if (packet_startcode_prefix != 0x000001u) {
+ ALOGE("Wrong PES prefix");
+ return ERROR_MALFORMED;
+ }
+
+ unsigned stream_id = br.getBits(8);
+ ALOGV("stream_id = 0x%02x", stream_id);
+
+ /* unsigned PES_packet_length = */br.getBits(16);
+
+ if (stream_id == 0xbc) {
+ // program_stream_map
+
+ if (!mScanning) {
+ return n;
+ }
+
+ mStreamTypeByESID.clear();
+
+ /* unsigned current_next_indicator = */br.getBits(1);
+ /* unsigned reserved = */br.getBits(2);
+ /* unsigned program_stream_map_version = */br.getBits(5);
+ /* unsigned reserved = */br.getBits(7);
+ /* unsigned marker_bit = */br.getBits(1);
+ unsigned program_stream_info_length = br.getBits(16);
+
+ size_t offset = 0;
+ while (offset < program_stream_info_length) {
+ if (offset + 2 > program_stream_info_length) {
+ return ERROR_MALFORMED;
+ }
+
+ unsigned descriptor_tag = br.getBits(8);
+ unsigned descriptor_length = br.getBits(8);
+
+ ALOGI("found descriptor tag 0x%02x of length %u",
+ descriptor_tag, descriptor_length);
+
+ if (offset + 2 + descriptor_length > program_stream_info_length) {
+ return ERROR_MALFORMED;
+ }
+
+ br.skipBits(8 * descriptor_length);
+
+ offset += 2 + descriptor_length;
+ }
+
+ unsigned elementary_stream_map_length = br.getBits(16);
+
+ offset = 0;
+ while (offset < elementary_stream_map_length) {
+ if (offset + 4 > elementary_stream_map_length) {
+ return ERROR_MALFORMED;
+ }
+
+ unsigned stream_type = br.getBits(8);
+ unsigned elementary_stream_id = br.getBits(8);
+
+ ALOGI("elementary stream id 0x%02x has stream type 0x%02x",
+ elementary_stream_id, stream_type);
+
+ mStreamTypeByESID.add(elementary_stream_id, stream_type);
+
+ unsigned elementary_stream_info_length = br.getBits(16);
+
+ if (offset + 4 + elementary_stream_info_length
+ > elementary_stream_map_length) {
+ return ERROR_MALFORMED;
+ }
+
+ offset += 4 + elementary_stream_info_length;
+ }
+
+ /* unsigned CRC32 = */br.getBits(32);
+
+ mProgramStreamMapValid = true;
+ } else if (stream_id != 0xbe // padding_stream
+ && stream_id != 0xbf // private_stream_2
+ && stream_id != 0xf0 // ECM
+ && stream_id != 0xf1 // EMM
+ && stream_id != 0xff // program_stream_directory
+ && stream_id != 0xf2 // DSMCC
+ && stream_id != 0xf8) { // H.222.1 type E
+ /* unsigned PES_marker_bits = */br.getBits(2); // should be 0x2(hex)
+ /* unsigned PES_scrambling_control = */br.getBits(2);
+ /* unsigned PES_priority = */br.getBits(1);
+ /* unsigned data_alignment_indicator = */br.getBits(1);
+ /* unsigned copyright = */br.getBits(1);
+ /* unsigned original_or_copy = */br.getBits(1);
+
+ unsigned PTS_DTS_flags = br.getBits(2);
+ ALOGV("PTS_DTS_flags = %u", PTS_DTS_flags);
+
+ unsigned ESCR_flag = br.getBits(1);
+ ALOGV("ESCR_flag = %u", ESCR_flag);
+
+ unsigned ES_rate_flag = br.getBits(1);
+ ALOGV("ES_rate_flag = %u", ES_rate_flag);
+
+ unsigned DSM_trick_mode_flag = br.getBits(1);
+ ALOGV("DSM_trick_mode_flag = %u", DSM_trick_mode_flag);
+
+ unsigned additional_copy_info_flag = br.getBits(1);
+ ALOGV("additional_copy_info_flag = %u", additional_copy_info_flag);
+
+ /* unsigned PES_CRC_flag = */br.getBits(1);
+ /* PES_extension_flag = */br.getBits(1);
+
+ unsigned PES_header_data_length = br.getBits(8);
+ ALOGV("PES_header_data_length = %u", PES_header_data_length);
+
+ unsigned optional_bytes_remaining = PES_header_data_length;
+
+ uint64_t PTS = 0, DTS = 0;
+
+ if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
+ if (optional_bytes_remaining < 5u) {
+ return ERROR_MALFORMED;
+ }
+
+ if (br.getBits(4) != PTS_DTS_flags) {
+ return ERROR_MALFORMED;
+ }
+
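+ // The 33-bit PTS is carried as three fields of 3, 15 and 15 bits, each followed by a marker bit that must be set.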
+ PTS = ((uint64_t)br.getBits(3)) << 30;
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+ PTS |= ((uint64_t)br.getBits(15)) << 15;
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+ PTS |= br.getBits(15);
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+
+ ALOGV("PTS = %" PRIu64, PTS);
+ // ALOGI("PTS = %.2f secs", PTS / 90000.0f);
+
+ optional_bytes_remaining -= 5;
+
+ if (PTS_DTS_flags == 3) {
+ if (optional_bytes_remaining < 5u) {
+ return ERROR_MALFORMED;
+ }
+
+ if (br.getBits(4) != 1u) {
+ return ERROR_MALFORMED;
+ }
+
+ DTS = ((uint64_t)br.getBits(3)) << 30;
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+ DTS |= ((uint64_t)br.getBits(15)) << 15;
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+ DTS |= br.getBits(15);
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+
+ ALOGV("DTS = %" PRIu64, DTS);
+
+ optional_bytes_remaining -= 5;
+ }
+ }
+
+ if (ESCR_flag) {
+ if (optional_bytes_remaining < 6u) {
+ return ERROR_MALFORMED;
+ }
+
+ br.getBits(2);
+
+ uint64_t ESCR = ((uint64_t)br.getBits(3)) << 30;
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+ ESCR |= ((uint64_t)br.getBits(15)) << 15;
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+ ESCR |= br.getBits(15);
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+
+ ALOGV("ESCR = %" PRIu64, ESCR);
+ /* unsigned ESCR_extension = */br.getBits(9);
+
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+
+ optional_bytes_remaining -= 6;
+ }
+
+ if (ES_rate_flag) {
+ if (optional_bytes_remaining < 3u) {
+ return ERROR_MALFORMED;
+ }
+
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+ /* unsigned ES_rate = */br.getBits(22);
+ if (br.getBits(1) != 1u) {
+ return ERROR_MALFORMED;
+ }
+
+ optional_bytes_remaining -= 3;
+ }
+
+ if (br.numBitsLeft() < optional_bytes_remaining * 8) {
+ return ERROR_MALFORMED;
+ }
+
+ br.skipBits(optional_bytes_remaining * 8);
+
+ // ES data follows.
+
+ if (PES_packet_length < PES_header_data_length + 3) {
+ return ERROR_MALFORMED;
+ }
+
+ unsigned dataLength =
+ PES_packet_length - 3 - PES_header_data_length;
+
+ if (br.numBitsLeft() < dataLength * 8) {
+ ALOGE("PES packet does not carry enough data to contain "
+ "payload. (numBitsLeft = %zu, required = %u)",
+ br.numBitsLeft(), dataLength * 8);
+
+ return ERROR_MALFORMED;
+ }
+
+ ssize_t index = mTracks.indexOfKey(stream_id);
+ if (index < 0 && mScanning) {
+ unsigned streamType;
+
+ ssize_t streamTypeIndex;
+ if (mProgramStreamMapValid
+ && (streamTypeIndex =
+ mStreamTypeByESID.indexOfKey(stream_id)) >= 0) {
+ streamType = mStreamTypeByESID.valueAt(streamTypeIndex);
+ } else if ((stream_id & ~0x1f) == 0xc0) {
+ // ISO/IEC 13818-3 or ISO/IEC 11172-3 or ISO/IEC 13818-7
+ // or ISO/IEC 14496-3 audio
+ streamType = ATSParser::STREAMTYPE_MPEG2_AUDIO;
+ } else if ((stream_id & ~0x0f) == 0xe0) {
+ // ISO/IEC 13818-2 or ISO/IEC 11172-2 or ISO/IEC 14496-2 video
+ streamType = ATSParser::STREAMTYPE_MPEG2_VIDEO;
+ } else {
+ streamType = ATSParser::STREAMTYPE_RESERVED;
+ }
+
+ index = mTracks.add(
+ stream_id, new Track(this, stream_id, streamType));
+ }
+
+ status_t err = OK;
+
+ if (index >= 0) {
+ err =
+ mTracks.editValueAt(index)->appendPESData(
+ PTS_DTS_flags, PTS, DTS, br.data(), dataLength);
+ }
+
+ br.skipBits(dataLength * 8);
+
+ if (err != OK) {
+ return err;
+ }
+ } else if (stream_id == 0xbe) { // padding_stream
+ if (PES_packet_length == 0u) {
+ return ERROR_MALFORMED;
+ }
+ br.skipBits(PES_packet_length * 8);
+ } else {
+ if (PES_packet_length == 0u) {
+ return ERROR_MALFORMED;
+ }
+ br.skipBits(PES_packet_length * 8);
+ }
+
+ return n;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+MPEG2PSExtractor::Track::Track(
+ MPEG2PSExtractor *extractor, unsigned stream_id, unsigned stream_type)
+ : mExtractor(extractor),
+ mStreamID(stream_id),
+ mStreamType(stream_type),
+ mQueue(NULL) {
+ bool supported = true;
+ ElementaryStreamQueue::Mode mode;
+
+ switch (mStreamType) {
+ case ATSParser::STREAMTYPE_H264:
+ mode = ElementaryStreamQueue::H264;
+ break;
+ case ATSParser::STREAMTYPE_MPEG2_AUDIO_ADTS:
+ mode = ElementaryStreamQueue::AAC;
+ break;
+ case ATSParser::STREAMTYPE_MPEG1_AUDIO:
+ case ATSParser::STREAMTYPE_MPEG2_AUDIO:
+ mode = ElementaryStreamQueue::MPEG_AUDIO;
+ break;
+
+ case ATSParser::STREAMTYPE_MPEG1_VIDEO:
+ case ATSParser::STREAMTYPE_MPEG2_VIDEO:
+ mode = ElementaryStreamQueue::MPEG_VIDEO;
+ break;
+
+ case ATSParser::STREAMTYPE_MPEG4_VIDEO:
+ mode = ElementaryStreamQueue::MPEG4_VIDEO;
+ break;
+
+ default:
+ supported = false;
+ break;
+ }
+
+ if (supported) {
+ mQueue = new ElementaryStreamQueue(mode);
+ } else {
+ ALOGI("unsupported stream ID 0x%02x", stream_id);
+ }
+}
+
+MPEG2PSExtractor::Track::~Track() {
+ delete mQueue;
+ mQueue = NULL;
+}
+
+status_t MPEG2PSExtractor::Track::start(MetaDataBase *) {
+ if (mSource == NULL) {
+ return NO_INIT;
+ }
+
+ return mSource->start(NULL); // AnotherPacketSource::start doesn't use its argument
+}
+
+status_t MPEG2PSExtractor::Track::stop() {
+ if (mSource == NULL) {
+ return NO_INIT;
+ }
+
+ return mSource->stop();
+}
+
+status_t MPEG2PSExtractor::Track::getFormat(MetaDataBase &meta) {
+ if (mSource == NULL) {
+ return NO_INIT;
+ }
+
+ sp<MetaData> sourceMeta = mSource->getFormat();
+ meta = *sourceMeta;
+ return OK;
+}
+
+status_t MPEG2PSExtractor::Track::read(
+ MediaBufferBase **buffer, const ReadOptions *options) {
+ if (mSource == NULL) {
+ return NO_INIT;
+ }
+
+ status_t finalResult;
+ while (!mSource->hasBufferAvailable(&finalResult)) {
+ if (finalResult != OK) {
+ return ERROR_END_OF_STREAM;
+ }
+
+ status_t err = mExtractor->feedMore();
+
+ if (err != OK) {
+ mSource->signalEOS(err);
+ }
+ }
+
+ return mSource->read(buffer, options);
+}
+
+status_t MPEG2PSExtractor::Track::appendPESData(
+ unsigned PTS_DTS_flags,
+ uint64_t PTS, uint64_t /* DTS */,
+ const uint8_t *data, size_t size) {
+ if (mQueue == NULL) {
+ return OK;
+ }
+
+ int64_t timeUs;
+ if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
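+ // PTS is expressed in 90 kHz clock ticks; scale to microseconds (x 1000000 / 90000 == x 100 / 9).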
+ timeUs = (PTS * 100) / 9;
+ } else {
+ timeUs = 0;
+ }
+
+ status_t err = mQueue->appendData(data, size, timeUs);
+
+ if (err != OK) {
+ return err;
+ }
+
+ sp<ABuffer> accessUnit;
+ while ((accessUnit = mQueue->dequeueAccessUnit()) != NULL) {
+ if (mSource == NULL) {
+ sp<MetaData> meta = mQueue->getFormat();
+
+ if (meta != NULL) {
+ ALOGV("Stream ID 0x%02x now has data.", mStreamID);
+
+ mSource = new AnotherPacketSource(meta);
+ mSource->queueAccessUnit(accessUnit);
+ }
+ } else if (mQueue->getFormat() != NULL) {
+ mSource->queueAccessUnit(accessUnit);
+ }
+ }
+
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+MPEG2PSExtractor::WrappedTrack::WrappedTrack(
+ MPEG2PSExtractor *extractor, const sp<Track> &track)
+ : mExtractor(extractor),
+ mTrack(track) {
+}
+
+MPEG2PSExtractor::WrappedTrack::~WrappedTrack() {
+}
+
+status_t MPEG2PSExtractor::WrappedTrack::start(MetaDataBase *params) {
+ return mTrack->start(params);
+}
+
+status_t MPEG2PSExtractor::WrappedTrack::stop() {
+ return mTrack->stop();
+}
+
+status_t MPEG2PSExtractor::WrappedTrack::getFormat(MetaDataBase &meta) {
+ return mTrack->getFormat(meta);
+}
+
+status_t MPEG2PSExtractor::WrappedTrack::read(
+ MediaBufferBase **buffer, const ReadOptions *options) {
+ return mTrack->read(buffer, options);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool SniffMPEG2PS(
+ DataSourceBase *source, float *confidence) {
+ uint8_t header[5];
+ if (source->readAt(0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+ return false;
+ }
+
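+ // A program stream begins with a pack_start_code (0x00 0x00 0x01 0xBA); the '01' bits that follow the start code identify an MPEG-2 pack header.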
+ if (memcmp("\x00\x00\x01\xba", header, 4) || (header[4] >> 6) != 1) {
+ return false;
+ }
+
+ *confidence = 0.25f; // Slightly larger than .mp3 extractor's confidence
+
+ return true;
+}
+
+} // namespace android
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.h b/media/extractors/mpeg2/MPEG2PSExtractor.h
new file mode 100644
index 0000000..8b9dad9
--- /dev/null
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MPEG2_PS_EXTRACTOR_H_
+
+#define MPEG2_PS_EXTRACTOR_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/MetaDataBase.h>
+#include <utils/threads.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
+struct ABuffer;
+struct AMessage;
+struct Track;
+class String8;
+
+struct MPEG2PSExtractor : public MediaExtractor {
+ explicit MPEG2PSExtractor(DataSourceBase *source);
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+
+ virtual uint32_t flags() const;
+ virtual const char * name() { return "MPEG2PSExtractor"; }
+
+protected:
+ virtual ~MPEG2PSExtractor();
+
+private:
+ struct Track;
+ struct WrappedTrack;
+
+ mutable Mutex mLock;
+ DataSourceBase *mDataSource;
+
+ off64_t mOffset;
+ status_t mFinalResult;
+ sp<ABuffer> mBuffer;
+ KeyedVector<unsigned, sp<Track> > mTracks;
+ bool mScanning;
+
+ bool mProgramStreamMapValid;
+ KeyedVector<unsigned, unsigned> mStreamTypeByESID;
+
+ status_t feedMore();
+
+ status_t dequeueChunk();
+ ssize_t dequeuePack();
+ ssize_t dequeueSystemHeader();
+ ssize_t dequeuePES();
+
+ DISALLOW_EVIL_CONSTRUCTORS(MPEG2PSExtractor);
+};
+
+bool SniffMPEG2PS(DataSourceBase *source, float *confidence);
+
+} // namespace android
+
+#endif // MPEG2_PS_EXTRACTOR_H_
+
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
new file mode 100644
index 0000000..c83f7ce
--- /dev/null
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -0,0 +1,669 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MPEG2TSExtractor"
+
+#include <inttypes.h>
+#include <utils/Log.h>
+
+#include "MPEG2TSExtractor.h"
+
+#include <media/DataSourceBase.h>
+#include <media/IStreamSource.h>
+#include <media/MediaTrack.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/MediaKeys.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/String8.h>
+
+#include "mpeg2ts/AnotherPacketSource.h"
+#include "mpeg2ts/ATSParser.h"
+
+#include <hidl/HybridInterface.h>
+#include <android/hardware/cas/1.0/ICas.h>
+
+namespace android {
+
+using hardware::cas::V1_0::ICas;
+
+static const size_t kTSPacketSize = 188;
+static const int kMaxDurationReadSize = 250000LL;
+static const int kMaxDurationRetry = 6;
+
+struct MPEG2TSSource : public MediaTrack {
+ MPEG2TSSource(
+ MPEG2TSExtractor *extractor,
+ const sp<AnotherPacketSource> &impl,
+ bool doesSeek);
+ virtual ~MPEG2TSSource();
+
+ virtual status_t start(MetaDataBase *params = NULL);
+ virtual status_t stop();
+ virtual status_t getFormat(MetaDataBase &);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+private:
+ MPEG2TSExtractor *mExtractor;
+ sp<AnotherPacketSource> mImpl;
+
+ // If there are both audio and video streams, only the video stream
+ // will signal seek on the extractor; otherwise the single stream will seek.
+ bool mDoesSeek;
+
+ DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSSource);
+};
+
+MPEG2TSSource::MPEG2TSSource(
+ MPEG2TSExtractor *extractor,
+ const sp<AnotherPacketSource> &impl,
+ bool doesSeek)
+ : mExtractor(extractor),
+ mImpl(impl),
+ mDoesSeek(doesSeek) {
+}
+
+MPEG2TSSource::~MPEG2TSSource() {
+}
+
+status_t MPEG2TSSource::start(MetaDataBase *) {
+ return mImpl->start(NULL); // AnotherPacketSource::start() doesn't use its argument
+}
+
+status_t MPEG2TSSource::stop() {
+ return mImpl->stop();
+}
+
+status_t MPEG2TSSource::getFormat(MetaDataBase &meta) {
+ sp<MetaData> implMeta = mImpl->getFormat();
+ meta = *implMeta;
+ return OK;
+}
+
+status_t MPEG2TSSource::read(
+ MediaBufferBase **out, const ReadOptions *options) {
+ *out = NULL;
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode seekMode;
+ if (mDoesSeek && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
+ // seek is needed
+ status_t err = mExtractor->seek(seekTimeUs, seekMode);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (mExtractor->feedUntilBufferAvailable(mImpl) != OK) {
+ return ERROR_END_OF_STREAM;
+ }
+
+ return mImpl->read(out, options);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+MPEG2TSExtractor::MPEG2TSExtractor(DataSourceBase *source)
+ : mDataSource(source),
+ mParser(new ATSParser),
+ mLastSyncEvent(0),
+ mOffset(0) {
+ init();
+}
+
+size_t MPEG2TSExtractor::countTracks() {
+ return mSourceImpls.size();
+}
+
+MediaTrack *MPEG2TSExtractor::getTrack(size_t index) {
+ if (index >= mSourceImpls.size()) {
+ return NULL;
+ }
+
+ // The seek reference track (video if present; audio otherwise) performs
+ // seek requests, while other tracks ignore requests.
+ return new MPEG2TSSource(this, mSourceImpls.editItemAt(index),
+ (mSeekSyncPoints == &mSyncPoints.editItemAt(index)));
+}
+
+status_t MPEG2TSExtractor::getTrackMetaData(
+ MetaDataBase &meta,
+ size_t index, uint32_t /* flags */) {
+ sp<MetaData> implMeta = index < mSourceImpls.size()
+ ? mSourceImpls.editItemAt(index)->getFormat() : NULL;
+ if (implMeta == NULL) {
+ return UNKNOWN_ERROR;
+ }
+ meta = *implMeta;
+ return OK;
+}
+
+status_t MPEG2TSExtractor::getMetaData(MetaDataBase &meta) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
+
+ return OK;
+}
+
+//static
+bool MPEG2TSExtractor::isScrambledFormat(MetaDataBase &format) {
+ const char *mime;
+ return format.findCString(kKeyMIMEType, &mime)
+ && (!strcasecmp(MEDIA_MIMETYPE_VIDEO_SCRAMBLED, mime)
+ || !strcasecmp(MEDIA_MIMETYPE_AUDIO_SCRAMBLED, mime));
+}
+
+status_t MPEG2TSExtractor::setMediaCas(const uint8_t* casToken, size_t size) {
+ HalToken halToken;
+ halToken.setToExternal((uint8_t*)casToken, size);
+ sp<ICas> cas = ICas::castFrom(retrieveHalInterface(halToken));
+ ALOGD("setMediaCas: %p", cas.get());
+
+ status_t err = mParser->setMediaCas(cas);
+ if (err == OK) {
+ ALOGI("All tracks now have descramblers");
+ init();
+ }
+ return err;
+}
+
+void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
+ bool found = false;
+ for (size_t i = 0; i < mSourceImpls.size(); i++) {
+ if (mSourceImpls[i] == impl) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ mSourceImpls.push(impl);
+ }
+}
+
+void MPEG2TSExtractor::init() {
+ bool haveAudio = false;
+ bool haveVideo = false;
+ int64_t startTime = ALooper::GetNowUs();
+
+ status_t err;
+ while ((err = feedMore(true /* isInit */)) == OK
+ || err == ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED) {
+ if (haveAudio && haveVideo) {
+ addSyncPoint_l(mLastSyncEvent);
+ mLastSyncEvent.reset();
+ break;
+ }
+ if (!haveVideo) {
+ sp<AnotherPacketSource> impl = mParser->getSource(ATSParser::VIDEO);
+
+ if (impl != NULL) {
+ sp<MetaData> format = impl->getFormat();
+ if (format != NULL) {
+ haveVideo = true;
+ addSource(impl);
+ if (!isScrambledFormat(*(format.get()))) {
+ mSyncPoints.push();
+ mSeekSyncPoints = &mSyncPoints.editTop();
+ }
+ }
+ }
+ }
+
+ if (!haveAudio) {
+ sp<AnotherPacketSource> impl = mParser->getSource(ATSParser::AUDIO);
+
+ if (impl != NULL) {
+ sp<MetaData> format = impl->getFormat();
+ if (format != NULL) {
+ haveAudio = true;
+ addSource(impl);
+ if (!isScrambledFormat(*(format.get()))) {
+ mSyncPoints.push();
+ if (!haveVideo) {
+ mSeekSyncPoints = &mSyncPoints.editTop();
+ }
+ }
+ }
+ }
+ }
+
+ addSyncPoint_l(mLastSyncEvent);
+ mLastSyncEvent.reset();
+
+ // ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED is returned when the mpeg2ts
+ // is scrambled but we don't have a MediaCas object set. The extraction
+ // will only continue when setMediaCas() is called successfully.
+ if (err == ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED) {
+ ALOGI("stopped parsing scrambled content, "
+ "haveAudio=%d, haveVideo=%d, elaspedTime=%" PRId64,
+ haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
+ return;
+ }
+
+ // Wait only for 2 seconds to detect audio/video streams.
+ if (ALooper::GetNowUs() - startTime > 2000000ll) {
+ break;
+ }
+ }
+
+ off64_t size;
+ if (mDataSource->getSize(&size) == OK && (haveAudio || haveVideo)) {
+ sp<AnotherPacketSource> impl = haveVideo
+ ? mParser->getSource(ATSParser::VIDEO)
+ : mParser->getSource(ATSParser::AUDIO);
+ size_t prevSyncSize = 1;
+ int64_t durationUs = -1;
+ List<int64_t> durations;
+ // Estimate duration --- stabilize until you get <500ms deviation.
+ while (feedMore() == OK
+ && ALooper::GetNowUs() - startTime <= 2000000ll) {
+ if (mSeekSyncPoints->size() > prevSyncSize) {
+ prevSyncSize = mSeekSyncPoints->size();
+ int64_t diffUs = mSeekSyncPoints->keyAt(prevSyncSize - 1)
+ - mSeekSyncPoints->keyAt(0);
+ off64_t diffOffset = mSeekSyncPoints->valueAt(prevSyncSize - 1)
+ - mSeekSyncPoints->valueAt(0);
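+ // Extrapolate the overall duration from the time/byte span between the first and latest sync points, scaled up to the full file size.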
+ int64_t currentDurationUs = size * diffUs / diffOffset;
+ durations.push_back(currentDurationUs);
+ if (durations.size() > 5) {
+ durations.erase(durations.begin());
+ int64_t min = *durations.begin();
+ int64_t max = *durations.begin();
+ for (auto duration : durations) {
+ if (min > duration) {
+ min = duration;
+ }
+ if (max < duration) {
+ max = duration;
+ }
+ }
+ if (max - min < 500 * 1000) {
+ durationUs = currentDurationUs;
+ break;
+ }
+ }
+ }
+ }
+ status_t err;
+ int64_t bufferedDurationUs;
+ bufferedDurationUs = impl->getBufferedDurationUs(&err);
+ if (err == ERROR_END_OF_STREAM) {
+ durationUs = bufferedDurationUs;
+ }
+ if (durationUs > 0) {
+ const sp<MetaData> meta = impl->getFormat();
+ meta->setInt64(kKeyDuration, durationUs);
+ impl->setFormat(meta);
+ } else {
+ estimateDurationsFromTimesUsAtEnd();
+ }
+ }
+
+ ALOGI("haveAudio=%d, haveVideo=%d, elaspedTime=%" PRId64,
+ haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
+}
+
+status_t MPEG2TSExtractor::feedMore(bool isInit) {
+ Mutex::Autolock autoLock(mLock);
+
+ uint8_t packet[kTSPacketSize];
+ ssize_t n = mDataSource->readAt(mOffset, packet, kTSPacketSize);
+
+ if (n < (ssize_t)kTSPacketSize) {
+ if (n >= 0) {
+ mParser->signalEOS(ERROR_END_OF_STREAM);
+ }
+ return (n < 0) ? (status_t)n : ERROR_END_OF_STREAM;
+ }
+
+ ATSParser::SyncEvent event(mOffset);
+ mOffset += n;
+ status_t err = mParser->feedTSPacket(packet, kTSPacketSize, &event);
+ if (event.hasReturnedData()) {
+ if (isInit) {
+ mLastSyncEvent = event;
+ } else {
+ addSyncPoint_l(event);
+ }
+ }
+ return err;
+}
+
+void MPEG2TSExtractor::addSyncPoint_l(const ATSParser::SyncEvent &event) {
+ if (!event.hasReturnedData()) {
+ return;
+ }
+
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ if (mSourceImpls[i].get() == event.getMediaSource().get()) {
+ KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
+ syncPoints->add(event.getTimeUs(), event.getOffset());
+ // Cap the sync-point table at roughly 5 MB per track (327680 entries x 16 bytes per timestamp/offset pair).
+ size_t size = syncPoints->size();
+ if (size >= 327680) {
+ int64_t firstTimeUs = syncPoints->keyAt(0);
+ int64_t lastTimeUs = syncPoints->keyAt(size - 1);
+ if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
+ syncPoints->removeItemsAt(0, 4096);
+ } else {
+ syncPoints->removeItemsAt(size - 4096, 4096);
+ }
+ }
+ break;
+ }
+ }
+}
+
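+// Estimate the track durations by parsing packets from near the end of the file with a temporary parser that treats timestamps as absolute, then subtracting the stream's first PTS.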
+status_t MPEG2TSExtractor::estimateDurationsFromTimesUsAtEnd() {
+ if (!(mDataSource->flags() & DataSourceBase::kIsLocalFileSource)) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ off64_t size = 0;
+ status_t err = mDataSource->getSize(&size);
+ if (err != OK) {
+ return err;
+ }
+
+ uint8_t packet[kTSPacketSize];
+ const off64_t zero = 0;
+ off64_t offset = max(zero, size - kMaxDurationReadSize);
+ if (mDataSource->readAt(offset, &packet, 0) < 0) {
+ return ERROR_IO;
+ }
+
+ int retry = 0;
+ bool allDurationsFound = false;
+ int64_t timeAnchorUs = mParser->getFirstPTSTimeUs();
+ do {
+ int bytesRead = 0;
+ sp<ATSParser> parser = new ATSParser(ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
+ ATSParser::SyncEvent ev(0);
+ offset = max(zero, size - (kMaxDurationReadSize << retry));
+ offset = (offset / kTSPacketSize) * kTSPacketSize;
+ for (;;) {
+ if (bytesRead >= kMaxDurationReadSize << max(0, retry - 1)) {
+ break;
+ }
+
+ ssize_t n = mDataSource->readAt(offset, packet, kTSPacketSize);
+ if (n < 0) {
+ return n;
+ } else if (n < (ssize_t)kTSPacketSize) {
+ break;
+ }
+
+ offset += kTSPacketSize;
+ bytesRead += kTSPacketSize;
+ err = parser->feedTSPacket(packet, kTSPacketSize, &ev);
+ if (err != OK) {
+ return err;
+ }
+
+ if (ev.hasReturnedData()) {
+ int64_t durationUs = ev.getTimeUs();
+ ATSParser::SourceType type = ev.getType();
+ ev.reset();
+
+ int64_t firstTimeUs;
+ sp<AnotherPacketSource> src = mParser->getSource(type);
+ if (src == NULL || src->nextBufferTime(&firstTimeUs) != OK) {
+ continue;
+ }
+ durationUs += src->getEstimatedBufferDurationUs();
+ durationUs -= timeAnchorUs;
+ durationUs -= firstTimeUs;
+ if (durationUs > 0) {
+ int64_t origDurationUs, lastDurationUs;
+ const sp<MetaData> meta = src->getFormat();
+ const uint32_t kKeyLastDuration = 'ldur';
+ // Require two consecutive duration calculations to be within 1 sec before
+ // updating; use MetaData to store previous duration estimate in per-stream
+ // context.
+ if (!meta->findInt64(kKeyDuration, &origDurationUs)
+ || !meta->findInt64(kKeyLastDuration, &lastDurationUs)
+ || (origDurationUs < durationUs
+ && abs(durationUs - lastDurationUs) < 60000000)) {
+ meta->setInt64(kKeyDuration, durationUs);
+ }
+ meta->setInt64(kKeyLastDuration, durationUs);
+ }
+ }
+ }
+
+ if (!allDurationsFound) {
+ allDurationsFound = true;
+ for (auto t: {ATSParser::VIDEO, ATSParser::AUDIO}) {
+ sp<AnotherPacketSource> src = mParser->getSource(t);
+ if (src == NULL) {
+ continue;
+ }
+ int64_t durationUs;
+ const sp<MetaData> meta = src->getFormat();
+ if (!meta->findInt64(kKeyDuration, &durationUs)) {
+ allDurationsFound = false;
+ break;
+ }
+ }
+ }
+
+ ++retry;
+ } while (!allDurationsFound && offset > 0 && retry <= kMaxDurationRetry);
+
+ return allDurationsFound ? OK : ERROR_UNSUPPORTED;
+}
+
+uint32_t MPEG2TSExtractor::flags() const {
+ return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
+}
+
+status_t MPEG2TSExtractor::seek(int64_t seekTimeUs,
+ const MediaTrack::ReadOptions::SeekMode &seekMode) {
+ if (mSeekSyncPoints == NULL || mSeekSyncPoints->isEmpty()) {
+ ALOGW("No sync point to seek to.");
+ // ... and therefore we have nothing useful to do here.
+ return OK;
+ }
+
+ // Determine whether we're seeking beyond the known area.
+ bool shouldSeekBeyond =
+ (seekTimeUs > mSeekSyncPoints->keyAt(mSeekSyncPoints->size() - 1));
+
+ // Determine the sync point to seek.
+ size_t index = 0;
+ for (; index < mSeekSyncPoints->size(); ++index) {
+ int64_t timeUs = mSeekSyncPoints->keyAt(index);
+ if (timeUs > seekTimeUs) {
+ break;
+ }
+ }
+
+ switch (seekMode) {
+ case MediaTrack::ReadOptions::SEEK_NEXT_SYNC:
+ if (index == mSeekSyncPoints->size()) {
+ ALOGW("Next sync not found; starting from the latest sync.");
+ --index;
+ }
+ break;
+ case MediaTrack::ReadOptions::SEEK_CLOSEST_SYNC:
+ case MediaTrack::ReadOptions::SEEK_CLOSEST:
+ ALOGW("seekMode not supported: %d; falling back to PREVIOUS_SYNC",
+ seekMode);
+ // fall-through
+ case MediaTrack::ReadOptions::SEEK_PREVIOUS_SYNC:
+ if (index == 0) {
+ ALOGW("Previous sync not found; starting from the earliest "
+ "sync.");
+ } else {
+ --index;
+ }
+ break;
+ default:
+ return ERROR_UNSUPPORTED;
+ }
+ if (!shouldSeekBeyond || mOffset <= mSeekSyncPoints->valueAt(index)) {
+ int64_t actualSeekTimeUs = mSeekSyncPoints->keyAt(index);
+ mOffset = mSeekSyncPoints->valueAt(index);
+ status_t err = queueDiscontinuityForSeek(actualSeekTimeUs);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ if (shouldSeekBeyond) {
+ status_t err = seekBeyond(seekTimeUs);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ // Fast-forward to sync frame.
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ const sp<AnotherPacketSource> &impl = mSourceImpls[i];
+ status_t err;
+ feedUntilBufferAvailable(impl);
+ while (impl->hasBufferAvailable(&err)) {
+ sp<AMessage> meta = impl->getMetaAfterLastDequeued(0);
+ sp<ABuffer> buffer;
+ if (meta == NULL) {
+ return UNKNOWN_ERROR;
+ }
+ int32_t sync;
+ if (meta->findInt32("isSync", &sync) && sync) {
+ break;
+ }
+ err = impl->dequeueAccessUnit(&buffer);
+ if (err != OK) {
+ return err;
+ }
+ feedUntilBufferAvailable(impl);
+ }
+ }
+
+ return OK;
+}
+
+status_t MPEG2TSExtractor::queueDiscontinuityForSeek(int64_t actualSeekTimeUs) {
+ // Signal discontinuity
+ sp<AMessage> extra(new AMessage);
+ extra->setInt64(kATSParserKeyMediaTimeUs, actualSeekTimeUs);
+ mParser->signalDiscontinuity(ATSParser::DISCONTINUITY_TIME, extra);
+
+ // After discontinuity, impl should only have discontinuities
+ // with the last being what we queued. Dequeue them all here.
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
+ sp<ABuffer> buffer;
+ status_t err;
+ while (impl->hasBufferAvailable(&err)) {
+ if (err != OK) {
+ return err;
+ }
+ err = impl->dequeueAccessUnit(&buffer);
+ // If the source contains anything but discontinuity, that's
+ // a programming mistake.
+ CHECK(err == INFO_DISCONTINUITY);
+ }
+ }
+
+ // Feed until we have a buffer for each source.
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
+ sp<ABuffer> buffer;
+ status_t err = feedUntilBufferAvailable(impl);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ return OK;
+}
+
+status_t MPEG2TSExtractor::seekBeyond(int64_t seekTimeUs) {
+ // If we're seeking beyond where we know --- read until we reach there.
+ size_t syncPointsSize = mSeekSyncPoints->size();
+
+ while (seekTimeUs > mSeekSyncPoints->keyAt(
+ mSeekSyncPoints->size() - 1)) {
+ status_t err;
+ if (syncPointsSize < mSeekSyncPoints->size()) {
+ syncPointsSize = mSeekSyncPoints->size();
+ int64_t syncTimeUs = mSeekSyncPoints->keyAt(syncPointsSize - 1);
+ // Dequeue buffers before sync point in order to avoid too much
+ // cache building up.
+ sp<ABuffer> buffer;
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ const sp<AnotherPacketSource> &impl = mSourceImpls[i];
+ int64_t timeUs;
+ while ((err = impl->nextBufferTime(&timeUs)) == OK) {
+ if (timeUs < syncTimeUs) {
+ impl->dequeueAccessUnit(&buffer);
+ } else {
+ break;
+ }
+ }
+ if (err != OK && err != -EWOULDBLOCK) {
+ return err;
+ }
+ }
+ }
+ if (feedMore() != OK) {
+ return ERROR_END_OF_STREAM;
+ }
+ }
+
+ return OK;
+}
+
+status_t MPEG2TSExtractor::feedUntilBufferAvailable(
+ const sp<AnotherPacketSource> &impl) {
+ status_t finalResult;
+ while (!impl->hasBufferAvailable(&finalResult)) {
+ if (finalResult != OK) {
+ return finalResult;
+ }
+
+ status_t err = feedMore();
+ if (err != OK) {
+ impl->signalEOS(err);
+ }
+ }
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
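+// An MPEG-2 transport stream is a sequence of 188-byte packets, each starting with the 0x47 sync byte; require five consecutive sync bytes at 188-byte spacing before claiming a match.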
+bool SniffMPEG2TS(DataSourceBase *source, float *confidence) {
+ for (int i = 0; i < 5; ++i) {
+ char header;
+ if (source->readAt(kTSPacketSize * i, &header, 1) != 1
+ || header != 0x47) {
+ return false;
+ }
+ }
+
+ *confidence = 0.1f;
+
+ return true;
+}
+
+} // namespace android
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/MPEG2TSExtractor.h
new file mode 100644
index 0000000..cbdd3cb
--- /dev/null
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.h
@@ -0,0 +1,108 @@
+/*
+
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MPEG2_TS_EXTRACTOR_H_
+
+#define MPEG2_TS_EXTRACTOR_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaTrack.h>
+#include <media/stagefright/MetaDataBase.h>
+#include <utils/threads.h>
+#include <utils/KeyedVector.h>
+#include <utils/Vector.h>
+
+#include "mpeg2ts/ATSParser.h"
+
+namespace android {
+
+struct AMessage;
+struct AnotherPacketSource;
+struct ATSParser;
+class DataSourceBase;
+struct MPEG2TSSource;
+class String8;
+
+struct MPEG2TSExtractor : public MediaExtractor {
+ explicit MPEG2TSExtractor(DataSourceBase *source);
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase &meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+
+ virtual status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) override;
+
+ virtual uint32_t flags() const;
+ virtual const char * name() { return "MPEG2TSExtractor"; }
+
+private:
+ friend struct MPEG2TSSource;
+
+ mutable Mutex mLock;
+
+ DataSourceBase *mDataSource;
+
+ sp<ATSParser> mParser;
+
+ // Used to remember the SyncEvent that occurred in feedMore() when called from init(),
+ // because init() needs to update |mSourceImpls| before adding the SyncPoint.
+ ATSParser::SyncEvent mLastSyncEvent;
+
+ Vector<sp<AnotherPacketSource> > mSourceImpls;
+
+ Vector<KeyedVector<int64_t, off64_t> > mSyncPoints;
+ // Sync points used for seeking --- normally the video track's sync points
+ // are used; if no video track is present, the audio track's are used instead.
+ KeyedVector<int64_t, off64_t> *mSeekSyncPoints;
+
+ off64_t mOffset;
+
+ static bool isScrambledFormat(MetaDataBase &format);
+
+ void init();
+ void addSource(const sp<AnotherPacketSource> &impl);
+ // Try to feed more data from source to parser.
+ // |isInit| means this function is called inside init(). This is a signal to
+ // save SyncEvent so that init() can add SyncPoint after it updates |mSourceImpls|.
+ // Returns OK if the expected amount of data was read from the DataSourceBase and
+ // successfully parsed. Otherwise an error code is returned, e.g. ERROR_END_OF_STREAM
+ // when no more data is available from the DataSourceBase, or a parser error if the
+ // data is malformed.
+ status_t feedMore(bool isInit = false);
+ status_t seek(int64_t seekTimeUs,
+ const MediaSource::ReadOptions::SeekMode& seekMode);
+ status_t queueDiscontinuityForSeek(int64_t actualSeekTimeUs);
+ status_t seekBeyond(int64_t seekTimeUs);
+
+ status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
+
+ // Add a SyncPoint derived from |event|.
+ void addSyncPoint_l(const ATSParser::SyncEvent &event);
+
+ status_t estimateDurationsFromTimesUsAtEnd();
+
+ DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
+};
+
+bool SniffMPEG2TS(DataSourceBase *source, float *confidence);
+
+} // namespace android
+
+#endif // MPEG2_TS_EXTRACTOR_H_
diff --git a/media/libstagefright/matroska/NOTICE b/media/extractors/mpeg2/NOTICE
similarity index 100%
copy from media/libstagefright/matroska/NOTICE
copy to media/extractors/mpeg2/NOTICE
diff --git a/media/extractors/mpeg2/exports.lds b/media/extractors/mpeg2/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/mpeg2/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
diff --git a/media/extractors/ogg/Android.bp b/media/extractors/ogg/Android.bp
new file mode 100644
index 0000000..7c6fc75
--- /dev/null
+++ b/media/extractors/ogg/Android.bp
@@ -0,0 +1,44 @@
+cc_library_shared {
+
+ srcs: ["OggExtractor.cpp"],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/include",
+ "external/tremolo",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmediaextractor",
+ ],
+
+ static_libs: [
+ "libstagefright_foundation",
+ "libutils",
+ "libvorbisidec",
+ ],
+
+ name: "liboggextractor",
+ relative_install_path: "extractors",
+
+ compile_multilib: "first",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/extractors/ogg/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/extractors/ogg/MODULE_LICENSE_APACHE2
diff --git a/media/libstagefright/matroska/NOTICE b/media/extractors/ogg/NOTICE
similarity index 100%
copy from media/libstagefright/matroska/NOTICE
copy to media/extractors/ogg/NOTICE
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
new file mode 100644
index 0000000..b2fe69c
--- /dev/null
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -0,0 +1,1287 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "OggExtractor"
+#include <utils/Log.h>
+
+#include "OggExtractor.h"
+
+#include <cutils/properties.h>
+#include <media/DataSourceBase.h>
+#include <media/ExtractorUtils.h>
+#include <media/MediaTrack.h>
+#include <media/VorbisComment.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/base64.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/MediaBufferBase.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaDataBase.h>
+#include <utils/String8.h>
+
+extern "C" {
+ #include <Tremolo/codec_internal.h>
+
+ int _vorbis_unpack_books(vorbis_info *vi,oggpack_buffer *opb);
+ int _vorbis_unpack_info(vorbis_info *vi,oggpack_buffer *opb);
+ int _vorbis_unpack_comment(vorbis_comment *vc,oggpack_buffer *opb);
+ long vorbis_packet_blocksize(vorbis_info *vi,ogg_packet *op);
+}
+
+namespace android {
+
+struct OggSource : public MediaTrack {
+ explicit OggSource(OggExtractor *extractor);
+
+ virtual status_t getFormat(MetaDataBase &);
+
+ virtual status_t start(MetaDataBase *params = NULL);
+ virtual status_t stop();
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+protected:
+ virtual ~OggSource();
+
+private:
+ OggExtractor *mExtractor;
+ bool mStarted;
+
+ OggSource(const OggSource &);
+ OggSource &operator=(const OggSource &);
+};
+
+struct MyOggExtractor {
+ MyOggExtractor(
+ DataSourceBase *source,
+ const char *mimeType,
+ size_t numHeaders,
+ int64_t seekPreRollUs);
+ virtual ~MyOggExtractor();
+
+ status_t getFormat(MetaDataBase &) const;
+
+ // Returns an approximate bitrate in bits per second.
+ virtual uint64_t approxBitrate() const = 0;
+
+ status_t seekToTime(int64_t timeUs);
+ status_t seekToOffset(off64_t offset);
+ virtual status_t readNextPacket(MediaBufferBase **buffer) = 0;
+
+ status_t init();
+
+ status_t getFileMetaData(MetaDataBase &meta) {
+ meta = mFileMeta;
+ return OK;
+ }
+
+protected:
+ struct Page {
+ uint64_t mGranulePosition;
+ int32_t mPrevPacketSize;
+ uint64_t mPrevPacketPos;
+ uint32_t mSerialNo;
+ uint32_t mPageNo;
+ uint8_t mFlags;
+ uint8_t mNumSegments;
+ uint8_t mLace[255];
+ };
+
+ struct TOCEntry {
+ off64_t mPageOffset;
+ int64_t mTimeUs;
+ };
+
+ DataSourceBase *mSource;
+ off64_t mOffset;
+ Page mCurrentPage;
+ uint64_t mCurGranulePosition;
+ uint64_t mPrevGranulePosition;
+ size_t mCurrentPageSize;
+ bool mFirstPacketInPage;
+ uint64_t mCurrentPageSamples;
+ size_t mNextLaceIndex;
+
+ const char *mMimeType;
+ size_t mNumHeaders;
+ int64_t mSeekPreRollUs;
+
+ off64_t mFirstDataOffset;
+
+ vorbis_info mVi;
+ vorbis_comment mVc;
+
+ MetaDataBase mMeta;
+ MetaDataBase mFileMeta;
+
+ Vector<TOCEntry> mTableOfContents;
+
+ ssize_t readPage(off64_t offset, Page *page);
+ status_t findNextPage(off64_t startOffset, off64_t *pageOffset);
+
+ virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const = 0;
+
+ // Extract the codec format, metadata tags, and various codec-specific data;
+ // the format and CSDs are required to set up the decoders for the enclosed media content.
+ //
+ // Valid values for `type` are:
+ // 1 - bitstream identification header
+ // 3 - comment header
+ // 5 - codec setup header (Vorbis only)
+ virtual status_t verifyHeader(MediaBufferBase *buffer, uint8_t type) = 0;
+
+ // Read the next Ogg packet from the underlying data source; if
+ // calcVorbisTimestamp is true, also compute the packet's timestamp using
+ // Vorbis granule-position semantics.
+ //
+ // *buffer is NULLed out on entry; on success a newly allocated buffer is
+ // returned and the caller is responsible for releasing it.
+ status_t _readNextPacket(MediaBufferBase **buffer, bool calcVorbisTimestamp);
+
+ int32_t getPacketBlockSize(MediaBufferBase *buffer);
+
+ void parseFileMetaData();
+
+ status_t findPrevGranulePosition(off64_t pageOffset, uint64_t *granulePos);
+
+ void buildTableOfContents();
+
+ MyOggExtractor(const MyOggExtractor &);
+ MyOggExtractor &operator=(const MyOggExtractor &);
+};
+
+struct MyVorbisExtractor : public MyOggExtractor {
+ explicit MyVorbisExtractor(DataSourceBase *source)
+ : MyOggExtractor(source,
+ MEDIA_MIMETYPE_AUDIO_VORBIS,
+ /* numHeaders */ 3,
+ /* seekPreRollUs */ 0) {
+ }
+
+ virtual uint64_t approxBitrate() const;
+
+ virtual status_t readNextPacket(MediaBufferBase **buffer) {
+ return _readNextPacket(buffer, /* calcVorbisTimestamp = */ true);
+ }
+
+protected:
+ virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const {
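+ // A Vorbis granule position counts PCM samples; convert to microseconds at the stream's sample rate.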
+ if (granulePos > INT64_MAX / 1000000ll) {
+ return INT64_MAX;
+ }
+ return granulePos * 1000000ll / mVi.rate;
+ }
+
+ virtual status_t verifyHeader(MediaBufferBase *buffer, uint8_t type);
+};
+
+struct MyOpusExtractor : public MyOggExtractor {
+ static const int32_t kOpusSampleRate = 48000;
+ static const int64_t kOpusSeekPreRollUs = 80000; // 80 ms
+
+ explicit MyOpusExtractor(DataSourceBase *source)
+ : MyOggExtractor(source, MEDIA_MIMETYPE_AUDIO_OPUS, /*numHeaders*/ 2, kOpusSeekPreRollUs),
+ mChannelCount(0),
+ mCodecDelay(0),
+ mStartGranulePosition(-1) {
+ }
+
+ virtual uint64_t approxBitrate() const {
+ return 0;
+ }
+
+ virtual status_t readNextPacket(MediaBufferBase **buffer);
+
+protected:
+ virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const;
+ virtual status_t verifyHeader(MediaBufferBase *buffer, uint8_t type);
+
+private:
+ status_t verifyOpusHeader(MediaBufferBase *buffer);
+ status_t verifyOpusComments(MediaBufferBase *buffer);
+ uint32_t getNumSamplesInPacket(MediaBufferBase *buffer) const;
+
+ uint8_t mChannelCount;
+ uint16_t mCodecDelay;
+ int64_t mStartGranulePosition;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+OggSource::OggSource(OggExtractor *extractor)
+ : mExtractor(extractor),
+ mStarted(false) {
+}
+
+OggSource::~OggSource() {
+ if (mStarted) {
+ stop();
+ }
+}
+
+status_t OggSource::getFormat(MetaDataBase &meta) {
+ return mExtractor->mImpl->getFormat(meta);
+}
+
+status_t OggSource::start(MetaDataBase * /* params */) {
+ if (mStarted) {
+ return INVALID_OPERATION;
+ }
+
+ mStarted = true;
+
+ return OK;
+}
+
+status_t OggSource::stop() {
+ mStarted = false;
+
+ return OK;
+}
+
+status_t OggSource::read(
+ MediaBufferBase **out, const ReadOptions *options) {
+ *out = NULL;
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ status_t err = mExtractor->mImpl->seekToTime(seekTimeUs);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ MediaBufferBase *packet;
+ status_t err = mExtractor->mImpl->readNextPacket(&packet);
+
+ if (err != OK) {
+ return err;
+ }
+
+#if 0
+ int64_t timeUs;
+ if (packet->meta_data().findInt64(kKeyTime, &timeUs)) {
+ ALOGI("found time = %lld us", timeUs);
+ } else {
+ ALOGI("NO time");
+ }
+#endif
+
+ packet->meta_data().setInt32(kKeyIsSyncFrame, 1);
+
+ *out = packet;
+
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+MyOggExtractor::MyOggExtractor(
+ DataSourceBase *source,
+ const char *mimeType,
+ size_t numHeaders,
+ int64_t seekPreRollUs)
+ : mSource(source),
+ mOffset(0),
+ mCurGranulePosition(0),
+ mPrevGranulePosition(0),
+ mCurrentPageSize(0),
+ mFirstPacketInPage(true),
+ mCurrentPageSamples(0),
+ mNextLaceIndex(0),
+ mMimeType(mimeType),
+ mNumHeaders(numHeaders),
+ mSeekPreRollUs(seekPreRollUs),
+ mFirstDataOffset(-1) {
+ mCurrentPage.mNumSegments = 0;
+
+ vorbis_info_init(&mVi);
+ vorbis_comment_init(&mVc);
+}
+
+MyOggExtractor::~MyOggExtractor() {
+ vorbis_comment_clear(&mVc);
+ vorbis_info_clear(&mVi);
+}
+
+status_t MyOggExtractor::getFormat(MetaDataBase &meta) const {
+ meta = mMeta;
+ return OK;
+}
+
+status_t MyOggExtractor::findNextPage(
+ off64_t startOffset, off64_t *pageOffset) {
+ *pageOffset = startOffset;
+
+ for (;;) {
+ char signature[4];
+ ssize_t n = mSource->readAt(*pageOffset, &signature, 4);
+
+ if (n < 4) {
+ *pageOffset = 0;
+
+ return (n < 0) ? n : (status_t)ERROR_END_OF_STREAM;
+ }
+
+ if (!memcmp(signature, "OggS", 4)) {
+ if (*pageOffset > startOffset) {
+ ALOGV("skipped %lld bytes of junk to reach next frame",
+ (long long)(*pageOffset - startOffset));
+ }
+
+ return OK;
+ }
+
+ ++*pageOffset;
+ }
+}
+
+// Given the offset of the "current" page, find the page immediately preceding
+// it (if any) and return its granule position.
+// To do this we back up from the "current" page's offset until we find any
+// page preceding it and then scan forward to just before the current page.
+status_t MyOggExtractor::findPrevGranulePosition(
+ off64_t pageOffset, uint64_t *granulePos) {
+ *granulePos = 0;
+
+ off64_t prevPageOffset = 0;
+ off64_t prevGuess = pageOffset;
+ for (;;) {
+ if (prevGuess >= 5000) {
+ prevGuess -= 5000;
+ } else {
+ prevGuess = 0;
+ }
+
+ ALOGV("backing up %lld bytes", (long long)(pageOffset - prevGuess));
+
+ status_t err = findNextPage(prevGuess, &prevPageOffset);
+ if (err == ERROR_END_OF_STREAM) {
+ // We are at the last page and didn't back off enough;
+ // back off 5000 bytes more and try again.
+ continue;
+ } else if (err != OK) {
+ return err;
+ }
+
+ if (prevPageOffset < pageOffset || prevGuess == 0) {
+ break;
+ }
+ }
+
+ if (prevPageOffset == pageOffset) {
+ // We did not find a page preceding this one.
+ return UNKNOWN_ERROR;
+ }
+
+ ALOGV("prevPageOffset at %lld, pageOffset at %lld",
+ (long long)prevPageOffset, (long long)pageOffset);
+
+ for (;;) {
+ Page prevPage;
+ ssize_t n = readPage(prevPageOffset, &prevPage);
+
+ if (n <= 0) {
+ return (status_t)n;
+ }
+
+ prevPageOffset += n;
+
+ if (prevPageOffset == pageOffset) {
+ *granulePos = prevPage.mGranulePosition;
+ return OK;
+ }
+ }
+}
+
+status_t MyOggExtractor::seekToTime(int64_t timeUs) {
+ timeUs -= mSeekPreRollUs;
+ if (timeUs < 0) {
+ timeUs = 0;
+ }
+
+ if (mTableOfContents.isEmpty()) {
+ // Perform approximate seeking based on avg. bitrate.
+ uint64_t bps = approxBitrate();
+ if (bps <= 0) {
+ return INVALID_OPERATION;
+ }
+
+ off64_t pos = timeUs * bps / 8000000ll;
+
+ ALOGV("seeking to offset %lld", (long long)pos);
+ return seekToOffset(pos);
+ }
+
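+ // Binary-search the table of contents for the first entry at or after timeUs (falling back to the last entry).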
+ size_t left = 0;
+ size_t right_plus_one = mTableOfContents.size();
+ while (left < right_plus_one) {
+ size_t center = left + (right_plus_one - left) / 2;
+
+ const TOCEntry &entry = mTableOfContents.itemAt(center);
+
+ if (timeUs < entry.mTimeUs) {
+ right_plus_one = center;
+ } else if (timeUs > entry.mTimeUs) {
+ left = center + 1;
+ } else {
+ left = center;
+ break;
+ }
+ }
+
+ if (left == mTableOfContents.size()) {
+ --left;
+ }
+
+ const TOCEntry &entry = mTableOfContents.itemAt(left);
+
+ ALOGV("seeking to entry %zu / %zu at offset %lld",
+ left, mTableOfContents.size(), (long long)entry.mPageOffset);
+
+ return seekToOffset(entry.mPageOffset);
+}
+
+status_t MyOggExtractor::seekToOffset(off64_t offset) {
+ if (mFirstDataOffset >= 0 && offset < mFirstDataOffset) {
+ // Once we know where the actual audio data starts (past the headers)
+ // don't ever seek to anywhere before that.
+ offset = mFirstDataOffset;
+ }
+
+ off64_t pageOffset;
+ status_t err = findNextPage(offset, &pageOffset);
+
+ if (err != OK) {
+ return err;
+ }
+
+ // We found the page we wanted to seek to, but we'll also need
+ // the page preceding it to determine how many valid samples are on
+ // this page.
+ findPrevGranulePosition(pageOffset, &mPrevGranulePosition);
+
+ mOffset = pageOffset;
+
+ mCurrentPageSize = 0;
+ mFirstPacketInPage = true;
+ mCurrentPageSamples = 0;
+ mCurrentPage.mNumSegments = 0;
+ mCurrentPage.mPrevPacketSize = -1;
+ mNextLaceIndex = 0;
+
+ // XXX what if new page continues packet from last???
+
+ return OK;
+}
+
+ssize_t MyOggExtractor::readPage(off64_t offset, Page *page) {
+ uint8_t header[27];
+ ssize_t n;
+ if ((n = mSource->readAt(offset, header, sizeof(header)))
+ < (ssize_t)sizeof(header)) {
+ ALOGV("failed to read %zu bytes at offset %#016llx, got %zd bytes",
+ sizeof(header), (long long)offset, n);
+
+ if (n < 0) {
+ return n;
+ } else if (n == 0) {
+ return ERROR_END_OF_STREAM;
+ } else {
+ return ERROR_IO;
+ }
+ }
+
+ if (memcmp(header, "OggS", 4)) {
+ return ERROR_MALFORMED;
+ }
+
+ if (header[4] != 0) {
+ // Wrong version.
+
+ return ERROR_UNSUPPORTED;
+ }
+
+ page->mFlags = header[5];
+
+ if (page->mFlags & ~7) {
+ // Only bits 0-2 are defined in version 0.
+ return ERROR_MALFORMED;
+ }
+
+ page->mGranulePosition = U64LE_AT(&header[6]);
+
+#if 0
+ printf("granulePosition = %llu (0x%llx)\n",
+ page->mGranulePosition, page->mGranulePosition);
+#endif
+
+ page->mSerialNo = U32LE_AT(&header[14]);
+ page->mPageNo = U32LE_AT(&header[18]);
+
+ page->mNumSegments = header[26];
+ if (mSource->readAt(
+ offset + sizeof(header), page->mLace, page->mNumSegments)
+ < (ssize_t)page->mNumSegments) {
+ return ERROR_IO;
+ }
+
+ size_t totalSize = 0;
+ for (size_t i = 0; i < page->mNumSegments; ++i) {
+ totalSize += page->mLace[i];
+ }
+
+#if 0
+ String8 tmp;
+ for (size_t i = 0; i < page->mNumSegments; ++i) {
+ char x[32];
+ sprintf(x, "%s%u", i > 0 ? ", " : "", (unsigned)page->mLace[i]);
+
+ tmp.append(x);
+ }
+
+ ALOGV("%c %s", page->mFlags & 1 ? '+' : ' ', tmp.string());
+#endif
+
+ return sizeof(header) + page->mNumSegments + totalSize;
+}
+
+status_t MyOpusExtractor::readNextPacket(MediaBufferBase **out) {
+ if (mOffset <= mFirstDataOffset && mStartGranulePosition < 0) {
+ // The first sample might not start at time 0; find out where by subtracting
+ // the number of samples on the first page from the granule position
+ // (position of last complete sample) of the first page. This happens
+ // the first time before we attempt to read a packet from the first page.
+ MediaBufferBase *mBuf;
+ uint32_t numSamples = 0;
+ uint64_t curGranulePosition = 0;
+ while (true) {
+ status_t err = _readNextPacket(&mBuf, /* calcVorbisTimestamp = */false);
+ if (err != OK && err != ERROR_END_OF_STREAM) {
+ return err;
+ }
+ // First two pages are header pages.
+ if (err == ERROR_END_OF_STREAM || mCurrentPage.mPageNo > 2) {
+ if (mBuf != NULL) {
+ mBuf->release();
+ mBuf = NULL;
+ }
+ break;
+ }
+ curGranulePosition = mCurrentPage.mGranulePosition;
+ numSamples += getNumSamplesInPacket(mBuf);
+ mBuf->release();
+ mBuf = NULL;
+ }
+
+ if (curGranulePosition > numSamples) {
+ mStartGranulePosition = curGranulePosition - numSamples;
+ } else {
+ mStartGranulePosition = 0;
+ }
+ seekToOffset(0);
+ }
+
+ status_t err = _readNextPacket(out, /* calcVorbisTimestamp = */false);
+ if (err != OK) {
+ return err;
+ }
+
+ int32_t currentPageSamples;
+ // Calculate timestamps by accumulating durations starting from the first
+ // sample of a page; we assume that we only seek to page boundaries.
+ if ((*out)->meta_data().findInt32(kKeyValidSamples, &currentPageSamples)) {
+ // first packet in page
+ if (mOffset == mFirstDataOffset) {
+ currentPageSamples -= mStartGranulePosition;
+ (*out)->meta_data().setInt32(kKeyValidSamples, currentPageSamples);
+ }
+ mCurGranulePosition = mCurrentPage.mGranulePosition - currentPageSamples;
+ }
+
+ int64_t timeUs = getTimeUsOfGranule(mCurGranulePosition);
+ (*out)->meta_data().setInt64(kKeyTime, timeUs);
+
+ uint32_t frames = getNumSamplesInPacket(*out);
+ mCurGranulePosition += frames;
+ return OK;
+}
+
+uint32_t MyOpusExtractor::getNumSamplesInPacket(MediaBufferBase *buffer) const {
+ if (buffer == NULL || buffer->range_length() < 1) {
+ return 0;
+ }
+
+ uint8_t *data = (uint8_t *)buffer->data() + buffer->range_offset();
+ uint8_t toc = data[0];
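+ // Opus TOC byte (RFC 6716): the top five bits select the configuration, which fixes the frame duration; the bottom two bits encode the frame count code.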
+ uint8_t config = (toc >> 3) & 0x1f;
+ uint32_t frameSizesUs[] = {
+ 10000, 20000, 40000, 60000, // 0...3
+ 10000, 20000, 40000, 60000, // 4...7
+ 10000, 20000, 40000, 60000, // 8...11
+ 10000, 20000, // 12...13
+ 10000, 20000, // 14...15
+ 2500, 5000, 10000, 20000, // 16...19
+ 2500, 5000, 10000, 20000, // 20...23
+ 2500, 5000, 10000, 20000, // 24...27
+ 2500, 5000, 10000, 20000 // 28...31
+ };
+ uint32_t frameSizeUs = frameSizesUs[config];
+
+ uint32_t numFrames;
+ uint8_t c = toc & 3;
+ switch (c) {
+ case 0:
+ numFrames = 1;
+ break;
+ case 1:
+ case 2:
+ numFrames = 2;
+ break;
+ case 3:
+ if (buffer->range_length() < 3) {
+ numFrames = 0;
+ } else {
+ numFrames = data[2] & 0x3f;
+ }
+ break;
+ default:
+ TRESPASS();
+ }
+
+ uint32_t numSamples = frameSizeUs * numFrames * kOpusSampleRate / 1000000;
+ return numSamples;
+}
+
+status_t MyOggExtractor::_readNextPacket(MediaBufferBase **out, bool calcVorbisTimestamp) {
+ *out = NULL;
+
+ MediaBufferBase *buffer = NULL;
+ int64_t timeUs = -1;
+
+ for (;;) {
+ size_t i;
+ size_t packetSize = 0;
+ bool gotFullPacket = false;
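+ // Ogg lacing: each lacing value adds to the packet size; a value below 255 ends the packet, while 255 means it continues in the next segment.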
+ for (i = mNextLaceIndex; i < mCurrentPage.mNumSegments; ++i) {
+ uint8_t lace = mCurrentPage.mLace[i];
+
+ packetSize += lace;
+
+ if (lace < 255) {
+ gotFullPacket = true;
+ ++i;
+ break;
+ }
+ }
+
+ if (mNextLaceIndex < mCurrentPage.mNumSegments) {
+ off64_t dataOffset = mOffset + 27 + mCurrentPage.mNumSegments;
+ for (size_t j = 0; j < mNextLaceIndex; ++j) {
+ dataOffset += mCurrentPage.mLace[j];
+ }
+
+ size_t fullSize = packetSize;
+ if (buffer != NULL) {
+ fullSize += buffer->range_length();
+ }
+ if (fullSize > 16 * 1024 * 1024) { // arbitrary limit of 16 MB packet size
+ if (buffer != NULL) {
+ buffer->release();
+ }
+ ALOGE("b/36592202");
+ return ERROR_MALFORMED;
+ }
+ MediaBufferBase *tmp = MediaBufferBase::Create(fullSize);
+ if (tmp == NULL) {
+ if (buffer != NULL) {
+ buffer->release();
+ }
+ ALOGE("b/36592202");
+ return ERROR_MALFORMED;
+ }
+ if (buffer != NULL) {
+ memcpy(tmp->data(), buffer->data(), buffer->range_length());
+ tmp->set_range(0, buffer->range_length());
+ buffer->release();
+ } else {
+ tmp->set_range(0, 0);
+ }
+ buffer = tmp;
+
+ ssize_t n = mSource->readAt(
+ dataOffset,
+ (uint8_t *)buffer->data() + buffer->range_length(),
+ packetSize);
+
+ if (n < (ssize_t)packetSize) {
+ buffer->release();
+ ALOGV("failed to read %zu bytes at %#016llx, got %zd bytes",
+ packetSize, (long long)dataOffset, n);
+ return ERROR_IO;
+ }
+
+ buffer->set_range(0, fullSize);
+
+ mNextLaceIndex = i;
+
+ if (gotFullPacket) {
+ // We've just read the entire packet.
+
+ if (mFirstPacketInPage) {
+ buffer->meta_data().setInt32(
+ kKeyValidSamples, mCurrentPageSamples);
+ mFirstPacketInPage = false;
+ }
+
+ if (calcVorbisTimestamp) {
+ int32_t curBlockSize = getPacketBlockSize(buffer);
+ if (mCurrentPage.mPrevPacketSize < 0) {
+ mCurrentPage.mPrevPacketSize = curBlockSize;
+ mCurrentPage.mPrevPacketPos =
+ mCurrentPage.mGranulePosition - mCurrentPageSamples;
+ timeUs = mCurrentPage.mPrevPacketPos * 1000000ll / mVi.rate;
+ } else {
+ // The effective block size is the average of the two overlapped blocks
+ int32_t actualBlockSize =
+ (curBlockSize + mCurrentPage.mPrevPacketSize) / 2;
+ timeUs = mCurrentPage.mPrevPacketPos * 1000000ll / mVi.rate;
+ // The actual size output by the decoder will be half the effective
+ // size, due to the overlap
+ mCurrentPage.mPrevPacketPos += actualBlockSize / 2;
+ mCurrentPage.mPrevPacketSize = curBlockSize;
+ }
+ buffer->meta_data().setInt64(kKeyTime, timeUs);
+ }
+ *out = buffer;
+
+ return OK;
+ }
+
+ // fall through, the buffer now contains the start of the packet.
+ }
+
+ CHECK_EQ(mNextLaceIndex, mCurrentPage.mNumSegments);
+
+ mOffset += mCurrentPageSize;
+ ssize_t n = readPage(mOffset, &mCurrentPage);
+
+ if (n <= 0) {
+ if (buffer) {
+ buffer->release();
+ buffer = NULL;
+ }
+
+ ALOGV("readPage returned %zd", n);
+
+ return n < 0 ? n : (status_t)ERROR_END_OF_STREAM;
+ }
+
+ // Prevent a harmless unsigned integer overflow by clamping to 0
+ if (mCurrentPage.mGranulePosition >= mPrevGranulePosition) {
+ mCurrentPageSamples =
+ mCurrentPage.mGranulePosition - mPrevGranulePosition;
+ } else {
+ mCurrentPageSamples = 0;
+ }
+ mFirstPacketInPage = true;
+
+ mPrevGranulePosition = mCurrentPage.mGranulePosition;
+
+ mCurrentPageSize = n;
+ mNextLaceIndex = 0;
+
+ if (buffer != NULL) {
+ if ((mCurrentPage.mFlags & 1) == 0) {
+ // This page does not continue the packet, i.e. the packet
+ // is already complete.
+
+ if (timeUs >= 0) {
+ buffer->meta_data().setInt64(kKeyTime, timeUs);
+ }
+
+ buffer->meta_data().setInt32(
+ kKeyValidSamples, mCurrentPageSamples);
+ mFirstPacketInPage = false;
+
+ *out = buffer;
+
+ return OK;
+ }
+ }
+ }
+}
+
+status_t MyOggExtractor::init() {
+ mMeta.setCString(kKeyMIMEType, mMimeType);
+
+ status_t err;
+ MediaBufferBase *packet;
+ for (size_t i = 0; i < mNumHeaders; ++i) {
+ // ignore timestamp for configuration packets
+ if ((err = _readNextPacket(&packet, /* calcVorbisTimestamp = */ false)) != OK) {
+ return err;
+ }
+ ALOGV("read packet of size %zu\n", packet->range_length());
+ err = verifyHeader(packet, /* type = */ i * 2 + 1);
+ packet->release();
+ packet = NULL;
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ mFirstDataOffset = mOffset + mCurrentPageSize;
+
+ off64_t size;
+ uint64_t lastGranulePosition;
+ if (!(mSource->flags() & DataSourceBase::kIsCachingDataSource)
+ && mSource->getSize(&size) == OK
+ && findPrevGranulePosition(size, &lastGranulePosition) == OK) {
+ // Let's assume it's cheap to seek to the end.
+ // The granule position of the final page in the stream will
+ // give us the exact duration of the content, something that
+ // we can only approximate using avg. bitrate if seeking to
+ // the end is too expensive or impossible (live streaming).
+
+ int64_t durationUs = getTimeUsOfGranule(lastGranulePosition);
+
+ mMeta.setInt64(kKeyDuration, durationUs);
+
+ buildTableOfContents();
+ }
+
+ return OK;
+}
+
+void MyOggExtractor::buildTableOfContents() {
+ off64_t offset = mFirstDataOffset;
+ Page page;
+ ssize_t pageSize;
+ while ((pageSize = readPage(offset, &page)) > 0) {
+ mTableOfContents.push();
+
+ TOCEntry &entry =
+ mTableOfContents.editItemAt(mTableOfContents.size() - 1);
+
+ entry.mPageOffset = offset;
+ entry.mTimeUs = getTimeUsOfGranule(page.mGranulePosition);
+
+ offset += (size_t)pageSize;
+ }
+
+ // Limit the maximum amount of RAM we spend on the table of contents,
+ // if necessary thin out the table evenly to trim it down to maximum
+ // size.
+
+ static const size_t kMaxTOCSize = 8192;
+ static const size_t kMaxNumTOCEntries = kMaxTOCSize / sizeof(TOCEntry);
+
+ size_t numerator = mTableOfContents.size();
+
+ if (numerator > kMaxNumTOCEntries) {
+ size_t denom = numerator - kMaxNumTOCEntries;
+
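+ // Bresenham-style thinning: accumulate 'denom' per entry and drop one entry
+ // whenever the accumulator reaches 'numerator', removing exactly 'denom'
+ // entries spread evenly across the table.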
+ size_t accum = 0;
+ for (ssize_t i = mTableOfContents.size() - 1; i >= 0; --i) {
+ accum += denom;
+ if (accum >= numerator) {
+ mTableOfContents.removeAt(i);
+ accum -= numerator;
+ }
+ }
+ }
+}
+
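+// Wraps the packet bytes in a single-segment ogg_buffer/ogg_reference so that
+// vorbis_packet_blocksize() can read the packet's mode number and return the
+// block size configured for that mode.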
+int32_t MyOggExtractor::getPacketBlockSize(MediaBufferBase *buffer) {
+ const uint8_t *data =
+ (const uint8_t *)buffer->data() + buffer->range_offset();
+
+ size_t size = buffer->range_length();
+
+ ogg_buffer buf;
+ buf.data = (uint8_t *)data;
+ buf.size = size;
+ buf.refcount = 1;
+ buf.ptr.owner = NULL;
+
+ ogg_reference ref;
+ ref.buffer = &buf;
+ ref.begin = 0;
+ ref.length = size;
+ ref.next = NULL;
+
+ ogg_packet pack;
+ pack.packet = &ref;
+ pack.bytes = ref.length;
+ pack.b_o_s = 0;
+ pack.e_o_s = 0;
+ pack.granulepos = 0;
+ pack.packetno = 0;
+
+ return vorbis_packet_blocksize(&mVi, &pack);
+}
+
+int64_t MyOpusExtractor::getTimeUsOfGranule(uint64_t granulePos) const {
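+ // Ogg Opus granule positions are always expressed in 48 kHz samples and
+ // include the pre-skip (codec delay) samples that the decoder discards,
+ // so subtract mCodecDelay before converting to microseconds.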
+ uint64_t pcmSamplePosition = 0;
+ if (granulePos > mCodecDelay) {
+ pcmSamplePosition = granulePos - mCodecDelay;
+ }
+ if (pcmSamplePosition > INT64_MAX / 1000000ll) {
+ return INT64_MAX;
+ }
+ return pcmSamplePosition * 1000000ll / kOpusSampleRate;
+}
+
+status_t MyOpusExtractor::verifyHeader(MediaBufferBase *buffer, uint8_t type) {
+ switch (type) {
+ // there are actually no header types defined in the Opus spec; we choose 1 and 3 to mean
+ // header and comments such that we can share code with MyVorbisExtractor.
+ case 1:
+ return verifyOpusHeader(buffer);
+ case 3:
+ return verifyOpusComments(buffer);
+ default:
+ return INVALID_OPERATION;
+ }
+}
+
+status_t MyOpusExtractor::verifyOpusHeader(MediaBufferBase *buffer) {
+ const size_t kOpusHeaderSize = 19;
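+ // Minimal "OpusHead" layout (RFC 7845): magic(8) | version(1) |
+ // channel count(1) | pre-skip(2, LE) | input sample rate(4, LE) |
+ // output gain(2, LE) | channel mapping family(1) = 19 bytes.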
+ const uint8_t *data =
+ (const uint8_t *)buffer->data() + buffer->range_offset();
+
+ size_t size = buffer->range_length();
+
+ if (size < kOpusHeaderSize
+ || memcmp(data, "OpusHead", 8)
+ || /* version = */ data[8] != 1) {
+ return ERROR_MALFORMED;
+ }
+
+ mChannelCount = data[9];
+ mCodecDelay = U16LE_AT(&data[10]);
+
+ mMeta.setData(kKeyOpusHeader, 0, data, size);
+ mMeta.setInt32(kKeySampleRate, kOpusSampleRate);
+ mMeta.setInt32(kKeyChannelCount, mChannelCount);
+ mMeta.setInt64(kKeyOpusSeekPreRoll /* ns */, kOpusSeekPreRollUs * 1000 /* = 80 ms */);
+ mMeta.setInt64(kKeyOpusCodecDelay /* ns */,
+ mCodecDelay /* sample/s */ * 1000000000ll / kOpusSampleRate);
+
+ return OK;
+}
+
+status_t MyOpusExtractor::verifyOpusComments(MediaBufferBase *buffer) {
+ // add artificial framing bit so we can reuse _vorbis_unpack_comment
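+ // (A Vorbis comment header ends with a framing bit that OpusTags lacks; the
+ // parsing below finds where the comments end and writes a 1 there.)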
+ int32_t commentSize = buffer->range_length() + 1;
+ auto tmp = heapbuffer<uint8_t>(commentSize);
+ uint8_t *commentData = tmp.get();
+ if (commentData == nullptr) {
+ return ERROR_MALFORMED;
+ }
+
+ memcpy(commentData,
+ (uint8_t *)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+
+ ogg_buffer buf;
+ buf.data = commentData;
+ buf.size = commentSize;
+ buf.refcount = 1;
+ buf.ptr.owner = NULL;
+
+ ogg_reference ref;
+ ref.buffer = &buf;
+ ref.begin = 0;
+ ref.length = commentSize;
+ ref.next = NULL;
+
+ oggpack_buffer bits;
+ oggpack_readinit(&bits, &ref);
+
+ // skip 'OpusTags'
+ const char *OpusTags = "OpusTags";
+ const int32_t headerLen = strlen(OpusTags);
+ int32_t framingBitOffset = headerLen;
+ for (int i = 0; i < headerLen; ++i) {
+ char chr = oggpack_read(&bits, 8);
+ if (chr != OpusTags[i]) {
+ return ERROR_MALFORMED;
+ }
+ }
+
+ int32_t vendorLen = oggpack_read(&bits, 32);
+ framingBitOffset += 4;
+ if (vendorLen < 0 || vendorLen > commentSize - 8) {
+ return ERROR_MALFORMED;
+ }
+ // skip vendor string
+ framingBitOffset += vendorLen;
+ for (int i = 0; i < vendorLen; ++i) {
+ oggpack_read(&bits, 8);
+ }
+
+ int32_t n = oggpack_read(&bits, 32);
+ framingBitOffset += 4;
+ if (n < 0 || n > ((commentSize - oggpack_bytes(&bits)) >> 2)) {
+ return ERROR_MALFORMED;
+ }
+ for (int i = 0; i < n; ++i) {
+ int32_t len = oggpack_read(&bits, 32);
+ framingBitOffset += 4;
+ if (len < 0 || len > (commentSize - oggpack_bytes(&bits))) {
+ return ERROR_MALFORMED;
+ }
+ framingBitOffset += len;
+ for (int j = 0; j < len; ++j) {
+ oggpack_read(&bits, 8);
+ }
+ }
+ if (framingBitOffset < 0 || framingBitOffset >= commentSize) {
+ return ERROR_MALFORMED;
+ }
+ commentData[framingBitOffset] = 1;
+
+ buf.data = commentData + headerLen;
+ buf.size = commentSize - headerLen;
+ buf.refcount = 1;
+ buf.ptr.owner = NULL;
+
+ ref.buffer = &buf;
+ ref.begin = 0;
+ ref.length = commentSize - headerLen;
+ ref.next = NULL;
+
+ oggpack_readinit(&bits, &ref);
+ int err = _vorbis_unpack_comment(&mVc, &bits);
+ if (0 != err) {
+ return ERROR_MALFORMED;
+ }
+
+ parseFileMetaData();
+ return OK;
+}
+
+status_t MyVorbisExtractor::verifyHeader(
+ MediaBufferBase *buffer, uint8_t type) {
+ const uint8_t *data =
+ (const uint8_t *)buffer->data() + buffer->range_offset();
+
+ size_t size = buffer->range_length();
+
+ if (size < 7 || data[0] != type || memcmp(&data[1], "vorbis", 6)) {
+ return ERROR_MALFORMED;
+ }
+
+ ogg_buffer buf;
+ buf.data = (uint8_t *)data;
+ buf.size = size;
+ buf.refcount = 1;
+ buf.ptr.owner = NULL;
+
+ ogg_reference ref;
+ ref.buffer = &buf;
+ ref.begin = 0;
+ ref.length = size;
+ ref.next = NULL;
+
+ oggpack_buffer bits;
+ oggpack_readinit(&bits, &ref);
+
+ if (oggpack_read(&bits, 8) != type) {
+ return ERROR_MALFORMED;
+ }
+ for (size_t i = 0; i < 6; ++i) {
+ oggpack_read(&bits, 8); // skip 'vorbis'
+ }
+
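+ // Vorbis header packet types (Vorbis I spec): 1 = identification,
+ // 3 = comment, 5 = setup (codebooks).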
+ switch (type) {
+ case 1:
+ {
+ if (0 != _vorbis_unpack_info(&mVi, &bits)) {
+ return ERROR_MALFORMED;
+ }
+
+ mMeta.setData(kKeyVorbisInfo, 0, data, size);
+ mMeta.setInt32(kKeySampleRate, mVi.rate);
+ mMeta.setInt32(kKeyChannelCount, mVi.channels);
+ mMeta.setInt32(kKeyBitRate, mVi.bitrate_nominal);
+
+ ALOGV("lower-bitrate = %ld", mVi.bitrate_lower);
+ ALOGV("upper-bitrate = %ld", mVi.bitrate_upper);
+ ALOGV("nominal-bitrate = %ld", mVi.bitrate_nominal);
+ ALOGV("window-bitrate = %ld", mVi.bitrate_window);
+ ALOGV("blocksizes: %d/%d",
+ vorbis_info_blocksize(&mVi, 0),
+ vorbis_info_blocksize(&mVi, 1)
+ );
+
+ off64_t size;
+ if (mSource->getSize(&size) == OK) {
+ uint64_t bps = approxBitrate();
+ if (bps != 0) {
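+ // duration(us) = size(bytes) * 8 * 1,000,000 / bitrate(bits/sec)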
+ mMeta.setInt64(kKeyDuration, size * 8000000ll / bps);
+ }
+ }
+ break;
+ }
+
+ case 3:
+ {
+ if (0 != _vorbis_unpack_comment(&mVc, &bits)) {
+ return ERROR_MALFORMED;
+ }
+
+ parseFileMetaData();
+ break;
+ }
+
+ case 5:
+ {
+ if (0 != _vorbis_unpack_books(&mVi, &bits)) {
+ return ERROR_MALFORMED;
+ }
+
+ mMeta.setData(kKeyVorbisBooks, 0, data, size);
+ break;
+ }
+ }
+
+ return OK;
+}
+
+uint64_t MyVorbisExtractor::approxBitrate() const {
+ if (mVi.bitrate_nominal != 0) {
+ return mVi.bitrate_nominal;
+ }
+
+ return (mVi.bitrate_lower + mVi.bitrate_upper) / 2;
+}
+
+
+void MyOggExtractor::parseFileMetaData() {
+ mFileMeta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_OGG);
+
+ for (int i = 0; i < mVc.comments; ++i) {
+ const char *comment = mVc.user_comments[i];
+ size_t commentLength = mVc.comment_lengths[i];
+ parseVorbisComment(&mFileMeta, comment, commentLength);
+ //ALOGI("comment #%d: '%s'", i + 1, mVc.user_comments[i]);
+ }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+OggExtractor::OggExtractor(DataSourceBase *source)
+ : mDataSource(source),
+ mInitCheck(NO_INIT),
+ mImpl(NULL) {
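+ // Probe the stream as Vorbis first, then as Opus; keep whichever
+ // implementation both seeks to offset 0 and parses its headers successfully.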
+ for (int i = 0; i < 2; ++i) {
+ if (mImpl != NULL) {
+ delete mImpl;
+ }
+ if (i == 0) {
+ mImpl = new MyVorbisExtractor(mDataSource);
+ } else {
+ mImpl = new MyOpusExtractor(mDataSource);
+ }
+ mInitCheck = mImpl->seekToOffset(0);
+
+ if (mInitCheck == OK) {
+ mInitCheck = mImpl->init();
+ if (mInitCheck == OK) {
+ break;
+ }
+ }
+ }
+}
+
+OggExtractor::~OggExtractor() {
+ delete mImpl;
+ mImpl = NULL;
+}
+
+size_t OggExtractor::countTracks() {
+ return mInitCheck != OK ? 0 : 1;
+}
+
+MediaTrack *OggExtractor::getTrack(size_t index) {
+ if (index >= 1) {
+ return NULL;
+ }
+
+ return new OggSource(this);
+}
+
+status_t OggExtractor::getTrackMetaData(
+ MetaDataBase &meta,
+ size_t index, uint32_t /* flags */) {
+ if (index >= 1) {
+ return UNKNOWN_ERROR;
+ }
+
+ return mImpl->getFormat(meta);
+}
+
+status_t OggExtractor::getMetaData(MetaDataBase &meta) {
+ return mImpl->getFileMetaData(meta);
+}
+
+static MediaExtractor* CreateExtractor(
+ DataSourceBase *source,
+ void *) {
+ return new OggExtractor(source);
+}
+
+static MediaExtractor::CreatorFunc Sniff(
+ DataSourceBase *source,
+ float *confidence,
+ void **,
+ MediaExtractor::FreeMetaFunc *) {
+ char tmp[4];
+ if (source->readAt(0, tmp, 4) < 4 || memcmp(tmp, "OggS", 4)) {
+ return NULL;
+ }
+
+ *confidence = 0.2f;
+
+ return CreateExtractor;
+}
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("8cc5cd06-f772-495e-8a62-cba9649374e9"),
+ 1, // version
+ "Ogg Extractor",
+ Sniff
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/extractors/ogg/OggExtractor.h b/media/extractors/ogg/OggExtractor.h
new file mode 100644
index 0000000..9fe2944
--- /dev/null
+++ b/media/extractors/ogg/OggExtractor.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OGG_EXTRACTOR_H_
+
+#define OGG_EXTRACTOR_H_
+
+#include <utils/Errors.h>
+#include <media/MediaExtractor.h>
+
+namespace android {
+
+struct AMessage;
+class DataSourceBase;
+class String8;
+
+struct MyOggExtractor;
+struct OggSource;
+
+struct OggExtractor : public MediaExtractor {
+ explicit OggExtractor(DataSourceBase *source);
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+ virtual const char * name() { return "OggExtractor"; }
+
+protected:
+ virtual ~OggExtractor();
+
+private:
+ friend struct OggSource;
+
+ DataSourceBase *mDataSource;
+ status_t mInitCheck;
+
+ MyOggExtractor *mImpl;
+
+ OggExtractor(const OggExtractor &);
+ OggExtractor &operator=(const OggExtractor &);
+};
+
+} // namespace android
+
+#endif // OGG_EXTRACTOR_H_
diff --git a/media/extractors/ogg/exports.lds b/media/extractors/ogg/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/ogg/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
new file mode 100644
index 0000000..067933e
--- /dev/null
+++ b/media/extractors/wav/Android.bp
@@ -0,0 +1,42 @@
+cc_library_shared {
+
+ srcs: ["WAVExtractor.cpp"],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/include",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmediaextractor",
+ ],
+
+ static_libs: [
+ "libfifo",
+ "libstagefright_foundation",
+ ],
+
+ name: "libwavextractor",
+ relative_install_path: "extractors",
+
+ compile_multilib: "first",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-fvisibility=hidden",
+ ],
+ version_script: "exports.lds",
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/extractors/wav/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/extractors/wav/MODULE_LICENSE_APACHE2
diff --git a/media/libstagefright/matroska/NOTICE b/media/extractors/wav/NOTICE
similarity index 100%
copy from media/libstagefright/matroska/NOTICE
copy to media/extractors/wav/NOTICE
diff --git a/media/extractors/wav/WAVExtractor.cpp b/media/extractors/wav/WAVExtractor.cpp
new file mode 100644
index 0000000..f5a1b01
--- /dev/null
+++ b/media/extractors/wav/WAVExtractor.cpp
@@ -0,0 +1,594 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WAVExtractor"
+#include <utils/Log.h>
+
+#include "WAVExtractor.h"
+
+#include <audio_utils/primitives.h>
+#include <media/DataSourceBase.h>
+#include <media/MediaTrack.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/String8.h>
+#include <cutils/bitops.h>
+
+#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
+
+namespace android {
+
+enum {
+ WAVE_FORMAT_PCM = 0x0001,
+ WAVE_FORMAT_IEEE_FLOAT = 0x0003,
+ WAVE_FORMAT_ALAW = 0x0006,
+ WAVE_FORMAT_MULAW = 0x0007,
+ WAVE_FORMAT_MSGSM = 0x0031,
+ WAVE_FORMAT_EXTENSIBLE = 0xFFFE
+};
+
+static const char* WAVEEXT_SUBFORMAT = "\x00\x00\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71";
+static const char* AMBISONIC_SUBFORMAT = "\x00\x00\x21\x07\xD3\x11\x86\x44\xC8\xC1\xCA\x00\x00\x00";
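+// These are the trailing 14 bytes of the 16-byte WAVE_FORMAT_EXTENSIBLE
+// SubFormat GUIDs; the leading two bytes carry the format tag and are read
+// separately from formatSpec[24] below.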
+
+static uint32_t U32_LE_AT(const uint8_t *ptr) {
+ return ptr[3] << 24 | ptr[2] << 16 | ptr[1] << 8 | ptr[0];
+}
+
+static uint16_t U16_LE_AT(const uint8_t *ptr) {
+ return ptr[1] << 8 | ptr[0];
+}
+
+struct WAVSource : public MediaTrack {
+ WAVSource(
+ DataSourceBase *dataSource,
+ MetaDataBase &meta,
+ uint16_t waveFormat,
+ int32_t bitsPerSample,
+ off64_t offset, size_t size);
+
+ virtual status_t start(MetaDataBase *params = NULL);
+ virtual status_t stop();
+ virtual status_t getFormat(MetaDataBase &meta);
+
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
+
+ virtual bool supportNonblockingRead() { return true; }
+
+protected:
+ virtual ~WAVSource();
+
+private:
+ static const size_t kMaxFrameSize;
+
+ DataSourceBase *mDataSource;
+ MetaDataBase &mMeta;
+ uint16_t mWaveFormat;
+ int32_t mSampleRate;
+ int32_t mNumChannels;
+ int32_t mBitsPerSample;
+ off64_t mOffset;
+ size_t mSize;
+ bool mStarted;
+ MediaBufferGroup *mGroup;
+ off64_t mCurrentPos;
+
+ WAVSource(const WAVSource &);
+ WAVSource &operator=(const WAVSource &);
+};
+
+WAVExtractor::WAVExtractor(DataSourceBase *source)
+ : mDataSource(source),
+ mValidFormat(false),
+ mChannelMask(CHANNEL_MASK_USE_CHANNEL_ORDER) {
+ mInitCheck = init();
+}
+
+WAVExtractor::~WAVExtractor() {
+}
+
+status_t WAVExtractor::getMetaData(MetaDataBase &meta) {
+ meta.clear();
+ if (mInitCheck == OK) {
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_WAV);
+ }
+
+ return OK;
+}
+
+size_t WAVExtractor::countTracks() {
+ return mInitCheck == OK ? 1 : 0;
+}
+
+MediaTrack *WAVExtractor::getTrack(size_t index) {
+ if (mInitCheck != OK || index > 0) {
+ return NULL;
+ }
+
+ return new WAVSource(
+ mDataSource, mTrackMeta,
+ mWaveFormat, mBitsPerSample, mDataOffset, mDataSize);
+}
+
+status_t WAVExtractor::getTrackMetaData(
+ MetaDataBase &meta,
+ size_t index, uint32_t /* flags */) {
+ if (mInitCheck != OK || index > 0) {
+ return UNKNOWN_ERROR;
+ }
+
+ meta = mTrackMeta;
+ return OK;
+}
+
+status_t WAVExtractor::init() {
+ uint8_t header[12];
+ if (mDataSource->readAt(
+ 0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+ return NO_INIT;
+ }
+
+ if (memcmp(header, "RIFF", 4) || memcmp(&header[8], "WAVE", 4)) {
+ return NO_INIT;
+ }
+
+ size_t totalSize = U32_LE_AT(&header[4]);
+
+ off64_t offset = 12;
+ size_t remainingSize = totalSize;
+ while (remainingSize >= 8) {
+ uint8_t chunkHeader[8];
+ if (mDataSource->readAt(offset, chunkHeader, 8) < 8) {
+ return NO_INIT;
+ }
+
+ remainingSize -= 8;
+ offset += 8;
+
+ uint32_t chunkSize = U32_LE_AT(&chunkHeader[4]);
+
+ if (chunkSize > remainingSize) {
+ return NO_INIT;
+ }
+
+ if (!memcmp(chunkHeader, "fmt ", 4)) {
+ if (chunkSize < 16) {
+ return NO_INIT;
+ }
+
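+ // "fmt " chunk layout (little-endian): formatTag(0) | channels(2) |
+ // sampleRate(4) | byteRate(8) | blockAlign(12) | bitsPerSample(14);
+ // WAVE_FORMAT_EXTENSIBLE adds cbSize(16) | validBitsPerSample(18) |
+ // channelMask(20) | SubFormat GUID(24).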
+ uint8_t formatSpec[40];
+ if (mDataSource->readAt(offset, formatSpec, 2) < 2) {
+ return NO_INIT;
+ }
+
+ mWaveFormat = U16_LE_AT(formatSpec);
+ if (mWaveFormat != WAVE_FORMAT_PCM
+ && mWaveFormat != WAVE_FORMAT_IEEE_FLOAT
+ && mWaveFormat != WAVE_FORMAT_ALAW
+ && mWaveFormat != WAVE_FORMAT_MULAW
+ && mWaveFormat != WAVE_FORMAT_MSGSM
+ && mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ uint8_t fmtSize = 16;
+ if (mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
+ fmtSize = 40;
+ }
+ if (mDataSource->readAt(offset, formatSpec, fmtSize) < fmtSize) {
+ return NO_INIT;
+ }
+
+ mNumChannels = U16_LE_AT(&formatSpec[2]);
+
+ if (mNumChannels < 1 || mNumChannels > 8) {
+ ALOGE("Unsupported number of channels (%d)", mNumChannels);
+ return ERROR_UNSUPPORTED;
+ }
+
+ if (mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
+ if (mNumChannels != 1 && mNumChannels != 2) {
+ ALOGW("More than 2 channels (%d) in non-WAVE_EXT, unknown channel mask",
+ mNumChannels);
+ }
+ }
+
+ mSampleRate = U32_LE_AT(&formatSpec[4]);
+
+ if (mSampleRate == 0) {
+ return ERROR_MALFORMED;
+ }
+
+ mBitsPerSample = U16_LE_AT(&formatSpec[14]);
+
+ if (mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
+ uint16_t validBitsPerSample = U16_LE_AT(&formatSpec[18]);
+ if (validBitsPerSample != mBitsPerSample) {
+ if (validBitsPerSample != 0) {
+ ALOGE("validBits(%d) != bitsPerSample(%d) are not supported",
+ validBitsPerSample, mBitsPerSample);
+ return ERROR_UNSUPPORTED;
+ } else {
+ // we only support validBitsPerSample == bitsPerSample but some WAV_EXT
+ // writers don't correctly set the valid bits value, and leave it at 0.
+ ALOGW("WAVE_EXT has 0 valid bits per sample, ignoring");
+ }
+ }
+
+ mChannelMask = U32_LE_AT(&formatSpec[20]);
+ ALOGV("numChannels=%d channelMask=0x%x", mNumChannels, mChannelMask);
+ if ((mChannelMask >> 18) != 0) {
+ ALOGE("invalid channel mask 0x%x", mChannelMask);
+ return ERROR_MALFORMED;
+ }
+
+ if ((mChannelMask != CHANNEL_MASK_USE_CHANNEL_ORDER)
+ && (popcount(mChannelMask) != mNumChannels)) {
+ ALOGE("invalid number of channels (%d) in channel mask (0x%x)",
+ popcount(mChannelMask), mChannelMask);
+ return ERROR_MALFORMED;
+ }
+
+ // In a WAVE_EXT header, the first two bytes of the GUID stored at byte 24 contain
+ // the sample format, using the same definitions as a regular WAV header
+ mWaveFormat = U16_LE_AT(&formatSpec[24]);
+ if (memcmp(&formatSpec[26], WAVEEXT_SUBFORMAT, 14) &&
+ memcmp(&formatSpec[26], AMBISONIC_SUBFORMAT, 14)) {
+ ALOGE("unsupported GUID");
+ return ERROR_UNSUPPORTED;
+ }
+ }
+
+ if (mWaveFormat == WAVE_FORMAT_PCM) {
+ if (mBitsPerSample != 8 && mBitsPerSample != 16
+ && mBitsPerSample != 24 && mBitsPerSample != 32) {
+ return ERROR_UNSUPPORTED;
+ }
+ } else if (mWaveFormat == WAVE_FORMAT_IEEE_FLOAT) {
+ if (mBitsPerSample != 32) { // TODO we don't support double
+ return ERROR_UNSUPPORTED;
+ }
+ }
+ else if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ if (mBitsPerSample != 0) {
+ return ERROR_UNSUPPORTED;
+ }
+ } else if (mWaveFormat == WAVE_FORMAT_MULAW || mWaveFormat == WAVE_FORMAT_ALAW) {
+ if (mBitsPerSample != 8) {
+ return ERROR_UNSUPPORTED;
+ }
+ } else {
+ return ERROR_UNSUPPORTED;
+ }
+
+ mValidFormat = true;
+ } else if (!memcmp(chunkHeader, "data", 4)) {
+ if (mValidFormat) {
+ mDataOffset = offset;
+ mDataSize = chunkSize;
+
+ mTrackMeta.clear();
+
+ switch (mWaveFormat) {
+ case WAVE_FORMAT_PCM:
+ case WAVE_FORMAT_IEEE_FLOAT:
+ mTrackMeta.setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+ break;
+ case WAVE_FORMAT_ALAW:
+ mTrackMeta.setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_ALAW);
+ break;
+ case WAVE_FORMAT_MSGSM:
+ mTrackMeta.setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MSGSM);
+ break;
+ default:
+ CHECK_EQ(mWaveFormat, (uint16_t)WAVE_FORMAT_MULAW);
+ mTrackMeta.setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_MLAW);
+ break;
+ }
+
+ mTrackMeta.setInt32(kKeyChannelCount, mNumChannels);
+ mTrackMeta.setInt32(kKeyChannelMask, mChannelMask);
+ mTrackMeta.setInt32(kKeySampleRate, mSampleRate);
+ mTrackMeta.setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
+
+ int64_t durationUs = 0;
+ if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ // 65 bytes decode to 320 8kHz samples
+ durationUs =
+ 1000000LL * (mDataSize / 65 * 320) / 8000;
+ } else {
+ size_t bytesPerSample = mBitsPerSample >> 3;
+
+ if (!bytesPerSample || !mNumChannels)
+ return ERROR_MALFORMED;
+
+ size_t num_samples = mDataSize / (mNumChannels * bytesPerSample);
+
+ if (!mSampleRate)
+ return ERROR_MALFORMED;
+
+ durationUs =
+ 1000000LL * num_samples / mSampleRate;
+ }
+
+ mTrackMeta.setInt64(kKeyDuration, durationUs);
+
+ return OK;
+ }
+ }
+
+ offset += chunkSize;
+ }
+
+ return NO_INIT;
+}
+
+const size_t WAVSource::kMaxFrameSize = 32768;
+
+WAVSource::WAVSource(
+ DataSourceBase *dataSource,
+ MetaDataBase &meta,
+ uint16_t waveFormat,
+ int32_t bitsPerSample,
+ off64_t offset, size_t size)
+ : mDataSource(dataSource),
+ mMeta(meta),
+ mWaveFormat(waveFormat),
+ mSampleRate(0),
+ mNumChannels(0),
+ mBitsPerSample(bitsPerSample),
+ mOffset(offset),
+ mSize(size),
+ mStarted(false),
+ mGroup(NULL) {
+ CHECK(mMeta.findInt32(kKeySampleRate, &mSampleRate));
+ CHECK(mMeta.findInt32(kKeyChannelCount, &mNumChannels));
+
+ mMeta.setInt32(kKeyMaxInputSize, kMaxFrameSize);
+}
+
+WAVSource::~WAVSource() {
+ if (mStarted) {
+ stop();
+ }
+}
+
+status_t WAVSource::start(MetaDataBase * /* params */) {
+ ALOGV("WAVSource::start");
+
+ CHECK(!mStarted);
+
+ // some WAV files may have large audio buffers that use shared memory transfer.
+ mGroup = new MediaBufferGroup(4 /* buffers */, kMaxFrameSize);
+
+ if (mBitsPerSample == 8) {
+ // As a temporary buffer for 8->16 bit conversion.
+ mGroup->add_buffer(MediaBufferBase::Create(kMaxFrameSize));
+ }
+
+ mCurrentPos = mOffset;
+
+ mStarted = true;
+
+ return OK;
+}
+
+status_t WAVSource::stop() {
+ ALOGV("WAVSource::stop");
+
+ CHECK(mStarted);
+
+ delete mGroup;
+ mGroup = NULL;
+
+ mStarted = false;
+
+ return OK;
+}
+
+status_t WAVSource::getFormat(MetaDataBase &meta) {
+ ALOGV("WAVSource::getFormat");
+
+ meta = mMeta;
+ return OK;
+}
+
+status_t WAVSource::read(
+ MediaBufferBase **out, const ReadOptions *options) {
+ *out = NULL;
+
+ if (options != nullptr && options->getNonBlocking() && !mGroup->has_buffers()) {
+ return WOULD_BLOCK;
+ }
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
+ int64_t pos = 0;
+
+ if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ // 65 bytes decode to 320 8kHz samples
+ int64_t samplenumber = (seekTimeUs * mSampleRate) / 1000000;
+ int64_t framenumber = samplenumber / 320;
+ pos = framenumber * 65;
+ } else {
+ pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * (mBitsPerSample >> 3);
+ }
+ if (pos > (off64_t)mSize) {
+ pos = mSize;
+ }
+ mCurrentPos = pos + mOffset;
+ }
+
+ MediaBufferBase *buffer;
+ status_t err = mGroup->acquire_buffer(&buffer);
+ if (err != OK) {
+ return err;
+ }
+
+ // make sure that maxBytesToRead is a multiple of 3 in the 24-bit case
+ size_t maxBytesToRead =
+ mBitsPerSample == 8 ? kMaxFrameSize / 2 :
+ (mBitsPerSample == 24 ? 3*(kMaxFrameSize/3): kMaxFrameSize);
+
+ size_t maxBytesAvailable =
+ (mCurrentPos - mOffset >= (off64_t)mSize)
+ ? 0 : mSize - (mCurrentPos - mOffset);
+
+ if (maxBytesToRead > maxBytesAvailable) {
+ maxBytesToRead = maxBytesAvailable;
+ }
+
+ if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ // Microsoft packs 2 frames into 65 bytes, rather than using separate 33-byte frames,
+ // so read multiples of 65, and use smaller buffers to account for ~10:1 expansion ratio
+ if (maxBytesToRead > 1024) {
+ maxBytesToRead = 1024;
+ }
+ maxBytesToRead = (maxBytesToRead / 65) * 65;
+ } else {
+ // read only an integral number of audio unit frames.
+ const size_t inputUnitFrameSize = mNumChannels * mBitsPerSample / 8;
+ maxBytesToRead -= maxBytesToRead % inputUnitFrameSize;
+ }
+
+ ssize_t n = mDataSource->readAt(
+ mCurrentPos, buffer->data(),
+ maxBytesToRead);
+
+ if (n <= 0) {
+ buffer->release();
+ buffer = NULL;
+
+ return ERROR_END_OF_STREAM;
+ }
+
+ buffer->set_range(0, n);
+
+ // TODO: add capability to return data as float PCM instead of 16 bit PCM.
+ if (mWaveFormat == WAVE_FORMAT_PCM) {
+ if (mBitsPerSample == 8) {
+ // Convert 8-bit unsigned samples to 16-bit signed.
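+ // (8-bit WAV samples are unsigned and centered at 128; the helper
+ // re-centers them and scales up to the 16-bit range.)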
+
+ // Create new buffer with 2 byte wide samples
+ MediaBufferBase *tmp;
+ CHECK_EQ(mGroup->acquire_buffer(&tmp), (status_t)OK);
+ tmp->set_range(0, 2 * n);
+
+ memcpy_to_i16_from_u8((int16_t *)tmp->data(), (const uint8_t *)buffer->data(), n);
+ buffer->release();
+ buffer = tmp;
+ } else if (mBitsPerSample == 24) {
+ // Convert 24-bit signed samples to 16-bit signed in place
+ const size_t numSamples = n / 3;
+
+ memcpy_to_i16_from_p24((int16_t *)buffer->data(), (const uint8_t *)buffer->data(), numSamples);
+ buffer->set_range(0, 2 * numSamples);
+ } else if (mBitsPerSample == 32) {
+ // Convert 32-bit signed samples to 16-bit signed in place
+ const size_t numSamples = n / 4;
+
+ memcpy_to_i16_from_i32((int16_t *)buffer->data(), (const int32_t *)buffer->data(), numSamples);
+ buffer->set_range(0, 2 * numSamples);
+ }
+ } else if (mWaveFormat == WAVE_FORMAT_IEEE_FLOAT) {
+ if (mBitsPerSample == 32) {
+ // Convert 32-bit float samples to 16-bit signed in place
+ const size_t numSamples = n / 4;
+
+ memcpy_to_i16_from_float((int16_t *)buffer->data(), (const float *)buffer->data(), numSamples);
+ buffer->set_range(0, 2 * numSamples);
+ }
+ }
+
+ int64_t timeStampUs = 0;
+
+ if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ timeStampUs = 1000000LL * (mCurrentPos - mOffset) * 320 / 65 / mSampleRate;
+ } else {
+ size_t bytesPerSample = mBitsPerSample >> 3;
+ timeStampUs = 1000000LL * (mCurrentPos - mOffset)
+ / (mNumChannels * bytesPerSample) / mSampleRate;
+ }
+
+ buffer->meta_data().setInt64(kKeyTime, timeStampUs);
+
+ buffer->meta_data().setInt32(kKeyIsSyncFrame, 1);
+ mCurrentPos += n;
+
+ *out = buffer;
+
+ return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static MediaExtractor* CreateExtractor(
+ DataSourceBase *source,
+ void *) {
+ return new WAVExtractor(source);
+}
+
+static MediaExtractor::CreatorFunc Sniff(
+ DataSourceBase *source,
+ float *confidence,
+ void **,
+ MediaExtractor::FreeMetaFunc *) {
+ char header[12];
+ if (source->readAt(0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
+ return NULL;
+ }
+
+ if (memcmp(header, "RIFF", 4) || memcmp(&header[8], "WAVE", 4)) {
+ return NULL;
+ }
+
+ MediaExtractor *extractor = new WAVExtractor(source);
+ int numTracks = extractor->countTracks();
+ delete extractor;
+ if (numTracks == 0) {
+ return NULL;
+ }
+
+ *confidence = 0.3f;
+
+ return CreateExtractor;
+}
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
+ return {
+ MediaExtractor::EXTRACTORDEF_VERSION,
+ UUID("7d613858-5837-4a38-84c5-332d1cddee27"),
+ 1, // version
+ "WAV Extractor",
+ Sniff
+ };
+}
+
+} // extern "C"
+
+} // namespace android
diff --git a/media/extractors/wav/WAVExtractor.h b/media/extractors/wav/WAVExtractor.h
new file mode 100644
index 0000000..467d0b7
--- /dev/null
+++ b/media/extractors/wav/WAVExtractor.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WAV_EXTRACTOR_H_
+
+#define WAV_EXTRACTOR_H_
+
+#include <utils/Errors.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/MetaDataBase.h>
+
+namespace android {
+
+struct AMessage;
+class DataSourceBase;
+class String8;
+
+class WAVExtractor : public MediaExtractor {
+public:
+ explicit WAVExtractor(DataSourceBase *source);
+
+ virtual size_t countTracks();
+ virtual MediaTrack *getTrack(size_t index);
+ virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+
+ virtual status_t getMetaData(MetaDataBase& meta);
+ virtual const char * name() { return "WAVExtractor"; }
+
+ virtual ~WAVExtractor();
+
+private:
+ DataSourceBase *mDataSource;
+ status_t mInitCheck;
+ bool mValidFormat;
+ uint16_t mWaveFormat;
+ uint16_t mNumChannels;
+ uint32_t mChannelMask;
+ uint32_t mSampleRate;
+ uint16_t mBitsPerSample;
+ off64_t mDataOffset;
+ size_t mDataSize;
+ MetaDataBase mTrackMeta;
+
+ status_t init();
+
+ WAVExtractor(const WAVExtractor &);
+ WAVExtractor &operator=(const WAVExtractor &);
+};
+
+} // namespace android
+
+#endif // WAV_EXTRACTOR_H_
+
diff --git a/media/extractors/wav/exports.lds b/media/extractors/wav/exports.lds
new file mode 100644
index 0000000..b1309ee
--- /dev/null
+++ b/media/extractors/wav/exports.lds
@@ -0,0 +1 @@
+{ global: GETEXTRACTORDEF; local: *; };
diff --git a/media/img_utils/include/img_utils/DngUtils.h b/media/img_utils/include/img_utils/DngUtils.h
index 1d8df9c..de8f120 100644
--- a/media/img_utils/include/img_utils/DngUtils.h
+++ b/media/img_utils/include/img_utils/DngUtils.h
@@ -39,11 +39,16 @@
*/
class ANDROID_API OpcodeListBuilder : public LightRefBase<OpcodeListBuilder> {
public:
+ // Note that the Adobe DNG 1.4 spec for Bayer phase (defined for the
+ // FixBadPixelsConstant and FixBadPixelsList opcodes) is incorrect. It's
+ // inconsistent with the DNG SDK (cf. dng_negative::SetBayerMosaic and
+ // dng_opcode_FixBadPixelsList::IsGreen), and Adobe confirms that the
+ // spec should be updated to match the SDK.
enum CfaLayout {
- CFA_RGGB = 0,
- CFA_GRBG,
- CFA_GBRG,
+ CFA_GRBG = 0,
+ CFA_RGGB,
CFA_BGGR,
+ CFA_GBRG,
};
OpcodeListBuilder();
diff --git a/media/img_utils/src/DngUtils.cpp b/media/img_utils/src/DngUtils.cpp
index 9dc5f05..9ac7e2a 100644
--- a/media/img_utils/src/DngUtils.cpp
+++ b/media/img_utils/src/DngUtils.cpp
@@ -18,6 +18,7 @@
#include <inttypes.h>
+#include <algorithm>
#include <vector>
#include <math.h>
@@ -61,8 +62,8 @@
const float* lensShadingMap) {
uint32_t activeAreaWidth = activeAreaRight - activeAreaLeft;
uint32_t activeAreaHeight = activeAreaBottom - activeAreaTop;
- double spacingV = 1.0 / lsmHeight;
- double spacingH = 1.0 / lsmWidth;
+ double spacingV = 1.0 / std::max(1u, lsmHeight - 1);
+ double spacingH = 1.0 / std::max(1u, lsmWidth - 1);
std::vector<float> redMapVector(lsmWidth * lsmHeight);
float *redMap = redMapVector.data();
@@ -301,29 +302,14 @@
normalizedOCX = CLAMP(normalizedOCX, 0, 1);
normalizedOCY = CLAMP(normalizedOCY, 0, 1);
- // Conversion factors from Camera2 K factors to DNG spec. K factors:
- //
- // Note: these are necessary because our unit system assumes a
- // normalized max radius of sqrt(2), whereas the DNG spec's
- // WarpRectilinear opcode assumes a normalized max radius of 1.
- // Thus, each K coefficient must include the domain scaling
- // factor (the DNG domain is scaled by sqrt(2) to emulate the
- // domain used by the Camera2 specification).
-
- const double c_0 = sqrt(2);
- const double c_1 = 2 * sqrt(2);
- const double c_2 = 4 * sqrt(2);
- const double c_3 = 8 * sqrt(2);
- const double c_4 = 2;
- const double c_5 = 2;
-
- const double coeffs[] = { c_0 * kCoeffs[0],
- c_1 * kCoeffs[1],
- c_2 * kCoeffs[2],
- c_3 * kCoeffs[3],
- c_4 * kCoeffs[4],
- c_5 * kCoeffs[5] };
-
+ double coeffs[6] = {
+ kCoeffs[0],
+ kCoeffs[1],
+ kCoeffs[2],
+ kCoeffs[3],
+ kCoeffs[4],
+ kCoeffs[5]
+ };
return addWarpRectilinear(/*numPlanes*/1,
/*opticalCenterX*/normalizedOCX,
diff --git a/media/libaaudio/examples/Android.bp b/media/libaaudio/examples/Android.bp
index f2e00a7..639fab2 100644
--- a/media/libaaudio/examples/Android.bp
+++ b/media/libaaudio/examples/Android.bp
@@ -1,4 +1,6 @@
+subdirs = ["*"]
+
cc_library_headers {
name: "libaaudio_example_utils",
- export_include_dirs: ["."],
+ export_include_dirs: ["utils"],
}
diff --git a/media/libaaudio/examples/input_monitor/Android.bp b/media/libaaudio/examples/input_monitor/Android.bp
new file mode 100644
index 0000000..d8c5843
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/Android.bp
@@ -0,0 +1,17 @@
+cc_test {
+ name: "input_monitor",
+ gtest: false,
+ srcs: ["src/input_monitor.cpp"],
+ cflags: ["-Wall", "-Werror"],
+ shared_libs: ["libaaudio"],
+ header_libs: ["libaaudio_example_utils"],
+}
+
+cc_test {
+ name: "input_monitor_callback",
+ gtest: false,
+ srcs: ["src/input_monitor_callback.cpp"],
+ cflags: ["-Wall", "-Werror"],
+ shared_libs: ["libaaudio"],
+ header_libs: ["libaaudio_example_utils"],
+}
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
index e5ad2d9..c1ff34b 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -26,23 +26,22 @@
#include "AAudioExampleUtils.h"
#include "AAudioSimpleRecorder.h"
-// TODO support FLOAT
-#define REQUIRED_FORMAT AAUDIO_FORMAT_PCM_I16
#define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
static const int FRAMES_PER_LINE = 20000;
int main(int argc, const char **argv)
{
- AAudioArgsParser argParser;
- aaudio_result_t result;
- AAudioSimpleRecorder recorder;
- int actualSamplesPerFrame;
- int actualSampleRate;
- aaudio_format_t actualDataFormat;
+ AAudioArgsParser argParser;
+ AAudioSimpleRecorder recorder;
+ AAudioStream *aaudioStream = nullptr;
- AAudioStream *aaudioStream = nullptr;
+ aaudio_result_t result;
+ aaudio_format_t actualDataFormat;
aaudio_stream_state_t state;
+
+ int32_t actualSamplesPerFrame;
+ int32_t actualSampleRate;
int32_t framesPerBurst = 0;
int32_t framesPerRead = 0;
int32_t framesToRecord = 0;
@@ -50,18 +49,18 @@
int32_t nextFrameCount = 0;
int32_t frameCount = 0;
int32_t xRunCount = 0;
- int64_t previousFramePosition = -1;
- int16_t *data = nullptr;
- float peakLevel = 0.0;
int32_t deviceId;
+ int16_t *shortData = nullptr;
+ float *floatData = nullptr;
+ float peakLevel = 0.0;
+
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Monitor input level using AAudio read, V0.1.2\n", argv[0]);
+ printf("%s - Monitor input level using AAudio read, V0.1.3\n", argv[0]);
- argParser.setFormat(REQUIRED_FORMAT);
if (argParser.parseArgs(argc, argv)) {
return EXIT_FAILURE;
}
@@ -69,6 +68,7 @@
result = recorder.open(argParser);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
+ printf("IMPORTANT - Did you remember to enter: adb root\n");
goto finish;
}
aaudioStream = recorder.getStream();
@@ -96,17 +96,18 @@
printf("DataFormat: framesPerRead = %d\n",framesPerRead);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- printf("DataFormat: requested = %d, actual = %d\n",
- REQUIRED_FORMAT, actualDataFormat);
- // TODO handle other data formats
- assert(actualDataFormat == REQUIRED_FORMAT);
// Allocate a buffer for the PCM_16 audio data.
- data = new(std::nothrow) int16_t[framesPerRead * actualSamplesPerFrame];
- if (data == nullptr) {
- fprintf(stderr, "ERROR - could not allocate data buffer\n");
- result = AAUDIO_ERROR_NO_MEMORY;
- goto finish;
+ switch (actualDataFormat) {
+ case AAUDIO_FORMAT_PCM_I16:
+ shortData = new int16_t[framesPerRead * actualSamplesPerFrame];
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ floatData = new float[framesPerRead * actualSamplesPerFrame];
+ break;
+ default:
+ fprintf(stderr, "UNEXPECTED FORMAT! %d", actualDataFormat);
+ goto finish;
}
// Start the stream.
@@ -126,7 +127,12 @@
// Read audio data from the stream.
const int64_t timeoutNanos = 1000 * NANOS_PER_MILLISECOND;
int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
- int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
+ int actual = 0;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ actual = AAudioStream_read(aaudioStream, shortData, minFrames, timeoutNanos);
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ actual = AAudioStream_read(aaudioStream, floatData, minFrames, timeoutNanos);
+ }
if (actual < 0) {
fprintf(stderr, "ERROR - AAudioStream_read() returned %d\n", actual);
result = actual;
@@ -140,7 +146,12 @@
// Peak finder.
for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
- float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+ float sample = 0.0f;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ sample = shortData[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ sample = floatData[frameIndex * actualSamplesPerFrame];
+ }
if (sample > peakLevel) {
peakLevel = sample;
}
@@ -151,17 +162,15 @@
displayPeakLevel(peakLevel);
peakLevel = 0.0;
nextFrameCount += FRAMES_PER_LINE;
- }
- // Print timestamps.
- int64_t framePosition = 0;
- int64_t frameTime = 0;
- aaudio_result_t timeResult;
- timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
- &framePosition, &frameTime);
+ // Print timestamps.
+ int64_t framePosition = 0;
+ int64_t frameTime = 0;
+ aaudio_result_t timeResult;
+ timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
+ &framePosition, &frameTime);
- if (timeResult == AAUDIO_OK) {
- if (framePosition > (previousFramePosition + FRAMES_PER_LINE)) {
+ if (timeResult == AAUDIO_OK) {
int64_t realTime = getNanoseconds();
int64_t framesRead = AAudioStream_getFramesRead(aaudioStream);
@@ -175,11 +184,15 @@
(long long) framePosition,
(long long) frameTime,
latencyMillis);
- previousFramePosition = framePosition;
+ } else {
+ printf("WARNING - AAudioStream_getTimestamp() returned %d\n", timeResult);
}
}
}
+ state = AAudioStream_getState(aaudioStream);
+ printf("after loop, state = %s\n", AAudio_convertStreamStateToText(state));
+
xRunCount = AAudioStream_getXRunCount(aaudioStream);
printf("AAudioStream_getXRunCount %d\n", xRunCount);
@@ -192,7 +205,8 @@
finish:
recorder.close();
- delete[] data;
+ delete[] shortData;
+ delete[] floatData;
printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
}
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
index 893795b..d10f812 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -26,29 +26,39 @@
#include "AAudioExampleUtils.h"
#include "AAudioSimpleRecorder.h"
-#define NUM_SECONDS 5
-
-int main(int argc, char **argv)
+int main(int argc, const char **argv)
{
- (void)argc; // unused
- AAudioSimpleRecorder recorder;
- PeakTrackerData_t myData = {0.0};
- aaudio_result_t result;
+ AAudioArgsParser argParser;
+ AAudioSimpleRecorder recorder;
+ PeakTrackerData_t myData = {0.0};
+ AAudioStream *aaudioStream = nullptr;
+ aaudio_result_t result;
aaudio_stream_state_t state;
+
+ int loopsNeeded = 0;
const int displayRateHz = 20; // arbitrary
- const int loopsNeeded = NUM_SECONDS * displayRateHz;
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Display audio input using an AAudio callback, V0.1.2\n", argv[0]);
+ printf("%s - Display audio input using an AAudio callback, V0.1.3\n", argv[0]);
- result = recorder.open(2, 48000, AAUDIO_FORMAT_PCM_I16,
- SimpleRecorderDataCallbackProc, SimpleRecorderErrorCallbackProc, &myData);
+ if (argParser.parseArgs(argc, argv)) {
+ return EXIT_FAILURE;
+ }
+
+ result = recorder.open(argParser,
+ SimpleRecorderDataCallbackProc,
+ SimpleRecorderErrorCallbackProc,
+ &myData);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
+ printf("IMPORTANT - Did you remember to enter: adb root\n");
goto error;
}
+ aaudioStream = recorder.getStream();
+ argParser.compareWithStream(aaudioStream);
+
printf("recorder.getFramesPerSecond() = %d\n", recorder.getFramesPerSecond());
printf("recorder.getSamplesPerFrame() = %d\n", recorder.getSamplesPerFrame());
@@ -58,7 +68,9 @@
goto error;
}
- printf("Sleep for %d seconds while audio record in a callback thread.\n", NUM_SECONDS);
+ printf("Sleep for %d seconds while audio record in a callback thread.\n",
+ argParser.getDurationSeconds());
+ loopsNeeded = argParser.getDurationSeconds() * displayRateHz;
for (int i = 0; i < loopsNeeded; i++)
{
const struct timespec request = { .tv_sec = 0,
@@ -67,7 +79,7 @@
printf("%08d: ", (int)recorder.getFramesRead());
displayPeakLevel(myData.peakLevel);
- result = AAudioStream_waitForStateChange(recorder.getStream(),
+ result = AAudioStream_waitForStateChange(aaudioStream,
AAUDIO_STREAM_STATE_CLOSED,
&state,
0);
@@ -93,7 +105,8 @@
goto error;
}
- printf("Sleep for %d seconds while audio records in a callback thread.\n", NUM_SECONDS);
+ printf("Sleep for %d seconds while audio records in a callback thread.\n",
+ argParser.getDurationSeconds());
for (int i = 0; i < loopsNeeded; i++)
{
const struct timespec request = { .tv_sec = 0,
@@ -102,13 +115,14 @@
printf("%08d: ", (int)recorder.getFramesRead());
displayPeakLevel(myData.peakLevel);
- state = AAudioStream_getState(recorder.getStream());
+ state = AAudioStream_getState(aaudioStream);
if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
break;
}
}
printf("Woke up now.\n");
+ argParser.compareWithStream(aaudioStream);
result = recorder.stop();
if (result != AAUDIO_OK) {
diff --git a/media/libaaudio/examples/loopback/Android.bp b/media/libaaudio/examples/loopback/Android.bp
new file mode 100644
index 0000000..5b7d956
--- /dev/null
+++ b/media/libaaudio/examples/loopback/Android.bp
@@ -0,0 +1,12 @@
+cc_test {
+ name: "aaudio_loopback",
+ gtest: false,
+ srcs: ["src/loopback.cpp"],
+ cflags: ["-Wall", "-Werror"],
+ static_libs: ["libsndfile"],
+ shared_libs: [
+ "libaaudio",
+ "libaudioutils",
+ ],
+ header_libs: ["libaaudio_example_utils"],
+}
diff --git a/media/libaaudio/examples/loopback/jni/Android.mk b/media/libaaudio/examples/loopback/jni/Android.mk
index 1fe3def..aebe877 100644
--- a/media/libaaudio/examples/loopback/jni/Android.mk
+++ b/media/libaaudio/examples/loopback/jni/Android.mk
@@ -10,6 +10,7 @@
# NDK recommends using this kind of relative path instead of an absolute path.
LOCAL_SRC_FILES:= ../src/loopback.cpp
LOCAL_CFLAGS := -Wall -Werror
-LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_SHARED_LIBRARIES := libaaudio libaudioutils
LOCAL_MODULE := aaudio_loopback
include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
index 276b45f..ef9a753 100644
--- a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
+++ b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
@@ -30,6 +30,8 @@
#include <stdlib.h>
#include <unistd.h>
+#include <audio_utils/sndfile.h>
+
// Tag for machine readable results as property = value pairs
#define LOOPBACK_RESULT_TAG "RESULT: "
#define LOOPBACK_SAMPLE_RATE 48000
@@ -37,13 +39,18 @@
#define MILLIS_PER_SECOND 1000
#define MAX_ZEROTH_PARTIAL_BINS 40
+constexpr double MAX_ECHO_GAIN = 10.0; // based on experiments, otherwise autocorrelation too noisy
+// A narrow impulse seems to have better immunity against overestimating the
+// latency when the auto-correlator picks up subharmonics.
static const float s_Impulse[] = {
- 0.0f, 0.0f, 0.0f, 0.0f, 0.2f, // silence on each side of the impulse
- 0.5f, 0.9999f, 0.0f, -0.9999, -0.5f, // bipolar
- -0.2f, 0.0f, 0.0f, 0.0f, 0.0f
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.3f, // silence on each side of the impulse
+ 0.99f, 0.0f, -0.99f, // bipolar with one zero crossing in middle
+ -0.3f, 0.0f, 0.0f, 0.0f, 0.0f
};
+constexpr int32_t kImpulseSizeInFrames = (int32_t)(sizeof(s_Impulse) / sizeof(s_Impulse[0]));
+
class PseudoRandom {
public:
PseudoRandom() {}
@@ -156,6 +163,8 @@
const float *needle, int needleSize,
LatencyReport *report) {
const double threshold = 0.1;
+ printf("measureLatencyFromEchos: haystackSize = %d, needleSize = %d\n",
+ haystackSize, needleSize);
// Find first peak
int first = (int) (findFirstMatch(haystack,
@@ -173,7 +182,7 @@
needleSize,
threshold) + 0.5);
- printf("first = %d, again at %d\n", first, again);
+ printf("measureLatencyFromEchos: first = %d, again at %d\n", first, again);
first = again;
// Allocate results array
@@ -270,37 +279,60 @@
return mData;
}
+ void setSampleRate(int32_t sampleRate) {
+ mSampleRate = sampleRate;
+ }
+
+ int32_t getSampleRate() {
+ return mSampleRate;
+ }
+
int save(const char *fileName, bool writeShorts = true) {
+ SNDFILE *sndFile = nullptr;
int written = 0;
- const int chunkSize = 64;
- FILE *fid = fopen(fileName, "wb");
- if (fid == NULL) {
+ SF_INFO info = {
+ .frames = mFrameCounter,
+ .samplerate = mSampleRate,
+ .channels = 1,
+ .format = SF_FORMAT_WAV | (writeShorts ? SF_FORMAT_PCM_16 : SF_FORMAT_FLOAT)
+ };
+
+ sndFile = sf_open(fileName, SFM_WRITE, &info);
+ if (sndFile == nullptr) {
+ printf("AudioRecording::save(%s) failed to open file\n", fileName);
return -errno;
}
- if (writeShorts) {
- int16_t buffer[chunkSize];
- int32_t framesLeft = mFrameCounter;
- int32_t cursor = 0;
- while (framesLeft) {
- int32_t framesToWrite = framesLeft < chunkSize ? framesLeft : chunkSize;
- for (int i = 0; i < framesToWrite; i++) {
- buffer[i] = (int16_t) (mData[cursor++] * 32767);
- }
- written += fwrite(buffer, sizeof(int16_t), framesToWrite, fid);
- framesLeft -= framesToWrite;
- }
- } else {
- written = (int) fwrite(mData, sizeof(float), mFrameCounter, fid);
- }
- fclose(fid);
+ written = sf_writef_float(sndFile, mData, mFrameCounter);
+
+ sf_close(sndFile);
return written;
}
+ int load(const char *fileName) {
+ SNDFILE *sndFile = nullptr;
+ SF_INFO info;
+
+ sndFile = sf_open(fileName, SFM_READ, &info);
+ if (sndFile == nullptr) {
+ printf("AudioRecording::load(%s) failed to open file\n", fileName);
+ return -errno;
+ }
+
+ assert(info.channels == 1);
+
+ allocate(info.frames);
+ mFrameCounter = sf_readf_float(sndFile, mData, info.frames);
+
+ sf_close(sndFile);
+ return mFrameCounter;
+ }
+
private:
float *mData = nullptr;
int32_t mFrameCounter = 0;
int32_t mMaxFrames = 0;
+ int32_t mSampleRate = 48000; // common default
};
// ====================================================================================
@@ -320,11 +352,25 @@
virtual void printStatus() {};
+ virtual int getResult() {
+ return -1;
+ }
+
virtual bool isDone() {
return false;
}
- void setSampleRate(int32_t sampleRate) {
+ virtual int save(const char *fileName) {
+ (void) fileName;
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ virtual int load(const char *fileName) {
+ (void) fileName;
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ virtual void setSampleRate(int32_t sampleRate) {
mSampleRate = sampleRate;
}
@@ -368,8 +414,7 @@
static void printAudioScope(float sample) {
- const int maxStars = 80
- ; // arbitrary, fits on one line
+ const int maxStars = 80; // arbitrary, fits on one line
char c = '*';
if (sample < -1.0) {
sample = -1.0;
@@ -395,7 +440,13 @@
public:
EchoAnalyzer() : LoopbackProcessor() {
- audioRecorder.allocate(2 * LOOPBACK_SAMPLE_RATE);
+ mAudioRecording.allocate(2 * getSampleRate());
+ mAudioRecording.setSampleRate(getSampleRate());
+ }
+
+ void setSampleRate(int32_t sampleRate) override {
+ LoopbackProcessor::setSampleRate(sampleRate);
+ mAudioRecording.setSampleRate(sampleRate);
}
void reset() override {
@@ -406,8 +457,12 @@
mState = STATE_INITIAL_SILENCE;
}
+ virtual int getResult() {
+ return mState == STATE_DONE ? 0 : -1;
+ }
+
virtual bool isDone() {
- return mState == STATE_DONE;
+ return mState == STATE_DONE || mState == STATE_FAILED;
}
void setGain(float gain) {
@@ -423,44 +478,47 @@
printf("EchoAnalyzer ---------------\n");
printf(LOOPBACK_RESULT_TAG "measured.gain = %f\n", mMeasuredLoopGain);
printf(LOOPBACK_RESULT_TAG "echo.gain = %f\n", mEchoGain);
- printf(LOOPBACK_RESULT_TAG "frame.count = %d\n", mFrameCounter);
printf(LOOPBACK_RESULT_TAG "test.state = %d\n", mState);
if (mMeasuredLoopGain >= 0.9999) {
printf(" ERROR - clipping, turn down volume slightly\n");
} else {
const float *needle = s_Impulse;
int needleSize = (int) (sizeof(s_Impulse) / sizeof(float));
- float *haystack = audioRecorder.getData();
- int haystackSize = audioRecorder.size();
- measureLatencyFromEchos(haystack, haystackSize, needle, needleSize, &latencyReport);
- if (latencyReport.confidence < 0.01) {
- printf(" ERROR - confidence too low = %f\n", latencyReport.confidence);
+ float *haystack = mAudioRecording.getData();
+ int haystackSize = mAudioRecording.size();
+ measureLatencyFromEchos(haystack, haystackSize, needle, needleSize, &mLatencyReport);
+ if (mLatencyReport.confidence < 0.01) {
+ printf(" ERROR - confidence too low = %f\n", mLatencyReport.confidence);
} else {
- double latencyMillis = 1000.0 * latencyReport.latencyInFrames / getSampleRate();
- printf(LOOPBACK_RESULT_TAG "latency.frames = %8.2f\n", latencyReport.latencyInFrames);
+ double latencyMillis = 1000.0 * mLatencyReport.latencyInFrames / getSampleRate();
+ printf(LOOPBACK_RESULT_TAG "latency.frames = %8.2f\n", mLatencyReport.latencyInFrames);
printf(LOOPBACK_RESULT_TAG "latency.msec = %8.2f\n", latencyMillis);
- printf(LOOPBACK_RESULT_TAG "latency.confidence = %8.6f\n", latencyReport.confidence);
+ printf(LOOPBACK_RESULT_TAG "latency.confidence = %8.6f\n", mLatencyReport.confidence);
}
}
-
- {
-#define ECHO_FILENAME "/data/oboe_echo.raw"
- int written = audioRecorder.save(ECHO_FILENAME);
- printf("Echo wrote %d mono samples to %s on Android device\n", written, ECHO_FILENAME);
- }
}
void printStatus() override {
- printf("state = %d, echo gain = %f ", mState, mEchoGain);
+ printf("st = %d, echo gain = %f ", mState, mEchoGain);
}
- static void sendImpulse(float *outputData, int outputChannelCount) {
- for (float sample : s_Impulse) {
+ void sendImpulses(float *outputData, int outputChannelCount, int numFrames) {
+ while (numFrames-- > 0) {
+ float sample = s_Impulse[mSampleIndex++];
+ if (mSampleIndex >= kImpulseSizeInFrames) {
+ mSampleIndex = 0;
+ }
+
*outputData = sample;
outputData += outputChannelCount;
}
}
+ void sendOneImpulse(float *outputData, int outputChannelCount) {
+ mSampleIndex = 0;
+ sendImpulses(outputData, outputChannelCount, kImpulseSizeInFrames);
+ }
+
void process(float *inputData, int inputChannelCount,
float *outputData, int outputChannelCount,
int numFrames) override {
@@ -486,26 +544,31 @@
break;
case STATE_MEASURING_GAIN:
- sendImpulse(outputData, outputChannelCount);
+ sendImpulses(outputData, outputChannelCount, numFrames);
peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
// If we get several in a row then go to next state.
if (peak > mPulseThreshold) {
if (mDownCounter-- <= 0) {
- nextState = STATE_WAITING_FOR_SILENCE;
//printf("%5d: switch to STATE_WAITING_FOR_SILENCE, measured peak = %f\n",
// mLoopCounter, peak);
mDownCounter = 8;
mMeasuredLoopGain = peak; // assumes original pulse amplitude is one
// Calculate gain that will give us a nice decaying echo.
mEchoGain = mDesiredEchoGain / mMeasuredLoopGain;
+ if (mEchoGain > MAX_ECHO_GAIN) {
+ printf("ERROR - loop gain too low. Increase the volume.\n");
+ nextState = STATE_FAILED;
+ } else {
+ nextState = STATE_WAITING_FOR_SILENCE;
+ }
}
- } else {
+ } else if (numFrames > kImpulseSizeInFrames) { // ignore short callbacks
mDownCounter = 8;
}
break;
case STATE_WAITING_FOR_SILENCE:
- // Output silence.
+ // Output silence and wait for the echos to die down.
numSamples = numFrames * outputChannelCount;
for (int i = 0; i < numSamples; i++) {
outputData[i] = 0;
@@ -524,14 +587,14 @@
break;
case STATE_SENDING_PULSE:
- audioRecorder.write(inputData, inputChannelCount, numFrames);
- sendImpulse(outputData, outputChannelCount);
+ mAudioRecording.write(inputData, inputChannelCount, numFrames);
+ sendOneImpulse(outputData, outputChannelCount);
nextState = STATE_GATHERING_ECHOS;
//printf("%5d: switch to STATE_GATHERING_ECHOS\n", mLoopCounter);
break;
case STATE_GATHERING_ECHOS:
- numWritten = audioRecorder.write(inputData, inputChannelCount, numFrames);
+ numWritten = mAudioRecording.write(inputData, inputChannelCount, numFrames);
peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
if (peak > mMeasuredLoopGain) {
mMeasuredLoopGain = peak; // AGC might be raising gain so adjust it on the fly.
@@ -565,6 +628,14 @@
mLoopCounter++;
}
+ int save(const char *fileName) override {
+ return mAudioRecording.save(fileName);
+ }
+
+ int load(const char *fileName) override {
+ return mAudioRecording.load(fileName);
+ }
+
private:
enum echo_state_t {
@@ -573,22 +644,23 @@
STATE_WAITING_FOR_SILENCE,
STATE_SENDING_PULSE,
STATE_GATHERING_ECHOS,
- STATE_DONE
+ STATE_DONE,
+ STATE_FAILED
};
- int mDownCounter = 500;
- int mLoopCounter = 0;
- float mPulseThreshold = 0.02f;
- float mSilenceThreshold = 0.002f;
- float mMeasuredLoopGain = 0.0f;
- float mDesiredEchoGain = 0.95f;
- float mEchoGain = 1.0f;
- echo_state_t mState = STATE_INITIAL_SILENCE;
- int32_t mFrameCounter = 0;
+ int32_t mDownCounter = 500;
+ int32_t mLoopCounter = 0;
+ int32_t mSampleIndex = 0;
+ float mPulseThreshold = 0.02f;
+ float mSilenceThreshold = 0.002f;
+ float mMeasuredLoopGain = 0.0f;
+ float mDesiredEchoGain = 0.95f;
+ float mEchoGain = 1.0f;
+ echo_state_t mState = STATE_INITIAL_SILENCE;
- AudioRecording audioRecorder;
- LatencyReport latencyReport;
- PeakDetector mPeakDetector;
+ AudioRecording mAudioRecording; // contains only the input after the gain detection burst
+ LatencyReport mLatencyReport;
+ // PeakDetector mPeakDetector;
};
@@ -602,6 +674,10 @@
class SineAnalyzer : public LoopbackProcessor {
public:
+ virtual int getResult() {
+ return mState == STATE_LOCKED ? 0 : -1;
+ }
+
void report() override {
printf("SineAnalyzer ------------------\n");
printf(LOOPBACK_RESULT_TAG "peak.amplitude = %7.5f\n", mPeakAmplitude);
@@ -609,7 +685,7 @@
printf(LOOPBACK_RESULT_TAG "phase.offset = %7.5f\n", mPhaseOffset);
printf(LOOPBACK_RESULT_TAG "ref.phase = %7.5f\n", mPhase);
printf(LOOPBACK_RESULT_TAG "frames.accumulated = %6d\n", mFramesAccumulated);
- printf(LOOPBACK_RESULT_TAG "sine.period = %6d\n", mPeriod);
+ printf(LOOPBACK_RESULT_TAG "sine.period = %6d\n", mSinePeriod);
printf(LOOPBACK_RESULT_TAG "test.state = %6d\n", mState);
printf(LOOPBACK_RESULT_TAG "frame.count = %6d\n", mFrameCounter);
// Did we ever get a lock?
@@ -623,7 +699,7 @@
}
void printStatus() override {
- printf(" state = %d, glitches = %d,", mState, mGlitchCount);
+ printf("st = %d, #gl = %3d,", mState, mGlitchCount);
}
double calculateMagnitude(double *phasePtr = NULL) {
@@ -648,6 +724,8 @@
void process(float *inputData, int inputChannelCount,
float *outputData, int outputChannelCount,
int numFrames) override {
+ mProcessCount++;
+
float peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
if (peak > mPeakAmplitude) {
mPeakAmplitude = peak;
@@ -659,6 +737,7 @@
float sinOut = sinf(mPhase);
switch (mState) {
+ case STATE_IDLE:
case STATE_IMMUNE:
case STATE_WAITING_FOR_SIGNAL:
break;
@@ -667,7 +746,7 @@
mCosAccumulator += sample * cosf(mPhase);
mFramesAccumulated++;
// Must be a multiple of the period or the calculation will not be accurate.
- if (mFramesAccumulated == mPeriod * 4) {
+ if (mFramesAccumulated == mSinePeriod * PERIODS_NEEDED_FOR_LOCK) {
mPhaseOffset = 0.0;
mMagnitude = calculateMagnitude(&mPhaseOffset);
if (mMagnitude > mThreshold) {
@@ -693,7 +772,22 @@
// mFrameCounter, mGlitchCount, predicted, sample);
mState = STATE_IMMUNE;
//printf("%5d: switch to STATE_IMMUNE\n", mFrameCounter);
- mDownCounter = mPeriod; // Set duration of IMMUNE state.
+ mDownCounter = mSinePeriod; // Set duration of IMMUNE state.
+ }
+
+ // Track incoming signal and slowly adjust magnitude to account
+ // for drift in the DRC or AGC.
+ mSinAccumulator += sample * sinOut;
+ mCosAccumulator += sample * cosf(mPhase);
+ mFramesAccumulated++;
+ // Must be a multiple of the period or the calculation will not be accurate.
+ if (mFramesAccumulated == mSinePeriod) {
+ const double coefficient = 0.1;
+ double phaseOffset = 0.0;
+ double magnitude = calculateMagnitude(&phaseOffset);
+ // One pole averaging filter.
+ mMagnitude = (mMagnitude * (1.0 - coefficient)) + (magnitude * coefficient);
+ resetAccumulator();
}
} break;
}
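
The "one pole averaging filter" above is a plain exponential moving average; a minimal standalone sketch (not part of the patch) of the same update rule:

    // Move the estimate a fixed fraction of the way toward each new measurement.
    double trackMagnitude(double estimate, double measured, double coefficient = 0.1) {
        return (estimate * (1.0 - coefficient)) + (measured * coefficient);
    }
    // Starting from 0.50 with repeated measurements of 0.40 the estimate drifts
    // toward 0.40: 0.490, 0.481, 0.473, ... which lets mMagnitude follow slow
    // DRC/AGC gain changes without reacting to a single glitch.
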
@@ -714,6 +808,9 @@
// Do these once per buffer.
switch (mState) {
+ case STATE_IDLE:
+ mState = STATE_IMMUNE; // so we can tell when the first buffer arrives
+ break;
case STATE_IMMUNE:
mDownCounter -= numFrames;
if (mDownCounter <= 0) {
@@ -744,21 +841,29 @@
void reset() override {
mGlitchCount = 0;
mState = STATE_IMMUNE;
- mPhaseIncrement = 2.0 * M_PI / mPeriod;
- printf("phaseInc = %f for period %d\n", mPhaseIncrement, mPeriod);
+ mDownCounter = IMMUNE_FRAME_COUNT;
+ mPhaseIncrement = 2.0 * M_PI / mSinePeriod;
+ printf("phaseInc = %f for period %d\n", mPhaseIncrement, mSinePeriod);
resetAccumulator();
+ mProcessCount = 0;
}
private:
enum sine_state_t {
+ STATE_IDLE,
STATE_IMMUNE,
STATE_WAITING_FOR_SIGNAL,
STATE_WAITING_FOR_LOCK,
STATE_LOCKED
};
- int mPeriod = 79;
+ enum constants {
+ IMMUNE_FRAME_COUNT = 48 * 500,
+ PERIODS_NEEDED_FOR_LOCK = 8
+ };
+
+ int mSinePeriod = 79;
double mPhaseIncrement = 0.0;
double mPhase = 0.0;
double mPhaseOffset = 0.0;
@@ -767,18 +872,19 @@
double mThreshold = 0.005;
double mTolerance = 0.01;
int32_t mFramesAccumulated = 0;
+ int32_t mProcessCount = 0;
double mSinAccumulator = 0.0;
double mCosAccumulator = 0.0;
int32_t mGlitchCount = 0;
double mPeakAmplitude = 0.0;
- int mDownCounter = 4000;
+ int mDownCounter = IMMUNE_FRAME_COUNT;
int32_t mFrameCounter = 0;
float mOutputAmplitude = 0.75;
PseudoRandom mWhiteNoise;
float mNoiseAmplitude = 0.00; // Used to experiment with warbling caused by DRC.
- sine_state_t mState = STATE_IMMUNE;
+ sine_state_t mState = STATE_IDLE;
};
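
For readers unfamiliar with how calculateMagnitude() recovers the tone: correlating the input against reference sin/cos terms over a whole number of periods yields the amplitude and phase directly. A self-contained sketch under that assumption (the normalization here is one common choice and may differ in detail from the real helper):

    #include <cmath>
    #include <cstdio>

    int main() {
        const int    sinePeriod = 79;                 // frames per period, as in SineAnalyzer
        const int    numFrames  = sinePeriod * 8;     // PERIODS_NEEDED_FOR_LOCK periods
        const double phaseIncrement = 2.0 * M_PI / sinePeriod;
        const double amplitude = 0.3, shift = 0.7;    // pretend loopback response

        double sinAccum = 0.0, cosAccum = 0.0, phase = 0.0;
        for (int i = 0; i < numFrames; i++) {
            double sample = amplitude * sin(phase + shift);   // simulated input
            sinAccum += sample * sin(phase);
            cosAccum += sample * cos(phase);
            phase += phaseIncrement;
        }
        // Over whole periods: sinAccum ~= A*N/2*cos(shift), cosAccum ~= A*N/2*sin(shift).
        double magnitude = 2.0 * sqrt(sinAccum * sinAccum + cosAccum * cosAccum) / numFrames;
        double offset    = atan2(cosAccum, sinAccum);
        printf("magnitude = %.4f (expect 0.3), offset = %.4f (expect 0.7)\n", magnitude, offset);
        return 0;
    }
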
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index b678d8a..91ebf73 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -37,23 +37,32 @@
// Tag for machine readable results as property = value pairs
#define RESULT_TAG "RESULT: "
-#define SAMPLE_RATE 48000
#define NUM_SECONDS 5
+#define PERIOD_MILLIS 1000
#define NUM_INPUT_CHANNELS 1
-#define FILENAME "/data/oboe_input.raw"
-#define APP_VERSION "0.1.22"
+#define FILENAME_ALL "/data/loopback_all.wav"
+#define FILENAME_ECHOS "/data/loopback_echos.wav"
+#define APP_VERSION "0.2.04"
+
+constexpr int kNumCallbacksToDrain = 20;
+constexpr int kNumCallbacksToDiscard = 20;
struct LoopbackData {
AAudioStream *inputStream = nullptr;
int32_t inputFramesMaximum = 0;
- int16_t *inputData = nullptr;
- int16_t peakShort = 0;
- float *conversionBuffer = nullptr;
+ int16_t *inputShortData = nullptr;
+ float *inputFloatData = nullptr;
+ aaudio_format_t actualInputFormat = AAUDIO_FORMAT_INVALID;
int32_t actualInputChannelCount = 0;
int32_t actualOutputChannelCount = 0;
- int32_t inputBuffersToDiscard = 10;
+ int32_t numCallbacksToDrain = kNumCallbacksToDrain;
+ int32_t numCallbacksToDiscard = kNumCallbacksToDiscard;
int32_t minNumFrames = INT32_MAX;
int32_t maxNumFrames = 0;
+ int32_t insufficientReadCount = 0;
+ int32_t insufficientReadFrames = 0;
+ int32_t framesReadTotal = 0;
+ int32_t framesWrittenTotal = 0;
bool isDone = false;
aaudio_result_t inputError = AAUDIO_OK;
@@ -61,14 +70,14 @@
SineAnalyzer sineAnalyzer;
EchoAnalyzer echoAnalyzer;
- AudioRecording audioRecorder;
+ AudioRecording audioRecording;
LoopbackProcessor *loopbackProcessor;
};
static void convertPcm16ToFloat(const int16_t *source,
float *destination,
int32_t numSamples) {
- const float scaler = 1.0f / 32768.0f;
+ constexpr float scaler = 1.0f / 32768.0f;
for (int i = 0; i < numSamples; i++) {
destination[i] = source[i] * scaler;
}
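
With that scaler, -32768 maps to -1.0f and 32767 to just under 1.0f. The reverse direction is not needed by this test, but for completeness here is a hedged sketch of a float-to-I16 conversion with clamping (not part of loopback.cpp):

    static void convertFloatToPcm16(const float *source, int16_t *destination, int32_t numSamples) {
        for (int i = 0; i < numSamples; i++) {
            float value = source[i] * 32768.0f;
            if (value > 32767.0f) value = 32767.0f;    // clamp instead of wrapping
            if (value < -32768.0f) value = -32768.0f;
            destination[i] = (int16_t) value;
        }
    }
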
@@ -78,6 +87,31 @@
// ========================= CALLBACK =================================================
// ====================================================================================
// Callback function that fills the audio output buffer.
+
+static int32_t readFormattedData(LoopbackData *myData, int32_t numFrames) {
+ int32_t framesRead = AAUDIO_ERROR_INVALID_FORMAT;
+ if (myData->actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
+ framesRead = AAudioStream_read(myData->inputStream, myData->inputShortData,
+ numFrames,
+ 0 /* timeoutNanoseconds */);
+ } else if (myData->actualInputFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ framesRead = AAudioStream_read(myData->inputStream, myData->inputFloatData,
+ numFrames,
+ 0 /* timeoutNanoseconds */);
+ } else {
+ printf("ERROR actualInputFormat = %d\n", myData->actualInputFormat);
+ assert(false);
+ }
+ if (framesRead < 0) {
+ myData->inputError = framesRead;
+ printf("ERROR in read = %d = %s\n", framesRead,
+ AAudio_convertResultToText(framesRead));
+ } else {
+ myData->framesReadTotal += framesRead;
+ }
+ return framesRead;
+}
+
static aaudio_data_callback_result_t MyDataCallbackProc(
AAudioStream *outputStream,
void *userData,
@@ -90,7 +124,7 @@
float *outputData = (float *) audioData;
// Read audio data from the input stream.
- int32_t framesRead;
+ int32_t actualFramesRead;
if (numFrames > myData->inputFramesMaximum) {
myData->inputError = AAUDIO_ERROR_OUT_OF_RANGE;
@@ -104,46 +138,86 @@
myData->minNumFrames = numFrames;
}
- if (myData->inputBuffersToDiscard > 0) {
+ // Silence the output.
+ int32_t numBytes = numFrames * myData->actualOutputChannelCount * sizeof(float);
+ memset(audioData, 0 /* value */, numBytes);
+
+ if (myData->numCallbacksToDrain > 0) {
// Drain the input.
+ int32_t totalFramesRead = 0;
do {
- framesRead = AAudioStream_read(myData->inputStream, myData->inputData,
- numFrames, 0);
- if (framesRead < 0) {
- myData->inputError = framesRead;
- printf("ERROR in read = %d", framesRead);
- result = AAUDIO_CALLBACK_RESULT_STOP;
- } else if (framesRead > 0) {
- myData->inputBuffersToDiscard--;
+ actualFramesRead = readFormattedData(myData, numFrames);
+ if (actualFramesRead) {
+ totalFramesRead += actualFramesRead;
}
- } while(framesRead > 0);
- } else {
- framesRead = AAudioStream_read(myData->inputStream, myData->inputData,
- numFrames, 0);
- if (framesRead < 0) {
- myData->inputError = framesRead;
- printf("ERROR in read = %d", framesRead);
+ // Ignore errors because input stream may not be started yet.
+ } while (actualFramesRead > 0);
+ // Only counts if we actually got some data.
+ if (totalFramesRead > 0) {
+ myData->numCallbacksToDrain--;
+ }
+
+ } else if (myData->numCallbacksToDiscard > 0) {
+ // Ignore. Allow the input to fill back up to equilibrium with the output.
+ actualFramesRead = readFormattedData(myData, numFrames);
+ if (actualFramesRead < 0) {
result = AAUDIO_CALLBACK_RESULT_STOP;
- } else if (framesRead > 0) {
+ }
+ myData->numCallbacksToDiscard--;
- myData->audioRecorder.write(myData->inputData,
- myData->actualInputChannelCount,
- numFrames);
+ } else {
- int32_t numSamples = framesRead * myData->actualInputChannelCount;
- convertPcm16ToFloat(myData->inputData, myData->conversionBuffer, numSamples);
+ int32_t numInputBytes = numFrames * myData->actualInputChannelCount * sizeof(float);
+ memset(myData->inputFloatData, 0 /* value */, numInputBytes);
- myData->loopbackProcessor->process(myData->conversionBuffer,
- myData->actualInputChannelCount,
- outputData,
- myData->actualOutputChannelCount,
- framesRead);
+ // Process data after equilibrium.
+ int64_t inputFramesWritten = AAudioStream_getFramesWritten(myData->inputStream);
+ int64_t inputFramesRead = AAudioStream_getFramesRead(myData->inputStream);
+ int64_t framesAvailable = inputFramesWritten - inputFramesRead;
+ actualFramesRead = readFormattedData(myData, numFrames);
+ if (actualFramesRead < 0) {
+ result = AAUDIO_CALLBACK_RESULT_STOP;
+ } else {
+
+ if (actualFramesRead < numFrames) {
+ if(actualFramesRead < (int32_t) framesAvailable) {
+ printf("insufficient but numFrames = %d"
+ ", actualFramesRead = %d"
+ ", inputFramesWritten = %d"
+ ", inputFramesRead = %d"
+ ", available = %d\n",
+ numFrames,
+ actualFramesRead,
+ (int) inputFramesWritten,
+ (int) inputFramesRead,
+ (int) framesAvailable);
+ }
+ myData->insufficientReadCount++;
+ myData->insufficientReadFrames += numFrames - actualFramesRead; // deficit
+ }
+
+ int32_t numSamples = actualFramesRead * myData->actualInputChannelCount;
+
+ if (myData->actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
+ convertPcm16ToFloat(myData->inputShortData, myData->inputFloatData, numSamples);
+ }
+ // Save for later.
+ myData->audioRecording.write(myData->inputFloatData,
+ myData->actualInputChannelCount,
+ numFrames);
+ // Analyze the data.
+ myData->loopbackProcessor->process(myData->inputFloatData,
+ myData->actualInputChannelCount,
+ outputData,
+ myData->actualOutputChannelCount,
+ numFrames);
myData->isDone = myData->loopbackProcessor->isDone();
if (myData->isDone) {
result = AAUDIO_CALLBACK_RESULT_STOP;
}
}
}
+ myData->framesWrittenTotal += numFrames;
return result;
}
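
To summarize the callback flow above: the first kNumCallbacksToDrain callbacks read the input until it is empty, the next kNumCallbacksToDiscard callbacks read and ignore one burst so the input FIFO can settle at a steady fill level, and only then is data converted, recorded and passed to the analyzer. A compressed sketch of that sequencing (illustrative names, not the real code):

    enum class CallbackPhase { DRAIN, DISCARD, PROCESS };

    CallbackPhase currentPhase(const LoopbackData *data) {
        if (data->numCallbacksToDrain > 0)   return CallbackPhase::DRAIN;   // empty the input
        if (data->numCallbacksToDiscard > 0) return CallbackPhase::DISCARD; // reach equilibrium
        return CallbackPhase::PROCESS;                                      // analyze from here on
    }
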
@@ -151,29 +225,29 @@
static void MyErrorCallbackProc(
AAudioStream *stream __unused,
void *userData __unused,
- aaudio_result_t error)
-{
+ aaudio_result_t error) {
printf("Error Callback, error: %d\n",(int)error);
LoopbackData *myData = (LoopbackData *) userData;
myData->outputError = error;
}
static void usage() {
- printf("loopback: -n{numBursts} -p{outPerf} -P{inPerf} -t{test} -g{gain} -f{freq}\n");
- printf(" -c{inputChannels}\n");
- printf(" -f{freq} sine frequency\n");
- printf(" -g{gain} recirculating loopback gain\n");
- printf(" -m enable MMAP mode\n");
- printf(" -n{numBursts} buffer size, for example 2 for double buffered\n");
- printf(" -p{outPerf} set output AAUDIO_PERFORMANCE_MODE*\n");
- printf(" -P{inPerf} set input AAUDIO_PERFORMANCE_MODE*\n");
- printf(" n for _NONE\n");
- printf(" l for _LATENCY\n");
- printf(" p for _POWER_SAVING;\n");
- printf(" -t{test} select test mode\n");
- printf(" m for sine magnitude\n");
- printf(" e for echo latency (default)\n");
- printf("For example: loopback -b2 -pl -Pn\n");
+ printf("Usage: aaudio_loopback [OPTION]...\n\n");
+ AAudioArgsParser::usage();
+ printf(" -B{frames} input capacity in frames\n");
+ printf(" -C{channels} number of input channels\n");
+ printf(" -F{0,1,2} input format, 1=I16, 2=FLOAT\n");
+ printf(" -g{gain} recirculating loopback gain\n");
+ printf(" -P{inPerf} set input AAUDIO_PERFORMANCE_MODE*\n");
+ printf(" n for _NONE\n");
+ printf(" l for _LATENCY\n");
+ printf(" p for _POWER_SAVING\n");
+ printf(" -t{test} select test mode\n");
+ printf(" m for sine magnitude\n");
+ printf(" e for echo latency (default)\n");
+ printf(" f for file latency, analyzes %s\n\n", FILENAME_ECHOS);
+ printf(" -X use EXCLUSIVE mode for input\n");
+ printf("Example: aaudio_loopback -n2 -pl -Pl -x\n");
}
static aaudio_performance_mode_t parsePerformanceMode(char c) {
@@ -199,6 +273,7 @@
enum {
TEST_SINE_MAGNITUDE = 0,
TEST_ECHO_LATENCY,
+ TEST_FILE_LATENCY,
};
static int parseTestMode(char c) {
@@ -211,6 +286,9 @@
case 'e':
testMode = TEST_ECHO_LATENCY;
break;
+ case 'f':
+ testMode = TEST_FILE_LATENCY;
+ break;
default:
printf("ERROR in value test mode %c\n", c);
break;
@@ -234,9 +312,10 @@
}
}
float gain = 0.98f / maxSample;
+
for (int32_t i = start; i < end; i++) {
float sample = data[i];
- printf("%5.3f ", sample); // actual value
+ printf("%6d: %7.4f ", i, sample); // actual value
sample *= gain;
printAudioScope(sample);
}
@@ -248,26 +327,28 @@
int main(int argc, const char **argv)
{
- AAudioArgsParser argParser;
- AAudioSimplePlayer player;
- AAudioSimpleRecorder recorder;
- LoopbackData loopbackData;
- AAudioStream *outputStream = nullptr;
+ AAudioArgsParser argParser;
+ AAudioSimplePlayer player;
+ AAudioSimpleRecorder recorder;
+ LoopbackData loopbackData;
+ AAudioStream *inputStream = nullptr;
+ AAudioStream *outputStream = nullptr;
- aaudio_result_t result = AAUDIO_OK;
- aaudio_sharing_mode_t requestedInputSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_sharing_mode_t requestedInputSharingMode = AAUDIO_SHARING_MODE_SHARED;
int requestedInputChannelCount = NUM_INPUT_CHANNELS;
- const aaudio_format_t requestedInputFormat = AAUDIO_FORMAT_PCM_I16;
- const aaudio_format_t requestedOutputFormat = AAUDIO_FORMAT_PCM_FLOAT;
- aaudio_format_t actualInputFormat;
- aaudio_format_t actualOutputFormat;
- aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ aaudio_format_t requestedInputFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ int32_t requestedInputCapacity = -1;
+ aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
- int testMode = TEST_ECHO_LATENCY;
- double gain = 1.0;
+ int32_t outputFramesPerBurst = 0;
- int32_t framesPerBurst = 0;
- float *outputData = NULL;
+ aaudio_format_t actualOutputFormat = AAUDIO_FORMAT_INVALID;
+ int32_t actualSampleRate = 0;
+ int written = 0;
+
+ int testMode = TEST_ECHO_LATENCY;
+ double gain = 1.0;
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
@@ -282,9 +363,15 @@
if (arg[0] == '-') {
char option = arg[1];
switch (option) {
+ case 'B':
+ requestedInputCapacity = atoi(&arg[2]);
+ break;
case 'C':
requestedInputChannelCount = atoi(&arg[2]);
break;
+ case 'F':
+ requestedInputFormat = atoi(&arg[2]);
+ break;
case 'g':
gain = atof(&arg[2]);
break;
@@ -317,8 +404,9 @@
}
int32_t requestedDuration = argParser.getDurationSeconds();
- int32_t recordingDuration = std::min(60, requestedDuration);
- loopbackData.audioRecorder.allocate(recordingDuration * SAMPLE_RATE);
+ int32_t requestedDurationMillis = requestedDuration * MILLIS_PER_SECOND;
+ int32_t timeMillis = 0;
+ int32_t recordingDuration = std::min(60 * 5, requestedDuration);
switch(testMode) {
case TEST_SINE_MAGNITUDE:
@@ -328,61 +416,112 @@
loopbackData.echoAnalyzer.setGain(gain);
loopbackData.loopbackProcessor = &loopbackData.echoAnalyzer;
break;
+ case TEST_FILE_LATENCY: {
+ loopbackData.echoAnalyzer.setGain(gain);
+
+ loopbackData.loopbackProcessor = &loopbackData.echoAnalyzer;
+ int read = loopbackData.loopbackProcessor->load(FILENAME_ECHOS);
+ printf("main() read %d mono samples from %s on Android device\n", read, FILENAME_ECHOS);
+ loopbackData.loopbackProcessor->report();
+ return 0;
+ }
+ break;
default:
exit(1);
break;
}
printf("OUTPUT stream ----------------------------------------\n");
- argParser.setFormat(requestedOutputFormat);
result = player.open(argParser, MyDataCallbackProc, MyErrorCallbackProc, &loopbackData);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - player.open() returned %d\n", result);
- goto finish;
+ exit(1);
}
outputStream = player.getStream();
- argParser.compareWithStream(outputStream);
actualOutputFormat = AAudioStream_getFormat(outputStream);
- assert(actualOutputFormat == AAUDIO_FORMAT_PCM_FLOAT);
+ if (actualOutputFormat != AAUDIO_FORMAT_PCM_FLOAT) {
+ fprintf(stderr, "ERROR - only AAUDIO_FORMAT_PCM_FLOAT supported\n");
+ exit(1);
+ }
- printf("INPUT stream ----------------------------------------\n");
+ actualSampleRate = AAudioStream_getSampleRate(outputStream);
+ loopbackData.audioRecording.allocate(recordingDuration * actualSampleRate);
+ loopbackData.audioRecording.setSampleRate(actualSampleRate);
+ outputFramesPerBurst = AAudioStream_getFramesPerBurst(outputStream);
+
+ argParser.compareWithStream(outputStream);
+
+ printf("INPUT stream ----------------------------------------\n");
// Use different parameters for the input.
argParser.setNumberOfBursts(AAUDIO_UNSPECIFIED);
argParser.setFormat(requestedInputFormat);
argParser.setPerformanceMode(inputPerformanceLevel);
argParser.setChannelCount(requestedInputChannelCount);
argParser.setSharingMode(requestedInputSharingMode);
+
+ // Make sure the input buffer has plenty of capacity.
+ // Extra capacity on input should not increase latency if we keep it drained.
+ int32_t inputBufferCapacity = requestedInputCapacity;
+ if (inputBufferCapacity < 0) {
+ int32_t outputBufferCapacity = AAudioStream_getBufferCapacityInFrames(outputStream);
+ inputBufferCapacity = 2 * outputBufferCapacity;
+ }
+ argParser.setBufferCapacity(inputBufferCapacity);
+
result = recorder.open(argParser);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
goto finish;
}
- loopbackData.inputStream = recorder.getStream();
- argParser.compareWithStream(loopbackData.inputStream);
+ inputStream = loopbackData.inputStream = recorder.getStream();
- // This is the number of frames that are read in one chunk by a DMA controller
- // or a DSP or a mixer.
- framesPerBurst = AAudioStream_getFramesPerBurst(outputStream);
+ {
+ int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
+ result = AAudioStream_setBufferSizeInFrames(inputStream, actualCapacity);
+ if (result < 0) {
+ fprintf(stderr, "ERROR - AAudioStream_setBufferSizeInFrames() returned %d\n", result);
+ goto finish;
+ } else {}
+ }
- actualInputFormat = AAudioStream_getFormat(outputStream);
- assert(actualInputFormat == AAUDIO_FORMAT_PCM_I16);
+ argParser.compareWithStream(inputStream);
+ // If the input stream is too small then we cannot satisfy the output callback.
+ {
+ int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
+ if (actualCapacity < 2 * outputFramesPerBurst) {
+ fprintf(stderr, "ERROR - input capacity < 2 * outputFramesPerBurst\n");
+ goto finish;
+ }
+ }
+
+ // ------- Setup loopbackData -----------------------------
+ loopbackData.actualInputFormat = AAudioStream_getFormat(inputStream);
loopbackData.actualInputChannelCount = recorder.getChannelCount();
loopbackData.actualOutputChannelCount = player.getChannelCount();
// Allocate a buffer for the audio data.
- loopbackData.inputFramesMaximum = 32 * framesPerBurst;
- loopbackData.inputBuffersToDiscard = 100;
+ loopbackData.inputFramesMaximum = 32 * AAudioStream_getFramesPerBurst(inputStream);
- loopbackData.inputData = new int16_t[loopbackData.inputFramesMaximum
- * loopbackData.actualInputChannelCount];
- loopbackData.conversionBuffer = new float[loopbackData.inputFramesMaximum *
- loopbackData.actualInputChannelCount];
+ if (loopbackData.actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
+ loopbackData.inputShortData = new int16_t[loopbackData.inputFramesMaximum
+ * loopbackData.actualInputChannelCount]{};
+ }
+ loopbackData.inputFloatData = new float[loopbackData.inputFramesMaximum *
+ loopbackData.actualInputChannelCount]{};
loopbackData.loopbackProcessor->reset();
+ // Start OUTPUT first so INPUT does not overflow.
+ result = player.start();
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_requestStart(output) returned %d = %s\n",
+ result, AAudio_convertResultToText(result));
+ goto finish;
+ }
+
result = recorder.start();
if (result != AAUDIO_OK) {
printf("ERROR - AAudioStream_requestStart(input) returned %d = %s\n",
@@ -390,16 +529,8 @@
goto finish;
}
- result = player.start();
- if (result != AAUDIO_OK) {
- printf("ERROR - AAudioStream_requestStart(output) returned %d = %s\n",
- result, AAudio_convertResultToText(result));
- goto finish;
- }
-
- printf("------- sleep while the callback runs --------------\n");
- fflush(stdout);
- for (int i = requestedDuration; i > 0 ; i--) {
+ printf("------- sleep and log while the callback runs --------------\n");
+ while (timeMillis <= requestedDurationMillis) {
if (loopbackData.inputError != AAUDIO_OK) {
printf(" ERROR on input stream\n");
break;
@@ -407,60 +538,128 @@
printf(" ERROR on output stream\n");
break;
} else if (loopbackData.isDone) {
- printf(" test says it is done!\n");
+ printf(" Test says it is DONE!\n");
break;
} else {
- sleep(1);
- printf("%4d: ", i);
+ // Log a line of stream data.
+ printf("%7.3f: ", 0.001 * timeMillis); // display in seconds
loopbackData.loopbackProcessor->printStatus();
+ printf(" insf %3d,", (int) loopbackData.insufficientReadCount);
- int64_t inputFramesWritten = AAudioStream_getFramesWritten(loopbackData.inputStream);
- int64_t inputFramesRead = AAudioStream_getFramesRead(loopbackData.inputStream);
+ int64_t inputFramesWritten = AAudioStream_getFramesWritten(inputStream);
+ int64_t inputFramesRead = AAudioStream_getFramesRead(inputStream);
int64_t outputFramesWritten = AAudioStream_getFramesWritten(outputStream);
int64_t outputFramesRead = AAudioStream_getFramesRead(outputStream);
- printf(" INPUT: wr %lld rd %lld state %s, OUTPUT: wr %lld rd %lld state %s, xruns %d\n",
+ static const int textOffset = strlen("AAUDIO_STREAM_STATE_"); // strip this off
+ printf(" | INPUT: wr %7lld - rd %7lld = %5lld, st %8s, oruns %3d",
(long long) inputFramesWritten,
(long long) inputFramesRead,
- AAudio_convertStreamStateToText(AAudioStream_getState(loopbackData.inputStream)),
+ (long long) (inputFramesWritten - inputFramesRead),
+ &AAudio_convertStreamStateToText(
+ AAudioStream_getState(inputStream))[textOffset],
+ AAudioStream_getXRunCount(inputStream));
+
+ printf(" | OUTPUT: wr %7lld - rd %7lld = %5lld, st %8s, uruns %3d\n",
(long long) outputFramesWritten,
(long long) outputFramesRead,
- AAudio_convertStreamStateToText(AAudioStream_getState(outputStream)),
+ (long long) (outputFramesWritten - outputFramesRead),
+ &AAudio_convertStreamStateToText(
+ AAudioStream_getState(outputStream))[textOffset],
AAudioStream_getXRunCount(outputStream)
);
}
+ int32_t periodMillis = (timeMillis < 2000) ? PERIOD_MILLIS / 4 : PERIOD_MILLIS;
+ usleep(periodMillis * 1000);
+ timeMillis += periodMillis;
+ }
+
+ result = player.stop();
+ if (result != AAUDIO_OK) {
+ printf("ERROR - player.stop() returned %d = %s\n",
+ result, AAudio_convertResultToText(result));
+ goto finish;
+ }
+
+ result = recorder.stop();
+ if (result != AAUDIO_OK) {
+ printf("ERROR - recorder.stop() returned %d = %s\n",
+ result, AAudio_convertResultToText(result));
+ goto finish;
}
printf("input error = %d = %s\n",
- loopbackData.inputError, AAudio_convertResultToText(loopbackData.inputError));
-
- printf("AAudioStream_getXRunCount %d\n", AAudioStream_getXRunCount(outputStream));
- printf("framesRead = %8d\n", (int) AAudioStream_getFramesRead(outputStream));
- printf("framesWritten = %8d\n", (int) AAudioStream_getFramesWritten(outputStream));
- printf("min numFrames = %8d\n", (int) loopbackData.minNumFrames);
- printf("max numFrames = %8d\n", (int) loopbackData.maxNumFrames);
+ loopbackData.inputError, AAudio_convertResultToText(loopbackData.inputError));
if (loopbackData.inputError == AAUDIO_OK) {
if (testMode == TEST_SINE_MAGNITUDE) {
- printAudioGraph(loopbackData.audioRecorder, 200);
+ printAudioGraph(loopbackData.audioRecording, 200);
}
+ // Print again so we don't have to scroll past waveform.
+ printf("OUTPUT Stream ----------------------------------------\n");
+ argParser.compareWithStream(outputStream);
+ printf("INPUT Stream ----------------------------------------\n");
+ argParser.compareWithStream(inputStream);
+
loopbackData.loopbackProcessor->report();
}
{
- int written = loopbackData.audioRecorder.save(FILENAME);
- printf("main() wrote %d mono samples to %s on Android device\n", written, FILENAME);
+ int32_t framesRead = AAudioStream_getFramesRead(inputStream);
+ int32_t framesWritten = AAudioStream_getFramesWritten(inputStream);
+ printf("Callback Results ---------------------------------------- INPUT\n");
+ printf(" input overruns = %d\n", AAudioStream_getXRunCount(inputStream));
+ printf(" framesWritten = %8d\n", framesWritten);
+ printf(" framesRead = %8d\n", framesRead);
+ printf(" myFramesRead = %8d\n", (int) loopbackData.framesReadTotal);
+ printf(" written - read = %8d\n", (int) (framesWritten - framesRead));
+ printf(" insufficient # = %8d\n", (int) loopbackData.insufficientReadCount);
+ if (loopbackData.insufficientReadCount > 0) {
+ printf(" insufficient frames = %8d\n", (int) loopbackData.insufficientReadFrames);
+ }
+ }
+ {
+ int32_t framesRead = AAudioStream_getFramesRead(outputStream);
+ int32_t framesWritten = AAudioStream_getFramesWritten(outputStream);
+ printf("Callback Results ---------------------------------------- OUTPUT\n");
+ printf(" output underruns = %d\n", AAudioStream_getXRunCount(outputStream));
+ printf(" myFramesWritten = %8d\n", (int) loopbackData.framesWrittenTotal);
+ printf(" framesWritten = %8d\n", framesWritten);
+ printf(" framesRead = %8d\n", framesRead);
+ printf(" min numFrames = %8d\n", (int) loopbackData.minNumFrames);
+ printf(" max numFrames = %8d\n", (int) loopbackData.maxNumFrames);
+ }
+
+ written = loopbackData.loopbackProcessor->save(FILENAME_ECHOS);
+ if (written > 0) {
+ printf("main() wrote %8d mono samples to \"%s\" on Android device\n",
+ written, FILENAME_ECHOS);
+ }
+
+ written = loopbackData.audioRecording.save(FILENAME_ALL);
+ if (written > 0) {
+ printf("main() wrote %8d mono samples to \"%s\" on Android device\n",
+ written, FILENAME_ALL);
+ }
+
+ if (loopbackData.loopbackProcessor->getResult() < 0) {
+ printf("ERROR: LOOPBACK PROCESSING FAILED. Maybe because the volume was too low.\n");
+ result = loopbackData.loopbackProcessor->getResult();
+ }
+ if (loopbackData.insufficientReadCount > 3) {
+ printf("ERROR: LOOPBACK PROCESSING FAILED. insufficientReadCount too high\n");
+ result = AAUDIO_ERROR_UNAVAILABLE;
}
finish:
player.close();
recorder.close();
- delete[] loopbackData.conversionBuffer;
- delete[] loopbackData.inputData;
- delete[] outputData;
+ delete[] loopbackData.inputFloatData;
+ delete[] loopbackData.inputShortData;
- printf(RESULT_TAG "error = %d = %s\n", result, AAudio_convertResultToText(result));
- if ((result != AAUDIO_OK)) {
- printf("error %d = %s\n", result, AAudio_convertResultToText(result));
+ printf(RESULT_TAG "result = %d \n", result); // machine readable
+ printf("result is %s\n", AAudio_convertResultToText(result)); // human readable
+ if (result != AAUDIO_OK) {
+ printf("FAILURE\n");
return EXIT_FAILURE;
} else {
printf("SUCCESS\n");
diff --git a/media/libaaudio/examples/loopback/src/loopback.sh b/media/libaaudio/examples/loopback/src/loopback.sh
index bc63125..a5712b8 100644
--- a/media/libaaudio/examples/loopback/src/loopback.sh
+++ b/media/libaaudio/examples/loopback/src/loopback.sh
@@ -1,10 +1,30 @@
#!/system/bin/sh
# Run a loopback test in the background after a delay.
-# To run the script enter:
+# To run the script, enter these commands once:
+# adb disable-verity
+# adb reboot
+# adb remount
+# adb sync
+# adb push loopback.sh /data/
+# For each test run:
# adb shell "nohup sh /data/loopback.sh &"
+# Quickly connect USB audio if needed, either manually or via Tigertail switch.
+# Wait until the test completes, restore USB to host if needed, and then:
+# adb pull /data/loopreport.txt
+# adb pull /data/loopback_all.wav
+# adb pull /data/loopback_echos.wav
SLEEP_TIME=10
-TEST_COMMAND="aaudio_loopback -pl -Pl -C1 -n2 -m2 -tm -d5"
+TEST_COMMAND="/data/nativetest/aaudio_loopback/aaudio_loopback -pl -Pl -C1 -n2 -m2 -te -d5"
+# Partial list of options:
+# -pl (output) performance mode: low latency
+# -Pl input performance mode: low latency
+# -C1 input channel count: 1
+# -n2 number of bursts: 2
+# -m2 mmap policy: 2
+# -t? test mode: -tm for sine magnitude, -te for echo latency, -tf for file latency
+# -d5 device ID
+# For full list of available options, see AAudioArgsParser.h and loopback.cpp
echo "Plug in USB Mir and Fun Plug."
echo "Test will start in ${SLEEP_TIME} seconds: ${TEST_COMMAND}"
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index ada37e2..88d7401 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -17,7 +17,10 @@
#ifndef AAUDIO_EXAMPLE_ARGS_PARSER_H
#define AAUDIO_EXAMPLE_ARGS_PARSER_H
-#include <cctype>
+#define MAX_CHANNELS 8
+
+//#include <cctype>
+#include <dlfcn.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
@@ -27,7 +30,63 @@
#include "AAudioExampleUtils.h"
-// TODO use this as a base class within AAudio
+
+static void (*s_setUsage)(AAudioStreamBuilder* builder, aaudio_usage_t usage) = nullptr;
+static void (*s_setContentType)(AAudioStreamBuilder* builder,
+ aaudio_content_type_t contentType) = nullptr;
+static void (*s_setInputPreset)(AAudioStreamBuilder* builder,
+ aaudio_input_preset_t inputPreset) = nullptr;
+
+static bool s_loadAttempted = false;
+static aaudio_usage_t (*s_getUsage)(AAudioStream *stream) = nullptr;
+static aaudio_content_type_t (*s_getContentType)(AAudioStream *stream) = nullptr;
+static aaudio_input_preset_t (*s_getInputPreset)(AAudioStream *stream) = nullptr;
+
+// Link to test functions in shared library.
+static void loadFutureFunctions() {
+ if (s_loadAttempted) return; // only try once
+ s_loadAttempted = true;
+
+ void *handle = dlopen("libaaudio.so", RTLD_NOW);
+ if (handle != nullptr) {
+ s_setUsage = (void (*)(AAudioStreamBuilder *, aaudio_usage_t))
+ dlsym(handle, "AAudioStreamBuilder_setUsage");
+ if (s_setUsage == nullptr) goto error;
+
+ s_setContentType = (void (*)(AAudioStreamBuilder *, aaudio_content_type_t))
+ dlsym(handle, "AAudioStreamBuilder_setContentType");
+ if (s_setContentType == nullptr) goto error;
+
+ s_setInputPreset = (void (*)(AAudioStreamBuilder *, aaudio_input_preset_t))
+ dlsym(handle, "AAudioStreamBuilder_setInputPreset");
+ if (s_setInputPreset == nullptr) goto error;
+
+ s_getUsage = (aaudio_usage_t (*)(AAudioStream *))
+ dlsym(handle, "AAudioStream_getUsage");
+ if (s_getUsage == nullptr) goto error;
+
+ s_getContentType = (aaudio_content_type_t (*)(AAudioStream *))
+ dlsym(handle, "AAudioStream_getContentType");
+ if (s_getContentType == nullptr) goto error;
+
+ s_getInputPreset = (aaudio_input_preset_t (*)(AAudioStream *))
+ dlsym(handle, "AAudioStream_getInputPreset");
+ if (s_getInputPreset == nullptr) goto error;
+ }
+ return;
+
+error:
+ // prevent any calls to these functions
+ s_setUsage = nullptr;
+ s_setContentType = nullptr;
+ s_setInputPreset = nullptr;
+ s_getUsage = nullptr;
+ s_getContentType = nullptr;
+ s_getInputPreset = nullptr;
+ dlclose(handle);
+ return;
+}
+
class AAudioParameters {
public:
@@ -39,6 +98,10 @@
}
void setChannelCount(int32_t channelCount) {
+ if (channelCount > MAX_CHANNELS) {
+ printf("Sorry, MAX of %d channels!\n", MAX_CHANNELS);
+ channelCount = MAX_CHANNELS;
+ }
mChannelCount = channelCount;
}
@@ -82,6 +145,30 @@
mPerformanceMode = performanceMode;
}
+ aaudio_usage_t getUsage() const {
+ return mUsage;
+ }
+
+ void setUsage(aaudio_usage_t usage) {
+ mUsage = usage;
+ }
+
+ aaudio_content_type_t getContentType() const {
+ return mContentType;
+ }
+
+ void setContentType(aaudio_content_type_t contentType) {
+ mContentType = contentType;
+ }
+
+ aaudio_input_preset_t getInputPreset() const {
+ return mInputPreset;
+ }
+
+ void setInputPreset(aaudio_input_preset_t inputPreset) {
+ mInputPreset = inputPreset;
+ }
+
int32_t getDeviceId() const {
return mDeviceId;
}
@@ -110,6 +197,24 @@
AAudioStreamBuilder_setDeviceId(builder, mDeviceId);
AAudioStreamBuilder_setSharingMode(builder, mSharingMode);
AAudioStreamBuilder_setPerformanceMode(builder, mPerformanceMode);
+
+ // Call P functions if supported.
+ loadFutureFunctions();
+ if (s_setUsage != nullptr) {
+ s_setUsage(builder, mUsage);
+ } else if (mUsage != AAUDIO_UNSPECIFIED) {
+ printf("WARNING: setUsage not supported\n");
+ }
+ if (s_setContentType != nullptr) {
+ s_setContentType(builder, mContentType);
+ } else if (mContentType != AAUDIO_UNSPECIFIED) {
+ printf("WARNING: setContentType not supported\n");
+ }
+ if (s_setInputPreset != nullptr) {
+ s_setInputPreset(builder, mInputPreset);
+ } else if (mInputPreset != AAUDIO_UNSPECIFIED) {
+ printf("WARNING: setInputPreset not supported\n");
+ }
}
private:
@@ -122,6 +227,10 @@
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
+ aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
+ aaudio_input_preset_t mInputPreset = AAUDIO_UNSPECIFIED;
+
int32_t mNumberOfBursts = AAUDIO_UNSPECIFIED;
};
@@ -152,8 +261,11 @@
case 'd':
setDeviceId(atoi(&arg[2]));
break;
- case 's':
- mDurationSeconds = atoi(&arg[2]);
+ case 'f':
+ setFormat(atoi(&arg[2]));
+ break;
+ case 'i':
+ setInputPreset(atoi(&arg[2]));
break;
case 'm': {
aaudio_policy_t policy = AAUDIO_POLICY_AUTO;
@@ -171,9 +283,18 @@
case 'r':
setSampleRate(atoi(&arg[2]));
break;
+ case 's':
+ mDurationSeconds = atoi(&arg[2]);
+ break;
+ case 'u':
+ setUsage(atoi(&arg[2]));
+ break;
case 'x':
setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
break;
+ case 'y':
+ setContentType(atoi(&arg[2]));
+ break;
default:
unrecognized = true;
break;
@@ -201,24 +322,32 @@
}
static void usage() {
- printf("-c{channels} -d{duration} -m -n{burstsPerBuffer} -p{perfMode} -r{rate} -x\n");
+ printf("-c{channels} -d{deviceId} -m{mmapPolicy} -n{burstsPerBuffer} -p{perfMode}");
+ printf(" -r{rate} -s{seconds} -x\n");
printf(" Default values are UNSPECIFIED unless otherwise stated.\n");
printf(" -b{bufferCapacity} frames\n");
printf(" -c{channels} for example 2 for stereo\n");
printf(" -d{deviceId} default is %d\n", AAUDIO_UNSPECIFIED);
- printf(" -s{duration} in seconds, default is %d\n", DEFAULT_DURATION_SECONDS);
+ printf(" -f{0|1|2} set format\n");
+ printf(" 0 = UNSPECIFIED\n");
+ printf(" 1 = PCM_I16\n");
+ printf(" 2 = FLOAT\n");
+ printf(" -i{inputPreset} eg. 5 for AAUDIO_INPUT_PRESET_CAMCORDER\n");
printf(" -m{0|1|2|3} set MMAP policy\n");
- printf(" 0 = _UNSPECIFIED, default\n");
- printf(" 1 = _NEVER\n");
- printf(" 2 = _AUTO, also if -m is used with no number\n");
- printf(" 3 = _ALWAYS\n");
+ printf(" 0 = _UNSPECIFIED, use aaudio.mmap_policy system property, default\n");
+ printf(" 1 = _NEVER, never use MMAP\n");
+ printf(" 2 = _AUTO, use MMAP if available, default for -m with no number\n");
+ printf(" 3 = _ALWAYS, use MMAP or fail\n");
printf(" -n{numberOfBursts} for setBufferSize\n");
printf(" -p{performanceMode} set output AAUDIO_PERFORMANCE_MODE*, default NONE\n");
printf(" n for _NONE\n");
printf(" l for _LATENCY\n");
printf(" p for _POWER_SAVING;\n");
printf(" -r{sampleRate} for example 44100\n");
+ printf(" -s{duration} in seconds, default is %d\n", DEFAULT_DURATION_SECONDS);
+ printf(" -u{usage} eg. 14 for AAUDIO_USAGE_GAME\n");
printf(" -x to use EXCLUSIVE mode\n");
+ printf(" -y{contentType} eg. 1 for AAUDIO_CONTENT_TYPE_SPEECH\n");
}
static aaudio_performance_mode_t parsePerformanceMode(char c) {
@@ -281,6 +410,24 @@
printf(" PerformanceMode: requested = %d, actual = %d\n",
getPerformanceMode(), AAudioStream_getPerformanceMode(stream));
+
+ loadFutureFunctions();
+
+ if (s_getUsage != nullptr) {
+ printf(" Usage: requested = %d, actual = %d\n",
+ getUsage(), s_getUsage(stream));
+ }
+ if (s_getContentType != nullptr) {
+ printf(" ContentType: requested = %d, actual = %d\n",
+ getContentType(), s_getContentType(stream));
+ }
+
+ if (AAudioStream_getDirection(stream) == AAUDIO_DIRECTION_INPUT
+ && s_getInputPreset != nullptr) {
+ printf(" InputPreset: requested = %d, actual = %d\n",
+ getInputPreset(), s_getInputPreset(stream));
+ }
+
printf(" Is MMAP used? %s\n", AAudioStream_isMMapUsed(stream)
? "yes" : "no");
diff --git a/media/libaaudio/examples/utils/AAudioExampleUtils.h b/media/libaaudio/examples/utils/AAudioExampleUtils.h
index 2671c3a..46b8895 100644
--- a/media/libaaudio/examples/utils/AAudioExampleUtils.h
+++ b/media/libaaudio/examples/utils/AAudioExampleUtils.h
@@ -32,6 +32,7 @@
#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
+template <class T = aaudio_sharing_mode_t>
const char *getSharingModeText(aaudio_sharing_mode_t mode) {
const char *text = "unknown";
switch (mode) {
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index 606c4ba..54b77ba 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -19,11 +19,10 @@
#ifndef AAUDIO_SIMPLE_PLAYER_H
#define AAUDIO_SIMPLE_PLAYER_H
-#include <unistd.h>
#include <sched.h>
+#include <unistd.h>
#include <aaudio/AAudio.h>
-#include <atomic>
#include "AAudioArgsParser.h"
#include "SineGenerator.h"
@@ -31,12 +30,12 @@
#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
#define PERFORMANCE_MODE AAUDIO_PERFORMANCE_MODE_NONE
-// Arbitrary period for glitches, once per second at 48000 Hz.
-#define FORCED_UNDERRUN_PERIOD_FRAMES 48000
+// Arbitrary period for glitches
+#define FORCED_UNDERRUN_PERIOD_FRAMES (2 * 48000)
// How long to sleep in a callback to cause an intentional glitch. For testing.
#define FORCED_UNDERRUN_SLEEP_MICROS (10 * 1000)
-#define MAX_TIMESTAMPS 16
+#define MAX_TIMESTAMPS 16
typedef struct Timestamp {
int64_t position;
@@ -70,13 +69,6 @@
}
// TODO Extract a common base class for record and playback.
- /**
- * Also known as "sample rate"
- * Only call this after open() has been called.
- */
- int32_t getFramesPerSecond() const {
- return getSampleRate(); // alias
- }
/**
* Only call this after open() has been called.
@@ -172,12 +164,12 @@
result = AAudioStreamBuilder_openStream(builder, &mStream);
AAudioStreamBuilder_delete(builder);
+
return result;
}
aaudio_result_t close() {
if (mStream != nullptr) {
- printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
AAudioStream_close(mStream);
mStream = nullptr;
}
@@ -212,13 +204,35 @@
aaudio_result_t result = AAudioStream_requestStop(mStream);
if (result != AAUDIO_OK) {
printf("ERROR - AAudioStream_requestStop() returned %d %s\n",
- result, AAudio_convertResultToText(result));
+ result, AAudio_convertResultToText(result));
}
int32_t xRunCount = AAudioStream_getXRunCount(mStream);
printf("AAudioStream_getXRunCount %d\n", xRunCount);
return result;
}
+ // Pause the stream. AAudio will stop calling your callback function.
+ aaudio_result_t pause() {
+ aaudio_result_t result = AAudioStream_requestPause(mStream);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_requestPause() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+ return result;
+ }
+
+ // Flush the stream. AAudio will stop calling your callback function.
+ aaudio_result_t flush() {
+ aaudio_result_t result = AAudioStream_requestFlush(mStream);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_requestFlush() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
AAudioStream *getStream() const {
return mStream;
}
@@ -232,23 +246,49 @@
typedef struct SineThreadedData_s {
- SineGenerator sineOsc1;
- SineGenerator sineOsc2;
- Timestamp timestamps[MAX_TIMESTAMPS];
- int64_t framesTotal = 0;
- int64_t nextFrameToGlitch = FORCED_UNDERRUN_PERIOD_FRAMES;
- int32_t minNumFrames = INT32_MAX;
- int32_t maxNumFrames = 0;
- int32_t timestampCount = 0; // in timestamps
+ SineGenerator sineOscillators[MAX_CHANNELS];
+ Timestamp timestamps[MAX_TIMESTAMPS];
+ int64_t framesTotal = 0;
+ int64_t nextFrameToGlitch = FORCED_UNDERRUN_PERIOD_FRAMES;
+ int32_t minNumFrames = INT32_MAX;
+ int32_t maxNumFrames = 0;
+ int32_t timestampCount = 0; // in timestamps
+ int32_t sampleRate = 48000;
+ int32_t prefixToneFrames = 0;
+ bool sweepSetup = false;
- int scheduler = 0;
- bool schedulerChecked = false;
- bool forceUnderruns = false;
+ int scheduler = 0;
+ bool schedulerChecked = false;
+ bool forceUnderruns = false;
AAudioSimplePlayer simplePlayer;
int32_t callbackCount = 0;
WakeUp waker{AAUDIO_OK};
+ /**
+ * Set sampleRate first.
+ */
+ void setupSineBlip() {
+ for (int i = 0; i < MAX_CHANNELS; ++i) {
+ double centerFrequency = 880.0 * (i + 2);
+ sineOscillators[i].setup(centerFrequency, sampleRate);
+ sineOscillators[i].setSweep(centerFrequency, centerFrequency, 0.0);
+ }
+ }
+
+ void setupSineSweeps() {
+ for (int i = 0; i < MAX_CHANNELS; ++i) {
+ double centerFrequency = 220.0 * (i + 2);
+ sineOscillators[i].setup(centerFrequency, sampleRate);
+ double minFrequency = centerFrequency * 2.0 / 3.0;
+ // Change range slightly so they will go out of phase.
+ double maxFrequency = centerFrequency * 3.0 / 2.0;
+ double sweepSeconds = 5.0 + i;
+ sineOscillators[i].setSweep(minFrequency, maxFrequency, sweepSeconds);
+ }
+ sweepSetup = true;
+ }
+
} SineThreadedData_t;
// Callback function that fills the audio output buffer.
@@ -265,9 +305,11 @@
return AAUDIO_CALLBACK_RESULT_STOP;
}
SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
- sineData->callbackCount++;
- sineData->framesTotal += numFrames;
+ // Play an initial high tone so we can tell whether the beginning was truncated.
+ if (!sineData->sweepSetup && sineData->framesTotal >= sineData->prefixToneFrames) {
+ sineData->setupSineSweeps();
+ }
if (sineData->forceUnderruns) {
if (sineData->framesTotal > sineData->nextFrameToGlitch) {
@@ -301,33 +343,32 @@
}
int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
- // This code only plays on the first one or two channels.
- // TODO Support arbitrary number of channels.
+
+
+ int numActiveOscilators = (samplesPerFrame > MAX_CHANNELS) ? MAX_CHANNELS : samplesPerFrame;
switch (AAudioStream_getFormat(stream)) {
case AAUDIO_FORMAT_PCM_I16: {
int16_t *audioBuffer = (int16_t *) audioData;
- // Render sine waves as shorts to first channel.
- sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
- // Render sine waves to second channel if there is one.
- if (samplesPerFrame > 1) {
- sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ for (int i = 0; i < numActiveOscilators; ++i) {
+ sineData->sineOscillators[i].render(&audioBuffer[i], samplesPerFrame,
+ numFrames);
}
}
- break;
+ break;
case AAUDIO_FORMAT_PCM_FLOAT: {
float *audioBuffer = (float *) audioData;
- // Render sine waves as floats to first channel.
- sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
- // Render sine waves to second channel if there is one.
- if (samplesPerFrame > 1) {
- sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ for (int i = 0; i < numActiveOscilators; ++i) {
+ sineData->sineOscillators[i].render(&audioBuffer[i], samplesPerFrame,
+ numFrames);
}
}
- break;
+ break;
default:
return AAUDIO_CALLBACK_RESULT_STOP;
}
+ sineData->callbackCount++;
+ sineData->framesTotal += numFrames;
return AAUDIO_CALLBACK_RESULT_CONTINUE;
}
diff --git a/media/libaaudio/examples/utils/AAudioSimpleRecorder.h b/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
index 1344273..869fad0 100644
--- a/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
+++ b/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
@@ -178,7 +178,6 @@
aaudio_result_t close() {
if (mStream != nullptr) {
- printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
AAudioStream_close(mStream);
mStream = nullptr;
}
diff --git a/media/libaaudio/examples/utils/SineGenerator.h b/media/libaaudio/examples/utils/SineGenerator.h
index a755582..9e6d46d 100644
--- a/media/libaaudio/examples/utils/SineGenerator.h
+++ b/media/libaaudio/examples/utils/SineGenerator.h
@@ -31,20 +31,20 @@
}
void setSweep(double frequencyLow, double frequencyHigh, double seconds) {
- mPhaseIncrementLow = frequencyLow * M_PI * 2 / mFrameRate;
- mPhaseIncrementHigh = frequencyHigh * M_PI * 2 / mFrameRate;
-
- double numFrames = seconds * mFrameRate;
- mUpScaler = pow((frequencyHigh / frequencyLow), (1.0 / numFrames));
- mDownScaler = 1.0 / mUpScaler;
- mGoingUp = true;
- mSweeping = true;
+ mSweeping = seconds > 0.0;
+ if (mSweeping) {
+ mPhaseIncrementLow = frequencyLow * M_PI * 2 / mFrameRate;
+ mPhaseIncrementHigh = frequencyHigh * M_PI * 2 / mFrameRate;
+ double numFrames = seconds * mFrameRate;
+ mUpScaler = pow((frequencyHigh / frequencyLow), (1.0 / numFrames));
+ mDownScaler = 1.0 / mUpScaler;
+ }
}
void render(int16_t *buffer, int32_t channelStride, int32_t numFrames) {
int sampleIndex = 0;
for (int i = 0; i < numFrames; i++) {
- buffer[sampleIndex] = (int16_t) (32767 * sin(mPhase) * mAmplitude);
+ buffer[sampleIndex] = (int16_t) (INT16_MAX * sin(mPhase) * mAmplitude);
sampleIndex += channelStride;
advancePhase();
}
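
As a quick check of the setSweep() math above: the per-frame scaler is the constant ratio that carries the phase increment from frequencyLow to frequencyHigh over the requested duration. For example, sweeping from 300 Hz to 600 Hz over 5 s at 48000 frames per second gives numFrames = 240000 and mUpScaler = pow(2.0, 1.0 / 240000) ≈ 1.0000028881, i.e. the frequency rises by 1200 / 240000 = 0.005 cents per frame.
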
@@ -61,6 +61,7 @@
void setAmplitude(double amplitude) {
mAmplitude = amplitude;
}
+
double getAmplitude() const {
return mAmplitude;
}
diff --git a/media/libaaudio/examples/write_sine/Android.bp b/media/libaaudio/examples/write_sine/Android.bp
new file mode 100644
index 0000000..aa25e67
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/Android.bp
@@ -0,0 +1,15 @@
+cc_test {
+ name: "write_sine",
+ srcs: ["src/write_sine.cpp"],
+ cflags: ["-Wall", "-Werror"],
+ shared_libs: ["libaaudio"],
+ header_libs: ["libaaudio_example_utils"],
+}
+
+cc_test {
+ name: "write_sine_callback",
+ srcs: ["src/write_sine_callback.cpp"],
+ cflags: ["-Wall", "-Werror"],
+ shared_libs: ["libaaudio"],
+ header_libs: ["libaaudio_example_utils"],
+}
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index 8c6f783..8e33a31 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -47,6 +47,7 @@
int32_t framesToPlay = 0;
int32_t framesLeft = 0;
int32_t xRunCount = 0;
+ int numActiveOscilators = 0;
float *floatData = nullptr;
int16_t *shortData = nullptr;
@@ -56,7 +57,7 @@
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine wave using AAudio V0.1.2\n", argv[0]);
+ printf("%s - Play a sine wave using AAudio V0.1.3\n", argv[0]);
if (argParser.parseArgs(argc, argv)) {
return EXIT_FAILURE;
@@ -76,8 +77,8 @@
actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- myData.sineOsc1.setup(440.0, actualSampleRate);
- myData.sineOsc2.setup(660.0, actualSampleRate);
+ myData.sampleRate = actualSampleRate;
+ myData.setupSineSweeps();
// Some DMA might use very short bursts of 16 frames. We don't need to write such small
// buffers. But it helps to use a multiple of the burst size for predictable scheduling.
@@ -116,19 +117,18 @@
// Play for a while.
framesToPlay = actualSampleRate * argParser.getDurationSeconds();
framesLeft = framesToPlay;
+ numActiveOscilators = (actualChannelCount > MAX_CHANNELS) ? MAX_CHANNELS : actualChannelCount;
while (framesLeft > 0) {
-
+ // Render as FLOAT or PCM
if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- // Render sine waves to left and right channels.
- myData.sineOsc1.render(&floatData[0], actualChannelCount, framesPerWrite);
- if (actualChannelCount > 1) {
- myData.sineOsc2.render(&floatData[1], actualChannelCount, framesPerWrite);
+ for (int i = 0; i < numActiveOscilators; ++i) {
+ myData.sineOscillators[i].render(&floatData[i], actualChannelCount,
+ framesPerWrite);
}
} else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
- // Render sine waves to left and right channels.
- myData.sineOsc1.render(&shortData[0], actualChannelCount, framesPerWrite);
- if (actualChannelCount > 1) {
- myData.sineOsc2.render(&shortData[1], actualChannelCount, framesPerWrite);
+ for (int i = 0; i < numActiveOscilators; ++i) {
+ myData.sineOscillators[i].render(&shortData[i], actualChannelCount,
+ framesPerWrite);
}
}
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 4f9cde6..e33e9f8 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -28,7 +28,7 @@
#include <aaudio/AAudio.h>
#include "AAudioExampleUtils.h"
#include "AAudioSimplePlayer.h"
-#include "../../utils/AAudioSimplePlayer.h"
+#include "AAudioArgsParser.h"
/**
* Open stream, play some sine waves, then close the stream.
@@ -36,37 +36,39 @@
* @param argParser
* @return AAUDIO_OK or negative error code
*/
-static aaudio_result_t testOpenPlayClose(AAudioArgsParser &argParser)
+static aaudio_result_t testOpenPlayClose(AAudioArgsParser &argParser,
+ int32_t loopCount,
+ int32_t prefixToneMsec,
+ bool forceUnderruns)
{
SineThreadedData_t myData;
AAudioSimplePlayer &player = myData.simplePlayer;
aaudio_result_t result = AAUDIO_OK;
bool disconnected = false;
+ bool bailOut = false;
int64_t startedAtNanos;
printf("----------------------- run complete test --------------------------\n");
myData.schedulerChecked = false;
myData.callbackCount = 0;
- myData.forceUnderruns = false; // set true to test AAudioStream_getXRunCount()
+ myData.forceUnderruns = forceUnderruns; // test AAudioStream_getXRunCount()
result = player.open(argParser,
SimplePlayerDataCallbackProc, SimplePlayerErrorCallbackProc, &myData);
if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - player.open() returned %d\n", result);
+ fprintf(stderr, "ERROR - player.open() returned %s\n",
+ AAudio_convertResultToText(result));
goto error;
}
argParser.compareWithStream(player.getStream());
- // Setup sine wave generators.
- {
- int32_t actualSampleRate = player.getSampleRate();
- myData.sineOsc1.setup(440.0, actualSampleRate);
- myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
- myData.sineOsc1.setAmplitude(0.2);
- myData.sineOsc2.setup(660.0, actualSampleRate);
- myData.sineOsc2.setSweep(350.0, 900.0, 7.0);
- myData.sineOsc2.setAmplitude(0.2);
+ myData.sampleRate = player.getSampleRate();
+ myData.prefixToneFrames = prefixToneMsec * myData.sampleRate / 1000;
+ if (myData.prefixToneFrames > 0) {
+ myData.setupSineBlip();
+ } else {
+ myData.setupSineSweeps();
}
#if 0
@@ -78,42 +80,93 @@
}
#endif
- result = player.start();
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - player.start() returned %d\n", result);
- goto error;
- }
+ for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
+ // Only play data on every other loop so we can hear if there is stale data.
+ double amplitude;
+ int32_t durationSeconds;
+ if ((loopIndex & 1) == 0) {
+ printf("--------------- SINE ------\n");
+ amplitude = 0.2;
+ durationSeconds = argParser.getDurationSeconds();
+ } else {
+ printf("--------------- QUIET -----\n");
+ amplitude = 0.0;
+ durationSeconds = 2; // just wait briefly when quiet
+ }
+ for (int i = 0; i < MAX_CHANNELS; ++i) {
+ myData.sineOscillators[i].setAmplitude(amplitude);
+ }
- // Play a sine wave in the background.
- printf("Sleep for %d seconds while audio plays in a callback thread.\n",
- argParser.getDurationSeconds());
- startedAtNanos = getNanoseconds(CLOCK_MONOTONIC);
- for (int second = 0; second < argParser.getDurationSeconds(); second++)
- {
- // Sleep a while. Wake up early if there is an error, for example a DISCONNECT.
- long ret = myData.waker.wait(AAUDIO_OK, NANOS_PER_SECOND);
- int64_t millis = (getNanoseconds(CLOCK_MONOTONIC) - startedAtNanos) / NANOS_PER_MILLISECOND;
- result = myData.waker.get();
- printf("wait() returns %ld, aaudio_result = %d, at %6d millis"
- ", second = %d, framesWritten = %8d, underruns = %d\n",
- ret, result, (int) millis,
- second,
- (int) AAudioStream_getFramesWritten(player.getStream()),
- (int) AAudioStream_getXRunCount(player.getStream()));
+ result = player.start();
if (result != AAUDIO_OK) {
- if (result == AAUDIO_ERROR_DISCONNECTED) {
- disconnected = true;
+ fprintf(stderr, "ERROR - player.start() returned %d\n", result);
+ goto error;
+ }
+
+ // Play a sine wave in the background.
+ printf("Sleep for %d seconds while audio plays in a callback thread. %d of %d\n",
+ argParser.getDurationSeconds(), (loopIndex + 1), loopCount);
+ startedAtNanos = getNanoseconds(CLOCK_MONOTONIC);
+ for (int second = 0; second < durationSeconds; second++) {
+ // Sleep a while. Wake up early if there is an error, for example a DISCONNECT.
+ long ret = myData.waker.wait(AAUDIO_OK, NANOS_PER_SECOND);
+ int64_t millis =
+ (getNanoseconds(CLOCK_MONOTONIC) - startedAtNanos) / NANOS_PER_MILLISECOND;
+ result = myData.waker.get();
+ printf("wait() returns %ld, aaudio_result = %d, at %6d millis"
+ ", second = %3d, framesWritten = %8d, underruns = %d\n",
+ ret, result, (int) millis,
+ second,
+ (int) AAudioStream_getFramesWritten(player.getStream()),
+ (int) AAudioStream_getXRunCount(player.getStream()));
+ if (result != AAUDIO_OK) {
+ disconnected = (result == AAUDIO_ERROR_DISCONNECTED);
+ bailOut = true;
+ break;
}
+ }
+ printf("AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+
+ // Alternate between using stop or pause for each sine/quiet pair.
+ // Repeat this pattern: {sine-stop-quiet-stop-sine-pause-quiet-pause}
+ if ((loopIndex & 2) == 0) {
+ printf("STOP, callback # = %d\n", myData.callbackCount);
+ result = player.stop();
+ } else {
+ printf("PAUSE/FLUSH, callback # = %d\n", myData.callbackCount);
+ result = player.pause();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+ result = player.flush();
+ }
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ if (bailOut) {
break;
}
- }
- printf("AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
- printf("call stop() callback # = %d\n", myData.callbackCount);
- result = player.stop();
- if (result != AAUDIO_OK) {
- goto error;
+ {
+ aaudio_stream_state_t state = AAudioStream_getState(player.getStream());
+ aaudio_stream_state_t finalState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ int64_t timeoutNanos = 2000 * NANOS_PER_MILLISECOND;
+ result = AAudioStream_waitForStateChange(player.getStream(), state,
+ &finalState, timeoutNanos);
+ printf("waitForStateChange returns %s, state = %s\n",
+ AAudio_convertResultToText(result),
+ AAudio_convertStreamStateToText(finalState));
+ int64_t written = AAudioStream_getFramesWritten(player.getStream());
+ int64_t read = AAudioStream_getFramesRead(player.getStream());
+ printf(" framesWritten = %lld, framesRead = %lld, diff = %d\n",
+ (long long) written,
+ (long long) read,
+ (int) (written - read));
+ }
+
}
+
printf("call close()\n");
result = player.close();
if (result != AAUDIO_OK) {
@@ -147,23 +200,59 @@
return disconnected ? AAUDIO_ERROR_DISCONNECTED : result;
}
+static void usage() {
+ AAudioArgsParser::usage();
+ printf(" -l{count} loopCount start/stop, every other one is silent\n");
+ printf(" -t{msec} play a high pitched tone at the beginning\n");
+ printf(" -z force periodic underruns by sleeping in callback\n");
+}
+
int main(int argc, const char **argv)
{
AAudioArgsParser argParser;
aaudio_result_t result;
+ int32_t loopCount = 1;
+ int32_t prefixToneMsec = 0;
+ bool forceUnderruns = false;
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine sweep using an AAudio callback V0.1.2\n", argv[0]);
+ printf("%s - Play a sine sweep using an AAudio callback V0.1.4\n", argv[0]);
- if (argParser.parseArgs(argc, argv)) {
- return EXIT_FAILURE;
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (argParser.parseArg(arg)) {
+ // Handle options that are not handled by the ArgParser
+ if (arg[0] == '-') {
+ char option = arg[1];
+ switch (option) {
+ case 'l':
+ loopCount = atoi(&arg[2]);
+ break;
+ case 't':
+ prefixToneMsec = atoi(&arg[2]);
+ break;
+ case 'z':
+ forceUnderruns = true; // Zzzzzzz
+ break;
+ default:
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ } else {
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ }
}
// Keep looping until we can complete the test without disconnecting.
- while((result = testOpenPlayClose(argParser)) == AAUDIO_ERROR_DISCONNECTED);
+ while((result = testOpenPlayClose(argParser, loopCount, prefixToneMsec, forceUnderruns))
+ == AAUDIO_ERROR_DISCONNECTED);
return (result) ? EXIT_FAILURE : EXIT_SUCCESS;
}
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 3c23736..5b29419 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -44,7 +44,15 @@
#define AAUDIO_UNSPECIFIED 0
enum {
+ /**
+ * Audio data will travel out of the device, for example through a speaker.
+ */
AAUDIO_DIRECTION_OUTPUT,
+
+
+ /**
+ * Audio data will travel into the device, for example from a microphone.
+ */
AAUDIO_DIRECTION_INPUT
};
typedef int32_t aaudio_direction_t;
@@ -52,33 +60,112 @@
enum {
AAUDIO_FORMAT_INVALID = -1,
AAUDIO_FORMAT_UNSPECIFIED = 0,
+
+ /**
+ * This format uses the int16_t data type.
+ * The maximum range of the data is -32768 to 32767.
+ */
AAUDIO_FORMAT_PCM_I16,
+
+ /**
+ * This format uses the float data type.
+ * The nominal range of the data is [-1.0f, 1.0f).
+ * Values outside that range may be clipped.
+ *
+ * See also 'floatData' at
+ * https://developer.android.com/reference/android/media/AudioTrack#write(float[],%20int,%20int,%20int)
+ */
AAUDIO_FORMAT_PCM_FLOAT
};
typedef int32_t aaudio_format_t;
+/**
+ * These result codes are returned from AAudio functions to indicate success or failure.
+ * Note that error return codes may change in the future so applications should generally
+ * not rely on specific return codes.
+ */
enum {
+ /**
+ * The call was successful.
+ */
AAUDIO_OK,
AAUDIO_ERROR_BASE = -900, // TODO review
+
+ /**
+ * The audio device was disconnected. This could occur, for example, when headphones
+ * are plugged in or unplugged. The stream cannot be used after the device is disconnected.
+ * Applications should stop and close the stream.
+ * If this error is received in an error callback then another thread should be
+ * used to stop and close the stream.
+ */
AAUDIO_ERROR_DISCONNECTED,
+
+ /**
+ * An invalid parameter was passed to AAudio.
+ */
AAUDIO_ERROR_ILLEGAL_ARGUMENT,
// reserved
AAUDIO_ERROR_INTERNAL = AAUDIO_ERROR_ILLEGAL_ARGUMENT + 2,
+
+ /**
+ * The requested operation is not appropriate for the current state of AAudio.
+ */
AAUDIO_ERROR_INVALID_STATE,
// reserved
// reserved
+ /** The server rejected the handle used to identify the stream.
+ */
AAUDIO_ERROR_INVALID_HANDLE = AAUDIO_ERROR_INVALID_STATE + 3,
// reserved
+
+ /**
+ * The function is not implemented for this stream.
+ */
AAUDIO_ERROR_UNIMPLEMENTED = AAUDIO_ERROR_INVALID_HANDLE + 2,
+
+ /**
+ * A resource or information is unavailable.
+ * This could occur when an application tries to open too many streams,
+ * or a timestamp is not available.
+ */
AAUDIO_ERROR_UNAVAILABLE,
AAUDIO_ERROR_NO_FREE_HANDLES,
+
+ /**
+ * Memory could not be allocated.
+ */
AAUDIO_ERROR_NO_MEMORY,
+
+ /**
+ * A NULL pointer was passed to AAudio,
+ * or a NULL pointer was detected internally.
+ */
AAUDIO_ERROR_NULL,
+
+ /**
+ * An operation took longer than expected.
+ */
AAUDIO_ERROR_TIMEOUT,
AAUDIO_ERROR_WOULD_BLOCK,
+
+ /**
+ * The requested data format is not supported.
+ */
AAUDIO_ERROR_INVALID_FORMAT,
+
+ /**
+ * A requested value was out of range.
+ */
AAUDIO_ERROR_OUT_OF_RANGE,
+
+ /**
+ * The audio service was not available.
+ */
AAUDIO_ERROR_NO_SERVICE,
+
+ /**
+ * The requested sample rate was not supported.
+ */
AAUDIO_ERROR_INVALID_RATE
};
typedef int32_t aaudio_result_t;
@@ -123,20 +210,200 @@
/**
* No particular performance needs. Default.
*/
- AAUDIO_PERFORMANCE_MODE_NONE = 10,
+ AAUDIO_PERFORMANCE_MODE_NONE = 10,
/**
- * Extending battery life is most important.
+ * Extending battery life is more important than low latency.
+ *
+ * This mode is not supported in input streams.
+ * For input, mode NONE will be used if this is requested.
*/
- AAUDIO_PERFORMANCE_MODE_POWER_SAVING,
+ AAUDIO_PERFORMANCE_MODE_POWER_SAVING,
/**
- * Reducing latency is most important.
+ * Reducing latency is more important than battery life.
*/
- AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
};
typedef int32_t aaudio_performance_mode_t;
+/**
+ * The USAGE attribute expresses "why" you are playing a sound, what this sound is used for.
+ * This information is used by certain platforms or routing policies
+ * to make more refined volume or routing decisions.
+ *
+ * Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ *
+ * Added in API level 28.
+ */
+enum {
+ /**
+ * Use this for streaming media, music performance, video, podcasts, etcetera.
+ */
+ AAUDIO_USAGE_MEDIA = 1,
+
+ /**
+ * Use this for voice over IP, telephony, etcetera.
+ */
+ AAUDIO_USAGE_VOICE_COMMUNICATION = 2,
+
+ /**
+ * Use this for sounds associated with telephony such as busy tones, DTMF, etcetera.
+ */
+ AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING = 3,
+
+ /**
+ * Use this to demand the user's attention.
+ */
+ AAUDIO_USAGE_ALARM = 4,
+
+ /**
+ * Use this for notifying the user when a message has arrived or some
+ * other background event has occurred.
+ */
+ AAUDIO_USAGE_NOTIFICATION = 5,
+
+ /**
+ * Use this when the phone rings.
+ */
+ AAUDIO_USAGE_NOTIFICATION_RINGTONE = 6,
+
+ /**
+ * Use this to attract the user's attention when, for example, the battery is low.
+ */
+ AAUDIO_USAGE_NOTIFICATION_EVENT = 10,
+
+ /**
+ * Use this for screen readers, etcetera.
+ */
+ AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY = 11,
+
+ /**
+ * Use this for driving or navigation directions.
+ */
+ AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE = 12,
+
+ /**
+ * Use this for user interface sounds, beeps, etcetera.
+ */
+ AAUDIO_USAGE_ASSISTANCE_SONIFICATION = 13,
+
+ /**
+ * Use this for game audio and sound effects.
+ */
+ AAUDIO_USAGE_GAME = 14,
+
+ /**
+ * Use this for audio responses to user queries, audio instructions or help utterances.
+ */
+ AAUDIO_USAGE_ASSISTANT = 16
+};
+typedef int32_t aaudio_usage_t;
+
+/**
+ * The CONTENT_TYPE attribute describes "what" you are playing.
+ * It expresses the general category of the content. This information is optional,
+ * but if it is known (for instance {@link #AAUDIO_CONTENT_TYPE_MOVIE} for a
+ * movie streaming service or {@link #AAUDIO_CONTENT_TYPE_SPEECH} for
+ * an audio book application) this information might be used by the audio framework to
+ * enforce audio focus.
+ *
+ * Note that these match the equivalent values in AudioAttributes in the Android Java API.
+ *
+ * Added in API level 28.
+ */
+enum {
+
+ /**
+ * Use this for spoken voice, audio books, etcetera.
+ */
+ AAUDIO_CONTENT_TYPE_SPEECH = 1,
+
+ /**
+ * Use this for pre-recorded or live music.
+ */
+ AAUDIO_CONTENT_TYPE_MUSIC = 2,
+
+ /**
+ * Use this for a movie or video soundtrack.
+ */
+ AAUDIO_CONTENT_TYPE_MOVIE = 3,
+
+ /**
+ * Use this for a sound that is designed to accompany a user action,
+ * such as a click or beep sound made when the user presses a button.
+ */
+ AAUDIO_CONTENT_TYPE_SONIFICATION = 4
+};
+typedef int32_t aaudio_content_type_t;
+
+/**
+ * Defines the audio source.
+ * An audio source defines both a default physical source of audio signal and a recording
+ * configuration.
+ *
+ * Note that these match the equivalent values in MediaRecorder.AudioSource in the Android Java API.
+ *
+ * Added in API level 28.
+ */
+enum {
+ /**
+ * Use this preset when other presets do not apply.
+ */
+ AAUDIO_INPUT_PRESET_GENERIC = 1,
+
+ /**
+ * Use this preset when recording video.
+ */
+ AAUDIO_INPUT_PRESET_CAMCORDER = 5,
+
+ /**
+ * Use this preset when doing speech recognition.
+ */
+ AAUDIO_INPUT_PRESET_VOICE_RECOGNITION = 6,
+
+ /**
+ * Use this preset when doing telephony or voice messaging.
+ */
+ AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION = 7,
+
+ /**
+ * Use this preset to obtain an input with no effects.
+ * Note that this input will not have automatic gain control
+ * so the recorded volume may be very low.
+ */
+ AAUDIO_INPUT_PRESET_UNPROCESSED = 9,
+};
+typedef int32_t aaudio_input_preset_t;
+
+/**
+ * These may be used with AAudioStreamBuilder_setSessionId().
+ *
+ * Added in API level 28.
+ */
+enum {
+ /**
+ * Do not allocate a session ID.
+ * Effects cannot be used with this stream.
+ * Default.
+ *
+ * Added in API level 28.
+ */
+ AAUDIO_SESSION_ID_NONE = -1,
+
+ /**
+ * Allocate a session ID that can be used to attach and control
+ * effects using the Java AudioEffects API.
+ * Note that using this may result in higher latency.
+ *
+ * Note that this matches the value of AudioManager.AUDIO_SESSION_ID_GENERATE.
+ *
+ * Added in API level 28.
+ */
+ AAUDIO_SESSION_ID_ALLOCATE = 0,
+};
+typedef int32_t aaudio_session_id_t;
+
typedef struct AAudioStreamStruct AAudioStream;
typedef struct AAudioStreamBuilderStruct AAudioStreamBuilder;
@@ -299,8 +566,14 @@
/**
* Set the requested performance mode.
*
+ * Supported modes are AAUDIO_PERFORMANCE_MODE_NONE, AAUDIO_PERFORMANCE_MODE_POWER_SAVING
+ * and AAUDIO_PERFORMANCE_MODE_LOW_LATENCY.
+ *
* The default, if you do not call this function, is AAUDIO_PERFORMANCE_MODE_NONE.
*
+ * You may not get the mode you requested.
+ * You can call AAudioStream_getPerformanceMode() to find out the final mode for the stream.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param mode the desired performance mode, eg. AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
*/
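
A minimal sketch of that pattern, using only the calls named above (error handling trimmed): request AAUDIO_PERFORMANCE_MODE_LOW_LATENCY on the builder, open the stream, then ask the stream which mode was actually granted.

    #include <aaudio/AAudio.h>

    // Open an output stream, preferring low latency, and check the granted mode.
    aaudio_result_t openPreferLowLatency(AAudioStream **streamOut) {
        AAudioStreamBuilder *builder = NULL;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;
        AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
        result = AAudioStreamBuilder_openStream(builder, streamOut);
        AAudioStreamBuilder_delete(builder);
        if (result != AAUDIO_OK) return result;
        // The granted mode may differ from the request, for example NONE.
        aaudio_performance_mode_t granted = AAudioStream_getPerformanceMode(*streamOut);
        if (granted != AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
            // Accept the granted mode or adjust buffering accordingly.
        }
        return AAUDIO_OK;
    }
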
@@ -308,6 +581,90 @@
aaudio_performance_mode_t mode);
/**
+ * Set the intended use case for the stream.
+ *
+ * The AAudio system will use this information to optimize the
+ * behavior of the stream.
+ * This could, for example, affect how volume and focus is handled for the stream.
+ *
+ * The default, if you do not call this function, is AAUDIO_USAGE_MEDIA.
+ *
+ * Added in API level 28.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param usage the desired usage, eg. AAUDIO_USAGE_GAME
+ */
+AAUDIO_API void AAudioStreamBuilder_setUsage(AAudioStreamBuilder* builder,
+ aaudio_usage_t usage);
+
+/**
+ * Set the type of audio data that the stream will carry.
+ *
+ * The AAudio system will use this information to optimize the
+ * behavior of the stream.
+ * This could, for example, affect whether a stream is paused when a notification occurs.
+ *
+ * The default, if you do not call this function, is AAUDIO_CONTENT_TYPE_MUSIC.
+ *
+ * Added in API level 28.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param contentType the type of audio data, eg. AAUDIO_CONTENT_TYPE_SPEECH
+ */
+AAUDIO_API void AAudioStreamBuilder_setContentType(AAudioStreamBuilder* builder,
+ aaudio_content_type_t contentType);
+
+/**
+ * Set the input (capture) preset for the stream.
+ *
+ * The AAudio system will use this information to optimize the
+ * behavior of the stream.
+ * This could, for example, affect which microphones are used and how the
+ * recorded data is processed.
+ *
+ * The default, if you do not call this function, is AAUDIO_INPUT_PRESET_VOICE_RECOGNITION.
+ * That is because VOICE_RECOGNITION is the preset with the lowest latency
+ * on many platforms.
+ *
+ * Added in API level 28.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param inputPreset the desired configuration for recording
+ */
+AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
+ aaudio_input_preset_t inputPreset);
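
A hedged fragment showing how these setters might be combined; builder is assumed to come from AAudio_createStreamBuilder() and the chosen values are examples only.

    // Tag an output stream as game sound effects.
    AAudioStreamBuilder_setUsage(builder, AAUDIO_USAGE_GAME);
    AAudioStreamBuilder_setContentType(builder, AAUDIO_CONTENT_TYPE_SONIFICATION);
    // For a capture (input) stream one would choose an input preset instead:
    AAudioStreamBuilder_setInputPreset(builder, AAUDIO_INPUT_PRESET_VOICE_RECOGNITION);
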
+
+/** Set the requested session ID.
+ *
+ * The session ID can be used to associate a stream with effects processors.
+ * The effects are controlled using the Android AudioEffect Java API.
+ *
+ * The default, if you do not call this function, is AAUDIO_SESSION_ID_NONE.
+ *
+ * If set to AAUDIO_SESSION_ID_ALLOCATE then a session ID will be allocated
+ * when the stream is opened.
+ *
+ * The allocated session ID can be obtained by calling AAudioStream_getSessionId()
+ * and then used with this function when opening another stream.
+ * This allows effects to be shared between streams.
+ *
+ * Session IDs from AAudio can be used with the Android Java APIs and vice versa.
+ * So a session ID from an AAudio stream can be passed to Java
+ * and effects applied using the Java AudioEffect API.
+ *
+ * Note that allocating or setting a session ID may result in a stream with higher latency.
+ *
+ * Allocated session IDs will always be positive and nonzero.
+ *
+ * Added in API level 28.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param sessionId an allocated sessionID or AAUDIO_SESSION_ID_ALLOCATE
+ */
+AAUDIO_API void AAudioStreamBuilder_setSessionId(AAudioStreamBuilder* builder,
+ aaudio_session_id_t sessionId);
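
The sharing pattern described above, as a short sketch; builder1, builder2, stream1 and stream2 are placeholders for builders and streams created the usual way, and error checks are omitted.

    // Allocate a session ID on the first stream...
    AAudioStreamBuilder_setSessionId(builder1, AAUDIO_SESSION_ID_ALLOCATE);
    AAudioStreamBuilder_openStream(builder1, &stream1);
    aaudio_session_id_t sessionId = AAudioStream_getSessionId(stream1);

    // ...then reuse it so a second stream, or a Java AudioEffect, shares the same effects.
    AAudioStreamBuilder_setSessionId(builder2, sessionId);
    AAudioStreamBuilder_openStream(builder2, &stream2);
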
+
+/**
* Return one of these values from the data callback function.
*/
enum {
@@ -337,7 +694,13 @@
* For an input stream, this function should read and process numFrames of data
* from the audioData buffer.
*
- * Note that this callback function should be considered a "real-time" function.
+ * The audio data is passed through the buffer, so do NOT call AAudioStream_read() or
+ * AAudioStream_write() on the stream that is making the callback.
+ *
+ * Note that numFrames can vary unless AAudioStreamBuilder_setFramesPerDataCallback()
+ * is called.
+ *
+ * Also note that this callback function should be considered a "real-time" function.
* It must not do anything that could cause an unbounded delay because that can cause the
* audio to glitch or pop.
*
@@ -348,6 +711,15 @@
* <li>any network operations such as streaming</li>
* <li>use any mutexes or other synchronization primitives</li>
* <li>sleep</li>
+ * <li>stop or close the stream</li>
+ * <li>AAudioStream_read()</li>
+ * <li>AAudioStream_write()</li>
+ * </ul>
+ *
+ * The following are OK to call from the data callback:
+ * <ul>
+ * <li>AAudioStream_get*()</li>
+ * <li>AAudio_convertResultToText()</li>
* </ul>
*
* If you need to move data, eg. MIDI commands, in or out of the callback function then
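
A minimal data callback sketch that stays within these rules, assuming a float output stream; MyCallbackData is a hypothetical struct carrying the channel count, and the callback would be registered with AAudioStreamBuilder_setDataCallback() before opening the stream.

    typedef struct { int32_t channelCount; } MyCallbackData;   // hypothetical helper

    static aaudio_data_callback_result_t myDataCallback(AAudioStream *stream,
            void *userData, void *audioData, int32_t numFrames) {
        (void) stream;
        MyCallbackData *data = (MyCallbackData *) userData;
        float *out = (float *) audioData;
        // Render silence; a real app would synthesize or copy audio here,
        // without blocking and without touching the stream itself.
        for (int32_t i = 0; i < numFrames * data->channelCount; i++) {
            out[i] = 0.0f;
        }
        return AAUDIO_CALLBACK_RESULT_CONTINUE;   // keep the stream running
    }
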
@@ -356,7 +728,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @param userData the same address that was passed to AAudioStreamBuilder_setCallback()
* @param audioData a pointer to the audio data
- * @param numFrames the number of frames to be processed
+ * @param numFrames the number of frames to be processed, which can vary
* @return AAUDIO_CALLBACK_RESULT_*
*/
typedef aaudio_data_callback_result_t (*AAudioStream_dataCallback)(
@@ -421,6 +793,22 @@
* Prototype for the callback function that is passed to
* AAudioStreamBuilder_setErrorCallback().
*
+ * The following may NOT be called from the error callback:
+ * <ul>
+ * <li>AAudioStream_requestStop()</li>
+ * <li>AAudioStream_requestPause()</li>
+ * <li>AAudioStream_close()</li>
+ * <li>AAudioStream_waitForStateChange()</li>
+ * <li>AAudioStream_read()</li>
+ * <li>AAudioStream_write()</li>
+ * </ul>
+ *
+ * The following are OK to call from the error callback:
+ * <ul>
+ * <li>AAudioStream_get*()</li>
+ * <li>AAudio_convertResultToText()</li>
+ * </ul>
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @param userData the same address that was passed to AAudioStreamBuilder_setErrorCallback()
* @param error an AAUDIO_ERROR_* value.
@@ -431,18 +819,19 @@
aaudio_result_t error);
/**
- * Request that AAudio call this functions if any error occurs on a callback thread.
+ * Request that AAudio call this function if any error occurs or the stream is disconnected.
*
* It will be called, for example, if a headset or a USB device is unplugged causing the stream's
- * device to be unavailable.
- * In response, this function could signal or launch another thread to reopen a
- * stream on another device. Do not reopen the stream in this callback.
- *
- * This will not be called because of actions by the application, such as stopping
- * or closing a stream.
- *
+ * device to be unavailable or "disconnected".
* Another possible cause of error would be a timeout or an unanticipated internal error.
*
+ * In response, this function should signal or create another thread to stop
+ * and close this stream. The other thread could then reopen a stream on another device.
+ * Do not stop or close the stream, or reopen the new stream, directly from this callback.
+ *
+ * This callback will not be called because of actions by the application, such as stopping
+ * or closing a stream.
+ *
* Note that the AAudio callbacks will never be called simultaneously from multiple threads.
*
* @param builder reference provided by AAudio_createStreamBuilder()
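
A sketch of an error callback that follows these restrictions: it only records the error and leaves the stop, close and reopen work to another thread; the atomic flag and the waiting thread are illustrative, not part of this change.

    #include <stdatomic.h>

    static atomic_int gLastError = AAUDIO_OK;   // illustrative shared state, polled elsewhere

    static void myErrorCallback(AAudioStream *stream, void *userData, aaudio_result_t error) {
        (void) stream;
        (void) userData;
        atomic_store(&gLastError, error);       // e.g. AAUDIO_ERROR_DISCONNECTED
        // Wake a worker thread here (condition variable, semaphore, ...).
        // That thread, not this callback, calls AAudioStream_requestStop() and
        // AAudioStream_close(), and may then open a replacement stream.
    }
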
@@ -554,11 +943,13 @@
* This will update the current client state.
*
* <pre><code>
- * aaudio_stream_state_t currentState;
- * aaudio_result_t result = AAudioStream_getState(stream, &currentState);
- * while (result == AAUDIO_OK && currentState != AAUDIO_STREAM_STATE_PAUSING) {
+ * aaudio_result_t result = AAUDIO_OK;
+ * aaudio_stream_state_t currentState = AAudioStream_getState(stream);
+ * aaudio_stream_state_t inputState = currentState;
+ * while (result == AAUDIO_OK && currentState != AAUDIO_STREAM_STATE_PAUSED) {
* result = AAudioStream_waitForStateChange(
- * stream, currentState, &currentState, MY_TIMEOUT_NANOS);
+ * stream, inputState, &currentState, MY_TIMEOUT_NANOS);
+ * inputState = currentState;
* }
* </code></pre>
*
@@ -589,6 +980,8 @@
*
* This call is "strong non-blocking" unless it has to wait for data.
*
+ * If the call times out then zero or a partial frame count will be returned.
+ *
* @param stream A stream created using AAudioStreamBuilder_openStream().
* @param buffer The address of the first sample.
* @param numFrames Number of frames to read. Only complete frames will be written.
@@ -612,6 +1005,8 @@
*
* This call is "strong non-blocking" unless it has to wait for room in the buffer.
*
+ * If the call times out then zero or a partial frame count will be returned.
+ *
* @param stream A stream created using AAudioStreamBuilder_openStream().
* @param buffer The address of the first sample.
* @param numFrames Number of frames to write. Only complete frames will be written.
@@ -636,7 +1031,8 @@
* This cannot be set higher than AAudioStream_getBufferCapacityInFrames().
*
* Note that you will probably not get the exact size you request.
- * Call AAudioStream_getBufferSizeInFrames() to see what the actual final size is.
+ * You can check the return value or call AAudioStream_getBufferSizeInFrames()
+ * to see what the actual final size is.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @param numFrames requested number of frames that can be filled without blocking
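
A small tuning sketch consistent with that advice, assuming the stream is already open; AAudioStream_getFramesPerBurst() is part of the existing API rather than this change, and the factor of two is only an example.

    // Ask for two bursts of buffering; the return value is the size actually granted.
    int32_t framesPerBurst = AAudioStream_getFramesPerBurst(stream);
    aaudio_result_t actualSize = AAudioStream_setBufferSizeInFrames(stream, 2 * framesPerBurst);
    if (actualSize < 0) {
        // Negative values are AAUDIO_ERROR_* codes.
    }
    // AAudioStream_getBufferSizeInFrames(stream) would report the same final size.
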
@@ -683,10 +1079,10 @@
* This call can be used if the application needs to know the value of numFrames before
* the stream is started. This is not normally necessary.
*
- * If a specific size was requested by calling AAudioStreamBuilder_setCallbackSizeInFrames()
+ * If a specific size was requested by calling AAudioStreamBuilder_setFramesPerDataCallback()
* then this will be the same size.
*
- * If AAudioStreamBuilder_setCallbackSizeInFrames() was not called then this will
+ * If AAudioStreamBuilder_setFramesPerDataCallback() was not called then this will
* return the size chosen by AAudio, or AAUDIO_UNSPECIFIED.
*
* AAUDIO_UNSPECIFIED indicates that the callback buffer size for this stream
@@ -771,7 +1167,8 @@
/**
* Passes back the number of frames that have been written since the stream was created.
- * For an output stream, this will be advanced by the application calling write().
+ * For an output stream, this will be advanced by the application calling write()
+ * or by a data callback.
* For an input stream, this will be advanced by the endpoint.
*
* The frame position is monotonically increasing.
@@ -784,7 +1181,8 @@
/**
* Passes back the number of frames that have been read since the stream was created.
* For an output stream, this will be advanced by the endpoint.
- * For an input stream, this will be advanced by the application calling read().
+ * For an input stream, this will be advanced by the application calling read()
+ * or by a data callback.
*
* The frame position is monotonically increasing.
*
@@ -794,6 +1192,30 @@
AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream* stream);
/**
+ * Passes back the session ID associated with this stream.
+ *
+ * The session ID can be used to associate a stream with effects processors.
+ * The effects are controlled using the Android AudioEffect Java API.
+ *
+ * If AAudioStreamBuilder_setSessionId() was called with AAUDIO_SESSION_ID_ALLOCATE
+ * then a new session ID should be allocated once when the stream is opened.
+ *
+ * If AAudioStreamBuilder_setSessionId() was called with a previously allocated
+ * session ID then that value should be returned.
+ *
+ * If AAudioStreamBuilder_setSessionId() was not called then this function should
+ * return AAUDIO_SESSION_ID_NONE.
+ *
+ * The sessionID for a stream should not change once the stream has been opened.
+ *
+ * Added in API level 28.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return session ID or AAUDIO_SESSION_ID_NONE
+ */
+AAUDIO_API aaudio_session_id_t AAudioStream_getSessionId(AAudioStream* stream);
+
+/**
* Passes back the time at which a particular frame was presented.
* This can be used to synchronize audio with video or MIDI.
* It can also be used to align a recorded stream with a playback stream.
@@ -820,6 +1242,36 @@
int64_t *framePosition,
int64_t *timeNanoseconds);
+/**
+ * Return the use case for the stream.
+ *
+ * Added in API level 28.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return usage, for example AAUDIO_USAGE_MEDIA
+ */
+AAUDIO_API aaudio_usage_t AAudioStream_getUsage(AAudioStream* stream);
+
+/**
+ * Return the content type for the stream.
+ *
+ * Added in API level 28.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return content type, for example AAUDIO_CONTENT_TYPE_MUSIC
+ */
+AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* stream);
+
+/**
+ * Return the input preset for the stream.
+ *
+ * Added in API level 28.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return input preset, for example AAUDIO_INPUT_PRESET_CAMCORDER
+ */
+AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream);
+
#ifdef __cplusplus
}
#endif
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index 2ba5250..cbf5921 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -17,6 +17,10 @@
AAudioStreamBuilder_setSharingMode;
AAudioStreamBuilder_setDirection;
AAudioStreamBuilder_setBufferCapacityInFrames;
+ AAudioStreamBuilder_setUsage; # introduced=28
+ AAudioStreamBuilder_setContentType; # introduced=28
+ AAudioStreamBuilder_setInputPreset; # introduced=28
+ AAudioStreamBuilder_setSessionId; # introduced=28
AAudioStreamBuilder_openStream;
AAudioStreamBuilder_delete;
AAudioStream_close;
@@ -42,8 +46,12 @@
AAudioStream_getFormat;
AAudioStream_getSharingMode;
AAudioStream_getDirection;
+ AAudioStream_getUsage; # introduced=28
+ AAudioStream_getContentType; # introduced=28
+ AAudioStream_getInputPreset; # introduced=28
AAudioStream_getFramesWritten;
AAudioStream_getFramesRead;
+ AAudioStream_getSessionId; # introduced=28
AAudioStream_getTimestamp;
AAudioStream_isMMapUsed;
local:
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 788833b..b9e28a0 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -57,6 +57,7 @@
shared_libs: [
"libaudioclient",
+ "libaudioutils",
"liblog",
"libcutils",
"libutils",
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index 07ee2de..dd620e3 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -15,7 +15,7 @@
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "AAudioBinderClient"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -61,11 +61,11 @@
, Singleton<AAudioBinderClient>() {
gKeepBinderClient = this; // so this singleton won't get deleted
mAAudioClient = new AAudioClient(this);
- ALOGV("AAudioBinderClient() this = %p, created mAAudioClient = %p", this, mAAudioClient.get());
+ ALOGV("%s - this = %p, created mAAudioClient = %p", __func__, this, mAAudioClient.get());
}
AAudioBinderClient::~AAudioBinderClient() {
- ALOGV("AAudioBinderClient()::~AAudioBinderClient() destroying %p", this);
+ ALOGV("%s - destroying %p", __func__, this);
Mutex::Autolock _l(mServiceLock);
if (mAAudioService != 0) {
IInterface::asBinder(mAAudioService)->unlinkToDeath(mAAudioClient);
@@ -137,7 +137,7 @@
stream = service->openStream(request, configurationOutput);
if (stream == AAUDIO_ERROR_NO_SERVICE) {
- ALOGE("AAudioBinderClient::openStream lost connection to AAudioService.");
+ ALOGE("openStream lost connection to AAudioService.");
dropAAudioService(); // force a reconnect
} else {
break;
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index 54e8001..3981454 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -36,15 +36,17 @@
AAUDIO_SERVICE_EVENT_PAUSED,
AAUDIO_SERVICE_EVENT_STOPPED,
AAUDIO_SERVICE_EVENT_FLUSHED,
- AAUDIO_SERVICE_EVENT_CLOSED,
AAUDIO_SERVICE_EVENT_DISCONNECTED,
- AAUDIO_SERVICE_EVENT_VOLUME
+ AAUDIO_SERVICE_EVENT_VOLUME,
+ AAUDIO_SERVICE_EVENT_XRUN
} aaudio_service_event_t;
struct AAudioMessageEvent {
aaudio_service_event_t event;
- double dataDouble;
- int64_t dataLong;
+ union {
+ double dataDouble;
+ int64_t dataLong;
+ };
};
typedef struct AAudioServiceMessage_s {
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 153fce3..959db61 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -50,6 +50,14 @@
if (status != NO_ERROR) goto error;
status = parcel->writeInt32(getBufferCapacity());
if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) getUsage());
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) getContentType());
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) getInputPreset());
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32(getSessionId());
+ if (status != NO_ERROR) goto error;
return NO_ERROR;
error:
ALOGE("AAudioStreamConfiguration.writeToParcel(): write failed = %d", status);
@@ -69,16 +77,28 @@
setSamplesPerFrame(value);
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
- setSharingMode(value);
+ setSharingMode((aaudio_sharing_mode_t) value);
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
- setFormat(value);
+ setFormat((aaudio_format_t) value);
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
setDirection((aaudio_direction_t) value);
status = parcel->readInt32(&value);
if (status != NO_ERROR) goto error;
setBufferCapacity(value);
+ status = parcel->readInt32(&value);
+ if (status != NO_ERROR) goto error;
+ setUsage((aaudio_usage_t) value);
+ status = parcel->readInt32(&value);
+ if (status != NO_ERROR) goto error;
+ setContentType((aaudio_content_type_t) value);
+ status = parcel->readInt32(&value);
+ if (status != NO_ERROR) goto error;
+ setInputPreset((aaudio_input_preset_t) value);
+ status = parcel->readInt32(&value);
+ if (status != NO_ERROR) goto error;
+ setSessionId(value);
return NO_ERROR;
error:
ALOGE("AAudioStreamConfiguration.readFromParcel(): read failed = %d", status);
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index 1200ab2..c30c5b9 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "AAudioStreamRequest"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -58,7 +58,7 @@
return NO_ERROR;
error:
- ALOGE("AAudioStreamRequest.writeToParcel(): write failed = %d", status);
+ ALOGE("writeToParcel(): write failed = %d", status);
return status;
}
@@ -80,7 +80,7 @@
return NO_ERROR;
error:
- ALOGE("AAudioStreamRequest.readFromParcel(): read failed = %d", status);
+ ALOGE("readFromParcel(): read failed = %d", status);
return status;
}
@@ -89,9 +89,9 @@
}
void AAudioStreamRequest::dump() const {
- ALOGD("AAudioStreamRequest mUserId = %d", mUserId);
- ALOGD("AAudioStreamRequest mProcessId = %d", mProcessId);
- ALOGD("AAudioStreamRequest mSharingModeMatchRequired = %d", mSharingModeMatchRequired);
- ALOGD("AAudioStreamRequest mInService = %d", mInService);
+ ALOGD("mUserId = %d", mUserId);
+ ALOGD("mProcessId = %d", mProcessId);
+ ALOGD("mSharingModeMatchRequired = %d", mSharingModeMatchRequired);
+ ALOGD("mInService = %d", mInService);
mConfiguration.dump();
}
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index 1a97555..61d7d27 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "AudioEndpointParcelable"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -64,27 +64,54 @@
* The read and write must be symmetric.
*/
status_t AudioEndpointParcelable::writeToParcel(Parcel* parcel) const {
- parcel->writeInt32(mNumSharedMemories);
+ status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
+ if (status != NO_ERROR) goto error;
+
+ status = parcel->writeInt32(mNumSharedMemories);
+ if (status != NO_ERROR) goto error;
+
for (int i = 0; i < mNumSharedMemories; i++) {
- mSharedMemories[i].writeToParcel(parcel);
+ status = mSharedMemories[i].writeToParcel(parcel);
+ if (status != NO_ERROR) goto error;
}
- mUpMessageQueueParcelable.writeToParcel(parcel);
- mDownMessageQueueParcelable.writeToParcel(parcel);
- mUpDataQueueParcelable.writeToParcel(parcel);
- mDownDataQueueParcelable.writeToParcel(parcel);
- return NO_ERROR; // TODO check for errors above
+ status = mUpMessageQueueParcelable.writeToParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mDownMessageQueueParcelable.writeToParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mUpDataQueueParcelable.writeToParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mDownDataQueueParcelable.writeToParcel(parcel);
+ if (status != NO_ERROR) goto error;
+
+ return NO_ERROR;
+
+error:
+ ALOGE("%s returning %d", __func__, status);
+ return status;
}
status_t AudioEndpointParcelable::readFromParcel(const Parcel* parcel) {
- parcel->readInt32(&mNumSharedMemories);
+ status_t status = parcel->readInt32(&mNumSharedMemories);
+ if (status != NO_ERROR) goto error;
+
for (int i = 0; i < mNumSharedMemories; i++) {
mSharedMemories[i].readFromParcel(parcel);
+ if (status != NO_ERROR) goto error;
}
- mUpMessageQueueParcelable.readFromParcel(parcel);
- mDownMessageQueueParcelable.readFromParcel(parcel);
- mUpDataQueueParcelable.readFromParcel(parcel);
- mDownDataQueueParcelable.readFromParcel(parcel);
- return NO_ERROR; // TODO check for errors above
+ status = mUpMessageQueueParcelable.readFromParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mDownMessageQueueParcelable.readFromParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mUpDataQueueParcelable.readFromParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mDownDataQueueParcelable.readFromParcel(parcel);
+ if (status != NO_ERROR) goto error;
+
+ return AAudioConvert_aaudioToAndroidStatus(validate());
+
+error:
+ ALOGE("%s returning %d", __func__, status);
+ return status;
}
aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
@@ -109,52 +136,28 @@
return AAudioConvert_androidToAAudioResult(err);
}
-aaudio_result_t AudioEndpointParcelable::validate() {
- aaudio_result_t result;
+aaudio_result_t AudioEndpointParcelable::validate() const {
if (mNumSharedMemories < 0 || mNumSharedMemories >= MAX_SHARED_MEMORIES) {
- ALOGE("AudioEndpointParcelable invalid mNumSharedMemories = %d", mNumSharedMemories);
+ ALOGE("invalid mNumSharedMemories = %d", mNumSharedMemories);
return AAUDIO_ERROR_INTERNAL;
}
- for (int i = 0; i < mNumSharedMemories; i++) {
- result = mSharedMemories[i].validate();
- if (result != AAUDIO_OK) {
- ALOGE("AudioEndpointParcelable invalid mSharedMemories[%d] = %d", i, result);
- return result;
- }
- }
- if ((result = mUpMessageQueueParcelable.validate()) != AAUDIO_OK) {
- ALOGE("AudioEndpointParcelable invalid mUpMessageQueueParcelable = %d", result);
- return result;
- }
- if ((result = mDownMessageQueueParcelable.validate()) != AAUDIO_OK) {
- ALOGE("AudioEndpointParcelable invalid mDownMessageQueueParcelable = %d", result);
- return result;
- }
- if ((result = mUpDataQueueParcelable.validate()) != AAUDIO_OK) {
- ALOGE("AudioEndpointParcelable invalid mUpDataQueueParcelable = %d", result);
- return result;
- }
- if ((result = mDownDataQueueParcelable.validate()) != AAUDIO_OK) {
- ALOGE("AudioEndpointParcelable invalid mDownDataQueueParcelable = %d", result);
- return result;
- }
return AAUDIO_OK;
}
void AudioEndpointParcelable::dump() {
- ALOGD("AudioEndpointParcelable ======================================= BEGIN");
- ALOGD("AudioEndpointParcelable mNumSharedMemories = %d", mNumSharedMemories);
+ ALOGD("======================================= BEGIN");
+ ALOGD("mNumSharedMemories = %d", mNumSharedMemories);
for (int i = 0; i < mNumSharedMemories; i++) {
mSharedMemories[i].dump();
}
- ALOGD("AudioEndpointParcelable mUpMessageQueueParcelable =========");
+ ALOGD("mUpMessageQueueParcelable =========");
mUpMessageQueueParcelable.dump();
- ALOGD("AudioEndpointParcelable mDownMessageQueueParcelable =======");
+ ALOGD("mDownMessageQueueParcelable =======");
mDownMessageQueueParcelable.dump();
- ALOGD("AudioEndpointParcelable mUpDataQueueParcelable ============");
+ ALOGD("mUpDataQueueParcelable ============");
mUpDataQueueParcelable.dump();
- ALOGD("AudioEndpointParcelable mDownDataQueueParcelable ==========");
+ ALOGD("mDownDataQueueParcelable ==========");
mDownDataQueueParcelable.dump();
- ALOGD("AudioEndpointParcelable ======================================= END");
+ ALOGD("======================================= END");
}
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
index aa8573f..e4f8b9e 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -56,8 +56,6 @@
aaudio_result_t resolve(EndpointDescriptor *descriptor);
- aaudio_result_t validate();
-
aaudio_result_t close();
void dump();
@@ -70,6 +68,8 @@
RingBufferParcelable mDownDataQueueParcelable; // eg. playback
private:
+ aaudio_result_t validate() const;
+
int32_t mNumSharedMemories = 0;
SharedMemoryParcelable mSharedMemories[MAX_SHARED_MEMORIES];
};
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index b3c4934..620edc7 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -121,17 +121,11 @@
ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION passed result %d", result);
return result;
}
- err = parcelable.readFromParcel(&reply);;
+ err = parcelable.readFromParcel(&reply);
if (err != NO_ERROR) {
ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) read endpoint %d", err);
return AAudioConvert_androidToAAudioResult(err);
}
- //parcelable.dump();
- result = parcelable.validate();
- if (result != AAUDIO_OK) {
- ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION validation fails %d", result);
- return result;
- }
return result;
}
@@ -250,6 +244,7 @@
pid_t tid;
int64_t nanoseconds;
aaudio_result_t result;
+ status_t status = NO_ERROR;
ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
switch(code) {
@@ -294,21 +289,20 @@
case GET_STREAM_DESCRIPTION: {
CHECK_INTERFACE(IAAudioService, data, reply);
- data.readInt32(&streamHandle);
+ status = data.readInt32(&streamHandle);
+ if (status != NO_ERROR) {
+ return status;
+ }
aaudio::AudioEndpointParcelable parcelable;
result = getStreamDescription(streamHandle, parcelable);
if (result != AAUDIO_OK) {
return AAudioConvert_aaudioToAndroidStatus(result);
}
- result = parcelable.validate();
- if (result != AAUDIO_OK) {
- ALOGE("BnAAudioService::onTransact getStreamDescription() returns %d", result);
- parcelable.dump();
- return AAudioConvert_aaudioToAndroidStatus(result);
+ status = reply->writeInt32(result);
+ if (status != NO_ERROR) {
+ return status;
}
- reply->writeInt32(result);
- parcelable.writeToParcel(reply);
- return NO_ERROR;
+ return parcelable.writeToParcel(reply);
} break;
case START_STREAM: {
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
index 6b74b21..4996b3f 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -14,13 +14,14 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "RingBufferParcelable"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <stdint.h>
#include <binder/Parcelable.h>
+#include <utility/AAudioUtilities.h>
#include "binding/AAudioServiceDefinitions.h"
#include "binding/SharedRegionParcelable.h"
@@ -79,7 +80,10 @@
* The read and write must be symmetric.
*/
status_t RingBufferParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = parcel->writeInt32(mCapacityInFrames);
+ status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
+ if (status != NO_ERROR) goto error;
+
+ status = parcel->writeInt32(mCapacityInFrames);
if (status != NO_ERROR) goto error;
if (mCapacityInFrames > 0) {
status = parcel->writeInt32(mBytesPerFrame);
@@ -97,7 +101,7 @@
}
return NO_ERROR;
error:
- ALOGE("RingBufferParcelable::writeToParcel() error = %d", status);
+ ALOGE("%s returning %d", __func__, status);
return status;
}
@@ -118,9 +122,9 @@
status = mDataParcelable.readFromParcel(parcel);
if (status != NO_ERROR) goto error;
}
- return NO_ERROR;
+ return AAudioConvert_aaudioToAndroidStatus(validate());
error:
- ALOGE("RingBufferParcelable::readFromParcel() error = %d", status);
+ ALOGE("%s returning %d", __func__, status);
return status;
}
@@ -151,42 +155,29 @@
return AAUDIO_OK;
}
-aaudio_result_t RingBufferParcelable::validate() {
- aaudio_result_t result;
+aaudio_result_t RingBufferParcelable::validate() const {
if (mCapacityInFrames < 0 || mCapacityInFrames >= 32 * 1024) {
- ALOGE("RingBufferParcelable invalid mCapacityInFrames = %d", mCapacityInFrames);
+ ALOGE("invalid mCapacityInFrames = %d", mCapacityInFrames);
return AAUDIO_ERROR_INTERNAL;
}
if (mBytesPerFrame < 0 || mBytesPerFrame >= 256) {
- ALOGE("RingBufferParcelable invalid mBytesPerFrame = %d", mBytesPerFrame);
+ ALOGE("invalid mBytesPerFrame = %d", mBytesPerFrame);
return AAUDIO_ERROR_INTERNAL;
}
if (mFramesPerBurst < 0 || mFramesPerBurst >= 16 * 1024) {
- ALOGE("RingBufferParcelable invalid mFramesPerBurst = %d", mFramesPerBurst);
+ ALOGE("invalid mFramesPerBurst = %d", mFramesPerBurst);
return AAUDIO_ERROR_INTERNAL;
}
- if ((result = mReadCounterParcelable.validate()) != AAUDIO_OK) {
- ALOGE("RingBufferParcelable invalid mReadCounterParcelable = %d", result);
- return result;
- }
- if ((result = mWriteCounterParcelable.validate()) != AAUDIO_OK) {
- ALOGE("RingBufferParcelable invalid mWriteCounterParcelable = %d", result);
- return result;
- }
- if ((result = mDataParcelable.validate()) != AAUDIO_OK) {
- ALOGE("RingBufferParcelable invalid mDataParcelable = %d", result);
- return result;
- }
return AAUDIO_OK;
}
void RingBufferParcelable::dump() {
- ALOGD("RingBufferParcelable mCapacityInFrames = %d ---------", mCapacityInFrames);
+ ALOGD("mCapacityInFrames = %d ---------", mCapacityInFrames);
if (mCapacityInFrames > 0) {
- ALOGD("RingBufferParcelable mBytesPerFrame = %d", mBytesPerFrame);
- ALOGD("RingBufferParcelable mFramesPerBurst = %d", mFramesPerBurst);
- ALOGD("RingBufferParcelable mFlags = %u", mFlags);
+ ALOGD("mBytesPerFrame = %d", mBytesPerFrame);
+ ALOGD("mFramesPerBurst = %d", mFramesPerBurst);
+ ALOGD("mFlags = %u", mFlags);
mReadCounterParcelable.dump();
mWriteCounterParcelable.dump();
mDataParcelable.dump();
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.h b/media/libaaudio/src/binding/RingBufferParcelable.h
index bd562f2..1dbcf07 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.h
+++ b/media/libaaudio/src/binding/RingBufferParcelable.h
@@ -66,11 +66,12 @@
aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
- aaudio_result_t validate();
-
void dump();
private:
+
+ aaudio_result_t validate() const;
+
SharedRegionParcelable mReadCounterParcelable;
SharedRegionParcelable mWriteCounterParcelable;
SharedRegionParcelable mDataParcelable;
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index 90217ab..0b0cf77 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "SharedMemoryParcelable"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -43,16 +43,18 @@
void SharedMemoryParcelable::setup(const unique_fd& fd, int32_t sizeInBytes) {
mFd.reset(dup(fd.get())); // store a duplicate fd
- ALOGV("SharedMemoryParcelable::setup(%d -> %d, %d) this = %p\n",
- fd.get(), mFd.get(), sizeInBytes, this);
+ ALOGV("setup(%d -> %d, %d) this = %p\n", fd.get(), mFd.get(), sizeInBytes, this);
mSizeInBytes = sizeInBytes;
}
status_t SharedMemoryParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = parcel->writeInt32(mSizeInBytes);
+ status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
+ if (status != NO_ERROR) return status;
+
+ status = parcel->writeInt32(mSizeInBytes);
if (status != NO_ERROR) return status;
if (mSizeInBytes > 0) {
- ALOGV("SharedMemoryParcelable::writeToParcel() mFd = %d, this = %p\n", mFd.get(), this);
+ ALOGV("writeToParcel() mFd = %d, this = %p\n", mFd.get(), this);
status = parcel->writeUniqueFileDescriptor(mFd);
ALOGE_IF(status != NO_ERROR, "SharedMemoryParcelable writeDupFileDescriptor failed : %d",
status);
@@ -62,22 +64,27 @@
status_t SharedMemoryParcelable::readFromParcel(const Parcel* parcel) {
status_t status = parcel->readInt32(&mSizeInBytes);
- if (status != NO_ERROR) {
- return status;
- }
+ if (status != NO_ERROR) goto error;
+
if (mSizeInBytes > 0) {
// The Parcel owns the file descriptor and will close it later.
unique_fd mmapFd;
status = parcel->readUniqueFileDescriptor(&mmapFd);
if (status != NO_ERROR) {
- ALOGE("SharedMemoryParcelable::readFromParcel() readUniqueFileDescriptor() failed : %d",
- status);
- } else {
- // Resolve the memory now while we still have the FD from the Parcel.
- // Closing the FD will not affect the shared memory once mmap() has been called.
- status = AAudioConvert_androidToAAudioResult(resolveSharedMemory(mmapFd));
+ ALOGE("readFromParcel() readUniqueFileDescriptor() failed : %d", status);
+ goto error;
}
+
+ // Resolve the memory now while we still have the FD from the Parcel.
+ // Closing the FD will not affect the shared memory once mmap() has been called.
+ aaudio_result_t result = resolveSharedMemory(mmapFd);
+ status = AAudioConvert_aaudioToAndroidStatus(result);
+ if (status != NO_ERROR) goto error;
}
+
+ return AAudioConvert_aaudioToAndroidStatus(validate());
+
+error:
return status;
}
@@ -85,7 +92,7 @@
if (mResolvedAddress != MMAP_UNRESOLVED_ADDRESS) {
int err = munmap(mResolvedAddress, mSizeInBytes);
if (err < 0) {
- ALOGE("SharedMemoryParcelable::close() munmap() failed %d", err);
+ ALOGE("close() munmap() failed %d", err);
return AAudioConvert_androidToAAudioResult(err);
}
mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
@@ -97,8 +104,7 @@
mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ | PROT_WRITE,
MAP_SHARED, fd.get(), 0);
if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
- ALOGE("SharedMemoryParcelable mmap() failed for fd = %d, errno = %s",
- fd.get(), strerror(errno));
+ ALOGE("mmap() failed for fd = %d, errno = %s", fd.get(), strerror(errno));
return AAUDIO_ERROR_INTERNAL;
}
return AAUDIO_OK;
@@ -107,10 +113,10 @@
aaudio_result_t SharedMemoryParcelable::resolve(int32_t offsetInBytes, int32_t sizeInBytes,
void **regionAddressPtr) {
if (offsetInBytes < 0) {
- ALOGE("SharedMemoryParcelable illegal offsetInBytes = %d", offsetInBytes);
+ ALOGE("illegal offsetInBytes = %d", offsetInBytes);
return AAUDIO_ERROR_OUT_OF_RANGE;
} else if ((offsetInBytes + sizeInBytes) > mSizeInBytes) {
- ALOGE("SharedMemoryParcelable out of range, offsetInBytes = %d, "
+ ALOGE("out of range, offsetInBytes = %d, "
"sizeInBytes = %d, mSizeInBytes = %d",
offsetInBytes, sizeInBytes, mSizeInBytes);
return AAUDIO_ERROR_OUT_OF_RANGE;
@@ -122,16 +128,15 @@
if (mFd.get() != -1) {
result = resolveSharedMemory(mFd);
} else {
- ALOGE("SharedMemoryParcelable has no file descriptor for shared memory.");
+ ALOGE("has no file descriptor for shared memory.");
result = AAUDIO_ERROR_INTERNAL;
}
}
if (result == AAUDIO_OK && mResolvedAddress != MMAP_UNRESOLVED_ADDRESS) {
*regionAddressPtr = mResolvedAddress + offsetInBytes;
- ALOGV("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
- ALOGV("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
- offsetInBytes, *regionAddressPtr);
+ ALOGV("mResolvedAddress = %p", mResolvedAddress);
+ ALOGV("offset by %d, *regionAddressPtr = %p", offsetInBytes, *regionAddressPtr);
}
return result;
}
@@ -140,16 +145,16 @@
return mSizeInBytes;
}
-aaudio_result_t SharedMemoryParcelable::validate() {
+aaudio_result_t SharedMemoryParcelable::validate() const {
if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE_BYTES) {
- ALOGE("SharedMemoryParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+ ALOGE("invalid mSizeInBytes = %d", mSizeInBytes);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
return AAUDIO_OK;
}
void SharedMemoryParcelable::dump() {
- ALOGD("SharedMemoryParcelable mFd = %d", mFd.get());
- ALOGD("SharedMemoryParcelable mSizeInBytes = %d", mSizeInBytes);
- ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+ ALOGD("mFd = %d", mFd.get());
+ ALOGD("mSizeInBytes = %d", mSizeInBytes);
+ ALOGD("mResolvedAddress = %p", mResolvedAddress);
}
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
index 2a634e0..82c2240 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.h
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -61,8 +61,6 @@
int32_t getSizeInBytes();
- aaudio_result_t validate();
-
void dump();
protected:
@@ -74,6 +72,11 @@
android::base::unique_fd mFd;
int32_t mSizeInBytes = 0;
uint8_t *mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
+
+private:
+
+ aaudio_result_t validate() const;
+
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
index 7381dcb..c776116 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "SharedRegionParcelable"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -24,6 +24,7 @@
#include <binder/Parcelable.h>
#include <aaudio/AAudio.h>
+#include <utility/AAudioUtilities.h>
#include "binding/SharedMemoryParcelable.h"
#include "binding/SharedRegionParcelable.h"
@@ -47,21 +48,38 @@
}
status_t SharedRegionParcelable::writeToParcel(Parcel* parcel) const {
- parcel->writeInt32(mSizeInBytes);
+ status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
+ if (status != NO_ERROR) goto error;
+
+ status = parcel->writeInt32(mSizeInBytes);
+ if (status != NO_ERROR) goto error;
if (mSizeInBytes > 0) {
- parcel->writeInt32(mSharedMemoryIndex);
- parcel->writeInt32(mOffsetInBytes);
+ status = parcel->writeInt32(mSharedMemoryIndex);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32(mOffsetInBytes);
+ if (status != NO_ERROR) goto error;
}
- return NO_ERROR; // TODO check for errors above
+ return NO_ERROR;
+
+error:
+ ALOGE("%s returning %d", __func__, status);
+ return status;
}
status_t SharedRegionParcelable::readFromParcel(const Parcel* parcel) {
- parcel->readInt32(&mSizeInBytes);
+ status_t status = parcel->readInt32(&mSizeInBytes);
+ if (status != NO_ERROR) goto error;
if (mSizeInBytes > 0) {
- parcel->readInt32(&mSharedMemoryIndex);
- parcel->readInt32(&mOffsetInBytes);
+ status = parcel->readInt32(&mSharedMemoryIndex);
+ if (status != NO_ERROR) goto error;
+ status = parcel->readInt32(&mOffsetInBytes);
+ if (status != NO_ERROR) goto error;
}
- return NO_ERROR; // TODO check for errors above
+ return AAudioConvert_aaudioToAndroidStatus(validate());
+
+error:
+ ALOGE("%s returning %d", __func__, status);
+ return status;
}
aaudio_result_t SharedRegionParcelable::resolve(SharedMemoryParcelable *memoryParcels,
@@ -71,25 +89,25 @@
return AAUDIO_OK;
}
if (mSharedMemoryIndex < 0) {
- ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+ ALOGE("invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
return AAUDIO_ERROR_INTERNAL;
}
SharedMemoryParcelable *memoryParcel = &memoryParcels[mSharedMemoryIndex];
return memoryParcel->resolve(mOffsetInBytes, mSizeInBytes, regionAddressPtr);
}
-aaudio_result_t SharedRegionParcelable::validate() {
+aaudio_result_t SharedRegionParcelable::validate() const {
if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE_BYTES) {
- ALOGE("SharedRegionParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+ ALOGE("invalid mSizeInBytes = %d", mSizeInBytes);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
if (mSizeInBytes > 0) {
if (mOffsetInBytes < 0 || mOffsetInBytes >= MAX_MMAP_OFFSET_BYTES) {
- ALOGE("SharedRegionParcelable invalid mOffsetInBytes = %d", mOffsetInBytes);
+ ALOGE("invalid mOffsetInBytes = %d", mOffsetInBytes);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
if (mSharedMemoryIndex < 0 || mSharedMemoryIndex >= MAX_SHARED_MEMORIES) {
- ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+ ALOGE("invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
return AAUDIO_ERROR_INTERNAL;
}
}
@@ -97,9 +115,9 @@
}
void SharedRegionParcelable::dump() {
- ALOGD("SharedRegionParcelable mSizeInBytes = %d -----", mSizeInBytes);
+ ALOGD("mSizeInBytes = %d -----", mSizeInBytes);
if (mSizeInBytes > 0) {
- ALOGD("SharedRegionParcelable mSharedMemoryIndex = %d", mSharedMemoryIndex);
- ALOGD("SharedRegionParcelable mOffsetInBytes = %d", mOffsetInBytes);
+ ALOGD("mSharedMemoryIndex = %d", mSharedMemoryIndex);
+ ALOGD("mOffsetInBytes = %d", mOffsetInBytes);
}
}
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.h b/media/libaaudio/src/binding/SharedRegionParcelable.h
index f6babfd..0cd8c04 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.h
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.h
@@ -47,14 +47,15 @@
bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
- aaudio_result_t validate();
-
void dump();
protected:
int32_t mSharedMemoryIndex = -1;
int32_t mOffsetInBytes = 0;
int32_t mSizeInBytes = 0;
+
+private:
+ aaudio_result_t validate() const;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 604eed5..f8e34d1 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "AudioEndpoint"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -45,6 +45,7 @@
delete mUpCommandQueue;
}
+// TODO Consider moving to a method in RingBufferDescriptor
static aaudio_result_t AudioEndpoint_validateQueueDescriptor(const char *type,
const RingBufferDescriptor *descriptor) {
if (descriptor == nullptr) {
@@ -127,19 +128,19 @@
// ============================ up message queue =============================
const RingBufferDescriptor *descriptor = &pEndpointDescriptor->upMessageQueueDescriptor;
if(descriptor->bytesPerFrame != sizeof(AAudioServiceMessage)) {
- ALOGE("AudioEndpoint.configure() bytesPerFrame != sizeof(AAudioServiceMessage) = %d",
+ ALOGE("configure() bytesPerFrame != sizeof(AAudioServiceMessage) = %d",
descriptor->bytesPerFrame);
return AAUDIO_ERROR_INTERNAL;
}
if(descriptor->readCounterAddress == nullptr || descriptor->writeCounterAddress == nullptr) {
- ALOGE("AudioEndpoint.configure() NULL counter address");
+ ALOGE("configure() NULL counter address");
return AAUDIO_ERROR_NULL;
}
// Prevent memory leak and reuse.
if(mUpCommandQueue != nullptr || mDataQueue != nullptr) {
- ALOGE("AudioEndpoint.configure() endpoint already used");
+ ALOGE("configure() endpoint already used");
return AAUDIO_ERROR_INTERNAL;
}
@@ -153,8 +154,8 @@
// ============================ data queue =============================
descriptor = &pEndpointDescriptor->dataQueueDescriptor;
- ALOGV("AudioEndpoint.configure() data framesPerBurst = %d", descriptor->framesPerBurst);
- ALOGV("AudioEndpoint.configure() data readCounterAddress = %p",
+ ALOGV("configure() data framesPerBurst = %d", descriptor->framesPerBurst);
+ ALOGV("configure() data readCounterAddress = %p",
descriptor->readCounterAddress);
// An example of free running is when the other side is read or written by hardware DMA
@@ -163,7 +164,7 @@
? descriptor->readCounterAddress // read by other side
: descriptor->writeCounterAddress; // written by other side
mFreeRunning = (remoteCounter == nullptr);
- ALOGV("AudioEndpoint.configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
+ ALOGV("configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
? &mDataReadCounter
@@ -258,8 +259,8 @@
}
void AudioEndpoint::dump() const {
- ALOGD("AudioEndpoint: data readCounter = %lld", (long long) mDataQueue->getReadCounter());
- ALOGD("AudioEndpoint: data writeCounter = %lld", (long long) mDataQueue->getWriteCounter());
+ ALOGD("data readCounter = %lld", (long long) mDataQueue->getReadCounter());
+ ALOGD("data writeCounter = %lld", (long long) mDataQueue->getWriteCounter());
}
void AudioEndpoint::eraseDataMemory() {
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 2fdbfaf..9204824 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -61,15 +61,12 @@
, mClockModel()
, mAudioEndpoint()
, mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
- , mFramesPerBurst(16)
, mInService(inService)
, mServiceInterface(serviceInterface)
, mAtomicTimestamp()
, mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
, mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
{
- ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
- mWakeupDelayNanos, mMinimumSleepNanos);
}
AudioStreamInternal::~AudioStreamInternal() {
@@ -79,11 +76,12 @@
aaudio_result_t result = AAUDIO_OK;
int32_t capacity;
+ int32_t framesPerBurst;
AAudioStreamRequest request;
AAudioStreamConfiguration configurationOutput;
if (getState() != AAUDIO_STREAM_STATE_UNINITIALIZED) {
- ALOGE("AudioStreamInternal::open(): already open! state = %d", getState());
+ ALOGE("%s - already open! state = %d", __func__, getState());
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -104,7 +102,7 @@
request.setUserId(getuid());
request.setProcessId(getpid());
request.setSharingModeMatchRequired(isSharingModeMatchRequired());
- request.setInService(mInService);
+ request.setInService(isInService());
request.getConfiguration().setDeviceId(getDeviceId());
request.getConfiguration().setSampleRate(getSampleRate());
@@ -112,13 +110,30 @@
request.getConfiguration().setDirection(getDirection());
request.getConfiguration().setSharingMode(getSharingMode());
+ request.getConfiguration().setUsage(getUsage());
+ request.getConfiguration().setContentType(getContentType());
+ request.getConfiguration().setInputPreset(getInputPreset());
+
request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
+ mDeviceChannelCount = getSamplesPerFrame(); // Assume it will be the same. Update if not.
+
mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
+ if (mServiceStreamHandle < 0
+ && request.getConfiguration().getSamplesPerFrame() == 1 // mono?
+ && getDirection() == AAUDIO_DIRECTION_OUTPUT
+ && !isInService()) {
+ // if that failed then try switching from mono to stereo if OUTPUT.
+ // Only do this in the client. Otherwise we end up with a mono mixer in the service
+ // that writes to a stereo MMAP stream.
+ ALOGD("%s - openStream() returned %d, try switching from MONO to STEREO",
+ __func__, mServiceStreamHandle);
+ request.getConfiguration().setSamplesPerFrame(2); // stereo
+ mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
+ }
if (mServiceStreamHandle < 0) {
- result = mServiceStreamHandle;
- ALOGE("AudioStreamInternal::open(): openStream() returned %d", result);
- return result;
+ ALOGE("%s - openStream() returned %d", __func__, mServiceStreamHandle);
+ return mServiceStreamHandle;
}
result = configurationOutput.validate();
@@ -126,13 +141,22 @@
goto error;
}
// Save results of the open.
+ if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
+ setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
+ }
+ mDeviceChannelCount = configurationOutput.getSamplesPerFrame();
+
setSampleRate(configurationOutput.getSampleRate());
- setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
setDeviceId(configurationOutput.getDeviceId());
+ setSessionId(configurationOutput.getSessionId());
setSharingMode(configurationOutput.getSharingMode());
+ setUsage(configurationOutput.getUsage());
+ setContentType(configurationOutput.getContentType());
+ setInputPreset(configurationOutput.getInputPreset());
+
// Save device format so we can do format conversion and volume scaling together.
- mDeviceFormat = configurationOutput.getFormat();
+ setDeviceFormat(configurationOutput.getFormat());
result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
if (result != AAUDIO_OK) {
@@ -151,17 +175,18 @@
goto error;
}
- mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
- capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
-
// Validate result from server.
- if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
- ALOGE("AudioStreamInternal::open(): framesPerBurst out of range = %d", mFramesPerBurst);
+ framesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+ if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
+ ALOGE("%s - framesPerBurst out of range = %d", __func__, framesPerBurst);
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
}
- if (capacity < mFramesPerBurst || capacity > 32 * 1024) {
- ALOGE("AudioStreamInternal::open(): bufferCapacity out of range = %d", capacity);
+ mFramesPerBurst = framesPerBurst; // only save good value
+
+ capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
+ if (capacity < mFramesPerBurst || capacity > MAX_BUFFER_CAPACITY_IN_FRAMES) {
+ ALOGE("%s - bufferCapacity out of range = %d", __func__, capacity);
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
}
@@ -169,16 +194,16 @@
mClockModel.setSampleRate(getSampleRate());
mClockModel.setFramesPerBurst(mFramesPerBurst);
- if (getDataCallbackProc()) {
+ if (isDataCallbackSet()) {
mCallbackFrames = builder.getFramesPerDataCallback();
if (mCallbackFrames > getBufferCapacity() / 2) {
- ALOGE("AudioStreamInternal::open(): framesPerCallback too big = %d, capacity = %d",
- mCallbackFrames, getBufferCapacity());
+ ALOGE("%s - framesPerCallback too big = %d, capacity = %d",
+ __func__, mCallbackFrames, getBufferCapacity());
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
} else if (mCallbackFrames < 0) {
- ALOGE("AudioStreamInternal::open(): framesPerCallback negative");
+ ALOGE("%s - framesPerCallback negative", __func__);
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
@@ -204,8 +229,7 @@
aaudio_result_t AudioStreamInternal::close() {
aaudio_result_t result = AAUDIO_OK;
- ALOGD("close(): mServiceStreamHandle = 0x%08X",
- mServiceStreamHandle);
+ ALOGD("%s(): mServiceStreamHandle = 0x%08X", __func__, mServiceStreamHandle);
if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
// Don't close a stream while it is running.
aaudio_stream_state_t currentState = getState();
@@ -216,8 +240,8 @@
result = waitForStateChange(currentState, &nextState,
timeoutNanoseconds);
if (result != AAUDIO_OK) {
- ALOGE("close() waitForStateChange() returned %d %s",
- result, AAudio_convertResultToText(result));
+ ALOGE("%s() waitForStateChange() returned %d %s",
+ __func__, result, AAudio_convertResultToText(result));
}
}
setState(AAUDIO_STREAM_STATE_CLOSING);
@@ -240,7 +264,7 @@
static void *aaudio_callback_thread_proc(void *context)
{
AudioStreamInternal *stream = (AudioStreamInternal *)context;
- //LOGD("AudioStreamInternal(): oboe_callback_thread, stream = %p", stream);
+ //LOGD("oboe_callback_thread, stream = %p", stream);
if (stream != NULL) {
return stream->callbackLoop();
} else {
@@ -288,7 +312,7 @@
mNeedCatchUp.request(); // Ask data processing code to catch up when first timestamp received.
// Start data callback thread.
- if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
+ if (result == AAUDIO_OK && isDataCallbackSet()) {
// Launch the callback loop thread.
int64_t periodNanos = mCallbackFrames
* AAUDIO_NANOS_PER_SECOND
@@ -329,8 +353,13 @@
}
}
-aaudio_result_t AudioStreamInternal::requestStopInternal()
+aaudio_result_t AudioStreamInternal::requestStop()
{
+ aaudio_result_t result = stopCallback();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
ALOGE("requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
mServiceStreamHandle);
@@ -344,16 +373,6 @@
return mServiceInterface.stopStream(mServiceStreamHandle);
}
-aaudio_result_t AudioStreamInternal::requestStop()
-{
- aaudio_result_t result = stopCallback();
- if (result != AAUDIO_OK) {
- return result;
- }
- result = requestStopInternal();
- return result;
-}
-
aaudio_result_t AudioStreamInternal::registerThread() {
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
ALOGE("registerThread() mServiceStreamHandle invalid");
@@ -373,19 +392,25 @@
}
aaudio_result_t AudioStreamInternal::startClient(const android::AudioClient& client,
- audio_port_handle_t *clientHandle) {
+ audio_port_handle_t *portHandle) {
+ ALOGV("%s() called", __func__);
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
-
- return mServiceInterface.startClient(mServiceStreamHandle, client, clientHandle);
+ aaudio_result_t result = mServiceInterface.startClient(mServiceStreamHandle,
+ client, portHandle);
+ ALOGV("%s(%d) returning %d", __func__, *portHandle, result);
+ return result;
}
-aaudio_result_t AudioStreamInternal::stopClient(audio_port_handle_t clientHandle) {
+aaudio_result_t AudioStreamInternal::stopClient(audio_port_handle_t portHandle) {
+ ALOGV("%s(%d) called", __func__, portHandle);
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
- return mServiceInterface.stopClient(mServiceStreamHandle, clientHandle);
+ aaudio_result_t result = mServiceInterface.stopClient(mServiceStreamHandle, portHandle);
+ ALOGV("%s(%d) returning %d", __func__, portHandle, result);
+ return result;
}
aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
@@ -448,34 +473,30 @@
aaudio_result_t result = AAUDIO_OK;
switch (message->event.event) {
case AAUDIO_SERVICE_EVENT_STARTED:
- ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_STARTED");
+ ALOGD("%s - got AAUDIO_SERVICE_EVENT_STARTED", __func__);
if (getState() == AAUDIO_STREAM_STATE_STARTING) {
setState(AAUDIO_STREAM_STATE_STARTED);
}
break;
case AAUDIO_SERVICE_EVENT_PAUSED:
- ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_PAUSED");
+ ALOGD("%s - got AAUDIO_SERVICE_EVENT_PAUSED", __func__);
if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
setState(AAUDIO_STREAM_STATE_PAUSED);
}
break;
case AAUDIO_SERVICE_EVENT_STOPPED:
- ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_STOPPED");
+ ALOGD("%s - got AAUDIO_SERVICE_EVENT_STOPPED", __func__);
if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
setState(AAUDIO_STREAM_STATE_STOPPED);
}
break;
case AAUDIO_SERVICE_EVENT_FLUSHED:
- ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_FLUSHED");
+ ALOGD("%s - got AAUDIO_SERVICE_EVENT_FLUSHED", __func__);
if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
setState(AAUDIO_STREAM_STATE_FLUSHED);
onFlushFromServer();
}
break;
- case AAUDIO_SERVICE_EVENT_CLOSED:
- ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_CLOSED");
- setState(AAUDIO_STREAM_STATE_CLOSED);
- break;
case AAUDIO_SERVICE_EVENT_DISCONNECTED:
// Prevent hardware from looping on old data and making buzzing sounds.
if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
@@ -483,18 +504,18 @@
}
result = AAUDIO_ERROR_DISCONNECTED;
setState(AAUDIO_STREAM_STATE_DISCONNECTED);
- ALOGW("WARNING - AudioStreamInternal::onEventFromServer()"
- " AAUDIO_SERVICE_EVENT_DISCONNECTED - FIFO cleared");
+ ALOGW("%s - AAUDIO_SERVICE_EVENT_DISCONNECTED - FIFO cleared", __func__);
break;
case AAUDIO_SERVICE_EVENT_VOLUME:
+ ALOGD("%s - AAUDIO_SERVICE_EVENT_VOLUME %lf", __func__, message->event.dataDouble);
mStreamVolume = (float)message->event.dataDouble;
doSetVolume();
- ALOGD("AudioStreamInternal::onEventFromServer() AAUDIO_SERVICE_EVENT_VOLUME %lf",
- message->event.dataDouble);
+ break;
+ case AAUDIO_SERVICE_EVENT_XRUN:
+ mXRunCount = static_cast<int32_t>(message->event.dataLong);
break;
default:
- ALOGW("WARNING - AudioStreamInternal::onEventFromServer() Unrecognized event = %d",
- (int) message->event.event);
+ ALOGE("%s - Unrecognized event = %d", __func__, (int) message->event.event);
break;
}
return result;
@@ -519,8 +540,7 @@
break;
default:
- ALOGE("WARNING - drainTimestampsFromService() Unrecognized what = %d",
- (int) message.what);
+ ALOGE("%s - unrecognized message.what = %d", __func__, (int) message.what);
result = AAUDIO_ERROR_INTERNAL;
break;
}
@@ -533,7 +553,6 @@
aaudio_result_t result = AAUDIO_OK;
while (result == AAUDIO_OK) {
- //ALOGD("AudioStreamInternal::processCommands() - looping, %d", result);
AAudioServiceMessage message;
if (mAudioEndpoint.readUpCommand(&message) != 1) {
break; // no command this time, no problem
@@ -552,8 +571,7 @@
break;
default:
- ALOGE("WARNING - processCommands() Unrecognized what = %d",
- (int) message.what);
+ ALOGE("%s - unrecognized message.what = %d", __func__, (int) message.what);
result = AAUDIO_ERROR_INTERNAL;
break;
}
@@ -614,13 +632,13 @@
if (wakeTimeNanos > deadlineNanos) {
// If we time out, just return the framesWritten so far.
// TODO remove after we fix the deadline bug
- ALOGW("AudioStreamInternal::processData(): entered at %lld nanos, currently %lld",
+ ALOGW("processData(): entered at %lld nanos, currently %lld",
(long long) entryTimeNanos, (long long) currentTimeNanos);
- ALOGW("AudioStreamInternal::processData(): TIMEOUT after %lld nanos",
+ ALOGW("processData(): TIMEOUT after %lld nanos",
(long long) timeoutNanoseconds);
- ALOGW("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
+ ALOGW("processData(): wakeTime = %lld, deadline = %lld nanos",
(long long) wakeTimeNanos, (long long) deadlineNanos);
- ALOGW("AudioStreamInternal::processData(): past deadline by %d micros",
+ ALOGW("processData(): past deadline by %d micros",
(int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
mClockModel.dump();
mAudioEndpoint.dump();
@@ -655,14 +673,29 @@
}
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
+ int32_t adjustedFrames = requestedFrames;
int32_t actualFrames = 0;
- // Round to the next highest burst size.
- if (getFramesPerBurst() > 0) {
- int32_t numBursts = (requestedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
- requestedFrames = numBursts * getFramesPerBurst();
+ int32_t maximumSize = getBufferCapacity();
+
+ // Clip to minimum size so that rounding up will work better.
+ if (adjustedFrames < 1) {
+ adjustedFrames = 1;
}
- aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
+ if (adjustedFrames > maximumSize) {
+ // Clip to maximum size.
+ adjustedFrames = maximumSize;
+ } else {
+ // Round to the next highest burst size.
+ int32_t numBursts = (adjustedFrames + mFramesPerBurst - 1) / mFramesPerBurst;
+ adjustedFrames = numBursts * mFramesPerBurst;
+ // Rounding may have gone above maximum.
+ if (adjustedFrames > maximumSize) {
+ adjustedFrames = maximumSize;
+ }
+ }
+
+ aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(adjustedFrames, &actualFrames);
ALOGD("setBufferSize() req = %d => %d", requestedFrames, actualFrames);
if (result < 0) {
return result;
@@ -680,7 +713,7 @@
}
int32_t AudioStreamInternal::getFramesPerBurst() const {
- return mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+ return mFramesPerBurst;
}
aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
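For reference, a standalone sketch of the clip-then-round buffer-size policy that the reworked setBufferSize() above applies; the function name and signature here are illustrative and not part of the patch.

    #include <cstdint>

    // Clip the request to [1, capacity], otherwise round it up to a whole
    // number of bursts, re-clipping if the rounding overshoots the capacity.
    static int32_t roundBufferSizeToBursts(int32_t requestedFrames,
                                           int32_t framesPerBurst,
                                           int32_t bufferCapacityInFrames) {
        int32_t adjusted = requestedFrames;
        if (adjusted < 1) {
            adjusted = 1;                      // clip to a minimum so rounding up works
        }
        if (adjusted > bufferCapacityInFrames) {
            adjusted = bufferCapacityInFrames; // clip to the maximum
        } else {
            // Round up to the next multiple of the burst size.
            int32_t numBursts = (adjusted + framesPerBurst - 1) / framesPerBurst;
            adjusted = numBursts * framesPerBurst;
            if (adjusted > bufferCapacityInFrames) {
                adjusted = bufferCapacityInFrames; // rounding may overshoot the capacity
            }
        }
        return adjusted;
    }
    // Example: roundBufferSizeToBursts(700, 192, 3072) == 768 (4 bursts).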
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 47024c0..0425cd5 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -34,6 +34,12 @@
namespace aaudio {
+ // These are intended to be outside the range of what is normally encountered.
+ // TODO MAXes should probably be much bigger.
+ constexpr int32_t MIN_FRAMES_PER_BURST = 16; // arbitrary
+ constexpr int32_t MAX_FRAMES_PER_BURST = 16 * 1024; // arbitrary
+ constexpr int32_t MAX_BUFFER_CAPACITY_IN_FRAMES = 32 * 1024; // arbitrary
+
// A stream that talks to the AAudioService or directly to a HAL.
class AudioStreamInternal : public AudioStream {
@@ -115,8 +121,6 @@
aaudio_result_t processCommands();
- aaudio_result_t requestStopInternal();
-
aaudio_result_t stopCallback();
virtual void advanceClientToMatchServerPosition() = 0;
@@ -134,14 +138,19 @@
// Calculate timeout for an operation involving framesPerOperation.
int64_t calculateReasonableTimeout(int32_t framesPerOperation);
- aaudio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ int32_t getDeviceChannelCount() const { return mDeviceChannelCount; }
+
+ /**
+ * @return true if running in audio service, versus in app process
+ */
+ bool isInService() const { return mInService; }
IsochronousClockModel mClockModel; // timing model for chasing the HAL
AudioEndpoint mAudioEndpoint; // source for reads or sink for writes
aaudio_handle_t mServiceStreamHandle; // opaque handle returned from service
- int32_t mFramesPerBurst; // frames per HAL transfer
+ int32_t mFramesPerBurst = MIN_FRAMES_PER_BURST; // frames per HAL transfer
int32_t mXRunCount = 0; // how many underrun events?
// Offset from underlying frame position.
@@ -183,6 +192,8 @@
EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
int64_t mServiceLatencyNanos = 0;
+
+ int32_t mDeviceChannelCount = 0;
};
} /* namespace aaudio */
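A small sketch of how open() uses the new constants above to sanity-check the burst size and capacity reported by the service; the helper name below is illustrative.

    #include <cstdint>

    namespace {
    constexpr int32_t MIN_FRAMES_PER_BURST = 16;              // arbitrary, from the patch
    constexpr int32_t MAX_FRAMES_PER_BURST = 16 * 1024;       // arbitrary, from the patch
    constexpr int32_t MAX_BUFFER_CAPACITY_IN_FRAMES = 32 * 1024;
    }

    // Returns true if the descriptor values returned by the server look sane.
    static bool validateServerBufferParameters(int32_t framesPerBurst,
                                               int32_t capacityInFrames) {
        if (framesPerBurst < MIN_FRAMES_PER_BURST || framesPerBurst > MAX_FRAMES_PER_BURST) {
            return false; // burst size out of range
        }
        if (capacityInFrames < framesPerBurst
                || capacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
            return false; // capacity must hold at least one burst
        }
        return true;
    }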
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index b792ecd..0719fe1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -14,7 +14,8 @@
* limitations under the License.
*/
-#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
+#define LOG_TAG (mInService ? "AudioStreamInternalCapture_Service" \
+ : "AudioStreamInternalCapture_Client")
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -101,7 +102,8 @@
}
// If the write index passed the read index then consider it an overrun.
- if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
+ // For shared streams, the xRunCount is passed up from the service.
+ if (mAudioEndpoint.isFreeRunning() && mAudioEndpoint.getEmptyFramesAvailable() < 0) {
mXRunCount++;
if (ATRACE_ENABLED()) {
ATRACE_INT("aaOverRuns", mXRunCount);
@@ -152,7 +154,7 @@
aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
int32_t numFrames) {
- // ALOGD("AudioStreamInternalCapture::readNowWithConversion(%p, %d)",
+ // ALOGD("readNowWithConversion(%p, %d)",
// buffer, numFrames);
WrappingBuffer wrappingBuffer;
uint8_t *destination = (uint8_t *) buffer;
@@ -174,16 +176,16 @@
int32_t numSamples = framesToProcess * getSamplesPerFrame();
// TODO factor this out into a utility function
- if (mDeviceFormat == getFormat()) {
+ if (getDeviceFormat() == getFormat()) {
memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
+ } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
&& getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
AAudioConvert_pcm16ToFloat(
(const int16_t *) wrappingBuffer.data[partIndex],
(float *) destination,
numSamples,
1.0f);
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
+ } else if (getDeviceFormat() == AAUDIO_FORMAT_PCM_FLOAT
&& getFormat() == AAUDIO_FORMAT_PCM_I16) {
AAudioConvert_floatToPcm16(
(const float *) wrappingBuffer.data[partIndex],
@@ -201,7 +203,7 @@
int32_t framesProcessed = numFrames - framesLeft;
mAudioEndpoint.advanceReadIndex(framesProcessed);
- //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
+ //ALOGD("readNowWithConversion() returns %d", framesProcessed);
return framesProcessed;
}
@@ -215,14 +217,14 @@
// Prevent retrograde motion.
mLastFramesWritten = std::max(mLastFramesWritten,
framesWrittenHardware + mFramesOffsetFromService);
- //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld",
+ //ALOGD("getFramesWritten() returns %lld",
// (long long)mLastFramesWritten);
return mLastFramesWritten;
}
int64_t AudioStreamInternalCapture::getFramesRead() {
int64_t frames = mAudioEndpoint.getDataReadCounter() + mFramesOffsetFromService;
- //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
+ //ALOGD("getFramesRead() returns %lld", (long long)frames);
return frames;
}
@@ -230,8 +232,7 @@
void *AudioStreamInternalCapture::callbackLoop() {
aaudio_result_t result = AAUDIO_OK;
aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
- AAudioStream_dataCallback appCallback = getDataCallbackProc();
- if (appCallback == nullptr) return NULL;
+ if (!isDataCallbackSet()) return NULL;
// result might be a frame count
while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
@@ -242,35 +243,25 @@
// This is a BLOCKING READ!
result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
if ((result != mCallbackFrames)) {
- ALOGE("AudioStreamInternalCapture(): callbackLoop: read() returned %d", result);
+ ALOGE("callbackLoop: read() returned %d", result);
if (result >= 0) {
// Only read some of the frames requested. Must have timed out.
result = AAUDIO_ERROR_TIMEOUT;
}
- AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
- if (errorCallback != nullptr) {
- (*errorCallback)(
- (AAudioStream *) this,
- getErrorCallbackUserData(),
- result);
- }
+ maybeCallErrorCallback(result);
break;
}
// Call application using the AAudio callback interface.
- callbackResult = (*appCallback)(
- (AAudioStream *) this,
- getDataCallbackUserData(),
- mCallbackBuffer,
- mCallbackFrames);
+ callbackResult = maybeCallDataCallback(mCallbackBuffer, mCallbackFrames);
if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
- ALOGD("AudioStreamInternalCapture(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ ALOGD("callback returned AAUDIO_CALLBACK_RESULT_STOP");
break;
}
}
- ALOGD("AudioStreamInternalCapture(): callbackLoop() exiting, result = %d, isActive() = %d",
+ ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
result, (int) isActive());
return NULL;
}
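For clarity, a sketch of the int16/float sample conversion that readNowWithConversion() delegates to the AAudioConvert_* helpers when the app format differs from the device format; the loop implementations below are illustrative, not the library code.

    #include <algorithm>
    #include <cstdint>

    // PCM16 -> float, scaling by an amplitude (the capture path passes 1.0f).
    static void convertPcm16ToFloat(const int16_t *source, float *destination,
                                    int32_t numSamples, float amplitude) {
        const float scaler = amplitude / 32768.0f;
        for (int32_t i = 0; i < numSamples; i++) {
            destination[i] = source[i] * scaler;
        }
    }

    // float -> PCM16 with clamping to avoid wrap-around on overdriven samples.
    static void convertFloatToPcm16(const float *source, int16_t *destination,
                                    int32_t numSamples, float amplitude) {
        for (int32_t i = 0; i < numSamples; i++) {
            float sample = source[i] * amplitude * 32767.0f;
            sample = std::clamp(sample, -32768.0f, 32767.0f);
            destination[i] = static_cast<int16_t>(sample);
        }
    }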
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 1e02eee..795ba2c 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -14,7 +14,8 @@
* limitations under the License.
*/
-#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
+#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
+ : "AudioStreamInternalPlay_Client")
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -37,12 +38,26 @@
AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
+constexpr int kRampMSec = 10; // time to apply a change in volume
-aaudio_result_t AudioStreamInternalPlay::requestPauseInternal()
+aaudio_result_t AudioStreamInternalPlay::open(const AudioStreamBuilder &builder) {
+ aaudio_result_t result = AudioStreamInternal::open(builder);
+ if (result == AAUDIO_OK) {
+ // Sample rate is constrained to common values by now and should not overflow.
+ int32_t numFrames = kRampMSec * getSampleRate() / AAUDIO_MILLIS_PER_SECOND;
+ mVolumeRamp.setLengthInFrames(numFrames);
+ }
+ return result;
+}
+
+aaudio_result_t AudioStreamInternalPlay::requestPause()
{
+ aaudio_result_t result = stopCallback();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
- mServiceStreamHandle);
+ ALOGE("%s() mServiceStreamHandle invalid", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -52,20 +67,9 @@
return mServiceInterface.pauseStream(mServiceStreamHandle);
}
-aaudio_result_t AudioStreamInternalPlay::requestPause()
-{
- aaudio_result_t result = stopCallback();
- if (result != AAUDIO_OK) {
- return result;
- }
- result = requestPauseInternal();
- return result;
-}
-
aaudio_result_t AudioStreamInternalPlay::requestFlush() {
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
- mServiceStreamHandle);
+ ALOGE("%s() mServiceStreamHandle invalid", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -80,7 +84,7 @@
// Bump offset so caller does not see the retrograde motion in getFramesRead().
int64_t offset = writeCounter - readCounter;
mFramesOffsetFromService += offset;
- ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
+ ALOGV("%s() readN = %lld, writeN = %lld, offset = %lld", __func__,
(long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
// Force writeCounter to match readCounter.
@@ -94,9 +98,7 @@
// Write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
- int64_t timeoutNanoseconds)
-
-{
+ int64_t timeoutNanoseconds) {
return processData((void *)buffer, numFrames, timeoutNanoseconds);
}
@@ -115,7 +117,7 @@
// Still haven't got any timestamps from server.
// Keep waiting until we get some valid timestamps then start writing to the
// current buffer position.
- ALOGD("processDataNow() wait for valid timestamps");
+ ALOGV("%s() wait for valid timestamps", __func__);
// Sleep very briefly and hope we get a timestamp soon.
*wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
ATRACE_END();
@@ -139,7 +141,8 @@
}
// If the read index passed the write index then consider it an underrun.
- if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+ // For shared streams, the xRunCount is passed up from the service.
+ if (mAudioEndpoint.isFreeRunning() && mAudioEndpoint.getFullFramesAvailable() < 0) {
mXRunCount++;
if (ATRACE_ENABLED()) {
ATRACE_INT("aaUnderRuns", mXRunCount);
@@ -196,10 +199,8 @@
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
int32_t numFrames) {
- // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
- // buffer, numFrames);
WrappingBuffer wrappingBuffer;
- uint8_t *source = (uint8_t *) buffer;
+ uint8_t *byteBuffer = (uint8_t *) buffer;
int32_t framesLeft = numFrames;
mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
@@ -213,70 +214,26 @@
if (framesToWrite > framesAvailable) {
framesToWrite = framesAvailable;
}
+
int32_t numBytes = getBytesPerFrame() * framesToWrite;
- int32_t numSamples = framesToWrite * getSamplesPerFrame();
// Data conversion.
float levelFrom;
float levelTo;
- bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
- &levelFrom, &levelTo);
- // The formats are validated when the stream is opened so we do not have to
- // check for illegal combinations here.
- // TODO factor this out into a utility function
- if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
- if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- AAudio_linearRamp(
- (const float *) source,
- (float *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
- if (ramping) {
- AAudioConvert_floatToPcm16(
- (const float *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_floatToPcm16(
- (const float *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- numSamples,
- levelTo);
- }
- }
- } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
- if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- if (ramping) {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source,
- (float *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source,
- (float *) wrappingBuffer.data[partIndex],
- numSamples,
- levelTo);
- }
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
- AAudio_linearRamp(
- (const int16_t *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- }
- }
- source += numBytes;
+ mVolumeRamp.nextSegment(framesToWrite, &levelFrom, &levelTo);
+
+ AAudioDataConverter::FormattedData source(
+ (void *)byteBuffer,
+ getFormat(),
+ getSamplesPerFrame());
+ AAudioDataConverter::FormattedData destination(
+ wrappingBuffer.data[partIndex],
+ getDeviceFormat(),
+ getDeviceChannelCount());
+
+ AAudioDataConverter::convert(source, destination, framesToWrite,
+ levelFrom, levelTo);
+
+ byteBuffer += numBytes;
framesLeft -= framesToWrite;
} else {
break;
@@ -286,7 +243,6 @@
int32_t framesWritten = numFrames - framesLeft;
mAudioEndpoint.advanceWriteIndex(framesWritten);
- // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
return framesWritten;
}
@@ -305,7 +261,6 @@
} else {
mLastFramesRead = framesRead;
}
- //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
return framesRead;
}
@@ -313,60 +268,51 @@
{
int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
+ mFramesOffsetFromService;
- //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
return framesWritten;
}
// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternalPlay::callbackLoop() {
+ ALOGD("%s() entering >>>>>>>>>>>>>>>", __func__);
aaudio_result_t result = AAUDIO_OK;
aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
- AAudioStream_dataCallback appCallback = getDataCallbackProc();
- if (appCallback == nullptr) return NULL;
+ if (!isDataCallbackSet()) return NULL;
int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
// result might be a frame count
while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
// Call application using the AAudio callback interface.
- callbackResult = (*appCallback)(
- (AAudioStream *) this,
- getDataCallbackUserData(),
- mCallbackBuffer,
- mCallbackFrames);
+ callbackResult = maybeCallDataCallback(mCallbackBuffer, mCallbackFrames);
if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
// Write audio data to stream. This is a BLOCKING WRITE!
result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
if ((result != mCallbackFrames)) {
- ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
if (result >= 0) {
// Only wrote some of the frames requested. Must have timed out.
result = AAUDIO_ERROR_TIMEOUT;
}
- AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
- if (errorCallback != nullptr) {
- (*errorCallback)(
- (AAudioStream *) this,
- getErrorCallbackUserData(),
- result);
- }
+ maybeCallErrorCallback(result);
break;
}
} else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
- ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ ALOGV("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
break;
}
}
- ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
- result, (int) isActive());
+ ALOGD("%s() exiting, result = %d, isActive() = %d <<<<<<<<<<<<<<",
+ __func__, result, (int) isActive());
return NULL;
}
//------------------------------------------------------------------------------
// Implementation of PlayerBase
status_t AudioStreamInternalPlay::doSetVolume() {
- mVolumeRamp.setTarget(mStreamVolume * getDuckAndMuteVolume());
+ float combinedVolume = mStreamVolume * getDuckAndMuteVolume();
+ ALOGD("%s() mStreamVolume * duckAndMuteVolume = %f * %f = %f",
+ __func__, mStreamVolume, getDuckAndMuteVolume(), combinedVolume);
+ mVolumeRamp.setTarget(combinedVolume);
return android::NO_ERROR;
}
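A sketch of how a per-block linear volume ramp like the one configured above (kRampMSec) can be applied to float data; in the patch this work is performed inside AAudioDataConverter together with the format conversion, so the function below is purely illustrative.

    #include <cstdint>

    // Interpolate the gain linearly from levelFrom to levelTo across one block
    // of interleaved float frames.
    static void applyLinearRamp(const float *input, float *output,
                                int32_t numFrames, int32_t channelCount,
                                float levelFrom, float levelTo) {
        for (int32_t frame = 0; frame < numFrames; frame++) {
            float fraction = (numFrames > 1) ? (float) frame / (numFrames - 1) : 1.0f;
            float gain = levelFrom + fraction * (levelTo - levelFrom);
            for (int32_t ch = 0; ch < channelCount; ch++) {
                output[frame * channelCount + ch] =
                        input[frame * channelCount + ch] * gain;
            }
        }
    }
    // Ramp length from the patch: 10 ms at 48000 Hz => 10 * 48000 / 1000 = 480 frames.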
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index d5c1b1e..977a909 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -33,10 +33,22 @@
AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface, bool inService = false);
virtual ~AudioStreamInternalPlay();
+ aaudio_result_t open(const AudioStreamBuilder &builder) override;
+
aaudio_result_t requestPause() override;
aaudio_result_t requestFlush() override;
+ bool isFlushSupported() const override {
+ // Only implement FLUSH for OUTPUT streams.
+ return true;
+ }
+
+ bool isPauseSupported() const override {
+ // Only implement PAUSE for OUTPUT streams.
+ return true;
+ }
+
aaudio_result_t write(const void *buffer,
int32_t numFrames,
int64_t timeoutNanoseconds) override;
@@ -52,8 +64,6 @@
protected:
- aaudio_result_t requestPauseInternal();
-
void advanceClientToMatchServerPosition() override;
void onFlushFromServer() override;
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index bac69f1..95b52be 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "IsochronousClockModel"
//#define LOG_NDEBUG 0
#include <log/log.h>
@@ -41,20 +41,20 @@
}
void IsochronousClockModel::setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
- ALOGV("IsochronousClockModel::setPositionAndTime(%lld, %lld)",
+ ALOGV("setPositionAndTime(%lld, %lld)",
(long long) framePosition, (long long) nanoTime);
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
}
void IsochronousClockModel::start(int64_t nanoTime) {
- ALOGV("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
+ ALOGV("start(nanos = %lld)\n", (long long) nanoTime);
mMarkerNanoTime = nanoTime;
mState = STATE_STARTING;
}
void IsochronousClockModel::stop(int64_t nanoTime) {
- ALOGV("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
+ ALOGV("stop(nanos = %lld)\n", (long long) nanoTime);
setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
// TODO should we set position?
mState = STATE_STOPPED;
@@ -156,7 +156,7 @@
int64_t framesDelta = nextBurstPosition - mMarkerFramePosition;
int64_t nanosDelta = convertDeltaPositionToTime(framesDelta);
int64_t time = mMarkerNanoTime + nanosDelta;
-// ALOGD("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
+// ALOGD("convertPositionToTime: pos = %llu --> time = %llu",
// (unsigned long long)framePosition,
// (unsigned long long)time);
return time;
@@ -171,19 +171,19 @@
int64_t nextBurstPosition = mMarkerFramePosition + framesDelta;
int64_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
int64_t position = nextBurstIndex * mFramesPerBurst;
-// ALOGD("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
+// ALOGD("convertTimeToPosition: time = %llu --> pos = %llu",
// (unsigned long long)nanoTime,
// (unsigned long long)position);
-// ALOGD("IsochronousClockModel::convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
+// ALOGD("convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
// (long long) framesDelta, mFramesPerBurst);
return position;
}
void IsochronousClockModel::dump() const {
- ALOGD("IsochronousClockModel::mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
- ALOGD("IsochronousClockModel::mMarkerNanoTime = %lld", (long long) mMarkerNanoTime);
- ALOGD("IsochronousClockModel::mSampleRate = %6d", mSampleRate);
- ALOGD("IsochronousClockModel::mFramesPerBurst = %6d", mFramesPerBurst);
- ALOGD("IsochronousClockModel::mMaxLatenessInNanos = %6d", mMaxLatenessInNanos);
- ALOGD("IsochronousClockModel::mState = %6d", mState);
+ ALOGD("mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
+ ALOGD("mMarkerNanoTime = %lld", (long long) mMarkerNanoTime);
+ ALOGD("mSampleRate = %6d", mSampleRate);
+ ALOGD("mFramesPerBurst = %6d", mFramesPerBurst);
+ ALOGD("mMaxLatenessInNanos = %6d", mMaxLatenessInNanos);
+ ALOGD("mState = %6d", mState);
}
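A minimal sketch of the marker-based position/time conversion that the clock model above performs, with the burst alignment and lateness handling omitted; the struct and member names are illustrative.

    #include <cstdint>

    struct SimpleClockModel {
        int64_t markerFramePosition = 0;     // frame position at the marker
        int64_t markerNanoTime = 0;          // CLOCK_MONOTONIC time at the marker
        int32_t sampleRate = 48000;
        static constexpr int64_t kNanosPerSecond = 1000000000LL;

        // Predict the time at which a given frame position is presented.
        int64_t convertPositionToTime(int64_t framePosition) const {
            int64_t framesDelta = framePosition - markerFramePosition;
            int64_t nanosDelta = (framesDelta * kNanosPerSecond) / sampleRate;
            return markerNanoTime + nanosDelta;
        }

        // Predict the frame position that corresponds to a given time.
        int64_t convertTimeToPosition(int64_t nanoTime) const {
            int64_t nanosDelta = nanoTime - markerNanoTime;
            int64_t framesDelta = (nanosDelta * sampleRate) / kNanosPerSecond;
            return markerFramePosition + framesDelta;
        }
    };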
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 1eaee81..df0db79 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -18,6 +18,8 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <inttypes.h>
+#include <mutex>
#include <time.h>
#include <pthread.h>
@@ -175,13 +177,38 @@
streamBuilder->setSharingMode(sharingMode);
}
+AAUDIO_API void AAudioStreamBuilder_setUsage(AAudioStreamBuilder* builder,
+ aaudio_usage_t usage) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setUsage(usage);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setContentType(AAudioStreamBuilder* builder,
+ aaudio_content_type_t contentType) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setContentType(contentType);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
+ aaudio_input_preset_t inputPreset) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setInputPreset(inputPreset);
+}
+
AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
- int32_t frames)
+ int32_t frames)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
streamBuilder->setBufferCapacity(frames);
}
+AAUDIO_API void AAudioStreamBuilder_setSessionId(AAudioStreamBuilder* builder,
+ aaudio_session_id_t sessionId)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setSessionId(sessionId);
+}
+
AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
AAudioStream_dataCallback callback,
void *userData)
@@ -238,15 +265,26 @@
AAUDIO_API aaudio_result_t AAudioStream_close(AAudioStream* stream)
{
+ aaudio_result_t result = AAUDIO_ERROR_NULL;
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
- ALOGD("AAudioStream_close(%p)", stream);
+ ALOGD("AAudioStream_close(%p) called ---------------", stream);
if (audioStream != nullptr) {
- audioStream->close();
- audioStream->unregisterPlayerBase();
- delete audioStream;
- return AAUDIO_OK;
+ result = audioStream->safeClose();
+ // Close will only fail if called illegally, for example, from a callback.
+ // That would result in deleting an active stream, which would cause a crash.
+ if (result == AAUDIO_OK) {
+ audioStream->unregisterPlayerBase();
+ delete audioStream;
+ } else {
+ ALOGW("%s attempt to close failed. Close it from another thread.", __func__);
+ }
}
- return AAUDIO_ERROR_NULL;
+ // We're potentially freeing `stream` above, so its use here makes some
+ // static analysis tools unhappy. Casting to uintptr_t helps assure
+ // said tools that we're not doing anything bad here.
+ ALOGD("AAudioStream_close(%#" PRIxPTR ") returned %d ---------",
+ reinterpret_cast<uintptr_t>(stream), result);
+ return result;
}
AAUDIO_API aaudio_result_t AAudioStream_requestStart(AAudioStream* stream)
@@ -269,7 +307,7 @@
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
ALOGD("AAudioStream_requestFlush(%p)", stream);
- return audioStream->requestFlush();
+ return audioStream->safeFlush();
}
AAUDIO_API aaudio_result_t AAudioStream_requestStop(AAudioStream* stream)
@@ -324,7 +362,7 @@
}
// Don't allow writes when playing with a callback.
- if (audioStream->getDataCallbackProc() != nullptr && audioStream->isActive()) {
+ if (audioStream->isDataCallbackActive()) {
ALOGE("Cannot write to a callback stream when running.");
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -434,6 +472,30 @@
return audioStream->getSharingMode();
}
+AAUDIO_API aaudio_usage_t AAudioStream_getUsage(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getUsage();
+}
+
+AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getContentType();
+}
+
+AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getInputPreset();
+}
+
+AAUDIO_API int32_t AAudioStream_getSessionId(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getSessionId();
+}
+
AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
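A usage sketch for the new builder attributes added above (usage, content type, session ID); result checks are omitted for brevity, and the chosen attribute values are just examples.

    #include <aaudio/AAudio.h>

    AAudioStream *openAlarmStream() {
        AAudioStreamBuilder *builder = nullptr;
        AAudioStream *stream = nullptr;
        AAudio_createStreamBuilder(&builder);
        AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
        AAudioStreamBuilder_setUsage(builder, AAUDIO_USAGE_ALARM);
        AAudioStreamBuilder_setContentType(builder, AAUDIO_CONTENT_TYPE_SONIFICATION);
        AAudioStreamBuilder_setSessionId(builder, AAUDIO_SESSION_ID_ALLOCATE);
        AAudioStreamBuilder_openStream(builder, &stream);   // check the result in real code
        AAudioStreamBuilder_delete(builder);
        return stream;  // session ID can then be read with AAudioStream_getSessionId()
    }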
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index 82445e7..d56701b 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -15,9 +15,9 @@
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "AAudioStreamParameters"
#include <utils/Log.h>
-#include <hardware/audio.h>
+#include <system/audio.h>
#include "AAudioStreamParameters.h"
@@ -38,30 +38,43 @@
mSamplesPerFrame = other.mSamplesPerFrame;
mSampleRate = other.mSampleRate;
mDeviceId = other.mDeviceId;
+ mSessionId = other.mSessionId;
mSharingMode = other.mSharingMode;
mAudioFormat = other.mAudioFormat;
mDirection = other.mDirection;
mBufferCapacity = other.mBufferCapacity;
+ mUsage = other.mUsage;
+ mContentType = other.mContentType;
+ mInputPreset = other.mInputPreset;
}
aaudio_result_t AAudioStreamParameters::validate() const {
if (mSamplesPerFrame != AAUDIO_UNSPECIFIED
&& (mSamplesPerFrame < SAMPLES_PER_FRAME_MIN || mSamplesPerFrame > SAMPLES_PER_FRAME_MAX)) {
- ALOGE("AAudioStreamParameters: channelCount out of range = %d", mSamplesPerFrame);
+ ALOGE("channelCount out of range = %d", mSamplesPerFrame);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
if (mDeviceId < 0) {
- ALOGE("AAudioStreamParameters: deviceId out of range = %d", mDeviceId);
+ ALOGE("deviceId out of range = %d", mDeviceId);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
+ // All Session ID values are legal.
+ switch (mSessionId) {
+ case AAUDIO_SESSION_ID_NONE:
+ case AAUDIO_SESSION_ID_ALLOCATE:
+ break;
+ default:
+ break;
+ }
+
switch (mSharingMode) {
case AAUDIO_SHARING_MODE_EXCLUSIVE:
case AAUDIO_SHARING_MODE_SHARED:
break;
default:
- ALOGE("AAudioStreamParameters: illegal sharingMode = %d", mSharingMode);
+ ALOGE("illegal sharingMode = %d", mSharingMode);
return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
// break;
}
@@ -72,19 +85,19 @@
case AAUDIO_FORMAT_PCM_FLOAT:
break; // valid
default:
- ALOGE("AAudioStreamParameters: audioFormat not valid = %d", mAudioFormat);
+ ALOGE("audioFormat not valid = %d", mAudioFormat);
return AAUDIO_ERROR_INVALID_FORMAT;
// break;
}
if (mSampleRate != AAUDIO_UNSPECIFIED
&& (mSampleRate < SAMPLE_RATE_HZ_MIN || mSampleRate > SAMPLE_RATE_HZ_MAX)) {
- ALOGE("AAudioStreamParameters: sampleRate out of range = %d", mSampleRate);
+ ALOGE("sampleRate out of range = %d", mSampleRate);
return AAUDIO_ERROR_INVALID_RATE;
}
if (mBufferCapacity < 0) {
- ALOGE("AAudioStreamParameters: bufferCapacity out of range = %d", mBufferCapacity);
+ ALOGE("bufferCapacity out of range = %d", mBufferCapacity);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
@@ -93,7 +106,55 @@
case AAUDIO_DIRECTION_OUTPUT:
break; // valid
default:
- ALOGE("AAudioStreamParameters: direction not valid = %d", mDirection);
+ ALOGE("direction not valid = %d", mDirection);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ // break;
+ }
+
+ switch (mUsage) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_USAGE_MEDIA:
+ case AAUDIO_USAGE_VOICE_COMMUNICATION:
+ case AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+ case AAUDIO_USAGE_ALARM:
+ case AAUDIO_USAGE_NOTIFICATION:
+ case AAUDIO_USAGE_NOTIFICATION_RINGTONE:
+ case AAUDIO_USAGE_NOTIFICATION_EVENT:
+ case AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+ case AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+ case AAUDIO_USAGE_ASSISTANCE_SONIFICATION:
+ case AAUDIO_USAGE_GAME:
+ case AAUDIO_USAGE_ASSISTANT:
+ break; // valid
+ default:
+ ALOGE("usage not valid = %d", mUsage);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ // break;
+ }
+
+ switch (mContentType) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_CONTENT_TYPE_MUSIC:
+ case AAUDIO_CONTENT_TYPE_MOVIE:
+ case AAUDIO_CONTENT_TYPE_SONIFICATION:
+ case AAUDIO_CONTENT_TYPE_SPEECH:
+ break; // valid
+ default:
+ ALOGE("content type not valid = %d", mContentType);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ // break;
+ }
+
+ switch (mInputPreset) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_INPUT_PRESET_GENERIC:
+ case AAUDIO_INPUT_PRESET_CAMCORDER:
+ case AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION:
+ case AAUDIO_INPUT_PRESET_VOICE_RECOGNITION:
+ case AAUDIO_INPUT_PRESET_UNPROCESSED:
+ break; // valid
+ default:
+ ALOGE("input preset not valid = %d", mInputPreset);
return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
// break;
}
@@ -102,12 +163,15 @@
}
void AAudioStreamParameters::dump() const {
- ALOGD("AAudioStreamParameters mDeviceId = %d", mDeviceId);
- ALOGD("AAudioStreamParameters mSampleRate = %d", mSampleRate);
- ALOGD("AAudioStreamParameters mSamplesPerFrame = %d", mSamplesPerFrame);
- ALOGD("AAudioStreamParameters mSharingMode = %d", (int)mSharingMode);
- ALOGD("AAudioStreamParameters mAudioFormat = %d", (int)mAudioFormat);
- ALOGD("AAudioStreamParameters mDirection = %d", mDirection);
- ALOGD("AAudioStreamParameters mBufferCapacity = %d", mBufferCapacity);
+ ALOGD("mDeviceId = %6d", mDeviceId);
+ ALOGD("mSessionId = %6d", mSessionId);
+ ALOGD("mSampleRate = %6d", mSampleRate);
+ ALOGD("mSamplesPerFrame = %6d", mSamplesPerFrame);
+ ALOGD("mSharingMode = %6d", (int)mSharingMode);
+ ALOGD("mAudioFormat = %6d", (int)mAudioFormat);
+ ALOGD("mDirection = %6d", mDirection);
+ ALOGD("mBufferCapacity = %6d", mBufferCapacity);
+ ALOGD("mUsage = %6d", mUsage);
+ ALOGD("mContentType = %6d", mContentType);
+ ALOGD("mInputPreset = %6d", mInputPreset);
}
-
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index 5e67c93..ce5dacd 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -88,6 +88,38 @@
mDirection = direction;
}
+ aaudio_usage_t getUsage() const {
+ return mUsage;
+ }
+
+ void setUsage(aaudio_usage_t usage) {
+ mUsage = usage;
+ }
+
+ aaudio_content_type_t getContentType() const {
+ return mContentType;
+ }
+
+ void setContentType(aaudio_content_type_t contentType) {
+ mContentType = contentType;
+ }
+
+ aaudio_input_preset_t getInputPreset() const {
+ return mInputPreset;
+ }
+
+ void setInputPreset(aaudio_input_preset_t inputPreset) {
+ mInputPreset = inputPreset;
+ }
+
+ aaudio_session_id_t getSessionId() const {
+ return mSessionId;
+ }
+
+ void setSessionId(aaudio_session_id_t sessionId) {
+ mSessionId = sessionId;
+ }
+
int32_t calculateBytesPerFrame() const {
return getSamplesPerFrame() * AAudioConvert_formatToSizeInBytes(getFormat());
}
@@ -109,7 +141,11 @@
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
+ aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
+ aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
+ aaudio_input_preset_t mInputPreset = AAUDIO_UNSPECIFIED;
int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
+ aaudio_session_id_t mSessionId = AAUDIO_SESSION_ID_NONE;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 8dcc37a..358021b 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -43,7 +43,7 @@
LOG_ALWAYS_FATAL_IF(!(getState() == AAUDIO_STREAM_STATE_CLOSED
|| getState() == AAUDIO_STREAM_STATE_UNINITIALIZED
|| getState() == AAUDIO_STREAM_STATE_DISCONNECTED),
- "aaudio stream still in use, state = %s",
+ "~AudioStream() - still in use, state = %s",
AAudio_convertStreamStateToText(getState()));
mPlayerBase->clearParentReference(); // remove reference to this AudioStream
@@ -74,15 +74,28 @@
}
// Copy parameters from the Builder because the Builder may be deleted after this call.
+ // TODO AudioStream should be a subclass of AudioStreamParameters
mSamplesPerFrame = builder.getSamplesPerFrame();
mSampleRate = builder.getSampleRate();
mDeviceId = builder.getDeviceId();
mFormat = builder.getFormat();
mSharingMode = builder.getSharingMode();
mSharingModeMatchRequired = builder.isSharingModeMatchRequired();
-
mPerformanceMode = builder.getPerformanceMode();
+ mUsage = builder.getUsage();
+ if (mUsage == AAUDIO_UNSPECIFIED) {
+ mUsage = AAUDIO_USAGE_MEDIA;
+ }
+ mContentType = builder.getContentType();
+ if (mContentType == AAUDIO_UNSPECIFIED) {
+ mContentType = AAUDIO_CONTENT_TYPE_MUSIC;
+ }
+ mInputPreset = builder.getInputPreset();
+ if (mInputPreset == AAUDIO_UNSPECIFIED) {
+ mInputPreset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
+ }
+
// callbacks
mFramesPerDataCallback = builder.getFramesPerDataCallback();
mDataCallbackProc = builder.getDataCallbackProc();
@@ -91,18 +104,159 @@
mErrorCallbackUserData = builder.getErrorCallbackUserData();
// This is very helpful for debugging in the future. Please leave it in.
- ALOGI("AudioStream::open() rate = %d, channels = %d, format = %d, sharing = %s, dir = %s",
+ ALOGI("open() rate = %d, channels = %d, format = %d, sharing = %s, dir = %s",
mSampleRate, mSamplesPerFrame, mFormat,
AudioStream_convertSharingModeToShortText(mSharingMode),
(getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "OUTPUT" : "INPUT");
- ALOGI("AudioStream::open() device = %d, perfMode = %d, callback: %s with frames = %d",
- mDeviceId, mPerformanceMode,
- (mDataCallbackProc == nullptr ? "OFF" : "ON"),
+ ALOGI("open() device = %d, sessionId = %d, perfMode = %d, callback: %s with frames = %d",
+ mDeviceId,
+ mSessionId,
+ mPerformanceMode,
+ (isDataCallbackSet() ? "ON" : "OFF"),
mFramesPerDataCallback);
+ ALOGI("open() usage = %d, contentType = %d, inputPreset = %d",
+ mUsage, mContentType, mInputPreset);
return AAUDIO_OK;
}
+aaudio_result_t AudioStream::safeStart() {
+ std::lock_guard<std::mutex> lock(mStreamLock);
+ if (collidesWithCallback()) {
+ ALOGE("%s cannot be called from a callback!", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return requestStart();
+}
+
+aaudio_result_t AudioStream::safePause() {
+ if (!isPauseSupported()) {
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ std::lock_guard<std::mutex> lock(mStreamLock);
+ if (collidesWithCallback()) {
+ ALOGE("%s cannot be called from a callback!", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ switch (getState()) {
+ // Proceed with pausing.
+ case AAUDIO_STREAM_STATE_STARTING:
+ case AAUDIO_STREAM_STATE_STARTED:
+ case AAUDIO_STREAM_STATE_DISCONNECTED:
+ break;
+
+ // Transition from one inactive state to another.
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_STOPPED:
+ case AAUDIO_STREAM_STATE_FLUSHED:
+ setState(AAUDIO_STREAM_STATE_PAUSED);
+ return AAUDIO_OK;
+
+ // Redundant?
+ case AAUDIO_STREAM_STATE_PAUSING:
+ case AAUDIO_STREAM_STATE_PAUSED:
+ return AAUDIO_OK;
+
+ // Don't interfere with transitional states or when closed.
+ case AAUDIO_STREAM_STATE_STOPPING:
+ case AAUDIO_STREAM_STATE_FLUSHING:
+ case AAUDIO_STREAM_STATE_CLOSING:
+ case AAUDIO_STREAM_STATE_CLOSED:
+ default:
+ ALOGW("safePause() stream not running, state = %s",
+ AAudio_convertStreamStateToText(getState()));
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ return requestPause();
+}
+
+aaudio_result_t AudioStream::safeFlush() {
+ if (!isFlushSupported()) {
+ ALOGE("flush not supported for this stream");
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+ }
+
+ std::lock_guard<std::mutex> lock(mStreamLock);
+ if (collidesWithCallback()) {
+ ALOGE("stream cannot be flushed from a callback!");
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ aaudio_result_t result = AAudio_isFlushAllowed(getState());
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ return requestFlush();
+}
+
+aaudio_result_t AudioStream::safeStop() {
+ std::lock_guard<std::mutex> lock(mStreamLock);
+ if (collidesWithCallback()) {
+ ALOGE("stream cannot be stopped from a callback!");
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ switch (getState()) {
+ // Proceed with stopping.
+ case AAUDIO_STREAM_STATE_STARTING:
+ case AAUDIO_STREAM_STATE_STARTED:
+ case AAUDIO_STREAM_STATE_DISCONNECTED:
+ break;
+
+ // Transition from one inactive state to another.
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_PAUSED:
+ case AAUDIO_STREAM_STATE_FLUSHED:
+ setState(AAUDIO_STREAM_STATE_STOPPED);
+ return AAUDIO_OK;
+
+ // Redundant?
+ case AAUDIO_STREAM_STATE_STOPPING:
+ case AAUDIO_STREAM_STATE_STOPPED:
+ return AAUDIO_OK;
+
+ // Don't interfere with transitional states or when closed.
+ case AAUDIO_STREAM_STATE_PAUSING:
+ case AAUDIO_STREAM_STATE_FLUSHING:
+ case AAUDIO_STREAM_STATE_CLOSING:
+ case AAUDIO_STREAM_STATE_CLOSED:
+ default:
+ ALOGW("requestStop() stream not running, state = %s",
+ AAudio_convertStreamStateToText(getState()));
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ return requestStop();
+}
+
+aaudio_result_t AudioStream::safeClose() {
+ std::lock_guard<std::mutex> lock(mStreamLock);
+ if (collidesWithCallback()) {
+ ALOGE("%s cannot be called from a callback!", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return close();
+}
+
+void AudioStream::setState(aaudio_stream_state_t state) {
+ ALOGV("%s(%p) from %d to %d", __func__, this, mState, state);
+ // CLOSED is a final state
+ if (mState == AAUDIO_STREAM_STATE_CLOSED) {
+ ALOGE("%s(%p) tried to set to %d but already CLOSED", __func__, this, state);
+
+ // Once DISCONNECTED, we can only move to CLOSED state.
+ } else if (mState == AAUDIO_STREAM_STATE_DISCONNECTED
+ && state != AAUDIO_STREAM_STATE_CLOSED) {
+ ALOGE("%s(%p) tried to set to %d but already DISCONNECTED", __func__, this, state);
+
+ } else {
+ mState = state;
+ }
+}
aaudio_result_t AudioStream::waitForStateChange(aaudio_stream_state_t currentState,
aaudio_stream_state_t *nextState,
@@ -163,7 +317,7 @@
void* threadArg)
{
if (mHasThread) {
- ALOGE("AudioStream::createThread() - mHasThread already true");
+ ALOGE("createThread() - mHasThread already true");
return AAUDIO_ERROR_INVALID_STATE;
}
if (threadProc == nullptr) {
@@ -175,8 +329,22 @@
setPeriodNanoseconds(periodNanoseconds);
int err = pthread_create(&mThread, nullptr, AudioStream_internalThreadProc, this);
if (err != 0) {
- return AAudioConvert_androidToAAudioResult(-errno);
+ android::status_t status = -errno;
+ ALOGE("createThread() - pthread_create() failed, %d", status);
+ return AAudioConvert_androidToAAudioResult(status);
} else {
+ // TODO Use AAudioThread or maybe AndroidThread
+ // Name the thread with an increasing index, "AAudio_#", for debugging.
+ static std::atomic<uint32_t> nextThreadIndex{1};
+ char name[16]; // max length for a pthread_name
+ uint32_t index = nextThreadIndex++;
+ // Wrap the index so that we do not hit the 16 char limit
+ // and to avoid hard-to-read large numbers.
+ index = index % 100000; // arbitrary
+ snprintf(name, sizeof(name), "AAudio_%u", index);
+ err = pthread_setname_np(mThread, name);
+ ALOGW_IF((err != 0), "Could not set name of AAudio thread. err = %d", err);
+
mHasThread = true;
return AAUDIO_OK;
}
@@ -185,7 +353,7 @@
aaudio_result_t AudioStream::joinThread(void** returnArg, int64_t timeoutNanoseconds)
{
if (!mHasThread) {
- ALOGE("AudioStream::joinThread() - but has no thread");
+ ALOGE("joinThread() - but has no thread");
return AAUDIO_ERROR_INVALID_STATE;
}
#if 0
@@ -199,6 +367,57 @@
return err ? AAudioConvert_androidToAAudioResult(-errno) : mThreadRegistrationResult;
}
+aaudio_data_callback_result_t AudioStream::maybeCallDataCallback(void *audioData,
+ int32_t numFrames) {
+ aaudio_data_callback_result_t result = AAUDIO_CALLBACK_RESULT_STOP;
+ AAudioStream_dataCallback dataCallback = getDataCallbackProc();
+ if (dataCallback != nullptr) {
+ // Store thread ID of caller to detect stop() and close() calls from callback.
+ pid_t expected = CALLBACK_THREAD_NONE;
+ if (mDataCallbackThread.compare_exchange_strong(expected, gettid())) {
+ result = (*dataCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ audioData,
+ numFrames);
+ mDataCallbackThread.store(CALLBACK_THREAD_NONE);
+ } else {
+ ALOGW("%s() data callback already running!", __func__);
+ }
+ }
+ return result;
+}
+
+void AudioStream::maybeCallErrorCallback(aaudio_result_t result) {
+ AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
+ if (errorCallback != nullptr) {
+ // Store thread ID of caller to detect stop() and close() calls from callback.
+ pid_t expected = CALLBACK_THREAD_NONE;
+ if (mErrorCallbackThread.compare_exchange_strong(expected, gettid())) {
+ (*errorCallback)(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ result);
+ mErrorCallbackThread.store(CALLBACK_THREAD_NONE);
+ } else {
+ ALOGW("%s() error callback already running!", __func__);
+ }
+ }
+}
+
+// Is this running on the same thread as a callback?
+// Note: This cannot be implemented using a thread_local because that would
+// require using a thread_local variable that is shared between streams.
+// So a thread_local variable would prevent stopping or closing stream A from
+// a callback on stream B, which is currently legal and not so terrible.
+bool AudioStream::collidesWithCallback() const {
+ pid_t thisThread = gettid();
+ // Compare the current thread ID with the thread ID of the callback
+ // threads to see if they match. If so then this code is being
+ // called from one of the stream callback functions.
+ return ((mErrorCallbackThread.load() == thisThread)
+ || (mDataCallbackThread.load() == thisThread));
+}
#if AAUDIO_USE_VOLUME_SHAPER
android::media::VolumeShaper::Status AudioStream::applyVolumeShaper(
@@ -209,6 +428,12 @@
}
#endif
+void AudioStream::setDuckAndMuteVolume(float duckAndMuteVolume) {
+ ALOGD("%s() to %f", __func__, duckAndMuteVolume);
+ mDuckAndMuteVolume = duckAndMuteVolume;
+ doSetVolume(); // apply this change
+}
+
AudioStream::MyPlayerBase::MyPlayerBase(AudioStream *parent) : mParent(parent) {
}
@@ -230,7 +455,6 @@
}
}
-
void AudioStream::MyPlayerBase::destroy() {
unregisterWithAudioManager();
}
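[Editor's note: the thread-naming pattern added above (atomic index, 16-character limit, pthread_setname_np) can be exercised on its own. The following is a minimal stand-alone sketch of that pattern, not part of the patch; it assumes the GNU/Android two-argument pthread_setname_np(pthread_t, const char*) and a POSIX-threads build.]

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <pthread.h>

static void *threadProc(void *) { return nullptr; }

int main() {
    // Name each worker thread with an increasing index, "AAudio_#", for debugging.
    static std::atomic<uint32_t> nextThreadIndex{1};
    pthread_t thread;
    if (pthread_create(&thread, nullptr, threadProc, nullptr) != 0) {
        return 1;
    }
    char name[16];                                 // pthread name limit, including '\0'
    uint32_t index = nextThreadIndex++ % 100000;   // wrap so the name stays short
    std::snprintf(name, sizeof(name), "AAudio_%u", index);
    int err = pthread_setname_np(thread, name);    // GNU/Android two-argument variant
    if (err != 0) {
        std::fprintf(stderr, "Could not set thread name, err = %d\n", err);
    }
    pthread_join(thread, nullptr);
    return 0;
}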
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 34202d2..31b895c 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -39,6 +39,8 @@
class AudioStreamBuilder;
+constexpr pid_t CALLBACK_THREAD_NONE = 0;
+
/**
* AAudio audio stream.
*/
@@ -49,14 +51,46 @@
virtual ~AudioStream();
+ /**
+ * Lock a mutex and make sure we are not calling from a callback function.
+ * @return result of requestStart();
+ */
+ aaudio_result_t safeStart();
+
+ aaudio_result_t safePause();
+
+ aaudio_result_t safeFlush();
+
+ aaudio_result_t safeStop();
+
+ aaudio_result_t safeClose();
// =========== Begin ABSTRACT methods ===========================
+protected:
/* Asynchronous requests.
* Use waitForStateChange() to wait for completion.
*/
virtual aaudio_result_t requestStart() = 0;
+ /**
+ * Check the state to see if Pause is currently legal.
+ *
+ * @param result pointer to return code
+ * @return true if OK to continue, if false then return result
+ */
+ bool checkPauseStateTransition(aaudio_result_t *result);
+
+ virtual bool isFlushSupported() const {
+ // Only implement FLUSH for OUTPUT streams.
+ return false;
+ }
+
+ virtual bool isPauseSupported() const {
+ // Only implement PAUSE for OUTPUT streams.
+ return false;
+ }
+
virtual aaudio_result_t requestPause()
{
// Only implement this for OUTPUT streams.
@@ -70,6 +104,7 @@
virtual aaudio_result_t requestStop() = 0;
+public:
virtual aaudio_result_t getTimestamp(clockid_t clockId,
int64_t *framePosition,
int64_t *timeNanoseconds) = 0;
@@ -81,7 +116,6 @@
*/
virtual aaudio_result_t updateStateMachine() = 0;
-
// =========== End ABSTRACT methods ===========================
virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
@@ -188,6 +222,22 @@
virtual aaudio_direction_t getDirection() const = 0;
+ aaudio_usage_t getUsage() const {
+ return mUsage;
+ }
+
+ aaudio_content_type_t getContentType() const {
+ return mContentType;
+ }
+
+ aaudio_input_preset_t getInputPreset() const {
+ return mInputPreset;
+ }
+
+ int32_t getSessionId() const {
+ return mSessionId;
+ }
+
/**
* This is only valid after setSamplesPerFrame() and setFormat() have been called.
*/
@@ -202,6 +252,20 @@
return AAudioConvert_formatToSizeInBytes(mFormat);
}
+ /**
+ * This is only valid after setSamplesPerFrame() and setDeviceFormat() have been called.
+ */
+ int32_t getBytesPerDeviceFrame() const {
+ return mSamplesPerFrame * getBytesPerDeviceSample();
+ }
+
+ /**
+ * This is only valid after setDeviceFormat() has been called.
+ */
+ int32_t getBytesPerDeviceSample() const {
+ return AAudioConvert_formatToSizeInBytes(getDeviceFormat());
+ }
+
virtual int64_t getFramesWritten() = 0;
virtual int64_t getFramesRead() = 0;
@@ -209,13 +273,19 @@
AAudioStream_dataCallback getDataCallbackProc() const {
return mDataCallbackProc;
}
+
AAudioStream_errorCallback getErrorCallbackProc() const {
return mErrorCallbackProc;
}
+ aaudio_data_callback_result_t maybeCallDataCallback(void *audioData, int32_t numFrames);
+
+ void maybeCallErrorCallback(aaudio_result_t result);
+
void *getDataCallbackUserData() const {
return mDataCallbackUserData;
}
+
void *getErrorCallbackUserData() const {
return mErrorCallbackUserData;
}
@@ -224,10 +294,25 @@
return mFramesPerDataCallback;
}
- bool isDataCallbackActive() {
- return (mDataCallbackProc != nullptr) && isActive();
+ /**
+ * @return true if data callback has been specified
+ */
+ bool isDataCallbackSet() const {
+ return mDataCallbackProc != nullptr;
}
+ /**
+ * @return true if data callback has been specified and stream is running
+ */
+ bool isDataCallbackActive() const {
+ return isDataCallbackSet() && isActive();
+ }
+
+ /**
+ * @return true if called from the same thread as the callback
+ */
+ bool collidesWithCallback() const;
+
// ============== I/O ===========================
// A Stream will only implement read() or write() depending on its direction.
virtual aaudio_result_t write(const void *buffer __unused,
@@ -243,12 +328,9 @@
}
// This is used by the AudioManager to duck and mute the stream when changing audio focus.
- void setDuckAndMuteVolume(float duckAndMuteVolume) {
- mDuckAndMuteVolume = duckAndMuteVolume;
- doSetVolume(); // apply this change
- }
+ void setDuckAndMuteVolume(float duckAndMuteVolume);
- float getDuckAndMuteVolume() {
+ float getDuckAndMuteVolume() const {
return mDuckAndMuteVolume;
}
@@ -288,11 +370,13 @@
return mPlayerBase->getResult();
}
+ // Pass pause request through PlayerBase for tracking.
aaudio_result_t systemPause() {
mPlayerBase->pause();
return mPlayerBase->getResult();
}
+ // Pass stop request through PlayerBase for tracking.
aaudio_result_t systemStop() {
mPlayerBase->stop();
return mPlayerBase->getResult();
@@ -331,17 +415,17 @@
android::status_t playerStart() override {
// mParent should NOT be null. So go ahead and crash if it is.
- mResult = mParent->requestStart();
+ mResult = mParent->safeStart();
return AAudioConvert_aaudioToAndroidStatus(mResult);
}
android::status_t playerPause() override {
- mResult = mParent->requestPause();
+ mResult = mParent->safePause();
return AAudioConvert_aaudioToAndroidStatus(mResult);
}
android::status_t playerStop() override {
- mResult = mParent->requestStop();
+ mResult = mParent->safeStop();
return AAudioConvert_aaudioToAndroidStatus(mResult);
}
@@ -371,6 +455,7 @@
/**
* This should not be called after the open() call.
+ * TODO for multiple setters: assert(mState == AAUDIO_STREAM_STATE_UNINITIALIZED)
*/
void setSampleRate(int32_t sampleRate) {
mSampleRate = sampleRate;
@@ -397,15 +482,26 @@
mFormat = format;
}
- void setState(aaudio_stream_state_t state) {
- mState = state;
+ /**
+ * This should not be called after the open() call.
+ */
+ void setDeviceFormat(aaudio_format_t format) {
+ mDeviceFormat = format;
}
+ aaudio_format_t getDeviceFormat() const {
+ return mDeviceFormat;
+ }
+
+ void setState(aaudio_stream_state_t state);
+
void setDeviceId(int32_t deviceId) {
mDeviceId = deviceId;
}
- std::mutex mStreamMutex;
+ void setSessionId(int32_t sessionId) {
+ mSessionId = sessionId;
+ }
std::atomic<bool> mCallbackEnabled{false};
@@ -413,6 +509,21 @@
protected:
+ /**
+ * Either convert the data from device format to app format and return a pointer
+ * to the conversion buffer,
+ * OR just pass back the original pointer.
+ *
+ * Note that this is only used for the INPUT path.
+ *
+ * @param audioData
+ * @param numFrames
+ * @return original pointer or the conversion buffer
+ */
+ virtual const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+ return audioData;
+ }
+
void setPeriodNanoseconds(int64_t periodNanoseconds) {
mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
}
@@ -421,40 +532,74 @@
return mPeriodNanoseconds.load(std::memory_order_acquire);
}
+ /**
+ * This should not be called after the open() call.
+ */
+ void setUsage(aaudio_usage_t usage) {
+ mUsage = usage;
+ }
+
+ /**
+ * This should not be called after the open() call.
+ */
+ void setContentType(aaudio_content_type_t contentType) {
+ mContentType = contentType;
+ }
+
+ /**
+ * This should not be called after the open() call.
+ */
+ void setInputPreset(aaudio_input_preset_t inputPreset) {
+ mInputPreset = inputPreset;
+ }
+
private:
+
+ std::mutex mStreamLock;
+
const android::sp<MyPlayerBase> mPlayerBase;
// These do not change after open().
- int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
- int32_t mSampleRate = AAUDIO_UNSPECIFIED;
- int32_t mDeviceId = AAUDIO_UNSPECIFIED;
- aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
- bool mSharingModeMatchRequired = false; // must match sharing mode requested
- aaudio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
- aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+ int32_t mSampleRate = AAUDIO_UNSPECIFIED;
+ int32_t mDeviceId = AAUDIO_UNSPECIFIED;
+ aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ bool mSharingModeMatchRequired = false; // must match sharing mode requested
+ aaudio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
- aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
+ aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
+ aaudio_input_preset_t mInputPreset = AAUDIO_UNSPECIFIED;
+
+ int32_t mSessionId = AAUDIO_UNSPECIFIED;
+
+ // Sometimes the hardware is operating with a different format from the app.
+ // Then we require conversion in AAudio.
+ aaudio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
// callback ----------------------------------
AAudioStream_dataCallback mDataCallbackProc = nullptr; // external callback functions
void *mDataCallbackUserData = nullptr;
int32_t mFramesPerDataCallback = AAUDIO_UNSPECIFIED; // frames
+ std::atomic<pid_t> mDataCallbackThread{CALLBACK_THREAD_NONE};
AAudioStream_errorCallback mErrorCallbackProc = nullptr;
void *mErrorCallbackUserData = nullptr;
+ std::atomic<pid_t> mErrorCallbackThread{CALLBACK_THREAD_NONE};
// background thread ----------------------------------
- bool mHasThread = false;
- pthread_t mThread; // initialized in constructor
+ bool mHasThread = false;
+ pthread_t mThread; // initialized in constructor
// These are set by the application thread and then read by the audio pthread.
- std::atomic<int64_t> mPeriodNanoseconds; // for tuning SCHED_FIFO threads
+ std::atomic<int64_t> mPeriodNanoseconds; // for tuning SCHED_FIFO threads
// TODO make atomic?
- aaudio_audio_thread_proc_t mThreadProc = nullptr;
- void* mThreadArg = nullptr;
- aaudio_result_t mThreadRegistrationResult = AAUDIO_OK;
-
+ aaudio_audio_thread_proc_t mThreadProc = nullptr;
+ void *mThreadArg = nullptr;
+ aaudio_result_t mThreadRegistrationResult = AAUDIO_OK;
};
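[Editor's note: the mDataCallbackThread / mErrorCallbackThread fields above implement a simple re-entrancy guard: the callback thread publishes its identity for the duration of the callback, so stop()/close() can detect when they are invoked from inside that callback. A minimal stand-alone sketch of the same pattern, using standard C++ thread ids instead of gettid(); illustration only, not part of the patch.]

#include <atomic>
#include <cstdio>
#include <thread>

class CallbackGuard {
public:
    // Try to mark the calling thread as the one running the callback.
    // Returns false if a callback is already in progress.
    bool enter() {
        std::thread::id expected{};   // default id == "no thread"
        return mCallbackThread.compare_exchange_strong(expected, std::this_thread::get_id());
    }
    void leave() { mCallbackThread.store(std::thread::id{}); }

    // True when the caller is the thread currently inside the callback.
    bool collidesWithCallback() const {
        return mCallbackThread.load() == std::this_thread::get_id();
    }

private:
    std::atomic<std::thread::id> mCallbackThread{};
};

int main() {
    CallbackGuard guard;
    std::thread worker([&guard] {
        if (guard.enter()) {
            // A stop() or close() issued from here would see collides == 1.
            std::printf("inside callback, collides = %d\n", guard.collidesWithCallback());
            guard.leave();
        }
    });
    worker.join();
    std::printf("outside callback, collides = %d\n", guard.collidesWithCallback());
    return 0;
}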
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 09ebb3e..3a7a578 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudio"
+#define LOG_TAG "AudioStreamBuilder"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -87,7 +87,7 @@
break;
default:
- ALOGE("AudioStreamBuilder(): bad direction = %d", direction);
+ ALOGE("%s() bad direction = %d", __func__, direction);
result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
}
return result;
@@ -99,7 +99,7 @@
aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
AudioStream *audioStream = nullptr;
if (streamPtr == nullptr) {
- ALOGE("AudioStreamBuilder::build() streamPtr is null");
+ ALOGE("%s() streamPtr is null", __func__);
return AAUDIO_ERROR_NULL;
}
*streamPtr = nullptr;
@@ -124,13 +124,11 @@
if (mapExclusivePolicy == AAUDIO_UNSPECIFIED) {
mapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
}
- ALOGD("AudioStreamBuilder(): mmapPolicy = %d, mapExclusivePolicy = %d",
- mmapPolicy, mapExclusivePolicy);
aaudio_sharing_mode_t sharingMode = getSharingMode();
if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
&& (mapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
- ALOGW("AudioStreamBuilder(): EXCLUSIVE sharing mode not supported. Use SHARED.");
+ ALOGD("%s() EXCLUSIVE sharing mode not supported. Use SHARED.", __func__);
sharingMode = AAUDIO_SHARING_MODE_SHARED;
setSharingMode(sharingMode);
}
@@ -141,6 +139,14 @@
// TODO Support other performance settings in MMAP mode.
// Disable MMAP if low latency not requested.
if (getPerformanceMode() != AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
+ ALOGD("%s() MMAP not available because AAUDIO_PERFORMANCE_MODE_LOW_LATENCY not used.",
+ __func__);
+ allowMMap = false;
+ }
+
+ // SessionID and Effects are only supported in Legacy mode.
+ if (getSessionId() != AAUDIO_SESSION_ID_NONE) {
+ ALOGD("%s() MMAP not available because sessionId used.", __func__);
allowMMap = false;
}
@@ -156,7 +162,7 @@
audioStream = nullptr;
if (isMMap && allowLegacy) {
- ALOGD("AudioStreamBuilder.build() MMAP stream did not open so try Legacy path");
+ ALOGV("%s() MMAP stream did not open so try Legacy path", __func__);
// If MMAP stream failed to open then TRY using a legacy stream.
result = builder_createStream(getDirection(), sharingMode,
false, &audioStream);
@@ -190,7 +196,7 @@
case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
break;
default:
- ALOGE("AudioStreamBuilder: illegal performanceMode = %d", mPerformanceMode);
+ ALOGE("illegal performanceMode = %d", mPerformanceMode);
return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
// break;
}
@@ -199,7 +205,7 @@
if (mFramesPerDataCallback != AAUDIO_UNSPECIFIED
&& (mFramesPerDataCallback < FRAMES_PER_DATA_CALLBACK_MIN
|| mFramesPerDataCallback > FRAMES_PER_DATA_CALLBACK_MAX)) {
- ALOGE("AudioStreamBuilder: framesPerDataCallback out of range = %d",
+ ALOGE("framesPerDataCallback out of range = %d",
mFramesPerDataCallback);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index a869886..b09258e 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -22,6 +22,8 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <algorithm>
+
#include "FifoControllerBase.h"
#include "FifoController.h"
#include "FifoControllerIndirect.h"
@@ -43,7 +45,7 @@
int32_t bytesPerBuffer = bytesPerFrame * capacityInFrames;
mStorage = new uint8_t[bytesPerBuffer];
mStorageOwned = true;
- ALOGD("FifoBuffer: capacityInFrames = %d, bytesPerFrame = %d",
+ ALOGV("capacityInFrames = %d, bytesPerFrame = %d",
capacityInFrames, bytesPerFrame);
}
@@ -85,15 +87,14 @@
wrappingBuffer->data[1] = nullptr;
wrappingBuffer->numFrames[1] = 0;
if (framesAvailable > 0) {
-
uint8_t *source = &mStorage[convertFramesToBytes(startIndex)];
// Does the available data cross the end of the FIFO?
if ((startIndex + framesAvailable) > mFrameCapacity) {
wrappingBuffer->data[0] = source;
- wrappingBuffer->numFrames[0] = mFrameCapacity - startIndex;
+ fifo_frames_t firstFrames = mFrameCapacity - startIndex;
+ wrappingBuffer->numFrames[0] = firstFrames;
wrappingBuffer->data[1] = &mStorage[0];
- wrappingBuffer->numFrames[1] = mFrameCapacity - startIndex;
-
+ wrappingBuffer->numFrames[1] = framesAvailable - firstFrames;
} else {
wrappingBuffer->data[0] = source;
wrappingBuffer->numFrames[0] = framesAvailable;
@@ -102,18 +103,19 @@
wrappingBuffer->data[0] = nullptr;
wrappingBuffer->numFrames[0] = 0;
}
-
}
fifo_frames_t FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
- fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
+ // The FIFO might be overfull so clip to capacity.
+ fifo_frames_t framesAvailable = std::min(mFifo->getFullFramesAvailable(), mFrameCapacity);
fifo_frames_t startIndex = mFifo->getReadIndex();
fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
return framesAvailable;
}
fifo_frames_t FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
- fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
+ // The FIFO might have underrun so clip to capacity.
+ fifo_frames_t framesAvailable = std::min(mFifo->getEmptyFramesAvailable(), mFrameCapacity);
fifo_frames_t startIndex = mFifo->getWriteIndex();
fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
return framesAvailable;
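[Editor's note: the FifoBuffer change above fixes the second region of the wrap-around case, which was previously set to mFrameCapacity - startIndex instead of the remaining frames. A stand-alone sketch of the corrected split, with hypothetical names; illustration only, not part of the patch.]

#include <cassert>
#include <cstdio>

struct WrappingRegion {
    int offset[2];    // start frame index of each part within the storage
    int numFrames[2]; // length of each part in frames
};

// capacity: total frames of storage; startIndex < capacity; 0 <= framesAvailable <= capacity.
static WrappingRegion splitWrappingRegion(int capacity, int startIndex, int framesAvailable) {
    WrappingRegion r = {{0, 0}, {0, 0}};
    if (framesAvailable <= 0) {
        return r;
    }
    if (startIndex + framesAvailable > capacity) {
        // The region wraps: the first part runs to the end of storage,
        // the second part holds whatever frames remain (the corrected line above).
        int firstFrames = capacity - startIndex;
        r.offset[0] = startIndex;
        r.numFrames[0] = firstFrames;
        r.offset[1] = 0;
        r.numFrames[1] = framesAvailable - firstFrames;
    } else {
        r.offset[0] = startIndex;
        r.numFrames[0] = framesAvailable;
    }
    return r;
}

int main() {
    WrappingRegion r = splitWrappingRegion(/*capacity*/ 8, /*startIndex*/ 6, /*framesAvailable*/ 5);
    assert(r.numFrames[0] == 2 && r.numFrames[1] == 3);
    std::printf("part0 = %d frames, part1 = %d frames\n", r.numFrames[0], r.numFrames[1]);
    return 0;
}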
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index ee2504d..a6b9f5d 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -19,10 +19,12 @@
#include <utils/Log.h>
#include <stdint.h>
-#include <utils/String16.h>
+
+#include <aaudio/AAudio.h>
+#include <audio_utils/primitives.h>
#include <media/AudioTrack.h>
#include <media/AudioTimestamp.h>
-#include <aaudio/AAudio.h>
+#include <utils/String16.h>
#include "core/AudioStream.h"
#include "legacy/AudioStreamLegacy.h"
@@ -48,19 +50,17 @@
return AudioStreamLegacy_callback;
}
-int32_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer, int32_t numFrames) {
+aaudio_data_callback_result_t AudioStreamLegacy::callDataCallbackFrames(uint8_t *buffer,
+ int32_t numFrames) {
+ void *finalAudioData = buffer;
if (getDirection() == AAUDIO_DIRECTION_INPUT) {
// Increment before because we already got the data from the device.
incrementFramesRead(numFrames);
+ finalAudioData = (void *) maybeConvertDeviceData(buffer, numFrames);
}
// Call using the AAudio callback interface.
- AAudioStream_dataCallback appCallback = getDataCallbackProc();
- aaudio_data_callback_result_t callbackResult = (*appCallback)(
- (AAudioStream *) this,
- getDataCallbackUserData(),
- buffer,
- numFrames);
+ aaudio_data_callback_result_t callbackResult = maybeCallDataCallback(finalAudioData, numFrames);
if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE
&& getDirection() == AAUDIO_DIRECTION_OUTPUT) {
@@ -72,31 +72,40 @@
// Implement FixedBlockProcessor
int32_t AudioStreamLegacy::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
- int32_t numFrames = numBytes / getBytesPerFrame();
- return callDataCallbackFrames(buffer, numFrames);
+ int32_t numFrames = numBytes / getBytesPerDeviceFrame();
+ return (int32_t) callDataCallbackFrames(buffer, numFrames);
}
void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
aaudio_data_callback_result_t callbackResult;
+ // This illegal size can be used to tell AudioFlinger to stop calling us.
+ // This takes advantage of AudioFlinger killing the stream.
+ // TODO add to API in AudioRecord and AudioTrack
+ const size_t SIZE_STOP_CALLBACKS = SIZE_MAX;
switch (opcode) {
case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
- checkForDisconnectRequest();
+ (void) checkForDisconnectRequest(true);
// Note that this code assumes an AudioTrack::Buffer is the same as
// AudioRecord::Buffer
// TODO define our own AudioBuffer and pass it from the subclasses.
AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
- if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED || !mCallbackEnabled.load()) {
- audioBuffer->size = 0; // silence the buffer
+ if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ ALOGW("processCallbackCommon() data, stream disconnected");
+ audioBuffer->size = SIZE_STOP_CALLBACKS;
+ } else if (!mCallbackEnabled.load()) {
+ ALOGW("processCallbackCommon() stopping because callback disabled");
+ audioBuffer->size = SIZE_STOP_CALLBACKS;
} else {
if (audioBuffer->frameCount == 0) {
+ ALOGW("processCallbackCommon() data, frameCount is zero");
return;
}
// If the caller specified an exact size then use a block size adapter.
if (mBlockAdapter != nullptr) {
- int32_t byteCount = audioBuffer->frameCount * getBytesPerFrame();
+ int32_t byteCount = audioBuffer->frameCount * getBytesPerDeviceFrame();
callbackResult = mBlockAdapter->processVariableBlock(
(uint8_t *) audioBuffer->raw, byteCount);
} else {
@@ -105,9 +114,12 @@
audioBuffer->frameCount);
}
if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
- audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
- } else {
- audioBuffer->size = 0;
+ audioBuffer->size = audioBuffer->frameCount * getBytesPerDeviceFrame();
+ } else { // STOP or invalid result
+ ALOGW("%s() callback requested stop, fake an error", __func__);
+ audioBuffer->size = SIZE_STOP_CALLBACKS;
+ // Disable the callback just in case AudioFlinger keeps trying to call us.
+ mCallbackEnabled.store(false);
}
if (updateStateMachine() != AAUDIO_OK) {
@@ -130,26 +142,23 @@
}
}
-
-
-void AudioStreamLegacy::checkForDisconnectRequest() {
+aaudio_result_t AudioStreamLegacy::checkForDisconnectRequest(bool errorCallbackEnabled) {
if (mRequestDisconnect.isRequested()) {
ALOGD("checkForDisconnectRequest() mRequestDisconnect acknowledged");
- forceDisconnect();
+ forceDisconnect(errorCallbackEnabled);
mRequestDisconnect.acknowledge();
mCallbackEnabled.store(false);
+ return AAUDIO_ERROR_DISCONNECTED;
+ } else {
+ return AAUDIO_OK;
}
}
-void AudioStreamLegacy::forceDisconnect() {
+void AudioStreamLegacy::forceDisconnect(bool errorCallbackEnabled) {
if (getState() != AAUDIO_STREAM_STATE_DISCONNECTED) {
setState(AAUDIO_STREAM_STATE_DISCONNECTED);
- if (getErrorCallbackProc() != nullptr) {
- (*getErrorCallbackProc())(
- (AAudioStream *) this,
- getErrorCallbackUserData(),
- AAUDIO_ERROR_DISCONNECTED
- );
+ if (errorCallbackEnabled) {
+ maybeCallErrorCallback(AAUDIO_ERROR_DISCONNECTED);
}
}
}
@@ -175,19 +184,17 @@
int64_t localPosition;
status_t status = extendedTimestamp->getBestTimestamp(&localPosition, timeNanoseconds,
timebase, &location);
- // use MonotonicCounter to prevent retrograde motion.
- mTimestampPosition.update32((int32_t)localPosition);
- *framePosition = mTimestampPosition.get();
+ if (status == OK) {
+ // use MonotonicCounter to prevent retrograde motion.
+ mTimestampPosition.update32((int32_t) localPosition);
+ *framePosition = mTimestampPosition.get();
+ }
// ALOGD("getBestTimestamp() fposition: server = %6lld, kernel = %6lld, location = %d",
// (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_SERVER],
// (long long) extendedTimestamp->mPosition[ExtendedTimestamp::Location::LOCATION_KERNEL],
// (int)location);
- if (status == WOULD_BLOCK) {
- return AAUDIO_ERROR_INVALID_STATE;
- } else {
- return AAudioConvert_androidToAAudioResult(status);
- }
+ return AAudioConvert_androidToAAudioResult(status);
}
void AudioStreamLegacy::onAudioDeviceUpdate(audio_port_handle_t deviceId)
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 7e28579..494edbc 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -112,12 +112,14 @@
void onAudioDeviceUpdate(audio_port_handle_t deviceId);
- void checkForDisconnectRequest();
+ /*
+ * Check to see whether a callback thread has requested a disconnect.
+ * @param errorCallbackEnabled set true to call errorCallback on disconnect
+ * @return AAUDIO_OK or AAUDIO_ERROR_DISCONNECTED
+ */
+ aaudio_result_t checkForDisconnectRequest(bool errorCallbackEnabled);
- void forceDisconnect();
-
- void onStart() { mCallbackEnabled.store(true); }
- void onStop() { mCallbackEnabled.store(false); }
+ void forceDisconnect(bool errorCallbackEnabled = true);
int64_t incrementFramesWritten(int32_t frames) {
return mFramesWritten.increment(frames);
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index bc6e60c..505f2ee 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -19,13 +19,15 @@
#include <utils/Log.h>
#include <stdint.h>
-#include <utils/String16.h>
-#include <media/AudioRecord.h>
-#include <aaudio/AAudio.h>
-#include "AudioClock.h"
+#include <aaudio/AAudio.h>
+#include <audio_utils/primitives.h>
+#include <media/AudioRecord.h>
+#include <utils/String16.h>
+
#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamRecord.h"
+#include "utility/AudioClock.h"
#include "utility/FixedBlockWriter.h"
using namespace android;
@@ -63,10 +65,6 @@
size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
: builder.getBufferCapacity();
- // TODO implement an unspecified Android format then use that.
- audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
- ? AUDIO_FORMAT_PCM_FLOAT
- : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
aaudio_performance_mode_t perfMode = getPerformanceMode();
@@ -82,6 +80,35 @@
break;
}
+ // Preserve behavior of API 26
+ if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
+ setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+ }
+
+ // Maybe change device format to get a FAST path.
+ // AudioRecord does not support FAST mode for FLOAT data.
+ // TODO AudioRecord should allow FLOAT data paths for FAST tracks.
+ // So IF the user asks for low latency FLOAT
+ // AND the sampleRate is likely to be compatible with FAST
+ // THEN request I16 and convert to FLOAT when passing to user.
+ // Note that hard coding 48000 Hz is not ideal because the sampleRate
+ // for a FAST path might not be 48000 Hz.
+ // It normally is but there is a chance that it is not.
+ // And there is no reliable way to know that in advance.
+ // Luckily the consequences of a wrong guess are minor.
+ // We just may not get a FAST track.
+ // But we wouldn't have anyway without this hack.
+ constexpr int32_t kMostLikelySampleRateForFast = 48000;
+ if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
+ && perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
+ && (samplesPerFrame <= 2) // FAST only for mono and stereo
+ && (getSampleRate() == kMostLikelySampleRateForFast
+ || getSampleRate() == AAUDIO_UNSPECIFIED)) {
+ setDeviceFormat(AAUDIO_FORMAT_PCM_I16);
+ } else {
+ setDeviceFormat(getFormat());
+ }
+
uint32_t notificationFrames = 0;
// Setup the callback if there is one.
@@ -96,47 +123,84 @@
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
- ALOGD("AudioStreamRecord::open(), request notificationFrames = %u, frameCount = %u",
- notificationFrames, (uint)frameCount);
- mAudioRecord = new AudioRecord(
- mOpPackageName // const String16& opPackageName TODO does not compile
- );
- if (getDeviceId() != AAUDIO_UNSPECIFIED) {
- mAudioRecord->setInputDevice(getDeviceId());
- }
- mAudioRecord->set(
- AUDIO_SOURCE_VOICE_RECOGNITION,
- getSampleRate(),
- format,
- channelMask,
- frameCount,
- callback,
- callbackData,
- notificationFrames,
- false /*threadCanCallJava*/,
- AUDIO_SESSION_ALLOCATE,
- streamTransferType,
- flags
- // int uid = -1,
- // pid_t pid = -1,
- // const audio_attributes_t* pAttributes = nullptr
- );
+ // Don't call mAudioRecord->setInputDevice() because it will be overwritten by set()!
+ audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
+ ? AUDIO_PORT_HANDLE_NONE
+ : getDeviceId();
- // Did we get a valid track?
- status_t status = mAudioRecord->initCheck();
- if (status != OK) {
- close();
- ALOGE("AudioStreamRecord::open(), initCheck() returned %d", status);
- return AAudioConvert_androidToAAudioResult(status);
+ const audio_content_type_t contentType =
+ AAudioConvert_contentTypeToInternal(builder.getContentType());
+ const audio_source_t source =
+ AAudioConvert_inputPresetToAudioSource(builder.getInputPreset());
+
+ const audio_attributes_t attributes = {
+ .content_type = contentType,
+ .usage = AUDIO_USAGE_UNKNOWN, // only used for output
+ .source = source,
+ .flags = AUDIO_FLAG_NONE, // Different than the AUDIO_INPUT_FLAGS
+ .tags = ""
+ };
+
+ aaudio_session_id_t requestedSessionId = builder.getSessionId();
+ audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
+
+ // ----------- open the AudioRecord ---------------------
+ // Might retry, but never more than once.
+ for (int i = 0; i < 2; i ++) {
+ audio_format_t requestedInternalFormat =
+ AAudioConvert_aaudioToAndroidDataFormat(getDeviceFormat());
+
+ mAudioRecord = new AudioRecord(
+ mOpPackageName // const String16& opPackageName TODO does not compile
+ );
+ mAudioRecord->set(
+ AUDIO_SOURCE_DEFAULT, // ignored because we pass attributes below
+ getSampleRate(),
+ requestedInternalFormat,
+ channelMask,
+ frameCount,
+ callback,
+ callbackData,
+ notificationFrames,
+ false /*threadCanCallJava*/,
+ sessionId,
+ streamTransferType,
+ flags,
+ AUDIO_UID_INVALID, // DEFAULT uid
+ -1, // DEFAULT pid
+ &attributes,
+ selectedDeviceId
+ );
+
+ // Did we get a valid track?
+ status_t status = mAudioRecord->initCheck();
+ if (status != OK) {
+ close();
+ ALOGE("open(), initCheck() returned %d", status);
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+
+ // Check to see if it was worth hacking the deviceFormat.
+ bool gotFastPath = (mAudioRecord->getFlags() & AUDIO_INPUT_FLAG_FAST)
+ == AUDIO_INPUT_FLAG_FAST;
+ if (getFormat() != getDeviceFormat() && !gotFastPath) {
+ // We tried to get a FAST path by switching the device format.
+ // But it didn't work. So we might as well reopen using the same
+ // format for device and for app.
+ ALOGD("%s() used a different device format but no FAST path, reopen", __func__);
+ mAudioRecord.clear();
+ setDeviceFormat(getFormat());
+ } else {
+ break; // Keep the one we just opened.
+ }
}
// Get the actual values from the AudioRecord.
setSamplesPerFrame(mAudioRecord->channelCount());
- setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
int32_t actualSampleRate = mAudioRecord->getSampleRate();
ALOGW_IF(actualSampleRate != getSampleRate(),
- "AudioStreamRecord::open() sampleRate changed from %d to %d",
+ "open() sampleRate changed from %d to %d",
getSampleRate(), actualSampleRate);
setSampleRate(actualSampleRate);
@@ -149,6 +213,29 @@
mBlockAdapter = nullptr;
}
+ // Allocate format conversion buffer if needed.
+ if (getDeviceFormat() == AAUDIO_FORMAT_PCM_I16
+ && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+
+ if (builder.getDataCallbackProc() != nullptr) {
+ // If we have a callback then we need to convert the data into an internal float
+ // array and then pass that entire array to the app.
+ mFormatConversionBufferSizeInFrames =
+ (mCallbackBufferSize != AAUDIO_UNSPECIFIED)
+ ? mCallbackBufferSize : getFramesPerBurst();
+ int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
+ mFormatConversionBufferFloat = std::make_unique<float[]>(numSamples);
+ } else {
+ // If we don't have a callback then we will read into an internal short array
+ // and then convert into the app float array in read().
+ mFormatConversionBufferSizeInFrames = getFramesPerBurst();
+ int32_t numSamples = mFormatConversionBufferSizeInFrames * getSamplesPerFrame();
+ mFormatConversionBufferI16 = std::make_unique<int16_t[]>(numSamples);
+ }
+ ALOGD("%s() setup I16>FLOAT conversion buffer with %d frames",
+ __func__, mFormatConversionBufferSizeInFrames);
+ }
+
// Update performance mode based on the actual stream.
// For example, if the sample rate does not match native then you won't get a FAST track.
audio_input_flags_t actualFlags = mAudioRecord->getFlags();
@@ -164,14 +251,21 @@
// Log warning if we did not get what we asked for.
ALOGW_IF(actualFlags != flags,
- "AudioStreamRecord::open() flags changed from 0x%08X to 0x%08X",
+ "open() flags changed from 0x%08X to 0x%08X",
flags, actualFlags);
ALOGW_IF(actualPerformanceMode != perfMode,
- "AudioStreamRecord::open() perfMode changed from %d to %d",
+ "open() perfMode changed from %d to %d",
perfMode, actualPerformanceMode);
setState(AAUDIO_STREAM_STATE_OPEN);
setDeviceId(mAudioRecord->getRoutedDeviceId());
+
+ aaudio_session_id_t actualSessionId =
+ (requestedSessionId == AAUDIO_SESSION_ID_NONE)
+ ? AAUDIO_SESSION_ID_NONE
+ : (aaudio_session_id_t) mAudioRecord->getSessionId();
+ setSessionId(actualSessionId);
+
mAudioRecord->addAudioDeviceCallback(mDeviceCallback);
return AAUDIO_OK;
@@ -189,6 +283,24 @@
return AudioStream::close();
}
+const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
+ if (mFormatConversionBufferFloat.get() != nullptr) {
+ LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
+ "%s() conversion size %d too large for buffer %d",
+ __func__, numFrames, mFormatConversionBufferSizeInFrames);
+
+ int32_t numSamples = numFrames * getSamplesPerFrame();
+ // Only conversion supported is I16 to FLOAT
+ memcpy_to_float_from_i16(
+ mFormatConversionBufferFloat.get(),
+ (const int16_t *) audioData,
+ numSamples);
+ return mFormatConversionBufferFloat.get();
+ } else {
+ return audioData;
+ }
+}
+
void AudioStreamRecord::processCallback(int event, void *info) {
switch (event) {
case AudioRecord::EVENT_MORE_DATA:
@@ -217,11 +329,13 @@
return AAudioConvert_androidToAAudioResult(err);
}
+ // Enable callback before starting AudioRecord to avoid shutting
+ // down because of a race condition.
+ mCallbackEnabled.store(true);
err = mAudioRecord->start();
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
} else {
- onStart();
setState(AAUDIO_STREAM_STATE_STARTING);
}
return AAUDIO_OK;
@@ -231,15 +345,16 @@
if (mAudioRecord.get() == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
}
- onStop();
setState(AAUDIO_STREAM_STATE_STOPPING);
incrementFramesWritten(getFramesRead() - getFramesWritten()); // TODO review
mTimestampPosition.set(getFramesRead());
mAudioRecord->stop();
- mFramesRead.reset32();
+ mCallbackEnabled.store(false);
+ mFramesWritten.reset32(); // service writes frames, service position reset on stop
mTimestampPosition.reset32();
- checkForDisconnectRequest();
- return AAUDIO_OK;
+ // Pass false to prevent errorCallback from being called after disconnect
+ // when app has already requested a stop().
+ return checkForDisconnectRequest(false);
}
aaudio_result_t AudioStreamRecord::updateStateMachine()
@@ -272,9 +387,10 @@
int32_t numFrames,
int64_t timeoutNanoseconds)
{
- int32_t bytesPerFrame = getBytesPerFrame();
+ int32_t bytesPerDeviceFrame = getBytesPerDeviceFrame();
int32_t numBytes;
- aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+ // This will detect out of range values for numFrames.
+ aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerDeviceFrame, &numBytes);
if (result != AAUDIO_OK) {
return result;
}
@@ -285,19 +401,49 @@
// TODO add timeout to AudioRecord
bool blocking = (timeoutNanoseconds > 0);
- ssize_t bytesRead = mAudioRecord->read(buffer, numBytes, blocking);
- if (bytesRead == WOULD_BLOCK) {
+
+ ssize_t bytesActuallyRead = 0;
+ ssize_t totalBytesRead = 0;
+ if (mFormatConversionBufferI16.get() != nullptr) {
+ // Convert I16 data to float using an intermediate buffer.
+ float *floatBuffer = (float *) buffer;
+ int32_t framesLeft = numFrames;
+ // Perform conversion using multiple read()s if necessary.
+ while (framesLeft > 0) {
+ // Read into short internal buffer.
+ int32_t framesToRead = std::min(framesLeft, mFormatConversionBufferSizeInFrames);
+ size_t bytesToRead = framesToRead * bytesPerDeviceFrame;
+ bytesActuallyRead = mAudioRecord->read(mFormatConversionBufferI16.get(), bytesToRead, blocking);
+ if (bytesActuallyRead <= 0) {
+ break;
+ }
+ totalBytesRead += bytesActuallyRead;
+ int32_t framesToConvert = bytesActuallyRead / bytesPerDeviceFrame;
+ // Convert into app float buffer.
+ size_t numSamples = framesToConvert * getSamplesPerFrame();
+ memcpy_to_float_from_i16(
+ floatBuffer,
+ mFormatConversionBufferI16.get(),
+ numSamples);
+ floatBuffer += numSamples;
+ framesLeft -= framesToConvert;
+ }
+ } else {
+ bytesActuallyRead = mAudioRecord->read(buffer, numBytes, blocking);
+ totalBytesRead = bytesActuallyRead;
+ }
+ if (bytesActuallyRead == WOULD_BLOCK) {
return 0;
- } else if (bytesRead < 0) {
- // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
- // AudioRecord invalidation
- if (bytesRead == DEAD_OBJECT) {
+ } else if (bytesActuallyRead < 0) {
+ // In this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
+ // AudioRecord invalidation.
+ if (bytesActuallyRead == DEAD_OBJECT) {
setState(AAUDIO_STREAM_STATE_DISCONNECTED);
return AAUDIO_ERROR_DISCONNECTED;
}
- return AAudioConvert_androidToAAudioResult(bytesRead);
+ return AAudioConvert_androidToAAudioResult(bytesActuallyRead);
}
- int32_t framesRead = (int32_t)(bytesRead / bytesPerFrame);
+ int32_t framesRead = (int32_t)(totalBytesRead / bytesPerDeviceFrame);
incrementFramesRead(framesRead);
result = updateStateMachine();
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index c1723ba..2f41d34 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -76,6 +76,8 @@
return incrementFramesRead(frames);
}
+ const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) override;
+
private:
android::sp<android::AudioRecord> mAudioRecord;
// adapts between variable sized blocks and fixed size blocks
@@ -83,6 +85,11 @@
// TODO add 64-bit position reporting to AudioRecord and use it.
android::String16 mOpPackageName;
+
+ // Only one type of conversion buffer is used.
+ std::unique_ptr<float[]> mFormatConversionBufferFloat;
+ std::unique_ptr<int16_t[]> mFormatConversionBufferI16;
+ int32_t mFormatConversionBufferSizeInFrames = 0;
};
} /* namespace aaudio */
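[Editor's note: when the record stream opens the device in PCM_I16 to get a FAST track but the app requested PCM_FLOAT, data passes through the mFormatConversionBuffer* members declared above. A minimal sketch of that I16-to-float conversion follows, with a plain loop standing in for audio_utils' memcpy_to_float_from_i16; illustration only, not part of the patch.]

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Convert interleaved 16-bit PCM samples to float in roughly [-1.0, 1.0).
static void convertI16ToFloat(const int16_t *source, float *destination, size_t numSamples) {
    constexpr float kScale = 1.0f / 32768.0f;
    for (size_t i = 0; i < numSamples; ++i) {
        destination[i] = source[i] * kScale;
    }
}

int main() {
    std::vector<int16_t> deviceBuffer = {0, 16384, -32768, 32767}; // as read from the device
    std::vector<float> appBuffer(deviceBuffer.size());             // handed to the app callback
    convertI16ToFloat(deviceBuffer.data(), appBuffer.data(), deviceBuffer.size());
    for (float f : appBuffer) {
        std::printf("%f\n", f);
    }
    return 0;
}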
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 0e9aaef..505cd77 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -22,6 +22,7 @@
#include <media/AudioTrack.h>
#include <aaudio/AAudio.h>
+#include <system/audio.h>
#include "utility/AudioClock.h"
#include "legacy/AudioStreamLegacy.h"
#include "legacy/AudioStreamTrack.h"
@@ -113,14 +114,35 @@
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
- ALOGD("AudioStreamTrack::open(), request notificationFrames = %d, frameCount = %u",
+ ALOGD("open(), request notificationFrames = %d, frameCount = %u",
notificationFrames, (uint)frameCount);
- mAudioTrack = new AudioTrack(); // TODO review
- if (getDeviceId() != AAUDIO_UNSPECIFIED) {
- mAudioTrack->setOutputDevice(getDeviceId());
- }
+
+ // Don't call mAudioTrack->setDeviceId() because it will be overwritten by set()!
+ audio_port_handle_t selectedDeviceId = (getDeviceId() == AAUDIO_UNSPECIFIED)
+ ? AUDIO_PORT_HANDLE_NONE
+ : getDeviceId();
+
+ const audio_content_type_t contentType =
+ AAudioConvert_contentTypeToInternal(builder.getContentType());
+ const audio_usage_t usage =
+ AAudioConvert_usageToInternal(builder.getUsage());
+
+ const audio_attributes_t attributes = {
+ .content_type = contentType,
+ .usage = usage,
+ .source = AUDIO_SOURCE_DEFAULT, // only used for recording
+ .flags = AUDIO_FLAG_NONE, // Different than the AUDIO_OUTPUT_FLAGS
+ .tags = ""
+ };
+
+ static_assert(AAUDIO_UNSPECIFIED == AUDIO_SESSION_ALLOCATE, "Session IDs should match");
+
+ aaudio_session_id_t requestedSessionId = builder.getSessionId();
+ audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
+
+ mAudioTrack = new AudioTrack();
mAudioTrack->set(
- (audio_stream_type_t) AUDIO_STREAM_MUSIC,
+ AUDIO_STREAM_DEFAULT, // ignored because we pass attributes below
getSampleRate(),
format,
channelMask,
@@ -129,17 +151,26 @@
callback,
callbackData,
notificationFrames,
- 0 /*sharedBuffer*/,
- false /*threadCanCallJava*/,
- AUDIO_SESSION_ALLOCATE,
- streamTransferType
- );
+ 0, // DEFAULT sharedBuffer
+ false, // DEFAULT threadCanCallJava
+ sessionId,
+ streamTransferType,
+ NULL, // DEFAULT audio_offload_info_t
+ AUDIO_UID_INVALID, // DEFAULT uid
+ -1, // DEFAULT pid
+ &attributes,
+ // WARNING - If doNotReconnect is set to true then audio stops after plugging
+ // and unplugging headphones a few times.
+ false, // DEFAULT doNotReconnect,
+ 1.0f, // DEFAULT maxRequiredSpeed
+ selectedDeviceId
+ );
// Did we get a valid track?
status_t status = mAudioTrack->initCheck();
if (status != NO_ERROR) {
close();
- ALOGE("AudioStreamTrack::open(), initCheck() returned %d", status);
+ ALOGE("open(), initCheck() returned %d", status);
return AAudioConvert_androidToAAudioResult(status);
}
@@ -150,10 +181,11 @@
aaudio_format_t aaudioFormat =
AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format());
setFormat(aaudioFormat);
+ setDeviceFormat(aaudioFormat);
int32_t actualSampleRate = mAudioTrack->getSampleRate();
ALOGW_IF(actualSampleRate != getSampleRate(),
- "AudioStreamTrack::open() sampleRate changed from %d to %d",
+ "open() sampleRate changed from %d to %d",
getSampleRate(), actualSampleRate);
setSampleRate(actualSampleRate);
@@ -168,6 +200,13 @@
setState(AAUDIO_STREAM_STATE_OPEN);
setDeviceId(mAudioTrack->getRoutedDeviceId());
+
+ aaudio_session_id_t actualSessionId =
+ (requestedSessionId == AAUDIO_SESSION_ID_NONE)
+ ? AAUDIO_SESSION_ID_NONE
+ : (aaudio_session_id_t) mAudioTrack->getSessionId();
+ setSessionId(actualSessionId);
+
mAudioTrack->addAudioDeviceCallback(mDeviceCallback);
// Update performance mode based on the actual stream flags.
@@ -186,10 +225,10 @@
// Log warning if we did not get what we asked for.
ALOGW_IF(actualFlags != flags,
- "AudioStreamTrack::open() flags changed from 0x%08X to 0x%08X",
+ "open() flags changed from 0x%08X to 0x%08X",
flags, actualFlags);
ALOGW_IF(actualPerformanceMode != perfMode,
- "AudioStreamTrack::open() perfMode changed from %d to %d",
+ "open() perfMode changed from %d to %d",
perfMode, actualPerformanceMode);
return AAUDIO_OK;
@@ -224,10 +263,8 @@
}
aaudio_result_t AudioStreamTrack::requestStart() {
- std::lock_guard<std::mutex> lock(mStreamMutex);
-
if (mAudioTrack.get() == nullptr) {
- ALOGE("AudioStreamTrack::requestStart() no AudioTrack");
+ ALOGE("requestStart() no AudioTrack");
return AAUDIO_ERROR_INVALID_STATE;
}
// Get current position so we can detect when the track is playing.
@@ -236,73 +273,62 @@
return AAudioConvert_androidToAAudioResult(err);
}
+ // Enable callback before starting AudioTrack to avoid shutting
+ // down because of a race condition.
+ mCallbackEnabled.store(true);
err = mAudioTrack->start();
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
} else {
- onStart();
setState(AAUDIO_STREAM_STATE_STARTING);
}
return AAUDIO_OK;
}
aaudio_result_t AudioStreamTrack::requestPause() {
- std::lock_guard<std::mutex> lock(mStreamMutex);
-
if (mAudioTrack.get() == nullptr) {
ALOGE("requestPause() no AudioTrack");
return AAUDIO_ERROR_INVALID_STATE;
- } else if (getState() != AAUDIO_STREAM_STATE_STARTING
- && getState() != AAUDIO_STREAM_STATE_STARTED) {
- ALOGE("requestPause(), called when state is %s",
- AAudio_convertStreamStateToText(getState()));
- return AAUDIO_ERROR_INVALID_STATE;
}
- onStop();
+
setState(AAUDIO_STREAM_STATE_PAUSING);
mAudioTrack->pause();
- checkForDisconnectRequest();
+ mCallbackEnabled.store(false);
status_t err = mAudioTrack->getPosition(&mPositionWhenPausing);
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
}
- return AAUDIO_OK;
+ return checkForDisconnectRequest(false);
}
aaudio_result_t AudioStreamTrack::requestFlush() {
- std::lock_guard<std::mutex> lock(mStreamMutex);
-
if (mAudioTrack.get() == nullptr) {
- ALOGE("AudioStreamTrack::requestFlush() no AudioTrack");
- return AAUDIO_ERROR_INVALID_STATE;
- } else if (getState() != AAUDIO_STREAM_STATE_PAUSED) {
- ALOGE("AudioStreamTrack::requestFlush() not paused");
+ ALOGE("requestFlush() no AudioTrack");
return AAUDIO_ERROR_INVALID_STATE;
}
+
setState(AAUDIO_STREAM_STATE_FLUSHING);
incrementFramesRead(getFramesWritten() - getFramesRead());
mAudioTrack->flush();
- mFramesWritten.reset32();
+ mFramesRead.reset32(); // service reads frames, service position reset on flush
mTimestampPosition.reset32();
return AAUDIO_OK;
}
aaudio_result_t AudioStreamTrack::requestStop() {
- std::lock_guard<std::mutex> lock(mStreamMutex);
-
if (mAudioTrack.get() == nullptr) {
- ALOGE("AudioStreamTrack::requestStop() no AudioTrack");
+ ALOGE("requestStop() no AudioTrack");
return AAUDIO_ERROR_INVALID_STATE;
}
- onStop();
+
setState(AAUDIO_STREAM_STATE_STOPPING);
incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
mTimestampPosition.set(getFramesWritten());
- mFramesWritten.reset32();
+ mFramesRead.reset32(); // service reads frames, service position reset on stop
mTimestampPosition.reset32();
mAudioTrack->stop();
- checkForDisconnectRequest();
- return AAUDIO_OK;
+ mCallbackEnabled.store(false);
+ return checkForDisconnectRequest(false);
}
aaudio_result_t AudioStreamTrack::updateStateMachine()
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index a871db4..68608de 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -48,6 +48,16 @@
aaudio_result_t requestFlush() override;
aaudio_result_t requestStop() override;
+ bool isFlushSupported() const override {
+ // Only implement FLUSH for OUTPUT streams.
+ return true;
+ }
+
+ bool isPauseSupported() const override {
+ // Only implement PAUSE for OUTPUT streams.
+ return true;
+ }
+
aaudio_result_t getTimestamp(clockid_t clockId,
int64_t *framePosition,
int64_t *timeNanoseconds) override;
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 612ad27..40ebb76 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -25,6 +25,9 @@
#include "aaudio/AAudio.h"
#include <aaudio/AAudioTesting.h>
+#include <math.h>
+#include <system/audio-base.h>
+#include <assert.h>
#include "utility/AAudioUtilities.h"
@@ -50,50 +53,21 @@
return size;
}
-
// TODO expose and call clamp16_from_float function in primitives.h
static inline int16_t clamp16_from_float(float f) {
- /* Offset is used to expand the valid range of [-1.0, 1.0) into the 16 lsbs of the
- * floating point significand. The normal shift is 3<<22, but the -15 offset
- * is used to multiply by 32768.
- */
- static const float offset = (float)(3 << (22 - 15));
- /* zero = (0x10f << 22) = 0x43c00000 (not directly used) */
- static const int32_t limneg = (0x10f << 22) /*zero*/ - 32768; /* 0x43bf8000 */
- static const int32_t limpos = (0x10f << 22) /*zero*/ + 32767; /* 0x43c07fff */
-
- union {
- float f;
- int32_t i;
- } u;
-
- u.f = f + offset; /* recenter valid range */
- /* Now the valid range is represented as integers between [limneg, limpos].
- * Clamp using the fact that float representation (as an integer) is an ordered set.
- */
- if (u.i < limneg)
- u.i = -32768;
- else if (u.i > limpos)
- u.i = 32767;
- return u.i; /* Return lower 16 bits, the part of interest in the significand. */
+ static const float scale = 1 << 15;
+ return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
}
-// Same but without clipping.
-// Convert -1.0f to +1.0f to -32768 to +32767
-static inline int16_t floatToInt16(float f) {
- static const float offset = (float)(3 << (22 - 15));
- union {
- float f;
- int32_t i;
- } u;
- u.f = f + offset; /* recenter valid range */
- return u.i; /* Return lower 16 bits, the part of interest in the significand. */
+// Clip to valid range of a float sample to prevent excessive volume.
+// By using fmin and fmax we also protect against NaN.
+static float clipToMinMaxHeadroom(float input) {
+ return fmin(MAX_HEADROOM, fmax(MIN_HEADROOM, input));
}
static float clipAndClampFloatToPcm16(float sample, float scaler) {
// Clip to valid range of a float sample to prevent excessive volume.
- if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
- else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+ sample = clipToMinMaxHeadroom(sample);
// Scale and convert to a short.
float fval = sample * scaler;
@@ -104,7 +78,7 @@
int16_t *destination,
int32_t numSamples,
float amplitude) {
- float scaler = amplitude;
+ const float scaler = amplitude;
for (int i = 0; i < numSamples; i++) {
float sample = *source++;
*destination++ = clipAndClampFloatToPcm16(sample, scaler);
@@ -135,7 +109,7 @@
float *destination,
int32_t numSamples,
float amplitude) {
- float scaler = amplitude / SHORT_SCALE;
+ const float scaler = amplitude / SHORT_SCALE;
for (int i = 0; i < numSamples; i++) {
destination[i] = source[i] * scaler;
}
@@ -149,7 +123,7 @@
float amplitude1,
float amplitude2) {
float scaler = amplitude1 / SHORT_SCALE;
- float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+ const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
*destination++ = *source++ * scaler;
@@ -158,6 +132,7 @@
}
}
+
// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
void AAudio_linearRamp(const float *source,
float *destination,
@@ -166,14 +141,12 @@
float amplitude1,
float amplitude2) {
float scaler = amplitude1;
- float delta = (amplitude2 - amplitude1) / numFrames;
+ const float delta = (amplitude2 - amplitude1) / numFrames;
for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
float sample = *source++;
-
// Clip to valid range of a float sample to prevent excessive volume.
- if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
- else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+ sample = clipToMinMaxHeadroom(sample);
*destination++ = sample * scaler;
}
@@ -188,18 +161,267 @@
int32_t samplesPerFrame,
float amplitude1,
float amplitude2) {
- float scaler = amplitude1 / SHORT_SCALE;
- float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+ // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
+ float scaler = amplitude1;
+ const float delta = (amplitude2 - amplitude1) / numFrames;
for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
// No need to clip because int16_t range is inherently limited.
float sample = *source++ * scaler;
- *destination++ = floatToInt16(sample);
+ *destination++ = (int16_t) roundf(sample);
}
scaler += delta;
}
}
+// *************************************************************************************
+// Convert Mono To Stereo at the same time as converting format.
+void AAudioConvert_formatMonoToStereo(const float *source,
+ int16_t *destination,
+ int32_t numFrames,
+ float amplitude) {
+ const float scaler = amplitude;
+ for (int i = 0; i < numFrames; i++) {
+ float sample = *source++;
+ int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
+ *destination++ = sample16;
+ *destination++ = sample16;
+ }
+}
+
+void AAudioConvert_formatMonoToStereo(const float *source,
+ int16_t *destination,
+ int32_t numFrames,
+ float amplitude1,
+ float amplitude2) {
+ // divide by numFrames so that we almost reach amplitude2
+ const float delta = (amplitude2 - amplitude1) / numFrames;
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ const float scaler = amplitude1 + (frameIndex * delta);
+ const float sample = *source++;
+ int16_t sample16 = clipAndClampFloatToPcm16(sample, scaler);
+ *destination++ = sample16;
+ *destination++ = sample16;
+ }
+}
+
+void AAudioConvert_formatMonoToStereo(const int16_t *source,
+ float *destination,
+ int32_t numFrames,
+ float amplitude) {
+ const float scaler = amplitude / SHORT_SCALE;
+ for (int i = 0; i < numFrames; i++) {
+ float sample = source[i] * scaler;
+ *destination++ = sample;
+ *destination++ = sample;
+ }
+}
+
+// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
+void AAudioConvert_formatMonoToStereo(const int16_t *source,
+ float *destination,
+ int32_t numFrames,
+ float amplitude1,
+ float amplitude2) {
+ const float scaler1 = amplitude1 / SHORT_SCALE;
+ const float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ float scaler = scaler1 + (frameIndex * delta);
+ float sample = source[frameIndex] * scaler;
+ *destination++ = sample;
+ *destination++ = sample;
+ }
+}
+
+// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
+void AAudio_linearRampMonoToStereo(const float *source,
+ float *destination,
+ int32_t numFrames,
+ float amplitude1,
+ float amplitude2) {
+ const float delta = (amplitude2 - amplitude1) / numFrames;
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ float sample = *source++;
+
+ // Clip to valid range of a float sample to prevent excessive volume.
+ sample = clipToMinMaxHeadroom(sample);
+
+ const float scaler = amplitude1 + (frameIndex * delta);
+ float sampleScaled = sample * scaler;
+ *destination++ = sampleScaled;
+ *destination++ = sampleScaled;
+ }
+}
+
+// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
+void AAudio_linearRampMonoToStereo(const int16_t *source,
+ int16_t *destination,
+ int32_t numFrames,
+ float amplitude1,
+ float amplitude2) {
+ // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
+ const float delta = (amplitude2 - amplitude1) / numFrames;
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ const float scaler = amplitude1 + (frameIndex * delta);
+ // No need to clip because int16_t range is inherently limited.
+ const float sample = *source++ * scaler;
+ int16_t sample16 = (int16_t) roundf(sample);
+ *destination++ = sample16;
+ *destination++ = sample16;
+ }
+}
+
+// *************************************************************************************
+void AAudioDataConverter::convert(
+ const FormattedData &source,
+ const FormattedData &destination,
+ int32_t numFrames,
+ float levelFrom,
+ float levelTo) {
+
+ if (source.channelCount == 1 && destination.channelCount == 2) {
+ convertMonoToStereo(source,
+ destination,
+ numFrames,
+ levelFrom,
+ levelTo);
+ } else {
+ // We only support mono to stereo conversion. Otherwise source and destination
+ // must match.
+ assert(source.channelCount == destination.channelCount);
+ convertChannelsMatch(source,
+ destination,
+ numFrames,
+ levelFrom,
+ levelTo);
+ }
+}
+
+void AAudioDataConverter::convertMonoToStereo(
+ const FormattedData &source,
+ const FormattedData &destination,
+ int32_t numFrames,
+ float levelFrom,
+ float levelTo) {
+
+ // The formats are validated when the stream is opened so we do not have to
+ // check for illegal combinations here.
+ if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
+ AAudio_linearRampMonoToStereo(
+ (const float *) source.data,
+ (float *) destination.data,
+ numFrames,
+ levelFrom,
+ levelTo);
+ } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
+ if (levelFrom != levelTo) {
+ AAudioConvert_formatMonoToStereo(
+ (const float *) source.data,
+ (int16_t *) destination.data,
+ numFrames,
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_formatMonoToStereo(
+ (const float *) source.data,
+ (int16_t *) destination.data,
+ numFrames,
+ levelTo);
+ }
+ }
+ } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
+ if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (levelFrom != levelTo) {
+ AAudioConvert_formatMonoToStereo(
+ (const int16_t *) source.data,
+ (float *) destination.data,
+ numFrames,
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_formatMonoToStereo(
+ (const int16_t *) source.data,
+ (float *) destination.data,
+ numFrames,
+ levelTo);
+ }
+ } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
+ AAudio_linearRampMonoToStereo(
+ (const int16_t *) source.data,
+ (int16_t *) destination.data,
+ numFrames,
+ levelFrom,
+ levelTo);
+ }
+ }
+}
+
+void AAudioDataConverter::convertChannelsMatch(
+ const FormattedData &source,
+ const FormattedData &destination,
+ int32_t numFrames,
+ float levelFrom,
+ float levelTo) {
+ const int32_t numSamples = numFrames * source.channelCount;
+
+ // The formats are validated when the stream is opened so we do not have to
+ // check for illegal combinations here.
+ if (source.format == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
+ AAudio_linearRamp(
+ (const float *) source.data,
+ (float *) destination.data,
+ numFrames,
+ source.channelCount,
+ levelFrom,
+ levelTo);
+ } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
+ if (levelFrom != levelTo) {
+ AAudioConvert_floatToPcm16(
+ (const float *) source.data,
+ (int16_t *) destination.data,
+ numFrames,
+ source.channelCount,
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_floatToPcm16(
+ (const float *) source.data,
+ (int16_t *) destination.data,
+ numSamples,
+ levelTo);
+ }
+ }
+ } else if (source.format == AAUDIO_FORMAT_PCM_I16) {
+ if (destination.format == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (levelFrom != levelTo) {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source.data,
+ (float *) destination.data,
+ numFrames,
+ source.channelCount,
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source.data,
+ (float *) destination.data,
+ numSamples,
+ levelTo);
+ }
+ } else if (destination.format == AAUDIO_FORMAT_PCM_I16) {
+ AAudio_linearRamp(
+ (const int16_t *) source.data,
+ (int16_t *) destination.data,
+ numFrames,
+ source.channelCount,
+ levelFrom,
+ levelTo);
+ }
+ }
+}
+
status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
// This covers the case for AAUDIO_OK and for positive results.
if (result >= 0) {
@@ -281,6 +503,13 @@
return result;
}
+audio_session_t AAudioConvert_aaudioToAndroidSessionId(aaudio_session_id_t sessionId) {
+ // If not a regular sessionId then convert to a safe value of AUDIO_SESSION_ALLOCATE.
+ return (sessionId == AAUDIO_SESSION_ID_ALLOCATE || sessionId == AAUDIO_SESSION_ID_NONE)
+ ? AUDIO_SESSION_ALLOCATE
+ : (audio_session_t) sessionId;
+}
+
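Note on the mapping above (derived from the code, for illustration only): both AAUDIO_SESSION_ID_ALLOCATE and AAUDIO_SESSION_ID_NONE map to AUDIO_SESSION_ALLOCATE, so AudioFlinger allocates a fresh session; any other value, for example a session id of 42 previously handed out by the framework, is passed through unchanged as (audio_session_t) 42.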
audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_format_t aaudioFormat) {
audio_format_t androidFormat;
switch (aaudioFormat) {
@@ -315,17 +544,77 @@
return aaudioFormat;
}
+// Make a message string from the condition.
+#define STATIC_ASSERT(condition) static_assert(condition, #condition)
+
+audio_usage_t AAudioConvert_usageToInternal(aaudio_usage_t usage) {
+ // The public aaudio_usage_t constants are supposed to have the same
+ // values as the internal audio_usage_t values.
+ STATIC_ASSERT(AAUDIO_USAGE_MEDIA == AUDIO_USAGE_MEDIA);
+ STATIC_ASSERT(AAUDIO_USAGE_VOICE_COMMUNICATION == AUDIO_USAGE_VOICE_COMMUNICATION);
+ STATIC_ASSERT(AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING
+ == AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING);
+ STATIC_ASSERT(AAUDIO_USAGE_ALARM == AUDIO_USAGE_ALARM);
+ STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION == AUDIO_USAGE_NOTIFICATION);
+ STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION_RINGTONE
+ == AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE);
+ STATIC_ASSERT(AAUDIO_USAGE_NOTIFICATION_EVENT == AUDIO_USAGE_NOTIFICATION_EVENT);
+ STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY);
+ STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE
+ == AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE);
+ STATIC_ASSERT(AAUDIO_USAGE_ASSISTANCE_SONIFICATION == AUDIO_USAGE_ASSISTANCE_SONIFICATION);
+ STATIC_ASSERT(AAUDIO_USAGE_GAME == AUDIO_USAGE_GAME);
+ STATIC_ASSERT(AAUDIO_USAGE_ASSISTANT == AUDIO_USAGE_ASSISTANT);
+ if (usage == AAUDIO_UNSPECIFIED) {
+ usage = AAUDIO_USAGE_MEDIA;
+ }
+ return (audio_usage_t) usage; // same value
+}
+
+audio_content_type_t AAudioConvert_contentTypeToInternal(aaudio_content_type_t contentType) {
+ // The public aaudio_content_type_t constants are supposed to have the same
+ // values as the internal audio_content_type_t values.
+ STATIC_ASSERT(AAUDIO_CONTENT_TYPE_MUSIC == AUDIO_CONTENT_TYPE_MUSIC);
+ STATIC_ASSERT(AAUDIO_CONTENT_TYPE_SPEECH == AUDIO_CONTENT_TYPE_SPEECH);
+ STATIC_ASSERT(AAUDIO_CONTENT_TYPE_SONIFICATION == AUDIO_CONTENT_TYPE_SONIFICATION);
+ STATIC_ASSERT(AAUDIO_CONTENT_TYPE_MOVIE == AUDIO_CONTENT_TYPE_MOVIE);
+ if (contentType == AAUDIO_UNSPECIFIED) {
+ contentType = AAUDIO_CONTENT_TYPE_MUSIC;
+ }
+ return (audio_content_type_t) contentType; // same value
+}
+
+audio_source_t AAudioConvert_inputPresetToAudioSource(aaudio_input_preset_t preset) {
+ // The public aaudio_input_preset_t constants are supposed to have the same
+ // values as the internal audio_source_t values.
+ STATIC_ASSERT(AAUDIO_UNSPECIFIED == AUDIO_SOURCE_DEFAULT);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_GENERIC == AUDIO_SOURCE_MIC);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_CAMCORDER == AUDIO_SOURCE_CAMCORDER);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_VOICE_RECOGNITION == AUDIO_SOURCE_VOICE_RECOGNITION);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION == AUDIO_SOURCE_VOICE_COMMUNICATION);
+ STATIC_ASSERT(AAUDIO_INPUT_PRESET_UNPROCESSED == AUDIO_SOURCE_UNPROCESSED);
+ if (preset == AAUDIO_UNSPECIFIED) {
+ preset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
+ }
+ return (audio_source_t) preset; // same value
+}
+
int32_t AAudioConvert_framesToBytes(int32_t numFrames,
- int32_t bytesPerFrame,
- int32_t *sizeInBytes) {
- // TODO implement more elegantly
- const int32_t maxChannels = 256; // ridiculously large
- const int32_t maxBytesPerFrame = maxChannels * sizeof(float);
- // Prevent overflow by limiting multiplicands.
- if (bytesPerFrame > maxBytesPerFrame || numFrames > (0x3FFFFFFF / maxBytesPerFrame)) {
+ int32_t bytesPerFrame,
+ int32_t *sizeInBytes) {
+ *sizeInBytes = 0;
+
+ if (numFrames < 0 || bytesPerFrame < 0) {
+ ALOGE("negative size, numFrames = %d, frameSize = %d", numFrames, bytesPerFrame);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+
+ // Prevent numeric overflow. Guard the division against bytesPerFrame == 0.
+ if (bytesPerFrame > 0 && numFrames > (INT32_MAX / bytesPerFrame)) {
ALOGE("size overflow, numFrames = %d, frameSize = %d", numFrames, bytesPerFrame);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
+
*sizeInBytes = numFrames * bytesPerFrame;
return AAUDIO_OK;
}
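Illustrative call to the helper above (not part of this patch; the frame count and format are made-up values):

    int32_t sizeInBytes = 0;
    // 1024 frames of stereo float: 1024 * 8 = 8192 bytes, comfortably below INT32_MAX.
    aaudio_result_t result = AAudioConvert_framesToBytes(1024, 2 * sizeof(float), &sizeInBytes);
    // With bytesPerFrame = 8 the guard rejects anything above INT32_MAX / 8
    // (268435455 frames) and returns AAUDIO_ERROR_OUT_OF_RANGE, leaving sizeInBytes at 0.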
@@ -410,3 +699,31 @@
}
return prop;
}
+
+aaudio_result_t AAudio_isFlushAllowed(aaudio_stream_state_t state) {
+ aaudio_result_t result = AAUDIO_OK;
+ switch (state) {
+// Proceed with flushing.
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_PAUSED:
+ case AAUDIO_STREAM_STATE_STOPPED:
+ case AAUDIO_STREAM_STATE_FLUSHED:
+ break;
+
+// Flushing is not allowed while the stream is active or being closed.
+ case AAUDIO_STREAM_STATE_STARTING:
+ case AAUDIO_STREAM_STATE_STARTED:
+ case AAUDIO_STREAM_STATE_STOPPING:
+ case AAUDIO_STREAM_STATE_PAUSING:
+ case AAUDIO_STREAM_STATE_FLUSHING:
+ case AAUDIO_STREAM_STATE_CLOSING:
+ case AAUDIO_STREAM_STATE_CLOSED:
+ case AAUDIO_STREAM_STATE_DISCONNECTED:
+ default:
+ ALOGE("can only flush stream when PAUSED, OPEN or STOPPED, state = %s",
+ AAudio_convertStreamStateToText(state));
+ result = AAUDIO_ERROR_INVALID_STATE;
+ break;
+ }
+ return result;
+}
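A minimal sketch of how a caller might use the new helper (illustrative only; getState() and flushInternal() are hypothetical stand-ins, not names from this patch):

    aaudio_result_t result = AAudio_isFlushAllowed(getState());
    if (result == AAUDIO_OK) {
        // Safe to flush: the stream is OPEN, PAUSED, STOPPED or FLUSHED.
        result = flushInternal();
    }
    return result;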
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 3afa976..4b975e8 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -23,7 +23,7 @@
#include <sys/types.h>
#include <utils/Errors.h>
-#include <hardware/audio.h>
+#include <system/audio.h>
#include "aaudio/AAudio.h"
@@ -38,6 +38,13 @@
aaudio_result_t AAudioConvert_androidToAAudioResult(android::status_t status);
/**
+ * Convert an aaudio_session_id_t to a value that is safe to pass to AudioFlinger.
+ * @param sessionId
+ * @return safe value
+ */
+audio_session_t AAudioConvert_aaudioToAndroidSessionId(aaudio_session_id_t sessionId);
+
+/**
* Convert an array of floats to an array of int16_t.
*
* @param source
@@ -152,21 +159,81 @@
float amplitude1,
float amplitude2);
+class AAudioDataConverter {
+public:
+
+ struct FormattedData {
+
+ FormattedData(void *data, aaudio_format_t format, int32_t channelCount)
+ : data(data)
+ , format(format)
+ , channelCount(channelCount) {}
+
+ const void *data = nullptr;
+ const aaudio_format_t format = AAUDIO_FORMAT_UNSPECIFIED;
+ const int32_t channelCount = 1;
+ };
+
+ static void convert(const FormattedData &source,
+ const FormattedData &destination,
+ int32_t numFrames,
+ float levelFrom,
+ float levelTo);
+
+private:
+ static void convertMonoToStereo(const FormattedData &source,
+ const FormattedData &destination,
+ int32_t numFrames,
+ float levelFrom,
+ float levelTo);
+
+ static void convertChannelsMatch(const FormattedData &source,
+ const FormattedData &destination,
+ int32_t numFrames,
+ float levelFrom,
+ float levelTo);
+};
+
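A minimal usage sketch for the converter declared above (illustrative; the buffer pointers and frame count are assumptions, not part of this change):

    AAudioDataConverter::FormattedData source(
            monoFloatBuffer, AAUDIO_FORMAT_PCM_FLOAT, 1);
    AAudioDataConverter::FormattedData destination(
            stereoI16Buffer, AAUDIO_FORMAT_PCM_I16, 2);
    // Converts numFrames frames, duplicating the mono channel and ramping
    // the volume linearly from 0.0 to 1.0 across the block.
    AAudioDataConverter::convert(source, destination, numFrames, 0.0f, 1.0f);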
/**
* Calculate the number of bytes and prevent numeric overflow.
+ * The *sizeInBytes will be set to zero if there is an error.
+ *
* @param numFrames frame count
* @param bytesPerFrame size of a frame in bytes
- * @param sizeInBytes total size in bytes
+ * @param sizeInBytes pointer to a variable to receive total size in bytes
* @return AAUDIO_OK or negative error, eg. AAUDIO_ERROR_OUT_OF_RANGE
*/
int32_t AAudioConvert_framesToBytes(int32_t numFrames,
- int32_t bytesPerFrame,
- int32_t *sizeInBytes);
+ int32_t bytesPerFrame,
+ int32_t *sizeInBytes);
audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_format_t aaudio_format);
aaudio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t format);
+
+/**
+ * Note that this function does not validate the passed in value.
+ * That is done somewhere else.
+ * @return internal value
+ */
+audio_usage_t AAudioConvert_usageToInternal(aaudio_usage_t usage);
+
+/**
+ * Note that this function does not validate the passed in value.
+ * That is done somewhere else.
+ * @return internal value
+ */
+audio_content_type_t AAudioConvert_contentTypeToInternal(aaudio_content_type_t contentType);
+
+/**
+ * Note that this function does not validate the passed in value.
+ * That is done somewhere else.
+ * @return internal audio source
+ */
+audio_source_t AAudioConvert_inputPresetToAudioSource(aaudio_input_preset_t preset);
+
/**
* @return the size of a sample of the given format in bytes or AAUDIO_ERROR_ILLEGAL_ARGUMENT
*/
@@ -235,6 +302,14 @@
*/
int32_t AAudioProperty_getHardwareBurstMinMicros();
+
+/**
+ * Is flush allowed for the given state?
+ * @param state
+ * @return AAUDIO_OK if allowed or an error
+ */
+aaudio_result_t AAudio_isFlushAllowed(aaudio_stream_state_t state);
+
/**
* Try a function f until it returns true.
*
diff --git a/media/libaaudio/src/utility/LinearRamp.h b/media/libaaudio/src/utility/LinearRamp.h
index ff09dce..2b1b8e0 100644
--- a/media/libaaudio/src/utility/LinearRamp.h
+++ b/media/libaaudio/src/utility/LinearRamp.h
@@ -87,7 +87,7 @@
std::atomic<float> mTarget;
- int32_t mLengthInFrames = 48000 / 50; // 20 msec at 48000 Hz
+ int32_t mLengthInFrames = 48000 / 100; // 10 msec at 48000 Hz
int32_t mRemaining = 0;
float mLevelFrom = 0.0f;
float mLevelTo = 0.0f;
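For reference, the arithmetic behind the new default: 48000 / 100 = 480 frames, so at 48 kHz a volume change now ramps over 10 ms instead of the previous 960 frames (20 ms).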
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 13c92a2..5833eab 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -89,6 +89,18 @@
mCounter32 = 0;
}
+ /**
+ * Round 64-bit counter up to a multiple of the period.
+ *
+ * @param period might be, for example, a buffer capacity
+ */
+ void roundUp64(int32_t period) {
+ if (period > 0) {
+ int64_t numPeriods = (mCounter64 + period - 1) / period;
+ mCounter64 = numPeriods * period;
+ }
+ }
+
private:
int64_t mCounter64 = 0;
int32_t mCounter32 = 0;
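Worked example for roundUp64() (values chosen for illustration): with mCounter64 = 1000 and period = 480, numPeriods = (1000 + 479) / 480 = 3, so the counter becomes 1440; a counter that is already a multiple of the period, e.g. 1440, is left unchanged because (1440 + 479) / 480 is still 3.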
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 19c56d3..68194db 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -27,6 +27,7 @@
cc_test {
name: "test_timestamps",
+ defaults: ["libaaudio_tests_defaults"],
srcs: ["test_timestamps.cpp"],
header_libs: ["libaaudio_example_utils"],
shared_libs: ["libaaudio"],
@@ -86,3 +87,83 @@
"libutils",
],
}
+
+cc_test {
+ name: "test_bad_disconnect",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_bad_disconnect.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_various",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_various.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_session_id",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_session_id.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_aaudio_monkey",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_aaudio_monkey.cpp"],
+ header_libs: ["libaaudio_example_utils"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_attributes",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_attributes.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_interference",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_interference.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_atomic_fifo",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_atomic_fifo.cpp"],
+ shared_libs: ["libaaudio"],
+}
diff --git a/media/libaaudio/tests/test_aaudio_monkey.cpp b/media/libaaudio/tests/test_aaudio_monkey.cpp
new file mode 100644
index 0000000..be54835
--- /dev/null
+++ b/media/libaaudio/tests/test_aaudio_monkey.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Try to trigger bugs by playing randomly on multiple streams.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <vector>
+
+#include <aaudio/AAudio.h>
+#include "AAudioArgsParser.h"
+#include "AAudioExampleUtils.h"
+#include "AAudioSimplePlayer.h"
+#include "SineGenerator.h"
+
+#define DEFAULT_TIMEOUT_NANOS (1 * NANOS_PER_SECOND)
+
+#define NUM_LOOPS 1000
+#define MAX_MICROS_DELAY (2 * 1000 * 1000)
+
+// TODO Consider adding an input stream.
+#define PROB_START (0.20)
+#define PROB_PAUSE (PROB_START + 0.10)
+#define PROB_FLUSH (PROB_PAUSE + 0.10)
+#define PROB_STOP (PROB_FLUSH + 0.10)
+#define PROB_CLOSE (PROB_STOP + 0.10)
+static_assert(PROB_CLOSE < 0.9, "Probability sum too high.");
+
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames);
+
+void AAudioMonkeyErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error) {
+ printf("Error Callback, error: %d\n",(int)error);
+}
+
+// This function is not thread safe. Only use this from a single thread.
+double nextRandomDouble() {
+ return drand48();
+}
+
+class AAudioMonkey : public AAudioSimplePlayer {
+public:
+
+ AAudioMonkey(int index, AAudioArgsParser *argParser)
+ : mArgParser(argParser)
+ , mIndex(index) {}
+
+ aaudio_result_t open() {
+ printf("Monkey # %d ---------------------------------------------- OPEN\n", mIndex);
+ double offset = mIndex * 50;
+ mSine1.setup(440.0, 48000);
+ mSine1.setSweep(300.0 + offset, 600.0 + offset, 5.0);
+ mSine2.setup(660.0, 48000);
+ mSine2.setSweep(350.0 + offset, 900.0 + offset, 7.0);
+
+ aaudio_result_t result = AAudioSimplePlayer::open(*mArgParser,
+ AAudioMonkeyDataCallback,
+ AAudioMonkeyErrorCallbackProc,
+ this);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - player.open() returned %d\n", result);
+ }
+
+ mArgParser->compareWithStream(getStream());
+ return result;
+ }
+
+ bool isOpen() {
+ return (getStream() != nullptr);
+ }
+
+ /**
+ *
+ * @return true if stream passes tests
+ */
+ bool validate() {
+ if (!isOpen()) return true; // closed is OK
+
+ // update and query stream state
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ aaudio_result_t result = AAudioStream_waitForStateChange(getStream(),
+ AAUDIO_STREAM_STATE_UNKNOWN, &state, 0);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_waitForStateChange returned %d\n", result);
+ return false;
+ }
+
+ int64_t framesRead = AAudioStream_getFramesRead(getStream());
+ int64_t framesWritten = AAudioStream_getFramesWritten(getStream());
+ int32_t xRuns = AAudioStream_getXRunCount(getStream());
+ // Print status
+ printf("%30s, framesWritten = %8lld, framesRead = %8lld, xRuns = %d\n",
+ AAudio_convertStreamStateToText(state),
+ (unsigned long long) framesWritten,
+ (unsigned long long) framesRead,
+ xRuns);
+
+ if (framesWritten < framesRead) {
+ printf("WARNING - UNDERFLOW - diff = %d !!!!!!!!!!!!\n",
+ (int) (framesWritten - framesRead));
+ }
+ return true;
+ }
+
+ aaudio_result_t invoke() {
+ aaudio_result_t result = AAUDIO_OK;
+ if (!isOpen()) {
+ result = open();
+ if (result != AAUDIO_OK) return result;
+ }
+
+ if (!validate()) {
+ return -1;
+ }
+
+ double dice = nextRandomDouble();
+ // Select an action based on a weighted probability.
+ if (dice < PROB_START) {
+ printf("start\n");
+ result = AAudioStream_requestStart(getStream());
+ } else if (dice < PROB_PAUSE) {
+ printf("pause\n");
+ result = AAudioStream_requestPause(getStream());
+ } else if (dice < PROB_FLUSH) {
+ printf("flush\n");
+ result = AAudioStream_requestFlush(getStream());
+ } else if (dice < PROB_STOP) {
+ printf("stop\n");
+ result = AAudioStream_requestStop(getStream());
+ } else if (dice < PROB_CLOSE) {
+ printf("close\n");
+ result = close();
+ } else {
+ printf("do nothing\n");
+ }
+
+ if (result == AAUDIO_ERROR_INVALID_STATE) {
+ printf(" got AAUDIO_ERROR_INVALID_STATE - expected from a monkey\n");
+ result = AAUDIO_OK;
+ }
+ if (result == AAUDIO_OK && isOpen()) {
+ if (!validate()) {
+ result = -1;
+ }
+ }
+ return result;
+ }
+
+ aaudio_data_callback_result_t renderAudio(
+ AAudioStream *stream,
+ void *audioData,
+ int32_t numFrames) {
+
+ int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
+ // This code only plays on the first one or two channels.
+ // TODO Support arbitrary number of channels.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Render sine waves as shorts to first channel.
+ mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Render sine waves as floats to first channel.
+ mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+ }
+
+private:
+ const AAudioArgsParser *mArgParser;
+ const int mIndex;
+ SineGenerator mSine1;
+ SineGenerator mSine2;
+};
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+) {
+ // should not happen but just in case...
+ if (userData == nullptr) {
+ printf("ERROR - AAudioMonkeyDataCallback needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ AAudioMonkey *monkey = (AAudioMonkey *) userData;
+ return monkey->renderAudio(stream, audioData, numFrames);
+}
+
+
+static void usage() {
+ AAudioArgsParser::usage();
+ printf(" -i{seed} Initial random seed\n");
+ printf(" -t{count} number of monkeys in the Troop\n");
+}
+
+int main(int argc, const char **argv) {
+ AAudioArgsParser argParser;
+ std::vector<AAudioMonkey> monkeys;
+ aaudio_result_t result;
+ int numMonkeys = 1;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("%s - Monkeys\n", argv[0]);
+
+ long int seed = (long int)getNanoseconds(); // different every time by default
+
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (argParser.parseArg(arg)) {
+ // Handle options that are not handled by the ArgParser
+ if (arg[0] == '-') {
+ char option = arg[1];
+ switch (option) {
+ case 'i':
+ seed = atol(&arg[2]);
+ break;
+ case 't':
+ numMonkeys = atoi(&arg[2]);
+ break;
+ default:
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ } else {
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ }
+ }
+
+ srand48(seed);
+ printf("seed = %ld, nextRandomDouble() = %f\n", seed, nextRandomDouble());
+
+ for (int m = 0; m < numMonkeys; m++) {
+ monkeys.emplace_back(m, &argParser);
+ }
+
+ for (int i = 0; i < NUM_LOOPS; i++) {
+ // pick a random monkey and invoke it
+ double dice = nextRandomDouble();
+ int monkeyIndex = (int) (dice * numMonkeys); // dice is in [0, 1) so the index stays in range
+ printf("----------- Monkey #%d\n", monkeyIndex);
+ result = monkeys[monkeyIndex].invoke();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ // sleep some random time
+ dice = nextRandomDouble();
+ dice = dice * dice * dice; // skew towards smaller delays
+ int micros = (int) (dice * MAX_MICROS_DELAY);
+ usleep(micros);
+
+ // TODO consider making this multi-threaded, one thread per monkey, to catch more bugs
+ }
+
+ printf("PASS\n");
+ return EXIT_SUCCESS;
+
+error:
+ printf("FAIL - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ usleep(1000 * 1000); // give me time to stop the logcat
+ return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/tests/test_atomic_fifo.cpp b/media/libaaudio/tests/test_atomic_fifo.cpp
new file mode 100644
index 0000000..0085217
--- /dev/null
+++ b/media/libaaudio/tests/test_atomic_fifo.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+#include <stdlib.h>
+
+#include "fifo/FifoBuffer.h"
+#include "fifo/FifoController.h"
+
+using android::fifo_frames_t;
+using android::FifoController;
+using android::FifoBuffer;
+using android::WrappingBuffer;
+
+//void foo() {
+TEST(test_fifo_controller, fifo_indices) {
+ // Values are arbitrary primes designed to trigger edge cases.
+ constexpr int capacity = 83;
+ constexpr int threshold = 47;
+ FifoController fifoController(capacity, threshold);
+ ASSERT_EQ(capacity, fifoController.getCapacity());
+ ASSERT_EQ(threshold, fifoController.getThreshold());
+
+ ASSERT_EQ(0, fifoController.getReadCounter());
+ ASSERT_EQ(0, fifoController.getWriteCounter());
+ ASSERT_EQ(0, fifoController.getFullFramesAvailable());
+ ASSERT_EQ(threshold, fifoController.getEmptyFramesAvailable());
+
+ // Pretend to write some data.
+ constexpr int advance1 = 23;
+ fifoController.advanceWriteIndex(advance1);
+ int advanced = advance1;
+ ASSERT_EQ(0, fifoController.getReadCounter());
+ ASSERT_EQ(0, fifoController.getReadIndex());
+ ASSERT_EQ(advanced, fifoController.getWriteCounter());
+ ASSERT_EQ(advanced, fifoController.getWriteIndex());
+ ASSERT_EQ(advanced, fifoController.getFullFramesAvailable());
+ ASSERT_EQ(threshold - advanced, fifoController.getEmptyFramesAvailable());
+
+ // Pretend to read the data.
+ fifoController.advanceReadIndex(advance1);
+ ASSERT_EQ(advanced, fifoController.getReadCounter());
+ ASSERT_EQ(advanced, fifoController.getReadIndex());
+ ASSERT_EQ(advanced, fifoController.getWriteCounter());
+ ASSERT_EQ(advanced, fifoController.getWriteIndex());
+ ASSERT_EQ(0, fifoController.getFullFramesAvailable());
+ ASSERT_EQ(threshold, fifoController.getEmptyFramesAvailable());
+
+ // Write past end of buffer.
+ constexpr int advance2 = 13 + capacity - advance1;
+ fifoController.advanceWriteIndex(advance2);
+ advanced += advance2;
+ ASSERT_EQ(advance1, fifoController.getReadCounter());
+ ASSERT_EQ(advance1, fifoController.getReadIndex());
+ ASSERT_EQ(advanced, fifoController.getWriteCounter());
+ ASSERT_EQ(advanced - capacity, fifoController.getWriteIndex());
+ ASSERT_EQ(advance2, fifoController.getFullFramesAvailable());
+ ASSERT_EQ(threshold - advance2, fifoController.getEmptyFramesAvailable());
+}
+
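Worked numbers for the wrap-around step in the test above: after the second advance the write counter is 23 + 73 = 96, so the write index wraps to 96 - 83 = 13, the full frames available are 96 - 23 = 73, and the empty count is threshold - full = 47 - 73 = -26, which is exactly what the final assertions check.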
+// TODO consider using a template for other data types.
+class TestFifoBuffer {
+public:
+ explicit TestFifoBuffer(fifo_frames_t capacity, fifo_frames_t threshold = 0)
+ : mFifoBuffer(sizeof(int16_t), capacity) {
+ // For reading and writing.
+ mData = new int16_t[capacity];
+ if (threshold <= 0) {
+ threshold = capacity;
+ }
+ mFifoBuffer.setThreshold(threshold);
+ mThreshold = threshold;
+ }
+
+ void checkMisc() {
+ ASSERT_EQ((int32_t)(2 * sizeof(int16_t)), mFifoBuffer.convertFramesToBytes(2));
+ ASSERT_EQ(mThreshold, mFifoBuffer.getThreshold());
+ }
+
+ // Verify that the available frames in each part add up correctly.
+ void checkWrappingBuffer() {
+ WrappingBuffer wrappingBuffer;
+ fifo_frames_t framesAvailable =
+ mFifoBuffer.getFifoControllerBase()->getEmptyFramesAvailable();
+ fifo_frames_t wrapAvailable = mFifoBuffer.getEmptyRoomAvailable(&wrappingBuffer);
+ EXPECT_EQ(framesAvailable, wrapAvailable);
+ fifo_frames_t bothAvailable = wrappingBuffer.numFrames[0] + wrappingBuffer.numFrames[1];
+ EXPECT_EQ(framesAvailable, bothAvailable);
+
+ framesAvailable =
+ mFifoBuffer.getFifoControllerBase()->getFullFramesAvailable();
+ wrapAvailable = mFifoBuffer.getFullDataAvailable(&wrappingBuffer);
+ EXPECT_EQ(framesAvailable, wrapAvailable);
+ bothAvailable = wrappingBuffer.numFrames[0] + wrappingBuffer.numFrames[1];
+ EXPECT_EQ(framesAvailable, bothAvailable);
+ }
+
+ // Write data but do not overflow.
+ void writeData(fifo_frames_t numFrames) {
+ fifo_frames_t framesAvailable =
+ mFifoBuffer.getFifoControllerBase()->getEmptyFramesAvailable();
+ fifo_frames_t framesToWrite = std::min(framesAvailable, numFrames);
+ for (int i = 0; i < framesToWrite; i++) {
+ mData[i] = mNextWriteIndex++;
+ }
+ fifo_frames_t actual = mFifoBuffer.write(mData, framesToWrite);
+ ASSERT_EQ(framesToWrite, actual);
+ }
+
+ // Read data but do not underflow.
+ void verifyData(fifo_frames_t numFrames) {
+ fifo_frames_t framesAvailable =
+ mFifoBuffer.getFifoControllerBase()->getFullFramesAvailable();
+ fifo_frames_t framesToRead = std::min(framesAvailable, numFrames);
+ fifo_frames_t actual = mFifoBuffer.read(mData, framesToRead);
+ ASSERT_EQ(framesToRead, actual);
+ for (int i = 0; i < framesToRead; i++) {
+ ASSERT_EQ(mNextVerifyIndex++, mData[i]);
+ }
+ }
+
+ // Wrap around the end of the buffer.
+ void checkWrappingWriteRead() {
+ constexpr int frames1 = 43;
+ constexpr int frames2 = 15;
+
+ writeData(frames1);
+ checkWrappingBuffer();
+ verifyData(frames1);
+ checkWrappingBuffer();
+
+ writeData(frames2);
+ checkWrappingBuffer();
+ verifyData(frames2);
+ checkWrappingBuffer();
+ }
+
+ // Write and Read a specific amount of data.
+ void checkWriteRead() {
+ const fifo_frames_t capacity = mFifoBuffer.getBufferCapacityInFrames();
+ // Wrap around with the smaller region in the second half.
+ const int frames1 = capacity - 4;
+ const int frames2 = 7; // arbitrary, small
+ writeData(frames1);
+ verifyData(frames1);
+ writeData(frames2);
+ verifyData(frames2);
+ }
+
+ // Write and Read a specific amount of data.
+ void checkWriteReadSmallLarge() {
+ const fifo_frames_t capacity = mFifoBuffer.getBufferCapacityInFrames();
+ // Wrap around with the larger region in the second half.
+ const int frames1 = capacity - 4;
+ const int frames2 = capacity - 9; // arbitrary, large
+ writeData(frames1);
+ verifyData(frames1);
+ writeData(frames2);
+ verifyData(frames2);
+ }
+
+ // Randomly read or write up to the maximum amount of data.
+ void checkRandomWriteRead() {
+ for (int i = 0; i < 20; i++) {
+ fifo_frames_t framesEmpty =
+ mFifoBuffer.getFifoControllerBase()->getEmptyFramesAvailable();
+ fifo_frames_t numFrames = (fifo_frames_t)(drand48() * framesEmpty);
+ writeData(numFrames);
+
+ fifo_frames_t framesFull =
+ mFifoBuffer.getFifoControllerBase()->getFullFramesAvailable();
+ numFrames = (fifo_frames_t)(drand48() * framesFull);
+ verifyData(numFrames);
+ }
+ }
+
+ FifoBuffer mFifoBuffer;
+ int16_t *mData;
+ fifo_frames_t mNextWriteIndex = 0;
+ fifo_frames_t mNextVerifyIndex = 0;
+ fifo_frames_t mThreshold;
+};
+
+TEST(test_fifo_buffer, fifo_read_write) {
+ constexpr int capacity = 51; // arbitrary
+ TestFifoBuffer tester(capacity);
+ tester.checkMisc();
+ tester.checkWriteRead();
+}
+
+TEST(test_fifo_buffer, fifo_wrapping_read_write) {
+ constexpr int capacity = 59; // arbitrary, a little bigger this time
+ TestFifoBuffer tester(capacity);
+ tester.checkWrappingWriteRead();
+}
+
+TEST(test_fifo_buffer, fifo_read_write_small_large) {
+ constexpr int capacity = 51; // arbitrary
+ TestFifoBuffer tester(capacity);
+ tester.checkWriteReadSmallLarge();
+}
+
+TEST(test_fifo_buffer, fifo_random_read_write) {
+ constexpr int capacity = 51; // arbitrary
+ TestFifoBuffer tester(capacity);
+ tester.checkRandomWriteRead();
+}
+
+TEST(test_fifo_buffer, fifo_random_threshold) {
+ constexpr int capacity = 67; // arbitrary
+ constexpr int threshold = 37; // arbitrary
+ TestFifoBuffer tester(capacity, threshold);
+ tester.checkRandomWriteRead();
+}
diff --git a/media/libaaudio/tests/test_attributes.cpp b/media/libaaudio/tests/test_attributes.cpp
new file mode 100644
index 0000000..b01af25
--- /dev/null
+++ b/media/libaaudio/tests/test_attributes.cpp
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Test AAudio attributes such as Usage, ContentType and InputPreset.
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+#include <gtest/gtest.h>
+
+constexpr int64_t kNanosPerSecond = 1000000000;
+constexpr int kNumFrames = 256;
+constexpr int kChannelCount = 2;
+
+constexpr int32_t DONT_SET = -1000;
+
+static void checkAttributes(aaudio_performance_mode_t perfMode,
+ aaudio_usage_t usage,
+ aaudio_content_type_t contentType,
+ aaudio_input_preset_t preset = DONT_SET,
+ aaudio_direction_t direction = AAUDIO_DIRECTION_OUTPUT) {
+
+ float *buffer = new float[kNumFrames * kChannelCount];
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ // Request stream properties.
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+ AAudioStreamBuilder_setDirection(aaudioBuilder, direction);
+
+ // Set the attribute in the builder.
+ if (usage != DONT_SET) {
+ AAudioStreamBuilder_setUsage(aaudioBuilder, usage);
+ }
+ if (contentType != DONT_SET) {
+ AAudioStreamBuilder_setContentType(aaudioBuilder, contentType);
+ }
+ if (preset != DONT_SET) {
+ AAudioStreamBuilder_setInputPreset(aaudioBuilder, preset);
+ }
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+ AAudioStreamBuilder_delete(aaudioBuilder);
+
+ // Make sure we get the same attributes back from the stream.
+ aaudio_usage_t expectedUsage =
+ (usage == DONT_SET || usage == AAUDIO_UNSPECIFIED)
+ ? AAUDIO_USAGE_MEDIA // default
+ : usage;
+ EXPECT_EQ(expectedUsage, AAudioStream_getUsage(aaudioStream));
+
+ aaudio_content_type_t expectedContentType =
+ (contentType == DONT_SET || contentType == AAUDIO_UNSPECIFIED)
+ ? AAUDIO_CONTENT_TYPE_MUSIC // default
+ : contentType;
+ EXPECT_EQ(expectedContentType, AAudioStream_getContentType(aaudioStream));
+
+ aaudio_input_preset_t expectedPreset =
+ (preset == DONT_SET || preset == AAUDIO_UNSPECIFIED)
+ ? AAUDIO_INPUT_PRESET_VOICE_RECOGNITION // default
+ : preset;
+ EXPECT_EQ(expectedPreset, AAudioStream_getInputPreset(aaudioStream));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+ if (direction == AAUDIO_DIRECTION_INPUT) {
+ EXPECT_EQ(kNumFrames,
+ AAudioStream_read(aaudioStream, buffer, kNumFrames, kNanosPerSecond));
+ } else {
+ EXPECT_EQ(kNumFrames,
+ AAudioStream_write(aaudioStream, buffer, kNumFrames, kNanosPerSecond));
+ }
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+ delete[] buffer;
+}
+
+static const aaudio_usage_t sUsages[] = {
+ DONT_SET,
+ AAUDIO_UNSPECIFIED,
+ AAUDIO_USAGE_MEDIA,
+ AAUDIO_USAGE_VOICE_COMMUNICATION,
+ AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+ AAUDIO_USAGE_ALARM,
+ AAUDIO_USAGE_NOTIFICATION,
+ AAUDIO_USAGE_NOTIFICATION_RINGTONE,
+ AAUDIO_USAGE_NOTIFICATION_EVENT,
+ AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+ AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ AAUDIO_USAGE_ASSISTANCE_SONIFICATION,
+ AAUDIO_USAGE_GAME,
+ AAUDIO_USAGE_ASSISTANT
+};
+
+static const aaudio_content_type_t sContentTypes[] = {
+ DONT_SET,
+ AAUDIO_UNSPECIFIED,
+ AAUDIO_CONTENT_TYPE_SPEECH,
+ AAUDIO_CONTENT_TYPE_MUSIC,
+ AAUDIO_CONTENT_TYPE_MOVIE,
+ AAUDIO_CONTENT_TYPE_SONIFICATION
+};
+
+static const aaudio_input_preset_t sInputPresets[] = {
+ DONT_SET,
+ AAUDIO_UNSPECIFIED,
+ AAUDIO_INPUT_PRESET_GENERIC,
+ AAUDIO_INPUT_PRESET_CAMCORDER,
+ AAUDIO_INPUT_PRESET_VOICE_RECOGNITION,
+ AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION,
+ AAUDIO_INPUT_PRESET_UNPROCESSED,
+};
+
+static void checkAttributesUsage(aaudio_performance_mode_t perfMode) {
+ for (aaudio_usage_t usage : sUsages) {
+ checkAttributes(perfMode, usage, DONT_SET);
+ }
+}
+
+static void checkAttributesContentType(aaudio_performance_mode_t perfMode) {
+ for (aaudio_content_type_t contentType : sContentTypes) {
+ checkAttributes(perfMode, DONT_SET, contentType);
+ }
+}
+
+static void checkAttributesInputPreset(aaudio_performance_mode_t perfMode) {
+ for (aaudio_input_preset_t inputPreset : sInputPresets) {
+ checkAttributes(perfMode,
+ DONT_SET,
+ DONT_SET,
+ inputPreset,
+ AAUDIO_DIRECTION_INPUT);
+ }
+}
+
+TEST(test_attributes, aaudio_usage_perfnone) {
+ checkAttributesUsage(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_attributes, aaudio_content_type_perfnone) {
+ checkAttributesContentType(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_attributes, aaudio_input_preset_perfnone) {
+ checkAttributesInputPreset(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_attributes, aaudio_usage_lowlat) {
+ checkAttributesUsage(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
+TEST(test_attributes, aaudio_content_type_lowlat) {
+ checkAttributesContentType(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
+TEST(test_attributes, aaudio_input_preset_lowlat) {
+ checkAttributesInputPreset(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
diff --git a/media/libaaudio/tests/test_bad_disconnect.cpp b/media/libaaudio/tests/test_bad_disconnect.cpp
new file mode 100644
index 0000000..435990d
--- /dev/null
+++ b/media/libaaudio/tests/test_bad_disconnect.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Handle a DISCONNECT by only opening and starting a new stream
+ * without stopping and closing the old one.
+ * This caused the new stream to use the old disconnected device.
+ */
+
+#include <stdio.h>
+#include <thread>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+
+#define DEFAULT_TIMEOUT_NANOS ((int64_t)1000000000)
+
+static void s_myErrorCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ aaudio_result_t error);
+
+struct AudioEngine {
+ AAudioStreamBuilder *builder = nullptr;
+ AAudioStream *stream = nullptr;
+ std::thread *thread = nullptr;
+ int64_t framesRead = 0;
+};
+
+AudioEngine s_AudioEngine;
+
+// Callback function that fills the audio output buffer.
+static aaudio_data_callback_result_t s_myDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+) {
+ (void) userData;
+ (void) audioData;
+ (void) numFrames;
+ s_AudioEngine.framesRead = AAudioStream_getFramesRead(stream);
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+static aaudio_result_t s_StartAudio() {
+ int32_t framesPerBurst = 0;
+ int32_t deviceId = 0;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ aaudio_result_t result = AAudio_createStreamBuilder(&s_AudioEngine.builder);
+ if (result != AAUDIO_OK) {
+ printf("AAudio_createStreamBuilder returned %s",
+ AAudio_convertResultToText(result));
+ return result;
+ }
+
+ // Request stream properties.
+ AAudioStreamBuilder_setFormat(s_AudioEngine.builder, AAUDIO_FORMAT_PCM_FLOAT);
+ AAudioStreamBuilder_setPerformanceMode(s_AudioEngine.builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+ AAudioStreamBuilder_setDataCallback(s_AudioEngine.builder, s_myDataCallbackProc, nullptr);
+ AAudioStreamBuilder_setErrorCallback(s_AudioEngine.builder, s_myErrorCallbackProc, nullptr);
+
+ // Create an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(s_AudioEngine.builder, &s_AudioEngine.stream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStreamBuilder_openStream returned %s",
+ AAudio_convertResultToText(result));
+ return result;
+ }
+
+ result = AAudioStream_requestStart(s_AudioEngine.stream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_requestStart returned %s",
+ AAudio_convertResultToText(result));
+ }
+
+ // Check to see what kind of stream we actually got.
+ deviceId = AAudioStream_getDeviceId(s_AudioEngine.stream);
+ framesPerBurst = AAudioStream_getFramesPerBurst(s_AudioEngine.stream);
+
+ printf("-------- started: deviceId = %3d, framesPerBurst = %3d\n", deviceId, framesPerBurst);
+
+ return result;
+}
+
+static aaudio_result_t s_StopAudio() {
+ aaudio_result_t result = AAUDIO_OK;
+ if (s_AudioEngine.stream != nullptr) {
+ result = AAudioStream_requestStop(s_AudioEngine.stream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_requestStop returned %s\n",
+ AAudio_convertResultToText(result));
+ }
+ result = AAudioStream_close(s_AudioEngine.stream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_close returned %s\n",
+ AAudio_convertResultToText(result));
+ }
+ s_AudioEngine.stream = nullptr;
+ AAudioStreamBuilder_delete(s_AudioEngine.builder);
+ s_AudioEngine.builder = nullptr;
+ }
+ return result;
+}
+
+static void s_StartThreadProc() {
+ // A good app would call s_StopAudio here! This test simulates a bad app.
+ s_StartAudio();
+ s_AudioEngine.thread = nullptr;
+}
+
+static void s_myErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error) {
+ if (error == AAUDIO_ERROR_DISCONNECTED) {
+ // Handle stream restart on a separate thread
+ if (s_AudioEngine.thread == nullptr) {
+ s_AudioEngine.thread = new std::thread(s_StartThreadProc);
+ }
+ }
+}
+
+int main(int argc, char **argv) {
+ (void) argc;
+ (void) argv;
+
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("Test Bad Disconnect V1.0\n");
+ printf("\n=========== Please PLUG and UNPLUG headphones! ==============\n\n");
+ printf("You should see the deviceID change on each plug event.\n");
+ printf("Headphones will generally get a new deviceId each time.\n");
+ printf("Speakers will have the same deviceId each time.\n");
+ printf("The framesRead should reset on each plug event then increase over time.\n");
+ printf("\n");
+
+ result = s_StartAudio();
+
+ if (result == AAUDIO_OK) {
+ for (int i = 20; i > 0; i--) {
+ sleep(1);
+ printf("playing silence #%d, framesRead = %d\n", i, (int) s_AudioEngine.framesRead);
+ }
+ }
+
+ s_StopAudio();
+
+ printf("result = %d = %s\n", result, AAudio_convertResultToText(result));
+}
diff --git a/media/libaaudio/tests/test_interference.cpp b/media/libaaudio/tests/test_interference.cpp
new file mode 100644
index 0000000..7eaf225
--- /dev/null
+++ b/media/libaaudio/tests/test_interference.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play a shared stream that might use MMAP.
+// Then play a second stream at a different sample rate.
+// Make sure the first stream is still running.
+// See: b/73369112 | AAudio disconnects shared stream if second MMAP open fails
+
+#include <memory.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <android-base/macros.h>
+#include <aaudio/AAudio.h>
+
+#include <gtest/gtest.h>
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t MyDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames) {
+ (void) userData;
+ int32_t numSamples = AAudioStream_getChannelCount(stream) * numFrames;
+ aaudio_format_t format = AAudioStream_getFormat(stream);
+ if (format == AAUDIO_FORMAT_PCM_I16) {
+ memset(audioData, 0, numSamples * sizeof(int16_t));
+ } else if (format == AAUDIO_FORMAT_PCM_FLOAT) {
+ memset(audioData, 0, numSamples * sizeof(float));
+ }
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+//void foo() { // for tricking the Android Studio formatter
+TEST(test_interference, aaudio_mmap_interference) {
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream1 = nullptr;
+ AAudioStream *aaudioStream2 = nullptr;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ // Request stream properties.
+ AAudioStreamBuilder_setSampleRate(aaudioBuilder, 48000);
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, MyDataCallbackProc, nullptr);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream1));
+ // Start it running.
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream1));
+
+ // Verify that the stream is running.
+ sleep(1);
+ EXPECT_LT(0, AAudioStream_getFramesRead(aaudioStream1));
+ ASSERT_EQ(AAUDIO_STREAM_STATE_STARTED, AAudioStream_getState(aaudioStream1));
+
+ // Now try to open a second stream with a different rate.
+ AAudioStreamBuilder_setSampleRate(aaudioBuilder, 44100);
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream2));
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream2));
+
+ // Verify that the second stream is running.
+ sleep(1);
+ EXPECT_LT(0, AAudioStream_getFramesRead(aaudioStream2));
+
+ EXPECT_EQ(AAUDIO_STREAM_STATE_STARTED, AAudioStream_getState(aaudioStream2));
+
+ // Now verify that the first stream is still running.
+ EXPECT_EQ(AAUDIO_STREAM_STATE_STARTED, AAudioStream_getState(aaudioStream1));
+
+ int32_t framesRead1_1 = AAudioStream_getFramesRead(aaudioStream1);
+ EXPECT_LT(0, framesRead1_1);
+ sleep(1);
+ int32_t framesRead1_2 = AAudioStream_getFramesRead(aaudioStream1);
+ EXPECT_LT(0, framesRead1_2);
+ EXPECT_LT(framesRead1_1, framesRead1_2); // advancing?
+
+ AAudioStream_close(aaudioStream2);
+ AAudioStream_close(aaudioStream1);
+ AAudioStreamBuilder_delete(aaudioBuilder);
+}
diff --git a/media/libaaudio/tests/test_linear_ramp.cpp b/media/libaaudio/tests/test_linear_ramp.cpp
index 5c53982..93226ba 100644
--- a/media/libaaudio/tests/test_linear_ramp.cpp
+++ b/media/libaaudio/tests/test_linear_ramp.cpp
@@ -15,13 +15,13 @@
*/
#include <iostream>
+#include <math.h>
#include <gtest/gtest.h>
#include "utility/AAudioUtilities.h"
#include "utility/LinearRamp.h"
-
TEST(test_linear_ramp, linear_ramp_segments) {
LinearRamp ramp;
const float source[4] = {1.0f, 1.0f, 1.0f, 1.0f };
@@ -32,40 +32,40 @@
ramp.setLengthInFrames(8);
ramp.setTarget(8.0f);
- ASSERT_EQ(8, ramp.getLengthInFrames());
+ EXPECT_EQ(8, ramp.getLengthInFrames());
bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(0.0f, levelFrom);
- ASSERT_EQ(4.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(0.0f, levelFrom);
+ EXPECT_EQ(4.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(0.0f, destination[0]);
- ASSERT_EQ(1.0f, destination[1]);
- ASSERT_EQ(2.0f, destination[2]);
- ASSERT_EQ(3.0f, destination[3]);
+ EXPECT_EQ(0.0f, destination[0]);
+ EXPECT_EQ(1.0f, destination[1]);
+ EXPECT_EQ(2.0f, destination[2]);
+ EXPECT_EQ(3.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(4.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(4.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(4.0f, destination[0]);
- ASSERT_EQ(5.0f, destination[1]);
- ASSERT_EQ(6.0f, destination[2]);
- ASSERT_EQ(7.0f, destination[3]);
+ EXPECT_EQ(4.0f, destination[0]);
+ EXPECT_EQ(5.0f, destination[1]);
+ EXPECT_EQ(6.0f, destination[2]);
+ EXPECT_EQ(7.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(0, ramping);
- ASSERT_EQ(8.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(0, ramping);
+ EXPECT_EQ(8.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(8.0f, destination[0]);
- ASSERT_EQ(8.0f, destination[1]);
- ASSERT_EQ(8.0f, destination[2]);
- ASSERT_EQ(8.0f, destination[3]);
+ EXPECT_EQ(8.0f, destination[0]);
+ EXPECT_EQ(8.0f, destination[1]);
+ EXPECT_EQ(8.0f, destination[2]);
+ EXPECT_EQ(8.0f, destination[3]);
};
@@ -80,29 +80,101 @@
ramp.setLengthInFrames(4);
ramp.setTarget(8.0f);
ramp.forceCurrent(4.0f);
- ASSERT_EQ(4.0f, ramp.getCurrent());
+ EXPECT_EQ(4.0f, ramp.getCurrent());
bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(1, ramping);
- ASSERT_EQ(4.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(1, ramping);
+ EXPECT_EQ(4.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(4.0f, destination[0]);
- ASSERT_EQ(5.0f, destination[1]);
- ASSERT_EQ(6.0f, destination[2]);
- ASSERT_EQ(7.0f, destination[3]);
+ EXPECT_EQ(4.0f, destination[0]);
+ EXPECT_EQ(5.0f, destination[1]);
+ EXPECT_EQ(6.0f, destination[2]);
+ EXPECT_EQ(7.0f, destination[3]);
ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
- ASSERT_EQ(0, ramping);
- ASSERT_EQ(8.0f, levelFrom);
- ASSERT_EQ(8.0f, levelTo);
+ EXPECT_EQ(0, ramping);
+ EXPECT_EQ(8.0f, levelFrom);
+ EXPECT_EQ(8.0f, levelTo);
AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
- ASSERT_EQ(8.0f, destination[0]);
- ASSERT_EQ(8.0f, destination[1]);
- ASSERT_EQ(8.0f, destination[2]);
- ASSERT_EQ(8.0f, destination[3]);
+ EXPECT_EQ(8.0f, destination[0]);
+ EXPECT_EQ(8.0f, destination[1]);
+ EXPECT_EQ(8.0f, destination[2]);
+ EXPECT_EQ(8.0f, destination[3]);
};
+constexpr int16_t kMaxI16 = INT16_MAX;
+constexpr int16_t kMinI16 = INT16_MIN;
+constexpr int16_t kHalfI16 = 16384;
+constexpr int16_t kTenthI16 = 3277;
+
+//void AAudioConvert_floatToPcm16(const float *source,
+// int16_t *destination,
+// int32_t numSamples,
+// float amplitude);
+TEST(test_linear_ramp, float_to_i16) {
+ const float source[] = {12345.6f, 1.0f, 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f, -12345.6f};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ int16_t destination[count];
+ const int16_t expected[count] = {kMaxI16, kMaxI16, kHalfI16, kTenthI16, 0,
+ -kTenthI16, -kHalfI16, kMinI16, kMinI16};
+
+ AAudioConvert_floatToPcm16(source, destination, count, 1.0f);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_EQ(expected[i], destination[i]);
+ }
+
+}
+
+//void AAudioConvert_pcm16ToFloat(const int16_t *source,
+// float *destination,
+// int32_t numSamples,
+// float amplitude);
+TEST(test_linear_ramp, i16_to_float) {
+ const int16_t source[] = {kMaxI16, kHalfI16, kTenthI16, 0,
+ -kTenthI16, -kHalfI16, kMinI16};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ float destination[count];
+ const float expected[count] = {(32767.0f / 32768.0f), 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f};
+
+ AAudioConvert_pcm16ToFloat(source, destination, count, 1.0f);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_NEAR(expected[i], destination[i], 0.0001f);
+ }
+
+}
+
+//void AAudio_linearRamp(const int16_t *source,
+// int16_t *destination,
+// int32_t numFrames,
+// int32_t samplesPerFrame,
+// float amplitude1,
+// float amplitude2);
+TEST(test_linear_ramp, ramp_i16_to_i16) {
+ const int16_t source[] = {1, 1, 1, 1, 1, 1, 1, 1};
+ constexpr size_t count = sizeof(source) / sizeof(source[0]);
+ int16_t destination[count];
+ // Ramp will sweep from -1 to almost +1
+ const int16_t expected[count] = {
+ -1, // from -1.00
+ -1, // from -0.75
+ -1, // from -0.50, round away from zero
+ 0, // from -0.25, round up to zero
+ 0, // from 0.00
+ 0, // from 0.25, round down to zero
+ 1, // from 0.50, round away from zero
+ 1 // from 0.75
+ };
+
+ // sweep across zero to test symmetry
+ constexpr float amplitude1 = -1.0;
+ constexpr float amplitude2 = 1.0;
+ AAudio_linearRamp(source, destination, count, 1, amplitude1, amplitude2);
+ for (size_t i = 0; i < count; i++) {
+ EXPECT_EQ(expected[i], destination[i]);
+ }
+
+}
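The expected values in float_to_i16 and i16_to_float above follow from the 1/32768 scaling used by the converters: 0.5f * 32768 = 16384 (kHalfI16), 0.1f * 32768 = 3276.8 which rounds to 3277 (kTenthI16), and +1.0f saturates at 32767 because +32768 is not representable in int16_t; in the reverse direction 32767 / 32768 ≈ 0.99997, which is why i16_to_float expects (32767.0f / 32768.0f) rather than exactly 1.0f.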
diff --git a/media/libaaudio/tests/test_session_id.cpp b/media/libaaudio/tests/test_session_id.cpp
new file mode 100644
index 0000000..3f7d4fc
--- /dev/null
+++ b/media/libaaudio/tests/test_session_id.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Test AAudio SessionId, which is used to associate Effects with a stream
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+#include <gtest/gtest.h>
+
+constexpr int64_t kNanosPerSecond = 1000000000;
+constexpr int kNumFrames = 256;
+constexpr int kChannelCount = 2;
+
+// Test AAUDIO_SESSION_ID_NONE default
+static void checkSessionIdNone(aaudio_performance_mode_t perfMode) {
+
+ float *buffer = new float[kNumFrames * kChannelCount];
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+
+ AAudioStream *aaudioStream1 = nullptr;
+ int32_t sessionId1 = 0;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ // Request stream properties.
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream1));
+
+ // Since we did not request or specify a SessionID, we should get NONE
+ sessionId1 = AAudioStream_getSessionId(aaudioStream1);
+ ASSERT_EQ(AAUDIO_SESSION_ID_NONE, sessionId1);
+
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream1));
+
+ ASSERT_EQ(kNumFrames, AAudioStream_write(aaudioStream1, buffer, kNumFrames, kNanosPerSecond));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream1));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream1));
+ delete[] buffer;
+ AAudioStreamBuilder_delete(aaudioBuilder);
+}
+
+TEST(test_session_id, aaudio_session_id_none_perfnone) {
+ checkSessionIdNone(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_session_id, aaudio_session_id_none_lowlat) {
+ checkSessionIdNone(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
+// Test AAUDIO_SESSION_ID_ALLOCATE
+static void checkSessionIdAllocate(aaudio_performance_mode_t perfMode,
+ aaudio_direction_t direction) {
+
+ float *buffer = new float[kNumFrames * kChannelCount];
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+
+ AAudioStream *aaudioStream1 = nullptr;
+ int32_t sessionId1 = 0;
+ AAudioStream *aaudioStream2 = nullptr;
+ int32_t sessionId2 = 0;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ // Request stream properties.
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+ // This stream could be input or output.
+ AAudioStreamBuilder_setDirection(aaudioBuilder, direction);
+
+ // Ask AAudio to allocate a Session ID.
+ AAudioStreamBuilder_setSessionId(aaudioBuilder, AAUDIO_SESSION_ID_ALLOCATE);
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream1));
+
+ // Get the allocated ID from the stream.
+ sessionId1 = AAudioStream_getSessionId(aaudioStream1);
+
+ // Check for invalid session IDs.
+ ASSERT_NE(AAUDIO_SESSION_ID_NONE, sessionId1);
+ ASSERT_NE(AAUDIO_SESSION_ID_ALLOCATE, sessionId1);
+
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream1));
+
+ if (direction == AAUDIO_DIRECTION_INPUT) {
+ ASSERT_EQ(kNumFrames, AAudioStream_read(aaudioStream1,
+ buffer, kNumFrames, kNanosPerSecond));
+ } else {
+ ASSERT_EQ(kNumFrames, AAudioStream_write(aaudioStream1,
+ buffer, kNumFrames, kNanosPerSecond));
+ }
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream1));
+
+ // Now open a second stream using the same session ID. ==================
+ AAudioStreamBuilder_setSessionId(aaudioBuilder, sessionId1);
+
+ // Reverse direction for second stream.
+ aaudio_direction_t otherDirection = (direction == AAUDIO_DIRECTION_OUTPUT)
+ ? AAUDIO_DIRECTION_INPUT
+ : AAUDIO_DIRECTION_OUTPUT;
+ AAudioStreamBuilder_setDirection(aaudioBuilder, otherDirection);
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream2));
+
+ // Get the allocated ID from the stream.
+ // It should match the ID that we set it to in the builder.
+ sessionId2 = AAudioStream_getSessionId(aaudioStream2);
+ ASSERT_EQ(sessionId1, sessionId2);
+
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream2));
+
+ if (otherDirection == AAUDIO_DIRECTION_INPUT) {
+ ASSERT_EQ(kNumFrames, AAudioStream_read(aaudioStream2,
+ buffer, kNumFrames, kNanosPerSecond));
+ } else {
+ ASSERT_EQ(kNumFrames, AAudioStream_write(aaudioStream2,
+ buffer, kNumFrames, kNanosPerSecond));
+ }
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream2));
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream2));
+
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream1));
+ delete[] buffer;
+ AAudioStreamBuilder_delete(aaudioBuilder);
+}
+
+TEST(test_session_id, aaudio_session_id_alloc_perfnone_in) {
+ checkSessionIdAllocate(AAUDIO_PERFORMANCE_MODE_NONE, AAUDIO_DIRECTION_INPUT);
+}
+TEST(test_session_id, aaudio_session_id_alloc_perfnone_out) {
+ checkSessionIdAllocate(AAUDIO_PERFORMANCE_MODE_NONE, AAUDIO_DIRECTION_OUTPUT);
+}
+
+TEST(test_session_id, aaudio_session_id_alloc_lowlat_in) {
+ checkSessionIdAllocate(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, AAUDIO_DIRECTION_INPUT);
+}
+TEST(test_session_id, aaudio_session_id_alloc_lowlat_out) {
+ checkSessionIdAllocate(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, AAUDIO_DIRECTION_OUTPUT);
+}
diff --git a/media/libaaudio/tests/test_timestamps.cpp b/media/libaaudio/tests/test_timestamps.cpp
index b57f0a4..dfa7815 100644
--- a/media/libaaudio/tests/test_timestamps.cpp
+++ b/media/libaaudio/tests/test_timestamps.cpp
@@ -22,8 +22,7 @@
#include <aaudio/AAudio.h>
#include <aaudio/AAudioTesting.h>
-#include "utils/AAudioExampleUtils.h"
-#include "../examples/utils/AAudioExampleUtils.h"
+#include "AAudioExampleUtils.h"
// Arbitrary period for glitches, once per second at 48000 Hz.
#define FORCED_UNDERRUN_PERIOD_FRAMES 48000
diff --git a/media/libaaudio/tests/test_various.cpp b/media/libaaudio/tests/test_various.cpp
new file mode 100644
index 0000000..4b065c9
--- /dev/null
+++ b/media/libaaudio/tests/test_various.cpp
@@ -0,0 +1,628 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Test various AAudio features including AAudioStream_setBufferSizeInFrames().
+
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
+#include <stdio.h>
+
+#include <android-base/macros.h>
+#include <aaudio/AAudio.h>
+
+#include <gtest/gtest.h>
+#include <unistd.h>
+
+// Callback function that does nothing.
+aaudio_data_callback_result_t NoopDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+) {
+ (void) stream;
+ (void) userData;
+ (void) audioData;
+ (void) numFrames;
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+// Test AAudioStream_setBufferSizeInFrames()
+
+constexpr int64_t NANOS_PER_MILLISECOND = 1000 * 1000;
+
+enum FunctionToCall {
+ CALL_START, CALL_STOP, CALL_PAUSE, CALL_FLUSH
+};
+
+void checkStateTransition(aaudio_performance_mode_t perfMode,
+ aaudio_stream_state_t originalState,
+ FunctionToCall functionToCall,
+ aaudio_result_t expectedResult,
+ aaudio_stream_state_t expectedState) {
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ // Request stream properties.
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, NoopDataCallbackProc, nullptr);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+ // Verify Open State
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_UNKNOWN, &state,
+ 1000 * NANOS_PER_MILLISECOND));
+ EXPECT_EQ(AAUDIO_STREAM_STATE_OPEN, state);
+
+ // Put stream into desired state.
+ aaudio_stream_state_t inputState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ if (originalState != AAUDIO_STREAM_STATE_OPEN) {
+
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+ if (originalState != AAUDIO_STREAM_STATE_STARTING) {
+
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ AAUDIO_STREAM_STATE_STARTING,
+ &state,
+ 1000 * NANOS_PER_MILLISECOND));
+ ASSERT_EQ(AAUDIO_STREAM_STATE_STARTED, state);
+
+ if (originalState == AAUDIO_STREAM_STATE_STOPPING) {
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+ } else if (originalState == AAUDIO_STREAM_STATE_STOPPED) {
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+ inputState = AAUDIO_STREAM_STATE_STOPPING;
+ } else if (originalState == AAUDIO_STREAM_STATE_PAUSING) {
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestPause(aaudioStream));
+ } else if (originalState == AAUDIO_STREAM_STATE_PAUSED) {
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestPause(aaudioStream));
+ inputState = AAUDIO_STREAM_STATE_PAUSING;
+ }
+ }
+ }
+
+ // Wait until past transitional state.
+ if (inputState != AAUDIO_STREAM_STATE_UNINITIALIZED) {
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ inputState,
+ &state,
+ 1000 * NANOS_PER_MILLISECOND));
+ ASSERT_EQ(originalState, state);
+ }
+
+ aaudio_stream_state_t transitionalState = originalState;
+ switch (functionToCall) {
+ case FunctionToCall::CALL_START:
+ EXPECT_EQ(expectedResult, AAudioStream_requestStart(aaudioStream));
+ transitionalState = AAUDIO_STREAM_STATE_STARTING;
+ break;
+ case FunctionToCall::CALL_STOP:
+ EXPECT_EQ(expectedResult, AAudioStream_requestStop(aaudioStream));
+ transitionalState = AAUDIO_STREAM_STATE_STOPPING;
+ break;
+ case FunctionToCall::CALL_PAUSE:
+ EXPECT_EQ(expectedResult, AAudioStream_requestPause(aaudioStream));
+ transitionalState = AAUDIO_STREAM_STATE_PAUSING;
+ break;
+ case FunctionToCall::CALL_FLUSH:
+ EXPECT_EQ(expectedResult, AAudioStream_requestFlush(aaudioStream));
+ transitionalState = AAUDIO_STREAM_STATE_FLUSHING;
+ break;
+ }
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_waitForStateChange(aaudioStream,
+ transitionalState,
+ &state,
+ 1000 * NANOS_PER_MILLISECOND));
+ // We should not change state when a function fails.
+ if (expectedResult != AAUDIO_OK) {
+ ASSERT_EQ(originalState, expectedState);
+ }
+ EXPECT_EQ(expectedState, state);
+ if (state != expectedState) {
+ printf("ERROR - expected %s, actual = %s\n",
+ AAudio_convertStreamStateToText(expectedState),
+ AAudio_convertStreamStateToText(state));
+ fflush(stdout);
+ }
+
+ AAudioStream_close(aaudioStream);
+ AAudioStreamBuilder_delete(aaudioBuilder);
+}
+
+// TODO Use parameterized tests instead of these individual specific tests.
+
+// OPEN =================================================================
+TEST(test_various, aaudio_state_lowlat_open_start) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_OPEN,
+ FunctionToCall::CALL_START,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+TEST(test_various, aaudio_state_none_open_start) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_OPEN,
+ FunctionToCall::CALL_START,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+TEST(test_various, aaudio_state_lowlat_open_stop) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_OPEN,
+ FunctionToCall::CALL_STOP,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STOPPED);
+}
+
+TEST(test_various, aaudio_state_none_open_stop) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_OPEN,
+ FunctionToCall::CALL_STOP,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STOPPED);
+}
+
+TEST(test_various, aaudio_state_lowlat_open_pause) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_OPEN,
+ FunctionToCall::CALL_PAUSE,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_PAUSED);
+}
+
+TEST(test_various, aaudio_state_none_open_pause) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_OPEN,
+ FunctionToCall::CALL_PAUSE,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_PAUSED);
+}
+
+TEST(test_various, aaudio_state_lowlat_open_flush) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_OPEN,
+ FunctionToCall::CALL_FLUSH,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_FLUSHED);
+}
+
+TEST(test_various, aaudio_state_none_open_flush) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_OPEN,
+ FunctionToCall::CALL_FLUSH,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_FLUSHED);
+}
+
+
+// STARTED =================================================================
+TEST(test_various, aaudio_state_lowlat_started_start) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_STARTED,
+ FunctionToCall::CALL_START,
+ AAUDIO_ERROR_INVALID_STATE,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+TEST(test_various, aaudio_state_none_started_start) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_STARTED,
+ FunctionToCall::CALL_START,
+ AAUDIO_ERROR_INVALID_STATE,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+TEST(test_various, aaudio_state_lowlat_started_stop) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_STARTED,
+ FunctionToCall::CALL_STOP,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STOPPED);
+}
+
+TEST(test_various, aaudio_state_none_started_stop) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_STARTED,
+ FunctionToCall::CALL_STOP,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STOPPED);
+}
+
+TEST(test_various, aaudio_state_lowlat_started_pause) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_STARTED,
+ FunctionToCall::CALL_PAUSE,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_PAUSED);
+}
+
+TEST(test_various, aaudio_state_none_started_pause) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_STARTED,
+ FunctionToCall::CALL_PAUSE,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_PAUSED);
+}
+
+TEST(test_various, aaudio_state_lowlat_started_flush) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_STARTED,
+ FunctionToCall::CALL_FLUSH,
+ AAUDIO_ERROR_INVALID_STATE,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+TEST(test_various, aaudio_state_none_started_flush) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_STARTED,
+ FunctionToCall::CALL_FLUSH,
+ AAUDIO_ERROR_INVALID_STATE,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+// STOPPED =================================================================
+TEST(test_various, aaudio_state_lowlat_stopped_start) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_STOPPED,
+ FunctionToCall::CALL_START,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+TEST(test_various, aaudio_state_none_stopped_start) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_STOPPED,
+ FunctionToCall::CALL_START,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+TEST(test_various, aaudio_state_lowlat_stopped_stop) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_STOPPED,
+ FunctionToCall::CALL_STOP,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STOPPED);
+}
+
+TEST(test_various, aaudio_state_none_stopped_stop) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_STOPPED,
+ FunctionToCall::CALL_STOP,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STOPPED);
+}
+
+TEST(test_various, aaudio_state_lowlat_stopped_pause) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_STOPPED,
+ FunctionToCall::CALL_PAUSE,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_PAUSED);
+}
+
+TEST(test_various, aaudio_state_none_stopped_pause) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_STOPPED,
+ FunctionToCall::CALL_PAUSE,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_PAUSED);
+}
+
+TEST(test_various, aaudio_state_lowlat_stopped_flush) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_STOPPED,
+ FunctionToCall::CALL_FLUSH,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_FLUSHED);
+}
+
+TEST(test_various, aaudio_state_none_stopped_flush) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_STOPPED,
+ FunctionToCall::CALL_FLUSH,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_FLUSHED);
+}
+
+// PAUSED =================================================================
+TEST(test_various, aaudio_state_lowlat_paused_start) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_PAUSED,
+ FunctionToCall::CALL_START,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+TEST(test_various, aaudio_state_none_paused_start) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_PAUSED,
+ FunctionToCall::CALL_START,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STARTED);
+}
+
+TEST(test_various, aaudio_state_lowlat_paused_stop) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_PAUSED,
+ FunctionToCall::CALL_STOP,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STOPPED);
+}
+
+TEST(test_various, aaudio_state_none_paused_stop) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_PAUSED,
+ FunctionToCall::CALL_STOP,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_STOPPED);
+}
+
+TEST(test_various, aaudio_state_lowlat_paused_pause) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_PAUSED,
+ FunctionToCall::CALL_PAUSE,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_PAUSED);
+}
+
+TEST(test_various, aaudio_state_none_paused_pause) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_PAUSED,
+ FunctionToCall::CALL_PAUSE,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_PAUSED);
+}
+
+TEST(test_various, aaudio_state_lowlat_paused_flush) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_STREAM_STATE_PAUSED,
+ FunctionToCall::CALL_FLUSH,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_FLUSHED);
+}
+
+TEST(test_various, aaudio_state_none_paused_flush) {
+ checkStateTransition(AAUDIO_PERFORMANCE_MODE_NONE,
+ AAUDIO_STREAM_STATE_PAUSED,
+ FunctionToCall::CALL_FLUSH,
+ AAUDIO_OK,
+ AAUDIO_STREAM_STATE_FLUSHED);
+}
+
+// ==========================================================================
+TEST(test_various, aaudio_set_buffer_size) {
+
+ int32_t bufferCapacity;
+ int32_t framesPerBurst = 0;
+ int32_t actualSize = 0;
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ // Request stream properties.
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, NoopDataCallbackProc, nullptr);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+
+ // Create an AAudioStream using the Builder.
+ EXPECT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+
+ // This is the number of frames that are read in one chunk by a DMA controller
+ // or a DSP or a mixer.
+ framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
+ bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
+ printf(" bufferCapacity = %d, remainder = %d\n",
+ bufferCapacity, bufferCapacity % framesPerBurst);
+
+ actualSize = AAudioStream_setBufferSizeInFrames(aaudioStream, 0);
+ EXPECT_GT(actualSize, 0);
+ EXPECT_LE(actualSize, bufferCapacity);
+
+ actualSize = AAudioStream_setBufferSizeInFrames(aaudioStream, 2 * framesPerBurst);
+ EXPECT_GT(actualSize, framesPerBurst);
+ EXPECT_LE(actualSize, bufferCapacity);
+
+ actualSize = AAudioStream_setBufferSizeInFrames(aaudioStream, bufferCapacity - 1);
+ EXPECT_GT(actualSize, framesPerBurst);
+ EXPECT_LE(actualSize, bufferCapacity);
+
+ actualSize = AAudioStream_setBufferSizeInFrames(aaudioStream, bufferCapacity);
+ EXPECT_GT(actualSize, framesPerBurst);
+ EXPECT_LE(actualSize, bufferCapacity);
+
+ actualSize = AAudioStream_setBufferSizeInFrames(aaudioStream, bufferCapacity + 1);
+ EXPECT_GT(actualSize, framesPerBurst);
+ EXPECT_LE(actualSize, bufferCapacity);
+
+ actualSize = AAudioStream_setBufferSizeInFrames(aaudioStream, 1234567);
+ EXPECT_GT(actualSize, framesPerBurst);
+ EXPECT_LE(actualSize, bufferCapacity);
+
+ actualSize = AAudioStream_setBufferSizeInFrames(aaudioStream, INT32_MAX);
+ EXPECT_GT(actualSize, framesPerBurst);
+ EXPECT_LE(actualSize, bufferCapacity);
+
+ actualSize = AAudioStream_setBufferSizeInFrames(aaudioStream, INT32_MIN);
+ EXPECT_GT(actualSize, 0);
+ EXPECT_LE(actualSize, bufferCapacity);
+
+ AAudioStream_close(aaudioStream);
+ AAudioStreamBuilder_delete(aaudioBuilder);
+}
+
+// ************************************************************
+// Test to make sure that AAUDIO_CALLBACK_RESULT_STOP works.
+
+// Callback function that counts calls.
+aaudio_data_callback_result_t CallbackOnceProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+) {
+ (void) stream;
+ (void) audioData;
+ (void) numFrames;
+
+ std::atomic<int32_t> *callbackCountPtr = (std::atomic<int32_t> *)userData;
+ (*callbackCountPtr)++;
+
+ return AAUDIO_CALLBACK_RESULT_STOP;
+}
+
+void checkCallbackOnce(aaudio_performance_mode_t perfMode) {
+
+ std::atomic<int32_t> callbackCount{0};
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ // Request stream properties.
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, CallbackOnceProc, &callbackCount);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+ AAudioStreamBuilder_delete(aaudioBuilder);
+
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+ sleep(1); // Give callback a chance to run many times.
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+
+ EXPECT_EQ(1, callbackCount.load()); // should stop after first call
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
+
+TEST(test_various, aaudio_callback_once_none) {
+ checkCallbackOnce(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_various, aaudio_callback_once_lowlat) {
+ checkCallbackOnce(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
+
+// ************************************************************
+struct WakeUpCallbackData {
+ void wakeOther() {
+ // signal waiting test to wake up
+ {
+ std::lock_guard<std::mutex> lock(mutex);
+ finished = true;
+ }
+ conditionVariable.notify_one();
+ }
+
+ void waitForFinished() {
+ std::unique_lock<std::mutex> aLock(mutex);
+ conditionVariable.wait(aLock, [=] { return finished; });
+ }
+
+ // For signalling the foreground test when the callback has finished
+ std::mutex mutex;
+ std::condition_variable conditionVariable;
+ bool finished = false;
+};
+
+// Test to make sure we cannot call recursively into the system from a callback.
+struct DangerousData : public WakeUpCallbackData {
+ aaudio_result_t resultStart = AAUDIO_OK;
+ aaudio_result_t resultStop = AAUDIO_OK;
+ aaudio_result_t resultPause = AAUDIO_OK;
+ aaudio_result_t resultFlush = AAUDIO_OK;
+ aaudio_result_t resultClose = AAUDIO_OK;
+};
+
+// Callback function that tries to call back into the stream.
+aaudio_data_callback_result_t DangerousDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames) {
+ (void) audioData;
+ (void) numFrames;
+
+ DangerousData *data = (DangerousData *)userData;
+ data->resultStart = AAudioStream_requestStart(stream);
+ data->resultStop = AAudioStream_requestStop(stream);
+ data->resultPause = AAudioStream_requestPause(stream);
+ data->resultFlush = AAudioStream_requestFlush(stream);
+ data->resultClose = AAudioStream_close(stream);
+
+ data->wakeOther();
+
+ return AAUDIO_CALLBACK_RESULT_STOP;
+}
+
+//int main() { // To fix Android Studio formatting when editing.
+void checkDangerousCallback(aaudio_performance_mode_t perfMode) {
+ DangerousData dangerousData;
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));
+
+ // Request stream properties.
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, DangerousDataCallbackProc, &dangerousData);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
+
+ // Create an AAudioStream using the Builder.
+ ASSERT_EQ(AAUDIO_OK, AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream));
+ AAudioStreamBuilder_delete(aaudioBuilder);
+
+ ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream));
+
+ dangerousData.waitForFinished();
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
+
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, dangerousData.resultStart);
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, dangerousData.resultStop);
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, dangerousData.resultPause);
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, dangerousData.resultFlush);
+ EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, dangerousData.resultClose);
+
+ EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
+}
+
+//int main() { // To fix Android Studio formatting when editing.
+
+TEST(test_various, aaudio_callback_blockers_none) {
+ checkDangerousCallback(AAUDIO_PERFORMANCE_MODE_NONE);
+}
+
+TEST(test_various, aaudio_callback_blockers_lowlat) {
+ checkDangerousCallback(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+}
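
Note: the TODO near the top of test_various.cpp asks for parameterized tests instead of the hand-written per-mode cases. A minimal sketch of that refactor with gtest's value-parameterized API (suite and test names here are illustrative, not part of the change) could look like:

    class StateTransitionTest : public ::testing::TestWithParam<aaudio_performance_mode_t> {};

    TEST_P(StateTransitionTest, OpenThenStart) {
        checkStateTransition(GetParam(),
                             AAUDIO_STREAM_STATE_OPEN,
                             FunctionToCall::CALL_START,
                             AAUDIO_OK,
                             AAUDIO_STREAM_STATE_STARTED);
    }

    INSTANTIATE_TEST_CASE_P(PerfModes, StateTransitionTest,
                            ::testing::Values(AAUDIO_PERFORMANCE_MODE_NONE,
                                              AAUDIO_PERFORMANCE_MODE_LOW_LATENCY));
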
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 61c946c..2df37a8 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -6,7 +6,22 @@
cc_library_shared {
name: "libaudioclient",
+
+ aidl: {
+ export_aidl_headers: true,
+ local_include_dirs: ["aidl"],
+ include_dirs: [
+ "frameworks/av/media/libaudioclient/aidl",
+ ],
+ },
+
srcs: [
+ // AIDL files for audioclient interfaces
+ // The headers for these interfaces will be available to any modules that
+ // include libaudioclient, at the path "aidl/package/path/BnFoo.h"
+ "aidl/android/media/IAudioRecord.aidl",
+ ":libaudioclient_aidl",
+
"AudioEffect.cpp",
"AudioPolicy.cpp",
"AudioRecord.cpp",
@@ -17,7 +32,6 @@
"IAudioFlingerClient.cpp",
"IAudioPolicyService.cpp",
"IAudioPolicyServiceClient.cpp",
- "IAudioRecord.cpp",
"IAudioTrack.cpp",
"IEffect.cpp",
"IEffectClient.cpp",
@@ -33,10 +47,12 @@
"libdl",
"libaudioutils",
"libaudiomanager",
+ "libmedia_helper",
+ "libmediametrics",
],
export_shared_lib_headers: ["libbinder"],
- local_include_dirs: ["include/media"],
+ local_include_dirs: ["include/media", "aidl"],
header_libs: ["libaudioclient_headers"],
export_header_lib_headers: ["libaudioclient_headers"],
@@ -56,3 +72,11 @@
],
},
}
+
+// AIDL interface between libaudioclient and framework.jar
+filegroup {
+ name: "libaudioclient_aidl",
+ srcs: [
+ "aidl/android/media/IPlayer.aidl",
+ ],
+}
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index ba4acc6..f9df5b1 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -26,6 +26,8 @@
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
+#include <media/MediaAnalyticsItem.h>
+#include <media/TypeConverter.h>
#define WAIT_PERIOD_MS 10
@@ -65,12 +67,90 @@
// ---------------------------------------------------------------------------
+static std::string audioFormatTypeString(audio_format_t value) {
+ std::string formatType;
+ if (FormatConverter::toString(value, formatType)) {
+ return formatType;
+ }
+ char rawbuffer[16]; // room for "%d"
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+ return rawbuffer;
+}
+
+static std::string audioSourceString(audio_source_t value) {
+ std::string source;
+ if (SourceTypeConverter::toString(value, source)) {
+ return source;
+ }
+ char rawbuffer[16]; // room for "%d"
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+ return rawbuffer;
+}
+
+void AudioRecord::MediaMetrics::gather(const AudioRecord *record)
+{
+ // key for media statistics is defined in the header
+ // attrs for media statistics
+ // NB: these are matched with public Java API constants defined
+ // in frameworks/base/media/java/android/media/AudioRecord.java
+ // These must be kept synchronized with the constants there.
+ static constexpr char kAudioRecordEncoding[] = "android.media.audiorecord.encoding";
+ static constexpr char kAudioRecordSource[] = "android.media.audiorecord.source";
+ static constexpr char kAudioRecordLatency[] = "android.media.audiorecord.latency";
+ static constexpr char kAudioRecordSampleRate[] = "android.media.audiorecord.samplerate";
+ static constexpr char kAudioRecordChannelCount[] = "android.media.audiorecord.channels";
+ static constexpr char kAudioRecordCreated[] = "android.media.audiorecord.createdMs";
+ static constexpr char kAudioRecordDuration[] = "android.media.audiorecord.durationMs";
+ static constexpr char kAudioRecordCount[] = "android.media.audiorecord.n";
+ static constexpr char kAudioRecordError[] = "android.media.audiorecord.errcode";
+ static constexpr char kAudioRecordErrorFunction[] = "android.media.audiorecord.errfunc";
+
+ // constructor guarantees mAnalyticsItem is valid
+
+ mAnalyticsItem->setInt32(kAudioRecordLatency, record->mLatency);
+ mAnalyticsItem->setInt32(kAudioRecordSampleRate, record->mSampleRate);
+ mAnalyticsItem->setInt32(kAudioRecordChannelCount, record->mChannelCount);
+ mAnalyticsItem->setCString(kAudioRecordEncoding,
+ audioFormatTypeString(record->mFormat).c_str());
+ mAnalyticsItem->setCString(kAudioRecordSource,
+ audioSourceString(record->mAttributes.source).c_str());
+
+ // log total duration recording, including anything currently running [and count].
+ nsecs_t active = 0;
+ if (mStartedNs != 0) {
+ active = systemTime() - mStartedNs;
+ }
+ mAnalyticsItem->setInt64(kAudioRecordDuration, (mDurationNs + active) / (1000 * 1000));
+ mAnalyticsItem->setInt32(kAudioRecordCount, mCount);
+
+ // XXX I don't know that this adds a lot of value, long term
+ if (mCreatedNs != 0) {
+ mAnalyticsItem->setInt64(kAudioRecordCreated, mCreatedNs / (1000 * 1000));
+ }
+
+ if (mLastError != NO_ERROR) {
+ mAnalyticsItem->setInt32(kAudioRecordError, mLastError);
+ mAnalyticsItem->setCString(kAudioRecordErrorFunction, mLastErrorFunc.c_str());
+ }
+}
+
+// hand the user a snapshot of the metrics.
+status_t AudioRecord::getMetrics(MediaAnalyticsItem * &item)
+{
+ mMediaMetrics.gather(this);
+ MediaAnalyticsItem *tmp = mMediaMetrics.dup();
+ if (tmp == nullptr) {
+ return BAD_VALUE;
+ }
+ item = tmp;
+ return NO_ERROR;
+}
+
AudioRecord::AudioRecord(const String16 &opPackageName)
: mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mPortId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
}
@@ -89,24 +169,25 @@
audio_input_flags_t flags,
uid_t uid,
pid_t pid,
- const audio_attributes_t* pAttributes)
+ const audio_attributes_t* pAttributes,
+ audio_port_handle_t selectedDeviceId)
: mActive(false),
mStatus(NO_INIT),
mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
- mProxy(NULL),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mPortId(AUDIO_PORT_HANDLE_NONE)
+ mProxy(NULL)
{
- mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+ (void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
- uid, pid, pAttributes);
+ uid, pid, pAttributes, selectedDeviceId);
}
AudioRecord::~AudioRecord()
{
+ mMediaMetrics.gather(this);
+
if (mStatus == NO_ERROR) {
// Make sure that callback function exits in the case where
// it is looping on buffer empty condition in obtainBuffer().
@@ -148,14 +229,22 @@
audio_input_flags_t flags,
uid_t uid,
pid_t pid,
- const audio_attributes_t* pAttributes)
+ const audio_attributes_t* pAttributes,
+ audio_port_handle_t selectedDeviceId)
{
+ status_t status = NO_ERROR;
+ uint32_t channelCount;
+ pid_t callingPid;
+ pid_t myPid;
+
ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
"uid %d, pid %d",
inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
sessionId, transferType, flags, String8(mOpPackageName).string(), uid, pid);
+ mSelectedDeviceId = selectedDeviceId;
+
switch (transferType) {
case TRANSFER_DEFAULT:
if (cbf == NULL || threadCanCallJava) {
@@ -167,7 +256,8 @@
case TRANSFER_CALLBACK:
if (cbf == NULL) {
ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_OBTAIN:
@@ -175,14 +265,16 @@
break;
default:
ALOGE("Invalid transfer type %d", transferType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mTransfer = transferType;
// invariant that mAudioRecord != 0 is true only after set() returns successfully
if (mAudioRecord != 0) {
ALOGE("Track already in use");
- return INVALID_OPERATION;
+ status = INVALID_OPERATION;
+ goto exit;
}
if (pAttributes == NULL) {
@@ -206,16 +298,18 @@
// AudioFlinger capture only supports linear PCM
if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
ALOGE("Format %#x is not linear pcm", format);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mFormat = format;
if (!audio_is_input_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mChannelMask = channelMask;
- uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
+ channelCount = audio_channel_count_from_in_mask(channelMask);
mChannelCount = channelCount;
if (audio_is_linear_pcm(format)) {
@@ -224,28 +318,24 @@
mFrameSize = sizeof(uint8_t);
}
- // mFrameCount is initialized in openRecord_l
+ // mFrameCount is initialized in createRecord_l
mReqFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
- // mNotificationFramesAct is initialized in openRecord_l
+ // mNotificationFramesAct is initialized in createRecord_l
- if (sessionId == AUDIO_SESSION_ALLOCATE) {
- mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
- } else {
- mSessionId = sessionId;
- }
+ mSessionId = sessionId;
ALOGV("set(): mSessionId %d", mSessionId);
- int callingpid = IPCThreadState::self()->getCallingPid();
- int mypid = getpid();
- if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
+ callingPid = IPCThreadState::self()->getCallingPid();
+ myPid = getpid();
+ if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
- if (pid == -1 || (callingpid != mypid)) {
- mClientPid = callingpid;
+ if (pid == -1 || (callingPid != myPid)) {
+ mClientPid = callingPid;
} else {
mClientPid = pid;
}
@@ -260,7 +350,7 @@
}
// create the IAudioRecord
- status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
+ status = createRecord_l(0 /*epoch*/, mOpPackageName);
if (status != NO_ERROR) {
if (mAudioRecordThread != 0) {
@@ -268,10 +358,9 @@
mAudioRecordThread->requestExitAndWait();
mAudioRecordThread.clear();
}
- return status;
+ goto exit;
}
- mStatus = NO_ERROR;
mUserData = user;
// TODO: add audio hardware input latency here
mLatency = (1000LL * mFrameCount) / mSampleRate;
@@ -286,7 +375,12 @@
mFramesRead = 0;
mFramesReadServerOffset = 0;
- return NO_ERROR;
+exit:
+ mStatus = status;
+ if (status != NO_ERROR) {
+ mMediaMetrics.markError(status, __FUNCTION__);
+ }
+ return status;
}
// -------------------------------------------------------------------------
@@ -323,7 +417,7 @@
status_t status = NO_ERROR;
if (!(flags & CBLK_INVALID)) {
- status = mAudioRecord->start(event, triggerSession);
+ status = mAudioRecord->start(event, triggerSession).transactionError();
if (status == DEAD_OBJECT) {
flags |= CBLK_INVALID;
}
@@ -344,8 +438,14 @@
get_sched_policy(0, &mPreviousSchedulingGroup);
androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
}
+
+ // we've successfully started, log that time
+ mMediaMetrics.logStart(systemTime());
}
+ if (status != NO_ERROR) {
+ mMediaMetrics.markError(status, __FUNCTION__);
+ }
return status;
}
@@ -370,6 +470,9 @@
setpriority(PRIO_PROCESS, 0, mPreviousPriority);
set_sched_policy(0, mPreviousSchedulingGroup);
}
+
+ // we've successfully stopped, log that time
+ mMediaMetrics.logStop(systemTime());
}
bool AudioRecord::stopped() const
@@ -489,6 +592,7 @@
mAudioRecord->stop();
}
android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+ mProxy->interrupt();
}
}
return NO_ERROR;
@@ -521,6 +625,27 @@
return mRoutedDeviceId;
}
+status_t AudioRecord::dump(int fd, const Vector<String16>& args __unused) const
+{
+ String8 result;
+
+ result.append(" AudioRecord::dump\n");
+ result.appendFormat(" status(%d), active(%d), session Id(%d)\n",
+ mStatus, mActive, mSessionId);
+ result.appendFormat(" flags(%#x), req. flags(%#x), audio source(%d)\n",
+ mFlags, mOrigFlags, mAttributes.source);
+ result.appendFormat(" format(%#x), channel mask(%#x), channel count(%u), sample rate(%u)\n",
+ mFormat, mChannelMask, mChannelCount, mSampleRate);
+ result.appendFormat(" frame count(%zu), req. frame count(%zu)\n",
+ mFrameCount, mReqFrameCount);
+ result.appendFormat(" notif. frame count(%u), req. notif. frame count(%u)\n",
+ mNotificationFramesAct, mNotificationFramesReq);
+ result.appendFormat(" input(%d), latency(%u), selected device Id(%d), routed device Id(%d)\n",
+ mInput, mLatency, mSelectedDeviceId, mRoutedDeviceId);
+ ::write(fd, result.string(), result.size());
+ return NO_ERROR;
+}
+
// -------------------------------------------------------------------------
// TODO Move this macro to a common header file for enum to string conversion in audio framework.
#define MEDIA_CASE_ENUM(name) case name: return #name
@@ -536,70 +661,29 @@
}
// must be called with mLock held
-status_t AudioRecord::openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
+status_t AudioRecord::createRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
{
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
+ IAudioFlinger::CreateRecordInput input;
+ IAudioFlinger::CreateRecordOutput output;
+ audio_session_t originalSessionId;
+ sp<media::IAudioRecord> record;
+ void *iMemPointer;
+ audio_track_cblk_t* cblk;
+ status_t status;
+
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- audio_io_handle_t input;
-
// mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
// After fast request is denied, we will request again if IAudioRecord is re-created.
- status_t status;
-
- // Not a conventional loop, but a retry loop for at most two iterations total.
- // Try first maybe with FAST flag then try again without FAST flag if that fails.
- // Exits loop normally via a return at the bottom, or with error via a break.
- // The sp<> references will be dropped when re-entering scope.
- // The lack of indentation is deliberate, to reduce code churn and ease merges.
- for (;;) {
- audio_config_base_t config = {
- .sample_rate = mSampleRate,
- .channel_mask = mChannelMask,
- .format = mFormat
- };
- mRoutedDeviceId = mSelectedDeviceId;
- status = AudioSystem::getInputForAttr(&mAttributes, &input,
- mSessionId,
- // FIXME compare to AudioTrack
- mClientPid,
- mClientUid,
- &config,
- mFlags, &mRoutedDeviceId, &mPortId);
-
- if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE) {
- ALOGE("Could not get audio input for session %d, record source %d, sample rate %u, "
- "format %#x, channel mask %#x, flags %#x",
- mSessionId, mAttributes.source, mSampleRate, mFormat, mChannelMask, mFlags);
- return BAD_VALUE;
- }
-
// Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
// we must release it ourselves if anything goes wrong.
-#if 0
- size_t afFrameCount;
- status = AudioSystem::getFrameCount(input, &afFrameCount);
- if (status != NO_ERROR) {
- ALOGE("getFrameCount(input=%d) status %d", input, status);
- break;
- }
-#endif
-
- uint32_t afSampleRate;
- status = AudioSystem::getSamplingRate(input, &afSampleRate);
- if (status != NO_ERROR) {
- ALOGE("getSamplingRate(input=%d) status %d", input, status);
- break;
- }
- if (mSampleRate == 0) {
- mSampleRate = afSampleRate;
- }
-
// Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
bool useCaseAllowed =
@@ -618,66 +702,41 @@
if (!useCaseAllowed) {
ALOGW("AUDIO_INPUT_FLAG_FAST denied, incompatible transfer = %s",
convertTransferToText(mTransfer));
- }
-
- // sample rates must also match
- bool sampleRateAllowed = mSampleRate == afSampleRate;
- if (!sampleRateAllowed) {
- ALOGW("AUDIO_INPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
- mSampleRate, afSampleRate);
- }
-
- bool fastAllowed = useCaseAllowed && sampleRateAllowed;
- if (!fastAllowed) {
mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
AUDIO_INPUT_FLAG_RAW));
- AudioSystem::releaseInput(input, mSessionId);
- continue; // retry
}
}
- // The notification frame count is the period between callbacks, as suggested by the client
- // but moderated by the server. For record, the calculations are done entirely on server side.
- size_t notificationFrames = mNotificationFramesReq;
- size_t frameCount = mReqFrameCount;
-
- audio_input_flags_t flags = mFlags;
-
- pid_t tid = -1;
+ input.attr = mAttributes;
+ input.config.sample_rate = mSampleRate;
+ input.config.channel_mask = mChannelMask;
+ input.config.format = mFormat;
+ input.clientInfo.clientUid = mClientUid;
+ input.clientInfo.clientPid = mClientPid;
+ input.clientInfo.clientTid = -1;
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
if (mAudioRecordThread != 0) {
- tid = mAudioRecordThread->getTid();
+ input.clientInfo.clientTid = mAudioRecordThread->getTid();
}
}
+ input.opPackageName = opPackageName;
- size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
- // but we will still need the original value also
- audio_session_t originalSessionId = mSessionId;
+ input.flags = mFlags;
+ // The notification frame count is the period between callbacks, as suggested by the client
+ // but moderated by the server. For record, the calculations are done entirely on server side.
+ input.frameCount = mReqFrameCount;
+ input.notificationFrameCount = mNotificationFramesReq;
+ input.selectedDeviceId = mSelectedDeviceId;
+ input.sessionId = mSessionId;
+ originalSessionId = mSessionId;
- sp<IMemory> iMem; // for cblk
- sp<IMemory> bufferMem;
- sp<IAudioRecord> record = audioFlinger->openRecord(input,
- mSampleRate,
- mFormat,
- mChannelMask,
- opPackageName,
- &temp,
- &flags,
- mClientPid,
- tid,
- mClientUid,
- &mSessionId,
- ¬ificationFrames,
- iMem,
- bufferMem,
- &status,
- mPortId);
- ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
- "session ID changed from %d to %d", originalSessionId, mSessionId);
+ record = audioFlinger->createRecord(input,
+ output,
+ &status);
if (status != NO_ERROR) {
ALOGE("AudioFlinger could not create record track, status: %d", status);
- break;
+ goto exit;
}
ALOG_ASSERT(record != 0);
@@ -685,41 +744,41 @@
// so we are no longer responsible for releasing it.
mAwaitBoost = false;
- if (mFlags & AUDIO_INPUT_FLAG_FAST) {
- if (flags & AUDIO_INPUT_FLAG_FAST) {
- ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
- mAwaitBoost = true;
- } else {
- ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount, temp);
- mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
- AUDIO_INPUT_FLAG_RAW));
- continue; // retry
- }
+ if (output.flags & AUDIO_INPUT_FLAG_FAST) {
+ ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu",
+ mReqFrameCount, output.frameCount);
+ mAwaitBoost = true;
}
- mFlags = flags;
+ mFlags = output.flags;
+ mRoutedDeviceId = output.selectedDeviceId;
+ mSessionId = output.sessionId;
+ mSampleRate = output.sampleRate;
- if (iMem == 0) {
+ if (output.cblk == 0) {
ALOGE("Could not get control block");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- void *iMemPointer = iMem->pointer();
+ iMemPointer = output.cblk->pointer();
if (iMemPointer == NULL) {
ALOGE("Could not get control block pointer");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
+ cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
// Starting address of buffers in shared memory.
// The buffers are either immediately after the control block,
// or in a separate area at discretion of server.
void *buffers;
- if (bufferMem == 0) {
+ if (output.buffers == 0) {
buffers = cblk + 1;
} else {
- buffers = bufferMem->pointer();
+ buffers = output.buffers->pointer();
if (buffers == NULL) {
ALOGE("Could not get buffer pointer");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
}
@@ -729,43 +788,42 @@
mDeathNotifier.clear();
}
mAudioRecord = record;
- mCblkMemory = iMem;
- mBufferMemory = bufferMem;
+ mCblkMemory = output.cblk;
+ mBufferMemory = output.buffers;
IPCThreadState::self()->flushCommands();
mCblk = cblk;
- // note that temp is the (possibly revised) value of frameCount
- if (temp < frameCount || (frameCount == 0 && temp == 0)) {
- ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
+ // note that output.frameCount is the (possibly revised) value of mReqFrameCount
+ if (output.frameCount < mReqFrameCount || (mReqFrameCount == 0 && output.frameCount == 0)) {
+ ALOGW("Requested frameCount %zu but received frameCount %zu",
+ mReqFrameCount, output.frameCount);
}
- frameCount = temp;
// Make sure that application is notified with sufficient margin before overrun.
// The computation is done on server side.
- if (mNotificationFramesReq > 0 && notificationFrames != mNotificationFramesReq) {
+ if (mNotificationFramesReq > 0 && output.notificationFrameCount != mNotificationFramesReq) {
ALOGW("Server adjusted notificationFrames from %u to %zu for frameCount %zu",
- mNotificationFramesReq, notificationFrames, frameCount);
+ mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
}
- mNotificationFramesAct = (uint32_t) notificationFrames;
-
+ mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
//mInput != input includes the case where mInput == AUDIO_IO_HANDLE_NONE for first creation
- if (mDeviceCallback != 0 && mInput != input) {
+ if (mDeviceCallback != 0 && mInput != output.inputId) {
if (mInput != AUDIO_IO_HANDLE_NONE) {
AudioSystem::removeAudioDeviceCallback(this, mInput);
}
- AudioSystem::addAudioDeviceCallback(this, input);
+ AudioSystem::addAudioDeviceCallback(this, output.inputId);
}
// We retain a copy of the I/O handle, but don't own the reference
- mInput = input;
+ mInput = output.inputId;
mRefreshRemaining = true;
- mFrameCount = frameCount;
+ mFrameCount = output.frameCount;
// If IAudioRecord is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
- if (frameCount > mReqFrameCount) {
- mReqFrameCount = frameCount;
+ if (mFrameCount > mReqFrameCount) {
+ mReqFrameCount = mFrameCount;
}
// update proxy
@@ -776,17 +834,9 @@
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);
- return NO_ERROR;
-
- // End of retry loop.
- // The lack of indentation is deliberate, to reduce code churn and ease merges.
- }
-
-// Arrive here on error, via a break
- AudioSystem::releaseInput(input, mSessionId);
- if (status == NO_ERROR) {
- status = NO_INIT;
- }
+exit:
+ mStatus = status;
+ // sp<media::IAudioRecord> destructor will cause releaseInput() to be called by AudioFlinger
return status;
}
@@ -1216,22 +1266,43 @@
ALOGW("dead IAudioRecord, creating a new one from %s()", from);
++mSequence;
+ const int INITIAL_RETRIES = 3;
+ int retries = INITIAL_RETRIES;
+retry:
+ if (retries < INITIAL_RETRIES) {
+ // refresh the audio configuration cache in this process to make sure we get new
+ // input parameters and new IAudioRecord in createRecord_l()
+ AudioSystem::clearAudioConfigCache();
+ }
mFlags = mOrigFlags;
- // if the new IAudioRecord is created, openRecord_l() will modify the
+ // if the new IAudioRecord is created, createRecord_l() will modify the
// following member variables: mAudioRecord, mCblkMemory, mCblk, mBufferMemory.
// It will also delete the strong references on previous IAudioRecord and IMemory
Modulo<uint32_t> position(mProxy->getPosition());
mNewPosition = position + mUpdatePeriod;
- status_t result = openRecord_l(position, mOpPackageName);
- if (result == NO_ERROR) {
+ status_t result = createRecord_l(position, mOpPackageName);
+
+ if (result != NO_ERROR) {
+ ALOGW("%s(): createRecord_l failed, do not retry", __func__);
+ retries = 0;
+ } else {
if (mActive) {
// callback thread or sync event hasn't changed
// FIXME this fails if we have a new AudioFlinger instance
- result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, AUDIO_SESSION_NONE);
+ result = mAudioRecord->start(
+ AudioSystem::SYNC_EVENT_SAME, AUDIO_SESSION_NONE).transactionError();
}
mFramesReadServerOffset = mFramesRead; // server resets to zero so we need an offset.
}
+
+ if (result != NO_ERROR) {
+ ALOGW("%s() failed status %d, retries %d", __func__, result, retries);
+ if (--retries > 0) {
+ goto retry;
+ }
+ }
+
if (result != NO_ERROR) {
ALOGW("restoreRecord_l() failed status %d", result);
mActive = false;
@@ -1303,6 +1374,14 @@
}
}
+// -------------------------------------------------------------------------
+
+status_t AudioRecord::getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones)
+{
+ AutoMutex lock(mLock);
+ return mAudioRecord->getActiveMicrophones(activeMicrophones).transactionError();
+}
+
// =========================================================================
void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
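
Note: a recurring change in the AudioRecord.cpp diff above is converting early returns in set() and createRecord_l() into a single-exit pattern, so that mStatus and the error metrics are updated in exactly one place. Distilled into a standalone sketch (simplified, illustrative names; assumes the usual status codes from <utils/Errors.h>):

    static status_t singleExitSketch(bool argOk, bool alreadyInUse) {
        status_t status = NO_ERROR;
        if (!argOk) {
            status = BAD_VALUE;
            goto exit;              // every failure path funnels into 'exit'
        }
        if (alreadyInUse) {
            status = INVALID_OPERATION;
            goto exit;
        }
        // ... remaining setup, with each failure setting 'status' and jumping to 'exit' ...
    exit:
        // In AudioRecord this is where mStatus is stored and, on failure,
        // mMediaMetrics.markError(status, __FUNCTION__) is recorded.
        return status;
    }
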
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index cdc75ac..c072901 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -20,6 +20,8 @@
#include <utils/Log.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
+#include <binder/IPCThreadState.h>
+#include <media/AudioResamplerPublic.h>
#include <media/AudioSystem.h>
#include <media/IAudioFlinger.h>
#include <media/IAudioPolicyService.h>
@@ -38,8 +40,7 @@
sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
dynamic_policy_callback AudioSystem::gDynPolicyCallback = NULL;
-record_config_callback AudioSystem::gRecordConfigCallback = NULL;
-
+record_config_callback AudioSystem::gRecordConfigCallback = NULL;
// establish binder interface to AudioFlinger service
const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
@@ -75,7 +76,9 @@
af = gAudioFlinger;
}
if (afc != 0) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
af->registerClient(afc);
+ IPCThreadState::self()->restoreCallingIdentity(token);
}
return af;
}
@@ -253,6 +256,31 @@
return volume ? 100 - int(dBConvertInverse * log(volume) + 0.5) : 0;
}
+/* static */ size_t AudioSystem::calculateMinFrameCount(
+ uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
+ uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
+{
+ // Ensure that buffer depth covers at least audio hardware latency
+ uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
+ if (minBufCount < 2) {
+ minBufCount = 2;
+ }
+#if 0
+ // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
+ // but keeping the code here to make it easier to add later.
+ if (minBufCount < notificationsPerBufferReq) {
+ minBufCount = notificationsPerBufferReq;
+ }
+#endif
+ ALOGV("calculateMinFrameCount afLatency %u afFrameCount %u afSampleRate %u "
+ "sampleRate %u speed %f minBufCount: %u" /*" notificationsPerBufferReq %u"*/,
+ afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
+ /*, notificationsPerBufferReq*/);
+ return minBufCount * sourceFramesNeededWithTimestretch(
+ sampleRate, afFrameCount, afSampleRate, speed);
+}
+
+
status_t AudioSystem::getOutputSamplingRate(uint32_t* samplingRate, audio_stream_type_t streamType)
{
audio_io_handle_t output;
@@ -605,7 +633,7 @@
|| (channelMask != mInChannelMask)) {
size_t inBuffSize = af->getInputBufferSize(sampleRate, format, channelMask);
if (inBuffSize == 0) {
- ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %x",
+ ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %#x",
sampleRate, format, channelMask);
return BAD_VALUE;
}
@@ -742,7 +770,10 @@
ap = gAudioPolicyService;
}
if (apc != 0) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
ap->registerClient(apc);
+ ap->setAudioPortCallbacksEnabled(apc->isAudioPortCbEnabled());
+ IPCThreadState::self()->restoreCallingIdentity(token);
}
return ap;
@@ -822,22 +853,18 @@
}
-audio_io_handle_t AudioSystem::getOutput(audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
+audio_io_handle_t AudioSystem::getOutput(audio_stream_type_t stream)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return 0;
- return aps->getOutput(stream, samplingRate, format, channelMask, flags, offloadInfo);
+ return aps->getOutput(stream);
}
status_t AudioSystem::getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
+ pid_t pid,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t flags,
@@ -846,7 +873,7 @@
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return NO_INIT;
- return aps->getOutputForAttr(attr, output, session, stream, uid,
+ return aps->getOutputForAttr(attr, output, session, stream, pid, uid,
config,
flags, selectedDeviceId, portId);
}
@@ -883,6 +910,7 @@
audio_session_t session,
pid_t pid,
uid_t uid,
+ const String16& opPackageName,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
@@ -891,32 +919,29 @@
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return NO_INIT;
return aps->getInputForAttr(
- attr, input, session, pid, uid,
+ attr, input, session, pid, uid, opPackageName,
config, flags, selectedDeviceId, portId);
}
-status_t AudioSystem::startInput(audio_io_handle_t input,
- audio_session_t session)
+status_t AudioSystem::startInput(audio_port_handle_t portId, bool *silenced)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->startInput(input, session);
+ return aps->startInput(portId, silenced);
}
-status_t AudioSystem::stopInput(audio_io_handle_t input,
- audio_session_t session)
+status_t AudioSystem::stopInput(audio_port_handle_t portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- return aps->stopInput(input, session);
+ return aps->stopInput(portId);
}
-void AudioSystem::releaseInput(audio_io_handle_t input,
- audio_session_t session)
+void AudioSystem::releaseInput(audio_port_handle_t portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return;
- aps->releaseInput(input, session);
+ aps->releaseInput(portId);
}
status_t AudioSystem::initStreamVolume(audio_stream_type_t stream,
@@ -1035,11 +1060,11 @@
return af->getPrimaryOutputFrameCount();
}
-status_t AudioSystem::setLowRamDevice(bool isLowRamDevice)
+status_t AudioSystem::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- return af->setLowRamDevice(isLowRamDevice);
+ return af->setLowRamDevice(isLowRamDevice, totalMemory);
}
void AudioSystem::clearAudioConfigCache()
@@ -1254,6 +1279,31 @@
return aps->getStreamVolumeDB(stream, index, device);
}
+status_t AudioSystem::getMicrophones(std::vector<media::MicrophoneInfo> *microphones)
+{
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ return af->getMicrophones(microphones);
+}
+
+status_t AudioSystem::getSurroundFormats(unsigned int *numSurroundFormats,
+ audio_format_t *surroundFormats,
+ bool *surroundFormatsEnabled,
+ bool reported)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getSurroundFormats(
+ numSurroundFormats, surroundFormats, surroundFormatsEnabled, reported);
+}
+
+status_t AudioSystem::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->setSurroundFormatEnabled(audioFormat, enabled);
+}
+
// ---------------------------------------------------------------------------
int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
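
Note: as a worked example of the AudioSystem::calculateMinFrameCount() helper added above (numbers are illustrative): with afLatencyMs = 50, afFrameCount = 960 and afSampleRate = 48000, one hardware burst lasts (1000 * 960) / 48000 = 20 ms, so minBufCount = 50 / 20 = 2, which is already at the floor of 2. At speed 1.0 with a client sample rate equal to afSampleRate, sourceFramesNeededWithTimestretch() returns roughly afFrameCount, so the minimum frame count comes out to about 2 * 960 = 1920 frames. The AudioTrack.cpp diff below removes its local copy of this computation in favor of the shared helper.
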
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index c6622cd..ab9efe8 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -29,8 +29,11 @@
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
+#include <media/AudioParameter.h>
#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>
+#include <media/MediaAnalyticsItem.h>
+#include <media/TypeConverter.h>
#define WAIT_PERIOD_MS 10
#define WAIT_STREAM_END_TIMEOUT_SEC 120
@@ -39,6 +42,8 @@
namespace android {
// ---------------------------------------------------------------------------
+using media::VolumeShaper;
+
// TODO: Move to a separate .h
template <typename T>
@@ -51,8 +56,6 @@
return x > y ? x : y;
}
-static const int32_t NANOS_PER_SECOND = 1000000000;
-
static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
{
return ((double)frames * 1000000000) / ((double)sampleRate * speed);
@@ -97,32 +100,6 @@
return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
}
-// Must match similar computation in createTrack_l in Threads.cpp.
-// TODO: Move to a common library
-static size_t calculateMinFrameCount(
- uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
- uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
-{
- // Ensure that buffer depth covers at least audio hardware latency
- uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
- if (minBufCount < 2) {
- minBufCount = 2;
- }
-#if 0
- // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
- // but keeping the code here to make it easier to add later.
- if (minBufCount < notificationsPerBufferReq) {
- minBufCount = notificationsPerBufferReq;
- }
-#endif
- ALOGV("calculateMinFrameCount afLatency %u afFrameCount %u afSampleRate %u "
- "sampleRate %u speed %f minBufCount: %u" /*" notificationsPerBufferReq %u"*/,
- afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
- /*, notificationsPerBufferReq*/);
- return minBufCount * sourceFramesNeededWithTimestretch(
- sampleRate, afFrameCount, afSampleRate, speed);
-}
-
// static
status_t AudioTrack::getMinFrameCount(
size_t* frameCount,
@@ -163,8 +140,8 @@
// When called from createTrack, speed is 1.0f (normal speed).
// This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
- *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
- /*, 0 notificationsPerBufferReq*/);
+ *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
+ sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);
// The formula above should always produce a non-zero value under normal circumstances:
// AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
@@ -181,6 +158,88 @@
// ---------------------------------------------------------------------------
+static std::string audioContentTypeString(audio_content_type_t value) {
+ std::string contentType;
+ if (AudioContentTypeConverter::toString(value, contentType)) {
+ return contentType;
+ }
+ char rawbuffer[16]; // room for "%d"
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+ return rawbuffer;
+}
+
+static std::string audioUsageString(audio_usage_t value) {
+ std::string usage;
+ if (UsageTypeConverter::toString(value, usage)) {
+ return usage;
+ }
+ char rawbuffer[16]; // room for "%d"
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+ return rawbuffer;
+}
+
+void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
+{
+
+ // key for media statistics is defined in the header
+ // attrs for media statistics
+ // NB: these are matched with public Java API constants defined
+ // in frameworks/base/media/java/android/media/AudioTrack.java
+ // These must be kept synchronized with the constants there.
+ static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
+ static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
+ static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
+ static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
+ static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
+
+ // NB: These are not yet exposed as public Java API constants.
+ static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
+ static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
+
+ // only if we're in a good state...
+ // XXX: shall we gather alternative info if failing?
+ const status_t lstatus = track->initCheck();
+ if (lstatus != NO_ERROR) {
+ ALOGD("no metrics gathered, track status=%d", (int) lstatus);
+ return;
+ }
+
+ // constructor guarantees mAnalyticsItem is valid
+
+ const int32_t underrunFrames = track->getUnderrunFrames();
+ if (underrunFrames != 0) {
+ mAnalyticsItem->setInt32(kAudioTrackUnderrunFrames, underrunFrames);
+ }
+
+ if (track->mTimestampStartupGlitchReported) {
+ mAnalyticsItem->setInt32(kAudioTrackStartupGlitch, 1);
+ }
+
+ if (track->mStreamType != -1) {
+ // deprecated, but this will tell us who still uses it.
+ mAnalyticsItem->setInt32(kAudioTrackStreamType, track->mStreamType);
+ }
+ // XXX: consider also including the source type from mAttributes
+ mAnalyticsItem->setCString(kAudioTrackContentType,
+ audioContentTypeString(track->mAttributes.content_type).c_str());
+ mAnalyticsItem->setCString(kAudioTrackUsage,
+ audioUsageString(track->mAttributes.usage).c_str());
+ mAnalyticsItem->setInt32(kAudioTrackSampleRate, track->mSampleRate);
+ mAnalyticsItem->setInt64(kAudioTrackChannelMask, track->mChannelMask);
+}
+
+// hand the user a snapshot of the metrics.
+status_t AudioTrack::getMetrics(MediaAnalyticsItem * &item)
+{
+ mMediaMetrics.gather(this);
+ MediaAnalyticsItem *tmp = mMediaMetrics.dup();
+ if (tmp == nullptr) {
+ return BAD_VALUE;
+ }
+ item = tmp;
+ return NO_ERROR;
+}
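A hypothetical caller-side sketch of the new snapshot API, assuming the usual libaudioclient headers; logTrackMetrics() is illustrative only. The sketch assumes the caller owns the duplicated item, which the internal dup() suggests.

// Illustrative only; error handling kept minimal.
void logTrackMetrics(const sp<AudioTrack>& track) {
    MediaAnalyticsItem *item = nullptr;
    if (track->getMetrics(item) == NO_ERROR && item != nullptr) {
        // inspect, log, or forward the snapshot here
        delete item;   // ownership assumed transferred by the internal dup()
    }
}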
+
AudioTrack::AudioTrack()
: mStatus(NO_INIT),
mState(STATE_STOPPED),
@@ -188,8 +247,7 @@
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mPortId(AUDIO_PORT_HANDLE_NONE)
+ mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
mAttributes.usage = AUDIO_USAGE_UNKNOWN;
@@ -214,19 +272,18 @@
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
- float maxRequiredSpeed)
+ float maxRequiredSpeed,
+ audio_port_handle_t selectedDeviceId)
: mStatus(NO_INIT),
mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
- mPausedPosition(0),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mPortId(AUDIO_PORT_HANDLE_NONE)
+ mPausedPosition(0)
{
- mStatus = set(streamType, sampleRate, format, channelMask,
+ (void)set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
- offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
+ offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
}
AudioTrack::AudioTrack(
@@ -252,10 +309,9 @@
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mPortId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
- mStatus = set(streamType, sampleRate, format, channelMask,
+ (void)set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
@@ -263,6 +319,9 @@
AudioTrack::~AudioTrack()
{
+ // pull together the numbers, before we clean up our structures
+ mMediaMetrics.gather(this);
+
if (mStatus == NO_ERROR) {
// Make sure that callback function exits in the case where
// it is looping on buffer full condition in obtainBuffer().
@@ -308,14 +367,22 @@
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
- float maxRequiredSpeed)
+ float maxRequiredSpeed,
+ audio_port_handle_t selectedDeviceId)
{
+ status_t status;
+ uint32_t channelCount;
+ pid_t callingPid;
+ pid_t myPid;
+
ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
"flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
sessionId, transferType, uid, pid);
mThreadCanCallJava = threadCanCallJava;
+ mSelectedDeviceId = selectedDeviceId;
+ mSessionId = sessionId;
switch (transferType) {
case TRANSFER_DEFAULT:
@@ -330,25 +397,29 @@
case TRANSFER_CALLBACK:
if (cbf == NULL || sharedBuffer != 0) {
ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_OBTAIN:
case TRANSFER_SYNC:
if (sharedBuffer != 0) {
ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
case TRANSFER_SHARED:
if (sharedBuffer == 0) {
ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
break;
default:
ALOGE("Invalid transfer type %d", transferType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mSharedBuffer = sharedBuffer;
mTransfer = transferType;
@@ -362,7 +433,8 @@
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
ALOGE("Track already in use");
- return INVALID_OPERATION;
+ status = INVALID_OPERATION;
+ goto exit;
}
// handle default values first.
@@ -372,7 +444,8 @@
if (pAttributes == NULL) {
if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
ALOGE("Invalid stream type %d", streamType);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mStreamType = streamType;
@@ -404,16 +477,18 @@
// validate parameters
if (!audio_is_valid_format(format)) {
ALOGE("Invalid format %#x", format);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mFormat = format;
if (!audio_is_output_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mChannelMask = channelMask;
- uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+ channelCount = audio_channel_count_from_out_mask(channelMask);
mChannelCount = channelCount;
// force direct flag if format is not linear PCM
@@ -448,7 +523,8 @@
// sampling rate must be specified for direct outputs
if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mSampleRate = sampleRate;
mOriginalSampleRate = sampleRate;
@@ -479,12 +555,14 @@
if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
ALOGE("notificationFrames=%d not permitted for non-fast track",
notificationFrames);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
if (frameCount > 0) {
ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
notificationFrames, frameCount);
- return BAD_VALUE;
+ status = BAD_VALUE;
+ goto exit;
}
mNotificationFramesReq = 0;
const uint32_t minNotificationsPerBuffer = 1;
@@ -496,20 +574,15 @@
notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
}
mNotificationFramesAct = 0;
- if (sessionId == AUDIO_SESSION_ALLOCATE) {
- mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
- } else {
- mSessionId = sessionId;
- }
- int callingpid = IPCThreadState::self()->getCallingPid();
- int mypid = getpid();
- if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
+ callingPid = IPCThreadState::self()->getCallingPid();
+ myPid = getpid();
+ if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
- if (pid == -1 || (callingpid != mypid)) {
- mClientPid = callingpid;
+ if (pid == -1 || (callingPid != myPid)) {
+ mClientPid = callingPid;
} else {
mClientPid = pid;
}
@@ -524,7 +597,7 @@
}
// create the IAudioTrack
- status_t status = createTrack_l();
+ status = createTrack_l();
if (status != NO_ERROR) {
if (mAudioTrackThread != 0) {
@@ -532,10 +605,9 @@
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
- return status;
+ goto exit;
}
- mStatus = NO_ERROR;
mUserData = user;
mLoopCount = 0;
mLoopStart = 0;
@@ -562,8 +634,11 @@
mFramesWritten = 0;
mFramesWrittenServerOffset = 0;
mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
- mVolumeHandler = new VolumeHandler();
- return NO_ERROR;
+ mVolumeHandler = new media::VolumeHandler();
+
+exit:
+ mStatus = status;
+ return status;
}
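set() now funnels every failure path through the exit label so that mStatus and the return value can never diverge. A minimal, self-contained illustration of that single-exit shape (Example and configure() are made-up names):

#include <cstdio>

enum status_t { NO_ERROR = 0, BAD_VALUE = -22 };

struct Example {
    status_t mStatus = NO_ERROR;

    status_t configure(int frames) {
        status_t status;
        if (frames <= 0) {
            status = BAD_VALUE;
            goto exit;            // no early return: mStatus stays in sync
        }
        // ... further setup that may also jump to exit on failure ...
        status = NO_ERROR;
    exit:
        mStatus = status;         // cached status and return value never diverge
        return status;
    }
};

int main() {
    status_t result = Example().configure(0);
    Example e;
    result = e.configure(0);
    printf("%d %d\n", result, e.mStatus);   // both BAD_VALUE
    return 0;
}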
// -------------------------------------------------------------------------
@@ -695,6 +770,7 @@
mReleased = 0;
}
+ mProxy->stop(); // notify server not to read beyond current client position until start().
mProxy->interrupt();
mAudioTrack->stop();
@@ -730,7 +806,7 @@
return;
}
AutoMutex lock(mLock);
- if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
+ if (mState == STATE_ACTIVE) {
return;
}
flush_l();
@@ -1219,6 +1295,7 @@
mSelectedDeviceId = deviceId;
if (mStatus == NO_ERROR) {
android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+ mProxy->interrupt();
}
}
return NO_ERROR;
@@ -1306,76 +1383,19 @@
status_t AudioTrack::createTrack_l()
{
+ status_t status;
+ bool callbackAdded = false;
+
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
- return NO_INIT;
+ status = NO_INIT;
+ goto exit;
}
- audio_io_handle_t output;
- audio_stream_type_t streamType = mStreamType;
- audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
- bool callbackAdded = false;
-
+ {
// mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
// After fast request is denied, we will request again if IAudioTrack is re-created.
-
- status_t status;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = mSampleRate;
- config.channel_mask = mChannelMask;
- config.format = mFormat;
- config.offload_info = mOffloadInfoCopy;
- mRoutedDeviceId = mSelectedDeviceId;
- status = AudioSystem::getOutputForAttr(attr, &output,
- mSessionId, &streamType, mClientUid,
- &config,
- mFlags, &mRoutedDeviceId, &mPortId);
-
- if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
- ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u,"
- " format %#x, channel mask %#x, flags %#x",
- mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask,
- mFlags);
- return BAD_VALUE;
- }
- {
- // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
- // we must release it ourselves if anything goes wrong.
-
- // Not all of these values are needed under all conditions, but it is easier to get them all
- status = AudioSystem::getLatency(output, &mAfLatency);
- if (status != NO_ERROR) {
- ALOGE("getLatency(%d) failed status %d", output, status);
- goto release;
- }
- ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
-
- status = AudioSystem::getFrameCount(output, &mAfFrameCount);
- if (status != NO_ERROR) {
- ALOGE("getFrameCount(output=%d) status %d", output, status);
- goto release;
- }
-
- // TODO consider making this a member variable if there are other uses for it later
- size_t afFrameCountHAL;
- status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
- if (status != NO_ERROR) {
- ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
- goto release;
- }
- ALOG_ASSERT(afFrameCountHAL > 0);
-
- status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
- if (status != NO_ERROR) {
- ALOGE("getSamplingRate(output=%d) status %d", output, status);
- goto release;
- }
- if (mSampleRate == 0) {
- mSampleRate = mAfSampleRate;
- mOriginalSampleRate = mAfSampleRate;
- }
-
// Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
// either of these use cases:
@@ -1389,130 +1409,81 @@
// use case 4: synchronous write
((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
- bool useCaseAllowed = sharedBuffer || transferAllowed;
- if (!useCaseAllowed) {
+ bool fastAllowed = sharedBuffer || transferAllowed;
+ if (!fastAllowed) {
ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, not shared buffer and transfer = %s",
convertTransferToText(mTransfer));
- }
-
- // sample rates must also match
- bool sampleRateAllowed = mSampleRate == mAfSampleRate;
- if (!sampleRateAllowed) {
- ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, sample rate %u Hz but HAL needs %u Hz",
- mSampleRate, mAfSampleRate);
- }
-
- bool fastAllowed = useCaseAllowed && sampleRateAllowed;
- if (!fastAllowed) {
mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
}
}
- mNotificationFramesAct = mNotificationFramesReq;
-
- size_t frameCount = mReqFrameCount;
- if (!audio_has_proportional_frames(mFormat)) {
-
- if (mSharedBuffer != 0) {
- // Same comment as below about ignoring frameCount parameter for set()
- frameCount = mSharedBuffer->size();
- } else if (frameCount == 0) {
- frameCount = mAfFrameCount;
- }
- if (mNotificationFramesAct != frameCount) {
- mNotificationFramesAct = frameCount;
- }
- } else if (mSharedBuffer != 0) {
- // FIXME: Ensure client side memory buffers need
- // not have additional alignment beyond sample
- // (e.g. 16 bit stereo accessed as 32 bit frame).
- size_t alignment = audio_bytes_per_sample(mFormat);
- if (alignment & 1) {
- // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
- alignment = 1;
- }
- if (mChannelCount > 1) {
- // More than 2 channels does not require stronger alignment than stereo
- alignment <<= 1;
- }
- if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
- ALOGE("Invalid buffer alignment: address %p, channel count %u",
- mSharedBuffer->pointer(), mChannelCount);
- status = BAD_VALUE;
- goto release;
- }
-
- // When initializing a shared buffer AudioTrack via constructors,
- // there's no frameCount parameter.
- // But when initializing a shared buffer AudioTrack via set(),
- // there _is_ a frameCount parameter. We silently ignore it.
- frameCount = mSharedBuffer->size() / mFrameSize;
+ IAudioFlinger::CreateTrackInput input;
+ if (mStreamType != AUDIO_STREAM_DEFAULT) {
+ stream_type_to_audio_attributes(mStreamType, &input.attr);
} else {
- size_t minFrameCount = 0;
- // For fast tracks the frame count calculations and checks are mostly done by server,
- // but we try to respect the application's request for notifications per buffer.
- if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
- if (mNotificationsPerBufferReq > 0) {
- // Avoid possible arithmetic overflow during multiplication.
- // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
- if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
- ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
- mNotificationsPerBufferReq, afFrameCountHAL);
- } else {
- minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
- }
- }
- } else {
- // for normal tracks precompute the frame count based on speed.
- const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
- max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
- minFrameCount = calculateMinFrameCount(
- mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
- speed /*, 0 mNotificationsPerBufferReq*/);
- }
- if (frameCount < minFrameCount) {
- frameCount = minFrameCount;
- }
+ input.attr = mAttributes;
}
-
- audio_output_flags_t flags = mFlags;
-
- pid_t tid = -1;
+ input.config = AUDIO_CONFIG_INITIALIZER;
+ input.config.sample_rate = mSampleRate;
+ input.config.channel_mask = mChannelMask;
+ input.config.format = mFormat;
+ input.config.offload_info = mOffloadInfoCopy;
+ input.clientInfo.clientUid = mClientUid;
+ input.clientInfo.clientPid = mClientPid;
+ input.clientInfo.clientTid = -1;
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
// It is currently meaningless to request SCHED_FIFO for a Java thread. Even if the
// application-level code follows all non-blocking design rules, the language runtime
// doesn't also follow those rules, so the thread will not benefit overall.
if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
- tid = mAudioTrackThread->getTid();
+ input.clientInfo.clientTid = mAudioTrackThread->getTid();
}
}
+ input.sharedBuffer = mSharedBuffer;
+ input.notificationsPerBuffer = mNotificationsPerBufferReq;
+ input.speed = 1.0;
+ if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
+ (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
+ input.speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
+ max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
+ }
+ input.flags = mFlags;
+ input.frameCount = mReqFrameCount;
+ input.notificationFrameCount = mNotificationFramesReq;
+ input.selectedDeviceId = mSelectedDeviceId;
+ input.sessionId = mSessionId;
- size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
- // but we will still need the original value also
- audio_session_t originalSessionId = mSessionId;
- sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
- mSampleRate,
- mFormat,
- mChannelMask,
- &temp,
- &flags,
- mSharedBuffer,
+ IAudioFlinger::CreateTrackOutput output;
+
+ sp<IAudioTrack> track = audioFlinger->createTrack(input,
output,
- mClientPid,
- tid,
- &mSessionId,
- mClientUid,
- &status,
- mPortId);
- ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
- "session ID changed from %d to %d", originalSessionId, mSessionId);
+ &status);
- if (status != NO_ERROR) {
- ALOGE("AudioFlinger could not create track, status: %d", status);
- goto release;
+ if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
+ ALOGE("AudioFlinger could not create track, status: %d output %d", status, output.outputId);
+ if (status == NO_ERROR) {
+ status = NO_INIT;
+ }
+ goto exit;
}
ALOG_ASSERT(track != 0);
+ mFrameCount = output.frameCount;
+ mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
+ mRoutedDeviceId = output.selectedDeviceId;
+ mSessionId = output.sessionId;
+
+ mSampleRate = output.sampleRate;
+ if (mOriginalSampleRate == 0) {
+ mOriginalSampleRate = mSampleRate;
+ }
+
+ mAfFrameCount = output.afFrameCount;
+ mAfSampleRate = output.afSampleRate;
+ mAfLatency = output.afLatencyMs;
+
+ mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
+
// AudioFlinger now owns the reference to the I/O handle,
// so we are no longer responsible for releasing it.
@@ -1521,13 +1492,13 @@
if (iMem == 0) {
ALOGE("Could not get control block");
status = NO_INIT;
- goto release;
+ goto exit;
}
void *iMemPointer = iMem->pointer();
if (iMemPointer == NULL) {
ALOGE("Could not get control block pointer");
status = NO_INIT;
- goto release;
+ goto exit;
}
// invariant that mAudioTrack != 0 is true only after set() returns successfully
if (mAudioTrack != 0) {
@@ -1540,75 +1511,33 @@
audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
mCblk = cblk;
- // note that temp is the (possibly revised) value of frameCount
- if (temp < frameCount || (frameCount == 0 && temp == 0)) {
- // In current design, AudioTrack client checks and ensures frame count validity before
- // passing it to AudioFlinger so AudioFlinger should not return a different value except
- // for fast track as it uses a special method of assigning frame count.
- ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
- }
- frameCount = temp;
mAwaitBoost = false;
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
- if (flags & AUDIO_OUTPUT_FLAG_FAST) {
- ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
+ if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
+ ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
+ mReqFrameCount, mFrameCount);
if (!mThreadCanCallJava) {
mAwaitBoost = true;
}
} else {
- ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount,
- temp);
+ ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", mReqFrameCount,
+ mFrameCount);
}
}
- mFlags = flags;
-
- // Make sure that application is notified with sufficient margin before underrun.
- // The client can divide the AudioTrack buffer into sub-buffers,
- // and expresses its desire to server as the notification frame count.
- if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
- size_t maxNotificationFrames;
- if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
- // notify every HAL buffer, regardless of the size of the track buffer
- maxNotificationFrames = afFrameCountHAL;
- } else {
- // For normal tracks, use at least double-buffering if no sample rate conversion,
- // or at least triple-buffering if there is sample rate conversion
- const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
- maxNotificationFrames = frameCount / nBuffering;
- // If client requested a fast track but this was denied, then use the smaller maximum.
- // FMS_20 is the minimum task wakeup period in ms for which CFS operates reliably.
-#define FMS_20 20 // FIXME share a common declaration with the same symbol in Threads.cpp
- if (mOrigFlags & AUDIO_OUTPUT_FLAG_FAST) {
- size_t maxNotificationFramesFastDenied = FMS_20 * mSampleRate / 1000;
- if (maxNotificationFrames > maxNotificationFramesFastDenied) {
- maxNotificationFrames = maxNotificationFramesFastDenied;
- }
- }
- }
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
- if (mNotificationFramesAct == 0) {
- ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
- maxNotificationFrames, frameCount);
- } else {
- ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
- mNotificationFramesAct, maxNotificationFrames, frameCount);
- }
- mNotificationFramesAct = (uint32_t) maxNotificationFrames;
- }
- }
+ mFlags = output.flags;
//mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
- if (mDeviceCallback != 0 && mOutput != output) {
+ if (mDeviceCallback != 0 && mOutput != output.outputId) {
if (mOutput != AUDIO_IO_HANDLE_NONE) {
AudioSystem::removeAudioDeviceCallback(this, mOutput);
}
- AudioSystem::addAudioDeviceCallback(this, output);
+ AudioSystem::addAudioDeviceCallback(this, output.outputId);
callbackAdded = true;
}
// We retain a copy of the I/O handle, but don't own the reference
- mOutput = output;
+ mOutput = output.outputId;
mRefreshRemaining = true;
// Starting address of buffers in shared memory. If there is a shared buffer, buffers
@@ -1623,18 +1552,16 @@
if (buffers == NULL) {
ALOGE("Could not get buffer pointer");
status = NO_INIT;
- goto release;
+ goto exit;
}
}
mAudioTrack->attachAuxEffect(mAuxEffectId);
- mFrameCount = frameCount;
- updateLatency_l(); // this refetches mAfLatency and sets mLatency
// If IAudioTrack is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
- if (frameCount > mReqFrameCount) {
- mReqFrameCount = frameCount;
+ if (mFrameCount > mReqFrameCount) {
+ mReqFrameCount = mFrameCount;
}
// reset server position to 0 as we have new cblk.
@@ -1643,9 +1570,9 @@
// update proxy
if (mSharedBuffer == 0) {
mStaticProxy.clear();
- mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
+ mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
} else {
- mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
+ mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
mProxy = mStaticProxy;
}
@@ -1668,18 +1595,17 @@
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
- return NO_ERROR;
}
-release:
- AudioSystem::releaseOutput(output, streamType, mSessionId);
- if (callbackAdded) {
+exit:
+ if (status != NO_ERROR && callbackAdded) {
// note: mOutput is always valid if callbackAdded is true
AudioSystem::removeAudioDeviceCallback(this, mOutput);
}
- if (status == NO_ERROR) {
- status = NO_INIT;
- }
+
+ mStatus = status;
+
+ // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
return status;
}
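Most of the deleted code above (output selection, latency and frame-count queries, notification sizing) now runs inside AudioFlinger; the client only fills in a request and consumes the server's answer. An illustrative summary of that exchange, with field names taken from the patch (the real structs are declared in IAudioFlinger.h; client info and the shared buffer are omitted for brevity):

#include <system/audio.h>
#include <cstddef>

struct CreateTrackInputSketch {           // what the client proposes
    audio_attributes_t attr;
    audio_config_t config;                // sample rate, channel mask, format, offload info
    audio_output_flags_t flags;
    size_t frameCount;
    size_t notificationFrameCount;
    uint32_t notificationsPerBuffer;
    float speed;
    audio_port_handle_t selectedDeviceId;
    audio_session_t sessionId;
};

struct CreateTrackOutputSketch {          // what AudioFlinger actually granted
    audio_output_flags_t flags;           // e.g. FAST may have been stripped
    size_t frameCount;
    size_t notificationFrameCount;
    audio_port_handle_t selectedDeviceId;
    audio_session_t sessionId;
    uint32_t sampleRate;
    size_t afFrameCount;                  // mixer (AF) parameters, formerly fetched
    uint32_t afSampleRate;                //   by the client via AudioSystem::get*()
    uint32_t afLatencyMs;
    audio_io_handle_t outputId;
};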
@@ -2323,6 +2249,16 @@
staticPosition = mStaticProxy->getPosition().unsignedValue();
}
+ // See b/74409267. Connecting to a BT A2DP device supporting multiple codecs
+ // causes a lot of churn on the service side, and it can reject starting
+ // playback of a previously created track. May also apply to other cases.
+ const int INITIAL_RETRIES = 3;
+ int retries = INITIAL_RETRIES;
+retry:
+ if (retries < INITIAL_RETRIES) {
+ // See the comment for clearAudioConfigCache at the start of the function.
+ AudioSystem::clearAudioConfigCache();
+ }
mFlags = mOrigFlags;
// If a new IAudioTrack is successfully created, createTrack_l() will modify the
@@ -2331,7 +2267,10 @@
// If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
status_t result = createTrack_l();
- if (result == NO_ERROR) {
+ if (result != NO_ERROR) {
+ ALOGW("%s(): createTrack_l failed, do not retry", __func__);
+ retries = 0;
+ } else {
// take the frames that will be lost by track recreation into account in saved position
// For streaming tracks, this is the amount we obtained from the user/client
// (not the number actually consumed at the server - those are already lost).
@@ -2376,7 +2315,10 @@
mFramesWrittenAtRestore = mFramesWrittenServerOffset;
}
if (result != NO_ERROR) {
- ALOGW("restoreTrack_l() failed status %d", result);
+ ALOGW("%s() failed status %d, retries %d", __func__, result, retries);
+ if (--retries > 0) {
+ goto retry;
+ }
mState = STATE_STOPPED;
mReleased = 0;
}
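The retry logic above only loops when track creation itself succeeded but the subsequent state restoration failed; a creation failure zeroes the retry budget immediately. A self-contained sketch of that shape, with restoreWithRetries() and its parameters as hypothetical stand-ins:

#include <cstdio>

constexpr int INITIAL_RETRIES = 3;

// createTrack stands in for createTrack_l(); restoreState for the position,
// loop and volume restoration that follows it in restoreTrack_l().
bool restoreWithRetries(bool (*createTrack)(), bool (*restoreState)()) {
    int retries = INITIAL_RETRIES;
retry:
    if (retries < INITIAL_RETRIES) {
        // second and later attempts: the real code clears the audio config
        // cache here (AudioSystem::clearAudioConfigCache()).
    }
    bool ok = createTrack();
    if (!ok) {
        retries = 0;                 // creation failed: do not retry
    } else {
        ok = restoreState();
    }
    if (!ok && --retries > 0) {
        goto retry;
    }
    return ok;
}

int main() {
    // Creation succeeds; restoration fails twice, then succeeds on the third
    // and final attempt.
    bool ok = restoreWithRetries([] { return true; },
                                 [] { static int n = 0; return ++n >= 3; });
    printf("restored: %s\n", ok ? "yes" : "no");
    return 0;
}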
@@ -2415,8 +2357,8 @@
return true; // static tracks do not have issues with buffer sizing.
}
const size_t minFrameCount =
- calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
- /*, 0 mNotificationsPerBufferReq*/);
+ AudioSystem::calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate,
+ sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
const bool allowed = mFrameCount >= minFrameCount;
ALOGD_IF(!allowed,
"isSampleRateSpeedAllowed_l denied "
@@ -2433,6 +2375,17 @@
return mAudioTrack->setParameters(keyValuePairs);
}
+status_t AudioTrack::selectPresentation(int presentationId, int programId)
+{
+ AutoMutex lock(mLock);
+ AudioParameter param = AudioParameter();
+ param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
+ param.addInt(String8(AudioParameter::keyProgramId), programId);
+ ALOGV("PresentationId/ProgramId[%s]",param.toString().string());
+
+ return mAudioTrack->setParameters(param.toString());
+}
+
VolumeShaper::Status AudioTrack::applyVolumeShaper(
const sp<VolumeShaper::Configuration>& configuration,
const sp<VolumeShaper::Operation>& operation)
@@ -2832,23 +2785,28 @@
status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
{
-
- const size_t SIZE = 256;
- char buffer[SIZE];
String8 result;
result.append(" AudioTrack::dump\n");
- snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType,
- mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
- result.append(buffer);
- snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%zu)\n", mFormat,
- mChannelCount, mFrameCount);
- result.append(buffer);
- snprintf(buffer, 255, " sample rate(%u), speed(%f), status(%d)\n",
- mSampleRate, mPlaybackRate.mSpeed, mStatus);
- result.append(buffer);
- snprintf(buffer, 255, " state(%d), latency (%d)\n", mState, mLatency);
- result.append(buffer);
+ result.appendFormat(" status(%d), state(%d), session Id(%d), flags(%#x)\n",
+ mStatus, mState, mSessionId, mFlags);
+ result.appendFormat(" stream type(%d), left - right volume(%f, %f)\n",
+ (mStreamType == AUDIO_STREAM_DEFAULT) ?
+ audio_attributes_to_stream_type(&mAttributes) : mStreamType,
+ mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
+ result.appendFormat(" format(%#x), channel mask(%#x), channel count(%u)\n",
+ mFormat, mChannelMask, mChannelCount);
+ result.appendFormat(" sample rate(%u), original sample rate(%u), speed(%f)\n",
+ mSampleRate, mOriginalSampleRate, mPlaybackRate.mSpeed);
+ result.appendFormat(" frame count(%zu), req. frame count(%zu)\n",
+ mFrameCount, mReqFrameCount);
+ result.appendFormat(" notif. frame count(%u), req. notif. frame count(%u),"
+ " req. notif. per buff(%u)\n",
+ mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq);
+ result.appendFormat(" latency (%d), selected device Id(%d), routed device Id(%d)\n",
+ mLatency, mSelectedDeviceId, mRoutedDeviceId);
+ result.appendFormat(" output(%d) AF latency (%u) AF frame count(%zu) AF SampleRate(%u)\n",
+ mOutput, mAfLatency, mAfFrameCount, mAfSampleRate);
::write(fd, result.string(), result.size());
return NO_ERROR;
}
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index 7bf4f99..dced3c4 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -393,19 +393,50 @@
// ---------------------------------------------------------------------------
-__attribute__((no_sanitize("integer")))
void AudioTrackClientProxy::flush()
{
+ sendStreamingFlushStop(true /* flush */);
+}
+
+void AudioTrackClientProxy::stop()
+{
+ sendStreamingFlushStop(false /* flush */);
+}
+
+// Sets the client-written mFlush and mStop positions, which control server behavior.
+//
+// @param flush indicates whether the operation is a flush or stop.
+// A client stop sets mStop to the current write position;
+// the server will not read past this point until start() or subsequent flush().
+// A client flush sets both mStop and mFlush to the current write position.
+ // This advances the server read limit (if previously set); on the next
+ // server read, the server read position is advanced to this limit.
+//
+void AudioTrackClientProxy::sendStreamingFlushStop(bool flush)
+{
+ // TODO: Replace this by 64 bit counters - avoids wrap complication.
// This works for mFrameCountP2 <= 2^30
- size_t increment = mFrameCountP2 << 1;
- size_t mask = increment - 1;
- audio_track_cblk_t* cblk = mCblk;
// mFlush is 32 bits concatenated as [ flush_counter ] [ newfront_offset ]
// Should newFlush = cblk->u.mStreaming.mRear? Only problem is
// if you want to flush twice to the same rear location after a 32 bit wrap.
- int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
- ((cblk->u.mStreaming.mFlush & ~mask) + increment);
- android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
+
+ const size_t increment = mFrameCountP2 << 1;
+ const size_t mask = increment - 1;
+ // No need for client atomic synchronization on mRear, mStop, mFlush
+ // as the AudioTrack client only reads/writes them under the client lock. The server only reads.
+ const int32_t rearMasked = mCblk->u.mStreaming.mRear & mask;
+
+ // update stop before flush so that the server front
+ // never advances beyond a (potential) previous stop's rear limit.
+ int32_t stopBits; // the following add can overflow
+ __builtin_add_overflow(mCblk->u.mStreaming.mStop & ~mask, increment, &stopBits);
+ android_atomic_release_store(rearMasked | stopBits, &mCblk->u.mStreaming.mStop);
+
+ if (flush) {
+ int32_t flushBits; // the following add can overflow
+ __builtin_add_overflow(mCblk->u.mStreaming.mFlush & ~mask, increment, &flushBits);
+ android_atomic_release_store(rearMasked | flushBits, &mCblk->u.mStreaming.mFlush);
+ }
}
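A standalone worked example of the [counter | offset] packing used for mStop and mFlush (not framework code; the real update additionally uses __builtin_add_overflow so the counter may wrap harmlessly):

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t frameCountP2 = 1 << 10;          // 1024-frame client buffer
    const uint32_t increment = frameCountP2 << 1;   // counter step: 2048
    const uint32_t mask = increment - 1;            // low bits hold the rear offset

    uint32_t rear = 3000;                           // client write position (frames)
    uint32_t stop = 0;                              // packed word, as stored in mStop

    for (int i = 0; i < 2; i++) {
        // Same update as sendStreamingFlushStop(): keep the rear's low bits and
        // bump the counter held in the high bits, so the server sees a new
        // request even if the offset happens to be unchanged.
        stop = (rear & mask) | ((stop & ~mask) + increment);
        printf("stop word %#x: offset %u, counter %u\n",
               stop, stop & mask, stop / increment);
        rear += 500;                                // client keeps writing
    }
    return 0;
}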
bool AudioTrackClientProxy::clearStreamEndDone() {
@@ -540,6 +571,11 @@
LOG_ALWAYS_FATAL("static flush");
}
+void StaticAudioTrackClientProxy::stop()
+{
+ ; // no special handling required for static tracks.
+}
+
void StaticAudioTrackClientProxy::setLoop(size_t loopStart, size_t loopEnd, int loopCount)
{
// This can only happen on a 64-bit client
@@ -638,6 +674,7 @@
if (flush != mFlush) {
ALOGV("ServerProxy::flushBufferIfNeeded() mStreaming.mFlush = 0x%x, mFlush = 0x%0x",
flush, mFlush);
+ // shouldn't matter, but for range safety use mRear instead of getRear().
int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
int32_t front = cblk->u.mStreaming.mFront;
@@ -677,6 +714,46 @@
}
__attribute__((no_sanitize("integer")))
+int32_t AudioTrackServerProxy::getRear() const
+{
+ const int32_t stop = android_atomic_acquire_load(&mCblk->u.mStreaming.mStop);
+ const int32_t rear = android_atomic_acquire_load(&mCblk->u.mStreaming.mRear);
+ const int32_t stopLast = mStopLast.load(std::memory_order_acquire);
+ if (stop != stopLast) {
+ const int32_t front = mCblk->u.mStreaming.mFront;
+ const size_t overflowBit = mFrameCountP2 << 1;
+ const size_t mask = overflowBit - 1;
+ int32_t newRear = (rear & ~mask) | (stop & mask);
+ ssize_t filled = newRear - front;
+ // overflowBit is unsigned, so cast to signed for comparison.
+ if (filled >= (ssize_t)overflowBit) {
+ // front and rear offsets span the overflow bit of the p2 mask
+ // so rebasing newRear on the rear offset is off by the overflow bit.
+ ALOGV("stop wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
+ newRear -= overflowBit;
+ filled -= overflowBit;
+ }
+ if (0 <= filled && (size_t) filled <= mFrameCount) {
+ // we're stopped, return the stop level as newRear
+ return newRear;
+ }
+
+ // A corrupt stop. Log error and ignore.
+ ALOGE("mStopLast %#x -> stop %#x, front %#x, rear %#x, mask %#x, newRear %#x, "
+ "filled %zd=%#x",
+ stopLast, stop, front, rear,
+ (unsigned)mask, newRear, filled, (unsigned)filled);
+ // Don't reset mStopLast as this is const.
+ }
+ return rear;
+}
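The rebase-plus-overflow adjustment above is easiest to follow with concrete numbers. A self-contained example of the wrap case, using arbitrary positions:

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t frameCountP2 = 1 << 10;            // 1024
    const uint32_t overflowBit = frameCountP2 << 1;   // 2048
    const uint32_t mask = overflowBit - 1;            // 2047
    const uint32_t frameCount = 960;

    // The client stopped when its write position was 4050, has since written on
    // to 4100; the server read position is 4000.
    const int32_t front = 4000;
    const int32_t rear = 4100;
    const int32_t stopOffset = 4050 & mask;           // low bits stored in mStop

    int32_t newRear = (rear & ~(int32_t)mask) | stopOffset;  // 6098: off by overflowBit
    int32_t filled = newRear - front;
    if (filled >= (int32_t)overflowBit) {
        // rear and the stop offset straddle the power-of-two boundary
        newRear -= overflowBit;                       // 4050
        filled -= overflowBit;                        // 50
    }
    if (filled >= 0 && (uint32_t)filled <= frameCount) {
        printf("effective rear (stop level): %d, filled: %d\n", newRear, filled);
    }
    return 0;
}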
+
+void AudioTrackServerProxy::start()
+{
+ mStopLast = android_atomic_acquire_load(&mCblk->u.mStreaming.mStop);
+}
+
+__attribute__((no_sanitize("integer")))
status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
{
LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0,
@@ -693,7 +770,7 @@
// See notes on barriers at ClientProxy::obtainBuffer()
if (mIsOut) {
flushBufferIfNeeded(); // might modify mFront
- rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+ rear = getRear();
front = cblk->u.mStreaming.mFront;
} else {
front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
@@ -825,8 +902,7 @@
// FIXME should return an accurate value, but over-estimate is better than under-estimate
return mFrameCount;
}
- // the acquire might not be necessary since not doing a subsequent read
- int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+ const int32_t rear = getRear();
ssize_t filled = rear - cblk->u.mStreaming.mFront;
// pipe should not already be overfull
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
@@ -852,7 +928,7 @@
if (flush != mFlush) {
return mFrameCount;
}
- const int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+ const int32_t rear = getRear();
const ssize_t filled = rear - cblk->u.mStreaming.mFront;
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
return 0; // error condition, silently return 0.
@@ -1149,6 +1225,12 @@
}
}
+int32_t StaticAudioTrackServerProxy::getRear() const
+{
+ LOG_ALWAYS_FATAL("getRear() not permitted for static tracks");
+ return 0;
+}
+
// ---------------------------------------------------------------------------
} // namespace android
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 14feada..00af7e8 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -22,7 +22,11 @@
#include <stdint.h>
#include <sys/types.h>
+#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
+#include <cutils/multiuser.h>
+#include <media/TimeCheck.h>
+#include <private/android_filesystem_config.h>
#include "IAudioFlinger.h"
@@ -30,7 +34,7 @@
enum {
CREATE_TRACK = IBinder::FIRST_CALL_TRANSACTION,
- OPEN_RECORD,
+ CREATE_RECORD,
SAMPLE_RATE,
RESERVED, // obsolete, was CHANNEL_COUNT
FORMAT,
@@ -47,6 +51,7 @@
SET_MODE,
SET_MIC_MUTE,
GET_MIC_MUTE,
+ SET_RECORD_SILENCED,
SET_PARAMETERS,
GET_PARAMETERS,
REGISTER_CLIENT,
@@ -83,6 +88,7 @@
GET_AUDIO_HW_SYNC_FOR_SESSION,
SYSTEM_READY,
FRAME_COUNT_HAL,
+ GET_MICROPHONES,
};
#define MAX_ITEMS_PER_LIST 1024
@@ -95,182 +101,74 @@
{
}
- virtual sp<IAudioTrack> createTrack(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t *pFrameCount,
- audio_output_flags_t *flags,
- const sp<IMemory>& sharedBuffer,
- audio_io_handle_t output,
- pid_t pid,
- pid_t tid,
- audio_session_t *sessionId,
- int clientUid,
- status_t *status,
- audio_port_handle_t portId)
+ virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
+ CreateTrackOutput& output,
+ status_t *status)
{
Parcel data, reply;
sp<IAudioTrack> track;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) streamType);
- data.writeInt32(sampleRate);
- data.writeInt32(format);
- data.writeInt32(channelMask);
- size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
- data.writeInt64(frameCount);
- audio_output_flags_t lFlags = flags != NULL ? *flags : AUDIO_OUTPUT_FLAG_NONE;
- data.writeInt32(lFlags);
- // haveSharedBuffer
- if (sharedBuffer != 0) {
- data.writeInt32(true);
- data.writeStrongBinder(IInterface::asBinder(sharedBuffer));
- } else {
- data.writeInt32(false);
+
+ if (status == nullptr) {
+ return track;
}
- data.writeInt32((int32_t) output);
- data.writeInt32((int32_t) pid);
- data.writeInt32((int32_t) tid);
- audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
- if (sessionId != NULL) {
- lSessionId = *sessionId;
- }
- data.writeInt32(lSessionId);
- data.writeInt32(clientUid);
- data.writeInt32(portId);
+
+ input.writeToParcel(&data);
+
status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
if (lStatus != NO_ERROR) {
- ALOGE("createTrack error: %s", strerror(-lStatus));
- } else {
- frameCount = reply.readInt64();
- if (pFrameCount != NULL) {
- *pFrameCount = frameCount;
- }
- lFlags = (audio_output_flags_t)reply.readInt32();
- if (flags != NULL) {
- *flags = lFlags;
- }
- lSessionId = (audio_session_t) reply.readInt32();
- if (sessionId != NULL) {
- *sessionId = lSessionId;
- }
- lStatus = reply.readInt32();
- track = interface_cast<IAudioTrack>(reply.readStrongBinder());
- if (lStatus == NO_ERROR) {
- if (track == 0) {
- ALOGE("createTrack should have returned an IAudioTrack");
- lStatus = UNKNOWN_ERROR;
- }
- } else {
- if (track != 0) {
- ALOGE("createTrack returned an IAudioTrack but with status %d", lStatus);
- track.clear();
- }
- }
+ ALOGE("createTrack transaction error %d", lStatus);
+ *status = DEAD_OBJECT;
+ return track;
}
- if (status != NULL) {
- *status = lStatus;
+ *status = reply.readInt32();
+ if (*status != NO_ERROR) {
+ ALOGE("createTrack returned error %d", *status);
+ return track;
}
+ track = interface_cast<IAudioTrack>(reply.readStrongBinder());
+ if (track == 0) {
+ ALOGE("createTrack returned an NULL IAudioTrack with status OK");
+ *status = DEAD_OBJECT;
+ return track;
+ }
+ output.readFromParcel(&reply);
return track;
}
- virtual sp<IAudioRecord> openRecord(
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& opPackageName,
- size_t *pFrameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid,
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers,
- status_t *status,
- audio_port_handle_t portId)
+ virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
+ CreateRecordOutput& output,
+ status_t *status)
{
Parcel data, reply;
- sp<IAudioRecord> record;
+ sp<media::IAudioRecord> record;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) input);
- data.writeInt32(sampleRate);
- data.writeInt32(format);
- data.writeInt32(channelMask);
- data.writeString16(opPackageName);
- size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
- data.writeInt64(frameCount);
- audio_input_flags_t lFlags = flags != NULL ? *flags : AUDIO_INPUT_FLAG_NONE;
- data.writeInt32(lFlags);
- data.writeInt32((int32_t) pid);
- data.writeInt32((int32_t) tid);
- data.writeInt32((int32_t) clientUid);
- audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
- if (sessionId != NULL) {
- lSessionId = *sessionId;
+
+ if (status == nullptr) {
+ return record;
}
- data.writeInt32(lSessionId);
- data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
- data.writeInt32(portId);
- cblk.clear();
- buffers.clear();
- status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
+
+ input.writeToParcel(&data);
+
+ status_t lStatus = remote()->transact(CREATE_RECORD, data, &reply);
if (lStatus != NO_ERROR) {
- ALOGE("openRecord error: %s", strerror(-lStatus));
- } else {
- frameCount = reply.readInt64();
- if (pFrameCount != NULL) {
- *pFrameCount = frameCount;
- }
- lFlags = (audio_input_flags_t)reply.readInt32();
- if (flags != NULL) {
- *flags = lFlags;
- }
- lSessionId = (audio_session_t) reply.readInt32();
- if (sessionId != NULL) {
- *sessionId = lSessionId;
- }
- size_t lNotificationFrames = (size_t) reply.readInt64();
- if (notificationFrames != NULL) {
- *notificationFrames = lNotificationFrames;
- }
- lStatus = reply.readInt32();
- record = interface_cast<IAudioRecord>(reply.readStrongBinder());
- cblk = interface_cast<IMemory>(reply.readStrongBinder());
- if (cblk != 0 && cblk->pointer() == NULL) {
- cblk.clear();
- }
- buffers = interface_cast<IMemory>(reply.readStrongBinder());
- if (buffers != 0 && buffers->pointer() == NULL) {
- buffers.clear();
- }
- if (lStatus == NO_ERROR) {
- if (record == 0) {
- ALOGE("openRecord should have returned an IAudioRecord");
- lStatus = UNKNOWN_ERROR;
- } else if (cblk == 0) {
- ALOGE("openRecord should have returned a cblk");
- lStatus = NO_MEMORY;
- }
- // buffers is permitted to be 0
- } else {
- if (record != 0 || cblk != 0 || buffers != 0) {
- ALOGE("openRecord returned an IAudioRecord, cblk, "
- "or buffers but with status %d", lStatus);
- }
- }
- if (lStatus != NO_ERROR) {
- record.clear();
- cblk.clear();
- buffers.clear();
- }
+ ALOGE("createRecord transaction error %d", lStatus);
+ *status = DEAD_OBJECT;
+ return record;
}
- if (status != NULL) {
- *status = lStatus;
+ *status = reply.readInt32();
+ if (*status != NO_ERROR) {
+ ALOGE("createRecord returned error %d", *status);
+ return record;
}
+
+ record = interface_cast<media::IAudioRecord>(reply.readStrongBinder());
+ if (record == 0) {
+ ALOGE("createRecord returned a NULL IAudioRecord with status OK");
+ *status = DEAD_OBJECT;
+ return record;
+ }
+ output.readFromParcel(&reply);
return record;
}
@@ -413,6 +311,15 @@
return reply.readInt32();
}
+ virtual void setRecordSilenced(uid_t uid, bool silenced)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeInt32(uid);
+ data.writeInt32(silenced ? 1 : 0);
+ remote()->transact(SET_RECORD_SILENCED, data, &reply);
+ }
+
virtual status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
{
Parcel data, reply;
@@ -804,14 +711,18 @@
return reply.readInt64();
}
- virtual status_t setLowRamDevice(bool isLowRamDevice)
+ virtual status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) override
{
Parcel data, reply;
- data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int) isLowRamDevice);
- remote()->transact(SET_LOW_RAM_DEVICE, data, &reply);
- return reply.readInt32();
+
+ static_assert(NO_ERROR == 0, "NO_ERROR must be 0");
+ return data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor())
+ ?: data.writeInt32((int) isLowRamDevice)
+ ?: data.writeInt64(totalMemory)
+ ?: remote()->transact(SET_LOW_RAM_DEVICE, data, &reply)
+ ?: reply.readInt32();
}
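The chained `?:` above relies on the GNU/Clang conditional-without-middle-operand extension: each step runs only while every previous call returned 0 (NO_ERROR), and the first non-zero status is what gets returned. A self-contained illustration (stepA/stepB/stepC are made-up names):

#include <cstdio>

static int stepA() { return 0;   }   // NO_ERROR
static int stepB() { return -32; }   // some error
static int stepC() { printf("never reached\n"); return 0; }

int main() {
    // stepA() succeeds, stepB() fails, so stepC() is never evaluated and the
    // chain yields stepB()'s status.
    int status = stepA() ?: stepB() ?: stepC();
    printf("status = %d\n", status);   // -32
    return 0;
}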
+
virtual status_t listAudioPorts(unsigned int *num_ports,
struct audio_port *ports)
{
@@ -935,6 +846,18 @@
}
return reply.readInt64();
}
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ status_t status = remote()->transact(GET_MICROPHONES, data, &reply);
+ if (status != NO_ERROR ||
+ (status = (status_t)reply.readInt32()) != NO_ERROR) {
+ return status;
+ }
+ status = reply.readParcelableVector(microphones);
+ return status;
+ }
};
IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -944,21 +867,81 @@
status_t BnAudioFlinger::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
+ // make sure transactions reserved to AudioPolicyManager do not come from other processes
+ switch (code) {
+ case SET_STREAM_VOLUME:
+ case SET_STREAM_MUTE:
+ case OPEN_OUTPUT:
+ case OPEN_DUPLICATE_OUTPUT:
+ case CLOSE_OUTPUT:
+ case SUSPEND_OUTPUT:
+ case RESTORE_OUTPUT:
+ case OPEN_INPUT:
+ case CLOSE_INPUT:
+ case INVALIDATE_STREAM:
+ case SET_VOICE_VOLUME:
+ case MOVE_EFFECTS:
+ case LOAD_HW_MODULE:
+ case LIST_AUDIO_PORTS:
+ case GET_AUDIO_PORT:
+ case CREATE_AUDIO_PATCH:
+ case RELEASE_AUDIO_PATCH:
+ case LIST_AUDIO_PATCHES:
+ case SET_AUDIO_PORT_CONFIG:
+ case SET_RECORD_SILENCED:
+ ALOGW("%s: transaction %d received from PID %d",
+ __func__, code, IPCThreadState::self()->getCallingPid());
+ // return status only for non void methods
+ switch (code) {
+ case SET_RECORD_SILENCED:
+ break;
+ default:
+ reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+ break;
+ }
+ return OK;
+ default:
+ break;
+ }
+
+ // make sure the following transactions come from system components
+ switch (code) {
+ case SET_MASTER_VOLUME:
+ case SET_MASTER_MUTE:
+ case SET_MODE:
+ case SET_MIC_MUTE:
+ case SET_LOW_RAM_DEVICE:
+ case SYSTEM_READY: {
+ if (multiuser_get_app_id(IPCThreadState::self()->getCallingUid()) >= AID_APP_START) {
+ ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
+ __func__, code, IPCThreadState::self()->getCallingPid(),
+ IPCThreadState::self()->getCallingUid());
+ // return status only for non void methods
+ switch (code) {
+ case SYSTEM_READY:
+ break;
+ default:
+ reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+ break;
+ }
+ return OK;
+ }
+ } break;
+ default:
+ break;
+ }
+
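The UID gate above compares the caller's per-user app id against AID_APP_START, the first UID assigned to regular applications. A standalone sketch of that arithmetic; the constants mirror the platform headers and appIdFromUid() stands in for multiuser_get_app_id():

#include <cstdint>
#include <cstdio>

constexpr uint32_t AID_USER_OFFSET = 100000;   // UID range per user
constexpr uint32_t AID_APP_START   = 10000;    // first application UID within a user

static uint32_t appIdFromUid(uint32_t uid) {   // multiuser_get_app_id() equivalent
    return uid % AID_USER_OFFSET;
}

int main() {
    for (uint32_t uid : {1000u /*system*/, 1041u /*audioserver*/, 10057u, 1010057u}) {
        bool blocked = appIdFromUid(uid) >= AID_APP_START;
        printf("uid %u -> app id %u, %s\n", uid, appIdFromUid(uid),
               blocked ? "rejected" : "allowed");
    }
    return 0;
}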
// Whitelist of relevant events to trigger log merging.
// Log merging should activate during audio activity of any kind. These are considered the
// most relevant events.
// TODO should select more wisely the items from the list
switch (code) {
case CREATE_TRACK:
- case OPEN_RECORD:
+ case CREATE_RECORD:
case SET_MASTER_VOLUME:
case SET_MASTER_MUTE:
- case SET_STREAM_VOLUME:
- case SET_STREAM_MUTE:
case SET_MIC_MUTE:
case SET_PARAMETERS:
- case OPEN_INPUT:
- case SET_VOICE_VOLUME:
case CREATE_EFFECT:
case SYSTEM_READY: {
requestLogMerge();
@@ -967,77 +950,58 @@
default:
break;
}
+
+ TimeCheck check("IAudioFlinger");
+
switch (code) {
case CREATE_TRACK: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int streamType = data.readInt32();
- uint32_t sampleRate = data.readInt32();
- audio_format_t format = (audio_format_t) data.readInt32();
- audio_channel_mask_t channelMask = data.readInt32();
- size_t frameCount = data.readInt64();
- audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
- bool haveSharedBuffer = data.readInt32() != 0;
- sp<IMemory> buffer;
- if (haveSharedBuffer) {
- buffer = interface_cast<IMemory>(data.readStrongBinder());
+
+ CreateTrackInput input;
+ if (input.readFromParcel((Parcel*)&data) != NO_ERROR) {
+ reply->writeInt32(DEAD_OBJECT);
+ return NO_ERROR;
}
- audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
- pid_t pid = (pid_t) data.readInt32();
- pid_t tid = (pid_t) data.readInt32();
- audio_session_t sessionId = (audio_session_t) data.readInt32();
- int clientUid = data.readInt32();
- audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
- status_t status = NO_ERROR;
- sp<IAudioTrack> track;
- if ((haveSharedBuffer && (buffer == 0)) ||
- ((buffer != 0) && (buffer->pointer() == NULL))) {
- ALOGW("CREATE_TRACK: cannot retrieve shared memory");
- status = DEAD_OBJECT;
- } else {
- track = createTrack(
- (audio_stream_type_t) streamType, sampleRate, format,
- channelMask, &frameCount, &flags, buffer, output, pid, tid,
- &sessionId, clientUid, &status, portId);
- LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR));
- }
- reply->writeInt64(frameCount);
- reply->writeInt32(flags);
- reply->writeInt32(sessionId);
+
+ status_t status;
+ CreateTrackOutput output;
+
+ sp<IAudioTrack> track = createTrack(input,
+ output,
+ &status);
+
+ LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR));
reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
reply->writeStrongBinder(IInterface::asBinder(track));
+ output.writeToParcel(reply);
return NO_ERROR;
} break;
- case OPEN_RECORD: {
+ case CREATE_RECORD: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- audio_io_handle_t input = (audio_io_handle_t) data.readInt32();
- uint32_t sampleRate = data.readInt32();
- audio_format_t format = (audio_format_t) data.readInt32();
- audio_channel_mask_t channelMask = data.readInt32();
- const String16& opPackageName = data.readString16();
- size_t frameCount = data.readInt64();
- audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
- pid_t pid = (pid_t) data.readInt32();
- pid_t tid = (pid_t) data.readInt32();
- int clientUid = data.readInt32();
- audio_session_t sessionId = (audio_session_t) data.readInt32();
- size_t notificationFrames = data.readInt64();
- audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
- sp<IMemory> cblk;
- sp<IMemory> buffers;
- status_t status = NO_ERROR;
- sp<IAudioRecord> record = openRecord(input,
- sampleRate, format, channelMask, opPackageName, &frameCount, &flags,
- pid, tid, clientUid, &sessionId, ¬ificationFrames, cblk, buffers,
- &status, portId);
+
+ CreateRecordInput input;
+ if (input.readFromParcel((Parcel*)&data) != NO_ERROR) {
+ reply->writeInt32(DEAD_OBJECT);
+ return NO_ERROR;
+ }
+
+ status_t status;
+ CreateRecordOutput output;
+
+ sp<media::IAudioRecord> record = createRecord(input,
+ output,
+ &status);
+
LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
- reply->writeInt64(frameCount);
- reply->writeInt32(flags);
- reply->writeInt32(sessionId);
- reply->writeInt64(notificationFrames);
reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
reply->writeStrongBinder(IInterface::asBinder(record));
- reply->writeStrongBinder(IInterface::asBinder(cblk));
- reply->writeStrongBinder(IInterface::asBinder(buffers));
+ output.writeToParcel(reply);
return NO_ERROR;
} break;
case SAMPLE_RATE: {
@@ -1127,6 +1091,15 @@
reply->writeInt32( getMicMute() );
return NO_ERROR;
} break;
+ case SET_RECORD_SILENCED: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ uid_t uid = data.readInt32();
+ bool silenced = data.readInt32() == 1;
+ setRecordSilenced(uid, silenced);
+ return NO_ERROR;
+ } break;
case SET_PARAMETERS: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
@@ -1364,8 +1337,13 @@
} break;
case SET_LOW_RAM_DEVICE: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- bool isLowRamDevice = data.readInt32() != 0;
- reply->writeInt32(setLowRamDevice(isLowRamDevice));
+ int32_t isLowRamDevice;
+ int64_t totalMemory;
+ const status_t status =
+ data.readInt32(&isLowRamDevice) ?:
+ data.readInt64(&totalMemory) ?:
+ setLowRamDevice(isLowRamDevice != 0, totalMemory);
+ (void)reply->writeInt32(status);
return NO_ERROR;
} break;
case LIST_AUDIO_PORTS: {
@@ -1481,6 +1459,16 @@
reply->writeInt64( frameCountHAL((audio_io_handle_t) data.readInt32()) );
return NO_ERROR;
} break;
+ case GET_MICROPHONES: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ std::vector<media::MicrophoneInfo> microphones;
+ status_t status = getMicrophones(&microphones);
+ reply->writeInt32(status);
+ if (status == NO_ERROR) {
+ reply->writeParcelableVector(microphones);
+ }
+ return NO_ERROR;
+ }
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index ceba211..a1236e7 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -22,11 +22,13 @@
#include <math.h>
#include <sys/types.h>
+#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
-
+#include <cutils/multiuser.h>
#include <media/AudioEffect.h>
#include <media/IAudioPolicyService.h>
-
+#include <media/TimeCheck.h>
+#include <private/android_filesystem_config.h>
#include <system/audio.h>
namespace android {
@@ -78,7 +80,9 @@
SET_AUDIO_PORT_CALLBACK_ENABLED,
SET_MASTER_MONO,
GET_MASTER_MONO,
- GET_STREAM_VOLUME_DB
+ GET_STREAM_VOLUME_DB,
+ GET_SURROUND_FORMATS,
+ SET_SURROUND_FORMAT_ENABLED
};
#define MAX_ITEMS_PER_LIST 1024
@@ -160,28 +164,11 @@
return static_cast <audio_policy_forced_cfg_t> (reply.readInt32());
}
- virtual audio_io_handle_t getOutput(
- audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
+ virtual audio_io_handle_t getOutput(audio_stream_type_t stream)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32(static_cast <uint32_t>(stream));
- data.writeInt32(samplingRate);
- data.writeInt32(static_cast <uint32_t>(format));
- data.writeInt32(channelMask);
- data.writeInt32(static_cast <uint32_t>(flags));
- // hasOffloadInfo
- if (offloadInfo == NULL) {
- data.writeInt32(0);
- } else {
- data.writeInt32(1);
- data.write(offloadInfo, sizeof(audio_offload_info_t));
- }
remote()->transact(GET_OUTPUT, data, &reply);
return static_cast <audio_io_handle_t> (reply.readInt32());
}
@@ -190,6 +177,7 @@
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
+ pid_t pid,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t flags,
@@ -233,6 +221,7 @@
data.writeInt32(1);
data.writeInt32(*stream);
}
+ data.writeInt32(pid);
data.writeInt32(uid);
data.write(config, sizeof(audio_config_t));
data.writeInt32(static_cast <uint32_t>(flags));
@@ -299,6 +288,7 @@
audio_session_t session,
pid_t pid,
uid_t uid,
+ const String16& opPackageName,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
@@ -327,6 +317,7 @@
data.writeInt32(session);
data.writeInt32(pid);
data.writeInt32(uid);
+ data.writeString16(opPackageName);
data.write(config, sizeof(audio_config_base_t));
data.writeInt32(flags);
data.writeInt32(*selectedDeviceId);
@@ -345,35 +336,33 @@
return NO_ERROR;
}
- virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session)
+ virtual status_t startInput(audio_port_handle_t portId,
+ bool *silenced)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(input);
- data.writeInt32(session);
+ data.writeInt32(portId);
+ data.writeInt32(*silenced ? 1 : 0);
remote()->transact(START_INPUT, data, &reply);
- return static_cast <status_t> (reply.readInt32());
+ status_t status = static_cast <status_t> (reply.readInt32());
+ *silenced = reply.readInt32() == 1;
+ return status;
}
- virtual status_t stopInput(audio_io_handle_t input,
- audio_session_t session)
+ virtual status_t stopInput(audio_port_handle_t portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(input);
- data.writeInt32(session);
+ data.writeInt32(portId);
remote()->transact(STOP_INPUT, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
- virtual void releaseInput(audio_io_handle_t input,
- audio_session_t session)
+ virtual void releaseInput(audio_port_handle_t portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32(input);
- data.writeInt32(session);
+ data.writeInt32(portId);
remote()->transact(RELEASE_INPUT, data, &reply);
}
@@ -842,16 +831,125 @@
}
return reply.readFloat();
}
+
+ virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+ audio_format_t *surroundFormats,
+ bool *surroundFormatsEnabled,
+ bool reported)
+ {
+ if (numSurroundFormats == NULL || (*numSurroundFormats != 0 &&
+ (surroundFormats == NULL || surroundFormatsEnabled == NULL))) {
+ return BAD_VALUE;
+ }
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ unsigned int numSurroundFormatsReq = *numSurroundFormats;
+ data.writeUint32(numSurroundFormatsReq);
+ data.writeBool(reported);
+ status_t status = remote()->transact(GET_SURROUND_FORMATS, data, &reply);
+ if (status == NO_ERROR && (status = (status_t)reply.readInt32()) == NO_ERROR) {
+ *numSurroundFormats = reply.readUint32();
+ }
+ if (status == NO_ERROR) {
+ if (numSurroundFormatsReq > *numSurroundFormats) {
+ numSurroundFormatsReq = *numSurroundFormats;
+ }
+ if (numSurroundFormatsReq > 0) {
+ status = reply.read(surroundFormats,
+ numSurroundFormatsReq * sizeof(audio_format_t));
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = reply.read(surroundFormatsEnabled,
+ numSurroundFormatsReq * sizeof(bool));
+ }
+ }
+ return status;
+ }
+
+ virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(audioFormat);
+ data.writeBool(enabled);
+ status_t status = remote()->transact(SET_SURROUND_FORMAT_ENABLED, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
// ----------------------------------------------------------------------
-
status_t BnAudioPolicyService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
+ // make sure transactions reserved to AudioFlinger do not come from other processes
+ switch (code) {
+ case START_OUTPUT:
+ case STOP_OUTPUT:
+ case RELEASE_OUTPUT:
+ case GET_INPUT_FOR_ATTR:
+ case START_INPUT:
+ case STOP_INPUT:
+ case RELEASE_INPUT:
+ case GET_STRATEGY_FOR_STREAM:
+ case GET_OUTPUT_FOR_EFFECT:
+ case REGISTER_EFFECT:
+ case UNREGISTER_EFFECT:
+ case SET_EFFECT_ENABLED:
+ case GET_OUTPUT_FOR_ATTR:
+ case ACQUIRE_SOUNDTRIGGER_SESSION:
+ case RELEASE_SOUNDTRIGGER_SESSION:
+ ALOGW("%s: transaction %d received from PID %d",
+ __func__, code, IPCThreadState::self()->getCallingPid());
+ // return status only for non void methods
+ switch (code) {
+ case RELEASE_OUTPUT:
+ case RELEASE_INPUT:
+ break;
+ default:
+ reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+ break;
+ }
+ return OK;
+ default:
+ break;
+ }
+
+ // make sure the following transactions come from system components
+ switch (code) {
+ case SET_DEVICE_CONNECTION_STATE:
+ case HANDLE_DEVICE_CONFIG_CHANGE:
+ case SET_PHONE_STATE:
+//FIXME: Allow SET_FORCE_USE calls from system apps until a better use case routing API is available
+// case SET_FORCE_USE:
+ case INIT_STREAM_VOLUME:
+ case SET_STREAM_VOLUME:
+ case REGISTER_POLICY_MIXES:
+ case SET_MASTER_MONO:
+ case START_AUDIO_SOURCE:
+ case STOP_AUDIO_SOURCE:
+ case GET_SURROUND_FORMATS:
+ case SET_SURROUND_FORMAT_ENABLED: {
+ if (multiuser_get_app_id(IPCThreadState::self()->getCallingUid()) >= AID_APP_START) {
+ ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
+ __func__, code, IPCThreadState::self()->getCallingPid(),
+ IPCThreadState::self()->getCallingUid());
+ reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
+ return OK;
+ }
+ } break;
+ default:
+ break;
+ }
+
+ TimeCheck check("IAudioPolicyService");
+
switch (code) {
case SET_DEVICE_CONNECTION_STATE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
@@ -934,32 +1032,18 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_stream_type_t stream =
static_cast <audio_stream_type_t>(data.readInt32());
- uint32_t samplingRate = data.readInt32();
- audio_format_t format = (audio_format_t) data.readInt32();
- audio_channel_mask_t channelMask = data.readInt32();
- audio_output_flags_t flags =
- static_cast <audio_output_flags_t>(data.readInt32());
- bool hasOffloadInfo = data.readInt32() != 0;
- audio_offload_info_t offloadInfo;
- if (hasOffloadInfo) {
- data.read(&offloadInfo, sizeof(audio_offload_info_t));
- }
- audio_io_handle_t output = getOutput(stream,
- samplingRate,
- format,
- channelMask,
- flags,
- hasOffloadInfo ? &offloadInfo : NULL);
+ audio_io_handle_t output = getOutput(stream);
reply->writeInt32(static_cast <int>(output));
return NO_ERROR;
} break;
case GET_OUTPUT_FOR_ATTR: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_attributes_t attr;
+ audio_attributes_t attr = {};
bool hasAttributes = data.readInt32() != 0;
if (hasAttributes) {
data.read(&attr, sizeof(audio_attributes_t));
+ sanetizeAudioAttributes(&attr);
}
audio_session_t session = (audio_session_t)data.readInt32();
audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
@@ -967,6 +1051,7 @@
if (hasStream) {
stream = (audio_stream_type_t)data.readInt32();
}
+ pid_t pid = (pid_t)data.readInt32();
uid_t uid = (uid_t)data.readInt32();
audio_config_t config;
memset(&config, 0, sizeof(audio_config_t));
@@ -977,7 +1062,7 @@
audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
audio_io_handle_t output = 0;
status_t status = getOutputForAttr(hasAttributes ? &attr : NULL,
- &output, session, &stream, uid,
+ &output, session, &stream, pid, uid,
&config,
flags, &selectedDeviceId, &portId);
reply->writeInt32(status);
@@ -1023,12 +1108,14 @@
case GET_INPUT_FOR_ATTR: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_attributes_t attr;
+ audio_attributes_t attr = {};
data.read(&attr, sizeof(audio_attributes_t));
+ sanetizeAudioAttributes(&attr);
audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
audio_session_t session = (audio_session_t)data.readInt32();
pid_t pid = (pid_t)data.readInt32();
uid_t uid = (uid_t)data.readInt32();
+ const String16 opPackageName = data.readString16();
audio_config_base_t config;
memset(&config, 0, sizeof(audio_config_base_t));
data.read(&config, sizeof(audio_config_base_t));
@@ -1036,7 +1123,7 @@
audio_port_handle_t selectedDeviceId = (audio_port_handle_t) data.readInt32();
audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
status_t status = getInputForAttr(&attr, &input, session, pid, uid,
- &config,
+ opPackageName, &config,
flags, &selectedDeviceId, &portId);
reply->writeInt32(status);
if (status == NO_ERROR) {
@@ -1049,25 +1136,25 @@
case START_INPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t input = static_cast <audio_io_handle_t>(data.readInt32());
- audio_session_t session = static_cast <audio_session_t>(data.readInt32());
- reply->writeInt32(static_cast <uint32_t>(startInput(input, session)));
+ audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+ bool silenced = data.readInt32() == 1;
+ status_t status = startInput(portId, &silenced);
+ reply->writeInt32(static_cast <uint32_t>(status));
+ reply->writeInt32(silenced ? 1 : 0);
return NO_ERROR;
} break;
case STOP_INPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t input = static_cast <audio_io_handle_t>(data.readInt32());
- audio_session_t session = static_cast <audio_session_t>(data.readInt32());
- reply->writeInt32(static_cast <uint32_t>(stopInput(input, session)));
+ audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+ reply->writeInt32(static_cast <uint32_t>(stopInput(portId)));
return NO_ERROR;
} break;
case RELEASE_INPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t input = static_cast <audio_io_handle_t>(data.readInt32());
- audio_session_t session = static_cast <audio_session_t>(data.readInt32());
- releaseInput(input, session);
+ audio_port_handle_t portId = static_cast <audio_port_handle_t>(data.readInt32());
+ releaseInput(portId);
return NO_ERROR;
} break;
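
A minimal sketch, not part of this change, of how a capture client is expected to drive the reworked input calls: the input is now addressed by the audio_port_handle_t returned from getInputForAttr(), and startInput() both sends and receives the "silenced" flag (the matching AudioSystem wrappers are declared later in this patch; names and comments here are illustrative only):

    bool silenced = false;
    status_t status = AudioSystem::startInput(portId, &silenced);
    if (status == NO_ERROR && silenced) {
        // Capture is running but currently delivers silence (for example due to a
        // concurrent-capture policy decision); the client may surface this state.
    }
    // ... capture ...
    AudioSystem::stopInput(portId);
    AudioSystem::releaseInput(portId);
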
@@ -1123,8 +1210,11 @@
case GET_OUTPUT_FOR_EFFECT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- effect_descriptor_t desc;
- data.read(&desc, sizeof(effect_descriptor_t));
+ effect_descriptor_t desc = {};
+ if (data.read(&desc, sizeof(desc)) != NO_ERROR) {
+ android_errorWriteLog(0x534e4554, "73126106");
+ }
+ (void)sanitizeEffectDescriptor(&desc);
audio_io_handle_t output = getOutputForEffect(&desc);
reply->writeInt32(static_cast <int>(output));
return NO_ERROR;
@@ -1132,8 +1222,11 @@
case REGISTER_EFFECT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- effect_descriptor_t desc;
- data.read(&desc, sizeof(effect_descriptor_t));
+ effect_descriptor_t desc = {};
+ if (data.read(&desc, sizeof(desc)) != NO_ERROR) {
+ android_errorWriteLog(0x534e4554, "73126106");
+ }
+ (void)sanitizeEffectDescriptor(&desc);
audio_io_handle_t io = data.readInt32();
uint32_t strategy = data.readInt32();
audio_session_t session = (audio_session_t) data.readInt32();
@@ -1192,7 +1285,7 @@
count = AudioEffect::kMaxPreProcessing;
}
uint32_t retCount = count;
- effect_descriptor_t *descriptors = new effect_descriptor_t[count];
+ effect_descriptor_t *descriptors = new effect_descriptor_t[count]{};
status_t status = queryDefaultPreProcessing(audioSession, descriptors, &retCount);
reply->writeInt32(status);
if (status != NO_ERROR && status != NO_MEMORY) {
@@ -1211,7 +1304,7 @@
case IS_OFFLOAD_SUPPORTED: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_offload_info_t info;
+ audio_offload_info_t info = {};
data.read(&info, sizeof(audio_offload_info_t));
bool isSupported = isOffloadSupported(info);
reply->writeInt32(isSupported);
@@ -1266,7 +1359,7 @@
case CREATE_AUDIO_PATCH: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- struct audio_patch patch;
+ struct audio_patch patch = {};
data.read(&patch, sizeof(struct audio_patch));
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
@@ -1282,7 +1375,7 @@
case RELEASE_AUDIO_PATCH: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_patch_handle_t handle;
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
data.read(&handle, sizeof(audio_patch_handle_t));
status_t status = releaseAudioPatch(handle);
reply->writeInt32(status);
@@ -1321,8 +1414,9 @@
case SET_AUDIO_PORT_CONFIG: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- struct audio_port_config config;
+ struct audio_port_config config = {};
data.read(&config, sizeof(struct audio_port_config));
+ (void)sanitizeAudioPortConfig(&config);
status_t status = setAudioPortConfig(&config);
reply->writeInt32(status);
return NO_ERROR;
@@ -1396,10 +1490,12 @@
case START_AUDIO_SOURCE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- struct audio_port_config source;
+ struct audio_port_config source = {};
data.read(&source, sizeof(struct audio_port_config));
- audio_attributes_t attributes;
+ (void)sanitizeAudioPortConfig(&source);
+ audio_attributes_t attributes = {};
data.read(&attributes, sizeof(audio_attributes_t));
+ sanetizeAudioAttributes(&attributes);
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
status_t status = startAudioSource(&source, &attributes, &handle);
reply->writeInt32(status);
@@ -1445,11 +1541,93 @@
return NO_ERROR;
}
+ case GET_SURROUND_FORMATS: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ unsigned int numSurroundFormatsReq = data.readUint32();
+ if (numSurroundFormatsReq > MAX_ITEMS_PER_LIST) {
+ numSurroundFormatsReq = MAX_ITEMS_PER_LIST;
+ }
+ bool reported = data.readBool();
+ unsigned int numSurroundFormats = numSurroundFormatsReq;
+ audio_format_t *surroundFormats = (audio_format_t *)calloc(
+ numSurroundFormats, sizeof(audio_format_t));
+ bool *surroundFormatsEnabled = (bool *)calloc(numSurroundFormats, sizeof(bool));
+ if (numSurroundFormatsReq > 0 &&
+ (surroundFormats == NULL || surroundFormatsEnabled == NULL)) {
+ free(surroundFormats);
+ free(surroundFormatsEnabled);
+ reply->writeInt32(NO_MEMORY);
+ return NO_ERROR;
+ }
+ status_t status = getSurroundFormats(
+ &numSurroundFormats, surroundFormats, surroundFormatsEnabled, reported);
+ reply->writeInt32(status);
+
+ if (status == NO_ERROR) {
+ reply->writeUint32(numSurroundFormats);
+ if (numSurroundFormatsReq > numSurroundFormats) {
+ numSurroundFormatsReq = numSurroundFormats;
+ }
+ reply->write(surroundFormats, numSurroundFormatsReq * sizeof(audio_format_t));
+ reply->write(surroundFormatsEnabled, numSurroundFormatsReq * sizeof(bool));
+ }
+ free(surroundFormats);
+ free(surroundFormatsEnabled);
+ return NO_ERROR;
+ }
+
+ case SET_SURROUND_FORMAT_ENABLED: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_format_t audioFormat = (audio_format_t) data.readInt32();
+ bool enabled = data.readBool();
+ status_t status = setSurroundFormatEnabled(audioFormat, enabled);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
+/** returns true if string overflow was prevented by zero termination */
+template <size_t size>
+static bool preventStringOverflow(char (&s)[size]) {
+ if (strnlen(s, size) < size) return false;
+ s[size - 1] = '\0';
+ return true;
+}
+
+void BnAudioPolicyService::sanetizeAudioAttributes(audio_attributes_t* attr)
+{
+ const size_t tagsMaxSize = AUDIO_ATTRIBUTES_TAGS_MAX_SIZE;
+ if (strnlen(attr->tags, tagsMaxSize) >= tagsMaxSize) {
+ android_errorWriteLog(0x534e4554, "68953950"); // SafetyNet logging
+ }
+ attr->tags[tagsMaxSize - 1] = '\0';
+}
+
+/** returns BAD_VALUE if sanitization was required. */
+status_t BnAudioPolicyService::sanitizeEffectDescriptor(effect_descriptor_t* desc)
+{
+ if (preventStringOverflow(desc->name)
+ | /* always */ preventStringOverflow(desc->implementor)) {
+ android_errorWriteLog(0x534e4554, "73126106"); // SafetyNet logging
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+/** returns BAD_VALUE if sanitization was required. */
+status_t BnAudioPolicyService::sanitizeAudioPortConfig(struct audio_port_config* config)
+{
+ if (config->type == AUDIO_PORT_TYPE_DEVICE &&
+ preventStringOverflow(config->ext.device.address)) {
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
// ----------------------------------------------------------------------------
} // namespace android
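
For context, a hedged sketch of the two-step query the new GET_SURROUND_FORMATS proxy supports, using the AudioSystem wrapper declared later in this patch: pass numSurroundFormats = 0 first to learn how many formats exist, then allocate and query again (includes and error handling trimmed):

    unsigned int numFormats = 0;
    status_t status = AudioSystem::getSurroundFormats(
            &numFormats, nullptr /*surroundFormats*/, nullptr /*surroundFormatsEnabled*/,
            false /*reported*/);
    if (status == NO_ERROR && numFormats > 0) {
        std::vector<audio_format_t> formats(numFormats);
        std::unique_ptr<bool[]> enabled(new bool[numFormats]);
        status = AudioSystem::getSurroundFormats(
                &numFormats, formats.data(), enabled.get(), false /*reported*/);
    }
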
diff --git a/media/libaudioclient/IAudioRecord.cpp b/media/libaudioclient/IAudioRecord.cpp
deleted file mode 100644
index 1331c0d..0000000
--- a/media/libaudioclient/IAudioRecord.cpp
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#define LOG_TAG "IAudioRecord"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-
-#include <media/IAudioRecord.h>
-
-namespace android {
-
-enum {
- UNUSED_WAS_GET_CBLK = IBinder::FIRST_CALL_TRANSACTION,
- START,
- STOP
-};
-
-class BpAudioRecord : public BpInterface<IAudioRecord>
-{
-public:
- explicit BpAudioRecord(const sp<IBinder>& impl)
- : BpInterface<IAudioRecord>(impl)
- {
- }
-
- virtual status_t start(int /*AudioSystem::sync_event_t*/ event, audio_session_t triggerSession)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
- data.writeInt32(event);
- data.writeInt32(triggerSession);
- status_t status = remote()->transact(START, data, &reply);
- if (status == NO_ERROR) {
- status = reply.readInt32();
- } else {
- ALOGW("start() error: %s", strerror(-status));
- }
- return status;
- }
-
- virtual void stop()
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
- remote()->transact(STOP, data, &reply);
- }
-
-};
-
-IMPLEMENT_META_INTERFACE(AudioRecord, "android.media.IAudioRecord");
-
-// ----------------------------------------------------------------------
-
-status_t BnAudioRecord::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- switch (code) {
- case START: {
- CHECK_INTERFACE(IAudioRecord, data, reply);
- int /*AudioSystem::sync_event_t*/ event = data.readInt32();
- audio_session_t triggerSession = (audio_session_t) data.readInt32();
- reply->writeInt32(start(event, triggerSession));
- return NO_ERROR;
- } break;
- case STOP: {
- CHECK_INTERFACE(IAudioRecord, data, reply);
- stop();
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-} // namespace android
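
The hand-written Bp/Bn implementation removed above is superseded by the AIDL definition added below. A hedged sketch of how native code adapts to the generated interface, whose methods return binder::Status rather than status_t (the helper name and error mapping are illustrative, not part of this change):

    #include <android/media/IAudioRecord.h>

    // Hypothetical helper: start capture through the generated proxy and map the
    // binder status back to a status_t for legacy callers.
    static status_t startRecord(const sp<media::IAudioRecord>& record,
                                int event, int triggerSession) {
        binder::Status s = record->start(event, triggerSession);
        if (s.isOk()) return NO_ERROR;
        // Illustrative mapping only: prefer the transport error when one is set.
        return s.transactionError() != NO_ERROR ? s.transactionError() : UNKNOWN_ERROR;
    }
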
diff --git a/media/libaudioclient/IAudioTrack.cpp b/media/libaudioclient/IAudioTrack.cpp
index 79e864d..adff057 100644
--- a/media/libaudioclient/IAudioTrack.cpp
+++ b/media/libaudioclient/IAudioTrack.cpp
@@ -28,6 +28,8 @@
namespace android {
+using media::VolumeShaper;
+
enum {
GET_CBLK = IBinder::FIRST_CALL_TRANSACTION,
START,
@@ -185,7 +187,7 @@
return nullptr;
}
sp<VolumeShaper::State> state = new VolumeShaper::State;
- status = state->readFromParcel(reply);
+ status = state->readFromParcel(&reply);
if (status != NO_ERROR) {
return nullptr;
}
@@ -263,12 +265,12 @@
status_t status = data.readInt32(&present);
if (status == NO_ERROR && present != 0) {
configuration = new VolumeShaper::Configuration();
- status = configuration->readFromParcel(data);
+ status = configuration->readFromParcel(&data);
}
status = status ?: data.readInt32(&present);
if (status == NO_ERROR && present != 0) {
operation = new VolumeShaper::Operation();
- status = operation->readFromParcel(data);
+ status = operation->readFromParcel(&data);
}
if (status == NO_ERROR) {
status = (status_t)applyVolumeShaper(configuration, operation);
diff --git a/media/libaudioclient/PlayerBase.cpp b/media/libaudioclient/PlayerBase.cpp
index 7868318..b0c68e5 100644
--- a/media/libaudioclient/PlayerBase.cpp
+++ b/media/libaudioclient/PlayerBase.cpp
@@ -22,6 +22,8 @@
namespace android {
+using media::VolumeShaper;
+
//--------------------------------------------------------------------------------------------------
PlayerBase::PlayerBase() : BnPlayer(),
mPanMultiplierL(1.0f), mPanMultiplierR(1.0f),
@@ -117,23 +119,26 @@
//------------------------------------------------------------------------------
// Implementation of IPlayer
-void PlayerBase::start() {
+binder::Status PlayerBase::start() {
ALOGD("PlayerBase::start() from IPlayer");
(void)startWithStatus();
+ return binder::Status::ok();
}
-void PlayerBase::pause() {
+binder::Status PlayerBase::pause() {
ALOGD("PlayerBase::pause() from IPlayer");
(void)pauseWithStatus();
+ return binder::Status::ok();
}
-void PlayerBase::stop() {
+binder::Status PlayerBase::stop() {
ALOGD("PlayerBase::stop() from IPlayer");
(void)stopWithStatus();
+ return binder::Status::ok();
}
-void PlayerBase::setVolume(float vol) {
+binder::Status PlayerBase::setVolume(float vol) {
ALOGD("PlayerBase::setVolume() from IPlayer");
{
Mutex::Autolock _l(mSettingsLock);
@@ -144,9 +149,10 @@
if (status != NO_ERROR) {
ALOGW("PlayerBase::setVolume() error %d", status);
}
+ return binder::Status::fromStatusT(status);
}
-void PlayerBase::setPan(float pan) {
+binder::Status PlayerBase::setPan(float pan) {
ALOGD("PlayerBase::setPan() from IPlayer");
{
Mutex::Autolock _l(mSettingsLock);
@@ -163,22 +169,19 @@
if (status != NO_ERROR) {
ALOGW("PlayerBase::setPan() error %d", status);
}
+ return binder::Status::fromStatusT(status);
}
-void PlayerBase::setStartDelayMs(int32_t delayMs __unused) {
+binder::Status PlayerBase::setStartDelayMs(int32_t delayMs __unused) {
ALOGW("setStartDelay() is not supported");
+ return binder::Status::ok();
}
-void PlayerBase::applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration __unused,
- const sp<VolumeShaper::Operation>& operation __unused) {
+binder::Status PlayerBase::applyVolumeShaper(
+ const VolumeShaper::Configuration& configuration __unused,
+ const VolumeShaper::Operation& operation __unused) {
ALOGW("applyVolumeShaper() is not supported");
-}
-
-status_t PlayerBase::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- return BnPlayer::onTransact(code, data, reply, flags);
+ return binder::Status::ok();
}
} // namespace android
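
Because the IPlayer methods are declared oneway in the AIDL added below, the returned binder::Status is not delivered back to a remote caller; it mainly matters for in-process users of PlayerBase. A small, assumed sketch of the pattern the new signatures encourage in subclasses, keeping the internal status_t path and translating once at the binder boundary (class and helper names are hypothetical):

    binder::Status MyPlayer::setVolume(float vol) {
        status_t status = doSetVolume(vol);   // hypothetical internal helper
        return status == NO_ERROR ? binder::Status::ok()
                                  : binder::Status::fromStatusT(status);
    }
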
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index 5338059..5716727 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -20,6 +20,7 @@
#include <math.h>
#include <utils/Log.h>
#include <cutils/properties.h>
+#include <media/AudioPolicyHelper.h>
#include "media/ToneGenerator.h"
@@ -1044,7 +1045,7 @@
}
}
- ALOGV("startTone");
+ ALOGV("startTone toneType %d", toneType);
mLock.lock();
@@ -1196,9 +1197,16 @@
mpAudioTrack = new AudioTrack();
ALOGV("AudioTrack(%p) created", mpAudioTrack.get());
+ audio_attributes_t attr;
+ audio_stream_type_t streamType = mStreamType;
+ if (mStreamType == AUDIO_STREAM_VOICE_CALL) {
+ streamType = AUDIO_STREAM_DTMF;
+ }
+ stream_type_to_audio_attributes(streamType, &attr);
+
const size_t frameCount = mProcessSize;
status_t status = mpAudioTrack->set(
- mStreamType,
+ AUDIO_STREAM_DEFAULT,
0, // sampleRate
AUDIO_FORMAT_PCM_16_BIT,
AUDIO_CHANNEL_OUT_MONO,
@@ -1210,7 +1218,11 @@
0, // sharedBuffer
mThreadCanCallJava,
AUDIO_SESSION_ALLOCATE,
- AudioTrack::TRANSFER_CALLBACK);
+ AudioTrack::TRANSFER_CALLBACK,
+ nullptr,
+ AUDIO_UID_INVALID,
+ -1,
+ &attr);
if (status != NO_ERROR) {
ALOGE("AudioTrack(%p) set failed with error %d", mpAudioTrack.get(), status);
diff --git a/media/libaudioclient/TrackPlayerBase.cpp b/media/libaudioclient/TrackPlayerBase.cpp
index 48cd803..0a914fc 100644
--- a/media/libaudioclient/TrackPlayerBase.cpp
+++ b/media/libaudioclient/TrackPlayerBase.cpp
@@ -18,6 +18,8 @@
namespace android {
+using media::VolumeShaper;
+
//--------------------------------------------------------------------------------------------------
TrackPlayerBase::TrackPlayerBase() : PlayerBase(),
mPlayerVolumeL(1.0f), mPlayerVolumeR(1.0f)
@@ -103,18 +105,24 @@
}
-void TrackPlayerBase::applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation) {
+binder::Status TrackPlayerBase::applyVolumeShaper(
+ const VolumeShaper::Configuration& configuration,
+ const VolumeShaper::Operation& operation) {
+
+ sp<VolumeShaper::Configuration> spConfiguration = new VolumeShaper::Configuration(configuration);
+ sp<VolumeShaper::Operation> spOperation = new VolumeShaper::Operation(operation);
+
if (mAudioTrack != 0) {
ALOGD("TrackPlayerBase::applyVolumeShaper() from IPlayer");
- VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
+ VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(spConfiguration, spOperation);
if (status < 0) { // a non-negative value is the volume shaper id.
ALOGE("TrackPlayerBase::applyVolumeShaper() failed with status %d", status);
}
+ return binder::Status::fromStatusT(status);
} else {
ALOGD("TrackPlayerBase::applyVolumeShaper()"
- " no AudioTrack for volume control from IPlayer");
+ " no AudioTrack for volume control from IPlayer");
+ return binder::Status::ok();
}
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
new file mode 100644
index 0000000..01e0a71
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MicrophoneInfo;
+
+/* Native code must specify namespace media (media::IAudioRecord) when referring to this class */
+interface IAudioRecord {
+
+ /* After it's created the track is not active. Call start() to
+ * make it active.
+ */
+ void start(int /*AudioSystem::sync_event_t*/ event,
+ int /*audio_session_t*/ triggerSession);
+
+ /* Stop a track. If set, the callback will cease being called and
+ * obtainBuffer will return an error. Buffers that are already released
+ * will be processed, unless flush() is called.
+ */
+ void stop();
+
+ /* Get a list of current active microphones.
+ */
+ void getActiveMicrophones(out MicrophoneInfo[] activeMicrophones);
+}
diff --git a/media/libaudioclient/aidl/android/media/IPlayer.aidl b/media/libaudioclient/aidl/android/media/IPlayer.aidl
new file mode 100644
index 0000000..a90fcdd
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/IPlayer.aidl
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.VolumeShaper.Configuration;
+import android.media.VolumeShaper.Operation;
+
+/**
+ * @hide
+ */
+interface IPlayer {
+ oneway void start();
+ oneway void pause();
+ oneway void stop();
+ oneway void setVolume(float vol);
+ oneway void setPan(float pan);
+ oneway void setStartDelayMs(int delayMs);
+ oneway void applyVolumeShaper(in Configuration configuration,
+ in Operation operation);
+}
diff --git a/media/libaudioclient/aidl/android/media/MicrophoneInfo.aidl b/media/libaudioclient/aidl/android/media/MicrophoneInfo.aidl
new file mode 100644
index 0000000..d6e46cb
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/MicrophoneInfo.aidl
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+parcelable MicrophoneInfo cpp_header "media/MicrophoneInfo.h";
diff --git a/media/libaudioclient/aidl/android/media/VolumeShaper/Configuration.aidl b/media/libaudioclient/aidl/android/media/VolumeShaper/Configuration.aidl
new file mode 100644
index 0000000..fd0e60f
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/VolumeShaper/Configuration.aidl
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.VolumeShaper;
+
+parcelable Configuration cpp_header "media/VolumeShaper.h";
diff --git a/media/libaudioclient/aidl/android/media/VolumeShaper/Operation.aidl b/media/libaudioclient/aidl/android/media/VolumeShaper/Operation.aidl
new file mode 100644
index 0000000..4290d9d
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/VolumeShaper/Operation.aidl
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.VolumeShaper;
+
+parcelable Operation cpp_header "media/VolumeShaper.h";
diff --git a/media/libaudioclient/aidl/android/media/VolumeShaper/State.aidl b/media/libaudioclient/aidl/android/media/VolumeShaper/State.aidl
new file mode 100644
index 0000000..f6a22b8
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/VolumeShaper/State.aidl
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media.VolumeShaper;
+
+parcelable State cpp_header "media/VolumeShaper.h";
diff --git a/media/libaudioclient/include/media/AudioClient.h b/media/libaudioclient/include/media/AudioClient.h
index 9efd76d..247af9e 100644
--- a/media/libaudioclient/include/media/AudioClient.h
+++ b/media/libaudioclient/include/media/AudioClient.h
@@ -18,19 +18,38 @@
#ifndef ANDROID_AUDIO_CLIENT_H
#define ANDROID_AUDIO_CLIENT_H
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
#include <system/audio.h>
#include <utils/String16.h>
namespace android {
-class AudioClient {
+class AudioClient : public Parcelable {
public:
AudioClient() :
- clientUid(-1), clientPid(-1), packageName("") {}
+ clientUid(-1), clientPid(-1), clientTid(-1), packageName("") {}
uid_t clientUid;
pid_t clientPid;
+ pid_t clientTid;
String16 packageName;
+
+ status_t readFromParcel(const Parcel *parcel) override {
+ clientUid = parcel->readInt32();
+ clientPid = parcel->readInt32();
+ clientTid = parcel->readInt32();
+ packageName = parcel->readString16();
+ return NO_ERROR;
+ }
+
+ status_t writeToParcel(Parcel *parcel) const override {
+ parcel->writeInt32(clientUid);
+ parcel->writeInt32(clientPid);
+ parcel->writeInt32(clientTid);
+ parcel->writeString16(packageName);
+ return NO_ERROR;
+ }
};
}; // namespace android
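
A minimal usage sketch (assumed, not part of this change) of the new Parcelable behaviour: the client descriptor can now be flattened into and restored from a Parcel directly.

    AudioClient client;
    client.clientUid = getuid();
    client.clientPid = getpid();
    client.clientTid = gettid();
    client.packageName = String16("com.example.app");   // hypothetical package name

    Parcel parcel;
    client.writeToParcel(&parcel);
    parcel.setDataPosition(0);

    AudioClient restored;
    restored.readFromParcel(&parcel);
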
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
index 2bd2d01..cf7d90f 100644
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ b/media/libaudioclient/include/media/AudioMixer.h
@@ -18,14 +18,17 @@
#ifndef ANDROID_AUDIO_MIXER_H
#define ANDROID_AUDIO_MIXER_H
+#include <pthread.h>
+#include <sstream>
#include <stdint.h>
#include <sys/types.h>
+#include <unordered_map>
#include <media/AudioBufferProvider.h>
#include <media/AudioResampler.h>
#include <media/AudioResamplerPublic.h>
#include <media/BufferProviders.h>
-#include <media/nbaio/NBLog.h>
+#include <media/nblog/NBLog.h>
#include <system/audio.h>
#include <utils/Compat.h>
#include <utils/threads.h>
@@ -33,6 +36,9 @@
// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
+// This must match frameworks/av/services/audioflinger/Configuration.h
+#define FLOAT_AUX
+
namespace android {
// ----------------------------------------------------------------------------
@@ -40,20 +46,10 @@
class AudioMixer
{
public:
- AudioMixer(size_t frameCount, uint32_t sampleRate,
- uint32_t maxNumTracks = MAX_NUM_TRACKS);
-
- /*virtual*/ ~AudioMixer(); // non-virtual saves a v-table, restore if sub-classed
-
-
- // This mixer has a hard-coded upper limit of 32 active track inputs.
- // Adding support for > 32 tracks would require more than simply changing this value.
- static const uint32_t MAX_NUM_TRACKS = 32;
- // maximum number of channels supported by the mixer
-
+ // Do not change these unless underlying code changes.
// This mixer has a hard-coded upper limit of 8 channels for output.
- static const uint32_t MAX_NUM_CHANNELS = 8;
- static const uint32_t MAX_NUM_VOLUMES = 2; // stereo volume only
+ static constexpr uint32_t MAX_NUM_CHANNELS = FCC_8;
+ static constexpr uint32_t MAX_NUM_VOLUMES = FCC_2; // stereo volume only
// maximum number of channels supported for the content
static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
@@ -61,12 +57,6 @@
static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
enum { // names
-
- // track names (MAX_NUM_TRACKS units)
- TRACK0 = 0x1000,
-
- // 0x2000 is unused
-
// setParameter targets
TRACK = 0x3000,
RESAMPLE = 0x3001,
@@ -105,17 +95,33 @@
// parameter 'value' is a pointer to the new playback rate.
};
+ AudioMixer(size_t frameCount, uint32_t sampleRate)
+ : mSampleRate(sampleRate)
+ , mFrameCount(frameCount) {
+ pthread_once(&sOnceControl, &sInitRoutine);
+ }
- // For all APIs with "name": TRACK0 <= name < TRACK0 + MAX_NUM_TRACKS
+ // Create a new track in the mixer.
+ //
+ // \param name a unique user-provided integer associated with the track.
+ // If name already exists, the function will abort.
+ // \param channelMask output channel mask.
+ // \param format PCM format
+ // \param sessionId Session id for the track. Tracks with the same
+ // session id will be submixed together.
+ //
+ // \return OK on success.
+ // BAD_VALUE if the format does not satisfy isValidFormat()
+ // or the channelMask does not satisfy isValidChannelMask().
+ status_t create(
+ int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId);
- // Allocate a track name. Returns new track name if successful, -1 on failure.
- // The failure could be because of an invalid channelMask or format, or that
- // the track capacity of the mixer is exceeded.
- int getTrackName(audio_channel_mask_t channelMask,
- audio_format_t format, int sessionId);
+ bool exists(int name) const {
+ return mTracks.count(name) > 0;
+ }
- // Free an allocated track by name
- void deleteTrackName(int name);
+ // Free an allocated track by name.
+ void destroy(int name);
// Enable or disable an allocated track by name
void enable(int name);
@@ -124,13 +130,26 @@
void setParameter(int name, int target, int param, void *value);
void setBufferProvider(int name, AudioBufferProvider* bufferProvider);
- void process();
- uint32_t trackNames() const { return mTrackNames; }
+ void process() {
+ (this->*mHook)();
+ }
size_t getUnreleasedFrames(int name) const;
- static inline bool isValidPcmTrackFormat(audio_format_t format) {
+ std::string trackNames() const {
+ std::stringstream ss;
+ for (const auto &pair : mTracks) {
+ ss << pair.first << " ";
+ }
+ return ss.str();
+ }
+
+ void setNBLogWriter(NBLog::Writer *logWriter) {
+ mNBLogWriter = logWriter;
+ }
+
+ static inline bool isValidFormat(audio_format_t format) {
switch (format) {
case AUDIO_FORMAT_PCM_8_BIT:
case AUDIO_FORMAT_PCM_16_BIT:
@@ -143,8 +162,23 @@
}
}
+ static inline bool isValidChannelMask(audio_channel_mask_t channelMask) {
+ return audio_channel_mask_is_valid(channelMask); // the RemixBufferProvider is flexible.
+ }
+
private:
+ /* For multi-format functions (calls template functions
+ * in AudioMixerOps.h). The template parameters are as follows:
+ *
+ * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * USEFLOATVOL (set to true if float volume is used)
+ * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+
enum {
// FIXME this representation permits up to 8 channels
NEEDS_CHANNEL_COUNT__MASK = 0x00000007,
@@ -161,14 +195,67 @@
NEEDS_AUX = 0x00010000,
};
- struct state_t;
- struct track_t;
+ // hook types
+ enum {
+ PROCESSTYPE_NORESAMPLEONETRACK, // others set elsewhere
+ };
- typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
- int32_t* aux);
- static const int BLOCKSIZE = 16; // 4 cache lines
+ enum {
+ TRACKTYPE_NOP,
+ TRACKTYPE_RESAMPLE,
+ TRACKTYPE_NORESAMPLE,
+ TRACKTYPE_NORESAMPLEMONO,
+ };
- struct track_t {
+ // process hook functionality
+ using process_hook_t = void(AudioMixer::*)();
+
+ struct Track;
+ using hook_t = void(Track::*)(int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
+
+ struct Track {
+ Track()
+ : bufferProvider(nullptr)
+ {
+ // TODO: move additional initialization here.
+ }
+
+ ~Track()
+ {
+ // bufferProvider, mInputBufferProvider need not be deleted.
+ mResampler.reset(nullptr);
+ // Ensure the order of destruction of buffer providers as they
+ // release the upstream provider in the destructor.
+ mTimestretchBufferProvider.reset(nullptr);
+ mPostDownmixReformatBufferProvider.reset(nullptr);
+ mDownmixerBufferProvider.reset(nullptr);
+ mReformatBufferProvider.reset(nullptr);
+ }
+
+ bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
+ bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
+ bool doesResample() const { return mResampler.get() != nullptr; }
+ void resetResampler() { if (mResampler.get() != nullptr) mResampler->reset(); }
+ void adjustVolumeRamp(bool aux, bool useFloat = false);
+ size_t getUnreleasedFrames() const { return mResampler.get() != nullptr ?
+ mResampler->getUnreleasedFrames() : 0; };
+
+ status_t prepareForDownmix();
+ void unprepareForDownmix();
+ status_t prepareForReformat();
+ void unprepareForReformat();
+ bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
+ void reconfigureBufferProviders();
+
+ static hook_t getTrackHook(int trackType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+
+ void track__nop(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+
+ template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
+ typename TO, typename TI, typename TA>
+ void volumeMix(TO *out, size_t outFrames, const TI *in, TA *aux, bool ramp);
+
uint32_t needs;
// TODO: Eventually remove legacy integer volume settings
@@ -178,16 +265,11 @@
};
int32_t prevVolume[MAX_NUM_VOLUMES];
-
- // 16-byte boundary
-
int32_t volumeInc[MAX_NUM_VOLUMES];
int32_t auxInc;
int32_t prevAuxLevel;
-
- // 16-byte boundary
-
int16_t auxLevel; // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
+
uint16_t frameCount;
uint8_t channelCount; // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
@@ -199,22 +281,16 @@
// for how the Track buffer provider is wrapped by another one when dowmixing is required
AudioBufferProvider* bufferProvider;
- // 16-byte boundary
-
mutable AudioBufferProvider::Buffer buffer; // 8 bytes
hook_t hook;
- const void* in; // current location in buffer
+ const void *mIn; // current location in buffer
- // 16-byte boundary
-
- AudioResampler* resampler;
+ std::unique_ptr<AudioResampler> mResampler;
uint32_t sampleRate;
int32_t* mainBuffer;
int32_t* auxBuffer;
- // 16-byte boundary
-
/* Buffer providers are constructed to translate the track input data as needed.
*
* TODO: perhaps make a single PlaybackConverterProvider class to move
@@ -225,17 +301,17 @@
* match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
* requires reformat. For example, it may convert floating point input to
* PCM_16_bit if that's required by the downmixer.
- * 3) downmixerBufferProvider: If not NULL, performs the channel remixing to match
+ * 3) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
* the number of channels required by the mixer sink.
* 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
* the downmixer requirements to the mixer engine input requirements.
* 5) mTimestretchBufferProvider: Adds timestretching for playback rate
*/
AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
- PassthruBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting.
- PassthruBufferProvider* downmixerBufferProvider; // wrapper for channel conversion.
- PassthruBufferProvider* mPostDownmixReformatBufferProvider;
- PassthruBufferProvider* mTimestretchBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mTimestretchBufferProvider;
int32_t sessionId;
@@ -260,129 +336,74 @@
AudioPlaybackRate mPlaybackRate;
- bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
- bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
- bool doesResample() const { return resampler != NULL; }
- void resetResampler() { if (resampler != NULL) resampler->reset(); }
- void adjustVolumeRamp(bool aux, bool useFloat = false);
- size_t getUnreleasedFrames() const { return resampler != NULL ?
- resampler->getUnreleasedFrames() : 0; };
+ private:
+ // hooks
+ void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+ void track__16BitsStereo(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+ void track__16BitsMono(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- status_t prepareForDownmix();
- void unprepareForDownmix();
- status_t prepareForReformat();
- void unprepareForReformat();
- bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
- void reconfigureBufferProviders();
+ void volumeRampStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+ void volumeStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+
+ // multi-format track hooks
+ template <int MIXTYPE, typename TO, typename TI, typename TA>
+ void track__Resample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
+ template <int MIXTYPE, typename TO, typename TI, typename TA>
+ void track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
};
- typedef void (*process_hook_t)(state_t* state);
-
- // pad to 32-bytes to fill cache line
- struct state_t {
- uint32_t enabledTracks;
- uint32_t needsChanged;
- size_t frameCount;
- process_hook_t hook; // one of process__*, never NULL
- int32_t *outputTemp;
- int32_t *resampleTemp;
- NBLog::Writer* mNBLogWriter; // associated NBLog::Writer or &mDummyLog
- int32_t reserved[1];
- // FIXME allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS
- track_t tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
- };
-
- // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
- uint32_t mTrackNames;
-
- // bitmask of configured track names; ~0 if maxNumTracks == MAX_NUM_TRACKS,
- // but will have fewer bits set if maxNumTracks < MAX_NUM_TRACKS
- const uint32_t mConfiguredNames;
-
- const uint32_t mSampleRate;
-
- NBLog::Writer mDummyLogWriter;
-public:
- // Called by FastMixer to inform AudioMixer of it's associated NBLog::Writer.
- // FIXME It would be safer to use TLS for this, so we don't accidentally use wrong one.
- void setNBLogWriter(NBLog::Writer* log);
-private:
- state_t mState __attribute__((aligned(32)));
-
- // Call after changing either the enabled status of a track, or parameters of an enabled track.
- // OK to call more often than that, but unnecessary.
- void invalidateState(uint32_t mask);
+ // TODO: remove BLOCKSIZE unit of processing - it isn't needed anymore.
+ static constexpr int BLOCKSIZE = 16;
bool setChannelMasks(int name,
audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
- static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
- int32_t* aux);
- static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
- int32_t* aux);
- static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
- int32_t* aux);
- static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
- int32_t* aux);
- static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
- int32_t* aux);
+ // Called when track info changes and a new process hook should be determined.
+ void invalidate() {
+ mHook = &AudioMixer::process__validate;
+ }
- static void process__validate(state_t* state);
- static void process__nop(state_t* state);
- static void process__genericNoResampling(state_t* state);
- static void process__genericResampling(state_t* state);
- static void process__OneTrack16BitsStereoNoResampling(state_t* state);
+ void process__validate();
+ void process__nop();
+ void process__genericNoResampling();
+ void process__genericResampling();
+ void process__oneTrack16BitsStereoNoResampling();
- static pthread_once_t sOnceControl;
- static void sInitRoutine();
-
- /* multi-format volume mixing function (calls template functions
- * in AudioMixerOps.h). The template parameters are as follows:
- *
- * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * USEFLOATVOL (set to true if float volume is used)
- * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
- template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
- typename TO, typename TI, typename TA>
- static void volumeMix(TO *out, size_t outFrames,
- const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t);
-
- // multi-format process hooks
template <int MIXTYPE, typename TO, typename TI, typename TA>
- static void process_NoResampleOneTrack(state_t* state);
+ void process__noResampleOneTrack();
- // multi-format track hooks
- template <int MIXTYPE, typename TO, typename TI, typename TA>
- static void track__Resample(track_t* t, TO* out, size_t frameCount,
- TO* temp __unused, TA* aux);
- template <int MIXTYPE, typename TO, typename TI, typename TA>
- static void track__NoResample(track_t* t, TO* out, size_t frameCount,
- TO* temp __unused, TA* aux);
+ static process_hook_t getProcessHook(int processType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
void *in, audio_format_t mixerInFormat, size_t sampleCount);
- // hook types
- enum {
- PROCESSTYPE_NORESAMPLEONETRACK,
- };
- enum {
- TRACKTYPE_NOP,
- TRACKTYPE_RESAMPLE,
- TRACKTYPE_NORESAMPLE,
- TRACKTYPE_NORESAMPLEMONO,
- };
+ static void sInitRoutine();
- // functions for determining the proper process and track hooks.
- static process_hook_t getProcessHook(int processType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
- static hook_t getTrackHook(int trackType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+ // initialization constants
+ const uint32_t mSampleRate;
+ const size_t mFrameCount;
+
+ NBLog::Writer *mNBLogWriter = nullptr; // associated NBLog::Writer
+
+ process_hook_t mHook = &AudioMixer::process__nop; // one of process__*, never nullptr
+
+ // the size of the type (int32_t) should be the largest of all types supported
+ // by the mixer.
+ std::unique_ptr<int32_t[]> mOutputTemp;
+ std::unique_ptr<int32_t[]> mResampleTemp;
+
+ // track names grouped by main buffer, in no particular order of main buffer.
+ // however names for a particular main buffer are in order (by construction).
+ std::unordered_map<void * /* mainBuffer */, std::vector<int /* name */>> mGroups;
+
+ // track names that are enabled, in increasing order (by construction).
+ std::vector<int /* name */> mEnabled;
+
+ // track smart pointers, by name, in increasing order of name.
+ std::map<int /* name */, std::shared_ptr<Track>> mTracks;
+
+ static pthread_once_t sOnceControl; // initialized in constructor by first new
};
// ----------------------------------------------------------------------------
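
To make the reshaped interface above concrete, a hedged usage sketch of the new track lifecycle: tracks are created explicitly under a caller-chosen name instead of being allocated by the mixer (frameCount, sampleRate, provider and mainBuffer are assumed to exist; MAIN_BUFFER is one of the existing TRACK setParameter parameters):

    AudioMixer mixer(frameCount, sampleRate);
    constexpr int kName = 1;                        // caller-chosen track name
    if (mixer.create(kName, AUDIO_CHANNEL_OUT_STEREO,
                     AUDIO_FORMAT_PCM_16_BIT, AUDIO_SESSION_NONE) == OK) {
        mixer.setBufferProvider(kName, &provider);  // some AudioBufferProvider
        mixer.setParameter(kName, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, mainBuffer);
        mixer.enable(kName);
        mixer.process();                            // runs the currently selected hook
        mixer.destroy(kName);
    }
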
diff --git a/media/libaudioclient/include/media/AudioParameter.h b/media/libaudioclient/include/media/AudioParameter.h
index 1ace607..967d895 100644
--- a/media/libaudioclient/include/media/AudioParameter.h
+++ b/media/libaudioclient/include/media/AudioParameter.h
@@ -58,6 +58,12 @@
static const char * const keyMonoOutput;
static const char * const keyStreamHwAvSync;
+ // keys for presentation selection
+ // keyPresentationId: Audio presentation identifier
+ // keyProgramId: Audio presentation program identifier
+ static const char * const keyPresentationId;
+ static const char * const keyProgramId;
+
// keyStreamConnect / Disconnect: value is an int in audio_devices_t
static const char * const keyStreamConnect;
static const char * const keyStreamDisconnect;
@@ -75,6 +81,11 @@
static const char * const valueListSeparator;
+ // keyReconfigA2dp: Ask HwModule to reconfigure A2DP offloaded codec
+ // keyReconfigA2dpSupported: Query if HwModule supports A2DP offload codec config
+ static const char * const keyReconfigA2dp;
+ static const char * const keyReconfigA2dpSupported;
+
String8 toString() const { return toStringImpl(true); }
String8 keysToString() const { return toStringImpl(false); }
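
A short, assumed example of how the new presentation keys would typically be used by a client selecting an audio presentation (ioHandle, presentationId and programId are placeholders):

    AudioParameter param;
    param.addInt(String8(AudioParameter::keyPresentationId), presentationId);
    param.addInt(String8(AudioParameter::keyProgramId), programId);
    AudioSystem::setParameters(ioHandle, param.toString());
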
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index dd72170..cf446a5 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -17,12 +17,18 @@
#ifndef ANDROID_AUDIORECORD_H
#define ANDROID_AUDIORECORD_H
+#include <binder/IMemory.h>
#include <cutils/sched_policy.h>
#include <media/AudioSystem.h>
#include <media/AudioTimestamp.h>
-#include <media/IAudioRecord.h>
+#include <media/MediaAnalyticsItem.h>
#include <media/Modulo.h>
+#include <media/MicrophoneInfo.h>
+#include <utils/RefBase.h>
#include <utils/threads.h>
+#include <vector>
+
+#include "android/media/IAudioRecord.h"
namespace android {
@@ -182,7 +188,8 @@
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL);
+ const audio_attributes_t* pAttributes = NULL,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
/* Terminates the AudioRecord and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioRecord.
@@ -220,7 +227,8 @@
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL);
+ const audio_attributes_t* pAttributes = NULL,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
/* Result of constructing the AudioRecord. This must be checked for successful initialization
* before using any AudioRecord API (except for set()), because using
@@ -250,6 +258,11 @@
*/
uint32_t getNotificationPeriodInFrames() const { return mNotificationFramesAct; }
+ /*
+ * return metrics information for the current instance.
+ */
+ status_t getMetrics(MediaAnalyticsItem * &item);
+
/* After it's created the track is not active. Call start() to
* make it active. If set, the callback will start being called.
* If event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until
@@ -516,6 +529,16 @@
/* Get the flags */
audio_input_flags_t getFlags() const { AutoMutex _l(mLock); return mFlags; }
+ /* Get active microphones. An empty vector of MicrophoneInfo is passed in as a parameter;
+ * it is filled with data when the HAL is queried.
REMOVED_BY_SPLIT
+ */
+ status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+
+ /*
+ * Dumps the state of an audio record.
+ */
+ status_t dump(int fd, const Vector<String16>& args) const;
+
private:
/* copying audio record objects is not allowed */
AudioRecord(const AudioRecord& other);
@@ -565,7 +588,7 @@
// caller must hold lock on mLock for all _l methods
- status_t openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
+ status_t createRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
// FIXME enum is faster than strcmp() for parameter 'from'
status_t restoreRecord_l(const char *from);
@@ -635,7 +658,7 @@
// Next 5 fields may be changed if IAudioRecord is re-created, but always != 0
// provided the initial set() was successful
- sp<IAudioRecord> mAudioRecord;
+ sp<media::IAudioRecord> mAudioRecord;
sp<IMemory> mCblkMemory;
audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
sp<IMemory> mBufferMemory;
@@ -677,8 +700,40 @@
// May not match the app selection depending on other
// activity and connected devices
wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
- audio_port_handle_t mPortId; // unique ID allocated by audio policy
+private:
+ class MediaMetrics {
+ public:
+ MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiorecord")),
+ mCreatedNs(systemTime(SYSTEM_TIME_REALTIME)),
+ mStartedNs(0), mDurationNs(0), mCount(0),
+ mLastError(NO_ERROR) {
+ }
+ ~MediaMetrics() {
+ // mAnalyticsItem alloc failure will be flagged in the constructor
+ // don't log empty records
+ if (mAnalyticsItem->count() > 0) {
+ mAnalyticsItem->selfrecord();
+ }
+ }
+ void gather(const AudioRecord *record);
+ MediaAnalyticsItem *dup() { return mAnalyticsItem->dup(); }
+
+ void logStart(nsecs_t when) { mStartedNs = when; mCount++; }
+ void logStop(nsecs_t when) { mDurationNs += (when-mStartedNs); mStartedNs = 0;}
+ void markError(status_t errcode, const char *func)
+ { mLastError = errcode; mLastErrorFunc = func;}
+ private:
+ std::unique_ptr<MediaAnalyticsItem> mAnalyticsItem;
+ nsecs_t mCreatedNs; // XXX: perhaps not worth it in production
+ nsecs_t mStartedNs;
+ nsecs_t mDurationNs;
+ int32_t mCount;
+
+ status_t mLastError;
+ std::string mLastErrorFunc;
+ };
+ MediaMetrics mMediaMetrics;
};
}; // namespace android
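
A brief usage sketch (illustrative, assuming `record` is a successfully initialized AudioRecord) of the microphone query added above:

    std::vector<media::MicrophoneInfo> mics;
    if (record->getActiveMicrophones(&mics) == NO_ERROR) {
        ALOGD("capturing through %zu active microphone(s)", mics.size());
    }
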
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 5a81d83..4c0f796 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -23,11 +23,13 @@
#include <media/AudioIoDescriptor.h>
#include <media/IAudioFlingerClient.h>
#include <media/IAudioPolicyServiceClient.h>
+#include <media/MicrophoneInfo.h>
#include <system/audio.h>
#include <system/audio_effect.h>
#include <system/audio_policy.h>
#include <utils/Errors.h>
#include <utils/Mutex.h>
+#include <vector>
namespace android {
@@ -106,6 +108,9 @@
static float linearToLog(int volume);
static int logToLinear(float volume);
+ static size_t calculateMinFrameCount(
+ uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
+ uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/);
// Returned samplingRate and frameCount output values are guaranteed
// to be non-zero if status == NO_ERROR
@@ -209,18 +214,11 @@
static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
- // Client must successfully hand off the handle reference to AudioFlinger via createTrack(),
- // or release it with releaseOutput().
- static audio_io_handle_t getOutput(audio_stream_type_t stream,
- uint32_t samplingRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL);
static status_t getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
+ pid_t pid,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t flags,
@@ -236,24 +234,23 @@
audio_stream_type_t stream,
audio_session_t session);
- // Client must successfully hand off the handle reference to AudioFlinger via openRecord(),
+ // Client must successfully hand off the handle reference to AudioFlinger via createRecord(),
// or release it with releaseInput().
static status_t getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
pid_t pid,
uid_t uid,
+ const String16& opPackageName,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId);
- static status_t startInput(audio_io_handle_t input,
- audio_session_t session);
- static status_t stopInput(audio_io_handle_t input,
- audio_session_t session);
- static void releaseInput(audio_io_handle_t input,
- audio_session_t session);
+ static status_t startInput(audio_port_handle_t portId,
+ bool *silenced);
+ static status_t stopInput(audio_port_handle_t portId);
+ static void releaseInput(audio_port_handle_t portId);
static status_t initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax);
@@ -286,7 +283,7 @@
static uint32_t getPrimaryOutputSamplingRate();
static size_t getPrimaryOutputFrameCount();
- static status_t setLowRamDevice(bool isLowRamDevice);
+ static status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory);
// Check if hw offload is possible for given format, stream type, sample rate,
// bit rate, duration, video and streaming or offload property is enabled
@@ -341,6 +338,17 @@
static float getStreamVolumeDB(
audio_stream_type_t stream, int index, audio_devices_t device);
+ static status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ // numSurroundFormats holds the maximum number of formats and bool value allowed in the array.
+ // When numSurroundFormats is 0, surroundFormats and surroundFormatsEnabled will not be
+ // populated. The actual number of surround formats should be returned at numSurroundFormats.
+ static status_t getSurroundFormats(unsigned int *numSurroundFormats,
+ audio_format_t *surroundFormats,
+ bool *surroundFormatsEnabled,
+ bool reported);
+ static status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
+
// ----------------------------------------------------------------------------
class AudioPortCallback : public RefBase
@@ -432,6 +440,7 @@
int addAudioPortCallback(const sp<AudioPortCallback>& callback);
int removeAudioPortCallback(const sp<AudioPortCallback>& callback);
+ bool isAudioPortCbEnabled() const { return (mAudioPortCallbacks.size() != 0); }
// DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
@@ -450,6 +459,7 @@
Vector <sp <AudioPortCallback> > mAudioPortCallbacks;
};
+ static audio_io_handle_t getOutput(audio_stream_type_t stream);
static const sp<AudioFlingerClient> getAudioFlingerClient();
static sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
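
The getSurroundFormats() declaration above documents a two-call convention: query with a count of 0 to learn how many formats exist, then call again with buffers of that size. A usage sketch under that assumption; passing null buffers on the count query is inferred from the header comment, not stated explicitly:

#include <media/AudioSystem.h>
#include <memory>
#include <vector>

using namespace android;

status_t dumpSurroundFormats() {
    unsigned int numFormats = 0;
    // First call with a count of 0: only the number of formats is reported back.
    // Assumption: null buffers are accepted when the count is 0.
    status_t status = AudioSystem::getSurroundFormats(&numFormats, nullptr, nullptr,
                                                      false /* reported */);
    if (status != NO_ERROR || numFormats == 0) {
        return status;
    }

    std::vector<audio_format_t> formats(numFormats);
    std::unique_ptr<bool[]> enabled(new bool[numFormats]);
    // Second call with properly sized buffers; numFormats is updated to the
    // actual number of entries filled in.
    status = AudioSystem::getSurroundFormats(&numFormats, formats.data(),
                                             enabled.get(), false /* reported */);
    return status;
}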
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 47d87e9..3eb627d 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -22,6 +22,7 @@
#include <media/AudioTimestamp.h>
#include <media/IAudioTrack.h>
#include <media/AudioResamplerPublic.h>
+#include <media/MediaAnalyticsItem.h>
#include <media/Modulo.h>
#include <utils/threads.h>
@@ -218,6 +219,8 @@
* maxRequiredSpeed playback. Values less than 1.0f and greater than
* AUDIO_TIMESTRETCH_SPEED_MAX will be clamped. For non-PCM tracks
* and direct or offloaded tracks, this parameter is ignored.
+ * selectedDeviceId: Selected device id of the app which initially requested the AudioTrack
+ * to open with a specific device.
* threadCanCallJava: Not present in parameter list, and so is fixed at false.
*/
@@ -237,7 +240,8 @@
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
- float maxRequiredSpeed = 1.0f);
+ float maxRequiredSpeed = 1.0f,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
/* Creates an audio track and registers it with AudioFlinger.
* With this constructor, the track is configured for static buffer mode.
@@ -313,7 +317,8 @@
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL,
bool doNotReconnect = false,
- float maxRequiredSpeed = 1.0f);
+ float maxRequiredSpeed = 1.0f,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
/* Result of constructing the AudioTrack. This must be checked for successful initialization
* before using any AudioTrack API (except for set()), because using
@@ -381,6 +386,11 @@
/* Return the static buffer specified in constructor or set(), or 0 for streaming mode */
sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
+ /*
+ * return metrics information for the current track.
+ */
+ status_t getMetrics(MediaAnalyticsItem * &item);
+
/* After it's created the track is not active. Call start() to
* make it active. If set, the callback will start being called.
* If the track was previously paused, volume is ramped up over the first mix buffer.
@@ -748,12 +758,15 @@
status_t setParameters(const String8& keyValuePairs);
/* Sets the volume shaper object */
- VolumeShaper::Status applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation);
+ media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation);
/* Gets the volume shaper state */
- sp<VolumeShaper::State> getVolumeShaperState(int id);
+ sp<media::VolumeShaper::State> getVolumeShaperState(int id);
+
+ /* Selects the presentation (if available) */
+ status_t selectPresentation(int presentationId, int programId);
/* Get parameters */
String8 getParameters(const String8& keys);
@@ -990,7 +1003,7 @@
sp<IAudioTrack> mAudioTrack;
sp<IMemory> mCblkMemory;
audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
- audio_io_handle_t mOutput; // returned by AudioSystem::getOutput()
+ audio_io_handle_t mOutput; // returned by AudioSystem::getOutputForAttr()
sp<AudioTrackThread> mAudioTrackThread;
bool mThreadCanCallJava;
@@ -1160,7 +1173,7 @@
// May not match the app selection depending on other
// activity and connected devices.
- sp<VolumeHandler> mVolumeHandler;
+ sp<media::VolumeHandler> mVolumeHandler;
private:
class DeathNotifier : public IBinder::DeathRecipient {
@@ -1178,7 +1191,25 @@
pid_t mClientPid;
wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
- audio_port_handle_t mPortId; // unique ID allocated by audio policy
+
+private:
+ class MediaMetrics {
+ public:
+ MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiotrack")) {
+ }
+ ~MediaMetrics() {
+ // mAnalyticsItem alloc failure will be flagged in the constructor
+ // don't log empty records
+ if (mAnalyticsItem->count() > 0) {
+ mAnalyticsItem->selfrecord();
+ }
+ }
+ void gather(const AudioTrack *track);
+ MediaAnalyticsItem *dup() { return mAnalyticsItem->dup(); }
+ private:
+ std::unique_ptr<MediaAnalyticsItem> mAnalyticsItem;
+ };
+ MediaMetrics mMediaMetrics;
};
}; // namespace android
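
getMetrics() above hands back a MediaAnalyticsItem produced by MediaMetrics::dup(). A sketch of reading it from client code; the caller-owns-the-copy assumption is ours and should be checked against the corresponding AudioTrack.cpp change:

#define LOG_TAG "TrackMetricsSketch"
#include <media/AudioTrack.h>
#include <media/MediaAnalyticsItem.h>
#include <utils/Log.h>

using namespace android;

void inspectTrackMetrics(const sp<AudioTrack>& track) {
    MediaAnalyticsItem *item = nullptr;
    if (track->getMetrics(item) == NO_ERROR && item != nullptr) {
        ALOGI("audiotrack metrics: %d attributes", (int) item->count());
        delete item;   // assumption: the duplicated item belongs to the caller
    }
}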
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 0ad4231..e6bf72f 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -24,8 +24,10 @@
#include <utils/RefBase.h>
#include <utils/Errors.h>
#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <media/AudioClient.h>
#include <media/IAudioTrack.h>
-#include <media/IAudioRecord.h>
#include <media/IAudioFlingerClient.h>
#include <system/audio.h>
#include <system/audio_effect.h>
@@ -33,6 +35,10 @@
#include <media/IEffect.h>
#include <media/IEffectClient.h>
#include <utils/String8.h>
+#include <media/MicrophoneInfo.h>
+#include <vector>
+
+#include "android/media/IAudioRecord.h"
namespace android {
@@ -43,6 +49,271 @@
public:
DECLARE_META_INTERFACE(AudioFlinger);
+ /* CreateTrackInput contains all input arguments sent by AudioTrack to AudioFlinger
+ * when calling createTrack() including arguments that will be updated by AudioFlinger
+ * and returned in CreateTrackOutput object
+ */
+ class CreateTrackInput : public Parcelable {
+ public:
+ status_t readFromParcel(const Parcel *parcel) override {
+ /* input arguments*/
+ memset(&attr, 0, sizeof(audio_attributes_t));
+ if (parcel->read(&attr, sizeof(audio_attributes_t)) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
+ attr.tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE -1] = '\0';
+ memset(&config, 0, sizeof(audio_config_t));
+ if (parcel->read(&config, sizeof(audio_config_t)) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
+ if (clientInfo.readFromParcel(parcel) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
+ if (parcel->readInt32() != 0) {
+ sharedBuffer = interface_cast<IMemory>(parcel->readStrongBinder());
+ if (sharedBuffer == 0 || sharedBuffer->pointer() == NULL) {
+ return BAD_VALUE;
+ }
+ }
+ notificationsPerBuffer = parcel->readInt32();
+ speed = parcel->readFloat();
+
+ /* input/output arguments*/
+ (void)parcel->read(&flags, sizeof(audio_output_flags_t));
+ frameCount = parcel->readInt64();
+ notificationFrameCount = parcel->readInt64();
+ (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->read(&sessionId, sizeof(audio_session_t));
+ return NO_ERROR;
+ }
+
+ status_t writeToParcel(Parcel *parcel) const override {
+ /* input arguments*/
+ (void)parcel->write(&attr, sizeof(audio_attributes_t));
+ (void)parcel->write(&config, sizeof(audio_config_t));
+ (void)clientInfo.writeToParcel(parcel);
+ if (sharedBuffer != 0) {
+ (void)parcel->writeInt32(1);
+ (void)parcel->writeStrongBinder(IInterface::asBinder(sharedBuffer));
+ } else {
+ (void)parcel->writeInt32(0);
+ }
+ (void)parcel->writeInt32(notificationsPerBuffer);
+ (void)parcel->writeFloat(speed);
+
+ /* input/output arguments*/
+ (void)parcel->write(&flags, sizeof(audio_output_flags_t));
+ (void)parcel->writeInt64(frameCount);
+ (void)parcel->writeInt64(notificationFrameCount);
+ (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->write(&sessionId, sizeof(audio_session_t));
+ return NO_ERROR;
+ }
+
+ /* input */
+ audio_attributes_t attr;
+ audio_config_t config;
+ AudioClient clientInfo;
+ sp<IMemory> sharedBuffer;
+ uint32_t notificationsPerBuffer;
+ float speed;
+
+ /* input/output */
+ audio_output_flags_t flags;
+ size_t frameCount;
+ size_t notificationFrameCount;
+ audio_port_handle_t selectedDeviceId;
+ audio_session_t sessionId;
+ };
+
+ /* CreateTrackOutput contains all output arguments returned by AudioFlinger to AudioTrack
+ * when calling createTrack() including arguments that were passed as I/O for update by
+ * CreateTrackInput.
+ */
+ class CreateTrackOutput : public Parcelable {
+ public:
+ status_t readFromParcel(const Parcel *parcel) override {
+ /* input/output arguments*/
+ (void)parcel->read(&flags, sizeof(audio_output_flags_t));
+ frameCount = parcel->readInt64();
+ notificationFrameCount = parcel->readInt64();
+ (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->read(&sessionId, sizeof(audio_session_t));
+
+ /* output arguments*/
+ sampleRate = parcel->readUint32();
+ afFrameCount = parcel->readInt64();
+ afSampleRate = parcel->readInt64();
+ afLatencyMs = parcel->readInt32();
+ (void)parcel->read(&outputId, sizeof(audio_io_handle_t));
+ return NO_ERROR;
+ }
+
+ status_t writeToParcel(Parcel *parcel) const override {
+ /* input/output arguments*/
+ (void)parcel->write(&flags, sizeof(audio_output_flags_t));
+ (void)parcel->writeInt64(frameCount);
+ (void)parcel->writeInt64(notificationFrameCount);
+ (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->write(&sessionId, sizeof(audio_session_t));
+
+ /* output arguments*/
+ (void)parcel->writeUint32(sampleRate);
+ (void)parcel->writeInt64(afFrameCount);
+ (void)parcel->writeInt64(afSampleRate);
+ (void)parcel->writeInt32(afLatencyMs);
+ (void)parcel->write(&outputId, sizeof(audio_io_handle_t));
+ return NO_ERROR;
+ }
+
+ /* input/output */
+ audio_output_flags_t flags;
+ size_t frameCount;
+ size_t notificationFrameCount;
+ audio_port_handle_t selectedDeviceId;
+ audio_session_t sessionId;
+
+ /* output */
+ uint32_t sampleRate;
+ size_t afFrameCount;
+ uint32_t afSampleRate;
+ uint32_t afLatencyMs;
+ audio_io_handle_t outputId;
+ };
+
+ /* CreateRecordInput contains all input arguments sent by AudioRecord to AudioFlinger
+ * when calling createRecord() including arguments that will be updated by AudioFlinger
+ * and returned in CreateRecordOutput object
+ */
+ class CreateRecordInput : public Parcelable {
+ public:
+ status_t readFromParcel(const Parcel *parcel) override {
+ /* input arguments*/
+ memset(&attr, 0, sizeof(audio_attributes_t));
+ if (parcel->read(&attr, sizeof(audio_attributes_t)) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
+ attr.tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE -1] = '\0';
+ memset(&config, 0, sizeof(audio_config_base_t));
+ if (parcel->read(&config, sizeof(audio_config_base_t)) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
+ if (clientInfo.readFromParcel(parcel) != NO_ERROR) {
+ return DEAD_OBJECT;
+ }
+ opPackageName = parcel->readString16();
+
+ /* input/output arguments*/
+ (void)parcel->read(&flags, sizeof(audio_input_flags_t));
+ frameCount = parcel->readInt64();
+ notificationFrameCount = parcel->readInt64();
+ (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->read(&sessionId, sizeof(audio_session_t));
+ return NO_ERROR;
+ }
+
+ status_t writeToParcel(Parcel *parcel) const override {
+ /* input arguments*/
+ (void)parcel->write(&attr, sizeof(audio_attributes_t));
+ (void)parcel->write(&config, sizeof(audio_config_base_t));
+ (void)clientInfo.writeToParcel(parcel);
+ (void)parcel->writeString16(opPackageName);
+
+ /* input/output arguments*/
+ (void)parcel->write(&flags, sizeof(audio_input_flags_t));
+ (void)parcel->writeInt64(frameCount);
+ (void)parcel->writeInt64(notificationFrameCount);
+ (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->write(&sessionId, sizeof(audio_session_t));
+ return NO_ERROR;
+ }
+
+ /* input */
+ audio_attributes_t attr;
+ audio_config_base_t config;
+ AudioClient clientInfo;
+ String16 opPackageName;
+
+ /* input/output */
+ audio_input_flags_t flags;
+ size_t frameCount;
+ size_t notificationFrameCount;
+ audio_port_handle_t selectedDeviceId;
+ audio_session_t sessionId;
+ };
+
+ /* CreateRecordOutput contains all output arguments returned by AudioFlinger to AudioRecord
+ * when calling createRecord() including arguments that were passed as I/O for update by
+ * CreateRecordInput.
+ */
+ class CreateRecordOutput : public Parcelable {
+ public:
+ status_t readFromParcel(const Parcel *parcel) override {
+ /* input/output arguments*/
+ (void)parcel->read(&flags, sizeof(audio_input_flags_t));
+ frameCount = parcel->readInt64();
+ notificationFrameCount = parcel->readInt64();
+ (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->read(&sessionId, sizeof(audio_session_t));
+
+ /* output arguments*/
+ sampleRate = parcel->readUint32();
+ (void)parcel->read(&inputId, sizeof(audio_io_handle_t));
+ if (parcel->readInt32() != 0) {
+ cblk = interface_cast<IMemory>(parcel->readStrongBinder());
+ if (cblk == 0 || cblk->pointer() == NULL) {
+ return BAD_VALUE;
+ }
+ }
+ if (parcel->readInt32() != 0) {
+ buffers = interface_cast<IMemory>(parcel->readStrongBinder());
+ if (buffers == 0 || buffers->pointer() == NULL) {
+ return BAD_VALUE;
+ }
+ }
+ return NO_ERROR;
+ }
+
+ status_t writeToParcel(Parcel *parcel) const override {
+ /* input/output arguments*/
+ (void)parcel->write(&flags, sizeof(audio_input_flags_t));
+ (void)parcel->writeInt64(frameCount);
+ (void)parcel->writeInt64(notificationFrameCount);
+ (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+ (void)parcel->write(&sessionId, sizeof(audio_session_t));
+
+ /* output arguments*/
+ (void)parcel->writeUint32(sampleRate);
+ (void)parcel->write(&inputId, sizeof(audio_io_handle_t));
+ if (cblk != 0) {
+ (void)parcel->writeInt32(1);
+ (void)parcel->writeStrongBinder(IInterface::asBinder(cblk));
+ } else {
+ (void)parcel->writeInt32(0);
+ }
+ if (buffers != 0) {
+ (void)parcel->writeInt32(1);
+ (void)parcel->writeStrongBinder(IInterface::asBinder(buffers));
+ } else {
+ (void)parcel->writeInt32(0);
+ }
+
+ return NO_ERROR;
+ }
+
+ /* input/output */
+ audio_input_flags_t flags;
+ size_t frameCount;
+ size_t notificationFrameCount;
+ audio_port_handle_t selectedDeviceId;
+ audio_session_t sessionId;
+
+ /* output */
+ uint32_t sampleRate;
+ audio_io_handle_t inputId;
+ sp<IMemory> cblk;
+ sp<IMemory> buffers;
+ };
// invariant on exit for all APIs that return an sp<>:
// (return value != 0) == (*status == NO_ERROR)
@@ -50,45 +321,13 @@
/* create an audio track and registers it with AudioFlinger.
* return null if the track cannot be created.
*/
- virtual sp<IAudioTrack> createTrack(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t *pFrameCount,
- audio_output_flags_t *flags,
- const sp<IMemory>& sharedBuffer,
- // On successful return, AudioFlinger takes over the handle
- // reference and will release it when the track is destroyed.
- // However on failure, the client is responsible for release.
- audio_io_handle_t output,
- pid_t pid,
- pid_t tid, // -1 means unused, otherwise must be valid non-0
- audio_session_t *sessionId,
- int clientUid,
- status_t *status,
- audio_port_handle_t portId) = 0;
+ virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
+ CreateTrackOutput& output,
+ status_t *status) = 0;
- virtual sp<IAudioRecord> openRecord(
- // On successful return, AudioFlinger takes over the handle
- // reference and will release it when the track is destroyed.
- // However on failure, the client is responsible for release.
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& callingPackage,
- size_t *pFrameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid, // -1 means unused, otherwise must be valid non-0
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers, // return value 0 means it follows cblk
- status_t *status,
- audio_port_handle_t portId) = 0;
+ virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
+ CreateRecordOutput& output,
+ status_t *status) = 0;
// FIXME Surprisingly, format/latency don't work for input handles
@@ -131,6 +370,7 @@
// mic mute/state
virtual status_t setMicMute(bool state) = 0;
virtual bool getMicMute() const = 0;
+ virtual void setRecordSilenced(uid_t uid, bool silenced) = 0;
virtual status_t setParameters(audio_io_handle_t ioHandle,
const String8& keyValuePairs) = 0;
@@ -216,8 +456,9 @@
// Intended for AudioService to inform AudioFlinger of device's low RAM attribute,
// and should be called at most once. For a definition of what "low RAM" means, see
- // android.app.ActivityManager.isLowRamDevice().
- virtual status_t setLowRamDevice(bool isLowRamDevice) = 0;
+ // android.app.ActivityManager.isLowRamDevice(). The totalMemory parameter
+ // is obtained from android.app.ActivityManager.MemoryInfo.totalMem.
+ virtual status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) = 0;
/* List available audio ports and their attributes */
virtual status_t listAudioPorts(unsigned int *num_ports,
@@ -247,6 +488,9 @@
// Returns the number of frames per audio HAL buffer.
virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const = 0;
+
+ /* List available microphones and their characteristics */
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
};
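
With this change, IAudioFlinger::createTrack() takes a single CreateTrackInput bundle and fills a CreateTrackOutput instead of a long positional argument list. A rough client-side sketch with illustrative values; the real caller is AudioTrack::set(), and the clientInfo fields are omitted here:

#include <string.h>

#include <media/AudioSystem.h>
#include <media/IAudioFlinger.h>

using namespace android;

sp<IAudioTrack> createTrackSketch() {
    IAudioFlinger::CreateTrackInput input;
    memset(&input.attr, 0, sizeof(input.attr));
    memset(&input.config, 0, sizeof(input.config));
    input.config.sample_rate = 48000;
    input.config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    input.config.format = AUDIO_FORMAT_PCM_16_BIT;
    // input.clientInfo: uid/pid/package of the caller (field names omitted here).
    input.sharedBuffer.clear();                    // streaming mode
    input.notificationsPerBuffer = 0;
    input.speed = 1.0f;
    input.flags = AUDIO_OUTPUT_FLAG_NONE;
    input.frameCount = 0;                          // let AudioFlinger choose
    input.notificationFrameCount = 0;
    input.selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
    input.sessionId = AUDIO_SESSION_ALLOCATE;

    IAudioFlinger::CreateTrackOutput output;
    status_t status = NO_ERROR;
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == nullptr) {
        return nullptr;
    }
    sp<IAudioTrack> track = af->createTrack(input, output, &status);
    // On success, output.frameCount, output.sampleRate, output.outputId, etc.
    // reflect the values actually chosen by AudioFlinger.
    return track;
}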
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 9b3e35e..c3876af 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -55,16 +55,12 @@
virtual status_t setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config) = 0;
virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0;
- virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
- uint32_t samplingRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL) = 0;
+ virtual audio_io_handle_t getOutput(audio_stream_type_t stream) = 0;
virtual status_t getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
+ pid_t pid,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t flags,
@@ -84,16 +80,15 @@
audio_session_t session,
pid_t pid,
uid_t uid,
+ const String16& opPackageName,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId) = 0;
- virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session) = 0;
- virtual status_t stopInput(audio_io_handle_t input,
- audio_session_t session) = 0;
- virtual void releaseInput(audio_io_handle_t input,
- audio_session_t session) = 0;
+ virtual status_t startInput(audio_port_handle_t portId,
+ bool *silenced) = 0;
+ virtual status_t stopInput(audio_port_handle_t portId) = 0;
+ virtual void releaseInput(audio_port_handle_t portId) = 0;
virtual status_t initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax) = 0;
@@ -171,6 +166,12 @@
virtual status_t getMasterMono(bool *mono) = 0;
virtual float getStreamVolumeDB(
audio_stream_type_t stream, int index, audio_devices_t device) = 0;
+
+ virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+ audio_format_t *surroundFormats,
+ bool *surroundFormatsEnabled,
+ bool reported) = 0;
+ virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) = 0;
};
@@ -183,6 +184,10 @@
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
+private:
+ void sanetizeAudioAttributes(audio_attributes_t* attr);
+ status_t sanitizeEffectDescriptor(effect_descriptor_t* desc);
+ status_t sanitizeAudioPortConfig(struct audio_port_config* config);
};
// ----------------------------------------------------------------------------
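
startInput()/stopInput()/releaseInput() are now keyed by the audio_port_handle_t returned from getInputForAttr(), and startInput() reports through *silenced whether capture will be silenced (for example for a backgrounded UID). A minimal sketch of the corresponding AudioSystem calls, assuming portId came from a prior getInputForAttr():

#include <media/AudioSystem.h>

using namespace android;

status_t startCapture(audio_port_handle_t portId) {
    bool silenced = false;
    status_t status = AudioSystem::startInput(portId, &silenced);
    if (status == NO_ERROR && silenced) {
        // Capture proceeds but delivers silence until policy unsilences the client.
    }
    return status;
}

void stopCapture(audio_port_handle_t portId) {
    AudioSystem::stopInput(portId);
    AudioSystem::releaseInput(portId);
}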
diff --git a/media/libaudioclient/include/media/IAudioRecord.h b/media/libaudioclient/include/media/IAudioRecord.h
deleted file mode 100644
index 7768176..0000000
--- a/media/libaudioclient/include/media/IAudioRecord.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef IAUDIORECORD_H_
-#define IAUDIORECORD_H_
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
-#include <system/audio.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class IAudioRecord : public IInterface
-{
-public:
- DECLARE_META_INTERFACE(AudioRecord);
-
- /* After it's created the track is not active. Call start() to
- * make it active.
- */
- virtual status_t start(int /*AudioSystem::sync_event_t*/ event,
- audio_session_t triggerSession) = 0;
-
- /* Stop a track. If set, the callback will cease being called and
- * obtainBuffer will return an error. Buffers that are already released
- * will be processed, unless flush() is called.
- */
- virtual void stop() = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnAudioRecord : public BnInterface<IAudioRecord>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif /*IAUDIORECORD_H_*/
diff --git a/media/libaudioclient/include/media/IAudioTrack.h b/media/libaudioclient/include/media/IAudioTrack.h
index 27a62d6..94afe3c 100644
--- a/media/libaudioclient/include/media/IAudioTrack.h
+++ b/media/libaudioclient/include/media/IAudioTrack.h
@@ -77,12 +77,12 @@
virtual void signal() = 0;
/* Sets the volume shaper */
- virtual VolumeShaper::Status applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation) = 0;
+ virtual media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation) = 0;
/* gets the volume shaper state */
- virtual sp<VolumeShaper::State> getVolumeShaperState(int id) = 0;
+ virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/media/libaudioclient/include/media/PlayerBase.h b/media/libaudioclient/include/media/PlayerBase.h
index e63090b..e7a8abc 100644
--- a/media/libaudioclient/include/media/PlayerBase.h
+++ b/media/libaudioclient/include/media/PlayerBase.h
@@ -17,35 +17,31 @@
#ifndef __ANDROID_PLAYER_BASE_H__
#define __ANDROID_PLAYER_BASE_H__
-#include <audiomanager/IPlayer.h>
#include <audiomanager/AudioManager.h>
#include <audiomanager/IAudioManager.h>
+#include "android/media/BnPlayer.h"
namespace android {
-class PlayerBase : public BnPlayer
+class PlayerBase : public ::android::media::BnPlayer
{
public:
explicit PlayerBase();
- virtual ~PlayerBase();
+ virtual ~PlayerBase() override;
virtual void destroy() = 0;
//IPlayer implementation
- virtual void start();
- virtual void pause();
- virtual void stop();
- virtual void setVolume(float vol);
- virtual void setPan(float pan);
- virtual void setStartDelayMs(int32_t delayMs);
- virtual void applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation) override;
-
- virtual status_t onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
-
+ virtual binder::Status start() override;
+ virtual binder::Status pause() override;
+ virtual binder::Status stop() override;
+ virtual binder::Status setVolume(float vol) override;
+ virtual binder::Status setPan(float pan) override;
+ virtual binder::Status setStartDelayMs(int32_t delayMs) override;
+ virtual binder::Status applyVolumeShaper(
+ const media::VolumeShaper::Configuration& configuration,
+ const media::VolumeShaper::Operation& operation) override;
status_t startWithStatus();
status_t pauseWithStatus();
diff --git a/media/libaudioclient/include/media/TrackPlayerBase.h b/media/libaudioclient/include/media/TrackPlayerBase.h
index 2d113c0..66e9b3b 100644
--- a/media/libaudioclient/include/media/TrackPlayerBase.h
+++ b/media/libaudioclient/include/media/TrackPlayerBase.h
@@ -32,9 +32,9 @@
virtual void destroy();
//IPlayer implementation
- virtual void applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation);
+ virtual binder::Status applyVolumeShaper(
+ const media::VolumeShaper::Configuration& configuration,
+ const media::VolumeShaper::Operation& operation);
//FIXME move to protected field, so far made public to minimize changes to AudioTrack logic
sp<AudioTrack> mAudioTrack;
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
new file mode 100644
index 0000000..52bb2fb
--- /dev/null
+++ b/media/libaudioclient/tests/Android.bp
@@ -0,0 +1,35 @@
+cc_defaults {
+ name: "libaudioclient_tests_defaults",
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+}
+
+cc_test {
+ name: "test_create_audiotrack",
+ defaults: ["libaudioclient_tests_defaults"],
+ srcs: ["test_create_audiotrack.cpp",
+ "test_create_utils.cpp"],
+ shared_libs: [
+ "libaudioclient",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+ data: ["track_test_input_*.txt"],
+}
+
+cc_test {
+ name: "test_create_audiorecord",
+ defaults: ["libaudioclient_tests_defaults"],
+ srcs: ["test_create_audiorecord.cpp",
+ "test_create_utils.cpp"],
+ shared_libs: [
+ "libaudioclient",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+ data: ["record_test_input_*.txt"],
+}
diff --git a/media/libaudioclient/tests/record_test_input_v1.0_ref.txt b/media/libaudioclient/tests/record_test_input_v1.0_ref.txt
new file mode 100644
index 0000000..e01598e
--- /dev/null
+++ b/media/libaudioclient/tests/record_test_input_v1.0_ref.txt
@@ -0,0 +1,33 @@
+version 1.0
+# Input file for test_create_audiorecord
+# Add one line for each tested AudioRecord constructor with the following arguments:
+# sampleRate format channelMask frameCount notificationFrames flags sessionId inputSource
+# sample rate tests
+ 48000 0x1 0x10 4800 2400 0x0 0 0
+ 24000 0x1 0x10 4800 2400 0x0 0 0
+ 16000 0x1 0x10 4800 2400 0x0 0 0
+ 8000 0x1 0x10 4800 2400 0x0 0 0
+ 44100 0x1 0x10 4410 2205 0x0 0 0
+ 22050 0x1 0x10 4410 2205 0x0 0 0
+ 11025 0x1 0x10 4410 2205 0x0 0 0
+# format tests
+ 48000 0x2 0x10 4800 2400 0x0 0 0
+ 48000 0x3 0x10 4800 2400 0x0 0 0
+ 48000 0x5 0x10 4800 2400 0x0 0 0
+# channel mask tests
+ 48000 0x1 0x0C 4800 2400 0x0 0 0
+# frame count tests
+ 48000 0x1 0x10 0 0 0x0 0 0
+ 48000 0x1 0x10 48000 0 0x0 0 0
+ 48000 0x1 0x10 12000 6000 0x0 0 0
+# flags test
+ 48000 0x1 0x0C 0 0 0x1 0 0
+ 44100 0x1 0x0C 0 0 0x5 0 0
+# session tests
+ 48000 0x1 0x10 0 0 0 1001 0
+# input source tests
+ 48000 0x1 0x10 0 0 0 0 1
+ 48000 0x1 0x10 0 0 0 0 5
+ 48000 0x1 0x10 0 0 0 0 6
+ 48000 0x1 0x10 0 0 0 0 7
+ 48000 0x1 0x10 0 0 0 0 9
diff --git a/media/libaudioclient/tests/record_test_output_v1.0_ref_walleye.txt b/media/libaudioclient/tests/record_test_output_v1.0_ref_walleye.txt
new file mode 100644
index 0000000..76608eb
--- /dev/null
+++ b/media/libaudioclient/tests/record_test_output_v1.0_ref_walleye.txt
@@ -0,0 +1,198 @@
+
+#### Test 1 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(65)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(960), req. notif. frame count(2400)
+ input(150), latency(100), selected device Id(0), routed device Id(11)
+
+#### Test 2 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(73)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(24000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(480), req. notif. frame count(2400)
+ input(158), latency(200), selected device Id(0), routed device Id(11)
+
+#### Test 3 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(81)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(16000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(320), req. notif. frame count(2400)
+ input(166), latency(300), selected device Id(0), routed device Id(11)
+
+#### Test 4 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(89)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(8000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(160), req. notif. frame count(2400)
+ input(174), latency(600), selected device Id(0), routed device Id(11)
+
+#### Test 5 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(97)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(44100)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(896), req. notif. frame count(2205)
+ input(182), latency(100), selected device Id(0), routed device Id(11)
+
+#### Test 6 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(105)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(22050)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(448), req. notif. frame count(2205)
+ input(190), latency(200), selected device Id(0), routed device Id(11)
+
+#### Test 7 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(113)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(11025)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(224), req. notif. frame count(2205)
+ input(198), latency(400), selected device Id(0), routed device Id(11)
+
+#### Test 8 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(121)
+ flags(0), req. flags(0), audio source(0)
+ format(0x2), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(960), req. notif. frame count(2400)
+ input(206), latency(100), selected device Id(0), routed device Id(11)
+
+#### Test 9 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(129)
+ flags(0), req. flags(0), audio source(0)
+ format(0x3), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(960), req. notif. frame count(2400)
+ input(214), latency(100), selected device Id(0), routed device Id(11)
+
+#### Test 10 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(137)
+ flags(0), req. flags(0), audio source(0)
+ format(0x5), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(960), req. notif. frame count(2400)
+ input(222), latency(100), selected device Id(0), routed device Id(11)
+
+#### Test 11 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(145)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0xc), channel count(2), sample rate(48000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(960), req. notif. frame count(2400)
+ input(230), latency(100), selected device Id(0), routed device Id(11)
+
+#### Test 12 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(153)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(2880), req. frame count(2880)
+ notif. frame count(960), req. notif. frame count(0)
+ input(238), latency(60), selected device Id(0), routed device Id(11)
+
+#### Test 13 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(161)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(48000), req. frame count(48000)
+ notif. frame count(960), req. notif. frame count(0)
+ input(246), latency(1000), selected device Id(0), routed device Id(11)
+
+#### Test 14 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(169)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(12000), req. frame count(12000)
+ notif. frame count(960), req. notif. frame count(6000)
+ input(254), latency(250), selected device Id(0), routed device Id(11)
+
+#### Test 15 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(177)
+ flags(0x1), req. flags(0x1), audio source(0)
+ format(0x1), channel mask(0xc), channel count(2), sample rate(48000)
+ frame count(4096), req. frame count(4096)
+ notif. frame count(96), req. notif. frame count(0)
+ input(262), latency(85), selected device Id(0), routed device Id(11)
+
+#### Test 16 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(185)
+ flags(0), req. flags(0x5), audio source(0)
+ format(0x1), channel mask(0xc), channel count(2), sample rate(44100)
+ frame count(2664), req. frame count(2664)
+ notif. frame count(888), req. notif. frame count(0)
+ input(278), latency(60), selected device Id(0), routed device Id(11)
+
+#### Test 17 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(1001)
+ flags(0), req. flags(0), audio source(0)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(2880), req. frame count(2880)
+ notif. frame count(960), req. notif. frame count(0)
+ input(286), latency(60), selected device Id(0), routed device Id(11)
+
+#### Test 18 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(193)
+ flags(0), req. flags(0), audio source(1)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(2880), req. frame count(2880)
+ notif. frame count(960), req. notif. frame count(0)
+ input(294), latency(60), selected device Id(0), routed device Id(11)
+
+#### Test 19 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(201)
+ flags(0), req. flags(0), audio source(5)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(2880), req. frame count(2880)
+ notif. frame count(960), req. notif. frame count(0)
+ input(302), latency(60), selected device Id(0), routed device Id(12)
+
+#### Test 20 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(209)
+ flags(0), req. flags(0), audio source(6)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(2880), req. frame count(2880)
+ notif. frame count(960), req. notif. frame count(0)
+ input(310), latency(60), selected device Id(0), routed device Id(11)
+
+#### Test 21 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(217)
+ flags(0), req. flags(0), audio source(7)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(2880), req. frame count(2880)
+ notif. frame count(960), req. notif. frame count(0)
+ input(318), latency(60), selected device Id(0), routed device Id(11)
+
+#### Test 22 status 0
+ AudioRecord::dump
+ status(0), active(0), session Id(225)
+ flags(0), req. flags(0), audio source(9)
+ format(0x1), channel mask(0x10), channel count(1), sample rate(48000)
+ frame count(2880), req. frame count(2880)
+ notif. frame count(960), req. notif. frame count(0)
+ input(326), latency(60), selected device Id(0), routed device Id(11)
diff --git a/media/libaudioclient/tests/test_create_audiorecord.cpp b/media/libaudioclient/tests/test_create_audiorecord.cpp
new file mode 100644
index 0000000..cf6a734
--- /dev/null
+++ b/media/libaudioclient/tests/test_create_audiorecord.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <binder/MemoryBase.h>
+#include <binder/MemoryDealer.h>
+#include <binder/MemoryHeapBase.h>
+#include <media/AudioRecord.h>
+
+#include "test_create_utils.h"
+
+#define NUM_ARGUMENTS 8
+#define VERSION_VALUE "1.0"
+#define PACKAGE_NAME "AudioRecord test"
+
+namespace android {
+
+int testRecord(FILE *inputFile, int outputFileFd)
+{
+ char line[MAX_INPUT_FILE_LINE_LENGTH];
+ uint32_t testCount = 0;
+ Vector<String16> args;
+ int ret = 0;
+
+ if (inputFile == nullptr) {
+ sp<AudioRecord> record = new AudioRecord(AUDIO_SOURCE_DEFAULT,
+ 0 /* sampleRate */,
+ AUDIO_FORMAT_DEFAULT,
+ AUDIO_CHANNEL_IN_MONO,
+ String16(PACKAGE_NAME));
+ if (record == 0 || record->initCheck() != NO_ERROR) {
+ write(outputFileFd, "Error creating AudioRecord\n",
+ sizeof("Error creating AudioRecord\n"));
+ } else {
+ record->dump(outputFileFd, args);
+ }
+ return 0;
+ }
+
+ // check version
+ if (!checkVersion(inputFile, VERSION_VALUE)) {
+ return 1;
+ }
+
+ while (readLine(inputFile, line, MAX_INPUT_FILE_LINE_LENGTH) == 0) {
+ uint32_t sampleRate;
+ audio_format_t format;
+ audio_channel_mask_t channelMask;
+ size_t frameCount;
+ int32_t notificationFrames;
+ audio_input_flags_t flags;
+ audio_session_t sessionId;
+ audio_source_t inputSource;
+ audio_attributes_t attributes;
+ status_t status;
+ char statusStr[MAX_OUTPUT_FILE_LINE_LENGTH];
+ bool fast = false;
+
+ if (sscanf(line, " %u %x %x %zu %d %x %u %u",
+ &sampleRate, &format, &channelMask,
+ &frameCount, ¬ificationFrames,
+ &flags, &sessionId, &inputSource) != NUM_ARGUMENTS) {
+ fprintf(stderr, "Malformed line for test #%u in input file\n", testCount+1);
+ ret = 1;
+ continue;
+ }
+ testCount++;
+
+ if ((flags & AUDIO_INPUT_FLAG_FAST) != 0) {
+ fast = true;
+ }
+
+ memset(&attributes, 0, sizeof(attributes));
+ attributes.source = inputSource;
+
+ sp<AudioRecord> record = new AudioRecord(String16(PACKAGE_NAME));
+
+ record->set(AUDIO_SOURCE_DEFAULT,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ fast ? callback : nullptr,
+ nullptr,
+ notificationFrames,
+ false,
+ sessionId,
+ fast ? AudioRecord::TRANSFER_CALLBACK : AudioRecord::TRANSFER_DEFAULT,
+ flags,
+ getuid(),
+ getpid(),
+ &attributes,
+ AUDIO_PORT_HANDLE_NONE);
+ status = record->initCheck();
+ sprintf(statusStr, "\n#### Test %u status %d\n", testCount, status);
+ write(outputFileFd, statusStr, strlen(statusStr));
+ if (status != NO_ERROR) {
+ continue;
+ }
+ record->dump(outputFileFd, args);
+ }
+ return ret;
+}
+
+}; // namespace android
+
+
+int main(int argc, char **argv)
+{
+ return android::main(argc, argv, android::testRecord);
+}
+
diff --git a/media/libaudioclient/tests/test_create_audiotrack.cpp b/media/libaudioclient/tests/test_create_audiotrack.cpp
new file mode 100644
index 0000000..cf9b925
--- /dev/null
+++ b/media/libaudioclient/tests/test_create_audiotrack.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <binder/MemoryBase.h>
+#include <binder/MemoryDealer.h>
+#include <binder/MemoryHeapBase.h>
+#include <media/AudioTrack.h>
+
+#include "test_create_utils.h"
+
+#define NUM_ARGUMENTS 10
+#define VERSION_VALUE "1.0"
+
+namespace android {
+
+int testTrack(FILE *inputFile, int outputFileFd)
+{
+ char line[MAX_INPUT_FILE_LINE_LENGTH];
+ uint32_t testCount = 0;
+ Vector<String16> args;
+ int ret = 0;
+
+ if (inputFile == nullptr) {
+ sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_DEFAULT,
+ 0 /* sampleRate */,
+ AUDIO_FORMAT_DEFAULT,
+ AUDIO_CHANNEL_OUT_STEREO);
+ if (track == 0 || track->initCheck() != NO_ERROR) {
+ write(outputFileFd, "Error creating AudioTrack\n",
+ sizeof("Error creating AudioTrack\n"));
+ } else {
+ track->dump(outputFileFd, args);
+ }
+ return 0;
+ }
+
+ // check version
+ if (!checkVersion(inputFile, VERSION_VALUE)) {
+ return 1;
+ }
+
+ while (readLine(inputFile, line, MAX_INPUT_FILE_LINE_LENGTH) == 0) {
+ uint32_t sampleRate;
+ audio_format_t format;
+ audio_channel_mask_t channelMask;
+ size_t frameCount;
+ int32_t notificationFrames;
+ uint32_t useSharedBuffer;
+ audio_output_flags_t flags;
+ audio_session_t sessionId;
+ audio_usage_t usage;
+ audio_content_type_t contentType;
+ audio_attributes_t attributes;
+ sp<IMemory> sharedBuffer;
+ sp<MemoryDealer> heap;
+ audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+ status_t status;
+ char statusStr[MAX_OUTPUT_FILE_LINE_LENGTH];
+ bool offload = false;
+ bool fast = false;
+
+ if (sscanf(line, " %u %x %x %zu %d %u %x %u %u %u",
+ &sampleRate, &format, &channelMask,
+ &frameCount, ¬ificationFrames, &useSharedBuffer,
+ &flags, &sessionId, &usage, &contentType) != NUM_ARGUMENTS) {
+ fprintf(stderr, "Malformed line for test #%u in input file\n", testCount+1);
+ ret = 1;
+ continue;
+ }
+ testCount++;
+
+ if (useSharedBuffer != 0) {
+ size_t heapSize = audio_channel_count_from_out_mask(channelMask) *
+ audio_bytes_per_sample(format) * frameCount;
+ heap = new MemoryDealer(heapSize, "AudioTrack Heap Base");
+ sharedBuffer = heap->allocate(heapSize);
+ frameCount = 0;
+ notificationFrames = 0;
+ }
+ if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ offloadInfo.sample_rate = sampleRate;
+ offloadInfo.channel_mask = channelMask;
+ offloadInfo.format = format;
+ offload = true;
+ }
+ if ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0) {
+ fast = true;
+ }
+
+ memset(&attributes, 0, sizeof(attributes));
+ attributes.content_type = contentType;
+ attributes.usage = usage;
+
+ sp<AudioTrack> track = new AudioTrack();
+
+ track->set(AUDIO_STREAM_DEFAULT,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ (fast || offload) ? callback : nullptr,
+ nullptr,
+ notificationFrames,
+ sharedBuffer,
+ false,
+ sessionId,
+ ((fast && sharedBuffer == 0) || offload) ?
+ AudioTrack::TRANSFER_CALLBACK : AudioTrack::TRANSFER_DEFAULT,
+ offload ? &offloadInfo : nullptr,
+ getuid(),
+ getpid(),
+ &attributes,
+ false,
+ 1.0f,
+ AUDIO_PORT_HANDLE_NONE);
+ status = track->initCheck();
+ sprintf(statusStr, "\n#### Test %u status %d\n", testCount, status);
+ write(outputFileFd, statusStr, strlen(statusStr));
+ if (status != NO_ERROR) {
+ continue;
+ }
+ track->dump(outputFileFd, args);
+ }
+ return ret;
+}
+
+}; // namespace android
+
+
+int main(int argc, char **argv)
+{
+ return android::main(argc, argv, android::testTrack);
+}
+
diff --git a/media/libaudioclient/tests/test_create_utils.cpp b/media/libaudioclient/tests/test_create_utils.cpp
new file mode 100644
index 0000000..8aa1f13
--- /dev/null
+++ b/media/libaudioclient/tests/test_create_utils.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "test_create_utils.h"
+
+namespace android {
+
+int readLine(FILE *inputFile, char *line, int size) {
+ int ret = 0;
+ while (true) {
+ char *str = fgets(line, size, inputFile);
+ if (str == nullptr) {
+ ret = -1;
+ break;
+ }
+ if (feof(inputFile) != 0 || ferror(inputFile) != 0) {
+ ret = -1;
+ break;
+ }
+ if (strlen(str) != 0 && str[0] != COMMENT_CHAR) {
+ break;
+ }
+ }
+ return ret;
+}
+
+bool checkVersion(FILE *inputFile, const char *version)
+{
+ char line[MAX_INPUT_FILE_LINE_LENGTH];
+ char versionKey[MAX_INPUT_FILE_LINE_LENGTH];
+ char versionValue[MAX_INPUT_FILE_LINE_LENGTH];
+
+ if (readLine(inputFile, line, MAX_INPUT_FILE_LINE_LENGTH) != 0) {
+ fprintf(stderr, "Missing version in input file\n");
+ return false;
+ }
+
+ if (sscanf(line, " %s %s", versionKey, versionValue) != 2) {
+ fprintf(stderr, "Malformed version in input file\n");
+ return false;
+ }
+ if (strcmp(versionKey, VERSION_KEY) != 0) {
+ fprintf(stderr, "Malformed version in input file\n");
+ return false;
+ }
+ if (strcmp(versionValue, version) != 0) {
+ fprintf(stderr, "Wrong input file version %s expecting %s\n", versionValue, version);
+ return false;
+ }
+ return true;
+}
+
+void callback(int event __unused, void* user __unused, void *info __unused)
+{
+}
+
+int main(int argc, char **argv, test_func_t testFunc)
+{
+ FILE *inputFile = nullptr;
+ int outputFileFd = STDOUT_FILENO;
+ mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ int ret = 0;
+
+ if (argc > 5) {
+ fprintf(stderr, "Usage: %s [-i input_params.txt] [-o output_params.txt]\n", argv[0]);
+ return 1;
+ }
+
+ argv++;
+ while (*argv) {
+ if (strcmp(*argv, "-i") == 0) {
+ argv++;
+ if (*argv) {
+ inputFile = fopen(*argv, "r");
+ if (inputFile == nullptr) {
+ ret = 1;
+ }
+ } else {
+ ret = 1;
+ }
+ }
+ if (strcmp(*argv, "-o") == 0) {
+ argv++;
+ if (*argv) {
+ outputFileFd = open(*argv, O_WRONLY|O_CREAT, mode);
+ if (outputFileFd < 0) {
+ ret = 1;
+ }
+ } else {
+ ret = 1;
+ }
+ argv++;
+ }
+ if (*argv) {
+ argv++;
+ }
+ }
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = testFunc(inputFile, outputFileFd);
+
+ if (inputFile) {
+ fclose(inputFile);
+ }
+ if (outputFileFd >= 0 && outputFileFd != STDOUT_FILENO) {
+ close(outputFileFd);
+ }
+
+ return ret;
+}
+
+}; // namespace android
+
diff --git a/media/libaudioclient/tests/test_create_utils.h b/media/libaudioclient/tests/test_create_utils.h
new file mode 100644
index 0000000..2ad646e
--- /dev/null
+++ b/media/libaudioclient/tests/test_create_utils.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#define MAX_INPUT_FILE_LINE_LENGTH 512
+#define MAX_OUTPUT_FILE_LINE_LENGTH 512
+
+#define COMMENT_CHAR '#'
+#define VERSION_KEY "version"
+
+namespace android {
+
+int readLine(FILE *inputFile, char *line, int size);
+
+bool checkVersion(FILE *inputFile, const char *version);
+
+void callback(int event, void* user, void *info);
+
+typedef int (*test_func_t)(FILE *inputFile, int outputFileFd);
+
+int main(int argc, char **argv, test_func_t testFunc);
+
+}; // namespace android
diff --git a/media/libaudioclient/tests/track_test_input_v1.0_ref.txt b/media/libaudioclient/tests/track_test_input_v1.0_ref.txt
new file mode 100644
index 0000000..b923ff3
--- /dev/null
+++ b/media/libaudioclient/tests/track_test_input_v1.0_ref.txt
@@ -0,0 +1,40 @@
+version 1.0
+# Input file for test_create_audiotrack
+# Add one line for each tested AudioTrack constructor with the following arguments:
+# sampleRate format channelMask frameCount notificationFrames sharedBuffer flags sessionId usage contentType
+# sample rate tests
+ 48000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 24000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 16000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 8000 0x1 0x3 4800 2400 0 0x0 0 1 2
+ 44100 0x1 0x3 4410 2205 0 0x0 0 1 2
+ 22050 0x1 0x3 4410 2205 0 0x0 0 1 2
+ 11025 0x1 0x3 4410 2205 0 0x0 0 1 2
+# format tests
+ 48000 0x2 0x3 4800 2400 0 0x0 0 1 2
+ 48000 0x3 0x3 4800 2400 0 0x0 0 1 2
+ 48000 0x5 0x3 4800 2400 0 0x0 0 1 2
+# channel mask tests
+ 48000 0x1 0x1 4800 2400 0 0x0 0 1 2
+ 48000 0x1 0x3F 4800 2400 0 0x0 0 1 2
+ 48000 0x1 0x63F 4800 2400 0 0x0 0 1 2
+# framecount tests
+ 48000 0x1 0x3 0 0 0 0x0 0 1 2
+ 48000 0x1 0x3 48000 0 0 0x0 0 1 2
+ 48000 0x1 0x3 0 -2 0 0x4 0 1 2
+# shared memory tests
+ 48000 0x1 0x3 4800 2400 1 0x0 0 1 2
+ 48000 0x1 0x3 4800 2400 1 0x4 0 1 2
+# flags test
+ 48000 0x1 0x3 4800 2400 0 0x4 0 1 2
+ 48000 0x1 0x3 4800 2400 0 0x8 0 1 2
+ 44100 0x1000000 0x3 4800 2400 0 0x11 0 1 2
+# session tests
+ 48000 0x1 0x3 4800 2400 0 0x0 1001 1 2
+# attributes tests
+ 48000 0x1 0x3 4800 2400 0 0x0 0 0 0
+ 48000 0x1 0x3 4800 2400 0 0x0 0 2 1
+ 48000 0x1 0x3 4800 2400 0 0x0 0 4 2
+ 48000 0x1 0x3 4800 2400 0 0x0 0 5 2
+ 48000 0x1 0x3 4800 2400 0 0x0 0 11 1
+ 48000 0x1 0x3 4800 2400 0 0x0 0 12 1
diff --git a/media/libaudioclient/tests/track_test_output_v1.0_ref_walleye.txt b/media/libaudioclient/tests/track_test_output_v1.0_ref_walleye.txt
new file mode 100644
index 0000000..5fe433c
--- /dev/null
+++ b/media/libaudioclient/tests/track_test_output_v1.0_ref_walleye.txt
@@ -0,0 +1,308 @@
+
+#### Test 1 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(49), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 2 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(57), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(24000), original sample rate(24000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(1600), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (250), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 3 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(65), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(16000), original sample rate(16000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(1600), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (350), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 4 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(73), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(8000), original sample rate(8000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(1600), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (650), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 5 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(81), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(44100), original sample rate(44100), speed(1.000000)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(1470), req. notif. frame count(2205), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 6 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(89), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(22050), original sample rate(22050), speed(1.000000)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(1470), req. notif. frame count(2205), req. notif. per buff(0)
+ latency (250), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 7 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(97), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(11025), original sample rate(11025), speed(1.000000)
+ frame count(4410), req. frame count(4410)
+ notif. frame count(1470), req. notif. frame count(2205), req. notif. per buff(0)
+ latency (450), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 8 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(105), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(2), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 9 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(113), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(3), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (180), selected device Id(0), routed device Id(2)
+ output(29) AF latency (80) AF frame count(1920) AF SampleRate(48000)
+
+#### Test 10 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(121), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(5), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (180), selected device Id(0), routed device Id(2)
+ output(29) AF latency (80) AF frame count(1920) AF SampleRate(48000)
+
+#### Test 11 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(129), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(1), channel count(1)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 12 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(137), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3f), channel count(6)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 13 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(145), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(63f), channel count(8)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 14 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(153), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(1924), req. frame count(1924)
+ notif. frame count(962), req. notif. frame count(0), req. notif. per buff(0)
+ latency (90), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 15 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(161), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(48000), req. frame count(48000)
+ notif. frame count(24000), req. notif. frame count(0), req. notif. per buff(0)
+ latency (1050), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 16 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(169), flags(4)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(480), req. frame count(480)
+ notif. frame count(240), req. notif. frame count(0), req. notif. per buff(2)
+ latency (60), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 17 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(177), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(0), req. notif. frame count(0), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 18 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(185), flags(4)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(0), req. notif. frame count(0), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 19 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(193), flags(4)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(240), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 20 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(201), flags(8)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (180), selected device Id(0), routed device Id(2)
+ output(29) AF latency (80) AF frame count(1920) AF SampleRate(48000)
+
+#### Test 21 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(209), flags(11)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1000000), channel mask(3), channel count(2)
+ sample rate(44100), original sample rate(44100), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(4800), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (204), selected device Id(0), routed device Id(2)
+ output(53) AF latency (96) AF frame count(262144) AF SampleRate(44100)
+
+#### Test 22 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(1001), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 23 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(217), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 24 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(225), flags(0)
+ stream type(0), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (140), selected device Id(0), routed device Id(1)
+ output(45) AF latency (40) AF frame count(960) AF SampleRate(48000)
+
+#### Test 25 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(233), flags(0)
+ stream type(4), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(3)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 26 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(241), flags(0)
+ stream type(5), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(3)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 27 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(249), flags(0)
+ stream type(10), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
+
+#### Test 28 status 0
+ AudioTrack::dump
+ status(0), state(1), session Id(257), flags(0)
+ stream type(3), left - right volume(1.000000, 1.000000)
+ format(1), channel mask(3), channel count(2)
+ sample rate(48000), original sample rate(48000), speed(1.000000)
+ frame count(4800), req. frame count(4800)
+ notif. frame count(2400), req. notif. frame count(2400), req. notif. per buff(0)
+ latency (150), selected device Id(0), routed device Id(2)
+ output(13) AF latency (50) AF frame count(960) AF SampleRate(48000)
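
Note on the expected dumps above: across all 28 cases the reported track latency appears to equal the AudioFlinger output latency plus the track buffer length in milliseconds (Test 1: 50 + 1000*4800/48000 = 150; Test 4: 50 + 1000*4800/8000 = 650; Test 16: 50 + 1000*480/48000 = 60; Test 21: 96 + 1000*4800/44100 = 204 with integer division). This is a heuristic read of the numbers in this test data, not a documented AudioTrack formula; a minimal C++ sketch of the observed relationship:

    // Heuristic read of the dump values above; not a documented AudioTrack formula.
    static uint32_t approxTrackLatencyMs(uint32_t afLatencyMs,
                                          uint32_t frameCount,
                                          uint32_t sampleRate) {
        return afLatencyMs + (1000u * frameCount) / sampleRate;  // e.g. 50 + 100 = 150 for Test 1
    }
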
diff --git a/media/libaudiohal/2.0/Android.bp b/media/libaudiohal/2.0/Android.bp
new file mode 100644
index 0000000..574b435
--- /dev/null
+++ b/media/libaudiohal/2.0/Android.bp
@@ -0,0 +1,54 @@
+cc_library_shared {
+ name: "libaudiohal@2.0",
+
+ srcs: [
+ "DeviceHalLocal.cpp",
+ "DevicesFactoryHalHybrid.cpp",
+ "DevicesFactoryHalLocal.cpp",
+ "StreamHalLocal.cpp",
+
+ "ConversionHelperHidl.cpp",
+ "DeviceHalHidl.cpp",
+ "DevicesFactoryHalHidl.cpp",
+ "EffectBufferHalHidl.cpp",
+ "EffectHalHidl.cpp",
+ "EffectsFactoryHalHidl.cpp",
+ "StreamHalHidl.cpp",
+ ],
+
+ export_include_dirs: ["."],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+ shared_libs: [
+ "libaudiohal_deathhandler",
+ "libaudioutils",
+ "libcutils",
+ "liblog",
+ "libutils",
+ "libhardware",
+ "libbase",
+ "libfmq",
+ "libhwbinder",
+ "libhidlbase",
+ "libhidlmemory",
+ "libhidltransport",
+ "android.hardware.audio@2.0",
+ "android.hardware.audio.common@2.0",
+ "android.hardware.audio.common@2.0-util",
+ "android.hardware.audio.effect@2.0",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "libmedia_helper",
+ "libmediautils",
+ ],
+ header_libs: [
+ "libaudiohal_headers"
+ ],
+
+ export_shared_lib_headers: [
+ "libfmq",
+ ],
+}
diff --git a/media/libaudiohal/ConversionHelperHidl.cpp b/media/libaudiohal/2.0/ConversionHelperHidl.cpp
similarity index 100%
rename from media/libaudiohal/ConversionHelperHidl.cpp
rename to media/libaudiohal/2.0/ConversionHelperHidl.cpp
diff --git a/media/libaudiohal/ConversionHelperHidl.h b/media/libaudiohal/2.0/ConversionHelperHidl.h
similarity index 100%
rename from media/libaudiohal/ConversionHelperHidl.h
rename to media/libaudiohal/2.0/ConversionHelperHidl.h
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.cpp b/media/libaudiohal/2.0/DeviceHalHidl.cpp
new file mode 100644
index 0000000..5b99d70
--- /dev/null
+++ b/media/libaudiohal/2.0/DeviceHalHidl.cpp
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#define LOG_TAG "DeviceHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IPrimaryDevice.h>
+#include <cutils/native_handle.h>
+#include <hwbinder/IPCThreadState.h>
+#include <utils/Log.h>
+
+#include "DeviceHalHidl.h"
+#include "HidlUtils.h"
+#include "StreamHalHidl.h"
+
+using ::android::hardware::audio::common::V2_0::AudioConfig;
+using ::android::hardware::audio::common::V2_0::AudioDevice;
+using ::android::hardware::audio::common::V2_0::AudioInputFlag;
+using ::android::hardware::audio::common::V2_0::AudioOutputFlag;
+using ::android::hardware::audio::common::V2_0::AudioPatchHandle;
+using ::android::hardware::audio::common::V2_0::AudioPort;
+using ::android::hardware::audio::common::V2_0::AudioPortConfig;
+using ::android::hardware::audio::common::V2_0::AudioMode;
+using ::android::hardware::audio::common::V2_0::AudioSource;
+using ::android::hardware::audio::common::V2_0::HidlUtils;
+using ::android::hardware::audio::V2_0::DeviceAddress;
+using ::android::hardware::audio::V2_0::IPrimaryDevice;
+using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::audio::V2_0::Result;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+
+namespace android {
+
+namespace {
+
+status_t deviceAddressFromHal(
+ audio_devices_t device, const char* halAddress, DeviceAddress* address) {
+ address->device = AudioDevice(device);
+
+ if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+ return OK;
+ }
+ const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
+ if (isInput) device &= ~AUDIO_DEVICE_BIT_IN;
+ if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_A2DP) != 0)
+ || (isInput && (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
+ int status = sscanf(halAddress,
+ "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX",
+ &address->address.mac[0], &address->address.mac[1], &address->address.mac[2],
+ &address->address.mac[3], &address->address.mac[4], &address->address.mac[5]);
+ return status == 6 ? OK : BAD_VALUE;
+ } else if ((!isInput && (device & AUDIO_DEVICE_OUT_IP) != 0)
+ || (isInput && (device & AUDIO_DEVICE_IN_IP) != 0)) {
+ int status = sscanf(halAddress,
+ "%hhu.%hhu.%hhu.%hhu",
+ &address->address.ipv4[0], &address->address.ipv4[1],
+ &address->address.ipv4[2], &address->address.ipv4[3]);
+ return status == 4 ? OK : BAD_VALUE;
+ } else if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_USB)) != 0
+ || (isInput && (device & AUDIO_DEVICE_IN_ALL_USB)) != 0) {
+ int status = sscanf(halAddress,
+ "card=%d;device=%d",
+ &address->address.alsa.card, &address->address.alsa.device);
+ return status == 2 ? OK : BAD_VALUE;
+ } else if ((!isInput && (device & AUDIO_DEVICE_OUT_BUS) != 0)
+ || (isInput && (device & AUDIO_DEVICE_IN_BUS) != 0)) {
+ if (halAddress != NULL) {
+ address->busAddress = halAddress;
+ return OK;
+ }
+ return BAD_VALUE;
+ } else if ((!isInput && (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) != 0
+ || (isInput && (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
+ if (halAddress != NULL) {
+ address->rSubmixAddress = halAddress;
+ return OK;
+ }
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+} // namespace
+
+DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
+ : ConversionHelperHidl("Device"), mDevice(device),
+ mPrimaryDevice(IPrimaryDevice::castFrom(device)) {
+}
+
+DeviceHalHidl::~DeviceHalHidl() {
+ if (mDevice != 0) {
+ mDevice.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ }
+}
+
+status_t DeviceHalHidl::getSupportedDevices(uint32_t*) {
+ // Obsolete.
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalHidl::initCheck() {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("initCheck", mDevice->initCheck());
+}
+
+status_t DeviceHalHidl::setVoiceVolume(float volume) {
+ if (mDevice == 0) return NO_INIT;
+ if (mPrimaryDevice == 0) return INVALID_OPERATION;
+ return processReturn("setVoiceVolume", mPrimaryDevice->setVoiceVolume(volume));
+}
+
+status_t DeviceHalHidl::setMasterVolume(float volume) {
+ if (mDevice == 0) return NO_INIT;
+ if (mPrimaryDevice == 0) return INVALID_OPERATION;
+ return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
+}
+
+status_t DeviceHalHidl::getMasterVolume(float *volume) {
+ if (mDevice == 0) return NO_INIT;
+ if (mPrimaryDevice == 0) return INVALID_OPERATION;
+ Result retval;
+ Return<void> ret = mPrimaryDevice->getMasterVolume(
+ [&](Result r, float v) {
+ retval = r;
+ if (retval == Result::OK) {
+ *volume = v;
+ }
+ });
+ return processReturn("getMasterVolume", ret, retval);
+}
+
+status_t DeviceHalHidl::setMode(audio_mode_t mode) {
+ if (mDevice == 0) return NO_INIT;
+ if (mPrimaryDevice == 0) return INVALID_OPERATION;
+ return processReturn("setMode", mPrimaryDevice->setMode(AudioMode(mode)));
+}
+
+status_t DeviceHalHidl::setMicMute(bool state) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("setMicMute", mDevice->setMicMute(state));
+}
+
+status_t DeviceHalHidl::getMicMute(bool *state) {
+ if (mDevice == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mDevice->getMicMute(
+ [&](Result r, bool mute) {
+ retval = r;
+ if (retval == Result::OK) {
+ *state = mute;
+ }
+ });
+ return processReturn("getMicMute", ret, retval);
+}
+
+status_t DeviceHalHidl::setMasterMute(bool state) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("setMasterMute", mDevice->setMasterMute(state));
+}
+
+status_t DeviceHalHidl::getMasterMute(bool *state) {
+ if (mDevice == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mDevice->getMasterMute(
+ [&](Result r, bool mute) {
+ retval = r;
+ if (retval == Result::OK) {
+ *state = mute;
+ }
+ });
+ return processReturn("getMasterMute", ret, retval);
+}
+
+status_t DeviceHalHidl::setParameters(const String8& kvPairs) {
+ if (mDevice == 0) return NO_INIT;
+ hidl_vec<ParameterValue> hidlParams;
+ status_t status = parametersFromHal(kvPairs, &hidlParams);
+ if (status != OK) return status;
+ return processReturn("setParameters", mDevice->setParameters(hidlParams));
+}
+
+status_t DeviceHalHidl::getParameters(const String8& keys, String8 *values) {
+ values->clear();
+ if (mDevice == 0) return NO_INIT;
+ hidl_vec<hidl_string> hidlKeys;
+ status_t status = keysFromHal(keys, &hidlKeys);
+ if (status != OK) return status;
+ Result retval;
+ Return<void> ret = mDevice->getParameters(
+ hidlKeys,
+ [&](Result r, const hidl_vec<ParameterValue>& parameters) {
+ retval = r;
+ if (retval == Result::OK) {
+ parametersToHal(parameters, values);
+ }
+ });
+ return processReturn("getParameters", ret, retval);
+}
+
+status_t DeviceHalHidl::getInputBufferSize(
+ const struct audio_config *config, size_t *size) {
+ if (mDevice == 0) return NO_INIT;
+ AudioConfig hidlConfig;
+ HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+ Result retval;
+ Return<void> ret = mDevice->getInputBufferSize(
+ hidlConfig,
+ [&](Result r, uint64_t bufferSize) {
+ retval = r;
+ if (retval == Result::OK) {
+ *size = static_cast<size_t>(bufferSize);
+ }
+ });
+ return processReturn("getInputBufferSize", ret, retval);
+}
+
+status_t DeviceHalHidl::openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream) {
+ if (mDevice == 0) return NO_INIT;
+ DeviceAddress hidlDevice;
+ status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+ if (status != OK) return status;
+ AudioConfig hidlConfig;
+ HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mDevice->openOutputStream(
+ handle,
+ hidlDevice,
+ hidlConfig,
+ AudioOutputFlag(flags),
+ [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
+ retval = r;
+ if (retval == Result::OK) {
+ *outStream = new StreamOutHalHidl(result);
+ }
+ HidlUtils::audioConfigToHal(suggestedConfig, config);
+ });
+ return processReturn("openOutputStream", ret, retval);
+}
+
+status_t DeviceHalHidl::openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream) {
+ if (mDevice == 0) return NO_INIT;
+ DeviceAddress hidlDevice;
+ status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+ if (status != OK) return status;
+ AudioConfig hidlConfig;
+ HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mDevice->openInputStream(
+ handle,
+ hidlDevice,
+ hidlConfig,
+ AudioInputFlag(flags),
+ AudioSource(source),
+ [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
+ retval = r;
+ if (retval == Result::OK) {
+ *inStream = new StreamInHalHidl(result);
+ }
+ HidlUtils::audioConfigToHal(suggestedConfig, config);
+ });
+ return processReturn("openInputStream", ret, retval);
+}
+
+status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
+}
+
+status_t DeviceHalHidl::createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch) {
+ if (mDevice == 0) return NO_INIT;
+ hidl_vec<AudioPortConfig> hidlSources, hidlSinks;
+ HidlUtils::audioPortConfigsFromHal(num_sources, sources, &hidlSources);
+ HidlUtils::audioPortConfigsFromHal(num_sinks, sinks, &hidlSinks);
+ Result retval;
+ Return<void> ret = mDevice->createAudioPatch(
+ hidlSources, hidlSinks,
+ [&](Result r, AudioPatchHandle hidlPatch) {
+ retval = r;
+ if (retval == Result::OK) {
+ *patch = static_cast<audio_patch_handle_t>(hidlPatch);
+ }
+ });
+ return processReturn("createAudioPatch", ret, retval);
+}
+
+status_t DeviceHalHidl::releaseAudioPatch(audio_patch_handle_t patch) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
+}
+
+status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
+ if (mDevice == 0) return NO_INIT;
+ AudioPort hidlPort;
+ HidlUtils::audioPortFromHal(*port, &hidlPort);
+ Result retval;
+ Return<void> ret = mDevice->getAudioPort(
+ hidlPort,
+ [&](Result r, const AudioPort& p) {
+ retval = r;
+ if (retval == Result::OK) {
+ HidlUtils::audioPortToHal(p, port);
+ }
+ });
+ return processReturn("getAudioPort", ret, retval);
+}
+
+status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
+ if (mDevice == 0) return NO_INIT;
+ AudioPortConfig hidlConfig;
+ HidlUtils::audioPortConfigFromHal(*config, &hidlConfig);
+ return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
+}
+
+status_t DeviceHalHidl::getMicrophones(
+ std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
+ if (mDevice == 0) return NO_INIT;
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalHidl::dump(int fd) {
+ if (mDevice == 0) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mDevice->debugDump(hidlHandle);
+ native_handle_delete(hidlHandle);
+ return processReturn("dump", ret);
+}
+
+} // namespace android
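
For reference, the address formats accepted by the file-local deviceAddressFromHal() helper above, matching its sscanf patterns; the helper is not exported and the concrete values here are made up:

    // Illustrative only -- example HAL address strings per device class.
    DeviceAddress addr;
    deviceAddressFromHal(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, "00:11:22:33:44:55", &addr); // MAC -> address.mac
    deviceAddressFromHal(AUDIO_DEVICE_OUT_IP,             "192.168.1.10",      &addr); // IPv4 -> address.ipv4
    deviceAddressFromHal(AUDIO_DEVICE_OUT_USB_DEVICE,     "card=1;device=0",   &addr); // ALSA -> address.alsa
    deviceAddressFromHal(AUDIO_DEVICE_OUT_BUS,            "BUS00_MEDIA",       &addr); // free-form -> busAddress
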
diff --git a/media/libaudiohal/2.0/DeviceHalHidl.h b/media/libaudiohal/2.0/DeviceHalHidl.h
new file mode 100644
index 0000000..3c1cb59
--- /dev/null
+++ b/media/libaudiohal/2.0/DeviceHalHidl.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
+#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
+
+#include <android/hardware/audio/2.0/IDevice.h>
+#include <android/hardware/audio/2.0/IPrimaryDevice.h>
+#include <media/audiohal/DeviceHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+
+using ::android::hardware::audio::V2_0::IDevice;
+using ::android::hardware::audio::V2_0::IPrimaryDevice;
+using ::android::hardware::Return;
+
+namespace android {
+
+class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
+{
+ public:
+ // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+ virtual status_t getSupportedDevices(uint32_t *devices);
+
+ // Check to see if the audio hardware interface has been initialized.
+ virtual status_t initCheck();
+
+ // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+ virtual status_t setVoiceVolume(float volume);
+
+ // Set the audio volume for all audio activities other than voice call.
+ virtual status_t setMasterVolume(float volume);
+
+ // Get the current master volume value for the HAL.
+ virtual status_t getMasterVolume(float *volume);
+
+ // Called when the audio mode changes.
+ virtual status_t setMode(audio_mode_t mode);
+
+ // Muting control.
+ virtual status_t setMicMute(bool state);
+ virtual status_t getMicMute(bool *state);
+ virtual status_t setMasterMute(bool state);
+ virtual status_t getMasterMute(bool *state);
+
+ // Set global audio parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get global audio parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Returns audio input buffer size according to parameters passed.
+ virtual status_t getInputBufferSize(const struct audio_config *config,
+ size_t *size);
+
+ // Creates and opens the audio hardware output stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream);
+
+ // Creates and opens the audio hardware input stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream);
+
+ // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+ virtual status_t supportsAudioPatches(bool *supportsPatches);
+
+ // Creates an audio patch between several source and sink ports.
+ virtual status_t createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch);
+
+ // Releases an audio patch.
+ virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+
+ // Fills the list of supported attributes for a given audio port.
+ virtual status_t getAudioPort(struct audio_port *port);
+
+ // Set audio port configuration.
+ virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ virtual status_t dump(int fd);
+
+ private:
+ friend class DevicesFactoryHalHidl;
+ sp<IDevice> mDevice;
+ sp<IPrimaryDevice> mPrimaryDevice; // Null if it's not a primary device.
+
+ // Can not be constructed directly by clients.
+ explicit DeviceHalHidl(const sp<IDevice>& device);
+
+ // The destructor automatically closes the device.
+ virtual ~DeviceHalHidl();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
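
A hypothetical caller-side sketch of the interface declared above (not part of this patch): opening an output stream once a DeviceHalInterface has been obtained from a factory. The handle, device, and config values are placeholders.

    // Sketch only; 'device' is a sp<DeviceHalInterface>, 'ioHandle' an audio_io_handle_t.
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate  = 48000;
    config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    config.format       = AUDIO_FORMAT_PCM_16_BIT;
    sp<StreamOutHalInterface> stream;
    status_t status = device->openOutputStream(
            ioHandle, AUDIO_DEVICE_OUT_SPEAKER, AUDIO_OUTPUT_FLAG_NONE,
            &config, /*address*/ "", &stream);
    // On success the HAL may have rewritten 'config' with its suggested parameters.
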
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.cpp b/media/libaudiohal/2.0/DeviceHalLocal.cpp
new file mode 100644
index 0000000..ec3bf78
--- /dev/null
+++ b/media/libaudiohal/2.0/DeviceHalLocal.cpp
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DeviceHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "StreamHalLocal.h"
+
+namespace android {
+
+DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
+ : mDev(dev) {
+}
+
+DeviceHalLocal::~DeviceHalLocal() {
+ int status = audio_hw_device_close(mDev);
+ ALOGW_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
+ mDev = 0;
+}
+
+status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
+ if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
+ *devices = mDev->get_supported_devices(mDev);
+ return OK;
+}
+
+status_t DeviceHalLocal::initCheck() {
+ return mDev->init_check(mDev);
+}
+
+status_t DeviceHalLocal::setVoiceVolume(float volume) {
+ return mDev->set_voice_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMasterVolume(float volume) {
+ if (mDev->set_master_volume == NULL) return INVALID_OPERATION;
+ return mDev->set_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::getMasterVolume(float *volume) {
+ if (mDev->get_master_volume == NULL) return INVALID_OPERATION;
+ return mDev->get_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMode(audio_mode_t mode) {
+ return mDev->set_mode(mDev, mode);
+}
+
+status_t DeviceHalLocal::setMicMute(bool state) {
+ return mDev->set_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMicMute(bool *state) {
+ return mDev->get_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setMasterMute(bool state) {
+ if (mDev->set_master_mute == NULL) return INVALID_OPERATION;
+ return mDev->set_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMasterMute(bool *state) {
+ if (mDev->get_master_mute == NULL) return INVALID_OPERATION;
+ return mDev->get_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setParameters(const String8& kvPairs) {
+ return mDev->set_parameters(mDev, kvPairs.string());
+}
+
+status_t DeviceHalLocal::getParameters(const String8& keys, String8 *values) {
+ char *halValues = mDev->get_parameters(mDev, keys.string());
+ if (halValues != NULL) {
+ values->setTo(halValues);
+ free(halValues);
+ } else {
+ values->clear();
+ }
+ return OK;
+}
+
+status_t DeviceHalLocal::getInputBufferSize(
+ const struct audio_config *config, size_t *size) {
+ *size = mDev->get_input_buffer_size(mDev, config);
+ return OK;
+}
+
+status_t DeviceHalLocal::openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream) {
+ audio_stream_out_t *halStream;
+ ALOGV("open_output_stream handle: %d devices: %x flags: %#x"
+ "srate: %d format %#x channels %x address %s",
+ handle, devices, flags,
+ config->sample_rate, config->format, config->channel_mask,
+ address);
+ int openResut = mDev->open_output_stream(
+ mDev, handle, devices, flags, config, &halStream, address);
+ if (openResut == OK) {
+ *outStream = new StreamOutHalLocal(halStream, this);
+ }
+ ALOGV("open_output_stream status %d stream %p", openResut, halStream);
+ return openResut;
+}
+
+status_t DeviceHalLocal::openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream) {
+ audio_stream_in_t *halStream;
+ ALOGV("open_input_stream handle: %d devices: %x flags: %#x "
+ "srate: %d format %#x channels %x address %s source %d",
+ handle, devices, flags,
+ config->sample_rate, config->format, config->channel_mask,
+ address, source);
+ int openResult = mDev->open_input_stream(
+ mDev, handle, devices, config, &halStream, flags, address, source);
+ if (openResult == OK) {
+ *inStream = new StreamInHalLocal(halStream, this);
+ }
+ ALOGV("open_input_stream status %d stream %p", openResult, inStream);
+ return openResult;
+}
+
+status_t DeviceHalLocal::supportsAudioPatches(bool *supportsPatches) {
+ *supportsPatches = version() >= AUDIO_DEVICE_API_VERSION_3_0;
+ return OK;
+}
+
+status_t DeviceHalLocal::createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch) {
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+ return mDev->create_audio_patch(
+ mDev, num_sources, sources, num_sinks, sinks, patch);
+ } else {
+ return INVALID_OPERATION;
+ }
+}
+
+status_t DeviceHalLocal::releaseAudioPatch(audio_patch_handle_t patch) {
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+ return mDev->release_audio_patch(mDev, patch);
+ } else {
+ return INVALID_OPERATION;
+ }
+}
+
+status_t DeviceHalLocal::getAudioPort(struct audio_port *port) {
+ return mDev->get_audio_port(mDev, port);
+}
+
+status_t DeviceHalLocal::setAudioPortConfig(const struct audio_port_config *config) {
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_0)
+ return mDev->set_audio_port_config(mDev, config);
+ else
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalLocal::getMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalLocal::dump(int fd) {
+ return mDev->dump(mDev, fd);
+}
+
+void DeviceHalLocal::closeOutputStream(struct audio_stream_out *stream_out) {
+ mDev->close_output_stream(mDev, stream_out);
+}
+
+void DeviceHalLocal::closeInputStream(struct audio_stream_in *stream_in) {
+ mDev->close_input_stream(mDev, stream_in);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/2.0/DeviceHalLocal.h b/media/libaudiohal/2.0/DeviceHalLocal.h
new file mode 100644
index 0000000..aec201a
--- /dev/null
+++ b/media/libaudiohal/2.0/DeviceHalLocal.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
+#define ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
+
+#include <hardware/audio.h>
+#include <media/audiohal/DeviceHalInterface.h>
+
+namespace android {
+
+class DeviceHalLocal : public DeviceHalInterface
+{
+ public:
+ // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+ virtual status_t getSupportedDevices(uint32_t *devices);
+
+ // Check to see if the audio hardware interface has been initialized.
+ virtual status_t initCheck();
+
+ // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+ virtual status_t setVoiceVolume(float volume);
+
+ // Set the audio volume for all audio activities other than voice call.
+ virtual status_t setMasterVolume(float volume);
+
+ // Get the current master volume value for the HAL.
+ virtual status_t getMasterVolume(float *volume);
+
+ // Called when the audio mode changes.
+ virtual status_t setMode(audio_mode_t mode);
+
+ // Muting control.
+ virtual status_t setMicMute(bool state);
+ virtual status_t getMicMute(bool *state);
+ virtual status_t setMasterMute(bool state);
+ virtual status_t getMasterMute(bool *state);
+
+ // Set global audio parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get global audio parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Returns audio input buffer size according to parameters passed.
+ virtual status_t getInputBufferSize(const struct audio_config *config,
+ size_t *size);
+
+ // Creates and opens the audio hardware output stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream);
+
+ // Creates and opens the audio hardware input stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream);
+
+ // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+ virtual status_t supportsAudioPatches(bool *supportsPatches);
+
+ // Creates an audio patch between several source and sink ports.
+ virtual status_t createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch);
+
+ // Releases an audio patch.
+ virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+
+ // Fills the list of supported attributes for a given audio port.
+ virtual status_t getAudioPort(struct audio_port *port);
+
+ // Set audio port configuration.
+ virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ virtual status_t dump(int fd);
+
+ void closeOutputStream(struct audio_stream_out *stream_out);
+ void closeInputStream(struct audio_stream_in *stream_in);
+
+ private:
+ audio_hw_device_t *mDev;
+
+ friend class DevicesFactoryHalLocal;
+
+ // Can not be constructed directly by clients.
+ explicit DeviceHalLocal(audio_hw_device_t *dev);
+
+ // The destructor automatically closes the device.
+ virtual ~DeviceHalLocal();
+
+ uint32_t version() const { return mDev->common.version; }
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp b/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp
new file mode 100644
index 0000000..5b33592
--- /dev/null
+++ b/media/libaudiohal/2.0/DevicesFactoryHalHidl.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+
+#define LOG_TAG "DevicesFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IDevice.h>
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "DeviceHalHidl.h"
+#include "DevicesFactoryHalHidl.h"
+
+using ::android::hardware::audio::V2_0::IDevice;
+using ::android::hardware::audio::V2_0::Result;
+using ::android::hardware::Return;
+
+namespace android {
+
+DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
+ mDevicesFactory = IDevicesFactory::getService();
+ if (mDevicesFactory != 0) {
+ // It is assumed that DevicesFactory is owned by AudioFlinger
+ // and thus has the same lifespan.
+ mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+ } else {
+ ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
+ exit(1);
+ }
+ // The MSD factory is optional
+ mDevicesFactoryMsd = IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD);
+ // TODO: Register death handler, and add 'restart' directive to audioserver.rc
+}
+
+DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
+}
+
+// static
+status_t DevicesFactoryHalHidl::nameFromHal(const char *name, IDevicesFactory::Device *device) {
+ if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
+ *device = IDevicesFactory::Device::PRIMARY;
+ return OK;
+ } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
+ *device = IDevicesFactory::Device::A2DP;
+ return OK;
+ } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
+ *device = IDevicesFactory::Device::USB;
+ return OK;
+ } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
+ *device = IDevicesFactory::Device::R_SUBMIX;
+ return OK;
+ } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
+ *device = IDevicesFactory::Device::STUB;
+ return OK;
+ }
+ ALOGE("Invalid device name %s", name);
+ return BAD_VALUE;
+}
+
+status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ if (mDevicesFactory == 0) return NO_INIT;
+ IDevicesFactory::Device hidlDevice;
+ status_t status = nameFromHal(name, &hidlDevice);
+ if (status != OK) return status;
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mDevicesFactory->openDevice(
+ hidlDevice,
+ [&](Result r, const sp<IDevice>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ *device = new DeviceHalHidl(result);
+ }
+ });
+ if (ret.isOk()) {
+ if (retval == Result::OK) return OK;
+ else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
+ else return NO_INIT;
+ }
+ return FAILED_TRANSACTION;
+}
+
+} // namespace android
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHidl.h b/media/libaudiohal/2.0/DevicesFactoryHalHidl.h
new file mode 100644
index 0000000..0748849
--- /dev/null
+++ b/media/libaudiohal/2.0/DevicesFactoryHalHidl.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
+
+#include <android/hardware/audio/2.0/IDevicesFactory.h>
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "DeviceHalHidl.h"
+
+using ::android::hardware::audio::V2_0::IDevicesFactory;
+
+namespace android {
+
+class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
+{
+ public:
+ // Opens a device with the specified name. To close the device, it is
+ // necessary to release references to the returned object.
+ virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+ private:
+ friend class DevicesFactoryHalHybrid;
+
+ sp<IDevicesFactory> mDevicesFactory;
+ sp<IDevicesFactory> mDevicesFactoryMsd;
+
+ static status_t nameFromHal(const char *name, IDevicesFactory::Device *device);
+
+ // Can not be constructed directly by clients.
+ DevicesFactoryHalHidl();
+
+ virtual ~DevicesFactoryHalHidl();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
new file mode 100644
index 0000000..1c4be74
--- /dev/null
+++ b/media/libaudiohal/2.0/DevicesFactoryHalHybrid.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DevicesFactoryHalHybrid"
+//#define LOG_NDEBUG 0
+
+#include "DevicesFactoryHalHybrid.h"
+#include "DevicesFactoryHalLocal.h"
+#include "DevicesFactoryHalHidl.h"
+
+namespace android {
+
+DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
+ : mLocalFactory(new DevicesFactoryHalLocal()),
+ mHidlFactory(new DevicesFactoryHalHidl()) {
+}
+
+DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
+}
+
+status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
+ strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
+ return mHidlFactory->openDevice(name, device);
+ }
+ return mLocalFactory->openDevice(name, device);
+}
+
+} // namespace android
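
For context, the hybrid policy above sends every HAL module to the HIDL factory except A2DP and hearing aid, which stay on the legacy in-process path. A minimal restatement of that routing decision, for illustration only:

    // Mirrors DevicesFactoryHalHybrid::openDevice() above; illustrative, not part of the patch.
    static bool servedByHidlFactory(const char* name) {
        return strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) != 0 &&
               strcmp(name, AUDIO_HARDWARE_MODULE_ID_HEARING_AID) != 0;
    }
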
diff --git a/media/libaudiohal/DevicesFactoryHalHybrid.h b/media/libaudiohal/2.0/DevicesFactoryHalHybrid.h
similarity index 100%
rename from media/libaudiohal/DevicesFactoryHalHybrid.h
rename to media/libaudiohal/2.0/DevicesFactoryHalHybrid.h
diff --git a/media/libaudiohal/DevicesFactoryHalLocal.cpp b/media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp
similarity index 100%
rename from media/libaudiohal/DevicesFactoryHalLocal.cpp
rename to media/libaudiohal/2.0/DevicesFactoryHalLocal.cpp
diff --git a/media/libaudiohal/DevicesFactoryHalLocal.h b/media/libaudiohal/2.0/DevicesFactoryHalLocal.h
similarity index 100%
rename from media/libaudiohal/DevicesFactoryHalLocal.h
rename to media/libaudiohal/2.0/DevicesFactoryHalLocal.h
diff --git a/media/libaudiohal/2.0/EffectBufferHalHidl.cpp b/media/libaudiohal/2.0/EffectBufferHalHidl.cpp
new file mode 100644
index 0000000..226a500
--- /dev/null
+++ b/media/libaudiohal/2.0/EffectBufferHalHidl.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+
+#define LOG_TAG "EffectBufferHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <hidlmemory/mapping.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+
+using ::android::hardware::Return;
+using ::android::hidl::allocator::V1_0::IAllocator;
+
+namespace android {
+
+// static
+uint64_t EffectBufferHalHidl::makeUniqueId() {
+ static std::atomic<uint64_t> counter{1};
+ return counter++;
+}
+
+status_t EffectBufferHalHidl::allocate(
+ size_t size, sp<EffectBufferHalInterface>* buffer) {
+ return mirror(nullptr, size, buffer);
+}
+
+status_t EffectBufferHalHidl::mirror(
+ void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
+ sp<EffectBufferHalInterface> tempBuffer = new EffectBufferHalHidl(size);
+ status_t result = static_cast<EffectBufferHalHidl*>(tempBuffer.get())->init();
+ if (result == OK) {
+ tempBuffer->setExternalData(external);
+ *buffer = tempBuffer;
+ }
+ return result;
+}
+
+EffectBufferHalHidl::EffectBufferHalHidl(size_t size)
+ : mBufferSize(size), mFrameCountChanged(false),
+ mExternalData(nullptr), mAudioBuffer{0, {nullptr}} {
+ mHidlBuffer.id = makeUniqueId();
+ mHidlBuffer.frameCount = 0;
+}
+
+EffectBufferHalHidl::~EffectBufferHalHidl() {
+}
+
+status_t EffectBufferHalHidl::init() {
+ sp<IAllocator> ashmem = IAllocator::getService("ashmem");
+ if (ashmem == 0) {
+ ALOGE("Failed to retrieve ashmem allocator service");
+ return NO_INIT;
+ }
+ status_t retval = NO_MEMORY;
+ Return<void> result = ashmem->allocate(
+ mBufferSize,
+ [&](bool success, const hidl_memory& memory) {
+ if (success) {
+ mHidlBuffer.data = memory;
+ retval = OK;
+ }
+ });
+ if (result.isOk() && retval == OK) {
+ mMemory = hardware::mapMemory(mHidlBuffer.data);
+ if (mMemory != 0) {
+ mMemory->update();
+ mAudioBuffer.raw = static_cast<void*>(mMemory->getPointer());
+ memset(mAudioBuffer.raw, 0, mMemory->getSize());
+ mMemory->commit();
+ } else {
+ ALOGE("Failed to map allocated ashmem");
+ retval = NO_MEMORY;
+ }
+ } else {
+ ALOGE("Failed to allocate %d bytes from ashmem", (int)mBufferSize);
+ }
+ return result.isOk() ? retval : FAILED_TRANSACTION;
+}
+
+audio_buffer_t* EffectBufferHalHidl::audioBuffer() {
+ return &mAudioBuffer;
+}
+
+void* EffectBufferHalHidl::externalData() const {
+ return mExternalData;
+}
+
+void EffectBufferHalHidl::setFrameCount(size_t frameCount) {
+ mHidlBuffer.frameCount = frameCount;
+ mAudioBuffer.frameCount = frameCount;
+ mFrameCountChanged = true;
+}
+
+bool EffectBufferHalHidl::checkFrameCountChange() {
+ bool result = mFrameCountChanged;
+ mFrameCountChanged = false;
+ return result;
+}
+
+void EffectBufferHalHidl::setExternalData(void* external) {
+ mExternalData = external;
+}
+
+void EffectBufferHalHidl::update() {
+ update(mBufferSize);
+}
+
+void EffectBufferHalHidl::commit() {
+ commit(mBufferSize);
+}
+
+void EffectBufferHalHidl::update(size_t size) {
+ if (mExternalData == nullptr) return;
+ mMemory->update();
+ if (size > mBufferSize) size = mBufferSize;
+ memcpy(mAudioBuffer.raw, mExternalData, size);
+ mMemory->commit();
+}
+
+void EffectBufferHalHidl::commit(size_t size) {
+ if (mExternalData == nullptr) return;
+ if (size > mBufferSize) size = mBufferSize;
+ memcpy(mExternalData, mAudioBuffer.raw, size);
+}
+
+} // namespace android
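
A hypothetical usage sketch of the buffer mirroring implemented above (not part of the patch): update() copies the client-owned buffer into the shared ashmem region, and commit() copies processed data back out. 'clientData' and 'bytes' are placeholders.

    // Sketch only: mirror a client buffer, process it through an effect, copy results back.
    sp<EffectBufferHalInterface> buffer;
    if (EffectBufferHalHidl::mirror(clientData, bytes, &buffer) == OK) {
        buffer->update();   // client memory -> shared HIDL memory
        // ... hand buffer->audioBuffer() / hidlBuffer() to the effect HAL for processing ...
        buffer->commit();   // shared HIDL memory -> client memory
    }
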
diff --git a/media/libaudiohal/2.0/EffectBufferHalHidl.h b/media/libaudiohal/2.0/EffectBufferHalHidl.h
new file mode 100644
index 0000000..31e0087
--- /dev/null
+++ b/media/libaudiohal/2.0/EffectBufferHalHidl.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
+#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
+
+#include <android/hardware/audio/effect/2.0/types.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidl/HidlSupport.h>
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <system/audio_effect.h>
+
+using android::hardware::audio::effect::V2_0::AudioBuffer;
+using android::hardware::hidl_memory;
+using android::hidl::memory::V1_0::IMemory;
+
+namespace android {
+
+class EffectBufferHalHidl : public EffectBufferHalInterface
+{
+ public:
+ static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
+ static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
+
+ virtual audio_buffer_t* audioBuffer();
+ virtual void* externalData() const;
+
+ virtual size_t getSize() const override { return mBufferSize; }
+
+ virtual void setExternalData(void* external);
+ virtual void setFrameCount(size_t frameCount);
+ virtual bool checkFrameCountChange();
+
+ virtual void update();
+ virtual void commit();
+ virtual void update(size_t size);
+ virtual void commit(size_t size);
+
+ const AudioBuffer& hidlBuffer() const { return mHidlBuffer; }
+
+ private:
+ friend class EffectBufferHalInterface;
+
+ static uint64_t makeUniqueId();
+
+ const size_t mBufferSize;
+ bool mFrameCountChanged;
+ void* mExternalData;
+ AudioBuffer mHidlBuffer;
+ sp<IMemory> mMemory;
+ audio_buffer_t mAudioBuffer;
+
+ // Can not be constructed directly by clients.
+ explicit EffectBufferHalHidl(size_t size);
+
+ virtual ~EffectBufferHalHidl();
+
+ status_t init();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
diff --git a/media/libaudiohal/2.0/EffectHalHidl.cpp b/media/libaudiohal/2.0/EffectHalHidl.cpp
new file mode 100644
index 0000000..4fb032c
--- /dev/null
+++ b/media/libaudiohal/2.0/EffectHalHidl.cpp
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <hwbinder/IPCThreadState.h>
+#include <media/EffectsFactoryApi.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+#include "EffectHalHidl.h"
+#include "HidlUtils.h"
+
+using ::android::hardware::audio::effect::V2_0::AudioBuffer;
+using ::android::hardware::audio::effect::V2_0::EffectBufferAccess;
+using ::android::hardware::audio::effect::V2_0::EffectConfigParameters;
+using ::android::hardware::audio::effect::V2_0::MessageQueueFlagBits;
+using ::android::hardware::audio::effect::V2_0::Result;
+using ::android::hardware::audio::common::V2_0::HidlUtils;
+using ::android::hardware::audio::common::V2_0::AudioChannelMask;
+using ::android::hardware::audio::common::V2_0::AudioFormat;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+
+namespace android {
+
+EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
+ : mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
+}
+
+EffectHalHidl::~EffectHalHidl() {
+ if (mEffect != 0) {
+ close();
+ mEffect.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ }
+ if (mEfGroup) {
+ EventFlag::deleteEventFlag(&mEfGroup);
+ }
+}
+
+// static
+void EffectHalHidl::effectDescriptorToHal(
+ const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
+ HidlUtils::uuidToHal(descriptor.type, &halDescriptor->type);
+ HidlUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
+ halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
+ halDescriptor->cpuLoad = descriptor.cpuLoad;
+ halDescriptor->memoryUsage = descriptor.memoryUsage;
+ memcpy(halDescriptor->name, descriptor.name.data(), descriptor.name.size());
+ memcpy(halDescriptor->implementor,
+ descriptor.implementor.data(), descriptor.implementor.size());
+}
+
+// TODO(mnaganov): These buffer conversion functions should be shared with the Effect wrapper
+// via HidlUtils. Move them there when hardware/interfaces gets unfrozen again.
+
+// static
+void EffectHalHidl::effectBufferConfigFromHal(
+ const buffer_config_t& halConfig, EffectBufferConfig* config) {
+ config->samplingRateHz = halConfig.samplingRate;
+ config->channels = AudioChannelMask(halConfig.channels);
+ config->format = AudioFormat(halConfig.format);
+ config->accessMode = EffectBufferAccess(halConfig.accessMode);
+ config->mask = EffectConfigParameters(halConfig.mask);
+}
+
+// static
+void EffectHalHidl::effectBufferConfigToHal(
+ const EffectBufferConfig& config, buffer_config_t* halConfig) {
+ halConfig->buffer.frameCount = 0;
+ halConfig->buffer.raw = NULL;
+ halConfig->samplingRate = config.samplingRateHz;
+ halConfig->channels = static_cast<uint32_t>(config.channels);
+ halConfig->bufferProvider.cookie = NULL;
+ halConfig->bufferProvider.getBuffer = NULL;
+ halConfig->bufferProvider.releaseBuffer = NULL;
+ halConfig->format = static_cast<uint8_t>(config.format);
+ halConfig->accessMode = static_cast<uint8_t>(config.accessMode);
+ halConfig->mask = static_cast<uint8_t>(config.mask);
+}
+
+// static
+void EffectHalHidl::effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config) {
+ effectBufferConfigFromHal(halConfig.inputCfg, &config->inputCfg);
+ effectBufferConfigFromHal(halConfig.outputCfg, &config->outputCfg);
+}
+
+// static
+void EffectHalHidl::effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig) {
+ effectBufferConfigToHal(config.inputCfg, &halConfig->inputCfg);
+ effectBufferConfigToHal(config.outputCfg, &halConfig->outputCfg);
+}
+
+// static
+status_t EffectHalHidl::analyzeResult(const Result& result) {
+ switch (result) {
+ case Result::OK: return OK;
+ case Result::INVALID_ARGUMENTS: return BAD_VALUE;
+ case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
+ case Result::NOT_INITIALIZED: return NO_INIT;
+ case Result::NOT_SUPPORTED: return INVALID_OPERATION;
+ case Result::RESULT_TOO_BIG: return NO_MEMORY;
+ default: return NO_INIT;
+ }
+}
+
+status_t EffectHalHidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ if (!mBuffersChanged) {
+ if (buffer.get() == nullptr || mInBuffer.get() == nullptr) {
+ mBuffersChanged = buffer.get() != mInBuffer.get();
+ } else {
+ mBuffersChanged = buffer->audioBuffer() != mInBuffer->audioBuffer();
+ }
+ }
+ mInBuffer = buffer;
+ return OK;
+}
+
+status_t EffectHalHidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ if (!mBuffersChanged) {
+ if (buffer.get() == nullptr || mOutBuffer.get() == nullptr) {
+ mBuffersChanged = buffer.get() != mOutBuffer.get();
+ } else {
+ mBuffersChanged = buffer->audioBuffer() != mOutBuffer->audioBuffer();
+ }
+ }
+ mOutBuffer = buffer;
+ return OK;
+}
+
+status_t EffectHalHidl::process() {
+ return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
+}
+
+status_t EffectHalHidl::processReverse() {
+ return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS_REVERSE));
+}
+
+status_t EffectHalHidl::prepareForProcessing() {
+ std::unique_ptr<StatusMQ> tempStatusMQ;
+ Result retval;
+ Return<void> ret = mEffect->prepareForProcessing(
+ [&](Result r, const MQDescriptorSync<Result>& statusMQ) {
+ retval = r;
+ if (retval == Result::OK) {
+ tempStatusMQ.reset(new StatusMQ(statusMQ));
+ if (tempStatusMQ->isValid() && tempStatusMQ->getEventFlagWord()) {
+ EventFlag::createEventFlag(tempStatusMQ->getEventFlagWord(), &mEfGroup);
+ }
+ }
+ });
+ if (!ret.isOk() || retval != Result::OK) {
+ return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+ }
+ if (!tempStatusMQ || !tempStatusMQ->isValid() || !mEfGroup) {
+ ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for effects");
+ ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+ "Status message queue for effects is invalid");
+ ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
+ return NO_INIT;
+ }
+ mStatusMQ = std::move(tempStatusMQ);
+ return OK;
+}
+
+bool EffectHalHidl::needToResetBuffers() {
+ if (mBuffersChanged) return true;
+ bool inBufferFrameCountUpdated = mInBuffer->checkFrameCountChange();
+ bool outBufferFrameCountUpdated = mOutBuffer->checkFrameCountChange();
+ return inBufferFrameCountUpdated || outBufferFrameCountUpdated;
+}
+
+status_t EffectHalHidl::processImpl(uint32_t mqFlag) {
+ if (mEffect == 0 || mInBuffer == 0 || mOutBuffer == 0) return NO_INIT;
+ status_t status;
+ if (!mStatusMQ && (status = prepareForProcessing()) != OK) {
+ return status;
+ }
+ if (needToResetBuffers() && (status = setProcessBuffers()) != OK) {
+ return status;
+ }
+ // The data is already in the buffers, just need to flush it and wake up the server side.
+ std::atomic_thread_fence(std::memory_order_release);
+ mEfGroup->wake(mqFlag);
+ uint32_t efState = 0;
+retry:
+ status_t ret = mEfGroup->wait(
+ static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING), &efState);
+ if (efState & static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING)) {
+ Result retval = Result::NOT_INITIALIZED;
+ mStatusMQ->read(&retval);
+ if (retval == Result::OK || retval == Result::INVALID_STATE) {
+ // Sync back the changed contents of the buffer.
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ return analyzeResult(retval);
+ }
+ if (ret == -EAGAIN || ret == -EINTR) {
+ // Spurious wakeup. This normally retries no more than once.
+ goto retry;
+ }
+ return ret;
+}
+
+status_t EffectHalHidl::setProcessBuffers() {
+ Return<Result> ret = mEffect->setProcessBuffers(
+ static_cast<EffectBufferHalHidl*>(mInBuffer.get())->hidlBuffer(),
+ static_cast<EffectBufferHalHidl*>(mOutBuffer.get())->hidlBuffer());
+ if (ret.isOk() && ret == Result::OK) {
+ mBuffersChanged = false;
+ return OK;
+ }
+ return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+ uint32_t *replySize, void *pReplyData) {
+ if (mEffect == 0) return NO_INIT;
+
+ // Special cases.
+ if (cmdCode == EFFECT_CMD_SET_CONFIG || cmdCode == EFFECT_CMD_SET_CONFIG_REVERSE) {
+ return setConfigImpl(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ } else if (cmdCode == EFFECT_CMD_GET_CONFIG || cmdCode == EFFECT_CMD_GET_CONFIG_REVERSE) {
+ return getConfigImpl(cmdCode, replySize, pReplyData);
+ }
+
+ // Common case.
+ hidl_vec<uint8_t> hidlData;
+ if (pCmdData != nullptr && cmdSize > 0) {
+ hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
+ }
+ status_t status;
+ uint32_t replySizeStub = 0;
+ if (replySize == nullptr || pReplyData == nullptr) replySize = &replySizeStub;
+ Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
+ [&](int32_t s, const hidl_vec<uint8_t>& result) {
+ status = s;
+ if (status == 0) {
+ if (*replySize > result.size()) *replySize = result.size();
+ if (pReplyData != nullptr && *replySize > 0) {
+ memcpy(pReplyData, &result[0], *replySize);
+ }
+ }
+ });
+ return ret.isOk() ? status : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
+ if (mEffect == 0) return NO_INIT;
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffect->getDescriptor(
+ [&](Result r, const EffectDescriptor& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ effectDescriptorToHal(result, pDescriptor);
+ }
+ });
+ return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::close() {
+ if (mEffect == 0) return NO_INIT;
+ Return<Result> ret = mEffect->close();
+ return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::getConfigImpl(
+ uint32_t cmdCode, uint32_t *replySize, void *pReplyData) {
+ if (replySize == NULL || *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
+ return BAD_VALUE;
+ }
+ status_t result = FAILED_TRANSACTION;
+ Return<void> ret;
+ if (cmdCode == EFFECT_CMD_GET_CONFIG) {
+ ret = mEffect->getConfig([&] (Result r, const EffectConfig &hidlConfig) {
+ result = analyzeResult(r);
+ if (r == Result::OK) {
+ effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+ }
+ });
+ } else {
+ ret = mEffect->getConfigReverse([&] (Result r, const EffectConfig &hidlConfig) {
+ result = analyzeResult(r);
+ if (r == Result::OK) {
+ effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+ }
+ });
+ }
+ if (!ret.isOk()) {
+ result = FAILED_TRANSACTION;
+ }
+ return result;
+}
+
+status_t EffectHalHidl::setConfigImpl(
+ uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, void *pReplyData) {
+ if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) ||
+ replySize == NULL || *replySize != sizeof(int32_t) || pReplyData == NULL) {
+ return BAD_VALUE;
+ }
+ const effect_config_t *halConfig = static_cast<effect_config_t*>(pCmdData);
+ if (halConfig->inputCfg.bufferProvider.getBuffer != NULL ||
+ halConfig->inputCfg.bufferProvider.releaseBuffer != NULL ||
+ halConfig->outputCfg.bufferProvider.getBuffer != NULL ||
+ halConfig->outputCfg.bufferProvider.releaseBuffer != NULL) {
+ ALOGE("Buffer provider callbacks are not supported");
+ }
+ EffectConfig hidlConfig;
+ effectConfigFromHal(*halConfig, &hidlConfig);
+ Return<Result> ret = cmdCode == EFFECT_CMD_SET_CONFIG ?
+ mEffect->setConfig(hidlConfig, nullptr, nullptr) :
+ mEffect->setConfigReverse(hidlConfig, nullptr, nullptr);
+ status_t result = FAILED_TRANSACTION;
+ if (ret.isOk()) {
+ result = analyzeResult(ret);
+ *static_cast<int32_t*>(pReplyData) = result;
+ }
+ return result;
+}
+
+} // namespace android
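EffectHalHidl::process() above drives processing through an event flag pair: the client raises REQUEST_PROCESS, the HAL side answers with DONE_PROCESSING and posts a Result into the status message queue. A hedged sketch of one processing cycle from the client's perspective (effect, inBuffer and outBuffer are assumed to have been created elsewhere via the effects factory and buffer interfaces shown in this patch):

    effect->setInBuffer(inBuffer);
    effect->setOutBuffer(outBuffer);
    inBuffer->update();                     // for mirrored buffers, copy data into shared memory
    status_t status = effect->process();    // wakes the HAL and waits for DONE_PROCESSING
    if (status == OK) {
        outBuffer->commit();                // sync the processed data back to the external buffer
    }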
diff --git a/media/libaudiohal/EffectHalHidl.h b/media/libaudiohal/2.0/EffectHalHidl.h
similarity index 100%
rename from media/libaudiohal/EffectHalHidl.h
rename to media/libaudiohal/2.0/EffectHalHidl.h
diff --git a/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp b/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp
new file mode 100644
index 0000000..0d40e6d
--- /dev/null
+++ b/media/libaudiohal/2.0/EffectsFactoryHalHidl.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectsFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <cutils/native_handle.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+#include "EffectHalHidl.h"
+#include "EffectsFactoryHalHidl.h"
+#include "HidlUtils.h"
+
+using ::android::hardware::audio::common::V2_0::HidlUtils;
+using ::android::hardware::audio::common::V2_0::Uuid;
+using ::android::hardware::audio::effect::V2_0::IEffect;
+using ::android::hardware::audio::effect::V2_0::Result;
+using ::android::hardware::Return;
+
+namespace android {
+
+EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
+ mEffectsFactory = IEffectsFactory::getService();
+ if (mEffectsFactory == 0) {
+ ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
+ exit(1);
+ }
+}
+
+EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
+}
+
+status_t EffectsFactoryHalHidl::queryAllDescriptors() {
+ if (mEffectsFactory == 0) return NO_INIT;
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffectsFactory->getAllDescriptors(
+ [&](Result r, const hidl_vec<EffectDescriptor>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ mLastDescriptors = result;
+ }
+ });
+ if (ret.isOk()) {
+ return retval == Result::OK ? OK : NO_INIT;
+ }
+ mLastDescriptors.resize(0);
+ return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::queryNumberEffects(uint32_t *pNumEffects) {
+ status_t queryResult = queryAllDescriptors();
+ if (queryResult == OK) {
+ *pNumEffects = mLastDescriptors.size();
+ }
+ return queryResult;
+}
+
+status_t EffectsFactoryHalHidl::getDescriptor(
+ uint32_t index, effect_descriptor_t *pDescriptor) {
+ // TODO: We need to somehow track changes on the server side,
+ // or figure out how to convert all clients to query the full descriptor list at once.
+ // TODO: check for nullptr
+ if (mLastDescriptors.size() == 0) {
+ status_t queryResult = queryAllDescriptors();
+ if (queryResult != OK) return queryResult;
+ }
+ if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
+ EffectHalHidl::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
+ return OK;
+}
+
+status_t EffectsFactoryHalHidl::getDescriptor(
+ const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
+ // TODO: check for nullptr
+ if (mEffectsFactory == 0) return NO_INIT;
+ Uuid hidlUuid;
+ HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffectsFactory->getDescriptor(hidlUuid,
+ [&](Result r, const EffectDescriptor& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ EffectHalHidl::effectDescriptorToHal(result, pDescriptor);
+ }
+ });
+ if (ret.isOk()) {
+ if (retval == Result::OK) return OK;
+ else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
+ else return NO_INIT;
+ }
+ return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::createEffect(
+ const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
+ sp<EffectHalInterface> *effect) {
+ if (mEffectsFactory == 0) return NO_INIT;
+ Uuid hidlUuid;
+ HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffectsFactory->createEffect(
+ hidlUuid, sessionId, ioId,
+ [&](Result r, const sp<IEffect>& result, uint64_t effectId) {
+ retval = r;
+ if (retval == Result::OK) {
+ *effect = new EffectHalHidl(result, effectId);
+ }
+ });
+ if (ret.isOk()) {
+ if (retval == Result::OK) return OK;
+ else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
+ else return NO_INIT;
+ }
+ return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::dumpEffects(int fd) {
+ if (mEffectsFactory == 0) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mEffectsFactory->debugDump(hidlHandle);
+ native_handle_delete(hidlHandle);
+ return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) {
+ return EffectBufferHalHidl::allocate(size, buffer);
+}
+
+status_t EffectsFactoryHalHidl::mirrorBuffer(void* external, size_t size,
+ sp<EffectBufferHalInterface>* buffer) {
+ return EffectBufferHalHidl::mirror(external, size, buffer);
+}
+
+
+} // namespace android
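EffectsFactoryHalHidl caches the full descriptor list on the first query and resolves indices against it. A hedged sketch of the expected enumeration flow (factory, sessionId and ioId are placeholders for objects the caller already has):

    uint32_t numEffects = 0;
    if (factory->queryNumberEffects(&numEffects) != OK) return;
    for (uint32_t i = 0; i < numEffects; ++i) {
        effect_descriptor_t desc;
        if (factory->getDescriptor(i, &desc) != OK) continue;
        sp<EffectHalInterface> effect;
        // Create an instance by UUID for a given audio session and I/O handle.
        if (factory->createEffect(&desc.uuid, sessionId, ioId, &effect) == OK) {
            // configure the effect via command(), then attach it to a stream
        }
    }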
diff --git a/media/libaudiohal/2.0/EffectsFactoryHalHidl.h b/media/libaudiohal/2.0/EffectsFactoryHalHidl.h
new file mode 100644
index 0000000..82b5481
--- /dev/null
+++ b/media/libaudiohal/2.0/EffectsFactoryHalHidl.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
+
+#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/2.0/types.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+
+namespace android {
+
+using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
+using ::android::hardware::audio::effect::V2_0::IEffectsFactory;
+using ::android::hardware::hidl_vec;
+
+class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
+{
+ public:
+ // Returns the number of different effects in all loaded libraries.
+ virtual status_t queryNumberEffects(uint32_t *pNumEffects);
+
+ // Returns a descriptor of the next available effect.
+ virtual status_t getDescriptor(uint32_t index,
+ effect_descriptor_t *pDescriptor);
+
+ virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+ effect_descriptor_t *pDescriptor);
+
+ // Creates an effect engine of the specified type.
+ // To release the effect engine, it is necessary to release references
+ // to the returned effect object.
+ virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+ int32_t sessionId, int32_t ioId,
+ sp<EffectHalInterface> *effect);
+
+ virtual status_t dumpEffects(int fd);
+
+ status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) override;
+ status_t mirrorBuffer(void* external, size_t size,
+ sp<EffectBufferHalInterface>* buffer) override;
+
+ private:
+ friend class EffectsFactoryHalInterface;
+
+ sp<IEffectsFactory> mEffectsFactory;
+ hidl_vec<EffectDescriptor> mLastDescriptors;
+
+ // Can not be constructed directly by clients.
+ EffectsFactoryHalHidl();
+ virtual ~EffectsFactoryHalHidl();
+
+ status_t queryAllDescriptors();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/2.0/StreamHalHidl.cpp b/media/libaudiohal/2.0/StreamHalHidl.cpp
new file mode 100644
index 0000000..9869cd2
--- /dev/null
+++ b/media/libaudiohal/2.0/StreamHalHidl.cpp
@@ -0,0 +1,768 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "StreamHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IStreamOutCallback.h>
+#include <hwbinder/IPCThreadState.h>
+#include <mediautils/SchedulingPolicyService.h>
+#include <utils/Log.h>
+
+#include "DeviceHalHidl.h"
+#include "EffectHalHidl.h"
+#include "StreamHalHidl.h"
+
+using ::android::hardware::audio::common::V2_0::AudioChannelMask;
+using ::android::hardware::audio::common::V2_0::AudioFormat;
+using ::android::hardware::audio::common::V2_0::ThreadInfo;
+using ::android::hardware::audio::V2_0::AudioDrain;
+using ::android::hardware::audio::V2_0::IStreamOutCallback;
+using ::android::hardware::audio::V2_0::MessageQueueFlagBits;
+using ::android::hardware::audio::V2_0::MmapBufferInfo;
+using ::android::hardware::audio::V2_0::MmapPosition;
+using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::audio::V2_0::Result;
+using ::android::hardware::audio::V2_0::TimeSpec;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ReadCommand = ::android::hardware::audio::V2_0::IStreamIn::ReadCommand;
+
+namespace android {
+
+StreamHalHidl::StreamHalHidl(IStream *stream)
+ : ConversionHelperHidl("Stream"),
+ mStream(stream),
+ mHalThreadPriority(HAL_THREAD_PRIORITY_DEFAULT),
+ mCachedBufferSize(0) {
+
+ // Instrument audio signal power logging.
+ // Note: This assumes channel mask, format, and sample rate do not change after creation.
+ if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+ // Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
+ Return<void> ret = mStream->getAudioProperties(
+ [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
+ mStreamPowerLog.init(sr,
+ static_cast<audio_channel_mask_t>(m),
+ static_cast<audio_format_t>(f));
+ });
+ }
+}
+
+StreamHalHidl::~StreamHalHidl() {
+ mStream = nullptr;
+}
+
+status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getSampleRate", mStream->getSampleRate(), rate);
+}
+
+status_t StreamHalHidl::getBufferSize(size_t *size) {
+ if (!mStream) return NO_INIT;
+ status_t status = processReturn("getBufferSize", mStream->getBufferSize(), size);
+ if (status == OK) {
+ mCachedBufferSize = *size;
+ }
+ return status;
+}
+
+status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getChannelMask", mStream->getChannelMask(), mask);
+}
+
+status_t StreamHalHidl::getFormat(audio_format_t *format) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getFormat", mStream->getFormat(), format);
+}
+
+status_t StreamHalHidl::getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+ if (!mStream) return NO_INIT;
+ Return<void> ret = mStream->getAudioProperties(
+ [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
+ *sampleRate = sr;
+ *mask = static_cast<audio_channel_mask_t>(m);
+ *format = static_cast<audio_format_t>(f);
+ });
+ return processReturn("getAudioProperties", ret);
+}
+
+status_t StreamHalHidl::setParameters(const String8& kvPairs) {
+ if (!mStream) return NO_INIT;
+ hidl_vec<ParameterValue> hidlParams;
+ status_t status = parametersFromHal(kvPairs, &hidlParams);
+ if (status != OK) return status;
+ return processReturn("setParameters", mStream->setParameters(hidlParams));
+}
+
+status_t StreamHalHidl::getParameters(const String8& keys, String8 *values) {
+ values->clear();
+ if (!mStream) return NO_INIT;
+ hidl_vec<hidl_string> hidlKeys;
+ status_t status = keysFromHal(keys, &hidlKeys);
+ if (status != OK) return status;
+ Result retval;
+ Return<void> ret = mStream->getParameters(
+ hidlKeys,
+ [&](Result r, const hidl_vec<ParameterValue>& parameters) {
+ retval = r;
+ if (retval == Result::OK) {
+ parametersToHal(parameters, values);
+ }
+ });
+ return processReturn("getParameters", ret, retval);
+}
+
+status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
+ if (!mStream) return NO_INIT;
+ return processReturn("addEffect", mStream->addEffect(
+ static_cast<EffectHalHidl*>(effect.get())->effectId()));
+}
+
+status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
+ if (!mStream) return NO_INIT;
+ return processReturn("removeEffect", mStream->removeEffect(
+ static_cast<EffectHalHidl*>(effect.get())->effectId()));
+}
+
+status_t StreamHalHidl::standby() {
+ if (!mStream) return NO_INIT;
+ return processReturn("standby", mStream->standby());
+}
+
+status_t StreamHalHidl::dump(int fd) {
+ if (!mStream) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mStream->debugDump(hidlHandle);
+ native_handle_delete(hidlHandle);
+ mStreamPowerLog.dump(fd);
+ return processReturn("dump", ret);
+}
+
+status_t StreamHalHidl::start() {
+ if (!mStream) return NO_INIT;
+ return processReturn("start", mStream->start());
+}
+
+status_t StreamHalHidl::stop() {
+ if (!mStream) return NO_INIT;
+ return processReturn("stop", mStream->stop());
+}
+
+status_t StreamHalHidl::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) {
+ Result retval;
+ Return<void> ret = mStream->createMmapBuffer(
+ minSizeFrames,
+ [&](Result r, const MmapBufferInfo& hidlInfo) {
+ retval = r;
+ if (retval == Result::OK) {
+ const native_handle *handle = hidlInfo.sharedMemory.handle();
+ if (handle->numFds > 0) {
+ info->shared_memory_fd = handle->data[0];
+ info->buffer_size_frames = hidlInfo.bufferSizeFrames;
+ info->burst_size_frames = hidlInfo.burstSizeFrames;
+ // info->shared_memory_address is not needed in HIDL context
+ info->shared_memory_address = NULL;
+ } else {
+ retval = Result::NOT_INITIALIZED;
+ }
+ }
+ });
+ return processReturn("createMmapBuffer", ret, retval);
+}
+
+status_t StreamHalHidl::getMmapPosition(struct audio_mmap_position *position) {
+ Result retval;
+ Return<void> ret = mStream->getMmapPosition(
+ [&](Result r, const MmapPosition& hidlPosition) {
+ retval = r;
+ if (retval == Result::OK) {
+ position->time_nanoseconds = hidlPosition.timeNanoseconds;
+ position->position_frames = hidlPosition.positionFrames;
+ }
+ });
+ return processReturn("getMmapPosition", ret, retval);
+}
+
+status_t StreamHalHidl::setHalThreadPriority(int priority) {
+ mHalThreadPriority = priority;
+ return OK;
+}
+
+status_t StreamHalHidl::getCachedBufferSize(size_t *size) {
+ if (mCachedBufferSize != 0) {
+ *size = mCachedBufferSize;
+ return OK;
+ }
+ return getBufferSize(size);
+}
+
+bool StreamHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
+ if (mHalThreadPriority == HAL_THREAD_PRIORITY_DEFAULT) {
+ return true;
+ }
+ int err = requestPriority(
+ threadPid, threadId,
+ mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
+ ALOGE_IF(err, "failed to set priority %d for pid %d tid %d; error %d",
+ mHalThreadPriority, threadPid, threadId, err);
+ // Audio will still work, but latency will be higher and sometimes unacceptable.
+ return err == 0;
+}
+
+namespace {
+
+/* Notes on callback ownership.
+
+This is how the (Hw)Binder ownership model works. The server implementation
+is owned by the Binder framework (via sp<>). Proxies are owned by clients.
+When the last proxy disappears, the Binder framework releases the server implementation.
+
+Thus, there is no need to keep any references to StreamOutCallback (the server
+implementation) -- it lives as long as the HAL server holds a strong reference to
+the IStreamOutCallback proxy. We clear that reference by calling 'clearCallback'
+from the destructor of StreamOutHalHidl.
+
+The callback only keeps a weak reference to the stream. The stream is owned
+by AudioFlinger.
+
+*/
+
+struct StreamOutCallback : public IStreamOutCallback {
+ StreamOutCallback(const wp<StreamOutHalHidl>& stream) : mStream(stream) {}
+
+ // IStreamOutCallback implementation
+ Return<void> onWriteReady() override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != 0) {
+ stream->onWriteReady();
+ }
+ return Void();
+ }
+
+ Return<void> onDrainReady() override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != 0) {
+ stream->onDrainReady();
+ }
+ return Void();
+ }
+
+ Return<void> onError() override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != 0) {
+ stream->onError();
+ }
+ return Void();
+ }
+
+ private:
+ wp<StreamOutHalHidl> mStream;
+};
+
+} // namespace
+
+StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
+ : StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
+}
+
+StreamOutHalHidl::~StreamOutHalHidl() {
+ if (mStream != 0) {
+ if (mCallback.unsafe_get()) {
+ processReturn("clearCallback", mStream->clearCallback());
+ }
+ processReturn("close", mStream->close());
+ mStream.clear();
+ }
+ mCallback.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ if (mEfGroup) {
+ EventFlag::deleteEventFlag(&mEfGroup);
+ }
+}
+
+status_t StreamOutHalHidl::getFrameSize(size_t *size) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("getFrameSize", mStream->getFrameSize(), size);
+}
+
+status_t StreamOutHalHidl::getLatency(uint32_t *latency) {
+ if (mStream == 0) return NO_INIT;
+ if (mWriterClient == gettid() && mCommandMQ) {
+ return callWriterThread(
+ WriteCommand::GET_LATENCY, "getLatency", nullptr, 0,
+ [&](const WriteStatus& writeStatus) {
+ *latency = writeStatus.reply.latencyMs;
+ });
+ } else {
+ return processReturn("getLatency", mStream->getLatency(), latency);
+ }
+}
+
+status_t StreamOutHalHidl::setVolume(float left, float right) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("setVolume", mStream->setVolume(left, right));
+}
+
+status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
+ if (mStream == 0) return NO_INIT;
+ *written = 0;
+
+ if (bytes == 0 && !mDataMQ) {
+ // Can't determine the size for the MQ buffer. Wait for a non-empty write request.
+ ALOGW_IF(mCallback.unsafe_get(), "First call to async write with 0 bytes");
+ return OK;
+ }
+
+ status_t status;
+ if (!mDataMQ) {
+ // If playback starts close to the end of a compressed track, the number of bytes
+ // that needs to be written is smaller than the actual buffer size. Use the full
+ // buffer size for the MQ, since otherwise data will be truncated after seeking
+ // back towards the middle of the track.
+ size_t bufferSize;
+ if ((status = getCachedBufferSize(&bufferSize)) != OK) {
+ return status;
+ }
+ if (bytes > bufferSize) bufferSize = bytes;
+ if ((status = prepareForWriting(bufferSize)) != OK) {
+ return status;
+ }
+ }
+
+ status = callWriterThread(
+ WriteCommand::WRITE, "write", static_cast<const uint8_t*>(buffer), bytes,
+ [&] (const WriteStatus& writeStatus) {
+ *written = writeStatus.reply.written;
+ // Diagnostics of the cause of b/35813113.
+ ALOGE_IF(*written > bytes,
+ "hal reports more bytes written than asked for: %lld > %lld",
+ (long long)*written, (long long)bytes);
+ });
+ mStreamPowerLog.log(buffer, *written);
+ return status;
+}
+
+status_t StreamOutHalHidl::callWriterThread(
+ WriteCommand cmd, const char* cmdName,
+ const uint8_t* data, size_t dataSize, StreamOutHalHidl::WriterCallback callback) {
+ if (!mCommandMQ->write(&cmd)) {
+ ALOGE("command message queue write failed for \"%s\"", cmdName);
+ return -EAGAIN;
+ }
+ if (data != nullptr) {
+ size_t availableToWrite = mDataMQ->availableToWrite();
+ if (dataSize > availableToWrite) {
+ ALOGW("truncating write data from %lld to %lld due to insufficient data queue space",
+ (long long)dataSize, (long long)availableToWrite);
+ dataSize = availableToWrite;
+ }
+ if (!mDataMQ->write(data, dataSize)) {
+ ALOGE("data message queue write failed for \"%s\"", cmdName);
+ }
+ }
+ mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
+
+ // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
+ uint32_t efState = 0;
+retry:
+ status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState);
+ if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
+ WriteStatus writeStatus;
+ writeStatus.retval = Result::NOT_INITIALIZED;
+ if (!mStatusMQ->read(&writeStatus)) {
+ ALOGE("status message read failed for \"%s\"", cmdName);
+ }
+ if (writeStatus.retval == Result::OK) {
+ ret = OK;
+ callback(writeStatus);
+ } else {
+ ret = processReturn(cmdName, writeStatus.retval);
+ }
+ return ret;
+ }
+ if (ret == -EAGAIN || ret == -EINTR) {
+ // Spurious wakeup. This normally retries no more than once.
+ goto retry;
+ }
+ return ret;
+}
+
+status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
+ std::unique_ptr<CommandMQ> tempCommandMQ;
+ std::unique_ptr<DataMQ> tempDataMQ;
+ std::unique_ptr<StatusMQ> tempStatusMQ;
+ Result retval;
+ pid_t halThreadPid, halThreadTid;
+ Return<void> ret = mStream->prepareForWriting(
+ 1, bufferSize,
+ [&](Result r,
+ const CommandMQ::Descriptor& commandMQ,
+ const DataMQ::Descriptor& dataMQ,
+ const StatusMQ::Descriptor& statusMQ,
+ const ThreadInfo& halThreadInfo) {
+ retval = r;
+ if (retval == Result::OK) {
+ tempCommandMQ.reset(new CommandMQ(commandMQ));
+ tempDataMQ.reset(new DataMQ(dataMQ));
+ tempStatusMQ.reset(new StatusMQ(statusMQ));
+ if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
+ EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
+ }
+ halThreadPid = halThreadInfo.pid;
+ halThreadTid = halThreadInfo.tid;
+ }
+ });
+ if (!ret.isOk() || retval != Result::OK) {
+ return processReturn("prepareForWriting", ret, retval);
+ }
+ if (!tempCommandMQ || !tempCommandMQ->isValid() ||
+ !tempDataMQ || !tempDataMQ->isValid() ||
+ !tempStatusMQ || !tempStatusMQ->isValid() ||
+ !mEfGroup) {
+ ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
+ ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
+ "Command message queue for writing is invalid");
+ ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for writing");
+ ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for writing is invalid");
+ ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for writing");
+ ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+ "Status message queue for writing is invalid");
+ ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
+ return NO_INIT;
+ }
+ requestHalThreadPriority(halThreadPid, halThreadTid);
+
+ mCommandMQ = std::move(tempCommandMQ);
+ mDataMQ = std::move(tempDataMQ);
+ mStatusMQ = std::move(tempStatusMQ);
+ mWriterClient = gettid();
+ return OK;
+}
+
+status_t StreamOutHalHidl::getRenderPosition(uint32_t *dspFrames) {
+ if (mStream == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mStream->getRenderPosition(
+ [&](Result r, uint32_t d) {
+ retval = r;
+ if (retval == Result::OK) {
+ *dspFrames = d;
+ }
+ });
+ return processReturn("getRenderPosition", ret, retval);
+}
+
+status_t StreamOutHalHidl::getNextWriteTimestamp(int64_t *timestamp) {
+ if (mStream == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mStream->getNextWriteTimestamp(
+ [&](Result r, int64_t t) {
+ retval = r;
+ if (retval == Result::OK) {
+ *timestamp = t;
+ }
+ });
+ return processReturn("getNextWriteTimestamp", ret, retval);
+}
+
+status_t StreamOutHalHidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
+ if (mStream == 0) return NO_INIT;
+ status_t status = processReturn(
+ "setCallback", mStream->setCallback(new StreamOutCallback(this)));
+ if (status == OK) {
+ mCallback = callback;
+ }
+ return status;
+}
+
+status_t StreamOutHalHidl::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
+ if (mStream == 0) return NO_INIT;
+ Return<void> ret = mStream->supportsPauseAndResume(
+ [&](bool p, bool r) {
+ *supportsPause = p;
+ *supportsResume = r;
+ });
+ return processReturn("supportsPauseAndResume", ret);
+}
+
+status_t StreamOutHalHidl::pause() {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("pause", mStream->pause());
+}
+
+status_t StreamOutHalHidl::resume() {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("resume", mStream->resume());
+}
+
+status_t StreamOutHalHidl::supportsDrain(bool *supportsDrain) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("supportsDrain", mStream->supportsDrain(), supportsDrain);
+}
+
+status_t StreamOutHalHidl::drain(bool earlyNotify) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn(
+ "drain", mStream->drain(earlyNotify ? AudioDrain::EARLY_NOTIFY : AudioDrain::ALL));
+}
+
+status_t StreamOutHalHidl::flush() {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("flush", mStream->flush());
+}
+
+status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
+ if (mStream == 0) return NO_INIT;
+ if (mWriterClient == gettid() && mCommandMQ) {
+ return callWriterThread(
+ WriteCommand::GET_PRESENTATION_POSITION, "getPresentationPosition", nullptr, 0,
+ [&](const WriteStatus& writeStatus) {
+ *frames = writeStatus.reply.presentationPosition.frames;
+ timestamp->tv_sec = writeStatus.reply.presentationPosition.timeStamp.tvSec;
+ timestamp->tv_nsec = writeStatus.reply.presentationPosition.timeStamp.tvNSec;
+ });
+ } else {
+ Result retval;
+ Return<void> ret = mStream->getPresentationPosition(
+ [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
+ retval = r;
+ if (retval == Result::OK) {
+ *frames = hidlFrames;
+ timestamp->tv_sec = hidlTimeStamp.tvSec;
+ timestamp->tv_nsec = hidlTimeStamp.tvNSec;
+ }
+ });
+ return processReturn("getPresentationPosition", ret, retval);
+ }
+}
+
+status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
+ // Audio HAL V2.0 does not support propagating source metadata
+ return INVALID_OPERATION;
+}
+
+void StreamOutHalHidl::onWriteReady() {
+ sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+ if (callback == 0) return;
+ ALOGV("asyncCallback onWriteReady");
+ callback->onWriteReady();
+}
+
+void StreamOutHalHidl::onDrainReady() {
+ sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+ if (callback == 0) return;
+ ALOGV("asyncCallback onDrainReady");
+ callback->onDrainReady();
+}
+
+void StreamOutHalHidl::onError() {
+ sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+ if (callback == 0) return;
+ ALOGV("asyncCallback onError");
+ callback->onError();
+}
+
+
+StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
+ : StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
+}
+
+StreamInHalHidl::~StreamInHalHidl() {
+ if (mStream != 0) {
+ processReturn("close", mStream->close());
+ mStream.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ }
+ if (mEfGroup) {
+ EventFlag::deleteEventFlag(&mEfGroup);
+ }
+}
+
+status_t StreamInHalHidl::getFrameSize(size_t *size) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("getFrameSize", mStream->getFrameSize(), size);
+}
+
+status_t StreamInHalHidl::setGain(float gain) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("setGain", mStream->setGain(gain));
+}
+
+status_t StreamInHalHidl::read(void *buffer, size_t bytes, size_t *read) {
+ if (mStream == 0) return NO_INIT;
+ *read = 0;
+
+ if (bytes == 0 && !mDataMQ) {
+ // Can't determine the size for the MQ buffer. Wait for a non-empty read request.
+ return OK;
+ }
+
+ status_t status;
+ if (!mDataMQ && (status = prepareForReading(bytes)) != OK) {
+ return status;
+ }
+
+ ReadParameters params;
+ params.command = ReadCommand::READ;
+ params.params.read = bytes;
+ status = callReaderThread(params, "read",
+ [&](const ReadStatus& readStatus) {
+ const size_t availToRead = mDataMQ->availableToRead();
+ if (!mDataMQ->read(static_cast<uint8_t*>(buffer), std::min(bytes, availToRead))) {
+ ALOGE("data message queue read failed for \"read\"");
+ }
+ ALOGW_IF(availToRead != readStatus.reply.read,
+ "HAL read report inconsistent: mq = %d, status = %d",
+ (int32_t)availToRead, (int32_t)readStatus.reply.read);
+ *read = readStatus.reply.read;
+ });
+ mStreamPowerLog.log(buffer, *read);
+ return status;
+}
+
+status_t StreamInHalHidl::callReaderThread(
+ const ReadParameters& params, const char* cmdName,
+ StreamInHalHidl::ReaderCallback callback) {
+ if (!mCommandMQ->write(&params)) {
+ ALOGW("command message queue write failed");
+ return -EAGAIN;
+ }
+ mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
+
+ // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
+ uint32_t efState = 0;
+retry:
+ status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState);
+ if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)) {
+ ReadStatus readStatus;
+ readStatus.retval = Result::NOT_INITIALIZED;
+ if (!mStatusMQ->read(&readStatus)) {
+ ALOGE("status message read failed for \"%s\"", cmdName);
+ }
+ if (readStatus.retval == Result::OK) {
+ ret = OK;
+ callback(readStatus);
+ } else {
+ ret = processReturn(cmdName, readStatus.retval);
+ }
+ return ret;
+ }
+ if (ret == -EAGAIN || ret == -EINTR) {
+ // Spurious wakeup. This normally retries no more than once.
+ goto retry;
+ }
+ return ret;
+}
+
+status_t StreamInHalHidl::prepareForReading(size_t bufferSize) {
+ std::unique_ptr<CommandMQ> tempCommandMQ;
+ std::unique_ptr<DataMQ> tempDataMQ;
+ std::unique_ptr<StatusMQ> tempStatusMQ;
+ Result retval;
+ pid_t halThreadPid, halThreadTid;
+ Return<void> ret = mStream->prepareForReading(
+ 1, bufferSize,
+ [&](Result r,
+ const CommandMQ::Descriptor& commandMQ,
+ const DataMQ::Descriptor& dataMQ,
+ const StatusMQ::Descriptor& statusMQ,
+ const ThreadInfo& halThreadInfo) {
+ retval = r;
+ if (retval == Result::OK) {
+ tempCommandMQ.reset(new CommandMQ(commandMQ));
+ tempDataMQ.reset(new DataMQ(dataMQ));
+ tempStatusMQ.reset(new StatusMQ(statusMQ));
+ if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
+ EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
+ }
+ halThreadPid = halThreadInfo.pid;
+ halThreadTid = halThreadInfo.tid;
+ }
+ });
+ if (!ret.isOk() || retval != Result::OK) {
+ return processReturn("prepareForReading", ret, retval);
+ }
+ if (!tempCommandMQ || !tempCommandMQ->isValid() ||
+ !tempDataMQ || !tempDataMQ->isValid() ||
+ !tempStatusMQ || !tempStatusMQ->isValid() ||
+ !mEfGroup) {
+ ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for reading");
+ ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
+ "Command message queue for reading is invalid");
+ ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for reading");
+ ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for reading is invalid");
+ ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for reading");
+ ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+ "Status message queue for reading is invalid");
+ ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
+ return NO_INIT;
+ }
+ requestHalThreadPriority(halThreadPid, halThreadTid);
+
+ mCommandMQ = std::move(tempCommandMQ);
+ mDataMQ = std::move(tempDataMQ);
+ mStatusMQ = std::move(tempStatusMQ);
+ mReaderClient = gettid();
+ return OK;
+}
+
+status_t StreamInHalHidl::getInputFramesLost(uint32_t *framesLost) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("getInputFramesLost", mStream->getInputFramesLost(), framesLost);
+}
+
+status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
+ if (mStream == 0) return NO_INIT;
+ if (mReaderClient == gettid() && mCommandMQ) {
+ ReadParameters params;
+ params.command = ReadCommand::GET_CAPTURE_POSITION;
+ return callReaderThread(params, "getCapturePosition",
+ [&](const ReadStatus& readStatus) {
+ *frames = readStatus.reply.capturePosition.frames;
+ *time = readStatus.reply.capturePosition.time;
+ });
+ } else {
+ Result retval;
+ Return<void> ret = mStream->getCapturePosition(
+ [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
+ retval = r;
+ if (retval == Result::OK) {
+ *frames = hidlFrames;
+ *time = hidlTime;
+ }
+ });
+ return processReturn("getCapturePosition", ret, retval);
+ }
+}
+
+status_t StreamInHalHidl::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ if (mStream == 0) return NO_INIT;
+ return INVALID_OPERATION;
+}
+
+status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
+ // Audio HAL V2.0 does not support propagating sink metadata
+ return INVALID_OPERATION;
+}
+
+} // namespace android
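The write path above is lazy: the first non-empty write() calls prepareForWriting() to set up the command, data, and status fast message queues plus the event flag, and every later call goes through callWriterThread(), which pushes a WriteCommand and the PCM data, wakes NOT_EMPTY, waits for NOT_FULL, and reads the WriteStatus back. A hedged client-side sketch (stream, pcm and bytes are assumed to exist; short writes are possible when the request exceeds the data queue capacity):

    size_t written = 0;
    status_t status = stream->write(pcm, bytes, &written);   // first call also creates the MQs
    if (status == OK && written < bytes) {
        // The data queue was smaller than the request; a caller would typically
        // retry with the remaining bytes.
    }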
diff --git a/media/libaudiohal/2.0/StreamHalHidl.h b/media/libaudiohal/2.0/StreamHalHidl.h
new file mode 100644
index 0000000..ebad8ae
--- /dev/null
+++ b/media/libaudiohal/2.0/StreamHalHidl.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_HIDL_H
+#define ANDROID_HARDWARE_STREAM_HAL_HIDL_H
+
+#include <atomic>
+
+#include <android/hardware/audio/2.0/IStream.h>
+#include <android/hardware/audio/2.0/IStreamIn.h>
+#include <android/hardware/audio/2.0/IStreamOut.h>
+#include <fmq/EventFlag.h>
+#include <fmq/MessageQueue.h>
+#include <media/audiohal/StreamHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+#include "StreamPowerLog.h"
+
+using ::android::hardware::audio::V2_0::IStream;
+using ::android::hardware::audio::V2_0::IStreamIn;
+using ::android::hardware::audio::V2_0::IStreamOut;
+using ::android::hardware::EventFlag;
+using ::android::hardware::MessageQueue;
+using ::android::hardware::Return;
+using ReadParameters = ::android::hardware::audio::V2_0::IStreamIn::ReadParameters;
+using ReadStatus = ::android::hardware::audio::V2_0::IStreamIn::ReadStatus;
+using WriteCommand = ::android::hardware::audio::V2_0::IStreamOut::WriteCommand;
+using WriteStatus = ::android::hardware::audio::V2_0::IStreamOut::WriteStatus;
+
+namespace android {
+
+class DeviceHalHidl;
+
+class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
+{
+ public:
+ // Return the sampling rate in Hz - eg. 44100.
+ virtual status_t getSampleRate(uint32_t *rate);
+
+ // Return size of input/output buffer in bytes for this stream - eg. 4800.
+ virtual status_t getBufferSize(size_t *size);
+
+ // Return the channel mask.
+ virtual status_t getChannelMask(audio_channel_mask_t *mask);
+
+ // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+ virtual status_t getFormat(audio_format_t *format);
+
+ // Convenience method.
+ virtual status_t getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+
+ // Set audio stream parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get audio stream parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Add or remove the effect on the stream.
+ virtual status_t addEffect(sp<EffectHalInterface> effect);
+ virtual status_t removeEffect(sp<EffectHalInterface> effect);
+
+ // Put the audio hardware input/output into standby mode.
+ virtual status_t standby();
+
+ virtual status_t dump(int fd);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start();
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop();
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+ // Set the priority of the thread that interacts with the HAL
+ // (must match the priority of AudioFlinger's thread that calls 'read' / 'write')
+ virtual status_t setHalThreadPriority(int priority);
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ explicit StreamHalHidl(IStream *stream);
+
+ // The destructor automatically closes the stream.
+ virtual ~StreamHalHidl();
+
+ status_t getCachedBufferSize(size_t *size);
+
+ bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
+
+ // mStreamPowerLog is used for audio signal power logging.
+ StreamPowerLog mStreamPowerLog;
+
+ private:
+ const int HAL_THREAD_PRIORITY_DEFAULT = -1;
+ IStream *mStream;
+ int mHalThreadPriority;
+ size_t mCachedBufferSize;
+};
+
+class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl {
+ public:
+ // Return the frame size (number of bytes per audio frame) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Return the audio hardware driver estimated latency in milliseconds.
+ virtual status_t getLatency(uint32_t *latency);
+
+ // Use this method in situations where audio mixing is done in the hardware.
+ virtual status_t setVolume(float left, float right);
+
+ // Write audio buffer to driver.
+ virtual status_t write(const void *buffer, size_t bytes, size_t *written);
+
+ // Return the number of audio frames written by the audio dsp to DAC since
+ // the output has exited standby.
+ virtual status_t getRenderPosition(uint32_t *dspFrames);
+
+ // Get the local time at which the next write to the audio driver will be presented.
+ virtual status_t getNextWriteTimestamp(int64_t *timestamp);
+
+ // Set the callback for notifying completion of non-blocking write and drain.
+ virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
+
+ // Returns whether pause and resume operations are supported.
+ virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
+
+ // Notifies the audio driver to pause playback.
+ virtual status_t pause();
+
+ // Notifies the audio driver to resume playback following a pause.
+ virtual status_t resume();
+
+ // Returns whether drain operation is supported.
+ virtual status_t supportsDrain(bool *supportsDrain);
+
+ // Requests notification when data buffered by the driver/hardware has been played.
+ virtual status_t drain(bool earlyNotify);
+
+ // Notifies the audio driver to flush the queued data.
+ virtual status_t flush();
+
+ // Return a recent count of the number of audio frames presented to an external observer.
+ virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+ // Called when the metadata of the stream's source has been changed.
+ status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
+ // Methods used by StreamOutCallback (HIDL).
+ void onWriteReady();
+ void onDrainReady();
+ void onError();
+
+ private:
+ friend class DeviceHalHidl;
+ typedef MessageQueue<WriteCommand, hardware::kSynchronizedReadWrite> CommandMQ;
+ typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
+ typedef MessageQueue<WriteStatus, hardware::kSynchronizedReadWrite> StatusMQ;
+
+ wp<StreamOutHalInterfaceCallback> mCallback;
+ sp<IStreamOut> mStream;
+ std::unique_ptr<CommandMQ> mCommandMQ;
+ std::unique_ptr<DataMQ> mDataMQ;
+ std::unique_ptr<StatusMQ> mStatusMQ;
+ std::atomic<pid_t> mWriterClient;
+ EventFlag* mEfGroup;
+
+ // Can not be constructed directly by clients.
+ StreamOutHalHidl(const sp<IStreamOut>& stream);
+
+ virtual ~StreamOutHalHidl();
+
+ using WriterCallback = std::function<void(const WriteStatus& writeStatus)>;
+ status_t callWriterThread(
+ WriteCommand cmd, const char* cmdName,
+ const uint8_t* data, size_t dataSize, WriterCallback callback);
+ status_t prepareForWriting(size_t bufferSize);
+};
+
+class StreamInHalHidl : public StreamInHalInterface, public StreamHalHidl {
+ public:
+ // Return the frame size (number of bytes per audio frame) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Set the input gain for the audio driver.
+ virtual status_t setGain(float gain);
+
+ // Read audio buffer in from driver.
+ virtual status_t read(void *buffer, size_t bytes, size_t *read);
+
+ // Return the amount of input frames lost in the audio driver.
+ virtual status_t getInputFramesLost(uint32_t *framesLost);
+
+ // Return a recent count of the number of audio frames received and
+ // the clock time associated with that frame count.
+ virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ // Called when the metadata of the stream's sink has been changed.
+ status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
+ private:
+ friend class DeviceHalHidl;
+ typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
+ typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
+ typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
+
+ sp<IStreamIn> mStream;
+ std::unique_ptr<CommandMQ> mCommandMQ;
+ std::unique_ptr<DataMQ> mDataMQ;
+ std::unique_ptr<StatusMQ> mStatusMQ;
+ std::atomic<pid_t> mReaderClient;
+ EventFlag* mEfGroup;
+
+ // Can not be constructed directly by clients.
+ StreamInHalHidl(const sp<IStreamIn>& stream);
+
+ virtual ~StreamInHalHidl();
+
+ using ReaderCallback = std::function<void(const ReadStatus& readStatus)>;
+ status_t callReaderThread(
+ const ReadParameters& params, const char* cmdName, ReaderCallback callback);
+ status_t prepareForReading(size_t bufferSize);
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_H
diff --git a/media/libaudiohal/2.0/StreamHalLocal.cpp b/media/libaudiohal/2.0/StreamHalLocal.cpp
new file mode 100644
index 0000000..98107e5
--- /dev/null
+++ b/media/libaudiohal/2.0/StreamHalLocal.cpp
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "StreamHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "StreamHalLocal.h"
+
+namespace android {
+
+StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
+ : mDevice(device),
+ mStream(stream) {
+ // Instrument audio signal power logging.
+ // Note: This assumes channel mask, format, and sample rate do not change after creation.
+ if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+ mStreamPowerLog.init(mStream->get_sample_rate(mStream),
+ mStream->get_channels(mStream),
+ mStream->get_format(mStream));
+ }
+}
+
+StreamHalLocal::~StreamHalLocal() {
+ mStream = 0;
+ mDevice.clear();
+}
+
+status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
+ *rate = mStream->get_sample_rate(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getBufferSize(size_t *size) {
+ *size = mStream->get_buffer_size(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
+ *mask = mStream->get_channels(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getFormat(audio_format_t *format) {
+ *format = mStream->get_format(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+ *sampleRate = mStream->get_sample_rate(mStream);
+ *mask = mStream->get_channels(mStream);
+ *format = mStream->get_format(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::setParameters(const String8& kvPairs) {
+ return mStream->set_parameters(mStream, kvPairs.string());
+}
+
+status_t StreamHalLocal::getParameters(const String8& keys, String8 *values) {
+ char *halValues = mStream->get_parameters(mStream, keys.string());
+ if (halValues != NULL) {
+ values->setTo(halValues);
+ free(halValues);
+ } else {
+ values->clear();
+ }
+ return OK;
+}
+
+status_t StreamHalLocal::addEffect(sp<EffectHalInterface>) {
+ LOG_ALWAYS_FATAL("Local streams can not have effects");
+ return INVALID_OPERATION;
+}
+
+status_t StreamHalLocal::removeEffect(sp<EffectHalInterface>) {
+ LOG_ALWAYS_FATAL("Local streams can not have effects");
+ return INVALID_OPERATION;
+}
+
+status_t StreamHalLocal::standby() {
+ return mStream->standby(mStream);
+}
+
+status_t StreamHalLocal::dump(int fd) {
+ status_t status = mStream->dump(mStream, fd);
+ mStreamPowerLog.dump(fd);
+ return status;
+}
+
+status_t StreamHalLocal::setHalThreadPriority(int) {
+ // Nothing to do: the local HAL runs directly on AudioFlinger's thread, so there is
+ // no separate HAL thread whose priority would need to be adjusted.
+ return OK;
+}
+
+StreamOutHalLocal::StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device)
+ : StreamHalLocal(&stream->common, device), mStream(stream) {
+}
+
+StreamOutHalLocal::~StreamOutHalLocal() {
+ mCallback.clear();
+ mDevice->closeOutputStream(mStream);
+ mStream = 0;
+}
+
+status_t StreamOutHalLocal::getFrameSize(size_t *size) {
+ *size = audio_stream_out_frame_size(mStream);
+ return OK;
+}
+
+status_t StreamOutHalLocal::getLatency(uint32_t *latency) {
+ *latency = mStream->get_latency(mStream);
+ return OK;
+}
+
+status_t StreamOutHalLocal::setVolume(float left, float right) {
+ if (mStream->set_volume == NULL) return INVALID_OPERATION;
+ return mStream->set_volume(mStream, left, right);
+}
+
+status_t StreamOutHalLocal::write(const void *buffer, size_t bytes, size_t *written) {
+ ssize_t writeResult = mStream->write(mStream, buffer, bytes);
+ if (writeResult > 0) {
+ *written = writeResult;
+ mStreamPowerLog.log(buffer, *written);
+ return OK;
+ } else {
+ *written = 0;
+ return writeResult;
+ }
+}
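For illustration, a hedged caller-side sketch of the write() convention implemented above (variable names are hypothetical; this is not AudioFlinger's actual write loop): a non-negative HAL result is reported through the out-parameter, while a negative result is returned as the status.

    size_t written = 0;
    status_t status = streamOut->write(buffer, bufferBytes, &written);
    if (status == OK && written < bufferBytes) {
        // Short write: the legacy HAL accepted only part of the buffer; resubmit the rest.
    } else if (status != OK) {
        // 'written' is 0 and 'status' carries the negative error code from the HAL.
    }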
+
+status_t StreamOutHalLocal::getRenderPosition(uint32_t *dspFrames) {
+ return mStream->get_render_position(mStream, dspFrames);
+}
+
+status_t StreamOutHalLocal::getNextWriteTimestamp(int64_t *timestamp) {
+ if (mStream->get_next_write_timestamp == NULL) return INVALID_OPERATION;
+ return mStream->get_next_write_timestamp(mStream, timestamp);
+}
+
+status_t StreamOutHalLocal::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
+ if (mStream->set_callback == NULL) return INVALID_OPERATION;
+ status_t result = mStream->set_callback(mStream, StreamOutHalLocal::asyncCallback, this);
+ if (result == OK) {
+ mCallback = callback;
+ }
+ return result;
+}
+
+// static
+int StreamOutHalLocal::asyncCallback(stream_callback_event_t event, void*, void *cookie) {
+ // We act as if we had handed a wp<StreamOutHalLocal> to the HAL. This correctly handles
+ // the case where the callback fires while StreamOutHalLocal's destructor is already
+ // running: the destructor only runs after the refcount has been atomically decremented,
+ // so the promote() below fails and the event is simply dropped.
+ wp<StreamOutHalLocal> weakSelf(static_cast<StreamOutHalLocal*>(cookie));
+ sp<StreamOutHalLocal> self = weakSelf.promote();
+ if (self == 0) return 0;
+ sp<StreamOutHalInterfaceCallback> callback = self->mCallback.promote();
+ if (callback == 0) return 0;
+ ALOGV("asyncCallback() event %d", event);
+ switch (event) {
+ case STREAM_CBK_EVENT_WRITE_READY:
+ callback->onWriteReady();
+ break;
+ case STREAM_CBK_EVENT_DRAIN_READY:
+ callback->onDrainReady();
+ break;
+ case STREAM_CBK_EVENT_ERROR:
+ callback->onError();
+ break;
+ default:
+ ALOGW("asyncCallback() unknown event %d", event);
+ break;
+ }
+ return 0;
+}
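A minimal sketch of the weak-promote pattern used in asyncCallback() above, with a generic RefBase-derived type standing in for StreamOutHalLocal (illustrative only; assumes utils/RefBase.h and the android namespace):

    struct Holder : public RefBase {
        void onEvent() { /* forward to the real callback */ }
    };

    int genericCallback(void *cookie) {
        // The cookie was handed out as a raw pointer but is only ever used as a weak reference.
        wp<Holder> weak(static_cast<Holder*>(cookie));
        sp<Holder> strong = weak.promote();  // fails once destruction has started
        if (strong == nullptr) return 0;     // object is going away: drop the event
        strong->onEvent();
        return 0;
    }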
+
+status_t StreamOutHalLocal::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
+ *supportsPause = mStream->pause != NULL;
+ *supportsResume = mStream->resume != NULL;
+ return OK;
+}
+
+status_t StreamOutHalLocal::pause() {
+ if (mStream->pause == NULL) return INVALID_OPERATION;
+ return mStream->pause(mStream);
+}
+
+status_t StreamOutHalLocal::resume() {
+ if (mStream->resume == NULL) return INVALID_OPERATION;
+ return mStream->resume(mStream);
+}
+
+status_t StreamOutHalLocal::supportsDrain(bool *supportsDrain) {
+ *supportsDrain = mStream->drain != NULL;
+ return OK;
+}
+
+status_t StreamOutHalLocal::drain(bool earlyNotify) {
+ if (mStream->drain == NULL) return INVALID_OPERATION;
+ return mStream->drain(mStream, earlyNotify ? AUDIO_DRAIN_EARLY_NOTIFY : AUDIO_DRAIN_ALL);
+}
+
+status_t StreamOutHalLocal::flush() {
+ if (mStream->flush == NULL) return INVALID_OPERATION;
+ return mStream->flush(mStream);
+}
+
+status_t StreamOutHalLocal::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
+ if (mStream->get_presentation_position == NULL) return INVALID_OPERATION;
+ return mStream->get_presentation_position(mStream, frames, timestamp);
+}
+
+status_t StreamOutHalLocal::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
+ if (mStream->update_source_metadata == nullptr) {
+ return INVALID_OPERATION;
+ }
+ const source_metadata_t metadata {
+ .track_count = sourceMetadata.tracks.size(),
+ // const cast is fine as it is in a const structure
+ .tracks = const_cast<playback_track_metadata*>(sourceMetadata.tracks.data()),
+ };
+ mStream->update_source_metadata(mStream, &metadata);
+ return OK;
+}
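A hedged sketch of how a caller might build the SourceMetadata consumed above (the field order usage/content_type/gain of playback_track_metadata is an assumption, and the values are illustrative):

    SourceMetadata metadata;
    metadata.tracks.push_back({AUDIO_USAGE_MEDIA, AUDIO_CONTENT_TYPE_MUSIC, 1.0f /* gain */});
    streamOut->updateSourceMetadata(metadata);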
+
+status_t StreamOutHalLocal::start() {
+ if (mStream->start == NULL) return INVALID_OPERATION;
+ return mStream->start(mStream);
+}
+
+status_t StreamOutHalLocal::stop() {
+ if (mStream->stop == NULL) return INVALID_OPERATION;
+ return mStream->stop(mStream);
+}
+
+status_t StreamOutHalLocal::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) {
+ if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
+ return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
+}
+
+status_t StreamOutHalLocal::getMmapPosition(struct audio_mmap_position *position) {
+ if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
+ return mStream->get_mmap_position(mStream, position);
+}
+
+StreamInHalLocal::StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device)
+ : StreamHalLocal(&stream->common, device), mStream(stream) {
+}
+
+StreamInHalLocal::~StreamInHalLocal() {
+ mDevice->closeInputStream(mStream);
+ mStream = 0;
+}
+
+status_t StreamInHalLocal::getFrameSize(size_t *size) {
+ *size = audio_stream_in_frame_size(mStream);
+ return OK;
+}
+
+status_t StreamInHalLocal::setGain(float gain) {
+ return mStream->set_gain(mStream, gain);
+}
+
+status_t StreamInHalLocal::read(void *buffer, size_t bytes, size_t *read) {
+ ssize_t readResult = mStream->read(mStream, buffer, bytes);
+ if (readResult > 0) {
+ *read = readResult;
+ mStreamPowerLog.log(buffer, *read);
+ return OK;
+ } else {
+ *read = 0;
+ return readResult;
+ }
+}
+
+status_t StreamInHalLocal::getInputFramesLost(uint32_t *framesLost) {
+ *framesLost = mStream->get_input_frames_lost(mStream);
+ return OK;
+}
+
+status_t StreamInHalLocal::getCapturePosition(int64_t *frames, int64_t *time) {
+ if (mStream->get_capture_position == NULL) return INVALID_OPERATION;
+ return mStream->get_capture_position(mStream, frames, time);
+}
+
+status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
+ if (mStream->update_sink_metadata == nullptr) {
+ return INVALID_OPERATION;
+ }
+ const sink_metadata_t metadata {
+ .track_count = sinkMetadata.tracks.size(),
+ // const cast is fine as it is in a const structure
+ .tracks = const_cast<record_track_metadata*>(sinkMetadata.tracks.data()),
+ };
+ mStream->update_sink_metadata(mStream, &metadata);
+ return OK;
+}
+
+status_t StreamInHalLocal::start() {
+ if (mStream->start == NULL) return INVALID_OPERATION;
+ return mStream->start(mStream);
+}
+
+status_t StreamInHalLocal::stop() {
+ if (mStream->stop == NULL) return INVALID_OPERATION;
+ return mStream->stop(mStream);
+}
+
+status_t StreamInHalLocal::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) {
+ if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
+ return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
+}
+
+status_t StreamInHalLocal::getMmapPosition(struct audio_mmap_position *position) {
+ if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
+ return mStream->get_mmap_position(mStream, position);
+}
+
+status_t StreamInHalLocal::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ return INVALID_OPERATION;
+}
+
+} // namespace android
diff --git a/media/libaudiohal/2.0/StreamHalLocal.h b/media/libaudiohal/2.0/StreamHalLocal.h
new file mode 100644
index 0000000..cda8d0c
--- /dev/null
+++ b/media/libaudiohal/2.0/StreamHalLocal.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
+#define ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
+
+#include <media/audiohal/StreamHalInterface.h>
+#include "StreamPowerLog.h"
+
+namespace android {
+
+class DeviceHalLocal;
+
+class StreamHalLocal : public virtual StreamHalInterface
+{
+ public:
+ // Return the sampling rate in Hz - e.g. 44100.
+ virtual status_t getSampleRate(uint32_t *rate);
+
+ // Return size of input/output buffer in bytes for this stream - e.g. 4800.
+ virtual status_t getBufferSize(size_t *size);
+
+ // Return the channel mask.
+ virtual status_t getChannelMask(audio_channel_mask_t *mask);
+
+ // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+ virtual status_t getFormat(audio_format_t *format);
+
+ // Convenience method.
+ virtual status_t getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+
+ // Set audio stream parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get audio stream parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Add or remove the effect on the stream.
+ virtual status_t addEffect(sp<EffectHalInterface> effect);
+ virtual status_t removeEffect(sp<EffectHalInterface> effect);
+
+ // Put the audio hardware input/output into standby mode.
+ virtual status_t standby();
+
+ virtual status_t dump(int fd);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start() = 0;
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop() = 0;
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) = 0;
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
+
+ // Set the priority of the thread that interacts with the HAL
+ // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+ virtual status_t setHalThreadPriority(int priority);
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device);
+
+ // The destructor automatically closes the stream.
+ virtual ~StreamHalLocal();
+
+ sp<DeviceHalLocal> mDevice;
+
+ // mStreamPowerLog is used for audio signal power logging.
+ StreamPowerLog mStreamPowerLog;
+
+ private:
+ audio_stream_t *mStream;
+};
+
+class StreamOutHalLocal : public StreamOutHalInterface, public StreamHalLocal {
+ public:
+ // Return the frame size (number of bytes per audio frame, i.e. sample size times channel count).
+ virtual status_t getFrameSize(size_t *size);
+
+ // Return the audio hardware driver estimated latency in milliseconds.
+ virtual status_t getLatency(uint32_t *latency);
+
+ // Use this method in situations where audio mixing is done in the hardware.
+ virtual status_t setVolume(float left, float right);
+
+ // Write audio buffer to driver.
+ virtual status_t write(const void *buffer, size_t bytes, size_t *written);
+
+ // Return the number of audio frames written by the audio dsp to DAC since
+ // the output has exited standby.
+ virtual status_t getRenderPosition(uint32_t *dspFrames);
+
+ // Get the local time at which the next write to the audio driver will be presented.
+ virtual status_t getNextWriteTimestamp(int64_t *timestamp);
+
+ // Set the callback for notifying completion of non-blocking write and drain.
+ virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
+
+ // Returns whether pause and resume operations are supported.
+ virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
+
+ // Notifies the audio driver to pause playback.
+ virtual status_t pause();
+
+ // Notifies the audio driver to resume playback following a pause.
+ virtual status_t resume();
+
+ // Returns whether drain operation is supported.
+ virtual status_t supportsDrain(bool *supportsDrain);
+
+ // Requests notification when data buffered by the driver/hardware has been played.
+ virtual status_t drain(bool earlyNotify);
+
+ // Notifies the audio driver to flush the queued data.
+ virtual status_t flush();
+
+ // Return a recent count of the number of audio frames presented to an external observer.
+ virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start();
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop();
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+ // Called when the metadata of the stream's source has been changed.
+ status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
+ private:
+ audio_stream_out_t *mStream;
+ wp<StreamOutHalInterfaceCallback> mCallback;
+
+ friend class DeviceHalLocal;
+
+ // Can not be constructed directly by clients.
+ StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device);
+
+ virtual ~StreamOutHalLocal();
+
+ static int asyncCallback(stream_callback_event_t event, void *param, void *cookie);
+};
+
+class StreamInHalLocal : public StreamInHalInterface, public StreamHalLocal {
+ public:
+ // Return the frame size (number of bytes per audio frame, i.e. sample size times channel count).
+ virtual status_t getFrameSize(size_t *size);
+
+ // Set the input gain for the audio driver.
+ virtual status_t setGain(float gain);
+
+ // Read audio buffer in from driver.
+ virtual status_t read(void *buffer, size_t bytes, size_t *read);
+
+ // Return the amount of input frames lost in the audio driver.
+ virtual status_t getInputFramesLost(uint32_t *framesLost);
+
+ // Return a recent count of the number of audio frames received and
+ // the clock time associated with that frame count.
+ virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start();
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop();
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ // Called when the metadata of the stream's sink has been changed.
+ status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
+ private:
+ audio_stream_in_t *mStream;
+
+ friend class DeviceHalLocal;
+
+ // Can not be constructed directly by clients.
+ StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device);
+
+ virtual ~StreamInHalLocal();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
diff --git a/media/libaudiohal/StreamPowerLog.h b/media/libaudiohal/2.0/StreamPowerLog.h
similarity index 100%
rename from media/libaudiohal/StreamPowerLog.h
rename to media/libaudiohal/2.0/StreamPowerLog.h
diff --git a/media/libaudiohal/4.0/Android.bp b/media/libaudiohal/4.0/Android.bp
new file mode 100644
index 0000000..833defa
--- /dev/null
+++ b/media/libaudiohal/4.0/Android.bp
@@ -0,0 +1,58 @@
+cc_library_shared {
+ name: "libaudiohal@4.0",
+
+ srcs: [
+ "DeviceHalLocal.cpp",
+ "DevicesFactoryHalHybrid.cpp",
+ "DevicesFactoryHalLocal.cpp",
+ "StreamHalLocal.cpp",
+
+ "ConversionHelperHidl.cpp",
+ "DeviceHalHidl.cpp",
+ "DevicesFactoryHalHidl.cpp",
+ "EffectBufferHalHidl.cpp",
+ "EffectHalHidl.cpp",
+ "EffectsFactoryHalHidl.cpp",
+ "StreamHalHidl.cpp",
+ ],
+
+ export_include_dirs: ["include"],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ ],
+ shared_libs: [
+ "libaudiohal_deathhandler",
+ "libaudioutils",
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libutils",
+ "libhardware",
+ "libbase",
+ "libfmq",
+ "libhwbinder",
+ "libhidlbase",
+ "libhidlmemory",
+ "libhidltransport",
+ "android.hardware.audio@4.0",
+ "android.hardware.audio.common-util",
+ "android.hardware.audio.common@4.0",
+ "android.hardware.audio.common@4.0-util",
+ "android.hardware.audio.effect@4.0",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "libmedia_helper",
+ "libmediautils",
+ ],
+ header_libs: [
+ "android.hardware.audio.common.util@all-versions",
+ "libaudiohal_headers"
+ ],
+
+ export_shared_lib_headers: [
+ "libfmq",
+ ],
+}
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.cpp b/media/libaudiohal/4.0/ConversionHelperHidl.cpp
new file mode 100644
index 0000000..fe27504
--- /dev/null
+++ b/media/libaudiohal/4.0/ConversionHelperHidl.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+
+#define LOG_TAG "HalHidl"
+#include <media/AudioParameter.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+
+using ::android::hardware::audio::V4_0::AudioMicrophoneChannelMapping;
+using ::android::hardware::audio::V4_0::AudioMicrophoneDirectionality;
+using ::android::hardware::audio::V4_0::AudioMicrophoneLocation;
+using ::android::hardware::audio::V4_0::DeviceAddress;
+using ::android::hardware::audio::V4_0::MicrophoneInfo;
+using ::android::hardware::audio::V4_0::Result;
+
+namespace android {
+namespace V4_0 {
+
+// static
+status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
+ AudioParameter halKeys(keys);
+ if (halKeys.size() == 0) return BAD_VALUE;
+ hidlKeys->resize(halKeys.size());
+ //FIXME: keyStreamSupportedChannels and keyStreamSupportedSamplingRates come with a
+ // "keyFormat=<value>" pair. We need to transform it into a single key string so that it is
+ // carried over to the legacy HAL via HIDL.
+ String8 value;
+ bool keepFormatValue = halKeys.size() == 2 &&
+ (halKeys.get(String8(AudioParameter::keyStreamSupportedChannels), value) == NO_ERROR ||
+ halKeys.get(String8(AudioParameter::keyStreamSupportedSamplingRates), value) == NO_ERROR);
+
+ for (size_t i = 0; i < halKeys.size(); ++i) {
+ String8 key;
+ status_t status = halKeys.getAt(i, key);
+ if (status != OK) return status;
+ if (keepFormatValue && key == AudioParameter::keyFormat) {
+ AudioParameter formatParam;
+ halKeys.getAt(i, key, value);
+ formatParam.add(key, value);
+ key = formatParam.toString();
+ }
+ (*hidlKeys)[i] = key.string();
+ }
+ return OK;
+}
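To make the FIXME above concrete, a hedged sketch of the transformation as seen from a ConversionHelperHidl subclass (the literal key strings "sup_sampling_rates" and "format" are assumptions about AudioParameter's constants, and the format value is shown symbolically):

    hidl_vec<hidl_string> hidlKeys;
    status_t status = keysFromHal(
            String8("sup_sampling_rates;format=AUDIO_FORMAT_PCM_16_BIT"), &hidlKeys);
    // On success: hidlKeys[0] == "sup_sampling_rates"
    //             hidlKeys[1] == "format=AUDIO_FORMAT_PCM_16_BIT"  (value kept with its key)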
+
+// static
+status_t ConversionHelperHidl::parametersFromHal(
+ const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams) {
+ AudioParameter params(kvPairs);
+ if (params.size() == 0) return BAD_VALUE;
+ hidlParams->resize(params.size());
+ for (size_t i = 0; i < params.size(); ++i) {
+ String8 key, value;
+ status_t status = params.getAt(i, key, value);
+ if (status != OK) return status;
+ (*hidlParams)[i].key = key.string();
+ (*hidlParams)[i].value = value.string();
+ }
+ return OK;
+}
+
+// static
+void ConversionHelperHidl::parametersToHal(
+ const hidl_vec<ParameterValue>& parameters, String8 *values) {
+ AudioParameter params;
+ for (size_t i = 0; i < parameters.size(); ++i) {
+ params.add(String8(parameters[i].key.c_str()), String8(parameters[i].value.c_str()));
+ }
+ values->setTo(params.toString());
+}
+
+ConversionHelperHidl::ConversionHelperHidl(const char* className)
+ : mClassName(className) {
+}
+
+// static
+status_t ConversionHelperHidl::analyzeResult(const Result& result) {
+ switch (result) {
+ case Result::OK: return OK;
+ case Result::INVALID_ARGUMENTS: return BAD_VALUE;
+ case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
+ case Result::NOT_INITIALIZED: return NO_INIT;
+ case Result::NOT_SUPPORTED: return INVALID_OPERATION;
+ default: return NO_INIT;
+ }
+}
+
+void ConversionHelperHidl::emitError(const char* funcName, const char* description) {
+ ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
+}
+
+// TODO: Use the same implementation in the hal when it moves to a util library.
+std::string deviceAddressToHal(const DeviceAddress& address) {
+ // HAL assumes that the address is NUL-terminated.
+ char halAddress[AUDIO_DEVICE_MAX_ADDRESS_LEN];
+ memset(halAddress, 0, sizeof(halAddress));
+ audio_devices_t halDevice = static_cast<audio_devices_t>(address.device);
+ const bool isInput = (halDevice & AUDIO_DEVICE_BIT_IN) != 0;
+ if (isInput) halDevice &= ~AUDIO_DEVICE_BIT_IN;
+ if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_ALL_A2DP) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "%02X:%02X:%02X:%02X:%02X:%02X",
+ address.address.mac[0], address.address.mac[1], address.address.mac[2],
+ address.address.mac[3], address.address.mac[4], address.address.mac[5]);
+ } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_IP) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_IP) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "%d.%d.%d.%d", address.address.ipv4[0],
+ address.address.ipv4[1], address.address.ipv4[2], address.address.ipv4[3]);
+ } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_ALL_USB) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_ALL_USB) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "card=%d;device=%d", address.address.alsa.card,
+ address.address.alsa.device);
+ } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_BUS) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_BUS) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "%s", address.busAddress.c_str());
+ } else if ((!isInput && (halDevice & AUDIO_DEVICE_OUT_REMOTE_SUBMIX) != 0) ||
+ (isInput && (halDevice & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
+ snprintf(halAddress, sizeof(halAddress), "%s", address.rSubmixAddress.c_str());
+ } else {
+ snprintf(halAddress, sizeof(halAddress), "%s", address.busAddress.c_str());
+ }
+ return halAddress;
+}
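A hedged sketch of the forward mapping implemented above (device and address values are illustrative; assumes the V4_0 AudioDevice type is in scope):

    DeviceAddress addr;
    addr.device = AudioDevice(AUDIO_DEVICE_OUT_USB_DEVICE);  // covered by AUDIO_DEVICE_OUT_ALL_USB
    addr.address.alsa = {1 /* card */, 0 /* device */};
    std::string halAddress = deviceAddressToHal(addr);        // yields "card=1;device=0"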
+
+// Local conversion helpers.
+
+audio_microphone_channel_mapping_t channelMappingToHal(AudioMicrophoneChannelMapping mapping) {
+ switch (mapping) {
+ case AudioMicrophoneChannelMapping::UNUSED:
+ return AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
+ case AudioMicrophoneChannelMapping::DIRECT:
+ return AUDIO_MICROPHONE_CHANNEL_MAPPING_DIRECT;
+ case AudioMicrophoneChannelMapping::PROCESSED:
+ return AUDIO_MICROPHONE_CHANNEL_MAPPING_PROCESSED;
+ default:
+ LOG_ALWAYS_FATAL("Unknown channelMappingToHal conversion %d", mapping);
+ }
+}
+
+audio_microphone_location_t locationToHal(AudioMicrophoneLocation location) {
+ switch (location) {
+ case AudioMicrophoneLocation::UNKNOWN:
+ return AUDIO_MICROPHONE_LOCATION_UNKNOWN;
+ case AudioMicrophoneLocation::MAINBODY:
+ return AUDIO_MICROPHONE_LOCATION_MAINBODY;
+ case AudioMicrophoneLocation::MAINBODY_MOVABLE:
+ return AUDIO_MICROPHONE_LOCATION_MAINBODY_MOVABLE;
+ case AudioMicrophoneLocation::PERIPHERAL:
+ return AUDIO_MICROPHONE_LOCATION_PERIPHERAL;
+ default:
+ LOG_ALWAYS_FATAL("Unknown locationToHal conversion %d", location);
+ }
+}
+
+audio_microphone_directionality_t directionalityToHal(AudioMicrophoneDirectionality dir) {
+ switch (dir) {
+ case AudioMicrophoneDirectionality::UNKNOWN:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_UNKNOWN;
+ case AudioMicrophoneDirectionality::OMNI:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_OMNI;
+ case AudioMicrophoneDirectionality::BI_DIRECTIONAL:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_BI_DIRECTIONAL;
+ case AudioMicrophoneDirectionality::CARDIOID:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_CARDIOID;
+ case AudioMicrophoneDirectionality::HYPER_CARDIOID:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_HYPER_CARDIOID;
+ case AudioMicrophoneDirectionality::SUPER_CARDIOID:
+ return AUDIO_MICROPHONE_DIRECTIONALITY_SUPER_CARDIOID;
+ default:
+ LOG_ALWAYS_FATAL("Unknown directionalityToHal conversion %d", dir);
+ }
+}
+
+// static
+void ConversionHelperHidl::microphoneInfoToHal(const MicrophoneInfo& src,
+ audio_microphone_characteristic_t *pDst) {
+ if (pDst != NULL) {
+ snprintf(pDst->device_id, sizeof(pDst->device_id),
+ "%s", src.deviceId.c_str());
+ pDst->device = static_cast<audio_devices_t>(src.deviceAddress.device);
+ snprintf(pDst->address, sizeof(pDst->address),
+ "%s", deviceAddressToHal(src.deviceAddress).c_str());
+ if (src.channelMapping.size() > AUDIO_CHANNEL_COUNT_MAX) {
+ ALOGW("microphoneInfoToStruct found %zu channelMapping elements. Max expected is %d",
+ src.channelMapping.size(), AUDIO_CHANNEL_COUNT_MAX);
+ }
+ size_t ch;
+ for (ch = 0; ch < src.channelMapping.size() && ch < AUDIO_CHANNEL_COUNT_MAX; ch++) {
+ pDst->channel_mapping[ch] = channelMappingToHal(src.channelMapping[ch]);
+ }
+ for (; ch < AUDIO_CHANNEL_COUNT_MAX; ch++) {
+ pDst->channel_mapping[ch] = AUDIO_MICROPHONE_CHANNEL_MAPPING_UNUSED;
+ }
+ pDst->location = locationToHal(src.location);
+ pDst->group = (audio_microphone_group_t)src.group;
+ pDst->index_in_the_group = (unsigned int)src.indexInTheGroup;
+ pDst->sensitivity = src.sensitivity;
+ pDst->max_spl = src.maxSpl;
+ pDst->min_spl = src.minSpl;
+ pDst->directionality = directionalityToHal(src.directionality);
+ pDst->num_frequency_responses = (unsigned int)src.frequencyResponse.size();
+ if (pDst->num_frequency_responses > AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES) {
+ ALOGW("microphoneInfoToStruct found %d frequency responses. Max expected is %d",
+ pDst->num_frequency_responses, AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES);
+ pDst->num_frequency_responses = AUDIO_MICROPHONE_MAX_FREQUENCY_RESPONSES;
+ }
+ for (size_t k = 0; k < pDst->num_frequency_responses; k++) {
+ pDst->frequency_responses[0][k] = src.frequencyResponse[k].frequency;
+ pDst->frequency_responses[1][k] = src.frequencyResponse[k].level;
+ }
+ pDst->geometric_location.x = src.position.x;
+ pDst->geometric_location.y = src.position.y;
+ pDst->geometric_location.z = src.position.z;
+ pDst->orientation.x = src.orientation.x;
+ pDst->orientation.y = src.orientation.y;
+ pDst->orientation.z = src.orientation.z;
+ }
+}
+
+} // namespace V4_0
+} // namespace android
diff --git a/media/libaudiohal/4.0/ConversionHelperHidl.h b/media/libaudiohal/4.0/ConversionHelperHidl.h
new file mode 100644
index 0000000..8823a8d
--- /dev/null
+++ b/media/libaudiohal/4.0/ConversionHelperHidl.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
+#define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
+
+#include <android/hardware/audio/4.0/types.h>
+#include <hidl/HidlSupport.h>
+#include <system/audio.h>
+#include <utils/String8.h>
+
+using ::android::hardware::audio::V4_0::ParameterValue;
+using ::android::hardware::audio::V4_0::MicrophoneInfo;
+using ::android::hardware::Return;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+
+namespace android {
+namespace V4_0 {
+
+class ConversionHelperHidl {
+ protected:
+ static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
+ static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
+ static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
+ static void microphoneInfoToHal(const MicrophoneInfo& src,
+ audio_microphone_characteristic_t *pDst);
+
+ ConversionHelperHidl(const char* className);
+
+ template<typename R, typename T>
+ status_t processReturn(const char* funcName, const Return<R>& ret, T *retval) {
+ if (ret.isOk()) {
+ // This way it also works for enum class to unscoped enum conversion.
+ *retval = static_cast<T>(static_cast<R>(ret));
+ return OK;
+ }
+ return processReturn(funcName, ret);
+ }
+
+ template<typename T>
+ status_t processReturn(const char* funcName, const Return<T>& ret) {
+ if (!ret.isOk()) {
+ emitError(funcName, ret.description().c_str());
+ }
+ return ret.isOk() ? OK : FAILED_TRANSACTION;
+ }
+
+ status_t processReturn(const char* funcName, const Return<hardware::audio::V4_0::Result>& ret) {
+ if (!ret.isOk()) {
+ emitError(funcName, ret.description().c_str());
+ }
+ return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+ }
+
+ template<typename T>
+ status_t processReturn(
+ const char* funcName, const Return<T>& ret, hardware::audio::V4_0::Result retval) {
+ if (!ret.isOk()) {
+ emitError(funcName, ret.description().c_str());
+ }
+ return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+ }
+
+ private:
+ const char* mClassName;
+
+ static status_t analyzeResult(const hardware::audio::V4_0::Result& result);
+
+ void emitError(const char* funcName, const char* description);
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.cpp b/media/libaudiohal/4.0/DeviceHalHidl.cpp
new file mode 100644
index 0000000..6facca9
--- /dev/null
+++ b/media/libaudiohal/4.0/DeviceHalHidl.cpp
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#define LOG_TAG "DeviceHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/4.0/IPrimaryDevice.h>
+#include <cutils/native_handle.h>
+#include <hwbinder/IPCThreadState.h>
+#include <utils/Log.h>
+
+#include <common/all-versions/VersionUtils.h>
+
+#include "DeviceHalHidl.h"
+#include "HidlUtils.h"
+#include "StreamHalHidl.h"
+#include "VersionUtils.h"
+
+using ::android::hardware::audio::common::V4_0::AudioConfig;
+using ::android::hardware::audio::common::V4_0::AudioDevice;
+using ::android::hardware::audio::common::V4_0::AudioInputFlag;
+using ::android::hardware::audio::common::V4_0::AudioOutputFlag;
+using ::android::hardware::audio::common::V4_0::AudioPatchHandle;
+using ::android::hardware::audio::common::V4_0::AudioPort;
+using ::android::hardware::audio::common::V4_0::AudioPortConfig;
+using ::android::hardware::audio::common::V4_0::AudioMode;
+using ::android::hardware::audio::common::V4_0::AudioSource;
+using ::android::hardware::audio::common::V4_0::HidlUtils;
+using ::android::hardware::audio::common::utils::mkEnumConverter;
+using ::android::hardware::audio::V4_0::DeviceAddress;
+using ::android::hardware::audio::V4_0::IPrimaryDevice;
+using ::android::hardware::audio::V4_0::ParameterValue;
+using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::audio::V4_0::SinkMetadata;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+
+namespace android {
+namespace V4_0 {
+
+namespace {
+
+status_t deviceAddressFromHal(
+ audio_devices_t device, const char* halAddress, DeviceAddress* address) {
+ address->device = AudioDevice(device);
+
+ if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+ return OK;
+ }
+ const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
+ if (isInput) device &= ~AUDIO_DEVICE_BIT_IN;
+ if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_A2DP) != 0)
+ || (isInput && (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
+ int status = sscanf(halAddress,
+ "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX",
+ &address->address.mac[0], &address->address.mac[1], &address->address.mac[2],
+ &address->address.mac[3], &address->address.mac[4], &address->address.mac[5]);
+ return status == 6 ? OK : BAD_VALUE;
+ } else if ((!isInput && (device & AUDIO_DEVICE_OUT_IP) != 0)
+ || (isInput && (device & AUDIO_DEVICE_IN_IP) != 0)) {
+ int status = sscanf(halAddress,
+ "%hhu.%hhu.%hhu.%hhu",
+ &address->address.ipv4[0], &address->address.ipv4[1],
+ &address->address.ipv4[2], &address->address.ipv4[3]);
+ return status == 4 ? OK : BAD_VALUE;
+ } else if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_USB) != 0)
+ || (isInput && (device & AUDIO_DEVICE_IN_ALL_USB) != 0)) {
+ int status = sscanf(halAddress,
+ "card=%d;device=%d",
+ &address->address.alsa.card, &address->address.alsa.device);
+ return status == 2 ? OK : BAD_VALUE;
+ } else if ((!isInput && (device & AUDIO_DEVICE_OUT_BUS) != 0)
+ || (isInput && (device & AUDIO_DEVICE_IN_BUS) != 0)) {
+ if (halAddress != NULL) {
+ address->busAddress = halAddress;
+ return OK;
+ }
+ return BAD_VALUE;
+ } else if ((!isInput && (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX) != 0)
+ || (isInput && (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
+ if (halAddress != NULL) {
+ address->rSubmixAddress = halAddress;
+ return OK;
+ }
+ return BAD_VALUE;
+ }
+ return OK;
+}
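And the reverse direction, as it would be used within this file (address value illustrative):

    DeviceAddress a2dp;
    status_t status = deviceAddressFromHal(
            AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, "00:11:22:33:44:55", &a2dp);
    // On success a2dp.address.mac holds the six parsed bytes.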
+
+} // namespace
+
+DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
+ : ConversionHelperHidl("Device"), mDevice(device),
+ mPrimaryDevice(IPrimaryDevice::castFrom(device)) {
+}
+
+DeviceHalHidl::~DeviceHalHidl() {
+ if (mDevice != 0) {
+ mDevice.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ }
+}
+
+status_t DeviceHalHidl::getSupportedDevices(uint32_t*) {
+ // Obsolete.
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalHidl::initCheck() {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("initCheck", mDevice->initCheck());
+}
+
+status_t DeviceHalHidl::setVoiceVolume(float volume) {
+ if (mDevice == 0) return NO_INIT;
+ if (mPrimaryDevice == 0) return INVALID_OPERATION;
+ return processReturn("setVoiceVolume", mPrimaryDevice->setVoiceVolume(volume));
+}
+
+status_t DeviceHalHidl::setMasterVolume(float volume) {
+ if (mDevice == 0) return NO_INIT;
+ if (mPrimaryDevice == 0) return INVALID_OPERATION;
+ return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
+}
+
+status_t DeviceHalHidl::getMasterVolume(float *volume) {
+ if (mDevice == 0) return NO_INIT;
+ if (mPrimaryDevice == 0) return INVALID_OPERATION;
+ Result retval;
+ Return<void> ret = mPrimaryDevice->getMasterVolume(
+ [&](Result r, float v) {
+ retval = r;
+ if (retval == Result::OK) {
+ *volume = v;
+ }
+ });
+ return processReturn("getMasterVolume", ret, retval);
+}
+
+status_t DeviceHalHidl::setMode(audio_mode_t mode) {
+ if (mDevice == 0) return NO_INIT;
+ if (mPrimaryDevice == 0) return INVALID_OPERATION;
+ return processReturn("setMode", mPrimaryDevice->setMode(AudioMode(mode)));
+}
+
+status_t DeviceHalHidl::setMicMute(bool state) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("setMicMute", mDevice->setMicMute(state));
+}
+
+status_t DeviceHalHidl::getMicMute(bool *state) {
+ if (mDevice == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mDevice->getMicMute(
+ [&](Result r, bool mute) {
+ retval = r;
+ if (retval == Result::OK) {
+ *state = mute;
+ }
+ });
+ return processReturn("getMicMute", ret, retval);
+}
+
+status_t DeviceHalHidl::setMasterMute(bool state) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("setMasterMute", mDevice->setMasterMute(state));
+}
+
+status_t DeviceHalHidl::getMasterMute(bool *state) {
+ if (mDevice == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mDevice->getMasterMute(
+ [&](Result r, bool mute) {
+ retval = r;
+ if (retval == Result::OK) {
+ *state = mute;
+ }
+ });
+ return processReturn("getMasterMute", ret, retval);
+}
+
+status_t DeviceHalHidl::setParameters(const String8& kvPairs) {
+ if (mDevice == 0) return NO_INIT;
+ hidl_vec<ParameterValue> hidlParams;
+ status_t status = parametersFromHal(kvPairs, &hidlParams);
+ if (status != OK) return status;
+ // TODO: change the API so that context and kvPairs are separated
+ return processReturn("setParameters",
+ utils::setParameters(mDevice, {} /* context */, hidlParams));
+}
+
+status_t DeviceHalHidl::getParameters(const String8& keys, String8 *values) {
+ values->clear();
+ if (mDevice == 0) return NO_INIT;
+ hidl_vec<hidl_string> hidlKeys;
+ status_t status = keysFromHal(keys, &hidlKeys);
+ if (status != OK) return status;
+ Result retval;
+ Return<void> ret = utils::getParameters(mDevice,
+ {} /* context */,
+ hidlKeys,
+ [&](Result r, const hidl_vec<ParameterValue>& parameters) {
+ retval = r;
+ if (retval == Result::OK) {
+ parametersToHal(parameters, values);
+ }
+ });
+ return processReturn("getParameters", ret, retval);
+}
+
+status_t DeviceHalHidl::getInputBufferSize(
+ const struct audio_config *config, size_t *size) {
+ if (mDevice == 0) return NO_INIT;
+ AudioConfig hidlConfig;
+ HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+ Result retval;
+ Return<void> ret = mDevice->getInputBufferSize(
+ hidlConfig,
+ [&](Result r, uint64_t bufferSize) {
+ retval = r;
+ if (retval == Result::OK) {
+ *size = static_cast<size_t>(bufferSize);
+ }
+ });
+ return processReturn("getInputBufferSize", ret, retval);
+}
+
+status_t DeviceHalHidl::openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream) {
+ if (mDevice == 0) return NO_INIT;
+ DeviceAddress hidlDevice;
+ status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+ if (status != OK) return status;
+ AudioConfig hidlConfig;
+ HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mDevice->openOutputStream(
+ handle,
+ hidlDevice,
+ hidlConfig,
+ mkEnumConverter<AudioOutputFlag>(flags),
+ {} /* metadata */,
+ [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
+ retval = r;
+ if (retval == Result::OK) {
+ *outStream = new StreamOutHalHidl(result);
+ }
+ HidlUtils::audioConfigToHal(suggestedConfig, config);
+ });
+ return processReturn("openOutputStream", ret, retval);
+}
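A hedged caller-side sketch of openOutputStream() (handle, device, flags, and config values are hypothetical; the real caller is AudioFlinger):

    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = 48000;
    config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    config.format = AUDIO_FORMAT_PCM_16_BIT;
    sp<StreamOutHalInterface> outStream;
    status_t status = deviceHal->openOutputStream(
            1 /* handle */, AUDIO_DEVICE_OUT_SPEAKER, AUDIO_OUTPUT_FLAG_NONE,
            &config, "" /* address */, &outStream);
    // On return 'config' may have been overwritten with the HAL's suggested configuration.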
+
+status_t DeviceHalHidl::openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream) {
+ if (mDevice == 0) return NO_INIT;
+ DeviceAddress hidlDevice;
+ status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+ if (status != OK) return status;
+ AudioConfig hidlConfig;
+ HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+ Result retval = Result::NOT_INITIALIZED;
+ // TODO: correctly propagate the track sources and volumes;
+ // for now, only send the main source at unity gain (full scale).
+ SinkMetadata metadata = {{{AudioSource(source), 1}}};
+ Return<void> ret = mDevice->openInputStream(
+ handle,
+ hidlDevice,
+ hidlConfig,
+ flags,
+ metadata,
+ [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
+ retval = r;
+ if (retval == Result::OK) {
+ *inStream = new StreamInHalHidl(result);
+ }
+ HidlUtils::audioConfigToHal(suggestedConfig, config);
+ });
+ return processReturn("openInputStream", ret, retval);
+}
+
+status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
+}
+
+status_t DeviceHalHidl::createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch) {
+ if (mDevice == 0) return NO_INIT;
+ hidl_vec<AudioPortConfig> hidlSources, hidlSinks;
+ HidlUtils::audioPortConfigsFromHal(num_sources, sources, &hidlSources);
+ HidlUtils::audioPortConfigsFromHal(num_sinks, sinks, &hidlSinks);
+ Result retval;
+ Return<void> ret = mDevice->createAudioPatch(
+ hidlSources, hidlSinks,
+ [&](Result r, AudioPatchHandle hidlPatch) {
+ retval = r;
+ if (retval == Result::OK) {
+ *patch = static_cast<audio_patch_handle_t>(hidlPatch);
+ }
+ });
+ return processReturn("createAudioPatch", ret, retval);
+}
+
+status_t DeviceHalHidl::releaseAudioPatch(audio_patch_handle_t patch) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
+}
+
+status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
+ if (mDevice == 0) return NO_INIT;
+ AudioPort hidlPort;
+ HidlUtils::audioPortFromHal(*port, &hidlPort);
+ Result retval;
+ Return<void> ret = mDevice->getAudioPort(
+ hidlPort,
+ [&](Result r, const AudioPort& p) {
+ retval = r;
+ if (retval == Result::OK) {
+ HidlUtils::audioPortToHal(p, port);
+ }
+ });
+ return processReturn("getAudioPort", ret, retval);
+}
+
+status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
+ if (mDevice == 0) return NO_INIT;
+ AudioPortConfig hidlConfig;
+ HidlUtils::audioPortConfigFromHal(*config, &hidlConfig);
+ return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
+}
+
+status_t DeviceHalHidl::getMicrophones(std::vector<media::MicrophoneInfo> *microphonesInfo) {
+ if (mDevice == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mDevice->getMicrophones(
+ [&](Result r, hidl_vec<MicrophoneInfo> micArrayHal) {
+ retval = r;
+ for (size_t k = 0; k < micArrayHal.size(); k++) {
+ audio_microphone_characteristic_t dst;
+ // Convert the HIDL MicrophoneInfo into the legacy characteristic struct.
+ microphoneInfoToHal(micArrayHal[k], &dst);
+ media::MicrophoneInfo microphone = media::MicrophoneInfo(dst);
+ microphonesInfo->push_back(microphone);
+ }
+ });
+ return processReturn("getMicrophones", ret, retval);
+}
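A short hedged sketch of a getMicrophones() caller (names hypothetical):

    std::vector<media::MicrophoneInfo> microphones;
    if (deviceHal->getMicrophones(&microphones) == OK) {
        ALOGV("HAL reports %zu microphone(s)", microphones.size());
    }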
+
+status_t DeviceHalHidl::dump(int fd) {
+ if (mDevice == 0) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mDevice->debug(hidlHandle, {} /* options */);
+ native_handle_delete(hidlHandle);
+ return processReturn("dump", ret);
+}
+
+} // namespace V4_0
+} // namespace android
diff --git a/media/libaudiohal/4.0/DeviceHalHidl.h b/media/libaudiohal/4.0/DeviceHalHidl.h
new file mode 100644
index 0000000..0bd2175
--- /dev/null
+++ b/media/libaudiohal/4.0/DeviceHalHidl.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
+#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
+
+#include <android/hardware/audio/4.0/IDevice.h>
+#include <android/hardware/audio/4.0/IPrimaryDevice.h>
+#include <media/audiohal/DeviceHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+
+using ::android::hardware::audio::V4_0::IDevice;
+using ::android::hardware::audio::V4_0::IPrimaryDevice;
+using ::android::hardware::Return;
+
+namespace android {
+namespace V4_0 {
+
+class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
+{
+ public:
+ // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+ virtual status_t getSupportedDevices(uint32_t *devices);
+
+ // Check to see if the audio hardware interface has been initialized.
+ virtual status_t initCheck();
+
+ // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+ virtual status_t setVoiceVolume(float volume);
+
+ // Set the audio volume for all audio activities other than voice call.
+ virtual status_t setMasterVolume(float volume);
+
+ // Get the current master volume value for the HAL.
+ virtual status_t getMasterVolume(float *volume);
+
+ // Called when the audio mode changes.
+ virtual status_t setMode(audio_mode_t mode);
+
+ // Muting control.
+ virtual status_t setMicMute(bool state);
+ virtual status_t getMicMute(bool *state);
+ virtual status_t setMasterMute(bool state);
+ virtual status_t getMasterMute(bool *state);
+
+ // Set global audio parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get global audio parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Returns audio input buffer size according to parameters passed.
+ virtual status_t getInputBufferSize(const struct audio_config *config,
+ size_t *size);
+
+ // Creates and opens the audio hardware output stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream);
+
+ // Creates and opens the audio hardware input stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream);
+
+ // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+ virtual status_t supportsAudioPatches(bool *supportsPatches);
+
+ // Creates an audio patch between several source and sink ports.
+ virtual status_t createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch);
+
+ // Releases an audio patch.
+ virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+
+ // Fills the list of supported attributes for a given audio port.
+ virtual status_t getAudioPort(struct audio_port *port);
+
+ // Set audio port configuration.
+ virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ virtual status_t dump(int fd);
+
+ private:
+ friend class DevicesFactoryHalHidl;
+ sp<IDevice> mDevice;
+ sp<IPrimaryDevice> mPrimaryDevice; // Null if it's not a primary device.
+
+ // Can not be constructed directly by clients.
+ explicit DeviceHalHidl(const sp<IDevice>& device);
+
+ // The destructor automatically closes the device.
+ virtual ~DeviceHalHidl();
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.cpp b/media/libaudiohal/4.0/DeviceHalLocal.cpp
new file mode 100644
index 0000000..a245dd9
--- /dev/null
+++ b/media/libaudiohal/4.0/DeviceHalLocal.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DeviceHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "StreamHalLocal.h"
+
+namespace android {
+namespace V4_0 {
+
+DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
+ : mDev(dev) {
+}
+
+DeviceHalLocal::~DeviceHalLocal() {
+ int status = audio_hw_device_close(mDev);
+ ALOGW_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
+ mDev = 0;
+}
+
+status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
+ if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
+ *devices = mDev->get_supported_devices(mDev);
+ return OK;
+}
+
+status_t DeviceHalLocal::initCheck() {
+ return mDev->init_check(mDev);
+}
+
+status_t DeviceHalLocal::setVoiceVolume(float volume) {
+ return mDev->set_voice_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMasterVolume(float volume) {
+ if (mDev->set_master_volume == NULL) return INVALID_OPERATION;
+ return mDev->set_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::getMasterVolume(float *volume) {
+ if (mDev->get_master_volume == NULL) return INVALID_OPERATION;
+ return mDev->get_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMode(audio_mode_t mode) {
+ return mDev->set_mode(mDev, mode);
+}
+
+status_t DeviceHalLocal::setMicMute(bool state) {
+ return mDev->set_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMicMute(bool *state) {
+ return mDev->get_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setMasterMute(bool state) {
+ if (mDev->set_master_mute == NULL) return INVALID_OPERATION;
+ return mDev->set_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMasterMute(bool *state) {
+ if (mDev->get_master_mute == NULL) return INVALID_OPERATION;
+ return mDev->get_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setParameters(const String8& kvPairs) {
+ return mDev->set_parameters(mDev, kvPairs.string());
+}
+
+status_t DeviceHalLocal::getParameters(const String8& keys, String8 *values) {
+ char *halValues = mDev->get_parameters(mDev, keys.string());
+ if (halValues != NULL) {
+ values->setTo(halValues);
+ free(halValues);
+ } else {
+ values->clear();
+ }
+ return OK;
+}
+
+status_t DeviceHalLocal::getInputBufferSize(
+ const struct audio_config *config, size_t *size) {
+ *size = mDev->get_input_buffer_size(mDev, config);
+ return OK;
+}
+
+status_t DeviceHalLocal::openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream) {
+ audio_stream_out_t *halStream;
+ ALOGV("open_output_stream handle: %d devices: %x flags: %#x"
+ "srate: %d format %#x channels %x address %s",
+ handle, devices, flags,
+ config->sample_rate, config->format, config->channel_mask,
+ address);
+ int openResult = mDev->open_output_stream(
+ mDev, handle, devices, flags, config, &halStream, address);
+ if (openResult == OK) {
+ *outStream = new StreamOutHalLocal(halStream, this);
+ }
+ ALOGV("open_output_stream status %d stream %p", openResult, halStream);
+ return openResult;
+}
+
+status_t DeviceHalLocal::openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream) {
+ audio_stream_in_t *halStream;
+ ALOGV("open_input_stream handle: %d devices: %x flags: %#x "
+ "srate: %d format %#x channels %x address %s source %d",
+ handle, devices, flags,
+ config->sample_rate, config->format, config->channel_mask,
+ address, source);
+ int openResult = mDev->open_input_stream(
+ mDev, handle, devices, config, &halStream, flags, address, source);
+ if (openResult == OK) {
+ *inStream = new StreamInHalLocal(halStream, this);
+ }
+ ALOGV("open_input_stream status %d stream %p", openResult, inStream);
+ return openResult;
+}
+
+status_t DeviceHalLocal::supportsAudioPatches(bool *supportsPatches) {
+ *supportsPatches = version() >= AUDIO_DEVICE_API_VERSION_3_0;
+ return OK;
+}
+
+status_t DeviceHalLocal::createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch) {
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+ return mDev->create_audio_patch(
+ mDev, num_sources, sources, num_sinks, sinks, patch);
+ } else {
+ return INVALID_OPERATION;
+ }
+}
+
+status_t DeviceHalLocal::releaseAudioPatch(audio_patch_handle_t patch) {
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+ return mDev->release_audio_patch(mDev, patch);
+ } else {
+ return INVALID_OPERATION;
+ }
+}
+
+status_t DeviceHalLocal::getAudioPort(struct audio_port *port) {
+ return mDev->get_audio_port(mDev, port);
+}
+
+status_t DeviceHalLocal::setAudioPortConfig(const struct audio_port_config *config) {
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_0)
+ return mDev->set_audio_port_config(mDev, config);
+ else
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalLocal::getMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
+ if (mDev->get_microphones == NULL) return INVALID_OPERATION;
+ size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
+ audio_microphone_characteristic_t mic_array[AUDIO_MICROPHONE_MAX_COUNT];
+ status_t status = mDev->get_microphones(mDev, &mic_array[0], &actual_mics);
+ for (size_t i = 0; i < actual_mics; i++) {
+ media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(mic_array[i]);
+ microphones->push_back(microphoneInfo);
+ }
+ return status;
+}
+
+status_t DeviceHalLocal::dump(int fd) {
+ return mDev->dump(mDev, fd);
+}
+
+void DeviceHalLocal::closeOutputStream(struct audio_stream_out *stream_out) {
+ mDev->close_output_stream(mDev, stream_out);
+}
+
+void DeviceHalLocal::closeInputStream(struct audio_stream_in *stream_in) {
+ mDev->close_input_stream(mDev, stream_in);
+}
+
+} // namespace V4_0
+} // namespace android
diff --git a/media/libaudiohal/4.0/DeviceHalLocal.h b/media/libaudiohal/4.0/DeviceHalLocal.h
new file mode 100644
index 0000000..08341a4
--- /dev/null
+++ b/media/libaudiohal/4.0/DeviceHalLocal.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
+#define ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
+
+#include <hardware/audio.h>
+#include <media/audiohal/DeviceHalInterface.h>
+
+namespace android {
+namespace V4_0 {
+
+class DeviceHalLocal : public DeviceHalInterface
+{
+ public:
+ // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+ virtual status_t getSupportedDevices(uint32_t *devices);
+
+ // Check to see if the audio hardware interface has been initialized.
+ virtual status_t initCheck();
+
+ // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+ virtual status_t setVoiceVolume(float volume);
+
+ // Set the audio volume for all audio activities other than voice call.
+ virtual status_t setMasterVolume(float volume);
+
+ // Get the current master volume value for the HAL.
+ virtual status_t getMasterVolume(float *volume);
+
+ // Called when the audio mode changes.
+ virtual status_t setMode(audio_mode_t mode);
+
+ // Muting control.
+ virtual status_t setMicMute(bool state);
+ virtual status_t getMicMute(bool *state);
+ virtual status_t setMasterMute(bool state);
+ virtual status_t getMasterMute(bool *state);
+
+ // Set global audio parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get global audio parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Returns audio input buffer size according to parameters passed.
+ virtual status_t getInputBufferSize(const struct audio_config *config,
+ size_t *size);
+
+ // Creates and opens the audio hardware output stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream);
+
+ // Creates and opens the audio hardware input stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream);
+
+ // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+ virtual status_t supportsAudioPatches(bool *supportsPatches);
+
+ // Creates an audio patch between several source and sink ports.
+ virtual status_t createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch);
+
+ // Releases an audio patch.
+ virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+
+ // Fills the list of supported attributes for a given audio port.
+ virtual status_t getAudioPort(struct audio_port *port);
+
+ // Set audio port configuration.
+ virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ virtual status_t dump(int fd);
+
+ void closeOutputStream(struct audio_stream_out *stream_out);
+ void closeInputStream(struct audio_stream_in *stream_in);
+
+ private:
+ audio_hw_device_t *mDev;
+
+ friend class DevicesFactoryHalLocal;
+
+ // Can not be constructed directly by clients.
+ explicit DeviceHalLocal(audio_hw_device_t *dev);
+
+ // The destructor automatically closes the device.
+ virtual ~DeviceHalLocal();
+
+ uint32_t version() const { return mDev->common.version; }
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp b/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp
new file mode 100644
index 0000000..c83194e
--- /dev/null
+++ b/media/libaudiohal/4.0/DevicesFactoryHalHidl.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+
+#define LOG_TAG "DevicesFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/4.0/IDevice.h>
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "DeviceHalHidl.h"
+#include "DevicesFactoryHalHidl.h"
+
+using ::android::hardware::audio::V4_0::IDevice;
+using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::Return;
+
+namespace android {
+namespace V4_0 {
+
+DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
+ mDevicesFactory = IDevicesFactory::getService();
+ if (mDevicesFactory != 0) {
+ // It is assumed that DevicesFactory is owned by AudioFlinger
+    // and thus has the same lifespan.
+ mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+ } else {
+ ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
+ exit(1);
+ }
+ // The MSD factory is optional
+ mDevicesFactoryMsd = IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD);
+ // TODO: Register death handler, and add 'restart' directive to audioserver.rc
+}
+
+DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
+}
+
+status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ if (mDevicesFactory == 0) return NO_INIT;
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mDevicesFactory->openDevice(
+ name,
+ [&](Result r, const sp<IDevice>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ *device = new DeviceHalHidl(result);
+ }
+ });
+ if (ret.isOk()) {
+ if (retval == Result::OK) return OK;
+ else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
+ else return NO_INIT;
+ }
+ return FAILED_TRANSACTION;
+}
+
+} // namespace V4_0
+} // namespace android
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHidl.h b/media/libaudiohal/4.0/DevicesFactoryHalHidl.h
new file mode 100644
index 0000000..114889b
--- /dev/null
+++ b/media/libaudiohal/4.0/DevicesFactoryHalHidl.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
+
+#include <android/hardware/audio/4.0/IDevicesFactory.h>
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "DeviceHalHidl.h"
+
+using ::android::hardware::audio::V4_0::IDevicesFactory;
+
+namespace android {
+namespace V4_0 {
+
+class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
+{
+ public:
+ // Opens a device with the specified name. To close the device, it is
+ // necessary to release references to the returned object.
+ virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+ private:
+ friend class DevicesFactoryHalHybrid;
+
+ sp<IDevicesFactory> mDevicesFactory;
+ sp<IDevicesFactory> mDevicesFactoryMsd;
+
+ // Can not be constructed directly by clients.
+ DevicesFactoryHalHidl();
+
+ virtual ~DevicesFactoryHalHidl();
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp
new file mode 100644
index 0000000..7ff1ec7d
--- /dev/null
+++ b/media/libaudiohal/4.0/DevicesFactoryHalHybrid.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DevicesFactoryHalHybrid"
+//#define LOG_NDEBUG 0
+
+#include <libaudiohal/4.0/DevicesFactoryHalHybrid.h>
+#include "DevicesFactoryHalLocal.h"
+#include "DevicesFactoryHalHidl.h"
+
+namespace android {
+namespace V4_0 {
+
+DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
+ : mLocalFactory(new DevicesFactoryHalLocal()),
+ mHidlFactory(new DevicesFactoryHalHidl()) {
+}
+
+DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
+}
+
+status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
+ strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
+ return mHidlFactory->openDevice(name, device);
+ }
+ return mLocalFactory->openDevice(name, device);
+}
+
+} // namespace V4_0
+} // namespace android
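For illustration (not in the patch): how the routing above plays out for different module names, assuming a hybrid factory instance obtained through the usual DevicesFactoryHalInterface entry point and named 'factory' here.

    // Hypothetical usage; error handling omitted.
    sp<DeviceHalInterface> primary, a2dp;
    // "primary" is not in the exclusion list, so it is opened through the HIDL factory.
    factory->openDevice(AUDIO_HARDWARE_MODULE_ID_PRIMARY, &primary);
    // A2DP (and hearing aid) modules are still opened in-process via the local factory.
    factory->openDevice(AUDIO_HARDWARE_MODULE_ID_A2DP, &a2dp);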
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp b/media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp
new file mode 100644
index 0000000..e54edd4
--- /dev/null
+++ b/media/libaudiohal/4.0/DevicesFactoryHalLocal.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DevicesFactoryHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <string.h>
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "DevicesFactoryHalLocal.h"
+
+namespace android {
+namespace V4_0 {
+
+static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
+{
+ const hw_module_t *mod;
+ int rc;
+
+ rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
+ if (rc) {
+ ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
+ AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
+ goto out;
+ }
+ rc = audio_hw_device_open(mod, dev);
+ if (rc) {
+ ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
+ AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
+ goto out;
+ }
+ if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
+ ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
+ rc = BAD_VALUE;
+ audio_hw_device_close(*dev);
+ goto out;
+ }
+ return OK;
+
+out:
+ *dev = NULL;
+ return rc;
+}
+
+status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ audio_hw_device_t *dev;
+ status_t rc = load_audio_interface(name, &dev);
+ if (rc == OK) {
+ *device = new DeviceHalLocal(dev);
+ }
+ return rc;
+}
+
+} // namespace V4_0
+} // namespace android
diff --git a/media/libaudiohal/4.0/DevicesFactoryHalLocal.h b/media/libaudiohal/4.0/DevicesFactoryHalLocal.h
new file mode 100644
index 0000000..bc1c521
--- /dev/null
+++ b/media/libaudiohal/4.0/DevicesFactoryHalLocal.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
+
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "DeviceHalLocal.h"
+
+namespace android {
+namespace V4_0 {
+
+class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
+{
+ public:
+ // Opens a device with the specified name. To close the device, it is
+ // necessary to release references to the returned object.
+ virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+ private:
+ friend class DevicesFactoryHalHybrid;
+
+ // Can not be constructed directly by clients.
+ DevicesFactoryHalLocal() {}
+
+ virtual ~DevicesFactoryHalLocal() {}
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/EffectBufferHalHidl.cpp b/media/libaudiohal/4.0/EffectBufferHalHidl.cpp
new file mode 100644
index 0000000..957c89f
--- /dev/null
+++ b/media/libaudiohal/4.0/EffectBufferHalHidl.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+
+#define LOG_TAG "EffectBufferHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <hidlmemory/mapping.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+
+using ::android::hardware::Return;
+using ::android::hidl::allocator::V1_0::IAllocator;
+
+namespace android {
+namespace V4_0 {
+
+// static
+uint64_t EffectBufferHalHidl::makeUniqueId() {
+ static std::atomic<uint64_t> counter{1};
+ return counter++;
+}
+
+status_t EffectBufferHalHidl::allocate(
+ size_t size, sp<EffectBufferHalInterface>* buffer) {
+ return mirror(nullptr, size, buffer);
+}
+
+status_t EffectBufferHalHidl::mirror(
+ void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
+ sp<EffectBufferHalInterface> tempBuffer = new EffectBufferHalHidl(size);
+ status_t result = static_cast<EffectBufferHalHidl*>(tempBuffer.get())->init();
+ if (result == OK) {
+ tempBuffer->setExternalData(external);
+ *buffer = tempBuffer;
+ }
+ return result;
+}
+
+EffectBufferHalHidl::EffectBufferHalHidl(size_t size)
+ : mBufferSize(size), mFrameCountChanged(false),
+ mExternalData(nullptr), mAudioBuffer{0, {nullptr}} {
+ mHidlBuffer.id = makeUniqueId();
+ mHidlBuffer.frameCount = 0;
+}
+
+EffectBufferHalHidl::~EffectBufferHalHidl() {
+}
+
+status_t EffectBufferHalHidl::init() {
+ sp<IAllocator> ashmem = IAllocator::getService("ashmem");
+ if (ashmem == 0) {
+ ALOGE("Failed to retrieve ashmem allocator service");
+ return NO_INIT;
+ }
+ status_t retval = NO_MEMORY;
+ Return<void> result = ashmem->allocate(
+ mBufferSize,
+ [&](bool success, const hidl_memory& memory) {
+ if (success) {
+ mHidlBuffer.data = memory;
+ retval = OK;
+ }
+ });
+ if (result.isOk() && retval == OK) {
+ mMemory = hardware::mapMemory(mHidlBuffer.data);
+ if (mMemory != 0) {
+ mMemory->update();
+ mAudioBuffer.raw = static_cast<void*>(mMemory->getPointer());
+ memset(mAudioBuffer.raw, 0, mMemory->getSize());
+ mMemory->commit();
+ } else {
+ ALOGE("Failed to map allocated ashmem");
+ retval = NO_MEMORY;
+ }
+ } else {
+ ALOGE("Failed to allocate %d bytes from ashmem", (int)mBufferSize);
+ }
+ return result.isOk() ? retval : FAILED_TRANSACTION;
+}
+
+audio_buffer_t* EffectBufferHalHidl::audioBuffer() {
+ return &mAudioBuffer;
+}
+
+void* EffectBufferHalHidl::externalData() const {
+ return mExternalData;
+}
+
+void EffectBufferHalHidl::setFrameCount(size_t frameCount) {
+ mHidlBuffer.frameCount = frameCount;
+ mAudioBuffer.frameCount = frameCount;
+ mFrameCountChanged = true;
+}
+
+bool EffectBufferHalHidl::checkFrameCountChange() {
+ bool result = mFrameCountChanged;
+ mFrameCountChanged = false;
+ return result;
+}
+
+void EffectBufferHalHidl::setExternalData(void* external) {
+ mExternalData = external;
+}
+
+void EffectBufferHalHidl::update() {
+ update(mBufferSize);
+}
+
+void EffectBufferHalHidl::commit() {
+ commit(mBufferSize);
+}
+
+void EffectBufferHalHidl::update(size_t size) {
+ if (mExternalData == nullptr) return;
+ mMemory->update();
+ if (size > mBufferSize) size = mBufferSize;
+ memcpy(mAudioBuffer.raw, mExternalData, size);
+ mMemory->commit();
+}
+
+void EffectBufferHalHidl::commit(size_t size) {
+ if (mExternalData == nullptr) return;
+ if (size > mBufferSize) size = mBufferSize;
+ memcpy(mExternalData, mAudioBuffer.raw, size);
+}
+
+} // namespace V4_0
+} // namespace android
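A usage sketch (not part of the patch) of the mirror/update/commit contract implemented above; 'clientBuffer' and 'frames' are assumed names, and the effect processing step is elided.

    // Hypothetical: mirror an existing client buffer of 'frames' stereo float frames.
    sp<EffectBufferHalInterface> halBuffer;
    const size_t bytes = frames * 2 * sizeof(float);
    if (EffectBufferHalHidl::mirror(clientBuffer, bytes, &halBuffer) == OK) {
        halBuffer->setFrameCount(frames);
        halBuffer->update();   // copy clientBuffer into the shared ashmem region
        // ... the effect HAL processes the shared buffer here ...
        halBuffer->commit();   // copy the processed data back into clientBuffer
    }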
diff --git a/media/libaudiohal/4.0/EffectBufferHalHidl.h b/media/libaudiohal/4.0/EffectBufferHalHidl.h
new file mode 100644
index 0000000..6d578c6
--- /dev/null
+++ b/media/libaudiohal/4.0/EffectBufferHalHidl.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
+#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
+
+#include <android/hardware/audio/effect/4.0/types.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidl/HidlSupport.h>
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <system/audio_effect.h>
+
+using android::hardware::audio::effect::V4_0::AudioBuffer;
+using android::hardware::hidl_memory;
+using android::hidl::memory::V1_0::IMemory;
+
+namespace android {
+namespace V4_0 {
+
+class EffectBufferHalHidl : public EffectBufferHalInterface
+{
+ public:
+ static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
+ static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
+
+ virtual audio_buffer_t* audioBuffer();
+ virtual void* externalData() const;
+
+ virtual size_t getSize() const override { return mBufferSize; }
+
+ virtual void setExternalData(void* external);
+ virtual void setFrameCount(size_t frameCount);
+ virtual bool checkFrameCountChange();
+
+ virtual void update();
+ virtual void commit();
+ virtual void update(size_t size);
+ virtual void commit(size_t size);
+
+ const AudioBuffer& hidlBuffer() const { return mHidlBuffer; }
+
+ private:
+ friend class EffectBufferHalInterface;
+
+ static uint64_t makeUniqueId();
+
+ const size_t mBufferSize;
+ bool mFrameCountChanged;
+ void* mExternalData;
+ AudioBuffer mHidlBuffer;
+ sp<IMemory> mMemory;
+ audio_buffer_t mAudioBuffer;
+
+ // Can not be constructed directly by clients.
+ explicit EffectBufferHalHidl(size_t size);
+
+ virtual ~EffectBufferHalHidl();
+
+ status_t init();
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/EffectHalHidl.cpp b/media/libaudiohal/4.0/EffectHalHidl.cpp
new file mode 100644
index 0000000..c99c4c8
--- /dev/null
+++ b/media/libaudiohal/4.0/EffectHalHidl.cpp
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <common/all-versions/VersionUtils.h>
+#include <hwbinder/IPCThreadState.h>
+#include <media/EffectsFactoryApi.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+#include "EffectHalHidl.h"
+#include "HidlUtils.h"
+
+using ::android::hardware::audio::effect::V4_0::AudioBuffer;
+using ::android::hardware::audio::effect::V4_0::EffectBufferAccess;
+using ::android::hardware::audio::effect::V4_0::EffectConfigParameters;
+using ::android::hardware::audio::effect::V4_0::MessageQueueFlagBits;
+using ::android::hardware::audio::effect::V4_0::Result;
+using ::android::hardware::audio::common::V4_0::HidlUtils;
+using ::android::hardware::audio::common::V4_0::AudioChannelMask;
+using ::android::hardware::audio::common::V4_0::AudioFormat;
+using ::android::hardware::audio::common::utils::mkEnumConverter;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+
+namespace android {
+namespace V4_0 {
+
+EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
+ : mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
+}
+
+EffectHalHidl::~EffectHalHidl() {
+ if (mEffect != 0) {
+ close();
+ mEffect.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ }
+ if (mEfGroup) {
+ EventFlag::deleteEventFlag(&mEfGroup);
+ }
+}
+
+// static
+void EffectHalHidl::effectDescriptorToHal(
+ const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
+ HidlUtils::uuidToHal(descriptor.type, &halDescriptor->type);
+ HidlUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
+ halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
+ halDescriptor->cpuLoad = descriptor.cpuLoad;
+ halDescriptor->memoryUsage = descriptor.memoryUsage;
+ memcpy(halDescriptor->name, descriptor.name.data(), descriptor.name.size());
+ memcpy(halDescriptor->implementor,
+ descriptor.implementor.data(), descriptor.implementor.size());
+}
+
+// TODO(mnaganov): These buffer conversion functions should be shared with Effect wrapper
+// via HidlUtils. Move them there when hardware/interfaces gets unfrozen again.
+
+// static
+void EffectHalHidl::effectBufferConfigFromHal(
+ const buffer_config_t& halConfig, EffectBufferConfig* config) {
+ config->samplingRateHz = halConfig.samplingRate;
+ config->channels = mkEnumConverter<AudioChannelMask>(halConfig.channels);
+ config->format = AudioFormat(halConfig.format);
+ config->accessMode = EffectBufferAccess(halConfig.accessMode);
+ config->mask = mkEnumConverter<EffectConfigParameters>(halConfig.mask);
+}
+
+// static
+void EffectHalHidl::effectBufferConfigToHal(
+ const EffectBufferConfig& config, buffer_config_t* halConfig) {
+ halConfig->buffer.frameCount = 0;
+ halConfig->buffer.raw = NULL;
+ halConfig->samplingRate = config.samplingRateHz;
+ halConfig->channels = static_cast<uint32_t>(config.channels);
+ halConfig->bufferProvider.cookie = NULL;
+ halConfig->bufferProvider.getBuffer = NULL;
+ halConfig->bufferProvider.releaseBuffer = NULL;
+ halConfig->format = static_cast<uint8_t>(config.format);
+ halConfig->accessMode = static_cast<uint8_t>(config.accessMode);
+ halConfig->mask = static_cast<uint8_t>(config.mask);
+}
+
+// static
+void EffectHalHidl::effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config) {
+ effectBufferConfigFromHal(halConfig.inputCfg, &config->inputCfg);
+ effectBufferConfigFromHal(halConfig.outputCfg, &config->outputCfg);
+}
+
+// static
+void EffectHalHidl::effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig) {
+ effectBufferConfigToHal(config.inputCfg, &halConfig->inputCfg);
+ effectBufferConfigToHal(config.outputCfg, &halConfig->outputCfg);
+}
+
+// static
+status_t EffectHalHidl::analyzeResult(const Result& result) {
+ switch (result) {
+ case Result::OK: return OK;
+ case Result::INVALID_ARGUMENTS: return BAD_VALUE;
+ case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
+ case Result::NOT_INITIALIZED: return NO_INIT;
+ case Result::NOT_SUPPORTED: return INVALID_OPERATION;
+ case Result::RESULT_TOO_BIG: return NO_MEMORY;
+ default: return NO_INIT;
+ }
+}
+
+status_t EffectHalHidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ if (!mBuffersChanged) {
+ if (buffer.get() == nullptr || mInBuffer.get() == nullptr) {
+ mBuffersChanged = buffer.get() != mInBuffer.get();
+ } else {
+ mBuffersChanged = buffer->audioBuffer() != mInBuffer->audioBuffer();
+ }
+ }
+ mInBuffer = buffer;
+ return OK;
+}
+
+status_t EffectHalHidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ if (!mBuffersChanged) {
+ if (buffer.get() == nullptr || mOutBuffer.get() == nullptr) {
+ mBuffersChanged = buffer.get() != mOutBuffer.get();
+ } else {
+ mBuffersChanged = buffer->audioBuffer() != mOutBuffer->audioBuffer();
+ }
+ }
+ mOutBuffer = buffer;
+ return OK;
+}
+
+status_t EffectHalHidl::process() {
+ return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
+}
+
+status_t EffectHalHidl::processReverse() {
+ return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS_REVERSE));
+}
+
+status_t EffectHalHidl::prepareForProcessing() {
+ std::unique_ptr<StatusMQ> tempStatusMQ;
+ Result retval;
+ Return<void> ret = mEffect->prepareForProcessing(
+ [&](Result r, const MQDescriptorSync<Result>& statusMQ) {
+ retval = r;
+ if (retval == Result::OK) {
+ tempStatusMQ.reset(new StatusMQ(statusMQ));
+ if (tempStatusMQ->isValid() && tempStatusMQ->getEventFlagWord()) {
+ EventFlag::createEventFlag(tempStatusMQ->getEventFlagWord(), &mEfGroup);
+ }
+ }
+ });
+ if (!ret.isOk() || retval != Result::OK) {
+ return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+ }
+ if (!tempStatusMQ || !tempStatusMQ->isValid() || !mEfGroup) {
+ ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for effects");
+ ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+ "Status message queue for effects is invalid");
+ ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
+ return NO_INIT;
+ }
+ mStatusMQ = std::move(tempStatusMQ);
+ return OK;
+}
+
+bool EffectHalHidl::needToResetBuffers() {
+ if (mBuffersChanged) return true;
+ bool inBufferFrameCountUpdated = mInBuffer->checkFrameCountChange();
+ bool outBufferFrameCountUpdated = mOutBuffer->checkFrameCountChange();
+ return inBufferFrameCountUpdated || outBufferFrameCountUpdated;
+}
+
+status_t EffectHalHidl::processImpl(uint32_t mqFlag) {
+ if (mEffect == 0 || mInBuffer == 0 || mOutBuffer == 0) return NO_INIT;
+ status_t status;
+ if (!mStatusMQ && (status = prepareForProcessing()) != OK) {
+ return status;
+ }
+ if (needToResetBuffers() && (status = setProcessBuffers()) != OK) {
+ return status;
+ }
+ // The data is already in the buffers, just need to flush it and wake up the server side.
+ std::atomic_thread_fence(std::memory_order_release);
+ mEfGroup->wake(mqFlag);
+ uint32_t efState = 0;
+retry:
+ status_t ret = mEfGroup->wait(
+ static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING), &efState);
+ if (efState & static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING)) {
+ Result retval = Result::NOT_INITIALIZED;
+ mStatusMQ->read(&retval);
+ if (retval == Result::OK || retval == Result::INVALID_STATE) {
+ // Sync back the changed contents of the buffer.
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+ return analyzeResult(retval);
+ }
+ if (ret == -EAGAIN || ret == -EINTR) {
+ // Spurious wakeup. This normally retries no more than once.
+ goto retry;
+ }
+ return ret;
+}
+
+status_t EffectHalHidl::setProcessBuffers() {
+ Return<Result> ret = mEffect->setProcessBuffers(
+ static_cast<EffectBufferHalHidl*>(mInBuffer.get())->hidlBuffer(),
+ static_cast<EffectBufferHalHidl*>(mOutBuffer.get())->hidlBuffer());
+ if (ret.isOk() && ret == Result::OK) {
+ mBuffersChanged = false;
+ return OK;
+ }
+ return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+ uint32_t *replySize, void *pReplyData) {
+ if (mEffect == 0) return NO_INIT;
+
+ // Special cases.
+ if (cmdCode == EFFECT_CMD_SET_CONFIG || cmdCode == EFFECT_CMD_SET_CONFIG_REVERSE) {
+ return setConfigImpl(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ } else if (cmdCode == EFFECT_CMD_GET_CONFIG || cmdCode == EFFECT_CMD_GET_CONFIG_REVERSE) {
+ return getConfigImpl(cmdCode, replySize, pReplyData);
+ }
+
+ // Common case.
+ hidl_vec<uint8_t> hidlData;
+ if (pCmdData != nullptr && cmdSize > 0) {
+ hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
+ }
+ status_t status;
+ uint32_t replySizeStub = 0;
+ if (replySize == nullptr || pReplyData == nullptr) replySize = &replySizeStub;
+ Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
+ [&](int32_t s, const hidl_vec<uint8_t>& result) {
+ status = s;
+ if (status == 0) {
+ if (*replySize > result.size()) *replySize = result.size();
+ if (pReplyData != nullptr && *replySize > 0) {
+ memcpy(pReplyData, &result[0], *replySize);
+ }
+ }
+ });
+ return ret.isOk() ? status : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
+ if (mEffect == 0) return NO_INIT;
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffect->getDescriptor(
+ [&](Result r, const EffectDescriptor& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ effectDescriptorToHal(result, pDescriptor);
+ }
+ });
+ return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::close() {
+ if (mEffect == 0) return NO_INIT;
+ Return<Result> ret = mEffect->close();
+ return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+}
+
+status_t EffectHalHidl::getConfigImpl(
+ uint32_t cmdCode, uint32_t *replySize, void *pReplyData) {
+ if (replySize == NULL || *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
+ return BAD_VALUE;
+ }
+ status_t result = FAILED_TRANSACTION;
+ Return<void> ret;
+ if (cmdCode == EFFECT_CMD_GET_CONFIG) {
+ ret = mEffect->getConfig([&] (Result r, const EffectConfig &hidlConfig) {
+ result = analyzeResult(r);
+ if (r == Result::OK) {
+ effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+ }
+ });
+ } else {
+ ret = mEffect->getConfigReverse([&] (Result r, const EffectConfig &hidlConfig) {
+ result = analyzeResult(r);
+ if (r == Result::OK) {
+ effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+ }
+ });
+ }
+ if (!ret.isOk()) {
+ result = FAILED_TRANSACTION;
+ }
+ return result;
+}
+
+status_t EffectHalHidl::setConfigImpl(
+ uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, void *pReplyData) {
+ if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) ||
+ replySize == NULL || *replySize != sizeof(int32_t) || pReplyData == NULL) {
+ return BAD_VALUE;
+ }
+ const effect_config_t *halConfig = static_cast<effect_config_t*>(pCmdData);
+ if (halConfig->inputCfg.bufferProvider.getBuffer != NULL ||
+ halConfig->inputCfg.bufferProvider.releaseBuffer != NULL ||
+ halConfig->outputCfg.bufferProvider.getBuffer != NULL ||
+ halConfig->outputCfg.bufferProvider.releaseBuffer != NULL) {
+ ALOGE("Buffer provider callbacks are not supported");
+ }
+ EffectConfig hidlConfig;
+ effectConfigFromHal(*halConfig, &hidlConfig);
+ Return<Result> ret = cmdCode == EFFECT_CMD_SET_CONFIG ?
+ mEffect->setConfig(hidlConfig, nullptr, nullptr) :
+ mEffect->setConfigReverse(hidlConfig, nullptr, nullptr);
+ status_t result = FAILED_TRANSACTION;
+ if (ret.isOk()) {
+ result = analyzeResult(ret);
+ *static_cast<int32_t*>(pReplyData) = result;
+ }
+ return result;
+}
+
+} // namespace V4_0
+} // namespace android
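Not part of the patch: a rough sketch of the client side of the FMQ-based processing flow above, assuming 'effect' was created by the effects factory and both buffers come from EffectBufferHalHidl::mirror().

    // Hypothetical wiring; error handling trimmed.
    effect->setInBuffer(inBuffer);
    effect->setOutBuffer(outBuffer);
    inBuffer->update();               // push input samples into shared memory
    if (effect->process() == OK) {    // first call also sets up the status MQ and buffers
        outBuffer->commit();          // pull the processed samples back out
    }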
diff --git a/media/libaudiohal/4.0/EffectHalHidl.h b/media/libaudiohal/4.0/EffectHalHidl.h
new file mode 100644
index 0000000..5a4dab1
--- /dev/null
+++ b/media/libaudiohal/4.0/EffectHalHidl.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
+#define ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
+
+#include <android/hardware/audio/effect/4.0/IEffect.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <fmq/EventFlag.h>
+#include <fmq/MessageQueue.h>
+#include <system/audio_effect.h>
+
+using ::android::hardware::audio::effect::V4_0::EffectBufferConfig;
+using ::android::hardware::audio::effect::V4_0::EffectConfig;
+using ::android::hardware::audio::effect::V4_0::EffectDescriptor;
+using ::android::hardware::audio::effect::V4_0::IEffect;
+using ::android::hardware::EventFlag;
+using ::android::hardware::MessageQueue;
+
+namespace android {
+namespace V4_0 {
+
+class EffectHalHidl : public EffectHalInterface
+{
+ public:
+ // Set the input buffer.
+ virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+ // Set the output buffer.
+ virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+ // Effect process function.
+ virtual status_t process();
+
+ // Process reverse stream function. This function is used to pass
+ // a reference stream to the effect engine.
+ virtual status_t processReverse();
+
+ // Send a command and receive a response to/from effect engine.
+ virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+ uint32_t *replySize, void *pReplyData);
+
+ // Returns the effect descriptor.
+ virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
+
+ // Free resources on the remote side.
+ virtual status_t close();
+
+ // Whether it's a local implementation.
+ virtual bool isLocal() const { return false; }
+
+ uint64_t effectId() const { return mEffectId; }
+
+ static void effectDescriptorToHal(
+ const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor);
+
+ private:
+ friend class EffectsFactoryHalHidl;
+ typedef MessageQueue<
+ hardware::audio::effect::V4_0::Result, hardware::kSynchronizedReadWrite> StatusMQ;
+
+ sp<IEffect> mEffect;
+ const uint64_t mEffectId;
+ sp<EffectBufferHalInterface> mInBuffer;
+ sp<EffectBufferHalInterface> mOutBuffer;
+ bool mBuffersChanged;
+ std::unique_ptr<StatusMQ> mStatusMQ;
+ EventFlag* mEfGroup;
+
+ static status_t analyzeResult(const hardware::audio::effect::V4_0::Result& result);
+ static void effectBufferConfigFromHal(
+ const buffer_config_t& halConfig, EffectBufferConfig* config);
+ static void effectBufferConfigToHal(
+ const EffectBufferConfig& config, buffer_config_t* halConfig);
+ static void effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config);
+ static void effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig);
+
+ // Can not be constructed directly by clients.
+ EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
+
+ // The destructor automatically releases the effect.
+ virtual ~EffectHalHidl();
+
+ status_t getConfigImpl(uint32_t cmdCode, uint32_t *replySize, void *pReplyData);
+ status_t prepareForProcessing();
+ bool needToResetBuffers();
+ status_t processImpl(uint32_t mqFlag);
+ status_t setConfigImpl(
+ uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+ uint32_t *replySize, void *pReplyData);
+ status_t setProcessBuffers();
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp b/media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp
new file mode 100644
index 0000000..dfed784
--- /dev/null
+++ b/media/libaudiohal/4.0/EffectsFactoryHalHidl.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectsFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <cutils/native_handle.h>
+#include <libaudiohal/4.0/EffectsFactoryHalHidl.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+#include "EffectHalHidl.h"
+#include "HidlUtils.h"
+
+using ::android::hardware::audio::common::V4_0::HidlUtils;
+using ::android::hardware::audio::common::V4_0::Uuid;
+using ::android::hardware::audio::effect::V4_0::IEffect;
+using ::android::hardware::audio::effect::V4_0::Result;
+using ::android::hardware::Return;
+
+namespace android {
+namespace V4_0 {
+
+EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
+ mEffectsFactory = IEffectsFactory::getService();
+ if (mEffectsFactory == 0) {
+ ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
+ exit(1);
+ }
+}
+
+EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
+}
+
+status_t EffectsFactoryHalHidl::queryAllDescriptors() {
+ if (mEffectsFactory == 0) return NO_INIT;
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffectsFactory->getAllDescriptors(
+ [&](Result r, const hidl_vec<EffectDescriptor>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ mLastDescriptors = result;
+ }
+ });
+ if (ret.isOk()) {
+ return retval == Result::OK ? OK : NO_INIT;
+ }
+ mLastDescriptors.resize(0);
+ return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::queryNumberEffects(uint32_t *pNumEffects) {
+ status_t queryResult = queryAllDescriptors();
+ if (queryResult == OK) {
+ *pNumEffects = mLastDescriptors.size();
+ }
+ return queryResult;
+}
+
+status_t EffectsFactoryHalHidl::getDescriptor(
+ uint32_t index, effect_descriptor_t *pDescriptor) {
+    // TODO: We somehow need to track changes on the server side,
+    // or figure out how to convert all clients to query all the descriptors at once.
+ // TODO: check for nullptr
+ if (mLastDescriptors.size() == 0) {
+ status_t queryResult = queryAllDescriptors();
+ if (queryResult != OK) return queryResult;
+ }
+ if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
+ EffectHalHidl::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
+ return OK;
+}
+
+status_t EffectsFactoryHalHidl::getDescriptor(
+ const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
+ // TODO: check for nullptr
+ if (mEffectsFactory == 0) return NO_INIT;
+ Uuid hidlUuid;
+ HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffectsFactory->getDescriptor(hidlUuid,
+ [&](Result r, const EffectDescriptor& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ EffectHalHidl::effectDescriptorToHal(result, pDescriptor);
+ }
+ });
+ if (ret.isOk()) {
+ if (retval == Result::OK) return OK;
+ else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
+ else return NO_INIT;
+ }
+ return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::createEffect(
+ const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
+ sp<EffectHalInterface> *effect) {
+ if (mEffectsFactory == 0) return NO_INIT;
+ Uuid hidlUuid;
+ HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffectsFactory->createEffect(
+ hidlUuid, sessionId, ioId,
+ [&](Result r, const sp<IEffect>& result, uint64_t effectId) {
+ retval = r;
+ if (retval == Result::OK) {
+ *effect = new EffectHalHidl(result, effectId);
+ }
+ });
+ if (ret.isOk()) {
+ if (retval == Result::OK) return OK;
+ else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
+ else return NO_INIT;
+ }
+ return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::dumpEffects(int fd) {
+ if (mEffectsFactory == 0) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mEffectsFactory->debug(hidlHandle, {} /* options */);
+ native_handle_delete(hidlHandle);
+ return processReturn(__FUNCTION__, ret);
+}
+
+status_t EffectsFactoryHalHidl::allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) {
+ return EffectBufferHalHidl::allocate(size, buffer);
+}
+
+status_t EffectsFactoryHalHidl::mirrorBuffer(void* external, size_t size,
+ sp<EffectBufferHalInterface>* buffer) {
+ return EffectBufferHalHidl::mirror(external, size, buffer);
+}
+
+
+} // namespace V4_0
+} // namespace android
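For illustration (not in the patch): looking up a descriptor by UUID and instantiating the effect through the factory above; 'factory', 'effectUuid', 'sessionId' and 'ioHandle' are assumed to be provided by the caller.

    // Hypothetical usage of the factory API.
    effect_descriptor_t desc;
    if (factory->getDescriptor(effectUuid, &desc) == OK) {
        sp<EffectHalInterface> effect;
        if (factory->createEffect(effectUuid, sessionId, ioHandle, &effect) == OK) {
            // 'effect' is now ready for setInBuffer()/setOutBuffer()/process().
        }
    }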
diff --git a/media/libaudiohal/4.0/StreamHalHidl.cpp b/media/libaudiohal/4.0/StreamHalHidl.cpp
new file mode 100644
index 0000000..1c2fdb0
--- /dev/null
+++ b/media/libaudiohal/4.0/StreamHalHidl.cpp
@@ -0,0 +1,817 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "StreamHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/4.0/IStreamOutCallback.h>
+#include <hwbinder/IPCThreadState.h>
+#include <mediautils/SchedulingPolicyService.h>
+#include <utils/Log.h>
+
+#include "DeviceHalHidl.h"
+#include "EffectHalHidl.h"
+#include "StreamHalHidl.h"
+#include "VersionUtils.h"
+
+using ::android::hardware::audio::common::V4_0::AudioChannelMask;
+using ::android::hardware::audio::common::V4_0::AudioContentType;
+using ::android::hardware::audio::common::V4_0::AudioFormat;
+using ::android::hardware::audio::common::V4_0::AudioSource;
+using ::android::hardware::audio::common::V4_0::AudioUsage;
+using ::android::hardware::audio::common::V4_0::ThreadInfo;
+using ::android::hardware::audio::V4_0::AudioDrain;
+using ::android::hardware::audio::V4_0::IStreamOutCallback;
+using ::android::hardware::audio::V4_0::MessageQueueFlagBits;
+using ::android::hardware::audio::V4_0::MicrophoneInfo;
+using ::android::hardware::audio::V4_0::MmapBufferInfo;
+using ::android::hardware::audio::V4_0::MmapPosition;
+using ::android::hardware::audio::V4_0::ParameterValue;
+using ::android::hardware::audio::V4_0::PlaybackTrackMetadata;
+using ::android::hardware::audio::V4_0::RecordTrackMetadata;
+using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::audio::V4_0::TimeSpec;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ReadCommand = ::android::hardware::audio::V4_0::IStreamIn::ReadCommand;
+
+namespace android {
+namespace V4_0 {
+
+StreamHalHidl::StreamHalHidl(IStream *stream)
+ : ConversionHelperHidl("Stream"),
+ mStream(stream),
+ mHalThreadPriority(HAL_THREAD_PRIORITY_DEFAULT),
+          mCachedBufferSize(0) {
+
+ // Instrument audio signal power logging.
+ // Note: This assumes channel mask, format, and sample rate do not change after creation.
+ if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+ // Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
+ Return<void> ret = mStream->getAudioProperties(
+ [&](auto sr, auto m, auto f) {
+ mStreamPowerLog.init(sr,
+ static_cast<audio_channel_mask_t>(m),
+ static_cast<audio_format_t>(f));
+ });
+ }
+}
+
+StreamHalHidl::~StreamHalHidl() {
+ mStream = nullptr;
+}
+
+status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getSampleRate", mStream->getSampleRate(), rate);
+}
+
+status_t StreamHalHidl::getBufferSize(size_t *size) {
+ if (!mStream) return NO_INIT;
+ status_t status = processReturn("getBufferSize", mStream->getBufferSize(), size);
+ if (status == OK) {
+ mCachedBufferSize = *size;
+ }
+ return status;
+}
+
+status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getChannelMask", mStream->getChannelMask(), mask);
+}
+
+status_t StreamHalHidl::getFormat(audio_format_t *format) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getFormat", mStream->getFormat(), format);
+}
+
+status_t StreamHalHidl::getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+ if (!mStream) return NO_INIT;
+ Return<void> ret = mStream->getAudioProperties(
+ [&](uint32_t sr, auto m, auto f) {
+ *sampleRate = sr;
+ *mask = static_cast<audio_channel_mask_t>(m);
+ *format = static_cast<audio_format_t>(f);
+ });
+ return processReturn("getAudioProperties", ret);
+}
+
+status_t StreamHalHidl::setParameters(const String8& kvPairs) {
+ if (!mStream) return NO_INIT;
+ hidl_vec<ParameterValue> hidlParams;
+ status_t status = parametersFromHal(kvPairs, &hidlParams);
+ if (status != OK) return status;
+ return processReturn("setParameters",
+ utils::setParameters(mStream, hidlParams, {} /* options */));
+}
+
+status_t StreamHalHidl::getParameters(const String8& keys, String8 *values) {
+ values->clear();
+ if (!mStream) return NO_INIT;
+ hidl_vec<hidl_string> hidlKeys;
+ status_t status = keysFromHal(keys, &hidlKeys);
+ if (status != OK) return status;
+ Result retval;
+ Return<void> ret = utils::getParameters(
+ mStream,
+ {} /* context */,
+ hidlKeys,
+ [&](Result r, const hidl_vec<ParameterValue>& parameters) {
+ retval = r;
+ if (retval == Result::OK) {
+ parametersToHal(parameters, values);
+ }
+ });
+ return processReturn("getParameters", ret, retval);
+}
+
+status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
+ if (!mStream) return NO_INIT;
+ return processReturn("addEffect", mStream->addEffect(
+ static_cast<EffectHalHidl*>(effect.get())->effectId()));
+}
+
+status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
+ if (!mStream) return NO_INIT;
+ return processReturn("removeEffect", mStream->removeEffect(
+ static_cast<EffectHalHidl*>(effect.get())->effectId()));
+}
+
+status_t StreamHalHidl::standby() {
+ if (!mStream) return NO_INIT;
+ return processReturn("standby", mStream->standby());
+}
+
+status_t StreamHalHidl::dump(int fd) {
+ if (!mStream) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mStream->debug(hidlHandle, {} /* options */);
+ native_handle_delete(hidlHandle);
+ mStreamPowerLog.dump(fd);
+ return processReturn("dump", ret);
+}
+
+status_t StreamHalHidl::start() {
+ if (!mStream) return NO_INIT;
+ return processReturn("start", mStream->start());
+}
+
+status_t StreamHalHidl::stop() {
+ if (!mStream) return NO_INIT;
+ return processReturn("stop", mStream->stop());
+}
+
+status_t StreamHalHidl::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) {
+ Result retval;
+ Return<void> ret = mStream->createMmapBuffer(
+ minSizeFrames,
+ [&](Result r, const MmapBufferInfo& hidlInfo) {
+ retval = r;
+ if (retval == Result::OK) {
+ const native_handle *handle = hidlInfo.sharedMemory.handle();
+ if (handle->numFds > 0) {
+ info->shared_memory_fd = handle->data[0];
+ info->buffer_size_frames = hidlInfo.bufferSizeFrames;
+ info->burst_size_frames = hidlInfo.burstSizeFrames;
+ // info->shared_memory_address is not needed in HIDL context
+ info->shared_memory_address = NULL;
+ } else {
+ retval = Result::NOT_INITIALIZED;
+ }
+ }
+ });
+ return processReturn("createMmapBuffer", ret, retval);
+}
+
+status_t StreamHalHidl::getMmapPosition(struct audio_mmap_position *position) {
+ Result retval;
+ Return<void> ret = mStream->getMmapPosition(
+ [&](Result r, const MmapPosition& hidlPosition) {
+ retval = r;
+ if (retval == Result::OK) {
+ position->time_nanoseconds = hidlPosition.timeNanoseconds;
+ position->position_frames = hidlPosition.positionFrames;
+ }
+ });
+ return processReturn("getMmapPosition", ret, retval);
+}
+
+status_t StreamHalHidl::setHalThreadPriority(int priority) {
+ mHalThreadPriority = priority;
+ return OK;
+}
+
+status_t StreamHalHidl::getCachedBufferSize(size_t *size) {
+ if (mCachedBufferSize != 0) {
+ *size = mCachedBufferSize;
+ return OK;
+ }
+ return getBufferSize(size);
+}
+
+bool StreamHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
+ if (mHalThreadPriority == HAL_THREAD_PRIORITY_DEFAULT) {
+ return true;
+ }
+ int err = requestPriority(
+ threadPid, threadId,
+ mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
+ ALOGE_IF(err, "failed to set priority %d for pid %d tid %d; error %d",
+ mHalThreadPriority, threadPid, threadId, err);
+ // Audio will still work, but latency will be higher and sometimes unacceptable.
+ return err == 0;
+}
+
+namespace {
+
+/* Notes on callback ownership.
+
+This is how the (Hw)Binder ownership model works. The server implementation
+is owned by the Binder framework (via sp<>). Proxies are owned by clients.
+When the last proxy disappears, the Binder framework releases the server impl.
+
+Thus, there is no need to keep any references to StreamOutCallback (this is
+the server impl) -- it will live as long as the HAL server holds a strong ref to
+the IStreamOutCallback proxy. We clear that reference by calling 'clearCallback'
+from the destructor of StreamOutHalHidl.
+
+The callback only keeps a weak reference to the stream. The stream is owned
+by AudioFlinger.
+
+*/
+
+struct StreamOutCallback : public IStreamOutCallback {
+ StreamOutCallback(const wp<StreamOutHalHidl>& stream) : mStream(stream) {}
+
+ // IStreamOutCallback implementation
+ Return<void> onWriteReady() override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != 0) {
+ stream->onWriteReady();
+ }
+ return Void();
+ }
+
+ Return<void> onDrainReady() override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != 0) {
+ stream->onDrainReady();
+ }
+ return Void();
+ }
+
+ Return<void> onError() override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != 0) {
+ stream->onError();
+ }
+ return Void();
+ }
+
+ private:
+ wp<StreamOutHalHidl> mStream;
+};
+
+} // namespace
+
+StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
+ : StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
+}
+
+StreamOutHalHidl::~StreamOutHalHidl() {
+ if (mStream != 0) {
+ if (mCallback.unsafe_get()) {
+ processReturn("clearCallback", mStream->clearCallback());
+ }
+ processReturn("close", mStream->close());
+ mStream.clear();
+ }
+ mCallback.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ if (mEfGroup) {
+ EventFlag::deleteEventFlag(&mEfGroup);
+ }
+}
+
+status_t StreamOutHalHidl::getFrameSize(size_t *size) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("getFrameSize", mStream->getFrameSize(), size);
+}
+
+status_t StreamOutHalHidl::getLatency(uint32_t *latency) {
+ if (mStream == 0) return NO_INIT;
+ if (mWriterClient == gettid() && mCommandMQ) {
+ return callWriterThread(
+ WriteCommand::GET_LATENCY, "getLatency", nullptr, 0,
+ [&](const WriteStatus& writeStatus) {
+ *latency = writeStatus.reply.latencyMs;
+ });
+ } else {
+ return processReturn("getLatency", mStream->getLatency(), latency);
+ }
+}
+
+status_t StreamOutHalHidl::setVolume(float left, float right) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("setVolume", mStream->setVolume(left, right));
+}
+
+status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
+ if (mStream == 0) return NO_INIT;
+ *written = 0;
+
+ if (bytes == 0 && !mDataMQ) {
+ // Can't determine the size for the MQ buffer. Wait for a non-empty write request.
+ ALOGW_IF(mCallback.unsafe_get(), "First call to async write with 0 bytes");
+ return OK;
+ }
+
+ status_t status;
+ if (!mDataMQ) {
+        // If playback starts close to the end of a compressed track, the number of bytes
+        // that needs to be written is less than the actual buffer size. The full buffer
+        // size must still be used for the MQ, since otherwise data will be truncated
+        // after seeking back towards the middle of the track.
+ size_t bufferSize;
+ if ((status = getCachedBufferSize(&bufferSize)) != OK) {
+ return status;
+ }
+ if (bytes > bufferSize) bufferSize = bytes;
+ if ((status = prepareForWriting(bufferSize)) != OK) {
+ return status;
+ }
+ }
+
+ status = callWriterThread(
+ WriteCommand::WRITE, "write", static_cast<const uint8_t*>(buffer), bytes,
+ [&] (const WriteStatus& writeStatus) {
+ *written = writeStatus.reply.written;
+ // Diagnostics of the cause of b/35813113.
+ ALOGE_IF(*written > bytes,
+ "hal reports more bytes written than asked for: %lld > %lld",
+ (long long)*written, (long long)bytes);
+ });
+ mStreamPowerLog.log(buffer, *written);
+ return status;
+}
+
+status_t StreamOutHalHidl::callWriterThread(
+ WriteCommand cmd, const char* cmdName,
+ const uint8_t* data, size_t dataSize, StreamOutHalHidl::WriterCallback callback) {
+ if (!mCommandMQ->write(&cmd)) {
+ ALOGE("command message queue write failed for \"%s\"", cmdName);
+ return -EAGAIN;
+ }
+ if (data != nullptr) {
+ size_t availableToWrite = mDataMQ->availableToWrite();
+ if (dataSize > availableToWrite) {
+ ALOGW("truncating write data from %lld to %lld due to insufficient data queue space",
+ (long long)dataSize, (long long)availableToWrite);
+ dataSize = availableToWrite;
+ }
+ if (!mDataMQ->write(data, dataSize)) {
+ ALOGE("data message queue write failed for \"%s\"", cmdName);
+ }
+ }
+ mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
+
+ // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
+ uint32_t efState = 0;
+retry:
+ status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState);
+ if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
+ WriteStatus writeStatus;
+ writeStatus.retval = Result::NOT_INITIALIZED;
+ if (!mStatusMQ->read(&writeStatus)) {
+ ALOGE("status message read failed for \"%s\"", cmdName);
+ }
+ if (writeStatus.retval == Result::OK) {
+ ret = OK;
+ callback(writeStatus);
+ } else {
+ ret = processReturn(cmdName, writeStatus.retval);
+ }
+ return ret;
+ }
+ if (ret == -EAGAIN || ret == -EINTR) {
+ // Spurious wakeup. This normally retries no more than once.
+ goto retry;
+ }
+ return ret;
+}
+
+status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
+ std::unique_ptr<CommandMQ> tempCommandMQ;
+ std::unique_ptr<DataMQ> tempDataMQ;
+ std::unique_ptr<StatusMQ> tempStatusMQ;
+ Result retval;
+ pid_t halThreadPid, halThreadTid;
+ Return<void> ret = mStream->prepareForWriting(
+ 1, bufferSize,
+ [&](Result r,
+ const CommandMQ::Descriptor& commandMQ,
+ const DataMQ::Descriptor& dataMQ,
+ const StatusMQ::Descriptor& statusMQ,
+ const ThreadInfo& halThreadInfo) {
+ retval = r;
+ if (retval == Result::OK) {
+ tempCommandMQ.reset(new CommandMQ(commandMQ));
+ tempDataMQ.reset(new DataMQ(dataMQ));
+ tempStatusMQ.reset(new StatusMQ(statusMQ));
+ if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
+ EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
+ }
+ halThreadPid = halThreadInfo.pid;
+ halThreadTid = halThreadInfo.tid;
+ }
+ });
+ if (!ret.isOk() || retval != Result::OK) {
+ return processReturn("prepareForWriting", ret, retval);
+ }
+ if (!tempCommandMQ || !tempCommandMQ->isValid() ||
+ !tempDataMQ || !tempDataMQ->isValid() ||
+ !tempStatusMQ || !tempStatusMQ->isValid() ||
+ !mEfGroup) {
+ ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
+ ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
+ "Command message queue for writing is invalid");
+ ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for writing");
+ ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for writing is invalid");
+ ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for writing");
+ ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+ "Status message queue for writing is invalid");
+ ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
+ return NO_INIT;
+ }
+ requestHalThreadPriority(halThreadPid, halThreadTid);
+
+ mCommandMQ = std::move(tempCommandMQ);
+ mDataMQ = std::move(tempDataMQ);
+ mStatusMQ = std::move(tempStatusMQ);
+ mWriterClient = gettid();
+ return OK;
+}
+
+status_t StreamOutHalHidl::getRenderPosition(uint32_t *dspFrames) {
+ if (mStream == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mStream->getRenderPosition(
+ [&](Result r, uint32_t d) {
+ retval = r;
+ if (retval == Result::OK) {
+ *dspFrames = d;
+ }
+ });
+ return processReturn("getRenderPosition", ret, retval);
+}
+
+status_t StreamOutHalHidl::getNextWriteTimestamp(int64_t *timestamp) {
+ if (mStream == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mStream->getNextWriteTimestamp(
+ [&](Result r, int64_t t) {
+ retval = r;
+ if (retval == Result::OK) {
+ *timestamp = t;
+ }
+ });
+ return processReturn("getRenderPosition", ret, retval);
+}
+
+status_t StreamOutHalHidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
+ if (mStream == 0) return NO_INIT;
+ status_t status = processReturn(
+ "setCallback", mStream->setCallback(new StreamOutCallback(this)));
+ if (status == OK) {
+ mCallback = callback;
+ }
+ return status;
+}
+
+status_t StreamOutHalHidl::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
+ if (mStream == 0) return NO_INIT;
+ Return<void> ret = mStream->supportsPauseAndResume(
+ [&](bool p, bool r) {
+ *supportsPause = p;
+ *supportsResume = r;
+ });
+ return processReturn("supportsPauseAndResume", ret);
+}
+
+status_t StreamOutHalHidl::pause() {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("pause", mStream->pause());
+}
+
+status_t StreamOutHalHidl::resume() {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("pause", mStream->resume());
+}
+
+status_t StreamOutHalHidl::supportsDrain(bool *supportsDrain) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("supportsDrain", mStream->supportsDrain(), supportsDrain);
+}
+
+status_t StreamOutHalHidl::drain(bool earlyNotify) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn(
+ "drain", mStream->drain(earlyNotify ? AudioDrain::EARLY_NOTIFY : AudioDrain::ALL));
+}
+
+status_t StreamOutHalHidl::flush() {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("pause", mStream->flush());
+}
+
+status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
+ if (mStream == 0) return NO_INIT;
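+ // When called from the same thread that performs write() and the queues are set up,
+ // send a GET_PRESENTATION_POSITION command over the writer command queue instead of
+ // making a separate HIDL call; otherwise fall back to the direct stream call.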
+ if (mWriterClient == gettid() && mCommandMQ) {
+ return callWriterThread(
+ WriteCommand::GET_PRESENTATION_POSITION, "getPresentationPosition", nullptr, 0,
+ [&](const WriteStatus& writeStatus) {
+ *frames = writeStatus.reply.presentationPosition.frames;
+ timestamp->tv_sec = writeStatus.reply.presentationPosition.timeStamp.tvSec;
+ timestamp->tv_nsec = writeStatus.reply.presentationPosition.timeStamp.tvNSec;
+ });
+ } else {
+ Result retval;
+ Return<void> ret = mStream->getPresentationPosition(
+ [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
+ retval = r;
+ if (retval == Result::OK) {
+ *frames = hidlFrames;
+ timestamp->tv_sec = hidlTimeStamp.tvSec;
+ timestamp->tv_nsec = hidlTimeStamp.tvNSec;
+ }
+ });
+ return processReturn("getPresentationPosition", ret, retval);
+ }
+}
+
+/** Transform a standard collection to an HIDL vector. */
+template <class Values, class ElementConverter>
+static auto transformToHidlVec(const Values& values, ElementConverter converter) {
+ hidl_vec<decltype(converter(*values.begin()))> result{values.size()};
+ using namespace std;
+ transform(begin(values), end(values), begin(result), converter);
+ return result;
+}
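+// Illustrative use only (types here are hypothetical): converting a std::vector<int>
+// into a hidl_vec<int64_t> would look like
+//   hidl_vec<int64_t> v = transformToHidlVec(ints, [](int i) -> int64_t { return i; });
+// The callers below convert track metadata structs to their HIDL counterparts.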
+
+status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
+ hardware::audio::V4_0::SourceMetadata halMetadata = {
+ .tracks = transformToHidlVec(sourceMetadata.tracks,
+ [](const playback_track_metadata& metadata) -> PlaybackTrackMetadata {
+ return {
+ .usage=static_cast<AudioUsage>(metadata.usage),
+ .contentType=static_cast<AudioContentType>(metadata.content_type),
+ .gain=metadata.gain,
+ };
+ })};
+ return processReturn("updateSourceMetadata", mStream->updateSourceMetadata(halMetadata));
+}
+
+void StreamOutHalHidl::onWriteReady() {
+ sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+ if (callback == 0) return;
+ ALOGV("asyncCallback onWriteReady");
+ callback->onWriteReady();
+}
+
+void StreamOutHalHidl::onDrainReady() {
+ sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+ if (callback == 0) return;
+ ALOGV("asyncCallback onDrainReady");
+ callback->onDrainReady();
+}
+
+void StreamOutHalHidl::onError() {
+ sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+ if (callback == 0) return;
+ ALOGV("asyncCallback onError");
+ callback->onError();
+}
+
+
+StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
+ : StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
+}
+
+StreamInHalHidl::~StreamInHalHidl() {
+ if (mStream != 0) {
+ processReturn("close", mStream->close());
+ mStream.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ }
+ if (mEfGroup) {
+ EventFlag::deleteEventFlag(&mEfGroup);
+ }
+}
+
+status_t StreamInHalHidl::getFrameSize(size_t *size) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("getFrameSize", mStream->getFrameSize(), size);
+}
+
+status_t StreamInHalHidl::setGain(float gain) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("setGain", mStream->setGain(gain));
+}
+
+status_t StreamInHalHidl::read(void *buffer, size_t bytes, size_t *read) {
+ if (mStream == 0) return NO_INIT;
+ *read = 0;
+
+ if (bytes == 0 && !mDataMQ) {
+ // Can't determine the size for the MQ buffer. Wait for a non-empty read request.
+ return OK;
+ }
+
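+ // Lazily create the message queues on the first non-empty read; the data queue is
+ // sized to that first request.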
+ status_t status;
+ if (!mDataMQ && (status = prepareForReading(bytes)) != OK) {
+ return status;
+ }
+
+ ReadParameters params;
+ params.command = ReadCommand::READ;
+ params.params.read = bytes;
+ status = callReaderThread(params, "read",
+ [&](const ReadStatus& readStatus) {
+ const size_t availToRead = mDataMQ->availableToRead();
+ if (!mDataMQ->read(static_cast<uint8_t*>(buffer), std::min(bytes, availToRead))) {
+ ALOGE("data message queue read failed for \"read\"");
+ }
+ ALOGW_IF(availToRead != readStatus.reply.read,
+ "HAL read report inconsistent: mq = %d, status = %d",
+ (int32_t)availToRead, (int32_t)readStatus.reply.read);
+ *read = readStatus.reply.read;
+ });
+ mStreamPowerLog.log(buffer, *read);
+ return status;
+}
+
+status_t StreamInHalHidl::callReaderThread(
+ const ReadParameters& params, const char* cmdName,
+ StreamInHalHidl::ReaderCallback callback) {
+ if (!mCommandMQ->write(&params)) {
+ ALOGW("command message queue write failed");
+ return -EAGAIN;
+ }
+ mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
+
+ // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
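+ // Mirror of the writer handshake: the read command was queued and the HAL thread was
+ // woken with NOT_FULL above; now block on NOT_EMPTY until a ReadStatus (and the data)
+ // is available.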
+ uint32_t efState = 0;
+retry:
+ status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState);
+ if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)) {
+ ReadStatus readStatus;
+ readStatus.retval = Result::NOT_INITIALIZED;
+ if (!mStatusMQ->read(&readStatus)) {
+ ALOGE("status message read failed for \"%s\"", cmdName);
+ }
+ if (readStatus.retval == Result::OK) {
+ ret = OK;
+ callback(readStatus);
+ } else {
+ ret = processReturn(cmdName, readStatus.retval);
+ }
+ return ret;
+ }
+ if (ret == -EAGAIN || ret == -EINTR) {
+ // Spurious wakeup. This normally retries no more than once.
+ goto retry;
+ }
+ return ret;
+}
+
+status_t StreamInHalHidl::prepareForReading(size_t bufferSize) {
+ std::unique_ptr<CommandMQ> tempCommandMQ;
+ std::unique_ptr<DataMQ> tempDataMQ;
+ std::unique_ptr<StatusMQ> tempStatusMQ;
+ Result retval;
+ pid_t halThreadPid, halThreadTid;
+ Return<void> ret = mStream->prepareForReading(
+ 1, bufferSize,
+ [&](Result r,
+ const CommandMQ::Descriptor& commandMQ,
+ const DataMQ::Descriptor& dataMQ,
+ const StatusMQ::Descriptor& statusMQ,
+ const ThreadInfo& halThreadInfo) {
+ retval = r;
+ if (retval == Result::OK) {
+ tempCommandMQ.reset(new CommandMQ(commandMQ));
+ tempDataMQ.reset(new DataMQ(dataMQ));
+ tempStatusMQ.reset(new StatusMQ(statusMQ));
+ if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
+ EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
+ }
+ halThreadPid = halThreadInfo.pid;
+ halThreadTid = halThreadInfo.tid;
+ }
+ });
+ if (!ret.isOk() || retval != Result::OK) {
+ return processReturn("prepareForReading", ret, retval);
+ }
+ if (!tempCommandMQ || !tempCommandMQ->isValid() ||
+ !tempDataMQ || !tempDataMQ->isValid() ||
+ !tempStatusMQ || !tempStatusMQ->isValid() ||
+ !mEfGroup) {
+ ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for reading");
+ ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
+ "Command message queue for reading is invalid");
+ ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for reading");
+ ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for reading is invalid");
+ ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for reading");
+ ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+ "Status message queue for reading is invalid");
+ ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
+ return NO_INIT;
+ }
+ requestHalThreadPriority(halThreadPid, halThreadTid);
+
+ mCommandMQ = std::move(tempCommandMQ);
+ mDataMQ = std::move(tempDataMQ);
+ mStatusMQ = std::move(tempStatusMQ);
+ mReaderClient = gettid();
+ return OK;
+}
+
+status_t StreamInHalHidl::getInputFramesLost(uint32_t *framesLost) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("getInputFramesLost", mStream->getInputFramesLost(), framesLost);
+}
+
+status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
+ if (mStream == 0) return NO_INIT;
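+ // Same pattern as StreamOutHalHidl::getPresentationPosition: reuse the reader command
+ // queue when called from the reading thread, otherwise issue a direct HIDL call.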
+ if (mReaderClient == gettid() && mCommandMQ) {
+ ReadParameters params;
+ params.command = ReadCommand::GET_CAPTURE_POSITION;
+ return callReaderThread(params, "getCapturePosition",
+ [&](const ReadStatus& readStatus) {
+ *frames = readStatus.reply.capturePosition.frames;
+ *time = readStatus.reply.capturePosition.time;
+ });
+ } else {
+ Result retval;
+ Return<void> ret = mStream->getCapturePosition(
+ [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
+ retval = r;
+ if (retval == Result::OK) {
+ *frames = hidlFrames;
+ *time = hidlTime;
+ }
+ });
+ return processReturn("getCapturePosition", ret, retval);
+ }
+}
+
+
+status_t StreamInHalHidl::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphonesInfo) {
+ if (!mStream) return NO_INIT;
+ Result retval;
+ Return<void> ret = mStream->getActiveMicrophones(
+ [&](Result r, hidl_vec<MicrophoneInfo> micArrayHal) {
+ retval = r;
+ for (size_t k = 0; k < micArrayHal.size(); k++) {
+ audio_microphone_characteristic_t dst;
+ // Convert the HIDL MicrophoneInfo into the legacy characteristic struct before wrapping.
+ microphoneInfoToHal(micArrayHal[k], &dst);
+ media::MicrophoneInfo microphone = media::MicrophoneInfo(dst);
+ microphonesInfo->push_back(microphone);
+ }
+ });
+ return processReturn("getActiveMicrophones", ret, retval);
+}
+
+status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
+ hardware::audio::V4_0::SinkMetadata halMetadata = {
+ .tracks = transformToHidlVec(sinkMetadata.tracks,
+ [](const record_track_metadata& metadata) -> RecordTrackMetadata {
+ return {
+ .source=static_cast<AudioSource>(metadata.source),
+ .gain=metadata.gain,
+ };
+ })};
+ return processReturn("updateSinkMetadata", mStream->updateSinkMetadata(halMetadata));
+}
+
+} // namespace V4_0
+} // namespace android
diff --git a/media/libaudiohal/4.0/StreamHalHidl.h b/media/libaudiohal/4.0/StreamHalHidl.h
new file mode 100644
index 0000000..2dda0f8
--- /dev/null
+++ b/media/libaudiohal/4.0/StreamHalHidl.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
+#define ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
+
+#include <atomic>
+
+#include <android/hardware/audio/4.0/IStream.h>
+#include <android/hardware/audio/4.0/IStreamIn.h>
+#include <android/hardware/audio/4.0/IStreamOut.h>
+#include <fmq/EventFlag.h>
+#include <fmq/MessageQueue.h>
+#include <media/audiohal/StreamHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+#include "StreamPowerLog.h"
+
+using ::android::hardware::audio::V4_0::IStream;
+using ::android::hardware::audio::V4_0::IStreamIn;
+using ::android::hardware::audio::V4_0::IStreamOut;
+using ::android::hardware::EventFlag;
+using ::android::hardware::MessageQueue;
+using ::android::hardware::Return;
+using ReadParameters = ::android::hardware::audio::V4_0::IStreamIn::ReadParameters;
+using ReadStatus = ::android::hardware::audio::V4_0::IStreamIn::ReadStatus;
+using WriteCommand = ::android::hardware::audio::V4_0::IStreamOut::WriteCommand;
+using WriteStatus = ::android::hardware::audio::V4_0::IStreamOut::WriteStatus;
+
+namespace android {
+namespace V4_0 {
+
+class DeviceHalHidl;
+
+class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
+{
+ public:
+ // Return the sampling rate in Hz - eg. 44100.
+ virtual status_t getSampleRate(uint32_t *rate);
+
+ // Return size of input/output buffer in bytes for this stream - eg. 4800.
+ virtual status_t getBufferSize(size_t *size);
+
+ // Return the channel mask.
+ virtual status_t getChannelMask(audio_channel_mask_t *mask);
+
+ // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+ virtual status_t getFormat(audio_format_t *format);
+
+ // Convenience method.
+ virtual status_t getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+
+ // Set audio stream parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get audio stream parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Add or remove the effect on the stream.
+ virtual status_t addEffect(sp<EffectHalInterface> effect);
+ virtual status_t removeEffect(sp<EffectHalInterface> effect);
+
+ // Put the audio hardware input/output into standby mode.
+ virtual status_t standby();
+
+ virtual status_t dump(int fd);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start();
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop();
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+ // Set the priority of the thread that interacts with the HAL
+ // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+ virtual status_t setHalThreadPriority(int priority);
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ explicit StreamHalHidl(IStream *stream);
+
+ // The destructor automatically closes the stream.
+ virtual ~StreamHalHidl();
+
+ status_t getCachedBufferSize(size_t *size);
+
+ bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
+
+ // mStreamPowerLog is used for audio signal power logging.
+ StreamPowerLog mStreamPowerLog;
+
+ private:
+ const int HAL_THREAD_PRIORITY_DEFAULT = -1;
+ IStream *mStream;
+ int mHalThreadPriority;
+ size_t mCachedBufferSize;
+};
+
+class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl {
+ public:
+ // Return the frame size (number of bytes per audio frame) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Return the audio hardware driver estimated latency in milliseconds.
+ virtual status_t getLatency(uint32_t *latency);
+
+ // Use this method in situations where audio mixing is done in the hardware.
+ virtual status_t setVolume(float left, float right);
+
+ // Write audio buffer to driver.
+ virtual status_t write(const void *buffer, size_t bytes, size_t *written);
+
+ // Return the number of audio frames written by the audio dsp to DAC since
+ // the output has exited standby.
+ virtual status_t getRenderPosition(uint32_t *dspFrames);
+
+ // Get the local time at which the next write to the audio driver will be presented.
+ virtual status_t getNextWriteTimestamp(int64_t *timestamp);
+
+ // Set the callback for notifying completion of non-blocking write and drain.
+ virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
+
+ // Returns whether pause and resume operations are supported.
+ virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
+
+ // Notifies the audio driver to pause playback.
+ virtual status_t pause();
+
+ // Notifies the audio driver to resume playback following a pause.
+ virtual status_t resume();
+
+ // Returns whether drain operation is supported.
+ virtual status_t supportsDrain(bool *supportsDrain);
+
+ // Requests notification when data buffered by the driver/hardware has been played.
+ virtual status_t drain(bool earlyNotify);
+
+ // Notifies the audio driver to flush the queued data.
+ virtual status_t flush();
+
+ // Return a recent count of the number of audio frames presented to an external observer.
+ virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+ // Called when the metadata of the stream's source has been changed.
+ status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
+ // Methods used by StreamOutCallback (HIDL).
+ void onWriteReady();
+ void onDrainReady();
+ void onError();
+
+ private:
+ friend class DeviceHalHidl;
+ typedef MessageQueue<WriteCommand, hardware::kSynchronizedReadWrite> CommandMQ;
+ typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
+ typedef MessageQueue<WriteStatus, hardware::kSynchronizedReadWrite> StatusMQ;
+
+ wp<StreamOutHalInterfaceCallback> mCallback;
+ sp<IStreamOut> mStream;
+ std::unique_ptr<CommandMQ> mCommandMQ;
+ std::unique_ptr<DataMQ> mDataMQ;
+ std::unique_ptr<StatusMQ> mStatusMQ;
+ std::atomic<pid_t> mWriterClient;
+ EventFlag* mEfGroup;
+
+ // Can not be constructed directly by clients.
+ StreamOutHalHidl(const sp<IStreamOut>& stream);
+
+ virtual ~StreamOutHalHidl();
+
+ using WriterCallback = std::function<void(const WriteStatus& writeStatus)>;
+ status_t callWriterThread(
+ WriteCommand cmd, const char* cmdName,
+ const uint8_t* data, size_t dataSize, WriterCallback callback);
+ status_t prepareForWriting(size_t bufferSize);
+};
+
+class StreamInHalHidl : public StreamInHalInterface, public StreamHalHidl {
+ public:
+ // Return the frame size (number of bytes per audio frame) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Set the input gain for the audio driver.
+ virtual status_t setGain(float gain);
+
+ // Read audio buffer in from driver.
+ virtual status_t read(void *buffer, size_t bytes, size_t *read);
+
+ // Return the amount of input frames lost in the audio driver.
+ virtual status_t getInputFramesLost(uint32_t *framesLost);
+
+ // Return a recent count of the number of audio frames received and
+ // the clock time associated with that frame count.
+ virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ // Called when the metadata of the stream's sink has been changed.
+ status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
+ private:
+ friend class DeviceHalHidl;
+ typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
+ typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
+ typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
+
+ sp<IStreamIn> mStream;
+ std::unique_ptr<CommandMQ> mCommandMQ;
+ std::unique_ptr<DataMQ> mDataMQ;
+ std::unique_ptr<StatusMQ> mStatusMQ;
+ std::atomic<pid_t> mReaderClient;
+ EventFlag* mEfGroup;
+
+ // Can not be constructed directly by clients.
+ StreamInHalHidl(const sp<IStreamIn>& stream);
+
+ virtual ~StreamInHalHidl();
+
+ using ReaderCallback = std::function<void(const ReadStatus& readStatus)>;
+ status_t callReaderThread(
+ const ReadParameters& params, const char* cmdName, ReaderCallback callback);
+ status_t prepareForReading(size_t bufferSize);
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamHalLocal.cpp b/media/libaudiohal/4.0/StreamHalLocal.cpp
new file mode 100644
index 0000000..e9d96bf
--- /dev/null
+++ b/media/libaudiohal/4.0/StreamHalLocal.cpp
@@ -0,0 +1,357 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "StreamHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "StreamHalLocal.h"
+#include "VersionUtils.h"
+
+namespace android {
+namespace V4_0 {
+
+StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
+ : mDevice(device),
+ mStream(stream) {
+ // Instrument audio signal power logging.
+ // Note: This assumes channel mask, format, and sample rate do not change after creation.
+ if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
+ mStreamPowerLog.init(mStream->get_sample_rate(mStream),
+ mStream->get_channels(mStream),
+ mStream->get_format(mStream));
+ }
+}
+
+StreamHalLocal::~StreamHalLocal() {
+ mStream = 0;
+ mDevice.clear();
+}
+
+status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
+ *rate = mStream->get_sample_rate(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getBufferSize(size_t *size) {
+ *size = mStream->get_buffer_size(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
+ *mask = mStream->get_channels(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getFormat(audio_format_t *format) {
+ *format = mStream->get_format(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+ *sampleRate = mStream->get_sample_rate(mStream);
+ *mask = mStream->get_channels(mStream);
+ *format = mStream->get_format(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::setParameters(const String8& kvPairs) {
+ return mStream->set_parameters(mStream, kvPairs.string());
+}
+
+status_t StreamHalLocal::getParameters(const String8& keys, String8 *values) {
+ char *halValues = mStream->get_parameters(mStream, keys.string());
+ if (halValues != NULL) {
+ values->setTo(halValues);
+ free(halValues);
+ } else {
+ values->clear();
+ }
+ return OK;
+}
+
+status_t StreamHalLocal::addEffect(sp<EffectHalInterface>) {
+ LOG_ALWAYS_FATAL("Local streams can not have effects");
+ return INVALID_OPERATION;
+}
+
+status_t StreamHalLocal::removeEffect(sp<EffectHalInterface>) {
+ LOG_ALWAYS_FATAL("Local streams can not have effects");
+ return INVALID_OPERATION;
+}
+
+status_t StreamHalLocal::standby() {
+ return mStream->standby(mStream);
+}
+
+status_t StreamHalLocal::dump(int fd) {
+ status_t status = mStream->dump(mStream, fd);
+ mStreamPowerLog.dump(fd);
+ return status;
+}
+
+status_t StreamHalLocal::setHalThreadPriority(int) {
+ // Don't need to do anything as local hal is executed by audioflinger directly
+ // on the same thread.
+ return OK;
+}
+
+StreamOutHalLocal::StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device)
+ : StreamHalLocal(&stream->common, device), mStream(stream) {
+}
+
+StreamOutHalLocal::~StreamOutHalLocal() {
+ mCallback.clear();
+ mDevice->closeOutputStream(mStream);
+ mStream = 0;
+}
+
+status_t StreamOutHalLocal::getFrameSize(size_t *size) {
+ *size = audio_stream_out_frame_size(mStream);
+ return OK;
+}
+
+status_t StreamOutHalLocal::getLatency(uint32_t *latency) {
+ *latency = mStream->get_latency(mStream);
+ return OK;
+}
+
+status_t StreamOutHalLocal::setVolume(float left, float right) {
+ if (mStream->set_volume == NULL) return INVALID_OPERATION;
+ return mStream->set_volume(mStream, left, right);
+}
+
+status_t StreamOutHalLocal::write(const void *buffer, size_t bytes, size_t *written) {
+ ssize_t writeResult = mStream->write(mStream, buffer, bytes);
+ if (writeResult > 0) {
+ *written = writeResult;
+ mStreamPowerLog.log(buffer, *written);
+ return OK;
+ } else {
+ *written = 0;
+ return writeResult;
+ }
+}
+
+status_t StreamOutHalLocal::getRenderPosition(uint32_t *dspFrames) {
+ return mStream->get_render_position(mStream, dspFrames);
+}
+
+status_t StreamOutHalLocal::getNextWriteTimestamp(int64_t *timestamp) {
+ if (mStream->get_next_write_timestamp == NULL) return INVALID_OPERATION;
+ return mStream->get_next_write_timestamp(mStream, timestamp);
+}
+
+status_t StreamOutHalLocal::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
+ if (mStream->set_callback == NULL) return INVALID_OPERATION;
+ status_t result = mStream->set_callback(mStream, StreamOutHalLocal::asyncCallback, this);
+ if (result == OK) {
+ mCallback = callback;
+ }
+ return result;
+}
+
+// static
+int StreamOutHalLocal::asyncCallback(stream_callback_event_t event, void*, void *cookie) {
+ // We act as if we gave a wp<StreamOutHalLocal> to the HAL. This way we correctly
+ // handle the case when the callback is invoked while StreamOutHalLocal's destructor
+ // is already running, because the destructor is invoked only after the refcount has
+ // been atomically decremented.
+ wp<StreamOutHalLocal> weakSelf(static_cast<StreamOutHalLocal*>(cookie));
+ sp<StreamOutHalLocal> self = weakSelf.promote();
+ if (self == 0) return 0;
+ sp<StreamOutHalInterfaceCallback> callback = self->mCallback.promote();
+ if (callback == 0) return 0;
+ ALOGV("asyncCallback() event %d", event);
+ switch (event) {
+ case STREAM_CBK_EVENT_WRITE_READY:
+ callback->onWriteReady();
+ break;
+ case STREAM_CBK_EVENT_DRAIN_READY:
+ callback->onDrainReady();
+ break;
+ case STREAM_CBK_EVENT_ERROR:
+ callback->onError();
+ break;
+ default:
+ ALOGW("asyncCallback() unknown event %d", event);
+ break;
+ }
+ return 0;
+}
+
+status_t StreamOutHalLocal::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
+ *supportsPause = mStream->pause != NULL;
+ *supportsResume = mStream->resume != NULL;
+ return OK;
+}
+
+status_t StreamOutHalLocal::pause() {
+ if (mStream->pause == NULL) return INVALID_OPERATION;
+ return mStream->pause(mStream);
+}
+
+status_t StreamOutHalLocal::resume() {
+ if (mStream->resume == NULL) return INVALID_OPERATION;
+ return mStream->resume(mStream);
+}
+
+status_t StreamOutHalLocal::supportsDrain(bool *supportsDrain) {
+ *supportsDrain = mStream->drain != NULL;
+ return OK;
+}
+
+status_t StreamOutHalLocal::drain(bool earlyNotify) {
+ if (mStream->drain == NULL) return INVALID_OPERATION;
+ return mStream->drain(mStream, earlyNotify ? AUDIO_DRAIN_EARLY_NOTIFY : AUDIO_DRAIN_ALL);
+}
+
+status_t StreamOutHalLocal::flush() {
+ if (mStream->flush == NULL) return INVALID_OPERATION;
+ return mStream->flush(mStream);
+}
+
+status_t StreamOutHalLocal::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
+ if (mStream->get_presentation_position == NULL) return INVALID_OPERATION;
+ return mStream->get_presentation_position(mStream, frames, timestamp);
+}
+
+status_t StreamOutHalLocal::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
+ if (mStream->update_source_metadata == nullptr) {
+ return INVALID_OPERATION;
+ }
+ const source_metadata_t metadata {
+ .track_count = sourceMetadata.tracks.size(),
+ // const cast is fine as it is in a const structure
+ .tracks = const_cast<playback_track_metadata*>(sourceMetadata.tracks.data()),
+ };
+ mStream->update_source_metadata(mStream, &metadata);
+ return OK;
+}
+
+status_t StreamOutHalLocal::start() {
+ if (mStream->start == NULL) return INVALID_OPERATION;
+ return mStream->start(mStream);
+}
+
+status_t StreamOutHalLocal::stop() {
+ if (mStream->stop == NULL) return INVALID_OPERATION;
+ return mStream->stop(mStream);
+}
+
+status_t StreamOutHalLocal::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) {
+ if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
+ return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
+}
+
+status_t StreamOutHalLocal::getMmapPosition(struct audio_mmap_position *position) {
+ if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
+ return mStream->get_mmap_position(mStream, position);
+}
+
+StreamInHalLocal::StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device)
+ : StreamHalLocal(&stream->common, device), mStream(stream) {
+}
+
+StreamInHalLocal::~StreamInHalLocal() {
+ mDevice->closeInputStream(mStream);
+ mStream = 0;
+}
+
+status_t StreamInHalLocal::getFrameSize(size_t *size) {
+ *size = audio_stream_in_frame_size(mStream);
+ return OK;
+}
+
+status_t StreamInHalLocal::setGain(float gain) {
+ return mStream->set_gain(mStream, gain);
+}
+
+status_t StreamInHalLocal::read(void *buffer, size_t bytes, size_t *read) {
+ ssize_t readResult = mStream->read(mStream, buffer, bytes);
+ if (readResult > 0) {
+ *read = readResult;
+ mStreamPowerLog.log(buffer, *read);
+ return OK;
+ } else {
+ *read = 0;
+ return readResult;
+ }
+}
+
+status_t StreamInHalLocal::getInputFramesLost(uint32_t *framesLost) {
+ *framesLost = mStream->get_input_frames_lost(mStream);
+ return OK;
+}
+
+status_t StreamInHalLocal::getCapturePosition(int64_t *frames, int64_t *time) {
+ if (mStream->get_capture_position == NULL) return INVALID_OPERATION;
+ return mStream->get_capture_position(mStream, frames, time);
+}
+
+status_t StreamInHalLocal::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
+ if (mStream->update_sink_metadata == nullptr) {
+ return INVALID_OPERATION;
+ }
+ const sink_metadata_t metadata {
+ .track_count = sinkMetadata.tracks.size(),
+ // const cast is fine as it is in a const structure
+ .tracks = const_cast<record_track_metadata*>(sinkMetadata.tracks.data()),
+ };
+ mStream->update_sink_metadata(mStream, &metadata);
+ return OK;
+}
+
+status_t StreamInHalLocal::start() {
+ if (mStream->start == NULL) return INVALID_OPERATION;
+ return mStream->start(mStream);
+}
+
+status_t StreamInHalLocal::stop() {
+ if (mStream->stop == NULL) return INVALID_OPERATION;
+ return mStream->stop(mStream);
+}
+
+status_t StreamInHalLocal::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) {
+ if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
+ return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
+}
+
+status_t StreamInHalLocal::getMmapPosition(struct audio_mmap_position *position) {
+ if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
+ return mStream->get_mmap_position(mStream, position);
+}
+
+status_t StreamInHalLocal::getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) {
+ if (mStream->get_active_microphones == NULL) return INVALID_OPERATION;
+ size_t actual_mics = AUDIO_MICROPHONE_MAX_COUNT;
+ audio_microphone_characteristic_t mic_array[AUDIO_MICROPHONE_MAX_COUNT];
+ status_t status = mStream->get_active_microphones(mStream, &mic_array[0], &actual_mics);
+ for (size_t i = 0; i < actual_mics; i++) {
+ media::MicrophoneInfo microphoneInfo = media::MicrophoneInfo(mic_array[i]);
+ microphones->push_back(microphoneInfo);
+ }
+ return status;
+}
+
+} // namespace V4_0
+} // namespace android
diff --git a/media/libaudiohal/4.0/StreamHalLocal.h b/media/libaudiohal/4.0/StreamHalLocal.h
new file mode 100644
index 0000000..7237509
--- /dev/null
+++ b/media/libaudiohal/4.0/StreamHalLocal.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
+#define ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
+
+#include <media/audiohal/StreamHalInterface.h>
+#include "StreamPowerLog.h"
+
+namespace android {
+namespace V4_0 {
+
+class DeviceHalLocal;
+
+class StreamHalLocal : public virtual StreamHalInterface
+{
+ public:
+ // Return the sampling rate in Hz - eg. 44100.
+ virtual status_t getSampleRate(uint32_t *rate);
+
+ // Return size of input/output buffer in bytes for this stream - eg. 4800.
+ virtual status_t getBufferSize(size_t *size);
+
+ // Return the channel mask.
+ virtual status_t getChannelMask(audio_channel_mask_t *mask);
+
+ // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+ virtual status_t getFormat(audio_format_t *format);
+
+ // Convenience method.
+ virtual status_t getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+
+ // Set audio stream parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get audio stream parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Add or remove the effect on the stream.
+ virtual status_t addEffect(sp<EffectHalInterface> effect);
+ virtual status_t removeEffect(sp<EffectHalInterface> effect);
+
+ // Put the audio hardware input/output into standby mode.
+ virtual status_t standby();
+
+ virtual status_t dump(int fd);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start() = 0;
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop() = 0;
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) = 0;
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
+
+ // Set the priority of the thread that interacts with the HAL
+ // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+ virtual status_t setHalThreadPriority(int priority);
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device);
+
+ // The destructor automatically closes the stream.
+ virtual ~StreamHalLocal();
+
+ sp<DeviceHalLocal> mDevice;
+
+ // mStreamPowerLog is used for audio signal power logging.
+ StreamPowerLog mStreamPowerLog;
+
+ private:
+ audio_stream_t *mStream;
+};
+
+class StreamOutHalLocal : public StreamOutHalInterface, public StreamHalLocal {
+ public:
+ // Return the frame size (number of bytes per audio frame) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Return the audio hardware driver estimated latency in milliseconds.
+ virtual status_t getLatency(uint32_t *latency);
+
+ // Use this method in situations where audio mixing is done in the hardware.
+ virtual status_t setVolume(float left, float right);
+
+ // Write audio buffer to driver.
+ virtual status_t write(const void *buffer, size_t bytes, size_t *written);
+
+ // Return the number of audio frames written by the audio dsp to DAC since
+ // the output has exited standby.
+ virtual status_t getRenderPosition(uint32_t *dspFrames);
+
+ // Get the local time at which the next write to the audio driver will be presented.
+ virtual status_t getNextWriteTimestamp(int64_t *timestamp);
+
+ // Set the callback for notifying completion of non-blocking write and drain.
+ virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
+
+ // Returns whether pause and resume operations are supported.
+ virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
+
+ // Notifies the audio driver to pause playback.
+ virtual status_t pause();
+
+ // Notifies the audio driver to resume playback following a pause.
+ virtual status_t resume();
+
+ // Returns whether drain operation is supported.
+ virtual status_t supportsDrain(bool *supportsDrain);
+
+ // Requests notification when data buffered by the driver/hardware has been played.
+ virtual status_t drain(bool earlyNotify);
+
+ // Notifies the audio driver to flush the queued data.
+ virtual status_t flush();
+
+ // Return a recent count of the number of audio frames presented to an external observer.
+ virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start();
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop();
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+ // Called when the metadata of the stream's source has been changed.
+ status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
+ private:
+ audio_stream_out_t *mStream;
+ wp<StreamOutHalInterfaceCallback> mCallback;
+
+ friend class DeviceHalLocal;
+
+ // Can not be constructed directly by clients.
+ StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device);
+
+ virtual ~StreamOutHalLocal();
+
+ static int asyncCallback(stream_callback_event_t event, void *param, void *cookie);
+};
+
+class StreamInHalLocal : public StreamInHalInterface, public StreamHalLocal {
+ public:
+ // Return the frame size (number of bytes per audio frame) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Set the input gain for the audio driver.
+ virtual status_t setGain(float gain);
+
+ // Read audio buffer in from driver.
+ virtual status_t read(void *buffer, size_t bytes, size_t *read);
+
+ // Return the amount of input frames lost in the audio driver.
+ virtual status_t getInputFramesLost(uint32_t *framesLost);
+
+ // Return a recent count of the number of audio frames received and
+ // the clock time associated with that frame count.
+ virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start();
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop();
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
+ // Called when the metadata of the stream's sink has been changed.
+ status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
+ private:
+ audio_stream_in_t *mStream;
+
+ friend class DeviceHalLocal;
+
+ // Can not be constructed directly by clients.
+ StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device);
+
+ virtual ~StreamInHalLocal();
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_4_0_H
diff --git a/media/libaudiohal/4.0/StreamPowerLog.h b/media/libaudiohal/4.0/StreamPowerLog.h
new file mode 100644
index 0000000..57b7201
--- /dev/null
+++ b/media/libaudiohal/4.0/StreamPowerLog.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
+#define ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
+
+#include <audio_utils/clock.h>
+#include <audio_utils/PowerLog.h>
+#include <cutils/properties.h>
+#include <system/audio.h>
+
+namespace android {
+namespace V4_0 {
+
+class StreamPowerLog {
+public:
+ StreamPowerLog() :
+ mIsUserDebugOrEngBuild(is_userdebug_or_eng_build()),
+ mPowerLog(nullptr),
+ mFrameSize(0) {
+ // use init() to set up the power log.
+ }
+
+ ~StreamPowerLog() {
+ power_log_destroy(mPowerLog); // OK for null mPowerLog
+ mPowerLog = nullptr;
+ }
+
+ // A one-time initialization (do not call twice) before using StreamPowerLog.
+ void init(uint32_t sampleRate, audio_channel_mask_t channelMask, audio_format_t format) {
+ if (mPowerLog == nullptr) {
+ // Note: this computes the channel count for both input and output channel masks,
+ // but it does not check the validity of the mask.
+ const uint32_t channelCount = popcount(audio_channel_mask_get_bits(channelMask));
+ mFrameSize = channelCount * audio_bytes_per_sample(format);
+ if (mFrameSize > 0) {
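+ // e.g. at 48000 Hz with the 50 ms sampling interval: 48000 * 50 / 1000 = 2400
+ // frames per power log entry.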
+ const size_t kPowerLogFramesPerEntry =
+ (long long)sampleRate * kPowerLogSamplingIntervalMs / 1000;
+ mPowerLog = power_log_create(
+ sampleRate,
+ channelCount,
+ format,
+ kPowerLogEntries,
+ kPowerLogFramesPerEntry);
+ }
+ }
+ // mPowerLog may be NULL (not the right build, format not accepted, etc.).
+ }
+
+ // Dump the power log to fd.
+ void dump(int fd) const {
+ // OK for null mPowerLog
+ (void)power_log_dump(
+ mPowerLog, fd, " " /* prefix */, kPowerLogLines, 0 /* limit_ns */);
+ }
+
+ // Log the audio data contained in buffer.
+ void log(const void *buffer, size_t sizeInBytes) const {
+ if (mPowerLog != nullptr) { // mFrameSize is always nonzero if mPowerLog exists.
+ power_log_log(
+ mPowerLog, buffer, sizeInBytes / mFrameSize, audio_utils_get_real_time_ns());
+ }
+ }
+
+ bool isUserDebugOrEngBuild() const {
+ return mIsUserDebugOrEngBuild;
+ }
+
+private:
+
+ static inline bool is_userdebug_or_eng_build() {
+ char value[PROPERTY_VALUE_MAX];
+ (void)property_get("ro.build.type", value, "unknown"); // ignore actual length
+ return strcmp(value, "userdebug") == 0 || strcmp(value, "eng") == 0;
+ }
+
+ // Audio signal power log configuration.
+ static const size_t kPowerLogLines = 40;
+ static const size_t kPowerLogSamplingIntervalMs = 50;
+ static const size_t kPowerLogEntries = (1 /* minutes */ * 60 /* seconds */ * 1000 /* msec */
+ / kPowerLogSamplingIntervalMs);
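+ // With the 50 ms interval: 60000 / 50 = 1200 entries, i.e. one minute of history.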
+
+ const bool mIsUserDebugOrEngBuild;
+ power_log_t *mPowerLog;
+ size_t mFrameSize;
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_POWER_LOG_4_0_H
diff --git a/media/libaudiohal/4.0/VersionUtils.h b/media/libaudiohal/4.0/VersionUtils.h
new file mode 100644
index 0000000..1246c2e
--- /dev/null
+++ b/media/libaudiohal/4.0/VersionUtils.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_VERSION_UTILS_4_0_H
+#define ANDROID_HARDWARE_VERSION_UTILS_4_0_H
+
+#include <android/hardware/audio/4.0/types.h>
+#include <hidl/HidlSupport.h>
+
+using ::android::hardware::audio::V4_0::ParameterValue;
+using ::android::hardware::audio::V4_0::Result;
+using ::android::hardware::Return;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_string;
+
+namespace android {
+namespace V4_0 {
+namespace utils {
+
+template <class T, class Callback>
+Return<void> getParameters(T& object, hidl_vec<ParameterValue> context,
+ hidl_vec<hidl_string> keys, Callback callback) {
+ return object->getParameters(context, keys, callback);
+}
+
+template <class T>
+Return<Result> setParameters(T& object, hidl_vec<ParameterValue> context,
+ hidl_vec<ParameterValue> keys) {
+ return object->setParameters(context, keys);
+}
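+
+// Illustrative (hypothetical) caller, assuming 'device' is a sp<IDevice> and 'keys' is a
+// hidl_vec<hidl_string>:
+//   utils::getParameters(device, {} /* context */, keys,
+//           [](Result r, const hidl_vec<ParameterValue>& values) { /* use values */ });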
+
+} // namespace utils
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_VERSION_UTILS_4_0_H
diff --git a/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h b/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h
new file mode 100644
index 0000000..abf6de0
--- /dev/null
+++ b/media/libaudiohal/4.0/include/libaudiohal/4.0/DevicesFactoryHalHybrid.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
+
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+namespace V4_0 {
+
+class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
+{
+ public:
+ // Opens a device with the specified name. To close the device, it is
+ // necessary to release references to the returned object.
+ virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+ private:
+ friend class DevicesFactoryHalInterface;
+
+ // Can not be constructed directly by clients.
+ DevicesFactoryHalHybrid();
+
+ virtual ~DevicesFactoryHalHybrid();
+
+ sp<DevicesFactoryHalInterface> mLocalFactory;
+ sp<DevicesFactoryHalInterface> mHidlFactory;
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_4_0_H
diff --git a/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h b/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h
new file mode 100644
index 0000000..680b7a1
--- /dev/null
+++ b/media/libaudiohal/4.0/include/libaudiohal/4.0/EffectsFactoryHalHidl.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
+
+#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/4.0/types.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+
+namespace android {
+namespace V4_0 {
+
+using ::android::hardware::audio::effect::V4_0::EffectDescriptor;
+using ::android::hardware::audio::effect::V4_0::IEffectsFactory;
+using ::android::hardware::hidl_vec;
+
+class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
+{
+ public:
+ // Returns the number of different effects in all loaded libraries.
+ virtual status_t queryNumberEffects(uint32_t *pNumEffects);
+
+ // Returns a descriptor of the next available effect.
+ virtual status_t getDescriptor(uint32_t index,
+ effect_descriptor_t *pDescriptor);
+
+ virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+ effect_descriptor_t *pDescriptor);
+
+ // Creates an effect engine of the specified type.
+ // To release the effect engine, it is necessary to release references
+ // to the returned effect object.
+ virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+ int32_t sessionId, int32_t ioId,
+ sp<EffectHalInterface> *effect);
+
+ virtual status_t dumpEffects(int fd);
+
+ status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) override;
+ status_t mirrorBuffer(void* external, size_t size,
+ sp<EffectBufferHalInterface>* buffer) override;
+
+ private:
+ friend class EffectsFactoryHalInterface;
+
+ sp<IEffectsFactory> mEffectsFactory;
+ hidl_vec<EffectDescriptor> mLastDescriptors;
+
+ // Can not be constructed directly by clients.
+ EffectsFactoryHalHidl();
+ virtual ~EffectsFactoryHalHidl();
+
+ status_t queryAllDescriptors();
+};
+
+} // namespace V4_0
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_4_0_H
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
new file mode 100644
index 0000000..3a5df27
--- /dev/null
+++ b/media/libaudiohal/Android.bp
@@ -0,0 +1,56 @@
+cc_library_shared {
+ name: "libaudiohal",
+
+ srcs: [
+ "DevicesFactoryHalInterface.cpp",
+ "EffectsFactoryHalInterface.cpp",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
+ shared_libs: [
+ "android.hardware.audio.effect@2.0",
+ "android.hardware.audio.effect@4.0",
+ "android.hardware.audio@2.0",
+ "android.hardware.audio@4.0",
+ "libaudiohal@2.0",
+ "libaudiohal@4.0",
+ "libutils",
+ ],
+
+ header_libs: [
+ "libaudiohal_headers"
+ ]
+}
+
+cc_library_shared {
+ name: "libaudiohal_deathhandler",
+
+ srcs: [
+ "HalDeathHandlerHidl.cpp",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
+ shared_libs: [
+ "libhidlbase",
+ "libutils",
+ "liblog",
+ ],
+
+ header_libs: [
+ "libaudiohal_headers"
+ ]
+}
+
+cc_library_headers {
+ name: "libaudiohal_headers",
+
+ export_include_dirs: ["include"],
+}
diff --git a/media/libaudiohal/Android.mk b/media/libaudiohal/Android.mk
deleted file mode 100644
index 827908e..0000000
--- a/media/libaudiohal/Android.mk
+++ /dev/null
@@ -1,71 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SHARED_LIBRARIES := \
- libaudioutils \
- libcutils \
- liblog \
- libutils \
- libhardware
-
-LOCAL_SRC_FILES := \
- DeviceHalLocal.cpp \
- DevicesFactoryHalHybrid.cpp \
- DevicesFactoryHalLocal.cpp \
- StreamHalLocal.cpp
-
-LOCAL_CFLAGS := -Wall -Werror
-
-ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL), true)
-
-# Use audiohal directly w/o hwbinder middleware.
-# This is for performance comparison and debugging only.
-
-LOCAL_SRC_FILES += \
- EffectBufferHalLocal.cpp \
- EffectsFactoryHalLocal.cpp \
- EffectHalLocal.cpp
-
-LOCAL_SHARED_LIBRARIES += \
- libeffects
-
-LOCAL_CFLAGS += -DUSE_LEGACY_LOCAL_AUDIO_HAL
-
-else # if !USE_LEGACY_LOCAL_AUDIO_HAL
-
-LOCAL_SRC_FILES += \
- ConversionHelperHidl.cpp \
- HalDeathHandlerHidl.cpp \
- DeviceHalHidl.cpp \
- DevicesFactoryHalHidl.cpp \
- EffectBufferHalHidl.cpp \
- EffectHalHidl.cpp \
- EffectsFactoryHalHidl.cpp \
- StreamHalHidl.cpp
-
-LOCAL_SHARED_LIBRARIES += \
- libbase \
- libfmq \
- libhwbinder \
- libhidlbase \
- libhidlmemory \
- libhidltransport \
- android.hardware.audio@2.0 \
- android.hardware.audio.common@2.0 \
- android.hardware.audio.common@2.0-util \
- android.hardware.audio.effect@2.0 \
- android.hidl.allocator@1.0 \
- android.hidl.memory@1.0 \
- libmedia_helper \
- libmediautils
-
-endif # USE_LEGACY_LOCAL_AUDIO_HAL
-
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
-
-LOCAL_MODULE := libaudiohal
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libaudiohal/DeviceHalHidl.cpp b/media/libaudiohal/DeviceHalHidl.cpp
deleted file mode 100644
index 49ef991..0000000
--- a/media/libaudiohal/DeviceHalHidl.cpp
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdio.h>
-
-#define LOG_TAG "DeviceHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IPrimaryDevice.h>
-#include <cutils/native_handle.h>
-#include <hwbinder/IPCThreadState.h>
-#include <utils/Log.h>
-
-#include "DeviceHalHidl.h"
-#include "HidlUtils.h"
-#include "StreamHalHidl.h"
-
-using ::android::hardware::audio::common::V2_0::AudioConfig;
-using ::android::hardware::audio::common::V2_0::AudioDevice;
-using ::android::hardware::audio::common::V2_0::AudioInputFlag;
-using ::android::hardware::audio::common::V2_0::AudioOutputFlag;
-using ::android::hardware::audio::common::V2_0::AudioPatchHandle;
-using ::android::hardware::audio::common::V2_0::AudioPort;
-using ::android::hardware::audio::common::V2_0::AudioPortConfig;
-using ::android::hardware::audio::common::V2_0::AudioMode;
-using ::android::hardware::audio::common::V2_0::AudioSource;
-using ::android::hardware::audio::V2_0::DeviceAddress;
-using ::android::hardware::audio::V2_0::IPrimaryDevice;
-using ::android::hardware::audio::V2_0::ParameterValue;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-
-namespace android {
-
-namespace {
-
-status_t deviceAddressFromHal(
- audio_devices_t device, const char* halAddress, DeviceAddress* address) {
- address->device = AudioDevice(device);
-
- if (address == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
- return OK;
- }
- const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
- if (isInput) device &= ~AUDIO_DEVICE_BIT_IN;
- if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_A2DP) != 0)
- || (isInput && (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
- int status = sscanf(halAddress,
- "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX",
- &address->address.mac[0], &address->address.mac[1], &address->address.mac[2],
- &address->address.mac[3], &address->address.mac[4], &address->address.mac[5]);
- return status == 6 ? OK : BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_IP) != 0)
- || (isInput && (device & AUDIO_DEVICE_IN_IP) != 0)) {
- int status = sscanf(halAddress,
- "%hhu.%hhu.%hhu.%hhu",
- &address->address.ipv4[0], &address->address.ipv4[1],
- &address->address.ipv4[2], &address->address.ipv4[3]);
- return status == 4 ? OK : BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_USB)) != 0
- || (isInput && (device & AUDIO_DEVICE_IN_ALL_USB)) != 0) {
- int status = sscanf(halAddress,
- "card=%d;device=%d",
- &address->address.alsa.card, &address->address.alsa.device);
- return status == 2 ? OK : BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_BUS) != 0)
- || (isInput && (device & AUDIO_DEVICE_IN_BUS) != 0)) {
- if (halAddress != NULL) {
- address->busAddress = halAddress;
- return OK;
- }
- return BAD_VALUE;
- } else if ((!isInput && (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) != 0
- || (isInput && (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
- if (halAddress != NULL) {
- address->rSubmixAddress = halAddress;
- return OK;
- }
- return BAD_VALUE;
- }
- return OK;
-}
-
-} // namespace
-
-DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
- : ConversionHelperHidl("Device"), mDevice(device),
- mPrimaryDevice(IPrimaryDevice::castFrom(device)) {
-}
-
-DeviceHalHidl::~DeviceHalHidl() {
- if (mDevice != 0) {
- mDevice.clear();
- hardware::IPCThreadState::self()->flushCommands();
- }
-}
-
-status_t DeviceHalHidl::getSupportedDevices(uint32_t*) {
- // Obsolete.
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalHidl::initCheck() {
- if (mDevice == 0) return NO_INIT;
- return processReturn("initCheck", mDevice->initCheck());
-}
-
-status_t DeviceHalHidl::setVoiceVolume(float volume) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setVoiceVolume", mPrimaryDevice->setVoiceVolume(volume));
-}
-
-status_t DeviceHalHidl::setMasterVolume(float volume) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
-}
-
-status_t DeviceHalHidl::getMasterVolume(float *volume) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- Result retval;
- Return<void> ret = mPrimaryDevice->getMasterVolume(
- [&](Result r, float v) {
- retval = r;
- if (retval == Result::OK) {
- *volume = v;
- }
- });
- return processReturn("getMasterVolume", ret, retval);
-}
-
-status_t DeviceHalHidl::setMode(audio_mode_t mode) {
- if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setMode", mPrimaryDevice->setMode(AudioMode(mode)));
-}
-
-status_t DeviceHalHidl::setMicMute(bool state) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("setMicMute", mDevice->setMicMute(state));
-}
-
-status_t DeviceHalHidl::getMicMute(bool *state) {
- if (mDevice == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mDevice->getMicMute(
- [&](Result r, bool mute) {
- retval = r;
- if (retval == Result::OK) {
- *state = mute;
- }
- });
- return processReturn("getMicMute", ret, retval);
-}
-
-status_t DeviceHalHidl::setMasterMute(bool state) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("setMasterMute", mDevice->setMasterMute(state));
-}
-
-status_t DeviceHalHidl::getMasterMute(bool *state) {
- if (mDevice == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mDevice->getMasterMute(
- [&](Result r, bool mute) {
- retval = r;
- if (retval == Result::OK) {
- *state = mute;
- }
- });
- return processReturn("getMasterMute", ret, retval);
-}
-
-status_t DeviceHalHidl::setParameters(const String8& kvPairs) {
- if (mDevice == 0) return NO_INIT;
- hidl_vec<ParameterValue> hidlParams;
- status_t status = parametersFromHal(kvPairs, &hidlParams);
- if (status != OK) return status;
- return processReturn("setParameters", mDevice->setParameters(hidlParams));
-}
-
-status_t DeviceHalHidl::getParameters(const String8& keys, String8 *values) {
- values->clear();
- if (mDevice == 0) return NO_INIT;
- hidl_vec<hidl_string> hidlKeys;
- status_t status = keysFromHal(keys, &hidlKeys);
- if (status != OK) return status;
- Result retval;
- Return<void> ret = mDevice->getParameters(
- hidlKeys,
- [&](Result r, const hidl_vec<ParameterValue>& parameters) {
- retval = r;
- if (retval == Result::OK) {
- parametersToHal(parameters, values);
- }
- });
- return processReturn("getParameters", ret, retval);
-}
-
-status_t DeviceHalHidl::getInputBufferSize(
- const struct audio_config *config, size_t *size) {
- if (mDevice == 0) return NO_INIT;
- AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, &hidlConfig);
- Result retval;
- Return<void> ret = mDevice->getInputBufferSize(
- hidlConfig,
- [&](Result r, uint64_t bufferSize) {
- retval = r;
- if (retval == Result::OK) {
- *size = static_cast<size_t>(bufferSize);
- }
- });
- return processReturn("getInputBufferSize", ret, retval);
-}
-
-status_t DeviceHalHidl::openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream) {
- if (mDevice == 0) return NO_INIT;
- DeviceAddress hidlDevice;
- status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
- if (status != OK) return status;
- AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, &hidlConfig);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevice->openOutputStream(
- handle,
- hidlDevice,
- hidlConfig,
- AudioOutputFlag(flags),
- [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
- retval = r;
- if (retval == Result::OK) {
- *outStream = new StreamOutHalHidl(result);
- }
- HidlUtils::audioConfigToHal(suggestedConfig, config);
- });
- return processReturn("openOutputStream", ret, retval);
-}
-
-status_t DeviceHalHidl::openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream) {
- if (mDevice == 0) return NO_INIT;
- DeviceAddress hidlDevice;
- status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
- if (status != OK) return status;
- AudioConfig hidlConfig;
- HidlUtils::audioConfigFromHal(*config, &hidlConfig);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevice->openInputStream(
- handle,
- hidlDevice,
- hidlConfig,
- AudioInputFlag(flags),
- AudioSource(source),
- [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
- retval = r;
- if (retval == Result::OK) {
- *inStream = new StreamInHalHidl(result);
- }
- HidlUtils::audioConfigToHal(suggestedConfig, config);
- });
- return processReturn("openInputStream", ret, retval);
-}
-
-status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
-}
-
-status_t DeviceHalHidl::createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch) {
- if (mDevice == 0) return NO_INIT;
- hidl_vec<AudioPortConfig> hidlSources, hidlSinks;
- HidlUtils::audioPortConfigsFromHal(num_sources, sources, &hidlSources);
- HidlUtils::audioPortConfigsFromHal(num_sinks, sinks, &hidlSinks);
- Result retval;
- Return<void> ret = mDevice->createAudioPatch(
- hidlSources, hidlSinks,
- [&](Result r, AudioPatchHandle hidlPatch) {
- retval = r;
- if (retval == Result::OK) {
- *patch = static_cast<audio_patch_handle_t>(hidlPatch);
- }
- });
- return processReturn("createAudioPatch", ret, retval);
-}
-
-status_t DeviceHalHidl::releaseAudioPatch(audio_patch_handle_t patch) {
- if (mDevice == 0) return NO_INIT;
- return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
-}
-
-status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
- if (mDevice == 0) return NO_INIT;
- AudioPort hidlPort;
- HidlUtils::audioPortFromHal(*port, &hidlPort);
- Result retval;
- Return<void> ret = mDevice->getAudioPort(
- hidlPort,
- [&](Result r, const AudioPort& p) {
- retval = r;
- if (retval == Result::OK) {
- HidlUtils::audioPortToHal(p, port);
- }
- });
- return processReturn("getAudioPort", ret, retval);
-}
-
-status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
- if (mDevice == 0) return NO_INIT;
- AudioPortConfig hidlConfig;
- HidlUtils::audioPortConfigFromHal(*config, &hidlConfig);
- return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
-}
-
-status_t DeviceHalHidl::dump(int fd) {
- if (mDevice == 0) return NO_INIT;
- native_handle_t* hidlHandle = native_handle_create(1, 0);
- hidlHandle->data[0] = fd;
- Return<void> ret = mDevice->debugDump(hidlHandle);
- native_handle_delete(hidlHandle);
- return processReturn("dump", ret);
-}
-
-} // namespace android
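
The wrapper above follows a single pattern throughout: call the HIDL method with a synchronous callback, capture the service-side Result, and fold the binder transport status and the logical status into one status_t. A minimal standalone sketch of that pattern, assuming the android.hardware.audio@2.0 client headers are available (the helper name getMicMuteStatus is hypothetical):

    #include <android/hardware/audio/2.0/IDevice.h>
    #include <utils/Errors.h>

    using ::android::sp;
    using ::android::status_t;
    using ::android::hardware::Return;
    using ::android::hardware::audio::V2_0::IDevice;
    using ::android::hardware::audio::V2_0::Result;

    // Hypothetical helper: query mic mute over HIDL and map both the transport
    // error (dead binder) and the HAL-side Result into a single status_t.
    static status_t getMicMuteStatus(const sp<IDevice>& device, bool* state) {
        if (device == nullptr) return android::NO_INIT;
        Result retval = Result::NOT_INITIALIZED;
        Return<void> ret = device->getMicMute([&](Result r, bool mute) {
            retval = r;
            if (r == Result::OK) *state = mute;
        });
        if (!ret.isOk()) return android::FAILED_TRANSACTION;  // transport failure
        return retval == Result::OK ? android::OK : android::UNKNOWN_ERROR;
    }
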
diff --git a/media/libaudiohal/DeviceHalHidl.h b/media/libaudiohal/DeviceHalHidl.h
deleted file mode 100644
index 8651b51..0000000
--- a/media/libaudiohal/DeviceHalHidl.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
-#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
-
-#include <android/hardware/audio/2.0/IDevice.h>
-#include <android/hardware/audio/2.0/IPrimaryDevice.h>
-#include <media/audiohal/DeviceHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-
-using ::android::hardware::audio::V2_0::IDevice;
-using ::android::hardware::audio::V2_0::IPrimaryDevice;
-using ::android::hardware::Return;
-
-namespace android {
-
-class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
-{
- public:
- // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
- virtual status_t getSupportedDevices(uint32_t *devices);
-
- // Check to see if the audio hardware interface has been initialized.
- virtual status_t initCheck();
-
- // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
- virtual status_t setVoiceVolume(float volume);
-
- // Set the audio volume for all audio activities other than voice call.
- virtual status_t setMasterVolume(float volume);
-
- // Get the current master volume value for the HAL.
- virtual status_t getMasterVolume(float *volume);
-
- // Called when the audio mode changes.
- virtual status_t setMode(audio_mode_t mode);
-
- // Muting control.
- virtual status_t setMicMute(bool state);
- virtual status_t getMicMute(bool *state);
- virtual status_t setMasterMute(bool state);
- virtual status_t getMasterMute(bool *state);
-
- // Set global audio parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get global audio parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Returns audio input buffer size according to parameters passed.
- virtual status_t getInputBufferSize(const struct audio_config *config,
- size_t *size);
-
- // Creates and opens the audio hardware output stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream);
-
- // Creates and opens the audio hardware input stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream);
-
- // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
- virtual status_t supportsAudioPatches(bool *supportsPatches);
-
- // Creates an audio patch between several source and sink ports.
- virtual status_t createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch);
-
- // Releases an audio patch.
- virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
-
- // Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port *port);
-
- // Set audio port configuration.
- virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-
- virtual status_t dump(int fd);
-
- private:
- friend class DevicesFactoryHalHidl;
- sp<IDevice> mDevice;
- sp<IPrimaryDevice> mPrimaryDevice; // Null if it's not a primary device.
-
- // Can not be constructed directly by clients.
- explicit DeviceHalHidl(const sp<IDevice>& device);
-
- // The destructor automatically closes the device.
- virtual ~DeviceHalHidl();
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
diff --git a/media/libaudiohal/DeviceHalLocal.cpp b/media/libaudiohal/DeviceHalLocal.cpp
deleted file mode 100644
index fc098f5..0000000
--- a/media/libaudiohal/DeviceHalLocal.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DeviceHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "StreamHalLocal.h"
-
-namespace android {
-
-DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
- : mDev(dev) {
-}
-
-DeviceHalLocal::~DeviceHalLocal() {
- int status = audio_hw_device_close(mDev);
- ALOGW_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
- mDev = 0;
-}
-
-status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
- if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
- *devices = mDev->get_supported_devices(mDev);
- return OK;
-}
-
-status_t DeviceHalLocal::initCheck() {
- return mDev->init_check(mDev);
-}
-
-status_t DeviceHalLocal::setVoiceVolume(float volume) {
- return mDev->set_voice_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::setMasterVolume(float volume) {
- if (mDev->set_master_volume == NULL) return INVALID_OPERATION;
- return mDev->set_master_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::getMasterVolume(float *volume) {
- if (mDev->get_master_volume == NULL) return INVALID_OPERATION;
- return mDev->get_master_volume(mDev, volume);
-}
-
-status_t DeviceHalLocal::setMode(audio_mode_t mode) {
- return mDev->set_mode(mDev, mode);
-}
-
-status_t DeviceHalLocal::setMicMute(bool state) {
- return mDev->set_mic_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::getMicMute(bool *state) {
- return mDev->get_mic_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::setMasterMute(bool state) {
- if (mDev->set_master_mute == NULL) return INVALID_OPERATION;
- return mDev->set_master_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::getMasterMute(bool *state) {
- if (mDev->get_master_mute == NULL) return INVALID_OPERATION;
- return mDev->get_master_mute(mDev, state);
-}
-
-status_t DeviceHalLocal::setParameters(const String8& kvPairs) {
- return mDev->set_parameters(mDev, kvPairs.string());
-}
-
-status_t DeviceHalLocal::getParameters(const String8& keys, String8 *values) {
- char *halValues = mDev->get_parameters(mDev, keys.string());
- if (halValues != NULL) {
- values->setTo(halValues);
- free(halValues);
- } else {
- values->clear();
- }
- return OK;
-}
-
-status_t DeviceHalLocal::getInputBufferSize(
- const struct audio_config *config, size_t *size) {
- *size = mDev->get_input_buffer_size(mDev, config);
- return OK;
-}
-
-status_t DeviceHalLocal::openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream) {
- audio_stream_out_t *halStream;
- ALOGV("open_output_stream handle: %d devices: %x flags: %#x "
- "srate: %d format %#x channels %x address %s",
- handle, devices, flags,
- config->sample_rate, config->format, config->channel_mask,
- address);
- int openResult = mDev->open_output_stream(
- mDev, handle, devices, flags, config, &halStream, address);
- if (openResult == OK) {
- *outStream = new StreamOutHalLocal(halStream, this);
- }
- ALOGV("open_output_stream status %d stream %p", openResult, halStream);
- return openResult;
-}
-
-status_t DeviceHalLocal::openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream) {
- audio_stream_in_t *halStream;
- ALOGV("open_input_stream handle: %d devices: %x flags: %#x "
- "srate: %d format %#x channels %x address %s source %d",
- handle, devices, flags,
- config->sample_rate, config->format, config->channel_mask,
- address, source);
- int openResult = mDev->open_input_stream(
- mDev, handle, devices, config, &halStream, flags, address, source);
- if (openResult == OK) {
- *inStream = new StreamInHalLocal(halStream, this);
- }
- ALOGV("open_input_stream status %d stream %p", openResult, halStream);
- return openResult;
-}
-
-status_t DeviceHalLocal::supportsAudioPatches(bool *supportsPatches) {
- *supportsPatches = version() >= AUDIO_DEVICE_API_VERSION_3_0;
- return OK;
-}
-
-status_t DeviceHalLocal::createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch) {
- if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- return mDev->create_audio_patch(
- mDev, num_sources, sources, num_sinks, sinks, patch);
- } else {
- return INVALID_OPERATION;
- }
-}
-
-status_t DeviceHalLocal::releaseAudioPatch(audio_patch_handle_t patch) {
- if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- return mDev->release_audio_patch(mDev, patch);
- } else {
- return INVALID_OPERATION;
- }
-}
-
-status_t DeviceHalLocal::getAudioPort(struct audio_port *port) {
- return mDev->get_audio_port(mDev, port);
-}
-
-status_t DeviceHalLocal::setAudioPortConfig(const struct audio_port_config *config) {
- if (version() >= AUDIO_DEVICE_API_VERSION_3_0)
- return mDev->set_audio_port_config(mDev, config);
- else
- return INVALID_OPERATION;
-}
-
-status_t DeviceHalLocal::dump(int fd) {
- return mDev->dump(mDev, fd);
-}
-
-void DeviceHalLocal::closeOutputStream(struct audio_stream_out *stream_out) {
- mDev->close_output_stream(mDev, stream_out);
-}
-
-void DeviceHalLocal::closeInputStream(struct audio_stream_in *stream_in) {
- mDev->close_input_stream(mDev, stream_in);
-}
-
-} // namespace android
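
The local variant wraps the legacy audio_hw_device_t function-pointer table directly, and several of those entries are optional. A short sketch of the guard pattern used above, assuming only hardware/audio.h (the helper name is hypothetical):

    #include <hardware/audio.h>
    #include <utils/Errors.h>

    // Optional legacy HAL hooks may be NULL in the vtable, so each call is
    // guarded and reported as INVALID_OPERATION when the device lacks it.
    static android::status_t setMasterVolumeIfSupported(audio_hw_device_t* dev, float volume) {
        if (dev == nullptr) return android::NO_INIT;
        if (dev->set_master_volume == nullptr) return android::INVALID_OPERATION;
        return dev->set_master_volume(dev, volume);
    }
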
diff --git a/media/libaudiohal/DeviceHalLocal.h b/media/libaudiohal/DeviceHalLocal.h
deleted file mode 100644
index 865f296..0000000
--- a/media/libaudiohal/DeviceHalLocal.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
-#define ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
-
-#include <hardware/audio.h>
-#include <media/audiohal/DeviceHalInterface.h>
-
-namespace android {
-
-class DeviceHalLocal : public DeviceHalInterface
-{
- public:
- // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
- virtual status_t getSupportedDevices(uint32_t *devices);
-
- // Check to see if the audio hardware interface has been initialized.
- virtual status_t initCheck();
-
- // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
- virtual status_t setVoiceVolume(float volume);
-
- // Set the audio volume for all audio activities other than voice call.
- virtual status_t setMasterVolume(float volume);
-
- // Get the current master volume value for the HAL.
- virtual status_t getMasterVolume(float *volume);
-
- // Called when the audio mode changes.
- virtual status_t setMode(audio_mode_t mode);
-
- // Muting control.
- virtual status_t setMicMute(bool state);
- virtual status_t getMicMute(bool *state);
- virtual status_t setMasterMute(bool state);
- virtual status_t getMasterMute(bool *state);
-
- // Set global audio parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get global audio parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Returns audio input buffer size according to parameters passed.
- virtual status_t getInputBufferSize(const struct audio_config *config,
- size_t *size);
-
- // Creates and opens the audio hardware output stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream);
-
- // Creates and opens the audio hardware input stream. The stream is closed
- // by releasing all references to the returned object.
- virtual status_t openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- sp<StreamInHalInterface> *inStream);
-
- // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
- virtual status_t supportsAudioPatches(bool *supportsPatches);
-
- // Creates an audio patch between several source and sink ports.
- virtual status_t createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch);
-
- // Releases an audio patch.
- virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
-
- // Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port *port);
-
- // Set audio port configuration.
- virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-
- virtual status_t dump(int fd);
-
- void closeOutputStream(struct audio_stream_out *stream_out);
- void closeInputStream(struct audio_stream_in *stream_in);
-
- private:
- audio_hw_device_t *mDev;
-
- friend class DevicesFactoryHalLocal;
-
- // Can not be constructed directly by clients.
- explicit DeviceHalLocal(audio_hw_device_t *dev);
-
- // The destructor automatically closes the device.
- virtual ~DeviceHalLocal();
-
- uint32_t version() const { return mDev->common.version; }
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/media/libaudiohal/DevicesFactoryHalHidl.cpp b/media/libaudiohal/DevicesFactoryHalHidl.cpp
deleted file mode 100644
index 31da263..0000000
--- a/media/libaudiohal/DevicesFactoryHalHidl.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#define LOG_TAG "DevicesFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IDevice.h>
-#include <media/audiohal/hidl/HalDeathHandler.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "DeviceHalHidl.h"
-#include "DevicesFactoryHalHidl.h"
-
-using ::android::hardware::audio::V2_0::IDevice;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
- mDevicesFactory = IDevicesFactory::getService();
- if (mDevicesFactory != 0) {
- // It is assumed that DevicesFactory is owned by AudioFlinger
- // and thus has the same lifespan.
- mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
- } else {
- ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
- exit(1);
- }
-}
-
-DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
-}
-
-// static
-status_t DevicesFactoryHalHidl::nameFromHal(const char *name, IDevicesFactory::Device *device) {
- if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
- *device = IDevicesFactory::Device::PRIMARY;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
- *device = IDevicesFactory::Device::A2DP;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
- *device = IDevicesFactory::Device::USB;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
- *device = IDevicesFactory::Device::R_SUBMIX;
- return OK;
- } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
- *device = IDevicesFactory::Device::STUB;
- return OK;
- }
- ALOGE("Invalid device name %s", name);
- return BAD_VALUE;
-}
-
-status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- if (mDevicesFactory == 0) return NO_INIT;
- IDevicesFactory::Device hidlDevice;
- status_t status = nameFromHal(name, &hidlDevice);
- if (status != OK) return status;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mDevicesFactory->openDevice(
- hidlDevice,
- [&](Result r, const sp<IDevice>& result) {
- retval = r;
- if (retval == Result::OK) {
- *device = new DeviceHalHidl(result);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
- else return NO_INIT;
- }
- return FAILED_TRANSACTION;
-}
-
-} // namespace android
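
The linkToDeath() call above is what ties the audio server's fate to the HAL service. A standalone sketch of such a death recipient, assuming only the libhidl headers (the class name and the abort-on-death policy are illustrative; the real policy lives in HalDeathHandler):

    #include <android/hidl/base/1.0/IBase.h>
    #include <hidl/HidlSupport.h>
    #include <utils/Log.h>

    // Hypothetical recipient: if the audio HAL process dies, abort the client so
    // init can restart both sides in a consistent state.
    struct AudioHalDeathRecipient : ::android::hardware::hidl_death_recipient {
        void serviceDied(uint64_t /*cookie*/,
                const ::android::wp<::android::hidl::base::V1_0::IBase>& /*who*/) override {
            LOG_ALWAYS_FATAL("Audio HAL server died, aborting client");
        }
    };

It would be registered much like the code above: keep an sp<AudioHalDeathRecipient> alive and pass it to the factory's linkToDeath() with an arbitrary cookie.
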
diff --git a/media/libaudiohal/DevicesFactoryHalHidl.h b/media/libaudiohal/DevicesFactoryHalHidl.h
deleted file mode 100644
index e2f1ad1..0000000
--- a/media/libaudiohal/DevicesFactoryHalHidl.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
-
-#include <android/hardware/audio/2.0/IDevicesFactory.h>
-#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-#include "DeviceHalHidl.h"
-
-using ::android::hardware::audio::V2_0::IDevicesFactory;
-
-namespace android {
-
-class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
-{
- public:
- // Opens a device with the specified name. To close the device, it is
- // necessary to release references to the returned object.
- virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
- private:
- friend class DevicesFactoryHalHybrid;
-
- sp<IDevicesFactory> mDevicesFactory;
-
- static status_t nameFromHal(const char *name, IDevicesFactory::Device *device);
-
- // Can not be constructed directly by clients.
- DevicesFactoryHalHidl();
-
- virtual ~DevicesFactoryHalHidl();
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/DevicesFactoryHalHybrid.cpp
deleted file mode 100644
index 454b03b..0000000
--- a/media/libaudiohal/DevicesFactoryHalHybrid.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DevicesFactoryHalHybrid"
-//#define LOG_NDEBUG 0
-
-#include "DevicesFactoryHalHybrid.h"
-#include "DevicesFactoryHalLocal.h"
-#ifndef USE_LEGACY_LOCAL_AUDIO_HAL
-#include "DevicesFactoryHalHidl.h"
-#endif
-
-namespace android {
-
-// static
-sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
- return new DevicesFactoryHalHybrid();
-}
-
-DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
- : mLocalFactory(new DevicesFactoryHalLocal()),
- mHidlFactory(
-#ifdef USE_LEGACY_LOCAL_AUDIO_HAL
- nullptr
-#else
- new DevicesFactoryHalHidl()
-#endif
- ) {
-}
-
-DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
-}
-
-status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
- if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0) {
- return mHidlFactory->openDevice(name, device);
- }
- return mLocalFactory->openDevice(name, device);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/DevicesFactoryHalInterface.cpp b/media/libaudiohal/DevicesFactoryHalInterface.cpp
new file mode 100644
index 0000000..4c8eaf6
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalInterface.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/hardware/audio/2.0/IDevicesFactory.h>
+#include <android/hardware/audio/4.0/IDevicesFactory.h>
+
+#include <DevicesFactoryHalHybrid.h>
+#include <libaudiohal/4.0/DevicesFactoryHalHybrid.h>
+
+namespace android {
+
+// static
+sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
+ if (hardware::audio::V4_0::IDevicesFactory::getService() != nullptr) {
+ return new V4_0::DevicesFactoryHalHybrid();
+ }
+ if (hardware::audio::V2_0::IDevicesFactory::getService() != nullptr) {
+ return new DevicesFactoryHalHybrid();
+ }
+ return nullptr;
+}
+
+} // namespace android
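
A minimal caller-side sketch of the new entry point (error handling is illustrative; AUDIO_HARDWARE_MODULE_ID_PRIMARY comes from hardware/audio.h and the helper name is hypothetical):

    #include <hardware/audio.h>
    #include <media/audiohal/DeviceHalInterface.h>
    #include <media/audiohal/DevicesFactoryHalInterface.h>
    #include <utils/Errors.h>

    using namespace android;

    // Open the primary audio device through whichever HAL major version
    // (4.0 or 2.0) DevicesFactoryHalInterface::create() detected at runtime.
    static sp<DeviceHalInterface> openPrimaryDevice() {
        sp<DevicesFactoryHalInterface> factory = DevicesFactoryHalInterface::create();
        if (factory == nullptr) return nullptr;  // no audio HAL service registered
        sp<DeviceHalInterface> device;
        if (factory->openDevice(AUDIO_HARDWARE_MODULE_ID_PRIMARY, &device) != OK) {
            return nullptr;
        }
        return device;
    }
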
diff --git a/media/libaudiohal/EffectBufferHalHidl.cpp b/media/libaudiohal/EffectBufferHalHidl.cpp
deleted file mode 100644
index 8b5201b..0000000
--- a/media/libaudiohal/EffectBufferHalHidl.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <atomic>
-
-#define LOG_TAG "EffectBufferHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hidl/allocator/1.0/IAllocator.h>
-#include <hidlmemory/mapping.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-
-using ::android::hardware::Return;
-using ::android::hidl::allocator::V1_0::IAllocator;
-
-namespace android {
-
-// static
-uint64_t EffectBufferHalHidl::makeUniqueId() {
- static std::atomic<uint64_t> counter{1};
- return counter++;
-}
-
-// static
-status_t EffectBufferHalInterface::allocate(
- size_t size, sp<EffectBufferHalInterface>* buffer) {
- return mirror(nullptr, size, buffer);
-}
-
-// static
-status_t EffectBufferHalInterface::mirror(
- void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
- sp<EffectBufferHalInterface> tempBuffer = new EffectBufferHalHidl(size);
- status_t result = static_cast<EffectBufferHalHidl*>(tempBuffer.get())->init();
- if (result == OK) {
- tempBuffer->setExternalData(external);
- *buffer = tempBuffer;
- }
- return result;
-}
-
-EffectBufferHalHidl::EffectBufferHalHidl(size_t size)
- : mBufferSize(size), mFrameCountChanged(false),
- mExternalData(nullptr), mAudioBuffer{0, {nullptr}} {
- mHidlBuffer.id = makeUniqueId();
- mHidlBuffer.frameCount = 0;
-}
-
-EffectBufferHalHidl::~EffectBufferHalHidl() {
-}
-
-status_t EffectBufferHalHidl::init() {
- sp<IAllocator> ashmem = IAllocator::getService("ashmem");
- if (ashmem == 0) {
- ALOGE("Failed to retrieve ashmem allocator service");
- return NO_INIT;
- }
- status_t retval = NO_MEMORY;
- Return<void> result = ashmem->allocate(
- mBufferSize,
- [&](bool success, const hidl_memory& memory) {
- if (success) {
- mHidlBuffer.data = memory;
- retval = OK;
- }
- });
- if (result.isOk() && retval == OK) {
- mMemory = hardware::mapMemory(mHidlBuffer.data);
- if (mMemory != 0) {
- mMemory->update();
- mAudioBuffer.raw = static_cast<void*>(mMemory->getPointer());
- memset(mAudioBuffer.raw, 0, mMemory->getSize());
- mMemory->commit();
- } else {
- ALOGE("Failed to map allocated ashmem");
- retval = NO_MEMORY;
- }
- } else {
- ALOGE("Failed to allocate %d bytes from ashmem", (int)mBufferSize);
- }
- return result.isOk() ? retval : FAILED_TRANSACTION;
-}
-
-audio_buffer_t* EffectBufferHalHidl::audioBuffer() {
- return &mAudioBuffer;
-}
-
-void* EffectBufferHalHidl::externalData() const {
- return mExternalData;
-}
-
-void EffectBufferHalHidl::setFrameCount(size_t frameCount) {
- mHidlBuffer.frameCount = frameCount;
- mAudioBuffer.frameCount = frameCount;
- mFrameCountChanged = true;
-}
-
-bool EffectBufferHalHidl::checkFrameCountChange() {
- bool result = mFrameCountChanged;
- mFrameCountChanged = false;
- return result;
-}
-
-void EffectBufferHalHidl::setExternalData(void* external) {
- mExternalData = external;
-}
-
-void EffectBufferHalHidl::update() {
- update(mBufferSize);
-}
-
-void EffectBufferHalHidl::commit() {
- commit(mBufferSize);
-}
-
-void EffectBufferHalHidl::update(size_t size) {
- if (mExternalData == nullptr) return;
- mMemory->update();
- if (size > mBufferSize) size = mBufferSize;
- memcpy(mAudioBuffer.raw, mExternalData, size);
- mMemory->commit();
-}
-
-void EffectBufferHalHidl::commit(size_t size) {
- if (mExternalData == nullptr) return;
- if (size > mBufferSize) size = mBufferSize;
- memcpy(mExternalData, mAudioBuffer.raw, size);
-}
-
-} // namespace android
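
Callers reach this code through the allocate()/mirror() entry points and the update()/commit() pair that shuttles data between the client buffer and the ashmem-backed copy. A short usage sketch, with the processing call left as a placeholder:

    #include <media/audiohal/EffectBufferHalInterface.h>
    #include <utils/Errors.h>

    using namespace android;

    // Mirror an existing client-owned buffer, push its contents into the shared
    // memory before processing, and pull the results back afterwards. In the
    // local implementation update()/commit() are no-ops.
    static status_t processThroughSharedBuffer(void* clientData, size_t size) {
        sp<EffectBufferHalInterface> buffer;
        status_t status = EffectBufferHalInterface::mirror(clientData, size, &buffer);
        if (status != OK) return status;
        buffer->update();   // client buffer -> shared memory
        // ... hand buffer->audioBuffer() to the effect and run processing ...
        buffer->commit();   // shared memory -> client buffer
        return OK;
    }
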
diff --git a/media/libaudiohal/EffectBufferHalHidl.h b/media/libaudiohal/EffectBufferHalHidl.h
deleted file mode 100644
index 66a81c2..0000000
--- a/media/libaudiohal/EffectBufferHalHidl.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
-#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
-
-#include <android/hardware/audio/effect/2.0/types.h>
-#include <android/hidl/memory/1.0/IMemory.h>
-#include <hidl/HidlSupport.h>
-#include <media/audiohal/EffectBufferHalInterface.h>
-#include <system/audio_effect.h>
-
-using android::hardware::audio::effect::V2_0::AudioBuffer;
-using android::hardware::hidl_memory;
-using android::hidl::memory::V1_0::IMemory;
-
-namespace android {
-
-class EffectBufferHalHidl : public EffectBufferHalInterface
-{
- public:
- virtual audio_buffer_t* audioBuffer();
- virtual void* externalData() const;
-
- virtual void setExternalData(void* external);
- virtual void setFrameCount(size_t frameCount);
- virtual bool checkFrameCountChange();
-
- virtual void update();
- virtual void commit();
- virtual void update(size_t size);
- virtual void commit(size_t size);
-
- const AudioBuffer& hidlBuffer() const { return mHidlBuffer; }
-
- private:
- friend class EffectBufferHalInterface;
-
- static uint64_t makeUniqueId();
-
- const size_t mBufferSize;
- bool mFrameCountChanged;
- void* mExternalData;
- AudioBuffer mHidlBuffer;
- sp<IMemory> mMemory;
- audio_buffer_t mAudioBuffer;
-
- // Can not be constructed directly by clients.
- explicit EffectBufferHalHidl(size_t size);
-
- virtual ~EffectBufferHalHidl();
-
- status_t init();
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
diff --git a/media/libaudiohal/EffectBufferHalLocal.cpp b/media/libaudiohal/EffectBufferHalLocal.cpp
deleted file mode 100644
index 7951c8e..0000000
--- a/media/libaudiohal/EffectBufferHalLocal.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectBufferHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-
-#include "EffectBufferHalLocal.h"
-
-namespace android {
-
-// static
-status_t EffectBufferHalInterface::allocate(
- size_t size, sp<EffectBufferHalInterface>* buffer) {
- *buffer = new EffectBufferHalLocal(size);
- return OK;
-}
-
-// static
-status_t EffectBufferHalInterface::mirror(
- void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
- *buffer = new EffectBufferHalLocal(external, size);
- return OK;
-}
-
-EffectBufferHalLocal::EffectBufferHalLocal(size_t size)
- : mOwnBuffer(new uint8_t[size]),
- mBufferSize(size), mFrameCountChanged(false),
- mAudioBuffer{0, {mOwnBuffer.get()}} {
-}
-
-EffectBufferHalLocal::EffectBufferHalLocal(void* external, size_t size)
- : mOwnBuffer(nullptr),
- mBufferSize(size), mFrameCountChanged(false),
- mAudioBuffer{0, {external}} {
-}
-
-EffectBufferHalLocal::~EffectBufferHalLocal() {
-}
-
-audio_buffer_t* EffectBufferHalLocal::audioBuffer() {
- return &mAudioBuffer;
-}
-
-void* EffectBufferHalLocal::externalData() const {
- return mAudioBuffer.raw;
-}
-
-void EffectBufferHalLocal::setFrameCount(size_t frameCount) {
- mAudioBuffer.frameCount = frameCount;
- mFrameCountChanged = true;
-}
-
-void EffectBufferHalLocal::setExternalData(void* external) {
- ALOGE_IF(mOwnBuffer != nullptr, "Attempt to set external data for allocated buffer");
- mAudioBuffer.raw = external;
-}
-
-bool EffectBufferHalLocal::checkFrameCountChange() {
- bool result = mFrameCountChanged;
- mFrameCountChanged = false;
- return result;
-}
-
-void EffectBufferHalLocal::update() {
-}
-
-void EffectBufferHalLocal::commit() {
-}
-
-void EffectBufferHalLocal::update(size_t) {
-}
-
-void EffectBufferHalLocal::commit(size_t) {
-}
-
-} // namespace android
diff --git a/media/libaudiohal/EffectBufferHalLocal.h b/media/libaudiohal/EffectBufferHalLocal.h
deleted file mode 100644
index d2b624b..0000000
--- a/media/libaudiohal/EffectBufferHalLocal.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
-#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
-
-#include <memory>
-
-#include <media/audiohal/EffectBufferHalInterface.h>
-#include <system/audio_effect.h>
-
-namespace android {
-
-class EffectBufferHalLocal : public EffectBufferHalInterface
-{
- public:
- virtual audio_buffer_t* audioBuffer();
- virtual void* externalData() const;
-
- virtual void setExternalData(void* external);
- virtual void setFrameCount(size_t frameCount);
- virtual bool checkFrameCountChange();
-
- virtual void update();
- virtual void commit();
- virtual void update(size_t size);
- virtual void commit(size_t size);
-
- private:
- friend class EffectBufferHalInterface;
-
- std::unique_ptr<uint8_t[]> mOwnBuffer;
- const size_t mBufferSize;
- bool mFrameCountChanged;
- audio_buffer_t mAudioBuffer;
-
- // Can not be constructed directly by clients.
- explicit EffectBufferHalLocal(size_t size);
- EffectBufferHalLocal(void* external, size_t size);
-
- virtual ~EffectBufferHalLocal();
-
- status_t init();
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectHalHidl.cpp b/media/libaudiohal/EffectHalHidl.cpp
deleted file mode 100644
index 61fb6bab..0000000
--- a/media/libaudiohal/EffectHalHidl.cpp
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <hwbinder/IPCThreadState.h>
-#include <media/EffectsFactoryApi.h>
-#include <utils/Log.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectBufferHalHidl.h"
-#include "EffectHalHidl.h"
-#include "HidlUtils.h"
-
-using ::android::hardware::audio::effect::V2_0::AudioBuffer;
-using ::android::hardware::audio::effect::V2_0::EffectBufferAccess;
-using ::android::hardware::audio::effect::V2_0::EffectConfigParameters;
-using ::android::hardware::audio::effect::V2_0::MessageQueueFlagBits;
-using ::android::hardware::audio::effect::V2_0::Result;
-using ::android::hardware::audio::common::V2_0::AudioChannelMask;
-using ::android::hardware::audio::common::V2_0::AudioFormat;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-
-namespace android {
-
-EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
- : mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
-}
-
-EffectHalHidl::~EffectHalHidl() {
- if (mEffect != 0) {
- close();
- mEffect.clear();
- hardware::IPCThreadState::self()->flushCommands();
- }
- if (mEfGroup) {
- EventFlag::deleteEventFlag(&mEfGroup);
- }
-}
-
-// static
-void EffectHalHidl::effectDescriptorToHal(
- const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
- HidlUtils::uuidToHal(descriptor.type, &halDescriptor->type);
- HidlUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
- halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
- halDescriptor->cpuLoad = descriptor.cpuLoad;
- halDescriptor->memoryUsage = descriptor.memoryUsage;
- memcpy(halDescriptor->name, descriptor.name.data(), descriptor.name.size());
- memcpy(halDescriptor->implementor,
- descriptor.implementor.data(), descriptor.implementor.size());
-}
-
-// TODO(mnaganov): These buffer conversion functions should be shared with Effect wrapper
-// via HidlUtils. Move them there when hardware/interfaces is un-frozen again.
-
-// static
-void EffectHalHidl::effectBufferConfigFromHal(
- const buffer_config_t& halConfig, EffectBufferConfig* config) {
- config->samplingRateHz = halConfig.samplingRate;
- config->channels = AudioChannelMask(halConfig.channels);
- config->format = AudioFormat(halConfig.format);
- config->accessMode = EffectBufferAccess(halConfig.accessMode);
- config->mask = EffectConfigParameters(halConfig.mask);
-}
-
-// static
-void EffectHalHidl::effectBufferConfigToHal(
- const EffectBufferConfig& config, buffer_config_t* halConfig) {
- halConfig->buffer.frameCount = 0;
- halConfig->buffer.raw = NULL;
- halConfig->samplingRate = config.samplingRateHz;
- halConfig->channels = static_cast<uint32_t>(config.channels);
- halConfig->bufferProvider.cookie = NULL;
- halConfig->bufferProvider.getBuffer = NULL;
- halConfig->bufferProvider.releaseBuffer = NULL;
- halConfig->format = static_cast<uint8_t>(config.format);
- halConfig->accessMode = static_cast<uint8_t>(config.accessMode);
- halConfig->mask = static_cast<uint8_t>(config.mask);
-}
-
-// static
-void EffectHalHidl::effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config) {
- effectBufferConfigFromHal(halConfig.inputCfg, &config->inputCfg);
- effectBufferConfigFromHal(halConfig.outputCfg, &config->outputCfg);
-}
-
-// static
-void EffectHalHidl::effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig) {
- effectBufferConfigToHal(config.inputCfg, &halConfig->inputCfg);
- effectBufferConfigToHal(config.outputCfg, &halConfig->outputCfg);
-}
-
-// static
-status_t EffectHalHidl::analyzeResult(const Result& result) {
- switch (result) {
- case Result::OK: return OK;
- case Result::INVALID_ARGUMENTS: return BAD_VALUE;
- case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
- case Result::NOT_INITIALIZED: return NO_INIT;
- case Result::NOT_SUPPORTED: return INVALID_OPERATION;
- case Result::RESULT_TOO_BIG: return NO_MEMORY;
- default: return NO_INIT;
- }
-}
-
-status_t EffectHalHidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (mInBuffer == 0 || buffer->audioBuffer() != mInBuffer->audioBuffer()) {
- mBuffersChanged = true;
- }
- mInBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalHidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (mOutBuffer == 0 || buffer->audioBuffer() != mOutBuffer->audioBuffer()) {
- mBuffersChanged = true;
- }
- mOutBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalHidl::process() {
- return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
-}
-
-status_t EffectHalHidl::processReverse() {
- return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS_REVERSE));
-}
-
-status_t EffectHalHidl::prepareForProcessing() {
- std::unique_ptr<StatusMQ> tempStatusMQ;
- Result retval;
- Return<void> ret = mEffect->prepareForProcessing(
- [&](Result r, const MQDescriptorSync<Result>& statusMQ) {
- retval = r;
- if (retval == Result::OK) {
- tempStatusMQ.reset(new StatusMQ(statusMQ));
- if (tempStatusMQ->isValid() && tempStatusMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(tempStatusMQ->getEventFlagWord(), &mEfGroup);
- }
- }
- });
- if (!ret.isOk() || retval != Result::OK) {
- return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
- }
- if (!tempStatusMQ || !tempStatusMQ->isValid() || !mEfGroup) {
- ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for effects");
- ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
- "Status message queue for effects is invalid");
- ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
- return NO_INIT;
- }
- mStatusMQ = std::move(tempStatusMQ);
- return OK;
-}
-
-bool EffectHalHidl::needToResetBuffers() {
- if (mBuffersChanged) return true;
- bool inBufferFrameCountUpdated = mInBuffer->checkFrameCountChange();
- bool outBufferFrameCountUpdated = mOutBuffer->checkFrameCountChange();
- return inBufferFrameCountUpdated || outBufferFrameCountUpdated;
-}
-
-status_t EffectHalHidl::processImpl(uint32_t mqFlag) {
- if (mEffect == 0 || mInBuffer == 0 || mOutBuffer == 0) return NO_INIT;
- status_t status;
- if (!mStatusMQ && (status = prepareForProcessing()) != OK) {
- return status;
- }
- if (needToResetBuffers() && (status = setProcessBuffers()) != OK) {
- return status;
- }
- // The data is already in the buffers, just need to flush it and wake up the server side.
- std::atomic_thread_fence(std::memory_order_release);
- mEfGroup->wake(mqFlag);
- uint32_t efState = 0;
-retry:
- status_t ret = mEfGroup->wait(
- static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING), &efState);
- if (efState & static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING)) {
- Result retval = Result::NOT_INITIALIZED;
- mStatusMQ->read(&retval);
- if (retval == Result::OK || retval == Result::INVALID_STATE) {
- // Sync back the changed contents of the buffer.
- std::atomic_thread_fence(std::memory_order_acquire);
- }
- return analyzeResult(retval);
- }
- if (ret == -EAGAIN || ret == -EINTR) {
- // Spurious wakeup. This normally retries no more than once.
- goto retry;
- }
- return ret;
-}
-
-status_t EffectHalHidl::setProcessBuffers() {
- Return<Result> ret = mEffect->setProcessBuffers(
- static_cast<EffectBufferHalHidl*>(mInBuffer.get())->hidlBuffer(),
- static_cast<EffectBufferHalHidl*>(mOutBuffer.get())->hidlBuffer());
- if (ret.isOk() && ret == Result::OK) {
- mBuffersChanged = false;
- return OK;
- }
- return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData) {
- if (mEffect == 0) return NO_INIT;
-
- // Special cases.
- if (cmdCode == EFFECT_CMD_SET_CONFIG || cmdCode == EFFECT_CMD_SET_CONFIG_REVERSE) {
- return setConfigImpl(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
- } else if (cmdCode == EFFECT_CMD_GET_CONFIG || cmdCode == EFFECT_CMD_GET_CONFIG_REVERSE) {
- return getConfigImpl(cmdCode, replySize, pReplyData);
- }
-
- // Common case.
- hidl_vec<uint8_t> hidlData;
- if (pCmdData != nullptr && cmdSize > 0) {
- hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
- }
- status_t status;
- uint32_t replySizeStub = 0;
- if (replySize == nullptr || pReplyData == nullptr) replySize = &replySizeStub;
- Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
- [&](int32_t s, const hidl_vec<uint8_t>& result) {
- status = s;
- if (status == 0) {
- if (*replySize > result.size()) *replySize = result.size();
- if (pReplyData != nullptr && *replySize > 0) {
- memcpy(pReplyData, &result[0], *replySize);
- }
- }
- });
- return ret.isOk() ? status : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
- if (mEffect == 0) return NO_INIT;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffect->getDescriptor(
- [&](Result r, const EffectDescriptor& result) {
- retval = r;
- if (retval == Result::OK) {
- effectDescriptorToHal(result, pDescriptor);
- }
- });
- return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::close() {
- if (mEffect == 0) return NO_INIT;
- Return<Result> ret = mEffect->close();
- return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
-}
-
-status_t EffectHalHidl::getConfigImpl(
- uint32_t cmdCode, uint32_t *replySize, void *pReplyData) {
- if (replySize == NULL || *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
- return BAD_VALUE;
- }
- status_t result = FAILED_TRANSACTION;
- Return<void> ret;
- if (cmdCode == EFFECT_CMD_GET_CONFIG) {
- ret = mEffect->getConfig([&] (Result r, const EffectConfig &hidlConfig) {
- result = analyzeResult(r);
- if (r == Result::OK) {
- effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
- }
- });
- } else {
- ret = mEffect->getConfigReverse([&] (Result r, const EffectConfig &hidlConfig) {
- result = analyzeResult(r);
- if (r == Result::OK) {
- effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
- }
- });
- }
- if (!ret.isOk()) {
- result = FAILED_TRANSACTION;
- }
- return result;
-}
-
-status_t EffectHalHidl::setConfigImpl(
- uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, void *pReplyData) {
- if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) ||
- replySize == NULL || *replySize != sizeof(int32_t) || pReplyData == NULL) {
- return BAD_VALUE;
- }
- const effect_config_t *halConfig = static_cast<effect_config_t*>(pCmdData);
- if (halConfig->inputCfg.bufferProvider.getBuffer != NULL ||
- halConfig->inputCfg.bufferProvider.releaseBuffer != NULL ||
- halConfig->outputCfg.bufferProvider.getBuffer != NULL ||
- halConfig->outputCfg.bufferProvider.releaseBuffer != NULL) {
- ALOGE("Buffer provider callbacks are not supported");
- }
- EffectConfig hidlConfig;
- effectConfigFromHal(*halConfig, &hidlConfig);
- Return<Result> ret = cmdCode == EFFECT_CMD_SET_CONFIG ?
- mEffect->setConfig(hidlConfig, nullptr, nullptr) :
- mEffect->setConfigReverse(hidlConfig, nullptr, nullptr);
- status_t result = FAILED_TRANSACTION;
- if (ret.isOk()) {
- result = analyzeResult(ret);
- *static_cast<int32_t*>(pReplyData) = result;
- }
- return result;
-}
-
-} // namespace android
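
The processImpl() handshake above is built on libfmq's EventFlag: the client wakes the REQUEST_PROCESS bit, the effect server processes and wakes DONE_PROCESSING, and spurious wakeups are retried. A reduced sketch of the client half, assuming only <fmq/EventFlag.h> (the helper name and timeout are illustrative, and the bit values stand in for the HIDL enum):

    #include <errno.h>
    #include <fmq/EventFlag.h>
    #include <utils/Errors.h>

    using android::hardware::EventFlag;

    // Wake the server-side request bit, then wait for the done bit, retrying on
    // EINTR/EAGAIN as the removed code does. A timeout keeps a missing server
    // from blocking the caller forever.
    static android::status_t requestAndWait(EventFlag* flag,
            uint32_t requestBit, uint32_t doneBit) {
        flag->wake(requestBit);
        uint32_t state = 0;
        android::status_t ret;
        do {
            ret = flag->wait(doneBit, &state, 1000000000 /*1 s, in ns*/);
        } while (ret == -EAGAIN || ret == -EINTR);   // spurious wakeup, retry
        return ret;
    }
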
diff --git a/media/libaudiohal/EffectHalLocal.cpp b/media/libaudiohal/EffectHalLocal.cpp
deleted file mode 100644
index dd465c3..0000000
--- a/media/libaudiohal/EffectHalLocal.cpp
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <media/EffectsFactoryApi.h>
-#include <utils/Log.h>
-
-#include "EffectHalLocal.h"
-
-namespace android {
-
-EffectHalLocal::EffectHalLocal(effect_handle_t handle)
- : mHandle(handle) {
-}
-
-EffectHalLocal::~EffectHalLocal() {
- int status = EffectRelease(mHandle);
- ALOGW_IF(status, "Error releasing effect %p: %s", mHandle, strerror(-status));
- mHandle = 0;
-}
-
-status_t EffectHalLocal::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
- mInBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalLocal::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
- mOutBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalLocal::process() {
- if (mInBuffer == nullptr || mOutBuffer == nullptr) {
- ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
- ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
- return NO_INIT;
- }
- return (*mHandle)->process(mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
-}
-
-status_t EffectHalLocal::processReverse() {
- if ((*mHandle)->process_reverse != NULL) {
- if (mInBuffer == nullptr || mOutBuffer == nullptr) {
- ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
- ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
- return NO_INIT;
- }
- return (*mHandle)->process_reverse(
- mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
- } else {
- return INVALID_OPERATION;
- }
-}
-
-status_t EffectHalLocal::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData) {
- return (*mHandle)->command(mHandle, cmdCode, cmdSize, pCmdData, replySize, pReplyData);
-}
-
-status_t EffectHalLocal::getDescriptor(effect_descriptor_t *pDescriptor) {
- return (*mHandle)->get_descriptor(mHandle, pDescriptor);
-}
-
-status_t EffectHalLocal::close() {
- return OK;
-}
-
-} // namespace android
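EffectHalLocal (deleted above) is a thin shim over the legacy effect_handle_t vtable: every call passes the handle back as the first argument, and process() hands the engine two audio_buffer_t descriptors. A short sketch of that call convention, assuming the handle and the PCM buffers come from elsewhere and using the audio_buffer_t layout from hardware/audio_effect.h:

    #include <hardware/audio_effect.h>

    // Hypothetical wrapper mirroring EffectHalLocal::process() above.
    int processOneBufferSketch(effect_handle_t handle,
                               int16_t* in, int16_t* out, size_t frames) {
        audio_buffer_t inBuf;
        inBuf.frameCount = frames;
        inBuf.s16 = in;                    // 16-bit PCM assumed for the sketch
        audio_buffer_t outBuf;
        outBuf.frameCount = frames;
        outBuf.s16 = out;
        // effect_handle_t is a pointer to a table of function pointers,
        // hence the (*handle)-> indirection used throughout the deleted code.
        return (*handle)->process(handle, &inBuf, &outBuf);
    }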
diff --git a/media/libaudiohal/EffectHalLocal.h b/media/libaudiohal/EffectHalLocal.h
deleted file mode 100644
index 693fb50..0000000
--- a/media/libaudiohal/EffectHalLocal.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
-#define ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
-
-#include <hardware/audio_effect.h>
-#include <media/audiohal/EffectHalInterface.h>
-
-namespace android {
-
-class EffectHalLocal : public EffectHalInterface
-{
- public:
- // Set the input buffer.
- virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
-
- // Set the output buffer.
- virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
-
- // Effect process function.
- virtual status_t process();
-
- // Process reverse stream function. This function is used to pass
- // a reference stream to the effect engine.
- virtual status_t processReverse();
-
- // Send a command and receive a response to/from effect engine.
- virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData);
-
- // Returns the effect descriptor.
- virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
-
- // Free resources on the remote side.
- virtual status_t close();
-
- // Whether it's a local implementation.
- virtual bool isLocal() const { return true; }
-
- effect_handle_t handle() const { return mHandle; }
-
- private:
- effect_handle_t mHandle;
- sp<EffectBufferHalInterface> mInBuffer;
- sp<EffectBufferHalInterface> mOutBuffer;
-
- friend class EffectsFactoryHalLocal;
-
- // Can not be constructed directly by clients.
- explicit EffectHalLocal(effect_handle_t handle);
-
- // The destructor automatically releases the effect.
- virtual ~EffectHalLocal();
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.cpp b/media/libaudiohal/EffectsFactoryHalHidl.cpp
deleted file mode 100644
index a8081b7..0000000
--- a/media/libaudiohal/EffectsFactoryHalHidl.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectsFactoryHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <cutils/native_handle.h>
-
-#include "ConversionHelperHidl.h"
-#include "EffectHalHidl.h"
-#include "EffectsFactoryHalHidl.h"
-#include "HidlUtils.h"
-
-using ::android::hardware::audio::common::V2_0::Uuid;
-using ::android::hardware::audio::effect::V2_0::IEffect;
-using ::android::hardware::audio::effect::V2_0::Result;
-using ::android::hardware::Return;
-
-namespace android {
-
-// static
-sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
- return new EffectsFactoryHalHidl();
-}
-
-// static
-bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
- return memcmp(pEffectUuid, EFFECT_UUID_NULL, sizeof(effect_uuid_t)) == 0;
-}
-
-EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
- mEffectsFactory = IEffectsFactory::getService();
- if (mEffectsFactory == 0) {
- ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
- exit(1);
- }
-}
-
-EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
-}
-
-status_t EffectsFactoryHalHidl::queryAllDescriptors() {
- if (mEffectsFactory == 0) return NO_INIT;
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->getAllDescriptors(
- [&](Result r, const hidl_vec<EffectDescriptor>& result) {
- retval = r;
- if (retval == Result::OK) {
- mLastDescriptors = result;
- }
- });
- if (ret.isOk()) {
- return retval == Result::OK ? OK : NO_INIT;
- }
- mLastDescriptors.resize(0);
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::queryNumberEffects(uint32_t *pNumEffects) {
- status_t queryResult = queryAllDescriptors();
- if (queryResult == OK) {
- *pNumEffects = mLastDescriptors.size();
- }
- return queryResult;
-}
-
-status_t EffectsFactoryHalHidl::getDescriptor(
- uint32_t index, effect_descriptor_t *pDescriptor) {
- // TODO: We need somehow to track the changes on the server side
- // or figure out how to convert everybody to query all the descriptors at once.
- // TODO: check for nullptr
- if (mLastDescriptors.size() == 0) {
- status_t queryResult = queryAllDescriptors();
- if (queryResult != OK) return queryResult;
- }
- if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
- EffectHalHidl::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
- return OK;
-}
-
-status_t EffectsFactoryHalHidl::getDescriptor(
- const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
- // TODO: check for nullptr
- if (mEffectsFactory == 0) return NO_INIT;
- Uuid hidlUuid;
- HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->getDescriptor(hidlUuid,
- [&](Result r, const EffectDescriptor& result) {
- retval = r;
- if (retval == Result::OK) {
- EffectHalHidl::effectDescriptorToHal(result, pDescriptor);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
- else return NO_INIT;
- }
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::createEffect(
- const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect) {
- if (mEffectsFactory == 0) return NO_INIT;
- Uuid hidlUuid;
- HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
- Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->createEffect(
- hidlUuid, sessionId, ioId,
- [&](Result r, const sp<IEffect>& result, uint64_t effectId) {
- retval = r;
- if (retval == Result::OK) {
- *effect = new EffectHalHidl(result, effectId);
- }
- });
- if (ret.isOk()) {
- if (retval == Result::OK) return OK;
- else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
- else return NO_INIT;
- }
- return processReturn(__FUNCTION__, ret);
-}
-
-status_t EffectsFactoryHalHidl::dumpEffects(int fd) {
- if (mEffectsFactory == 0) return NO_INIT;
- native_handle_t* hidlHandle = native_handle_create(1, 0);
- hidlHandle->data[0] = fd;
- Return<void> ret = mEffectsFactory->debugDump(hidlHandle);
- native_handle_delete(hidlHandle);
- return processReturn(__FUNCTION__, ret);
-}
-
-} // namespace android
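Both factory backends expose the same enumeration contract: queryNumberEffects() reports how many descriptors are available and getDescriptor(index, ...) fills one in; the HIDL version above serves these from a cached hidl_vec fetched by queryAllDescriptors(). A hedged sketch of a caller walking that list (the function name and log tag are illustrative):

    #define LOG_TAG "EffectListSketch"
    #include <hardware/audio_effect.h>
    #include <media/audiohal/EffectsFactoryHalInterface.h>
    #include <utils/Log.h>

    void listEffectsSketch() {
        android::sp<android::EffectsFactoryHalInterface> factory =
                android::EffectsFactoryHalInterface::create();
        if (factory == nullptr) return;

        uint32_t count = 0;
        if (factory->queryNumberEffects(&count) != android::OK) return;

        for (uint32_t i = 0; i < count; ++i) {
            effect_descriptor_t desc;
            // Indexed lookup; the HIDL backend answers from its cached descriptor list.
            if (factory->getDescriptor(i, &desc) == android::OK) {
                ALOGV("effect %u: %s", i, desc.name);
            }
        }
    }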
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.h b/media/libaudiohal/EffectsFactoryHalHidl.h
deleted file mode 100644
index e89f042..0000000
--- a/media/libaudiohal/EffectsFactoryHalHidl.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
-#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
-
-#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/2.0/types.h>
-#include <media/audiohal/EffectsFactoryHalInterface.h>
-
-namespace android {
-
-using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
-using ::android::hardware::audio::effect::V2_0::IEffectsFactory;
-using ::android::hardware::hidl_vec;
-
-class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
-{
- public:
- // Returns the number of different effects in all loaded libraries.
- virtual status_t queryNumberEffects(uint32_t *pNumEffects);
-
- // Returns a descriptor of the next available effect.
- virtual status_t getDescriptor(uint32_t index,
- effect_descriptor_t *pDescriptor);
-
- virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
- effect_descriptor_t *pDescriptor);
-
- // Creates an effect engine of the specified type.
- // To release the effect engine, it is necessary to release references
- // to the returned effect object.
- virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
- int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect);
-
- virtual status_t dumpEffects(int fd);
-
- private:
- friend class EffectsFactoryHalInterface;
-
- sp<IEffectsFactory> mEffectsFactory;
- hidl_vec<EffectDescriptor> mLastDescriptors;
-
- // Can not be constructed directly by clients.
- EffectsFactoryHalHidl();
- virtual ~EffectsFactoryHalHidl();
-
- status_t queryAllDescriptors();
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
new file mode 100644
index 0000000..ead1fa2
--- /dev/null
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
+
+#include <EffectsFactoryHalHidl.h>
+#include <libaudiohal/4.0/EffectsFactoryHalHidl.h>
+
+
+namespace android {
+
+// static
+sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
+ if (hardware::audio::effect::V4_0::IEffectsFactory::getService() != nullptr) {
+ return new V4_0::EffectsFactoryHalHidl();
+ }
+ if (hardware::audio::effect::V2_0::IEffectsFactory::getService() != nullptr) {
+ return new EffectsFactoryHalHidl();
+ }
+ return nullptr;
+}
+
+// static
+bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
+ return memcmp(pEffectUuid, EFFECT_UUID_NULL, sizeof(effect_uuid_t)) == 0;
+}
+
+} // namespace android
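The new dispatcher probes the 4.0 factory first and falls back to 2.0; unlike the deleted V2_0-only constructor, which called exit(1) when the service was missing, it can now return nullptr, so callers have to check. A minimal hedged usage sketch (the helper name and log tag are illustrative):

    #define LOG_TAG "EffectsFactorySketch"
    #include <media/audiohal/EffectsFactoryHalInterface.h>
    #include <utils/Log.h>

    android::sp<android::EffectsFactoryHalInterface> openEffectsFactoryOrNull() {
        // Which HIDL version backs the returned object is hidden from the caller.
        android::sp<android::EffectsFactoryHalInterface> factory =
                android::EffectsFactoryHalInterface::create();
        ALOGE_IF(factory == nullptr,
                 "No IEffectsFactory service (2.0 or 4.0) is registered");
        return factory;  // may be nullptr; callers must check before use
    }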
diff --git a/media/libaudiohal/EffectsFactoryHalLocal.cpp b/media/libaudiohal/EffectsFactoryHalLocal.cpp
deleted file mode 100644
index bbdef5d..0000000
--- a/media/libaudiohal/EffectsFactoryHalLocal.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/EffectsFactoryApi.h>
-
-#include "EffectHalLocal.h"
-#include "EffectsFactoryHalLocal.h"
-
-namespace android {
-
-// static
-sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
- return new EffectsFactoryHalLocal();
-}
-
-// static
-bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
- return EffectIsNullUuid(pEffectUuid);
-}
-
-status_t EffectsFactoryHalLocal::queryNumberEffects(uint32_t *pNumEffects) {
- return EffectQueryNumberEffects(pNumEffects);
-}
-
-status_t EffectsFactoryHalLocal::getDescriptor(
- uint32_t index, effect_descriptor_t *pDescriptor) {
- return EffectQueryEffect(index, pDescriptor);
-}
-
-status_t EffectsFactoryHalLocal::getDescriptor(
- const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
- return EffectGetDescriptor(pEffectUuid, pDescriptor);
-}
-
-status_t EffectsFactoryHalLocal::createEffect(
- const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect) {
- effect_handle_t handle;
- int result = EffectCreate(pEffectUuid, sessionId, ioId, &handle);
- if (result == 0) {
- *effect = new EffectHalLocal(handle);
- }
- return result;
-}
-
-status_t EffectsFactoryHalLocal::dumpEffects(int fd) {
- return EffectDumpEffects(fd);
-}
-
-} // namespace android
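Creating an engine goes through the same interface regardless of backend: createEffect() takes the effect UUID plus the session and I/O handles and returns an sp<EffectHalInterface>, and dropping the last reference releases the engine (the local wrapper above calls EffectRelease() from its destructor). A hedged caller sketch with placeholder arguments and an illustrative function name:

    #define LOG_TAG "CreateEffectSketch"
    #include <hardware/audio_effect.h>
    #include <media/audiohal/EffectHalInterface.h>
    #include <media/audiohal/EffectsFactoryHalInterface.h>
    #include <utils/Log.h>

    android::sp<android::EffectHalInterface> createEffectSketch(
            const effect_uuid_t* uuid, int32_t sessionId, int32_t ioId) {
        using android::EffectsFactoryHalInterface;
        android::sp<EffectsFactoryHalInterface> factory = EffectsFactoryHalInterface::create();
        if (factory == nullptr || EffectsFactoryHalInterface::isNullUuid(uuid)) {
            return nullptr;
        }
        android::sp<android::EffectHalInterface> effect;
        if (factory->createEffect(uuid, sessionId, ioId, &effect) != android::OK) {
            ALOGW("createEffect failed");
            return nullptr;
        }
        return effect;  // dropping the last sp<> later releases the engine
    }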
diff --git a/media/libaudiohal/EffectsFactoryHalLocal.h b/media/libaudiohal/EffectsFactoryHalLocal.h
deleted file mode 100644
index d5b81be..0000000
--- a/media/libaudiohal/EffectsFactoryHalLocal.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
-#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
-
-#include <media/audiohal/EffectsFactoryHalInterface.h>
-
-namespace android {
-
-class EffectsFactoryHalLocal : public EffectsFactoryHalInterface
-{
- public:
- // Returns the number of different effects in all loaded libraries.
- virtual status_t queryNumberEffects(uint32_t *pNumEffects);
-
- // Returns a descriptor of the next available effect.
- virtual status_t getDescriptor(uint32_t index,
- effect_descriptor_t *pDescriptor);
-
- virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
- effect_descriptor_t *pDescriptor);
-
- // Creates an effect engine of the specified type.
- // To release the effect engine, it is necessary to release references
- // to the returned effect object.
- virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
- int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect);
-
- virtual status_t dumpEffects(int fd);
-
- private:
- friend class EffectsFactoryHalInterface;
-
- // Can not be constructed directly by clients.
- EffectsFactoryHalLocal() {}
-
- virtual ~EffectsFactoryHalLocal() {}
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/HalDeathHandlerHidl.cpp b/media/libaudiohal/HalDeathHandlerHidl.cpp
index a742671..1e3ab58 100644
--- a/media/libaudiohal/HalDeathHandlerHidl.cpp
+++ b/media/libaudiohal/HalDeathHandlerHidl.cpp
@@ -48,12 +48,13 @@
void HalDeathHandler::serviceDied(uint64_t /*cookie*/, const wp<IBase>& /*who*/) {
// No matter which of the service objects has died,
- // we need to run all the registered handlers and crash our process.
+ // we need to run all the registered handlers and exit.
std::lock_guard<std::mutex> guard(mHandlersLock);
for (const auto& handler : mHandlers) {
handler.second();
}
- LOG_ALWAYS_FATAL("HAL server crashed, need to restart");
+ ALOGE("HAL server crashed, audio server is restarting");
+ exit(1);
}
} // namespace android
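The hunk above changes what happens after the registered handlers run: the process now exits cleanly instead of aborting through LOG_ALWAYS_FATAL. A standalone sketch of the pattern, using hypothetical names rather than the real HalDeathHandler API: clients register cleanup callbacks keyed by a cookie, and a single death notification drains them all before restarting the process.

    #include <cstdlib>
    #include <functional>
    #include <mutex>
    #include <unordered_map>

    // Illustrative only; not the actual HalDeathHandler interface.
    class DeathHandlerSketch {
      public:
        void registerHandler(void* cookie, std::function<void()> handler) {
            std::lock_guard<std::mutex> guard(mLock);
            mHandlers[cookie] = std::move(handler);
        }
        void onServiceDied() {
            std::lock_guard<std::mutex> guard(mLock);
            for (const auto& entry : mHandlers) {
                entry.second();  // let each client flush state / unblock waiters
            }
            exit(1);             // restart the host process, as in the new code path
        }
      private:
        std::mutex mLock;
        std::unordered_map<void*, std::function<void()>> mHandlers;
    };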
diff --git a/media/libaudiohal/StreamHalHidl.cpp b/media/libaudiohal/StreamHalHidl.cpp
deleted file mode 100644
index 0cafa36..0000000
--- a/media/libaudiohal/StreamHalHidl.cpp
+++ /dev/null
@@ -1,752 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "StreamHalHidl"
-//#define LOG_NDEBUG 0
-
-#include <android/hardware/audio/2.0/IStreamOutCallback.h>
-#include <hwbinder/IPCThreadState.h>
-#include <mediautils/SchedulingPolicyService.h>
-#include <utils/Log.h>
-
-#include "DeviceHalHidl.h"
-#include "EffectHalHidl.h"
-#include "StreamHalHidl.h"
-
-using ::android::hardware::audio::common::V2_0::AudioChannelMask;
-using ::android::hardware::audio::common::V2_0::AudioFormat;
-using ::android::hardware::audio::common::V2_0::ThreadInfo;
-using ::android::hardware::audio::V2_0::AudioDrain;
-using ::android::hardware::audio::V2_0::IStreamOutCallback;
-using ::android::hardware::audio::V2_0::MessageQueueFlagBits;
-using ::android::hardware::audio::V2_0::MmapBufferInfo;
-using ::android::hardware::audio::V2_0::MmapPosition;
-using ::android::hardware::audio::V2_0::ParameterValue;
-using ::android::hardware::audio::V2_0::Result;
-using ::android::hardware::audio::V2_0::TimeSpec;
-using ::android::hardware::MQDescriptorSync;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ReadCommand = ::android::hardware::audio::V2_0::IStreamIn::ReadCommand;
-
-namespace android {
-
-StreamHalHidl::StreamHalHidl(IStream *stream)
- : ConversionHelperHidl("Stream"),
- mStream(stream),
- mHalThreadPriority(HAL_THREAD_PRIORITY_DEFAULT),
- mCachedBufferSize(0){
-
- // Instrument audio signal power logging.
- // Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
- // Obtain audio properties (see StreamHalHidl::getAudioProperties() below).
- Return<void> ret = mStream->getAudioProperties(
- [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
- mStreamPowerLog.init(sr,
- static_cast<audio_channel_mask_t>(m),
- static_cast<audio_format_t>(f));
- });
- }
-}
-
-StreamHalHidl::~StreamHalHidl() {
- mStream = nullptr;
-}
-
-status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
- if (!mStream) return NO_INIT;
- return processReturn("getSampleRate", mStream->getSampleRate(), rate);
-}
-
-status_t StreamHalHidl::getBufferSize(size_t *size) {
- if (!mStream) return NO_INIT;
- status_t status = processReturn("getBufferSize", mStream->getBufferSize(), size);
- if (status == OK) {
- mCachedBufferSize = *size;
- }
- return status;
-}
-
-status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
- if (!mStream) return NO_INIT;
- return processReturn("getChannelMask", mStream->getChannelMask(), mask);
-}
-
-status_t StreamHalHidl::getFormat(audio_format_t *format) {
- if (!mStream) return NO_INIT;
- return processReturn("getFormat", mStream->getFormat(), format);
-}
-
-status_t StreamHalHidl::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
- if (!mStream) return NO_INIT;
- Return<void> ret = mStream->getAudioProperties(
- [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
- *sampleRate = sr;
- *mask = static_cast<audio_channel_mask_t>(m);
- *format = static_cast<audio_format_t>(f);
- });
- return processReturn("getAudioProperties", ret);
-}
-
-status_t StreamHalHidl::setParameters(const String8& kvPairs) {
- if (!mStream) return NO_INIT;
- hidl_vec<ParameterValue> hidlParams;
- status_t status = parametersFromHal(kvPairs, &hidlParams);
- if (status != OK) return status;
- return processReturn("setParameters", mStream->setParameters(hidlParams));
-}
-
-status_t StreamHalHidl::getParameters(const String8& keys, String8 *values) {
- values->clear();
- if (!mStream) return NO_INIT;
- hidl_vec<hidl_string> hidlKeys;
- status_t status = keysFromHal(keys, &hidlKeys);
- if (status != OK) return status;
- Result retval;
- Return<void> ret = mStream->getParameters(
- hidlKeys,
- [&](Result r, const hidl_vec<ParameterValue>& parameters) {
- retval = r;
- if (retval == Result::OK) {
- parametersToHal(parameters, values);
- }
- });
- return processReturn("getParameters", ret, retval);
-}
-
-status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
- if (!mStream) return NO_INIT;
- return processReturn("addEffect", mStream->addEffect(
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
-}
-
-status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
- if (!mStream) return NO_INIT;
- return processReturn("removeEffect", mStream->removeEffect(
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
-}
-
-status_t StreamHalHidl::standby() {
- if (!mStream) return NO_INIT;
- return processReturn("standby", mStream->standby());
-}
-
-status_t StreamHalHidl::dump(int fd) {
- if (!mStream) return NO_INIT;
- native_handle_t* hidlHandle = native_handle_create(1, 0);
- hidlHandle->data[0] = fd;
- Return<void> ret = mStream->debugDump(hidlHandle);
- native_handle_delete(hidlHandle);
- mStreamPowerLog.dump(fd);
- return processReturn("dump", ret);
-}
-
-status_t StreamHalHidl::start() {
- if (!mStream) return NO_INIT;
- return processReturn("start", mStream->start());
-}
-
-status_t StreamHalHidl::stop() {
- if (!mStream) return NO_INIT;
- return processReturn("stop", mStream->stop());
-}
-
-status_t StreamHalHidl::createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) {
- Result retval;
- Return<void> ret = mStream->createMmapBuffer(
- minSizeFrames,
- [&](Result r, const MmapBufferInfo& hidlInfo) {
- retval = r;
- if (retval == Result::OK) {
- const native_handle *handle = hidlInfo.sharedMemory.handle();
- if (handle->numFds > 0) {
- info->shared_memory_fd = handle->data[0];
- info->buffer_size_frames = hidlInfo.bufferSizeFrames;
- info->burst_size_frames = hidlInfo.burstSizeFrames;
- // info->shared_memory_address is not needed in HIDL context
- info->shared_memory_address = NULL;
- } else {
- retval = Result::NOT_INITIALIZED;
- }
- }
- });
- return processReturn("createMmapBuffer", ret, retval);
-}
-
-status_t StreamHalHidl::getMmapPosition(struct audio_mmap_position *position) {
- Result retval;
- Return<void> ret = mStream->getMmapPosition(
- [&](Result r, const MmapPosition& hidlPosition) {
- retval = r;
- if (retval == Result::OK) {
- position->time_nanoseconds = hidlPosition.timeNanoseconds;
- position->position_frames = hidlPosition.positionFrames;
- }
- });
- return processReturn("getMmapPosition", ret, retval);
-}
-
-status_t StreamHalHidl::setHalThreadPriority(int priority) {
- mHalThreadPriority = priority;
- return OK;
-}
-
-status_t StreamHalHidl::getCachedBufferSize(size_t *size) {
- if (mCachedBufferSize != 0) {
- *size = mCachedBufferSize;
- return OK;
- }
- return getBufferSize(size);
-}
-
-bool StreamHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
- if (mHalThreadPriority == HAL_THREAD_PRIORITY_DEFAULT) {
- return true;
- }
- int err = requestPriority(
- threadPid, threadId,
- mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
- ALOGE_IF(err, "failed to set priority %d for pid %d tid %d; error %d",
- mHalThreadPriority, threadPid, threadId, err);
- // Audio will still work, but latency will be higher and sometimes unacceptable.
- return err == 0;
-}
-
-namespace {
-
-/* Notes on callback ownership.
-
-This is how the (Hw)Binder ownership model works. The server implementation
-is owned by the Binder framework (via sp<>). Proxies are owned by clients.
-When the last proxy disappears, the Binder framework releases the server impl.
-
-Thus, there is no need to keep any references to StreamOutCallback (this is
-the server impl) -- it lives as long as the HAL server holds a strong ref to
-the IStreamOutCallback proxy. We clear that reference by calling 'clearCallback'
-from the destructor of StreamOutHalHidl.
-
-The callback only keeps a weak reference to the stream. The stream is owned
-by AudioFlinger.
-
-*/
-
-struct StreamOutCallback : public IStreamOutCallback {
- StreamOutCallback(const wp<StreamOutHalHidl>& stream) : mStream(stream) {}
-
- // IStreamOutCallback implementation
- Return<void> onWriteReady() override {
- sp<StreamOutHalHidl> stream = mStream.promote();
- if (stream != 0) {
- stream->onWriteReady();
- }
- return Void();
- }
-
- Return<void> onDrainReady() override {
- sp<StreamOutHalHidl> stream = mStream.promote();
- if (stream != 0) {
- stream->onDrainReady();
- }
- return Void();
- }
-
- Return<void> onError() override {
- sp<StreamOutHalHidl> stream = mStream.promote();
- if (stream != 0) {
- stream->onError();
- }
- return Void();
- }
-
- private:
- wp<StreamOutHalHidl> mStream;
-};
-
-} // namespace
-
-StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
- : StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
-}
-
-StreamOutHalHidl::~StreamOutHalHidl() {
- if (mStream != 0) {
- if (mCallback.unsafe_get()) {
- processReturn("clearCallback", mStream->clearCallback());
- }
- processReturn("close", mStream->close());
- mStream.clear();
- }
- mCallback.clear();
- hardware::IPCThreadState::self()->flushCommands();
- if (mEfGroup) {
- EventFlag::deleteEventFlag(&mEfGroup);
- }
-}
-
-status_t StreamOutHalHidl::getFrameSize(size_t *size) {
- if (mStream == 0) return NO_INIT;
- return processReturn("getFrameSize", mStream->getFrameSize(), size);
-}
-
-status_t StreamOutHalHidl::getLatency(uint32_t *latency) {
- if (mStream == 0) return NO_INIT;
- if (mWriterClient == gettid() && mCommandMQ) {
- return callWriterThread(
- WriteCommand::GET_LATENCY, "getLatency", nullptr, 0,
- [&](const WriteStatus& writeStatus) {
- *latency = writeStatus.reply.latencyMs;
- });
- } else {
- return processReturn("getLatency", mStream->getLatency(), latency);
- }
-}
-
-status_t StreamOutHalHidl::setVolume(float left, float right) {
- if (mStream == 0) return NO_INIT;
- return processReturn("setVolume", mStream->setVolume(left, right));
-}
-
-status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
- if (mStream == 0) return NO_INIT;
- *written = 0;
-
- if (bytes == 0 && !mDataMQ) {
- // Can't determine the size for the MQ buffer. Wait for a non-empty write request.
- ALOGW_IF(mCallback.unsafe_get(), "First call to async write with 0 bytes");
- return OK;
- }
-
- status_t status;
- if (!mDataMQ) {
- // If playback starts close to the end of a compressed track, the number of bytes
- // that needs to be written is less than the actual buffer size. Use the
- // full buffer size for the MQ, since otherwise data would be truncated after
- // seeking back to the middle of the track.
- size_t bufferSize;
- if ((status = getCachedBufferSize(&bufferSize)) != OK) {
- return status;
- }
- if (bytes > bufferSize) bufferSize = bytes;
- if ((status = prepareForWriting(bufferSize)) != OK) {
- return status;
- }
- }
-
- status = callWriterThread(
- WriteCommand::WRITE, "write", static_cast<const uint8_t*>(buffer), bytes,
- [&] (const WriteStatus& writeStatus) {
- *written = writeStatus.reply.written;
- // Diagnostics of the cause of b/35813113.
- ALOGE_IF(*written > bytes,
- "hal reports more bytes written than asked for: %lld > %lld",
- (long long)*written, (long long)bytes);
- });
- mStreamPowerLog.log(buffer, *written);
- return status;
-}
-
-status_t StreamOutHalHidl::callWriterThread(
- WriteCommand cmd, const char* cmdName,
- const uint8_t* data, size_t dataSize, StreamOutHalHidl::WriterCallback callback) {
- if (!mCommandMQ->write(&cmd)) {
- ALOGE("command message queue write failed for \"%s\"", cmdName);
- return -EAGAIN;
- }
- if (data != nullptr) {
- size_t availableToWrite = mDataMQ->availableToWrite();
- if (dataSize > availableToWrite) {
- ALOGW("truncating write data from %lld to %lld due to insufficient data queue space",
- (long long)dataSize, (long long)availableToWrite);
- dataSize = availableToWrite;
- }
- if (!mDataMQ->write(data, dataSize)) {
- ALOGE("data message queue write failed for \"%s\"", cmdName);
- }
- }
- mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
-
- // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
- uint32_t efState = 0;
-retry:
- status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState);
- if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
- WriteStatus writeStatus;
- writeStatus.retval = Result::NOT_INITIALIZED;
- if (!mStatusMQ->read(&writeStatus)) {
- ALOGE("status message read failed for \"%s\"", cmdName);
- }
- if (writeStatus.retval == Result::OK) {
- ret = OK;
- callback(writeStatus);
- } else {
- ret = processReturn(cmdName, writeStatus.retval);
- }
- return ret;
- }
- if (ret == -EAGAIN || ret == -EINTR) {
- // Spurious wakeup. This normally retries no more than once.
- goto retry;
- }
- return ret;
-}
-
-status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
- std::unique_ptr<CommandMQ> tempCommandMQ;
- std::unique_ptr<DataMQ> tempDataMQ;
- std::unique_ptr<StatusMQ> tempStatusMQ;
- Result retval;
- pid_t halThreadPid, halThreadTid;
- Return<void> ret = mStream->prepareForWriting(
- 1, bufferSize,
- [&](Result r,
- const CommandMQ::Descriptor& commandMQ,
- const DataMQ::Descriptor& dataMQ,
- const StatusMQ::Descriptor& statusMQ,
- const ThreadInfo& halThreadInfo) {
- retval = r;
- if (retval == Result::OK) {
- tempCommandMQ.reset(new CommandMQ(commandMQ));
- tempDataMQ.reset(new DataMQ(dataMQ));
- tempStatusMQ.reset(new StatusMQ(statusMQ));
- if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
- }
- halThreadPid = halThreadInfo.pid;
- halThreadTid = halThreadInfo.tid;
- }
- });
- if (!ret.isOk() || retval != Result::OK) {
- return processReturn("prepareForWriting", ret, retval);
- }
- if (!tempCommandMQ || !tempCommandMQ->isValid() ||
- !tempDataMQ || !tempDataMQ->isValid() ||
- !tempStatusMQ || !tempStatusMQ->isValid() ||
- !mEfGroup) {
- ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
- ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
- "Command message queue for writing is invalid");
- ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for writing");
- ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for writing is invalid");
- ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for writing");
- ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
- "Status message queue for writing is invalid");
- ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
- return NO_INIT;
- }
- requestHalThreadPriority(halThreadPid, halThreadTid);
-
- mCommandMQ = std::move(tempCommandMQ);
- mDataMQ = std::move(tempDataMQ);
- mStatusMQ = std::move(tempStatusMQ);
- mWriterClient = gettid();
- return OK;
-}
-
-status_t StreamOutHalHidl::getRenderPosition(uint32_t *dspFrames) {
- if (mStream == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mStream->getRenderPosition(
- [&](Result r, uint32_t d) {
- retval = r;
- if (retval == Result::OK) {
- *dspFrames = d;
- }
- });
- return processReturn("getRenderPosition", ret, retval);
-}
-
-status_t StreamOutHalHidl::getNextWriteTimestamp(int64_t *timestamp) {
- if (mStream == 0) return NO_INIT;
- Result retval;
- Return<void> ret = mStream->getNextWriteTimestamp(
- [&](Result r, int64_t t) {
- retval = r;
- if (retval == Result::OK) {
- *timestamp = t;
- }
- });
- return processReturn("getRenderPosition", ret, retval);
-}
-
-status_t StreamOutHalHidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
- if (mStream == 0) return NO_INIT;
- status_t status = processReturn(
- "setCallback", mStream->setCallback(new StreamOutCallback(this)));
- if (status == OK) {
- mCallback = callback;
- }
- return status;
-}
-
-status_t StreamOutHalHidl::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
- if (mStream == 0) return NO_INIT;
- Return<void> ret = mStream->supportsPauseAndResume(
- [&](bool p, bool r) {
- *supportsPause = p;
- *supportsResume = r;
- });
- return processReturn("supportsPauseAndResume", ret);
-}
-
-status_t StreamOutHalHidl::pause() {
- if (mStream == 0) return NO_INIT;
- return processReturn("pause", mStream->pause());
-}
-
-status_t StreamOutHalHidl::resume() {
- if (mStream == 0) return NO_INIT;
- return processReturn("pause", mStream->resume());
-}
-
-status_t StreamOutHalHidl::supportsDrain(bool *supportsDrain) {
- if (mStream == 0) return NO_INIT;
- return processReturn("supportsDrain", mStream->supportsDrain(), supportsDrain);
-}
-
-status_t StreamOutHalHidl::drain(bool earlyNotify) {
- if (mStream == 0) return NO_INIT;
- return processReturn(
- "drain", mStream->drain(earlyNotify ? AudioDrain::EARLY_NOTIFY : AudioDrain::ALL));
-}
-
-status_t StreamOutHalHidl::flush() {
- if (mStream == 0) return NO_INIT;
- return processReturn("pause", mStream->flush());
-}
-
-status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
- if (mStream == 0) return NO_INIT;
- if (mWriterClient == gettid() && mCommandMQ) {
- return callWriterThread(
- WriteCommand::GET_PRESENTATION_POSITION, "getPresentationPosition", nullptr, 0,
- [&](const WriteStatus& writeStatus) {
- *frames = writeStatus.reply.presentationPosition.frames;
- timestamp->tv_sec = writeStatus.reply.presentationPosition.timeStamp.tvSec;
- timestamp->tv_nsec = writeStatus.reply.presentationPosition.timeStamp.tvNSec;
- });
- } else {
- Result retval;
- Return<void> ret = mStream->getPresentationPosition(
- [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
- retval = r;
- if (retval == Result::OK) {
- *frames = hidlFrames;
- timestamp->tv_sec = hidlTimeStamp.tvSec;
- timestamp->tv_nsec = hidlTimeStamp.tvNSec;
- }
- });
- return processReturn("getPresentationPosition", ret, retval);
- }
-}
-
-void StreamOutHalHidl::onWriteReady() {
- sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
- if (callback == 0) return;
- ALOGV("asyncCallback onWriteReady");
- callback->onWriteReady();
-}
-
-void StreamOutHalHidl::onDrainReady() {
- sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
- if (callback == 0) return;
- ALOGV("asyncCallback onDrainReady");
- callback->onDrainReady();
-}
-
-void StreamOutHalHidl::onError() {
- sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
- if (callback == 0) return;
- ALOGV("asyncCallback onError");
- callback->onError();
-}
-
-
-StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
- : StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
-}
-
-StreamInHalHidl::~StreamInHalHidl() {
- if (mStream != 0) {
- processReturn("close", mStream->close());
- mStream.clear();
- hardware::IPCThreadState::self()->flushCommands();
- }
- if (mEfGroup) {
- EventFlag::deleteEventFlag(&mEfGroup);
- }
-}
-
-status_t StreamInHalHidl::getFrameSize(size_t *size) {
- if (mStream == 0) return NO_INIT;
- return processReturn("getFrameSize", mStream->getFrameSize(), size);
-}
-
-status_t StreamInHalHidl::setGain(float gain) {
- if (mStream == 0) return NO_INIT;
- return processReturn("setGain", mStream->setGain(gain));
-}
-
-status_t StreamInHalHidl::read(void *buffer, size_t bytes, size_t *read) {
- if (mStream == 0) return NO_INIT;
- *read = 0;
-
- if (bytes == 0 && !mDataMQ) {
- // Can't determine the size for the MQ buffer. Wait for a non-empty read request.
- return OK;
- }
-
- status_t status;
- if (!mDataMQ && (status = prepareForReading(bytes)) != OK) {
- return status;
- }
-
- ReadParameters params;
- params.command = ReadCommand::READ;
- params.params.read = bytes;
- status = callReaderThread(params, "read",
- [&](const ReadStatus& readStatus) {
- const size_t availToRead = mDataMQ->availableToRead();
- if (!mDataMQ->read(static_cast<uint8_t*>(buffer), std::min(bytes, availToRead))) {
- ALOGE("data message queue read failed for \"read\"");
- }
- ALOGW_IF(availToRead != readStatus.reply.read,
- "HAL read report inconsistent: mq = %d, status = %d",
- (int32_t)availToRead, (int32_t)readStatus.reply.read);
- *read = readStatus.reply.read;
- });
- mStreamPowerLog.log(buffer, *read);
- return status;
-}
-
-status_t StreamInHalHidl::callReaderThread(
- const ReadParameters& params, const char* cmdName,
- StreamInHalHidl::ReaderCallback callback) {
- if (!mCommandMQ->write(&params)) {
- ALOGW("command message queue write failed");
- return -EAGAIN;
- }
- mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
-
- // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
- uint32_t efState = 0;
-retry:
- status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState);
- if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)) {
- ReadStatus readStatus;
- readStatus.retval = Result::NOT_INITIALIZED;
- if (!mStatusMQ->read(&readStatus)) {
- ALOGE("status message read failed for \"%s\"", cmdName);
- }
- if (readStatus.retval == Result::OK) {
- ret = OK;
- callback(readStatus);
- } else {
- ret = processReturn(cmdName, readStatus.retval);
- }
- return ret;
- }
- if (ret == -EAGAIN || ret == -EINTR) {
- // Spurious wakeup. This normally retries no more than once.
- goto retry;
- }
- return ret;
-}
-
-status_t StreamInHalHidl::prepareForReading(size_t bufferSize) {
- std::unique_ptr<CommandMQ> tempCommandMQ;
- std::unique_ptr<DataMQ> tempDataMQ;
- std::unique_ptr<StatusMQ> tempStatusMQ;
- Result retval;
- pid_t halThreadPid, halThreadTid;
- Return<void> ret = mStream->prepareForReading(
- 1, bufferSize,
- [&](Result r,
- const CommandMQ::Descriptor& commandMQ,
- const DataMQ::Descriptor& dataMQ,
- const StatusMQ::Descriptor& statusMQ,
- const ThreadInfo& halThreadInfo) {
- retval = r;
- if (retval == Result::OK) {
- tempCommandMQ.reset(new CommandMQ(commandMQ));
- tempDataMQ.reset(new DataMQ(dataMQ));
- tempStatusMQ.reset(new StatusMQ(statusMQ));
- if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
- EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
- }
- halThreadPid = halThreadInfo.pid;
- halThreadTid = halThreadInfo.tid;
- }
- });
- if (!ret.isOk() || retval != Result::OK) {
- return processReturn("prepareForReading", ret, retval);
- }
- if (!tempCommandMQ || !tempCommandMQ->isValid() ||
- !tempDataMQ || !tempDataMQ->isValid() ||
- !tempStatusMQ || !tempStatusMQ->isValid() ||
- !mEfGroup) {
- ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
- ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
- "Command message queue for writing is invalid");
- ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for reading");
- ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for reading is invalid");
- ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for reading");
- ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
- "Status message queue for reading is invalid");
- ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
- return NO_INIT;
- }
- requestHalThreadPriority(halThreadPid, halThreadTid);
-
- mCommandMQ = std::move(tempCommandMQ);
- mDataMQ = std::move(tempDataMQ);
- mStatusMQ = std::move(tempStatusMQ);
- mReaderClient = gettid();
- return OK;
-}
-
-status_t StreamInHalHidl::getInputFramesLost(uint32_t *framesLost) {
- if (mStream == 0) return NO_INIT;
- return processReturn("getInputFramesLost", mStream->getInputFramesLost(), framesLost);
-}
-
-status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
- if (mStream == 0) return NO_INIT;
- if (mReaderClient == gettid() && mCommandMQ) {
- ReadParameters params;
- params.command = ReadCommand::GET_CAPTURE_POSITION;
- return callReaderThread(params, "getCapturePosition",
- [&](const ReadStatus& readStatus) {
- *frames = readStatus.reply.capturePosition.frames;
- *time = readStatus.reply.capturePosition.time;
- });
- } else {
- Result retval;
- Return<void> ret = mStream->getCapturePosition(
- [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
- retval = r;
- if (retval == Result::OK) {
- *frames = hidlFrames;
- *time = hidlTime;
- }
- });
- return processReturn("getCapturePosition", ret, retval);
- }
-}
-
-} // namespace android
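The "Notes on callback ownership" comment in the file above boils down to one idiom: the HAL-facing callback keeps only a wp<> to the stream and promotes it on every delivery, so a callback that races with stream destruction quietly becomes a no-op while never extending the stream's lifetime. A standalone sketch of that idiom with illustrative class names:

    #include <utils/RefBase.h>
    #include <utils/StrongPointer.h>

    struct StreamSketch : public android::RefBase {
        void onWriteReady() { /* forward to the client-side callback */ }
    };

    struct CallbackSketch {
        explicit CallbackSketch(const android::wp<StreamSketch>& stream) : mStream(stream) {}

        void deliverWriteReady() {
            // promote() fails once the stream's last strong reference is gone.
            android::sp<StreamSketch> stream = mStream.promote();
            if (stream != nullptr) {
                stream->onWriteReady();
            }
        }

      private:
        android::wp<StreamSketch> mStream;  // never keeps the stream alive
    };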
diff --git a/media/libaudiohal/StreamHalHidl.h b/media/libaudiohal/StreamHalHidl.h
deleted file mode 100644
index d4ab943..0000000
--- a/media/libaudiohal/StreamHalHidl.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_HAL_HIDL_H
-#define ANDROID_HARDWARE_STREAM_HAL_HIDL_H
-
-#include <atomic>
-
-#include <android/hardware/audio/2.0/IStream.h>
-#include <android/hardware/audio/2.0/IStreamIn.h>
-#include <android/hardware/audio/2.0/IStreamOut.h>
-#include <fmq/EventFlag.h>
-#include <fmq/MessageQueue.h>
-#include <media/audiohal/StreamHalInterface.h>
-
-#include "ConversionHelperHidl.h"
-#include "StreamPowerLog.h"
-
-using ::android::hardware::audio::V2_0::IStream;
-using ::android::hardware::audio::V2_0::IStreamIn;
-using ::android::hardware::audio::V2_0::IStreamOut;
-using ::android::hardware::EventFlag;
-using ::android::hardware::MessageQueue;
-using ::android::hardware::Return;
-using ReadParameters = ::android::hardware::audio::V2_0::IStreamIn::ReadParameters;
-using ReadStatus = ::android::hardware::audio::V2_0::IStreamIn::ReadStatus;
-using WriteCommand = ::android::hardware::audio::V2_0::IStreamOut::WriteCommand;
-using WriteStatus = ::android::hardware::audio::V2_0::IStreamOut::WriteStatus;
-
-namespace android {
-
-class DeviceHalHidl;
-
-class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
-{
- public:
- // Return the sampling rate in Hz - eg. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
- // Return size of input/output buffer in bytes for this stream - eg. 4800.
- virtual status_t getBufferSize(size_t *size);
-
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
-
- // Set audio stream parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get audio stream parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Add or remove the effect on the stream.
- virtual status_t addEffect(sp<EffectHalInterface> effect);
- virtual status_t removeEffect(sp<EffectHalInterface> effect);
-
- // Put the audio hardware input/output into standby mode.
- virtual status_t standby();
-
- virtual status_t dump(int fd);
-
- // Start a stream operating in mmap mode.
- virtual status_t start();
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop();
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info);
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
- // Set the priority of the thread that interacts with the HAL
- // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
- virtual status_t setHalThreadPriority(int priority);
-
- protected:
- // Subclasses can not be constructed directly by clients.
- explicit StreamHalHidl(IStream *stream);
-
- // The destructor automatically closes the stream.
- virtual ~StreamHalHidl();
-
- status_t getCachedBufferSize(size_t *size);
-
- bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
-
- // mStreamPowerLog is used for audio signal power logging.
- StreamPowerLog mStreamPowerLog;
-
- private:
- const int HAL_THREAD_PRIORITY_DEFAULT = -1;
- IStream *mStream;
- int mHalThreadPriority;
- size_t mCachedBufferSize;
-};
-
-class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl {
- public:
- // Return the frame size (number of bytes per frame) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Return the audio hardware driver estimated latency in milliseconds.
- virtual status_t getLatency(uint32_t *latency);
-
- // Use this method in situations where audio mixing is done in the hardware.
- virtual status_t setVolume(float left, float right);
-
- // Write audio buffer to driver.
- virtual status_t write(const void *buffer, size_t bytes, size_t *written);
-
- // Return the number of audio frames written by the audio dsp to DAC since
- // the output has exited standby.
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-
- // Get the local time at which the next write to the audio driver will be presented.
- virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
- // Set the callback for notifying completion of non-blocking write and drain.
- virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
-
- // Returns whether pause and resume operations are supported.
- virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
-
- // Notifies the audio driver to pause playback.
- virtual status_t pause();
-
- // Notifies the audio driver to resume playback following a pause.
- virtual status_t resume();
-
- // Returns whether drain operation is supported.
- virtual status_t supportsDrain(bool *supportsDrain);
-
- // Requests notification when data buffered by the driver/hardware has been played.
- virtual status_t drain(bool earlyNotify);
-
- // Notifies to the audio driver to flush the queued data.
- virtual status_t flush();
-
- // Return a recent count of the number of audio frames presented to an external observer.
- virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
-
- // Methods used by StreamOutCallback (HIDL).
- void onWriteReady();
- void onDrainReady();
- void onError();
-
- private:
- friend class DeviceHalHidl;
- typedef MessageQueue<WriteCommand, hardware::kSynchronizedReadWrite> CommandMQ;
- typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
- typedef MessageQueue<WriteStatus, hardware::kSynchronizedReadWrite> StatusMQ;
-
- wp<StreamOutHalInterfaceCallback> mCallback;
- sp<IStreamOut> mStream;
- std::unique_ptr<CommandMQ> mCommandMQ;
- std::unique_ptr<DataMQ> mDataMQ;
- std::unique_ptr<StatusMQ> mStatusMQ;
- std::atomic<pid_t> mWriterClient;
- EventFlag* mEfGroup;
-
- // Can not be constructed directly by clients.
- StreamOutHalHidl(const sp<IStreamOut>& stream);
-
- virtual ~StreamOutHalHidl();
-
- using WriterCallback = std::function<void(const WriteStatus& writeStatus)>;
- status_t callWriterThread(
- WriteCommand cmd, const char* cmdName,
- const uint8_t* data, size_t dataSize, WriterCallback callback);
- status_t prepareForWriting(size_t bufferSize);
-};
-
-class StreamInHalHidl : public StreamInHalInterface, public StreamHalHidl {
- public:
- // Return the frame size (number of bytes per frame) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Set the input gain for the audio driver.
- virtual status_t setGain(float gain);
-
- // Read audio buffer in from driver.
- virtual status_t read(void *buffer, size_t bytes, size_t *read);
-
- // Return the amount of input frames lost in the audio driver.
- virtual status_t getInputFramesLost(uint32_t *framesLost);
-
- // Return a recent count of the number of audio frames received and
- // the clock time associated with that frame count.
- virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
-
- private:
- friend class DeviceHalHidl;
- typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
- typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
- typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
-
- sp<IStreamIn> mStream;
- std::unique_ptr<CommandMQ> mCommandMQ;
- std::unique_ptr<DataMQ> mDataMQ;
- std::unique_ptr<StatusMQ> mStatusMQ;
- std::atomic<pid_t> mReaderClient;
- EventFlag* mEfGroup;
-
- // Can not be constructed directly by clients.
- StreamInHalHidl(const sp<IStreamIn>& stream);
-
- virtual ~StreamInHalHidl();
-
- using ReaderCallback = std::function<void(const ReadStatus& readStatus)>;
- status_t callReaderThread(
- const ReadParameters& params, const char* cmdName, ReaderCallback callback);
- status_t prepareForReading(size_t bufferSize);
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_H
diff --git a/media/libaudiohal/StreamHalLocal.cpp b/media/libaudiohal/StreamHalLocal.cpp
deleted file mode 100644
index dc17f5c..0000000
--- a/media/libaudiohal/StreamHalLocal.cpp
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "StreamHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <hardware/audio.h>
-#include <utils/Log.h>
-
-#include "DeviceHalLocal.h"
-#include "EffectHalLocal.h"
-#include "StreamHalLocal.h"
-
-namespace android {
-
-StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
- : mDevice(device),
- mStream(stream) {
- // Instrument audio signal power logging.
- // Note: This assumes channel mask, format, and sample rate do not change after creation.
- if (mStream != nullptr && mStreamPowerLog.isUserDebugOrEngBuild()) {
- mStreamPowerLog.init(mStream->get_sample_rate(mStream),
- mStream->get_channels(mStream),
- mStream->get_format(mStream));
- }
-}
-
-StreamHalLocal::~StreamHalLocal() {
- mStream = 0;
- mDevice.clear();
-}
-
-status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
- *rate = mStream->get_sample_rate(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getBufferSize(size_t *size) {
- *size = mStream->get_buffer_size(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
- *mask = mStream->get_channels(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getFormat(audio_format_t *format) {
- *format = mStream->get_format(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
- *sampleRate = mStream->get_sample_rate(mStream);
- *mask = mStream->get_channels(mStream);
- *format = mStream->get_format(mStream);
- return OK;
-}
-
-status_t StreamHalLocal::setParameters(const String8& kvPairs) {
- return mStream->set_parameters(mStream, kvPairs.string());
-}
-
-status_t StreamHalLocal::getParameters(const String8& keys, String8 *values) {
- char *halValues = mStream->get_parameters(mStream, keys.string());
- if (halValues != NULL) {
- values->setTo(halValues);
- free(halValues);
- } else {
- values->clear();
- }
- return OK;
-}
-
-status_t StreamHalLocal::addEffect(sp<EffectHalInterface> effect) {
- LOG_ALWAYS_FATAL_IF(!effect->isLocal(), "Only local effects can be added for a local stream");
- return mStream->add_audio_effect(mStream,
- static_cast<EffectHalLocal*>(effect.get())->handle());
-}
-
-status_t StreamHalLocal::removeEffect(sp<EffectHalInterface> effect) {
- LOG_ALWAYS_FATAL_IF(!effect->isLocal(), "Only local effects can be removed for a local stream");
- return mStream->remove_audio_effect(mStream,
- static_cast<EffectHalLocal*>(effect.get())->handle());
-}
-
-status_t StreamHalLocal::standby() {
- return mStream->standby(mStream);
-}
-
-status_t StreamHalLocal::dump(int fd) {
- status_t status = mStream->dump(mStream, fd);
- mStreamPowerLog.dump(fd);
- return status;
-}
-
-status_t StreamHalLocal::setHalThreadPriority(int) {
- // Don't need to do anything as local hal is executed by audioflinger directly
- // on the same thread.
- return OK;
-}
-
-StreamOutHalLocal::StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device)
- : StreamHalLocal(&stream->common, device), mStream(stream) {
-}
-
-StreamOutHalLocal::~StreamOutHalLocal() {
- mCallback.clear();
- mDevice->closeOutputStream(mStream);
- mStream = 0;
-}
-
-status_t StreamOutHalLocal::getFrameSize(size_t *size) {
- *size = audio_stream_out_frame_size(mStream);
- return OK;
-}
-
-status_t StreamOutHalLocal::getLatency(uint32_t *latency) {
- *latency = mStream->get_latency(mStream);
- return OK;
-}
-
-status_t StreamOutHalLocal::setVolume(float left, float right) {
- if (mStream->set_volume == NULL) return INVALID_OPERATION;
- return mStream->set_volume(mStream, left, right);
-}
-
-status_t StreamOutHalLocal::write(const void *buffer, size_t bytes, size_t *written) {
- ssize_t writeResult = mStream->write(mStream, buffer, bytes);
- if (writeResult > 0) {
- *written = writeResult;
- mStreamPowerLog.log(buffer, *written);
- return OK;
- } else {
- *written = 0;
- return writeResult;
- }
-}
-
-status_t StreamOutHalLocal::getRenderPosition(uint32_t *dspFrames) {
- return mStream->get_render_position(mStream, dspFrames);
-}
-
-status_t StreamOutHalLocal::getNextWriteTimestamp(int64_t *timestamp) {
- if (mStream->get_next_write_timestamp == NULL) return INVALID_OPERATION;
- return mStream->get_next_write_timestamp(mStream, timestamp);
-}
-
-status_t StreamOutHalLocal::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
- if (mStream->set_callback == NULL) return INVALID_OPERATION;
- status_t result = mStream->set_callback(mStream, StreamOutHalLocal::asyncCallback, this);
- if (result == OK) {
- mCallback = callback;
- }
- return result;
-}
-
-// static
-int StreamOutHalLocal::asyncCallback(stream_callback_event_t event, void*, void *cookie) {
- // We act as if we gave a wp<StreamOutHalLocal> to HAL. This way we should handle
- // correctly the case when the callback is invoked while StreamOutHalLocal's destructor is
- // already running, because the destructor is invoked after the refcount has been atomically
- // decremented.
- wp<StreamOutHalLocal> weakSelf(static_cast<StreamOutHalLocal*>(cookie));
- sp<StreamOutHalLocal> self = weakSelf.promote();
- if (self == 0) return 0;
- sp<StreamOutHalInterfaceCallback> callback = self->mCallback.promote();
- if (callback == 0) return 0;
- ALOGV("asyncCallback() event %d", event);
- switch (event) {
- case STREAM_CBK_EVENT_WRITE_READY:
- callback->onWriteReady();
- break;
- case STREAM_CBK_EVENT_DRAIN_READY:
- callback->onDrainReady();
- break;
- case STREAM_CBK_EVENT_ERROR:
- callback->onError();
- break;
- default:
- ALOGW("asyncCallback() unknown event %d", event);
- break;
- }
- return 0;
-}
-
-status_t StreamOutHalLocal::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
- *supportsPause = mStream->pause != NULL;
- *supportsResume = mStream->resume != NULL;
- return OK;
-}
-
-status_t StreamOutHalLocal::pause() {
- if (mStream->pause == NULL) return INVALID_OPERATION;
- return mStream->pause(mStream);
-}
-
-status_t StreamOutHalLocal::resume() {
- if (mStream->resume == NULL) return INVALID_OPERATION;
- return mStream->resume(mStream);
-}
-
-status_t StreamOutHalLocal::supportsDrain(bool *supportsDrain) {
- *supportsDrain = mStream->drain != NULL;
- return OK;
-}
-
-status_t StreamOutHalLocal::drain(bool earlyNotify) {
- if (mStream->drain == NULL) return INVALID_OPERATION;
- return mStream->drain(mStream, earlyNotify ? AUDIO_DRAIN_EARLY_NOTIFY : AUDIO_DRAIN_ALL);
-}
-
-status_t StreamOutHalLocal::flush() {
- if (mStream->flush == NULL) return INVALID_OPERATION;
- return mStream->flush(mStream);
-}
-
-status_t StreamOutHalLocal::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
- if (mStream->get_presentation_position == NULL) return INVALID_OPERATION;
- return mStream->get_presentation_position(mStream, frames, timestamp);
-}
-
-status_t StreamOutHalLocal::start() {
- if (mStream->start == NULL) return INVALID_OPERATION;
- return mStream->start(mStream);
-}
-
-status_t StreamOutHalLocal::stop() {
- if (mStream->stop == NULL) return INVALID_OPERATION;
- return mStream->stop(mStream);
-}
-
-status_t StreamOutHalLocal::createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) {
- if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
- return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
-}
-
-status_t StreamOutHalLocal::getMmapPosition(struct audio_mmap_position *position) {
- if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
- return mStream->get_mmap_position(mStream, position);
-}
-
-StreamInHalLocal::StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device)
- : StreamHalLocal(&stream->common, device), mStream(stream) {
-}
-
-StreamInHalLocal::~StreamInHalLocal() {
- mDevice->closeInputStream(mStream);
- mStream = 0;
-}
-
-status_t StreamInHalLocal::getFrameSize(size_t *size) {
- *size = audio_stream_in_frame_size(mStream);
- return OK;
-}
-
-status_t StreamInHalLocal::setGain(float gain) {
- return mStream->set_gain(mStream, gain);
-}
-
-status_t StreamInHalLocal::read(void *buffer, size_t bytes, size_t *read) {
- ssize_t readResult = mStream->read(mStream, buffer, bytes);
- if (readResult > 0) {
- *read = readResult;
- mStreamPowerLog.log( buffer, *read);
- return OK;
- } else {
- *read = 0;
- return readResult;
- }
-}
-
-status_t StreamInHalLocal::getInputFramesLost(uint32_t *framesLost) {
- *framesLost = mStream->get_input_frames_lost(mStream);
- return OK;
-}
-
-status_t StreamInHalLocal::getCapturePosition(int64_t *frames, int64_t *time) {
- if (mStream->get_capture_position == NULL) return INVALID_OPERATION;
- return mStream->get_capture_position(mStream, frames, time);
-}
-
-status_t StreamInHalLocal::start() {
- if (mStream->start == NULL) return INVALID_OPERATION;
- return mStream->start(mStream);
-}
-
-status_t StreamInHalLocal::stop() {
- if (mStream->stop == NULL) return INVALID_OPERATION;
- return mStream->stop(mStream);
-}
-
-status_t StreamInHalLocal::createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) {
- if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
- return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
-}
-
-status_t StreamInHalLocal::getMmapPosition(struct audio_mmap_position *position) {
- if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
- return mStream->get_mmap_position(mStream, position);
-}
-
-} // namespace android
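StreamOutHalLocal::asyncCallback() in the file removed above survives races with destruction by treating the cookie as a weak reference and promoting it on every invocation. A minimal standalone sketch of the same idea, using std::weak_ptr/std::shared_ptr in place of Android's wp<>/sp<> (names here are invented for illustration):

    #include <iostream>
    #include <memory>

    struct Stream {
        void onWriteReady() { std::cout << "write ready\n"; }
    };

    // The cookie handed to the HAL is, in effect, a weak reference; the callback
    // only runs the handler if the owning object is still alive when it fires.
    int asyncCallback(void* cookie) {
        auto* weakSelf = static_cast<std::weak_ptr<Stream>*>(cookie);
        if (auto self = weakSelf->lock()) {   // "promote" the weak reference
            self->onWriteReady();
        }
        return 0;                             // object already gone: drop the event
    }

    int main() {
        auto stream = std::make_shared<Stream>();
        std::weak_ptr<Stream> cookie = stream;
        asyncCallback(&cookie);   // delivered
        stream.reset();           // stream destroyed while the callback is still registered
        asyncCallback(&cookie);   // safely ignored
    }
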
diff --git a/media/libaudiohal/StreamHalLocal.h b/media/libaudiohal/StreamHalLocal.h
deleted file mode 100644
index c7136df..0000000
--- a/media/libaudiohal/StreamHalLocal.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
-#define ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
-
-#include <media/audiohal/StreamHalInterface.h>
-#include "StreamPowerLog.h"
-
-namespace android {
-
-class DeviceHalLocal;
-
-class StreamHalLocal : public virtual StreamHalInterface
-{
- public:
- // Return the sampling rate in Hz - eg. 44100.
- virtual status_t getSampleRate(uint32_t *rate);
-
- // Return size of input/output buffer in bytes for this stream - eg. 4800.
- virtual status_t getBufferSize(size_t *size);
-
- // Return the channel mask.
- virtual status_t getChannelMask(audio_channel_mask_t *mask);
-
- // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
- virtual status_t getFormat(audio_format_t *format);
-
- // Convenience method.
- virtual status_t getAudioProperties(
- uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
-
- // Set audio stream parameters.
- virtual status_t setParameters(const String8& kvPairs);
-
- // Get audio stream parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
-
- // Add or remove the effect on the stream.
- virtual status_t addEffect(sp<EffectHalInterface> effect);
- virtual status_t removeEffect(sp<EffectHalInterface> effect);
-
- // Put the audio hardware input/output into standby mode.
- virtual status_t standby();
-
- virtual status_t dump(int fd);
-
- // Start a stream operating in mmap mode.
- virtual status_t start() = 0;
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop() = 0;
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info) = 0;
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
-
- // Set the priority of the thread that interacts with the HAL
- // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
- virtual status_t setHalThreadPriority(int priority);
-
- protected:
- // Subclasses can not be constructed directly by clients.
- StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device);
-
- // The destructor automatically closes the stream.
- virtual ~StreamHalLocal();
-
- sp<DeviceHalLocal> mDevice;
-
- // mStreamPowerLog is used for audio signal power logging.
- StreamPowerLog mStreamPowerLog;
-
- private:
- audio_stream_t *mStream;
-};
-
-class StreamOutHalLocal : public StreamOutHalInterface, public StreamHalLocal {
- public:
- // Return the frame size (number of bytes per sample) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Return the audio hardware driver estimated latency in milliseconds.
- virtual status_t getLatency(uint32_t *latency);
-
- // Use this method in situations where audio mixing is done in the hardware.
- virtual status_t setVolume(float left, float right);
-
- // Write audio buffer to driver.
- virtual status_t write(const void *buffer, size_t bytes, size_t *written);
-
- // Return the number of audio frames written by the audio dsp to DAC since
- // the output has exited standby.
- virtual status_t getRenderPosition(uint32_t *dspFrames);
-
- // Get the local time at which the next write to the audio driver will be presented.
- virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
- // Set the callback for notifying completion of non-blocking write and drain.
- virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
-
- // Returns whether pause and resume operations are supported.
- virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
-
- // Notifies the audio driver to pause playback.
- virtual status_t pause();
-
- // Notifies to the audio driver to resume playback following a pause.
- virtual status_t resume();
-
- // Returns whether drain operation is supported.
- virtual status_t supportsDrain(bool *supportsDrain);
-
- // Requests notification when data buffered by the driver/hardware has been played.
- virtual status_t drain(bool earlyNotify);
-
- // Notifies to the audio driver to flush the queued data.
- virtual status_t flush();
-
- // Return a recent count of the number of audio frames presented to an external observer.
- virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
-
- // Start a stream operating in mmap mode.
- virtual status_t start();
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop();
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info);
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
- private:
- audio_stream_out_t *mStream;
- wp<StreamOutHalInterfaceCallback> mCallback;
-
- friend class DeviceHalLocal;
-
- // Can not be constructed directly by clients.
- StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device);
-
- virtual ~StreamOutHalLocal();
-
- static int asyncCallback(stream_callback_event_t event, void *param, void *cookie);
-};
-
-class StreamInHalLocal : public StreamInHalInterface, public StreamHalLocal {
- public:
- // Return the frame size (number of bytes per sample) of a stream.
- virtual status_t getFrameSize(size_t *size);
-
- // Set the input gain for the audio driver.
- virtual status_t setGain(float gain);
-
- // Read audio buffer in from driver.
- virtual status_t read(void *buffer, size_t bytes, size_t *read);
-
- // Return the amount of input frames lost in the audio driver.
- virtual status_t getInputFramesLost(uint32_t *framesLost);
-
- // Return a recent count of the number of audio frames received and
- // the clock time associated with that frame count.
- virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
-
- // Start a stream operating in mmap mode.
- virtual status_t start();
-
- // Stop a stream operating in mmap mode.
- virtual status_t stop();
-
- // Retrieve information on the data buffer in mmap mode.
- virtual status_t createMmapBuffer(int32_t minSizeFrames,
- struct audio_mmap_buffer_info *info);
-
- // Get current read/write position in the mmap buffer
- virtual status_t getMmapPosition(struct audio_mmap_position *position);
-
- private:
- audio_stream_in_t *mStream;
-
- friend class DeviceHalLocal;
-
- // Can not be constructed directly by clients.
- StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device);
-
- virtual ~StreamInHalLocal();
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
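The header removed above documents pause, resume, drain and flush as optional HAL operations, with supportsPauseAndResume()/supportsDrain() as the capability queries. A hedged sketch of how a caller might probe them before use; the fallback to standby() is only an illustration, not behaviour taken from this tree:

    #include <media/audiohal/StreamHalInterface.h>
    #include <utils/StrongPointer.h>

    using namespace android;

    // Pause the stream if the HAL advertises pause/resume, otherwise settle for standby().
    status_t pauseIfSupported(const sp<StreamOutHalInterface>& out) {
        bool supportsPause = false;
        bool supportsResume = false;
        status_t status = out->supportsPauseAndResume(&supportsPause, &supportsResume);
        if (status != OK) return status;
        if (supportsPause && supportsResume) {
            return out->pause();
        }
        return out->standby();
    }
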
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index caf01be..7de8eb3 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
#define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
+#include <media/MicrophoneInfo.h>
#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
@@ -105,6 +106,9 @@
// Set audio port configuration.
virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
+ // List microphones
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
+
virtual status_t dump(int fd) = 0;
protected:
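A brief sketch of what a caller of the new getMicrophones() entry point might look like; the helper name and logging are illustrative only:

    #define LOG_TAG "MicListExample"

    #include <vector>
    #include <media/MicrophoneInfo.h>
    #include <media/audiohal/DeviceHalInterface.h>
    #include <utils/Log.h>

    using namespace android;

    // Ask the HAL for its microphone list and report how many entries it returned.
    status_t logMicrophoneCount(const sp<DeviceHalInterface>& device) {
        std::vector<media::MicrophoneInfo> microphones;
        const status_t status = device->getMicrophones(&microphones);
        if (status != OK) {
            ALOGE("getMicrophones failed: %d", status);
            return status;
        }
        ALOGI("HAL reports %zu microphone(s)", microphones.size());
        return OK;
    }
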
diff --git a/media/libaudiohal/include/media/audiohal/EffectBufferHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectBufferHalInterface.h
index e862f6e..d0603cd 100644
--- a/media/libaudiohal/include/media/audiohal/EffectBufferHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectBufferHalInterface.h
@@ -26,6 +26,7 @@
// Abstraction for an audio buffer. It may be a "mirror" for
// a buffer that the effect chain doesn't own, or a buffer owned by
// the effect chain.
+// Buffers are created from EffectsFactoryHalInterface
class EffectBufferHalInterface : public RefBase
{
public:
@@ -37,6 +38,8 @@
return externalData() != nullptr ? externalData() : audioBuffer()->raw;
}
+ virtual size_t getSize() const = 0;
+
virtual void setExternalData(void* external) = 0;
virtual void setFrameCount(size_t frameCount) = 0;
virtual bool checkFrameCountChange() = 0; // returns whether frame count has been updated
@@ -47,9 +50,6 @@
virtual void update(size_t size) = 0; // copies partial data from external buffer
virtual void commit(size_t size) = 0; // copies partial data to external buffer
- static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
- static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
-
protected:
// Subclasses can not be constructed directly by clients.
EffectBufferHalInterface() {}
diff --git a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
index a616e86..316a46c 100644
--- a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
@@ -48,6 +48,10 @@
static sp<EffectsFactoryHalInterface> create();
+ virtual status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) = 0;
+ virtual status_t mirrorBuffer(void* external, size_t size,
+ sp<EffectBufferHalInterface>* buffer) = 0;
+
// Helper function to compare effect uuid to EFFECT_UUID_NULL.
static bool isNullUuid(const effect_uuid_t *pEffectUuid);
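Taken together with the EffectBufferHalInterface change above, buffer creation moves from the removed static allocate()/mirror() helpers to the effects factory, and the new getSize() accessor reports the allocated length. A minimal sketch of the new call sites, assuming an existing factory instance (error handling abbreviated):

    #define LOG_TAG "EffectBufferExample"

    #include <media/audiohal/EffectBufferHalInterface.h>
    #include <media/audiohal/EffectsFactoryHalInterface.h>
    #include <utils/Log.h>

    using namespace android;

    // Previously: EffectBufferHalInterface::allocate(size, &buffer);
    // now the factory owns buffer creation.
    status_t makeBuffers(const sp<EffectsFactoryHalInterface>& factory,
                         void* external, size_t size) {
        sp<EffectBufferHalInterface> owned;
        status_t status = factory->allocateBuffer(size, &owned);
        if (status != OK) return status;

        sp<EffectBufferHalInterface> mirrored;
        status = factory->mirrorBuffer(external, size, &mirrored);
        if (status != OK) return status;

        ALOGV("allocated %zu bytes, mirror size %zu", size, mirrored->getSize());
        return OK;
    }
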
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 7419c34..c969e28 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -17,7 +17,10 @@
#ifndef ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
#define ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
+#include <vector>
+
#include <media/audiohal/EffectHalInterface.h>
+#include <media/MicrophoneInfo.h>
#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
@@ -142,6 +145,15 @@
// Return a recent count of the number of audio frames presented to an external observer.
virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp) = 0;
+ struct SourceMetadata {
+ std::vector<playback_track_metadata_t> tracks;
+ };
+ /**
+ * Called when the metadata of the stream's source has been changed.
+ * @param sourceMetadata Description of the audio that is played by the clients.
+ */
+ virtual status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) = 0;
+
protected:
virtual ~StreamOutHalInterface() {}
};
@@ -161,6 +173,18 @@
// the clock time associated with that frame count.
virtual status_t getCapturePosition(int64_t *frames, int64_t *time) = 0;
+ // Get active microphones
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
+
+ struct SinkMetadata {
+ std::vector<record_track_metadata_t> tracks;
+ };
+ /**
+ * Called when the metadata of the stream's sink has been changed.
+ * @param sinkMetadata Description of the audio that is suggested by the clients.
+ */
+ virtual status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) = 0;
+
protected:
virtual ~StreamInHalInterface() {}
};
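A hedged sketch of how a client might feed the new metadata callbacks; the usage/content-type/source values are arbitrary examples, and the track structs come from system/audio.h rather than anything introduced by this change:

    #include <media/audiohal/StreamHalInterface.h>
    #include <system/audio.h>
    #include <utils/StrongPointer.h>

    using namespace android;

    // Describe the currently playing tracks to the output stream HAL.
    status_t announcePlayback(const sp<StreamOutHalInterface>& out) {
        playback_track_metadata_t track;
        track.usage = AUDIO_USAGE_MEDIA;
        track.content_type = AUDIO_CONTENT_TYPE_MUSIC;
        track.gain = 1.0f;

        StreamOutHalInterface::SourceMetadata metadata;
        metadata.tracks.push_back(track);
        return out->updateSourceMetadata(metadata);
    }

    // Describe the active capture client to the input stream HAL.
    status_t announceCapture(const sp<StreamInHalInterface>& in) {
        record_track_metadata_t track;
        track.source = AUDIO_SOURCE_MIC;
        track.gain = 1.0f;

        StreamInHalInterface::SinkMetadata metadata;
        metadata.tracks.push_back(track);
        return in->updateSinkMetadata(metadata);
    }
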
diff --git a/media/libaudioprocessing/Android.mk b/media/libaudioprocessing/Android.mk
index c850984..da1ecc2 100644
--- a/media/libaudioprocessing/Android.mk
+++ b/media/libaudioprocessing/Android.mk
@@ -24,6 +24,7 @@
libcutils \
liblog \
libnbaio \
+ libnblog \
libsonic \
libutils \
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 238925d..f6f817a 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -62,21 +62,22 @@
#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
#endif
-// TODO: Move these macro/inlines to a header file.
-template <typename T>
-static inline
-T max(const T& x, const T& y) {
- return x > y ? x : y;
-}
-
// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
// original code will be used for stereo sinks, the new mixer for multichannel.
-static const bool kUseNewMixer = true;
+static constexpr bool kUseNewMixer = true;
// Set kUseFloat to true to allow floating input into the mixer engine.
// If kUseNewMixer is false, this is ignored or may be overridden internally
// because of downmix/upmix support.
-static const bool kUseFloat = true;
+static constexpr bool kUseFloat = true;
+
+#ifdef FLOAT_AUX
+using TYPE_AUX = float;
+static_assert(kUseNewMixer && kUseFloat,
+ "kUseNewMixer and kUseFloat must be true for FLOAT_AUX option");
+#else
+using TYPE_AUX = int32_t; // q4.27
+#endif
// Set to default copy buffer size in frames for input processing.
static const size_t kCopyBufferFrameCount = 256;
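The FLOAT_AUX block above pins a compile-time type choice to the mixer flags with a static_assert, so an inconsistent build configuration fails to compile instead of mixing incorrectly. A tiny standalone illustration of the pattern (all identifiers invented for the example):

    #include <cstdint>
    #include <cstdio>

    constexpr bool kUseNewEngine = true;
    constexpr bool kUseFloatPath = true;

    #ifdef USE_FLOAT_ACCUMULATOR
    using accum_t = float;
    static_assert(kUseNewEngine && kUseFloatPath,
                  "USE_FLOAT_ACCUMULATOR requires the new engine and the float path");
    #else
    using accum_t = int32_t;   // fixed-point accumulator, e.g. Q4.27
    #endif

    int main() {
        accum_t acc{};
        std::printf("accumulator size: %zu bytes\n", sizeof(acc));
        return 0;
    }
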
@@ -85,88 +86,28 @@
// ----------------------------------------------------------------------------
-template <typename T>
-T min(const T& a, const T& b)
-{
- return a < b ? a : b;
-}
-
-// ----------------------------------------------------------------------------
-
-// Ensure mConfiguredNames bitmask is initialized properly on all architectures.
-// The value of 1 << x is undefined in C when x >= 32.
-
-AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTracks)
- : mTrackNames(0), mConfiguredNames((maxNumTracks >= 32 ? 0 : 1 << maxNumTracks) - 1),
- mSampleRate(sampleRate)
-{
- ALOG_ASSERT(maxNumTracks <= MAX_NUM_TRACKS, "maxNumTracks %u > MAX_NUM_TRACKS %u",
- maxNumTracks, MAX_NUM_TRACKS);
-
- // AudioMixer is not yet capable of more than 32 active track inputs
- ALOG_ASSERT(32 >= MAX_NUM_TRACKS, "bad MAX_NUM_TRACKS %d", MAX_NUM_TRACKS);
-
- pthread_once(&sOnceControl, &sInitRoutine);
-
- mState.enabledTracks= 0;
- mState.needsChanged = 0;
- mState.frameCount = frameCount;
- mState.hook = process__nop;
- mState.outputTemp = NULL;
- mState.resampleTemp = NULL;
- mState.mNBLogWriter = &mDummyLogWriter;
- // mState.reserved
-
- // FIXME Most of the following initialization is probably redundant since
- // tracks[i] should only be referenced if (mTrackNames & (1 << i)) != 0
- // and mTrackNames is initially 0. However, leave it here until that's verified.
- track_t* t = mState.tracks;
- for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
- t->resampler = NULL;
- t->downmixerBufferProvider = NULL;
- t->mReformatBufferProvider = NULL;
- t->mTimestretchBufferProvider = NULL;
- t++;
- }
-
-}
-
-AudioMixer::~AudioMixer()
-{
- track_t* t = mState.tracks;
- for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
- delete t->resampler;
- delete t->downmixerBufferProvider;
- delete t->mReformatBufferProvider;
- delete t->mTimestretchBufferProvider;
- t++;
- }
- delete [] mState.outputTemp;
- delete [] mState.resampleTemp;
-}
-
-void AudioMixer::setNBLogWriter(NBLog::Writer *logWriter)
-{
- mState.mNBLogWriter = logWriter;
-}
-
static inline audio_format_t selectMixerInFormat(audio_format_t inputFormat __unused) {
return kUseFloat && kUseNewMixer ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
}
-int AudioMixer::getTrackName(audio_channel_mask_t channelMask,
- audio_format_t format, int sessionId)
+status_t AudioMixer::create(
+ int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId)
{
- if (!isValidPcmTrackFormat(format)) {
- ALOGE("AudioMixer::getTrackName invalid format (%#x)", format);
- return -1;
+ LOG_ALWAYS_FATAL_IF(exists(name), "name %d already exists", name);
+
+ if (!isValidChannelMask(channelMask)) {
+ ALOGE("%s invalid channelMask: %#x", __func__, channelMask);
+ return BAD_VALUE;
}
- uint32_t names = (~mTrackNames) & mConfiguredNames;
- if (names != 0) {
- int n = __builtin_ctz(names);
- ALOGV("add track (%d)", n);
+ if (!isValidFormat(format)) {
+ ALOGE("%s invalid format: %#x", __func__, format);
+ return BAD_VALUE;
+ }
+
+ auto t = std::make_shared<Track>();
+ {
+ // TODO: move initialization to the Track constructor.
// assume default parameters for the track, except where noted below
- track_t* t = &mState.tracks[n];
t->needs = 0;
// Integer volume.
@@ -207,17 +148,12 @@
// no initialization needed
// t->buffer.frameCount
t->hook = NULL;
- t->in = NULL;
- t->resampler = NULL;
+ t->mIn = NULL;
t->sampleRate = mSampleRate;
// setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
t->mainBuffer = NULL;
t->auxBuffer = NULL;
t->mInputBufferProvider = NULL;
- t->mReformatBufferProvider = NULL;
- t->downmixerBufferProvider = NULL;
- t->mPostDownmixReformatBufferProvider = NULL;
- t->mTimestretchBufferProvider = NULL;
t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
t->mFormat = format;
t->mMixerInFormat = selectMixerInFormat(format);
@@ -230,96 +166,83 @@
status_t status = t->prepareForDownmix();
if (status != OK) {
ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
- return -1;
+ return BAD_VALUE;
}
// prepareForDownmix() may change mDownmixRequiresFormat
ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
t->prepareForReformat();
- mTrackNames |= 1 << n;
- return TRACK0 + n;
- }
- ALOGE("AudioMixer::getTrackName out of available tracks");
- return -1;
-}
-void AudioMixer::invalidateState(uint32_t mask)
-{
- if (mask != 0) {
- mState.needsChanged |= mask;
- mState.hook = process__validate;
+ mTracks[name] = t;
+ return OK;
}
- }
+}
// Called when channel masks have changed for a track name
// TODO: Fix DownmixerBufferProvider not to (possibly) change mixer input format,
// which will simplify this logic.
bool AudioMixer::setChannelMasks(int name,
audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
- track_t &track = mState.tracks[name];
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ const std::shared_ptr<Track> &track = mTracks[name];
- if (trackChannelMask == track.channelMask
- && mixerChannelMask == track.mMixerChannelMask) {
+ if (trackChannelMask == track->channelMask
+ && mixerChannelMask == track->mMixerChannelMask) {
return false; // no need to change
}
// always recompute for both channel masks even if only one has changed.
const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
- const bool mixerChannelCountChanged = track.mMixerChannelCount != mixerChannelCount;
ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX)
&& trackChannelCount
&& mixerChannelCount);
- track.channelMask = trackChannelMask;
- track.channelCount = trackChannelCount;
- track.mMixerChannelMask = mixerChannelMask;
- track.mMixerChannelCount = mixerChannelCount;
+ track->channelMask = trackChannelMask;
+ track->channelCount = trackChannelCount;
+ track->mMixerChannelMask = mixerChannelMask;
+ track->mMixerChannelCount = mixerChannelCount;
// channel masks have changed, does this track need a downmixer?
// update to try using our desired format (if we aren't already using it)
- const audio_format_t prevDownmixerFormat = track.mDownmixRequiresFormat;
- const status_t status = mState.tracks[name].prepareForDownmix();
+ const status_t status = track->prepareForDownmix();
ALOGE_IF(status != OK,
"prepareForDownmix error %d, track channel mask %#x, mixer channel mask %#x",
- status, track.channelMask, track.mMixerChannelMask);
+ status, track->channelMask, track->mMixerChannelMask);
- if (prevDownmixerFormat != track.mDownmixRequiresFormat) {
- track.prepareForReformat(); // because of downmixer, track format may change!
- }
+ // always do reformat since channel mask changed,
+ // do it after downmix since track format may change!
+ track->prepareForReformat();
- if (track.resampler && mixerChannelCountChanged) {
+ if (track->mResampler.get() != nullptr) {
// resampler channels may have changed.
- const uint32_t resetToSampleRate = track.sampleRate;
- delete track.resampler;
- track.resampler = NULL;
- track.sampleRate = mSampleRate; // without resampler, track rate is device sample rate.
+ const uint32_t resetToSampleRate = track->sampleRate;
+ track->mResampler.reset(nullptr);
+ track->sampleRate = mSampleRate; // without resampler, track rate is device sample rate.
// recreate the resampler with updated format, channels, saved sampleRate.
- track.setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/);
+ track->setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/);
}
return true;
}
-void AudioMixer::track_t::unprepareForDownmix() {
+void AudioMixer::Track::unprepareForDownmix() {
ALOGV("AudioMixer::unprepareForDownmix(%p)", this);
- if (mPostDownmixReformatBufferProvider != nullptr) {
+ if (mPostDownmixReformatBufferProvider.get() != nullptr) {
// release any buffers held by the mPostDownmixReformatBufferProvider
- // before deallocating the downmixerBufferProvider.
+ // before deallocating the mDownmixerBufferProvider.
mPostDownmixReformatBufferProvider->reset();
}
mDownmixRequiresFormat = AUDIO_FORMAT_INVALID;
- if (downmixerBufferProvider != NULL) {
+ if (mDownmixerBufferProvider.get() != nullptr) {
// this track had previously been configured with a downmixer, delete it
- ALOGV(" deleting old downmixer");
- delete downmixerBufferProvider;
- downmixerBufferProvider = NULL;
+ mDownmixerBufferProvider.reset(nullptr);
reconfigureBufferProviders();
} else {
ALOGV(" nothing to do, no downmixer to delete");
}
}
-status_t AudioMixer::track_t::prepareForDownmix()
+status_t AudioMixer::Track::prepareForDownmix()
{
ALOGV("AudioMixer::prepareForDownmix(%p) with mask 0x%x",
this, channelMask);
@@ -337,40 +260,35 @@
if (audio_channel_mask_get_representation(channelMask)
== AUDIO_CHANNEL_REPRESENTATION_POSITION
&& DownmixerBufferProvider::isMultichannelCapable()) {
- DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(channelMask,
+ mDownmixerBufferProvider.reset(new DownmixerBufferProvider(channelMask,
mMixerChannelMask,
AUDIO_FORMAT_PCM_16_BIT /* TODO: use mMixerInFormat, now only PCM 16 */,
- sampleRate, sessionId, kCopyBufferFrameCount);
-
- if (pDbp->isValid()) { // if constructor completed properly
+ sampleRate, sessionId, kCopyBufferFrameCount));
+ if (static_cast<DownmixerBufferProvider *>(mDownmixerBufferProvider.get())->isValid()) {
mDownmixRequiresFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
- downmixerBufferProvider = pDbp;
reconfigureBufferProviders();
return NO_ERROR;
}
- delete pDbp;
+ // mDownmixerBufferProvider reset below.
}
// Effect downmixer does not accept the channel conversion. Let's use our remixer.
- RemixBufferProvider* pRbp = new RemixBufferProvider(channelMask,
- mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount);
+ mDownmixerBufferProvider.reset(new RemixBufferProvider(channelMask,
+ mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount));
// Remix always finds a conversion whereas Downmixer effect above may fail.
- downmixerBufferProvider = pRbp;
reconfigureBufferProviders();
return NO_ERROR;
}
-void AudioMixer::track_t::unprepareForReformat() {
+void AudioMixer::Track::unprepareForReformat() {
ALOGV("AudioMixer::unprepareForReformat(%p)", this);
bool requiresReconfigure = false;
- if (mReformatBufferProvider != NULL) {
- delete mReformatBufferProvider;
- mReformatBufferProvider = NULL;
+ if (mReformatBufferProvider.get() != nullptr) {
+ mReformatBufferProvider.reset(nullptr);
requiresReconfigure = true;
}
- if (mPostDownmixReformatBufferProvider != NULL) {
- delete mPostDownmixReformatBufferProvider;
- mPostDownmixReformatBufferProvider = NULL;
+ if (mPostDownmixReformatBufferProvider.get() != nullptr) {
+ mPostDownmixReformatBufferProvider.reset(nullptr);
requiresReconfigure = true;
}
if (requiresReconfigure) {
@@ -378,7 +296,7 @@
}
}
-status_t AudioMixer::track_t::prepareForReformat()
+status_t AudioMixer::Track::prepareForReformat()
{
ALOGV("AudioMixer::prepareForReformat(%p) with format %#x", this, mFormat);
// discard previous reformatters
@@ -388,19 +306,27 @@
? mDownmixRequiresFormat : mMixerInFormat;
bool requiresReconfigure = false;
if (mFormat != targetFormat) {
- mReformatBufferProvider = new ReformatBufferProvider(
+ mReformatBufferProvider.reset(new ReformatBufferProvider(
audio_channel_count_from_out_mask(channelMask),
mFormat,
targetFormat,
- kCopyBufferFrameCount);
+ kCopyBufferFrameCount));
+ requiresReconfigure = true;
+ } else if (mFormat == AUDIO_FORMAT_PCM_FLOAT) {
+ // Input and output are floats, make sure application did not provide > 3db samples
+ // that would break volume application (b/68099072)
+ // TODO: add a trusted source flag to avoid the overhead
+ mReformatBufferProvider.reset(new ClampFloatBufferProvider(
+ audio_channel_count_from_out_mask(channelMask),
+ kCopyBufferFrameCount));
requiresReconfigure = true;
}
if (targetFormat != mMixerInFormat) {
- mPostDownmixReformatBufferProvider = new ReformatBufferProvider(
+ mPostDownmixReformatBufferProvider.reset(new ReformatBufferProvider(
audio_channel_count_from_out_mask(mMixerChannelMask),
targetFormat,
mMixerInFormat,
- kCopyBufferFrameCount);
+ kCopyBufferFrameCount));
requiresReconfigure = true;
}
if (requiresReconfigure) {
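The ClampFloatBufferProvider introduced above exists to bound float input before per-track volume is applied (the b/68099072 note in the comment). A standalone sketch of that kind of guard; the +3 dBFS limit here is only an assumed reading of the "> 3db" comment, not the provider's actual constant:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Clamp float PCM to a bounded range so later gain stages cannot be pushed
    // past their own headroom. kMaxAbs (~ +3 dBFS) is assumed for illustration.
    void clampFloatSamples(float* samples, std::size_t count) {
        const float kMaxAbs = std::pow(10.0f, 3.0f / 20.0f);  // approximately 1.413
        for (std::size_t i = 0; i < count; ++i) {
            samples[i] = std::max(-kMaxAbs, std::min(kMaxAbs, samples[i]));
        }
    }

    int main() {
        std::vector<float> buf = {0.5f, 2.0f, -3.0f, 1.0f};
        clampFloatSamples(buf.data(), buf.size());
        for (float s : buf) std::printf("%f\n", s);
        return 0;
    }
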
@@ -409,74 +335,59 @@
return NO_ERROR;
}
-void AudioMixer::track_t::reconfigureBufferProviders()
+void AudioMixer::Track::reconfigureBufferProviders()
{
bufferProvider = mInputBufferProvider;
- if (mReformatBufferProvider) {
+ if (mReformatBufferProvider.get() != nullptr) {
mReformatBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mReformatBufferProvider;
+ bufferProvider = mReformatBufferProvider.get();
}
- if (downmixerBufferProvider) {
- downmixerBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = downmixerBufferProvider;
+ if (mDownmixerBufferProvider.get() != nullptr) {
+ mDownmixerBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mDownmixerBufferProvider.get();
}
- if (mPostDownmixReformatBufferProvider) {
+ if (mPostDownmixReformatBufferProvider.get() != nullptr) {
mPostDownmixReformatBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mPostDownmixReformatBufferProvider;
+ bufferProvider = mPostDownmixReformatBufferProvider.get();
}
- if (mTimestretchBufferProvider) {
+ if (mTimestretchBufferProvider.get() != nullptr) {
mTimestretchBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mTimestretchBufferProvider;
+ bufferProvider = mTimestretchBufferProvider.get();
}
}
-void AudioMixer::deleteTrackName(int name)
+void AudioMixer::destroy(int name)
{
- ALOGV("AudioMixer::deleteTrackName(%d)", name);
- name -= TRACK0;
- LOG_ALWAYS_FATAL_IF(name < 0 || name >= (int)MAX_NUM_TRACKS, "bad track name %d", name);
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
ALOGV("deleteTrackName(%d)", name);
- track_t& track(mState.tracks[ name ]);
- if (track.enabled) {
- track.enabled = false;
- invalidateState(1<<name);
+
+ if (mTracks[name]->enabled) {
+ invalidate();
}
- // delete the resampler
- delete track.resampler;
- track.resampler = NULL;
- // delete the downmixer
- mState.tracks[name].unprepareForDownmix();
- // delete the reformatter
- mState.tracks[name].unprepareForReformat();
- // delete the timestretch provider
- delete track.mTimestretchBufferProvider;
- track.mTimestretchBufferProvider = NULL;
- mTrackNames &= ~(1<<name);
+ mTracks.erase(name); // deallocate track
}
void AudioMixer::enable(int name)
{
- name -= TRACK0;
- ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
- track_t& track = mState.tracks[name];
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ const std::shared_ptr<Track> &track = mTracks[name];
- if (!track.enabled) {
- track.enabled = true;
+ if (!track->enabled) {
+ track->enabled = true;
ALOGV("enable(%d)", name);
- invalidateState(1 << name);
+ invalidate();
}
}
void AudioMixer::disable(int name)
{
- name -= TRACK0;
- ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
- track_t& track = mState.tracks[name];
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ const std::shared_ptr<Track> &track = mTracks[name];
- if (track.enabled) {
- track.enabled = false;
+ if (track->enabled) {
+ track->enabled = false;
ALOGV("disable(%d)", name);
- invalidateState(1 << name);
+ invalidate();
}
}
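reconfigureBufferProviders() above always rebuilds the pull chain in a fixed order — input, reformat, downmix, post-downmix reformat, timestretch — with each configured stage pulling from whatever currently ends the chain. A minimal standalone sketch of that chaining idea (the class names are invented, not the buffer-provider types used here):

    #include <cstdio>
    #include <vector>

    struct Provider {
        virtual ~Provider() = default;
        virtual const char* name() const = 0;
        void setUpstream(Provider* p) { upstream = p; }
        Provider* upstream = nullptr;
    };

    struct Stage : Provider {
        explicit Stage(const char* n) : label(n) {}
        const char* name() const override { return label; }
        const char* label;
    };

    int main() {
        Stage input("input"), reformat("reformat"), downmix("downmix"), timestretch("timestretch");

        // Optional stages in fixed order; a null entry means the stage is not configured.
        std::vector<Provider*> optional = { &reformat, &downmix, nullptr /*post-downmix*/, &timestretch };

        Provider* tail = &input;
        for (Provider* stage : optional) {
            if (stage == nullptr) continue;   // skip stages that are not needed
            stage->setUpstream(tail);         // each stage pulls from the previous tail
            tail = stage;
        }

        // Walk the chain from the final consumer back to the source.
        for (Provider* p = tail; p != nullptr; p = p->upstream) {
            std::printf("%s\n", p->name());
        }
        return 0;
    }
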
@@ -554,7 +465,8 @@
ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
" prev:%f set_to:%f", *pPrevVolume, *pSetVolume);
const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
- const float maxv = max(newVolume, *pPrevVolume); // could be inf, cannot be nan, subnormal
+ // could be inf, cannot be nan, subnormal
+ const float maxv = std::max(newVolume, *pPrevVolume);
if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
&& maxv + inc != maxv) { // inc must make forward progress
@@ -607,9 +519,8 @@
void AudioMixer::setParameter(int name, int target, int param, void *value)
{
- name -= TRACK0;
- ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
- track_t& track = mState.tracks[name];
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ const std::shared_ptr<Track> &track = mTracks[name];
int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
@@ -621,33 +532,33 @@
case CHANNEL_MASK: {
const audio_channel_mask_t trackChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
- if (setChannelMasks(name, trackChannelMask, track.mMixerChannelMask)) {
+ if (setChannelMasks(name, trackChannelMask, track->mMixerChannelMask)) {
ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
- invalidateState(1 << name);
+ invalidate();
}
} break;
case MAIN_BUFFER:
- if (track.mainBuffer != valueBuf) {
- track.mainBuffer = valueBuf;
+ if (track->mainBuffer != valueBuf) {
+ track->mainBuffer = valueBuf;
ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
- invalidateState(1 << name);
+ invalidate();
}
break;
case AUX_BUFFER:
- if (track.auxBuffer != valueBuf) {
- track.auxBuffer = valueBuf;
+ if (track->auxBuffer != valueBuf) {
+ track->auxBuffer = valueBuf;
ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
- invalidateState(1 << name);
+ invalidate();
}
break;
case FORMAT: {
audio_format_t format = static_cast<audio_format_t>(valueInt);
- if (track.mFormat != format) {
+ if (track->mFormat != format) {
ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
- track.mFormat = format;
+ track->mFormat = format;
ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
- track.prepareForReformat();
- invalidateState(1 << name);
+ track->prepareForReformat();
+ invalidate();
}
} break;
// FIXME do we want to support setting the downmix type from AudioFlinger?
@@ -656,17 +567,17 @@
break */
case MIXER_FORMAT: {
audio_format_t format = static_cast<audio_format_t>(valueInt);
- if (track.mMixerFormat != format) {
- track.mMixerFormat = format;
+ if (track->mMixerFormat != format) {
+ track->mMixerFormat = format;
ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
}
} break;
case MIXER_CHANNEL_MASK: {
const audio_channel_mask_t mixerChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
- if (setChannelMasks(name, track.channelMask, mixerChannelMask)) {
+ if (setChannelMasks(name, track->channelMask, mixerChannelMask)) {
ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
- invalidateState(1 << name);
+ invalidate();
}
} break;
default:
@@ -678,21 +589,20 @@
switch (param) {
case SAMPLE_RATE:
ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
- if (track.setResampler(uint32_t(valueInt), mSampleRate)) {
+ if (track->setResampler(uint32_t(valueInt), mSampleRate)) {
ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
uint32_t(valueInt));
- invalidateState(1 << name);
+ invalidate();
}
break;
case RESET:
- track.resetResampler();
- invalidateState(1 << name);
+ track->resetResampler();
+ invalidate();
break;
case REMOVE:
- delete track.resampler;
- track.resampler = NULL;
- track.sampleRate = mSampleRate;
- invalidateState(1 << name);
+ track->mResampler.reset(nullptr);
+ track->sampleRate = mSampleRate;
+ invalidate();
break;
default:
LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
@@ -704,26 +614,28 @@
switch (param) {
case AUXLEVEL:
if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
- target == RAMP_VOLUME ? mState.frameCount : 0,
- &track.auxLevel, &track.prevAuxLevel, &track.auxInc,
- &track.mAuxLevel, &track.mPrevAuxLevel, &track.mAuxInc)) {
+ target == RAMP_VOLUME ? mFrameCount : 0,
+ &track->auxLevel, &track->prevAuxLevel, &track->auxInc,
+ &track->mAuxLevel, &track->mPrevAuxLevel, &track->mAuxInc)) {
ALOGV("setParameter(%s, AUXLEVEL: %04x)",
- target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track.auxLevel);
- invalidateState(1 << name);
+ target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track->auxLevel);
+ invalidate();
}
break;
default:
if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
- target == RAMP_VOLUME ? mState.frameCount : 0,
- &track.volume[param - VOLUME0], &track.prevVolume[param - VOLUME0],
- &track.volumeInc[param - VOLUME0],
- &track.mVolume[param - VOLUME0], &track.mPrevVolume[param - VOLUME0],
- &track.mVolumeInc[param - VOLUME0])) {
+ target == RAMP_VOLUME ? mFrameCount : 0,
+ &track->volume[param - VOLUME0],
+ &track->prevVolume[param - VOLUME0],
+ &track->volumeInc[param - VOLUME0],
+ &track->mVolume[param - VOLUME0],
+ &track->mPrevVolume[param - VOLUME0],
+ &track->mVolumeInc[param - VOLUME0])) {
ALOGV("setParameter(%s, VOLUME%d: %04x)",
target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
- track.volume[param - VOLUME0]);
- invalidateState(1 << name);
+ track->volume[param - VOLUME0]);
+ invalidate();
}
} else {
LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
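The RESAMPLE and VOLUME hunks above show the two calling conventions hidden inside setParameter(): SAMPLE_RATE arrives encoded in the pointer argument itself (it is read back through valueInt), while volume targets are read through a float pointer. A hedged usage sketch; the include path, track name and values are assumptions for illustration:

    #include <cstdint>
    #include <media/AudioMixer.h>   // assumed include path

    using namespace android;

    // Resample a 44.1 kHz track to the device rate and ramp its volume to unity
    // over the next process() call.
    void configureTrack(AudioMixer& mixer, int name) {
        mixer.setParameter(name, AudioMixer::RESAMPLE, AudioMixer::SAMPLE_RATE,
                           reinterpret_cast<void*>(static_cast<std::uintptr_t>(44100)));

        float volume = 1.0f;   // read through reinterpret_cast<float*> on the other side
        mixer.setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &volume);
        mixer.setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &volume);
    }
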
@@ -736,16 +648,16 @@
const AudioPlaybackRate *playbackRate =
reinterpret_cast<AudioPlaybackRate*>(value);
ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
- "bad parameters speed %f, pitch %f",playbackRate->mSpeed,
- playbackRate->mPitch);
- if (track.setPlaybackRate(*playbackRate)) {
+ "bad parameters speed %f, pitch %f",
+ playbackRate->mSpeed, playbackRate->mPitch);
+ if (track->setPlaybackRate(*playbackRate)) {
ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
"%f %f %d %d",
playbackRate->mSpeed,
playbackRate->mPitch,
playbackRate->mStretchMode,
playbackRate->mFallbackMode);
- // invalidateState(1 << name);
+ // invalidate(); (should not require reconfigure)
}
} break;
default:
@@ -758,12 +670,12 @@
}
}
-bool AudioMixer::track_t::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
+bool AudioMixer::Track::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
{
- if (trackSampleRate != devSampleRate || resampler != NULL) {
+ if (trackSampleRate != devSampleRate || mResampler.get() != nullptr) {
if (sampleRate != trackSampleRate) {
sampleRate = trackSampleRate;
- if (resampler == NULL) {
+ if (mResampler.get() == nullptr) {
ALOGV("Creating resampler from track %d Hz to device %d Hz",
trackSampleRate, devSampleRate);
AudioResampler::src_quality quality;
@@ -779,15 +691,15 @@
// TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
// but if none exists, it is the channel count (1 for mono).
- const int resamplerChannelCount = downmixerBufferProvider != NULL
+ const int resamplerChannelCount = mDownmixerBufferProvider.get() != nullptr
? mMixerChannelCount : channelCount;
ALOGVV("Creating resampler:"
" format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
- resampler = AudioResampler::create(
+ mResampler.reset(AudioResampler::create(
mMixerInFormat,
resamplerChannelCount,
- devSampleRate, quality);
+ devSampleRate, quality));
}
return true;
}
@@ -795,25 +707,25 @@
return false;
}
-bool AudioMixer::track_t::setPlaybackRate(const AudioPlaybackRate &playbackRate)
+bool AudioMixer::Track::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
- if ((mTimestretchBufferProvider == NULL &&
+ if ((mTimestretchBufferProvider.get() == nullptr &&
fabs(playbackRate.mSpeed - mPlaybackRate.mSpeed) < AUDIO_TIMESTRETCH_SPEED_MIN_DELTA &&
fabs(playbackRate.mPitch - mPlaybackRate.mPitch) < AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) ||
isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
return false;
}
mPlaybackRate = playbackRate;
- if (mTimestretchBufferProvider == NULL) {
+ if (mTimestretchBufferProvider.get() == nullptr) {
// TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
// but if none exists, it is the channel count (1 for mono).
- const int timestretchChannelCount = downmixerBufferProvider != NULL
+ const int timestretchChannelCount = mDownmixerBufferProvider.get() != nullptr
? mMixerChannelCount : channelCount;
- mTimestretchBufferProvider = new TimestretchBufferProvider(timestretchChannelCount,
- mMixerInFormat, sampleRate, playbackRate);
+ mTimestretchBufferProvider.reset(new TimestretchBufferProvider(timestretchChannelCount,
+ mMixerInFormat, sampleRate, playbackRate));
reconfigureBufferProviders();
} else {
- reinterpret_cast<TimestretchBufferProvider*>(mTimestretchBufferProvider)
+ static_cast<TimestretchBufferProvider*>(mTimestretchBufferProvider.get())
->setPlaybackRate(playbackRate);
}
return true;
@@ -832,7 +744,7 @@
*
* There is a bit of duplicated code here, but it keeps backward compatibility.
*/
-inline void AudioMixer::track_t::adjustVolumeRamp(bool aux, bool useFloat)
+inline void AudioMixer::Track::adjustVolumeRamp(bool aux, bool useFloat)
{
if (useFloat) {
for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
@@ -861,113 +773,98 @@
}
}
}
- /* TODO: aux is always integer regardless of output buffer type */
+
if (aux) {
- if (((auxInc>0) && (((prevAuxLevel+auxInc)>>16) >= auxLevel)) ||
- ((auxInc<0) && (((prevAuxLevel+auxInc)>>16) <= auxLevel))) {
+#ifdef FLOAT_AUX
+ if (useFloat) {
+ if ((mAuxInc > 0.f && mPrevAuxLevel + mAuxInc >= mAuxLevel) ||
+ (mAuxInc < 0.f && mPrevAuxLevel + mAuxInc <= mAuxLevel)) {
+ auxInc = 0;
+ prevAuxLevel = auxLevel << 16;
+ mAuxInc = 0.f;
+ mPrevAuxLevel = mAuxLevel;
+ }
+ } else
+#endif
+ if ((auxInc > 0 && ((prevAuxLevel + auxInc) >> 16) >= auxLevel) ||
+ (auxInc < 0 && ((prevAuxLevel + auxInc) >> 16) <= auxLevel)) {
auxInc = 0;
prevAuxLevel = auxLevel << 16;
- mAuxInc = 0.;
+ mAuxInc = 0.f;
mPrevAuxLevel = mAuxLevel;
- } else {
- //ALOGV("aux ramp: %d %d %d", auxLevel << 16, prevAuxLevel, auxInc);
}
}
}
size_t AudioMixer::getUnreleasedFrames(int name) const
{
- name -= TRACK0;
- if (uint32_t(name) < MAX_NUM_TRACKS) {
- return mState.tracks[name].getUnreleasedFrames();
+ const auto it = mTracks.find(name);
+ if (it != mTracks.end()) {
+ return it->second->getUnreleasedFrames();
}
return 0;
}
void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider)
{
- name -= TRACK0;
- ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+ LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+ const std::shared_ptr<Track> &track = mTracks[name];
- if (mState.tracks[name].mInputBufferProvider == bufferProvider) {
+ if (track->mInputBufferProvider == bufferProvider) {
return; // don't reset any buffer providers if identical.
}
- if (mState.tracks[name].mReformatBufferProvider != NULL) {
- mState.tracks[name].mReformatBufferProvider->reset();
- } else if (mState.tracks[name].downmixerBufferProvider != NULL) {
- mState.tracks[name].downmixerBufferProvider->reset();
- } else if (mState.tracks[name].mPostDownmixReformatBufferProvider != NULL) {
- mState.tracks[name].mPostDownmixReformatBufferProvider->reset();
- } else if (mState.tracks[name].mTimestretchBufferProvider != NULL) {
- mState.tracks[name].mTimestretchBufferProvider->reset();
+ if (track->mReformatBufferProvider.get() != nullptr) {
+ track->mReformatBufferProvider->reset();
+ } else if (track->mDownmixerBufferProvider != nullptr) {
+ track->mDownmixerBufferProvider->reset();
+ } else if (track->mPostDownmixReformatBufferProvider.get() != nullptr) {
+ track->mPostDownmixReformatBufferProvider->reset();
+ } else if (track->mTimestretchBufferProvider.get() != nullptr) {
+ track->mTimestretchBufferProvider->reset();
}
- mState.tracks[name].mInputBufferProvider = bufferProvider;
- mState.tracks[name].reconfigureBufferProviders();
+ track->mInputBufferProvider = bufferProvider;
+ track->reconfigureBufferProviders();
}
-
-void AudioMixer::process()
+void AudioMixer::process__validate()
{
- mState.hook(&mState);
-}
-
-
-void AudioMixer::process__validate(state_t* state)
-{
- ALOGW_IF(!state->needsChanged,
- "in process__validate() but nothing's invalid");
-
- uint32_t changed = state->needsChanged;
- state->needsChanged = 0; // clear the validation flag
-
- // recompute which tracks are enabled / disabled
- uint32_t enabled = 0;
- uint32_t disabled = 0;
- while (changed) {
- const int i = 31 - __builtin_clz(changed);
- const uint32_t mask = 1<<i;
- changed &= ~mask;
- track_t& t = state->tracks[i];
- (t.enabled ? enabled : disabled) |= mask;
- }
- state->enabledTracks &= ~disabled;
- state->enabledTracks |= enabled;
-
- // compute everything we need...
- int countActiveTracks = 0;
// TODO: fix all16BitsStereNoResample logic to
// either properly handle muted tracks (it should ignore them)
// or remove altogether as an obsolete optimization.
bool all16BitsStereoNoResample = true;
bool resampling = false;
bool volumeRamp = false;
- uint32_t en = state->enabledTracks;
- while (en) {
- const int i = 31 - __builtin_clz(en);
- en &= ~(1<<i);
- countActiveTracks++;
- track_t& t = state->tracks[i];
+ mEnabled.clear();
+ mGroups.clear();
+ for (const auto &pair : mTracks) {
+ const int name = pair.first;
+ const std::shared_ptr<Track> &t = pair.second;
+ if (!t->enabled) continue;
+
+ mEnabled.emplace_back(name); // we add to mEnabled in order of name.
+ mGroups[t->mainBuffer].emplace_back(name); // mGroups also in order of name.
+
uint32_t n = 0;
// FIXME can overflow (mask is only 3 bits)
- n |= NEEDS_CHANNEL_1 + t.channelCount - 1;
- if (t.doesResample()) {
+ n |= NEEDS_CHANNEL_1 + t->channelCount - 1;
+ if (t->doesResample()) {
n |= NEEDS_RESAMPLE;
}
- if (t.auxLevel != 0 && t.auxBuffer != NULL) {
+ if (t->auxLevel != 0 && t->auxBuffer != NULL) {
n |= NEEDS_AUX;
}
- if (t.volumeInc[0]|t.volumeInc[1]) {
+ if (t->volumeInc[0]|t->volumeInc[1]) {
volumeRamp = true;
- } else if (!t.doesResample() && t.volumeRL == 0) {
+ } else if (!t->doesResample() && t->volumeRL == 0) {
n |= NEEDS_MUTE;
}
- t.needs = n;
+ t->needs = n;
if (n & NEEDS_MUTE) {
- t.hook = track__nop;
+ t->hook = &Track::track__nop;
} else {
if (n & NEEDS_AUX) {
all16BitsStereoNoResample = false;
@@ -975,23 +872,23 @@
if (n & NEEDS_RESAMPLE) {
all16BitsStereoNoResample = false;
resampling = true;
- t.hook = getTrackHook(TRACKTYPE_RESAMPLE, t.mMixerChannelCount,
- t.mMixerInFormat, t.mMixerFormat);
+ t->hook = Track::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
+ t->mMixerInFormat, t->mMixerFormat);
ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
"Track %d needs downmix + resample", i);
} else {
if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
- t.hook = getTrackHook(
- (t.mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO // TODO: MONO_HACK
- && t.channelMask == AUDIO_CHANNEL_OUT_MONO)
+ t->hook = Track::getTrackHook(
+ (t->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO // TODO: MONO_HACK
+ && t->channelMask == AUDIO_CHANNEL_OUT_MONO)
? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
- t.mMixerChannelCount,
- t.mMixerInFormat, t.mMixerFormat);
+ t->mMixerChannelCount,
+ t->mMixerInFormat, t->mMixerFormat);
all16BitsStereoNoResample = false;
}
if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
- t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, t.mMixerChannelCount,
- t.mMixerInFormat, t.mMixerFormat);
+ t->hook = Track::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
+ t->mMixerInFormat, t->mMixerFormat);
ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
"Track %d needs downmix", i);
}
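In the hunks above the per-track and top-level hooks become pointers to member functions (t->hook = &Track::track__nop, mHook = &AudioMixer::process__nop) rather than free functions taking a state pointer. A small standalone reminder of how such hooks are stored and invoked (class and method names invented for the example):

    #include <cstdio>

    class Mixer {
    public:
        void selectHook(bool active) {
            mHook = active ? &Mixer::processActive : &Mixer::processNop;
        }
        void process() { (this->*mHook)(); }   // invoke through the member pointer

    private:
        void processNop()    { std::printf("nop\n"); }
        void processActive() { std::printf("mixing\n"); }

        using process_hook_t = void (Mixer::*)();
        process_hook_t mHook = &Mixer::processNop;
    };

    int main() {
        Mixer m;
        m.process();          // nop
        m.selectHook(true);
        m.process();          // mixing
        return 0;
    }
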
@@ -1000,137 +897,125 @@
}
// select the processing hooks
- state->hook = process__nop;
- if (countActiveTracks > 0) {
+ mHook = &AudioMixer::process__nop;
+ if (mEnabled.size() > 0) {
if (resampling) {
- if (!state->outputTemp) {
- state->outputTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount];
+ if (mOutputTemp.get() == nullptr) {
+ mOutputTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
}
- if (!state->resampleTemp) {
- state->resampleTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount];
+ if (mResampleTemp.get() == nullptr) {
+ mResampleTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
}
- state->hook = process__genericResampling;
+ mHook = &AudioMixer::process__genericResampling;
} else {
- if (state->outputTemp) {
- delete [] state->outputTemp;
- state->outputTemp = NULL;
- }
- if (state->resampleTemp) {
- delete [] state->resampleTemp;
- state->resampleTemp = NULL;
- }
- state->hook = process__genericNoResampling;
+ // we keep temp arrays around.
+ mHook = &AudioMixer::process__genericNoResampling;
if (all16BitsStereoNoResample && !volumeRamp) {
- if (countActiveTracks == 1) {
- const int i = 31 - __builtin_clz(state->enabledTracks);
- track_t& t = state->tracks[i];
- if ((t.needs & NEEDS_MUTE) == 0) {
+ if (mEnabled.size() == 1) {
+ const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
+ if ((t->needs & NEEDS_MUTE) == 0) {
// The check prevents a muted track from acquiring a process hook.
//
// This is dangerous if the track is MONO as that requires
// special case handling due to implicit channel duplication.
// Stereo or Multichannel should actually be fine here.
- state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
- t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
+ mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+ t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
}
}
}
}
}
- ALOGV("mixer configuration change: %d activeTracks (%08x) "
+ ALOGV("mixer configuration change: %zu "
"all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d",
- countActiveTracks, state->enabledTracks,
- all16BitsStereoNoResample, resampling, volumeRamp);
+ mEnabled.size(), all16BitsStereoNoResample, resampling, volumeRamp);
- state->hook(state);
+ process();
// Now that the volume ramp has been done, set optimal state and
// track hooks for subsequent mixer process
- if (countActiveTracks > 0) {
+ if (mEnabled.size() > 0) {
bool allMuted = true;
- uint32_t en = state->enabledTracks;
- while (en) {
- const int i = 31 - __builtin_clz(en);
- en &= ~(1<<i);
- track_t& t = state->tracks[i];
- if (!t.doesResample() && t.volumeRL == 0) {
- t.needs |= NEEDS_MUTE;
- t.hook = track__nop;
+
+ for (const int name : mEnabled) {
+ const std::shared_ptr<Track> &t = mTracks[name];
+ if (!t->doesResample() && t->volumeRL == 0) {
+ t->needs |= NEEDS_MUTE;
+ t->hook = &Track::track__nop;
} else {
allMuted = false;
}
}
if (allMuted) {
- state->hook = process__nop;
+ mHook = &AudioMixer::process__nop;
} else if (all16BitsStereoNoResample) {
- if (countActiveTracks == 1) {
- const int i = 31 - __builtin_clz(state->enabledTracks);
- track_t& t = state->tracks[i];
+ if (mEnabled.size() == 1) {
+ //const int i = 31 - __builtin_clz(enabledTracks);
+ const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
// Muted single tracks handled by allMuted above.
- state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
- t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
+ mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+ t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
}
}
}
}
-
-void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount,
- int32_t* temp, int32_t* aux)
+void AudioMixer::Track::track__genericResample(
+ int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
{
ALOGVV("track__genericResample\n");
- t->resampler->setSampleRate(t->sampleRate);
+ mResampler->setSampleRate(sampleRate);
// ramp gain - resample to temp buffer and scale/mix in 2nd step
if (aux != NULL) {
// always resample with unity gain when sending to auxiliary buffer to be able
// to apply send level after resampling
- t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
- memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(int32_t));
- t->resampler->resample(temp, outFrameCount, t->bufferProvider);
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
- volumeRampStereo(t, out, outFrameCount, temp, aux);
+ mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+ memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(int32_t));
+ mResampler->resample(temp, outFrameCount, bufferProvider);
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+ volumeRampStereo(out, outFrameCount, temp, aux);
} else {
- volumeStereo(t, out, outFrameCount, temp, aux);
+ volumeStereo(out, outFrameCount, temp, aux);
}
} else {
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
- t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+ mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
- t->resampler->resample(temp, outFrameCount, t->bufferProvider);
- volumeRampStereo(t, out, outFrameCount, temp, aux);
+ mResampler->resample(temp, outFrameCount, bufferProvider);
+ volumeRampStereo(out, outFrameCount, temp, aux);
}
// constant gain
else {
- t->resampler->setVolume(t->mVolume[0], t->mVolume[1]);
- t->resampler->resample(out, outFrameCount, t->bufferProvider);
+ mResampler->setVolume(mVolume[0], mVolume[1]);
+ mResampler->resample(out, outFrameCount, bufferProvider);
}
}
}
-void AudioMixer::track__nop(track_t* t __unused, int32_t* out __unused,
+void AudioMixer::Track::track__nop(int32_t* out __unused,
size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
{
}
-void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
- int32_t* aux)
+void AudioMixer::Track::volumeRampStereo(
+ int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
{
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
//ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
// ramp volume
if (CC_UNLIKELY(aux != NULL)) {
- int32_t va = t->prevAuxLevel;
- const int32_t vaInc = t->auxInc;
+ int32_t va = prevAuxLevel;
+ const int32_t vaInc = auxInc;
int32_t l;
int32_t r;
@@ -1144,7 +1029,7 @@
vr += vrInc;
va += vaInc;
} while (--frameCount);
- t->prevAuxLevel = va;
+ prevAuxLevel = va;
} else {
do {
*out++ += (vl >> 16) * (*temp++ >> 12);
@@ -1153,19 +1038,19 @@
vr += vrInc;
} while (--frameCount);
}
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->adjustVolumeRamp(aux != NULL);
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ adjustVolumeRamp(aux != NULL);
}
-void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
- int32_t* aux)
+void AudioMixer::Track::volumeStereo(
+ int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
{
- const int16_t vl = t->volume[0];
- const int16_t vr = t->volume[1];
+ const int16_t vl = volume[0];
+ const int16_t vr = volume[1];
if (CC_UNLIKELY(aux != NULL)) {
- const int16_t va = t->auxLevel;
+ const int16_t va = auxLevel;
do {
int16_t l = (int16_t)(*temp++ >> 12);
int16_t r = (int16_t)(*temp++ >> 12);
@@ -1187,25 +1072,25 @@
}
}
-void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount,
- int32_t* temp __unused, int32_t* aux)
+void AudioMixer::Track::track__16BitsStereo(
+ int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
{
ALOGVV("track__16BitsStereo\n");
- const int16_t *in = static_cast<const int16_t *>(t->in);
+ const int16_t *in = static_cast<const int16_t *>(mIn);
if (CC_UNLIKELY(aux != NULL)) {
int32_t l;
int32_t r;
// ramp gain
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- int32_t va = t->prevAuxLevel;
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
- const int32_t vaInc = t->auxInc;
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ int32_t va = prevAuxLevel;
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
+ const int32_t vaInc = auxInc;
// ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
@@ -1219,16 +1104,16 @@
va += vaInc;
} while (--frameCount);
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->prevAuxLevel = va;
- t->adjustVolumeRamp(true);
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ prevAuxLevel = va;
+ adjustVolumeRamp(true);
}
// constant gain
else {
- const uint32_t vrl = t->volumeRL;
- const int16_t va = (int16_t)t->auxLevel;
+ const uint32_t vrl = volumeRL;
+ const int16_t va = (int16_t)auxLevel;
do {
uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
@@ -1242,14 +1127,14 @@
}
} else {
// ramp gain
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
// ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
@@ -1259,14 +1144,14 @@
vr += vrInc;
} while (--frameCount);
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->adjustVolumeRamp(false);
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ adjustVolumeRamp(false);
}
// constant gain
else {
- const uint32_t vrl = t->volumeRL;
+ const uint32_t vrl = volumeRL;
do {
uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
in += 2;
@@ -1276,27 +1161,27 @@
} while (--frameCount);
}
}
- t->in = in;
+ mIn = in;
}
-void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount,
- int32_t* temp __unused, int32_t* aux)
+void AudioMixer::Track::track__16BitsMono(
+ int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
{
ALOGVV("track__16BitsMono\n");
- const int16_t *in = static_cast<int16_t const *>(t->in);
+ const int16_t *in = static_cast<int16_t const *>(mIn);
if (CC_UNLIKELY(aux != NULL)) {
// ramp gain
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- int32_t va = t->prevAuxLevel;
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
- const int32_t vaInc = t->auxInc;
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ int32_t va = prevAuxLevel;
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
+ const int32_t vaInc = auxInc;
// ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
@@ -1309,16 +1194,16 @@
va += vaInc;
} while (--frameCount);
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->prevAuxLevel = va;
- t->adjustVolumeRamp(true);
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ prevAuxLevel = va;
+ adjustVolumeRamp(true);
}
// constant gain
else {
- const int16_t vl = t->volume[0];
- const int16_t vr = t->volume[1];
- const int16_t va = (int16_t)t->auxLevel;
+ const int16_t vl = volume[0];
+ const int16_t vr = volume[1];
+ const int16_t va = (int16_t)auxLevel;
do {
int16_t l = *in++;
out[0] = mulAdd(l, vl, out[0]);
@@ -1330,14 +1215,14 @@
}
} else {
// ramp gain
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
+ if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+ int32_t vl = prevVolume[0];
+ int32_t vr = prevVolume[1];
+ const int32_t vlInc = volumeInc[0];
+ const int32_t vrInc = volumeInc[1];
// ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
@@ -1348,14 +1233,14 @@
vr += vrInc;
} while (--frameCount);
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->adjustVolumeRamp(false);
+ prevVolume[0] = vl;
+ prevVolume[1] = vr;
+ adjustVolumeRamp(false);
}
// constant gain
else {
- const int16_t vl = t->volume[0];
- const int16_t vr = t->volume[1];
+ const int16_t vl = volume[0];
+ const int16_t vr = volume[1];
do {
int16_t l = *in++;
out[0] = mulAdd(l, vl, out[0]);
@@ -1364,273 +1249,213 @@
} while (--frameCount);
}
}
- t->in = in;
+ mIn = in;
}
// no-op case
-void AudioMixer::process__nop(state_t* state)
+void AudioMixer::process__nop()
{
ALOGVV("process__nop\n");
- uint32_t e0 = state->enabledTracks;
- while (e0) {
+
+ for (const auto &pair : mGroups) {
// process by group of tracks with same output buffer to
// avoid multiple memset() on same buffer
- uint32_t e1 = e0, e2 = e0;
- int i = 31 - __builtin_clz(e1);
- {
- track_t& t1 = state->tracks[i];
- e2 &= ~(1<<i);
- while (e2) {
- i = 31 - __builtin_clz(e2);
- e2 &= ~(1<<i);
- track_t& t2 = state->tracks[i];
- if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
- e1 &= ~(1<<i);
- }
- }
- e0 &= ~(e1);
+ const auto &group = pair.second;
- memset(t1.mainBuffer, 0, state->frameCount * t1.mMixerChannelCount
- * audio_bytes_per_sample(t1.mMixerFormat));
- }
+ const std::shared_ptr<Track> &t = mTracks[group[0]];
+ memset(t->mainBuffer, 0,
+ mFrameCount * t->mMixerChannelCount
+ * audio_bytes_per_sample(t->mMixerFormat));
- while (e1) {
- i = 31 - __builtin_clz(e1);
- e1 &= ~(1<<i);
- {
- track_t& t3 = state->tracks[i];
- size_t outFrames = state->frameCount;
- while (outFrames) {
- t3.buffer.frameCount = outFrames;
- t3.bufferProvider->getNextBuffer(&t3.buffer);
- if (t3.buffer.raw == NULL) break;
- outFrames -= t3.buffer.frameCount;
- t3.bufferProvider->releaseBuffer(&t3.buffer);
- }
+ // now consume data
+ for (const int name : group) {
+ const std::shared_ptr<Track> &t = mTracks[name];
+ size_t outFrames = mFrameCount;
+ while (outFrames) {
+ t->buffer.frameCount = outFrames;
+ t->bufferProvider->getNextBuffer(&t->buffer);
+ if (t->buffer.raw == NULL) break;
+ outFrames -= t->buffer.frameCount;
+ t->bufferProvider->releaseBuffer(&t->buffer);
}
}
}
}
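
The loops above iterate mGroups, which collects the enabled tracks by their shared output main buffer so each buffer is cleared and converted once per group rather than once per track. A minimal sketch of that grouping, assuming a map keyed by the main-buffer pointer (the exact container type of mGroups is not shown in this excerpt):

    #include <map>
    #include <vector>

    // Assumed shape of the grouping used by process__nop() and the generic hooks:
    // key = output main buffer, value = names of the tracks mixed into it.
    std::map<void* /* mainBuffer */, std::vector<int /* name */>> groups;

    void addEnabledTrack(void* mainBuffer, int name) {
        groups[mainBuffer].push_back(name);  // tracks sharing a buffer are processed together
    }
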
// generic code without resampling
-void AudioMixer::process__genericNoResampling(state_t* state)
+void AudioMixer::process__genericNoResampling()
{
ALOGVV("process__genericNoResampling\n");
int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
- // acquire each track's buffer
- uint32_t enabledTracks = state->enabledTracks;
- uint32_t e0 = enabledTracks;
- while (e0) {
- const int i = 31 - __builtin_clz(e0);
- e0 &= ~(1<<i);
- track_t& t = state->tracks[i];
- t.buffer.frameCount = state->frameCount;
- t.bufferProvider->getNextBuffer(&t.buffer);
- t.frameCount = t.buffer.frameCount;
- t.in = t.buffer.raw;
- }
+ for (const auto &pair : mGroups) {
+ // process by group of tracks with same output main buffer to
+ // avoid multiple memset() on same buffer
+ const auto &group = pair.second;
- e0 = enabledTracks;
- while (e0) {
- // process by group of tracks with same output buffer to
- // optimize cache use
- uint32_t e1 = e0, e2 = e0;
- int j = 31 - __builtin_clz(e1);
- track_t& t1 = state->tracks[j];
- e2 &= ~(1<<j);
- while (e2) {
- j = 31 - __builtin_clz(e2);
- e2 &= ~(1<<j);
- track_t& t2 = state->tracks[j];
- if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
- e1 &= ~(1<<j);
- }
+ // acquire buffer
+ for (const int name : group) {
+ const std::shared_ptr<Track> &t = mTracks[name];
+ t->buffer.frameCount = mFrameCount;
+ t->bufferProvider->getNextBuffer(&t->buffer);
+ t->frameCount = t->buffer.frameCount;
+ t->mIn = t->buffer.raw;
}
- e0 &= ~(e1);
- // this assumes output 16 bits stereo, no resampling
- int32_t *out = t1.mainBuffer;
+
+ int32_t *out = (int32_t *)pair.first;
size_t numFrames = 0;
do {
+ const size_t frameCount = std::min((size_t)BLOCKSIZE, mFrameCount - numFrames);
memset(outTemp, 0, sizeof(outTemp));
- e2 = e1;
- while (e2) {
- const int i = 31 - __builtin_clz(e2);
- e2 &= ~(1<<i);
- track_t& t = state->tracks[i];
- size_t outFrames = BLOCKSIZE;
+ for (const int name : group) {
+ const std::shared_ptr<Track> &t = mTracks[name];
int32_t *aux = NULL;
- if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
- aux = t.auxBuffer + numFrames;
+ if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
+ aux = t->auxBuffer + numFrames;
}
- while (outFrames) {
- // t.in == NULL can happen if the track was flushed just after having
+ for (int outFrames = frameCount; outFrames > 0; ) {
+ // t->in == nullptr can happen if the track was flushed just after having
// been enabled for mixing.
- if (t.in == NULL) {
- enabledTracks &= ~(1<<i);
- e1 &= ~(1<<i);
+ if (t->mIn == nullptr) {
break;
}
- size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
+ size_t inFrames = (t->frameCount > outFrames)?outFrames:t->frameCount;
if (inFrames > 0) {
- t.hook(&t, outTemp + (BLOCKSIZE - outFrames) * t.mMixerChannelCount,
- inFrames, state->resampleTemp, aux);
- t.frameCount -= inFrames;
+ (t.get()->*t->hook)(
+ outTemp + (frameCount - outFrames) * t->mMixerChannelCount,
+ inFrames, mResampleTemp.get() /* naked ptr */, aux);
+ t->frameCount -= inFrames;
outFrames -= inFrames;
if (CC_UNLIKELY(aux != NULL)) {
aux += inFrames;
}
}
- if (t.frameCount == 0 && outFrames) {
- t.bufferProvider->releaseBuffer(&t.buffer);
- t.buffer.frameCount = (state->frameCount - numFrames) -
- (BLOCKSIZE - outFrames);
- t.bufferProvider->getNextBuffer(&t.buffer);
- t.in = t.buffer.raw;
- if (t.in == NULL) {
- enabledTracks &= ~(1<<i);
- e1 &= ~(1<<i);
+ if (t->frameCount == 0 && outFrames) {
+ t->bufferProvider->releaseBuffer(&t->buffer);
+ t->buffer.frameCount = (mFrameCount - numFrames) -
+ (frameCount - outFrames);
+ t->bufferProvider->getNextBuffer(&t->buffer);
+ t->mIn = t->buffer.raw;
+ if (t->mIn == nullptr) {
break;
}
- t.frameCount = t.buffer.frameCount;
+ t->frameCount = t->buffer.frameCount;
}
}
}
- convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat,
- BLOCKSIZE * t1.mMixerChannelCount);
+ const std::shared_ptr<Track> &t1 = mTracks[group[0]];
+ convertMixerFormat(out, t1->mMixerFormat, outTemp, t1->mMixerInFormat,
+ frameCount * t1->mMixerChannelCount);
// TODO: fix ugly casting due to choice of out pointer type
out = reinterpret_cast<int32_t*>((uint8_t*)out
- + BLOCKSIZE * t1.mMixerChannelCount
- * audio_bytes_per_sample(t1.mMixerFormat));
- numFrames += BLOCKSIZE;
- } while (numFrames < state->frameCount);
- }
+ + frameCount * t1->mMixerChannelCount
+ * audio_bytes_per_sample(t1->mMixerFormat));
+ numFrames += frameCount;
+ } while (numFrames < mFrameCount);
- // release each track's buffer
- e0 = enabledTracks;
- while (e0) {
- const int i = 31 - __builtin_clz(e0);
- e0 &= ~(1<<i);
- track_t& t = state->tracks[i];
- t.bufferProvider->releaseBuffer(&t.buffer);
+ // release each track's buffer
+ for (const int name : group) {
+ const std::shared_ptr<Track> &t = mTracks[name];
+ t->bufferProvider->releaseBuffer(&t->buffer);
+ }
}
}
-
// generic code with resampling
-void AudioMixer::process__genericResampling(state_t* state)
+void AudioMixer::process__genericResampling()
{
ALOGVV("process__genericResampling\n");
- // this const just means that local variable outTemp doesn't change
- int32_t* const outTemp = state->outputTemp;
- size_t numFrames = state->frameCount;
+ int32_t * const outTemp = mOutputTemp.get(); // naked ptr
+ size_t numFrames = mFrameCount;
- uint32_t e0 = state->enabledTracks;
- while (e0) {
- // process by group of tracks with same output buffer
- // to optimize cache use
- uint32_t e1 = e0, e2 = e0;
- int j = 31 - __builtin_clz(e1);
- track_t& t1 = state->tracks[j];
- e2 &= ~(1<<j);
- while (e2) {
- j = 31 - __builtin_clz(e2);
- e2 &= ~(1<<j);
- track_t& t2 = state->tracks[j];
- if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
- e1 &= ~(1<<j);
- }
- }
- e0 &= ~(e1);
- int32_t *out = t1.mainBuffer;
- memset(outTemp, 0, sizeof(*outTemp) * t1.mMixerChannelCount * state->frameCount);
- while (e1) {
- const int i = 31 - __builtin_clz(e1);
- e1 &= ~(1<<i);
- track_t& t = state->tracks[i];
+ for (const auto &pair : mGroups) {
+ const auto &group = pair.second;
+ const std::shared_ptr<Track> &t1 = mTracks[group[0]];
+
+ // clear temp buffer
+ memset(outTemp, 0, sizeof(*outTemp) * t1->mMixerChannelCount * mFrameCount);
+ for (const int name : group) {
+ const std::shared_ptr<Track> &t = mTracks[name];
int32_t *aux = NULL;
- if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
- aux = t.auxBuffer;
+ if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
+ aux = t->auxBuffer;
}
// this is a little goofy, on the resampling case we don't
// acquire/release the buffers because it's done by
// the resampler.
- if (t.needs & NEEDS_RESAMPLE) {
- t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
+ if (t->needs & NEEDS_RESAMPLE) {
+ (t.get()->*t->hook)(outTemp, numFrames, mResampleTemp.get() /* naked ptr */, aux);
} else {
size_t outFrames = 0;
while (outFrames < numFrames) {
- t.buffer.frameCount = numFrames - outFrames;
- t.bufferProvider->getNextBuffer(&t.buffer);
- t.in = t.buffer.raw;
- // t.in == NULL can happen if the track was flushed just after having
+ t->buffer.frameCount = numFrames - outFrames;
+ t->bufferProvider->getNextBuffer(&t->buffer);
+ t->mIn = t->buffer.raw;
+ // t->mIn == nullptr can happen if the track was flushed just after having
// been enabled for mixing.
- if (t.in == NULL) break;
+ if (t->mIn == nullptr) break;
- if (CC_UNLIKELY(aux != NULL)) {
- aux += outFrames;
- }
- t.hook(&t, outTemp + outFrames * t.mMixerChannelCount, t.buffer.frameCount,
- state->resampleTemp, aux);
- outFrames += t.buffer.frameCount;
- t.bufferProvider->releaseBuffer(&t.buffer);
+ (t.get()->*t->hook)(
+ outTemp + outFrames * t->mMixerChannelCount, t->buffer.frameCount,
+ mResampleTemp.get() /* naked ptr */,
+ aux != nullptr ? aux + outFrames : nullptr);
+ outFrames += t->buffer.frameCount;
+
+ t->bufferProvider->releaseBuffer(&t->buffer);
}
}
}
- convertMixerFormat(out, t1.mMixerFormat,
- outTemp, t1.mMixerInFormat, numFrames * t1.mMixerChannelCount);
+ convertMixerFormat(t1->mainBuffer, t1->mMixerFormat,
+ outTemp, t1->mMixerInFormat, numFrames * t1->mMixerChannelCount);
}
}
// one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
+void AudioMixer::process__oneTrack16BitsStereoNoResampling()
{
- ALOGVV("process__OneTrack16BitsStereoNoResampling\n");
- // This method is only called when state->enabledTracks has exactly
- // one bit set. The asserts below would verify this, but are commented out
- // since the whole point of this method is to optimize performance.
- //ALOG_ASSERT(0 != state->enabledTracks, "no tracks enabled");
- const int i = 31 - __builtin_clz(state->enabledTracks);
- //ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled");
- const track_t& t = state->tracks[i];
+ ALOGVV("process__oneTrack16BitsStereoNoResampling\n");
+ LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
+ "%zu != 1 tracks enabled", mEnabled.size());
+ const int name = mEnabled[0];
+ const std::shared_ptr<Track> &t = mTracks[name];
- AudioBufferProvider::Buffer& b(t.buffer);
+ AudioBufferProvider::Buffer& b(t->buffer);
- int32_t* out = t.mainBuffer;
+ int32_t* out = t->mainBuffer;
float *fout = reinterpret_cast<float*>(out);
- size_t numFrames = state->frameCount;
+ size_t numFrames = mFrameCount;
- const int16_t vl = t.volume[0];
- const int16_t vr = t.volume[1];
- const uint32_t vrl = t.volumeRL;
+ const int16_t vl = t->volume[0];
+ const int16_t vr = t->volume[1];
+ const uint32_t vrl = t->volumeRL;
while (numFrames) {
b.frameCount = numFrames;
- t.bufferProvider->getNextBuffer(&b);
+ t->bufferProvider->getNextBuffer(&b);
const int16_t *in = b.i16;
// in == NULL can happen if the track was flushed just after having
// been enabled for mixing.
if (in == NULL || (((uintptr_t)in) & 3)) {
- if ( AUDIO_FORMAT_PCM_FLOAT == t.mMixerFormat ) {
+ if ( AUDIO_FORMAT_PCM_FLOAT == t->mMixerFormat ) {
memset((char*)fout, 0, numFrames
- * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
+ * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
} else {
memset((char*)out, 0, numFrames
- * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
+ * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
}
ALOGE_IF((((uintptr_t)in) & 3),
- "process__OneTrack16BitsStereoNoResampling: misaligned buffer"
+ "process__oneTrack16BitsStereoNoResampling: misaligned buffer"
" %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
- in, i, t.channelCount, t.needs, vrl, t.mVolume[0], t.mVolume[1]);
+ in, name, t->channelCount, t->needs, vrl, t->mVolume[0], t->mVolume[1]);
return;
}
size_t outFrames = b.frameCount;
- switch (t.mMixerFormat) {
+ switch (t->mMixerFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
do {
uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
@@ -1668,10 +1493,10 @@
}
break;
default:
- LOG_ALWAYS_FATAL("bad mixer format: %d", t.mMixerFormat);
+ LOG_ALWAYS_FATAL("bad mixer format: %d", t->mMixerFormat);
}
numFrames -= b.frameCount;
- t.bufferProvider->releaseBuffer(&b);
+ t->bufferProvider->releaseBuffer(&b);
}
}
@@ -1694,7 +1519,7 @@
/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
+ * TA: int32_t (Q4.27) or float
*/
template <int MIXTYPE,
typename TO, typename TI, typename TV, typename TA, typename TAV>
@@ -1738,7 +1563,7 @@
/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
+ * TA: int32_t (Q4.27) or float
*/
template <int MIXTYPE,
typename TO, typename TI, typename TV, typename TA, typename TAV>
@@ -1778,34 +1603,46 @@
* ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards)
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
+ * TA: int32_t (Q4.27) or float
*/
template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
typename TO, typename TI, typename TA>
-void AudioMixer::volumeMix(TO *out, size_t outFrames,
- const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t)
+void AudioMixer::Track::volumeMix(TO *out, size_t outFrames,
+ const TI *in, TA *aux, bool ramp)
{
if (USEFLOATVOL) {
if (ramp) {
- volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
- t->mPrevVolume, t->mVolumeInc, &t->prevAuxLevel, t->auxInc);
+ volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+ mPrevVolume, mVolumeInc,
+#ifdef FLOAT_AUX
+ &mPrevAuxLevel, mAuxInc
+#else
+ &prevAuxLevel, auxInc
+#endif
+ );
if (ADJUSTVOL) {
- t->adjustVolumeRamp(aux != NULL, true);
+ adjustVolumeRamp(aux != NULL, true);
}
} else {
- volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
- t->mVolume, t->auxLevel);
+ volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+ mVolume,
+#ifdef FLOAT_AUX
+ mAuxLevel
+#else
+ auxLevel
+#endif
+ );
}
} else {
if (ramp) {
- volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
- t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc);
+ volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+ prevVolume, volumeInc, &prevAuxLevel, auxInc);
if (ADJUSTVOL) {
- t->adjustVolumeRamp(aux != NULL);
+ adjustVolumeRamp(aux != NULL);
}
} else {
- volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
- t->volume, t->auxLevel);
+ volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+ volume, auxLevel);
}
}
}
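
The FLOAT_AUX blocks above, and the TYPE_AUX template argument used throughout getTrackHook() and getProcessHook() below, switch the auxiliary-send accumulator between Q4.27 integers and float. The alias is defined elsewhere in AudioMixer; a sketch of the assumed shape:

    #include <cstdint>

    // Assumed definition (the real one lives elsewhere in AudioMixer.cpp and may differ):
    #ifdef FLOAT_AUX
    using TYPE_AUX = float;    // aux send accumulates in float
    #else
    using TYPE_AUX = int32_t;  // aux send accumulates in Q4.27
    #endif
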
@@ -1820,19 +1657,18 @@
* TA: int32_t (Q4.27)
*/
template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::process_NoResampleOneTrack(state_t* state)
+void AudioMixer::process__noResampleOneTrack()
{
- ALOGVV("process_NoResampleOneTrack\n");
- // CLZ is faster than CTZ on ARM, though really not sure if true after 31 - clz.
- const int i = 31 - __builtin_clz(state->enabledTracks);
- ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled");
- track_t *t = &state->tracks[i];
+ ALOGVV("process__noResampleOneTrack\n");
+ LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
+ "%zu != 1 tracks enabled", mEnabled.size());
+ const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
const uint32_t channels = t->mMixerChannelCount;
TO* out = reinterpret_cast<TO*>(t->mainBuffer);
TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
const bool ramp = t->needsRamp();
- for (size_t numFrames = state->frameCount; numFrames; ) {
+ for (size_t numFrames = mFrameCount; numFrames > 0; ) {
AudioBufferProvider::Buffer& b(t->buffer);
// get input buffer
b.frameCount = numFrames;
@@ -1844,19 +1680,19 @@
if (in == NULL || (((uintptr_t)in) & 3)) {
memset(out, 0, numFrames
* channels * audio_bytes_per_sample(t->mMixerFormat));
- ALOGE_IF((((uintptr_t)in) & 3), "process_NoResampleOneTrack: bus error: "
+ ALOGE_IF((((uintptr_t)in) & 3), "process__noResampleOneTrack: bus error: "
"buffer %p track %p, channels %d, needs %#x",
- in, t, t->channelCount, t->needs);
+ in, t.get(), t->channelCount, t->needs);
return;
}
const size_t outFrames = b.frameCount;
- volumeMix<MIXTYPE, is_same<TI, float>::value, false> (
- out, outFrames, in, aux, ramp, t);
+ t->volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, false /* ADJUSTVOL */> (
+ out, outFrames, in, aux, ramp);
out += outFrames * channels;
if (aux != NULL) {
- aux += channels;
+ aux += outFrames;
}
numFrames -= b.frameCount;
@@ -1874,59 +1710,59 @@
* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
+ * TA: int32_t (Q4.27) or float
*/
template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::track__Resample(track_t* t, TO* out, size_t outFrameCount, TO* temp, TA* aux)
+void AudioMixer::Track::track__Resample(TO* out, size_t outFrameCount, TO* temp, TA* aux)
{
ALOGVV("track__Resample\n");
- t->resampler->setSampleRate(t->sampleRate);
- const bool ramp = t->needsRamp();
+ mResampler->setSampleRate(sampleRate);
+ const bool ramp = needsRamp();
if (ramp || aux != NULL) {
// if ramp: resample with unity gain to temp buffer and scale/mix in 2nd step.
// if aux != NULL: resample with unity gain to temp buffer then apply send level.
- t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
- memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(TO));
- t->resampler->resample((int32_t*)temp, outFrameCount, t->bufferProvider);
+ mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+ memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(TO));
+ mResampler->resample((int32_t*)temp, outFrameCount, bufferProvider);
- volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
- out, outFrameCount, temp, aux, ramp, t);
+ volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
+ out, outFrameCount, temp, aux, ramp);
} else { // constant volume gain
- t->resampler->setVolume(t->mVolume[0], t->mVolume[1]);
- t->resampler->resample((int32_t*)out, outFrameCount, t->bufferProvider);
+ mResampler->setVolume(mVolume[0], mVolume[1]);
+ mResampler->resample((int32_t*)out, outFrameCount, bufferProvider);
}
}
/* This track hook is called to mix a track, when no resampling is required.
- * The input buffer should be present in t->in.
+ * The input buffer should be present in mIn.
*
* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
+ * TA: int32_t (Q4.27) or float
*/
template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::track__NoResample(track_t* t, TO* out, size_t frameCount,
- TO* temp __unused, TA* aux)
+void AudioMixer::Track::track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux)
{
ALOGVV("track__NoResample\n");
- const TI *in = static_cast<const TI *>(t->in);
+ const TI *in = static_cast<const TI *>(mIn);
- volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
- out, frameCount, in, aux, t->needsRamp(), t);
+ volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
+ out, frameCount, in, aux, needsRamp());
// MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
// MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
- in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * t->mMixerChannelCount;
- t->in = in;
+ in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * mMixerChannelCount;
+ mIn = in;
}
/* The Mixer engine generates either int32_t (Q4_27) or float data.
* We use this function to convert the engine buffers
* to the desired mixer output format, either int16_t (Q.15) or float.
*/
+/* static */
void AudioMixer::convertMixerFormat(void *out, audio_format_t mixerOutFormat,
void *in, audio_format_t mixerInFormat, size_t sampleCount)
{
@@ -1947,11 +1783,10 @@
case AUDIO_FORMAT_PCM_16_BIT:
switch (mixerOutFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
- memcpy_to_float_from_q4_27((float*)out, (int32_t*)in, sampleCount);
+ memcpy_to_float_from_q4_27((float*)out, (const int32_t*)in, sampleCount);
break;
case AUDIO_FORMAT_PCM_16_BIT:
- // two int16_t are produced per iteration
- ditherAndClamp((int32_t*)out, (int32_t*)in, sampleCount >> 1);
+ memcpy_to_i16_from_q4_27((int16_t*)out, (const int32_t*)in, sampleCount);
break;
default:
LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
@@ -1966,19 +1801,20 @@
/* Returns the proper track hook to use for mixing the track into the output buffer.
*/
-AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, uint32_t channelCount,
+/* static */
+AudioMixer::hook_t AudioMixer::Track::getTrackHook(int trackType, uint32_t channelCount,
audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
{
if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
switch (trackType) {
case TRACKTYPE_NOP:
- return track__nop;
+ return &Track::track__nop;
case TRACKTYPE_RESAMPLE:
- return track__genericResample;
+ return &Track::track__genericResample;
case TRACKTYPE_NORESAMPLEMONO:
- return track__16BitsMono;
+ return &Track::track__16BitsMono;
case TRACKTYPE_NORESAMPLE:
- return track__16BitsStereo;
+ return &Track::track__16BitsStereo;
default:
LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
break;
@@ -1987,15 +1823,15 @@
LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
switch (trackType) {
case TRACKTYPE_NOP:
- return track__nop;
+ return &Track::track__nop;
case TRACKTYPE_RESAMPLE:
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
- return (AudioMixer::hook_t)
- track__Resample<MIXTYPE_MULTI, float /*TO*/, float /*TI*/, int32_t /*TA*/>;
+ return (AudioMixer::hook_t) &Track::track__Resample<
+ MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
- return (AudioMixer::hook_t)\
- track__Resample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
+ return (AudioMixer::hook_t) &Track::track__Resample<
+ MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
break;
@@ -2004,11 +1840,11 @@
case TRACKTYPE_NORESAMPLEMONO:
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
- return (AudioMixer::hook_t)
- track__NoResample<MIXTYPE_MONOEXPAND, float, float, int32_t>;
+ return (AudioMixer::hook_t) &Track::track__NoResample<
+ MIXTYPE_MONOEXPAND, float /*TO*/, float /*TI*/, TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
- return (AudioMixer::hook_t)
- track__NoResample<MIXTYPE_MONOEXPAND, int32_t, int16_t, int32_t>;
+ return (AudioMixer::hook_t) &Track::track__NoResample<
+ MIXTYPE_MONOEXPAND, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
break;
@@ -2017,11 +1853,11 @@
case TRACKTYPE_NORESAMPLE:
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
- return (AudioMixer::hook_t)
- track__NoResample<MIXTYPE_MULTI, float, float, int32_t>;
+ return (AudioMixer::hook_t) &Track::track__NoResample<
+ MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
- return (AudioMixer::hook_t)
- track__NoResample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
+ return (AudioMixer::hook_t) &Track::track__NoResample<
+ MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
break;
@@ -2041,7 +1877,9 @@
* a stereo output track, the input track cannot be MONO. This should be
* prevented by the caller.
*/
-AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, uint32_t channelCount,
+/* static */
+AudioMixer::process_hook_t AudioMixer::getProcessHook(
+ int processType, uint32_t channelCount,
audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
{
if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
@@ -2049,18 +1887,18 @@
return NULL;
}
if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
- return process__OneTrack16BitsStereoNoResampling;
+ return &AudioMixer::process__oneTrack16BitsStereoNoResampling;
}
LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
switch (mixerOutFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
- return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
- float /*TO*/, float /*TI*/, int32_t /*TA*/>;
+ return &AudioMixer::process__noResampleOneTrack<
+ MIXTYPE_MULTI_SAVEONLY, float /*TO*/, float /*TI*/, TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
- return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
- int16_t, float, int32_t>;
+ return &AudioMixer::process__noResampleOneTrack<
+ MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, float /*TI*/, TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
break;
@@ -2069,11 +1907,11 @@
case AUDIO_FORMAT_PCM_16_BIT:
switch (mixerOutFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
- return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
- float, int16_t, int32_t>;
+ return &AudioMixer::process__noResampleOneTrack<
+ MIXTYPE_MULTI_SAVEONLY, float /*TO*/, int16_t /*TI*/, TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
- return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
- int16_t, int16_t, int32_t>;
+ return &AudioMixer::process__noResampleOneTrack<
+ MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
break;
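
The AudioMixer.cpp changes above replace free functions taking track_t*/state_t* with Track and AudioMixer member functions stored as pointer-to-member hooks, invoked as (t.get()->*t->hook)(...) and, via process(), (this->*mHook)(). A self-contained sketch of that dispatch pattern, with simplified hypothetical names rather than the real mixer types:

    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Track;                                 // hypothetical simplified track
    using hook_t = void (Track::*)(int);          // per-track hook, like AudioMixer::hook_t

    struct Track {
        hook_t hook = &Track::mix__nop;           // selected at configure time
        void mix__nop(int) {}                     // no-op hook
        void mix__stereo(int frames) { std::printf("mix %d frames\n", frames); }
    };

    struct Mixer {
        using process_hook_t = void (Mixer::*)();  // per-mixer process hook
        process_hook_t mHook = &Mixer::process__nop;
        std::vector<std::shared_ptr<Track>> mTracks;

        void process() { (this->*mHook)(); }       // dispatch, as AudioMixer::process() does
        void process__nop() {}
        void process__generic() {
            for (const auto& t : mTracks) {
                (t.get()->*t->hook)(64);           // same call syntax as the refactored mixer
            }
        }
    };
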
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index 8d74024..f33e361 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -188,13 +188,13 @@
template<>
inline void MixAccum<float, int16_t>(float *auxaccum, int16_t value) {
- static const float norm = 1. / (1 << 15);
+ static constexpr float norm = 1. / (1 << 15);
*auxaccum += norm * value;
}
template<>
inline void MixAccum<float, int32_t>(float *auxaccum, int32_t value) {
- static const float norm = 1. / (1 << 27);
+ static constexpr float norm = 1. / (1 << 27);
*auxaccum += norm * value;
}
@@ -238,6 +238,7 @@
* NCHAN represents number of input and output channels.
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
* TV: int32_t (U4.28) or int16_t (U4.12) or float
* vol: represents a volume array.
*
@@ -247,7 +248,8 @@
* Single input channel. NCHAN represents number of output channels.
* TO: int32_t (Q4.27) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TV: int32_t (U4.28) or int16_t (U4.12) or float
+ * TA: int32_t (Q4.27) or float
+ * TV/TAV: int32_t (U4.28) or int16_t (U4.12) or float
* Input channel count is 1.
* vol: represents volume array.
*
@@ -257,7 +259,8 @@
* NCHAN represents number of input and output channels.
* TO: int16_t (Q.15) or float
* TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TV: int32_t (U4.28) or int16_t (U4.12) or float
+ * TA: int32_t (Q4.27) or float
+ * TV/TAV: int32_t (U4.28) or int16_t (U4.12) or float
* vol: represents a volume array.
*
* MIXTYPE_MULTI_SAVEONLY does not accumulate into the out pointer.
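
The MixAccum specializations above fold Q0.15 and Q4.27 samples into a float auxiliary accumulator using constexpr scale factors. A small standalone check of what those scales mean numerically:

    #include <cstdint>
    #include <cstdio>

    // Illustrative only: the normalization used by the MixAccum<float, T> specializations.
    static constexpr float kNormQ0_15 = 1.f / (1 << 15);   // int16_t Q0.15 -> float
    static constexpr float kNormQ4_27 = 1.f / (1 << 27);   // int32_t Q4.27 -> float

    int main() {
        int16_t s16 = 32767;        // just below 0 dBFS in Q0.15
        int32_t q427 = 1 << 27;     // exactly 1.0 in Q4.27 (4 integer bits of headroom)
        std::printf("%f %f\n", s16 * kNormQ0_15, q427 * kNormQ4_27);  // ~0.999969 and 1.000000
        return 0;
    }
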
diff --git a/media/libaudioprocessing/AudioResamplerDyn.cpp b/media/libaudioprocessing/AudioResamplerDyn.cpp
index 8f7b982..eeeecce 100644
--- a/media/libaudioprocessing/AudioResamplerDyn.cpp
+++ b/media/libaudioprocessing/AudioResamplerDyn.cpp
@@ -38,6 +38,9 @@
//#define DEBUG_RESAMPLER
+// use this for our buffer alignment. Should be at least 32 bytes.
+constexpr size_t CACHE_LINE_SIZE = 64;
+
namespace android {
/*
@@ -94,7 +97,10 @@
// create new buffer
TI* state = NULL;
- (void)posix_memalign(reinterpret_cast<void**>(&state), 32, stateCount*sizeof(*state));
+ (void)posix_memalign(
+ reinterpret_cast<void **>(&state),
+ CACHE_LINE_SIZE /* alignment */,
+ stateCount * sizeof(*state));
memset(state, 0, stateCount*sizeof(*state));
// attempt to preserve state
@@ -185,6 +191,16 @@
// setSampleRate() for 1:1. (May be removed if precalculated filters are used.)
mInSampleRate = 0;
mConstants.set(128, 8, mSampleRate, mSampleRate); // TODO: set better
+
+ // fetch property based resampling parameters
+ mPropertyEnableAtSampleRate = property_get_int32(
+ "ro.audio.resampler.psd.enable_at_samplerate", mPropertyEnableAtSampleRate);
+ mPropertyHalfFilterLength = property_get_int32(
+ "ro.audio.resampler.psd.halflength", mPropertyHalfFilterLength);
+ mPropertyStopbandAttenuation = property_get_int32(
+ "ro.audio.resampler.psd.stopband", mPropertyStopbandAttenuation);
+ mPropertyCutoffPercent = property_get_int32(
+ "ro.audio.resampler.psd.cutoff_percent", mPropertyCutoffPercent);
}
template<typename TC, typename TI, typename TO>
@@ -215,6 +231,8 @@
}
}
+// TODO: update to C++11
+
template<typename T> T max(T a, T b) {return a > b ? a : b;}
template<typename T> T absdiff(T a, T b) {return a > b ? a - b : b - a;}
@@ -223,37 +241,74 @@
void AudioResamplerDyn<TC, TI, TO>::createKaiserFir(Constants &c,
double stopBandAtten, int inSampleRate, int outSampleRate, double tbwCheat)
{
- TC* buf = NULL;
- static const double atten = 0.9998; // to avoid ripple overflow
- double fcr;
- double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten);
+ // compute the normalized transition bandwidth
+ const double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten);
+ const double halfbw = tbw / 2.;
- (void)posix_memalign(reinterpret_cast<void**>(&buf), 32, (c.mL+1)*c.mHalfNumCoefs*sizeof(TC));
+ double fcr; // compute fcr, the 3 dB amplitude cut-off.
if (inSampleRate < outSampleRate) { // upsample
- fcr = max(0.5*tbwCheat - tbw/2, tbw/2);
+ fcr = max(0.5 * tbwCheat - halfbw, halfbw);
} else { // downsample
- fcr = max(0.5*tbwCheat*outSampleRate/inSampleRate - tbw/2, tbw/2);
+ fcr = max(0.5 * tbwCheat * outSampleRate / inSampleRate - halfbw, halfbw);
}
- // create and set filter
- firKaiserGen(buf, c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten);
- c.mFirCoefs = buf;
- if (mCoefBuffer) {
- free(mCoefBuffer);
- }
- mCoefBuffer = buf;
-#ifdef DEBUG_RESAMPLER
+ createKaiserFir(c, stopBandAtten, fcr);
+}
+
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::createKaiserFir(Constants &c,
+ double stopBandAtten, double fcr) {
+ // compute the normalized transition bandwidth
+ const double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten);
+ const int phases = c.mL;
+ const int halfLength = c.mHalfNumCoefs;
+
+ // create buffer
+ TC *coefs = nullptr;
+ int ret = posix_memalign(
+ reinterpret_cast<void **>(&coefs),
+ CACHE_LINE_SIZE /* alignment */,
+ (phases + 1) * halfLength * sizeof(TC));
+ LOG_ALWAYS_FATAL_IF(ret != 0, "Cannot allocate buffer memory, ret %d", ret);
+ c.mFirCoefs = coefs;
+ free(mCoefBuffer);
+ mCoefBuffer = coefs;
+
+ // square the computed minimum passband value (extra safety).
+ double attenuation =
+ computeWindowedSincMinimumPassbandValue(stopBandAtten);
+ attenuation *= attenuation;
+
+ // design filter
+ firKaiserGen(coefs, phases, halfLength, stopBandAtten, fcr, attenuation);
+
+ // update the design criteria
+ mNormalizedCutoffFrequency = fcr;
+ mNormalizedTransitionBandwidth = tbw;
+ mFilterAttenuation = attenuation;
+ mStopbandAttenuationDb = stopBandAtten;
+ mPassbandRippleDb = computeWindowedSincPassbandRippleDb(stopBandAtten);
+
+#if 0
+ // Keep this debug code in case an app causes resampler design issues.
+ const double halfbw = tbw / 2.;
// print basic filter stats
- printf("L:%d hnc:%d stopBandAtten:%lf fcr:%lf atten:%lf tbw:%lf\n",
- c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten, tbw);
- // test the filter and report results
- double fp = (fcr - tbw/2)/c.mL;
- double fs = (fcr + tbw/2)/c.mL;
+ ALOGD("L:%d hnc:%d stopBandAtten:%lf fcr:%lf atten:%lf tbw:%lf\n",
+ c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, attenuation, tbw);
+
+ // test the filter and report results.
+ // Since this is a polyphase filter, normalized fp and fs must be scaled.
+ const double fp = (fcr - halfbw) / phases;
+ const double fs = (fcr + halfbw) / phases;
+
double passMin, passMax, passRipple;
double stopMax, stopRipple;
- testFir(buf, c.mL, c.mHalfNumCoefs, fp, fs, /*passSteps*/ 1000, /*stopSteps*/ 100000,
+
+ const int32_t passSteps = 1000;
+
+ testFir(coefs, c.mL, c.mHalfNumCoefs, fp, fs, passSteps, passSteps * c.mL /*stopSteps*/,
passMin, passMax, passRipple, stopMax, stopRipple);
- printf("passband(%lf, %lf): %.8lf %.8lf %.8lf\n", 0., fp, passMin, passMax, passRipple);
- printf("stopband(%lf, %lf): %.8lf %.3lf\n", fs, 0.5, stopMax, stopRipple);
+ ALOGD("passband(%lf, %lf): %.8lf %.8lf %.8lf\n", 0., fp, passMin, passMax, passRipple);
+ ALOGD("stopband(%lf, %lf): %.8lf %.3lf\n", fs, 0.5, stopMax, stopRipple);
#endif
}
@@ -304,6 +359,11 @@
mFilterSampleRate = inSampleRate;
mFilterQuality = getQuality();
+ double stopBandAtten;
+ double tbwCheat = 1.; // how much we "cheat" into aliasing
+ int halfLength;
+ double fcr = 0.;
+
// Begin Kaiser Filter computation
//
// The quantization floor for S16 is about 96db - 10*log_10(#length) + 3dB.
@@ -313,52 +373,60 @@
// 96-98dB
//
- double stopBandAtten;
- double tbwCheat = 1.; // how much we "cheat" into aliasing
- int halfLength;
- if (mFilterQuality == DYN_HIGH_QUALITY) {
- // 32b coefficients, 64 length
+ if (mPropertyEnableAtSampleRate >= 0 && mSampleRate >= mPropertyEnableAtSampleRate) {
+ // An alternative method which allows a greater fcr
+ // at the expense of potential aliasing.
+ halfLength = mPropertyHalfFilterLength;
+ stopBandAtten = mPropertyStopbandAttenuation;
useS32 = true;
- stopBandAtten = 98.;
- if (inSampleRate >= mSampleRate * 4) {
- halfLength = 48;
- } else if (inSampleRate >= mSampleRate * 2) {
- halfLength = 40;
- } else {
- halfLength = 32;
- }
- } else if (mFilterQuality == DYN_LOW_QUALITY) {
- // 16b coefficients, 16-32 length
- useS32 = false;
- stopBandAtten = 80.;
- if (inSampleRate >= mSampleRate * 4) {
- halfLength = 24;
- } else if (inSampleRate >= mSampleRate * 2) {
- halfLength = 16;
- } else {
- halfLength = 8;
- }
- if (inSampleRate <= mSampleRate) {
- tbwCheat = 1.05;
- } else {
- tbwCheat = 1.03;
- }
- } else { // DYN_MED_QUALITY
- // 16b coefficients, 32-64 length
- // note: > 64 length filters with 16b coefs can have quantization noise problems
- useS32 = false;
- stopBandAtten = 84.;
- if (inSampleRate >= mSampleRate * 4) {
- halfLength = 32;
- } else if (inSampleRate >= mSampleRate * 2) {
- halfLength = 24;
- } else {
- halfLength = 16;
- }
- if (inSampleRate <= mSampleRate) {
- tbwCheat = 1.03;
- } else {
- tbwCheat = 1.01;
+ fcr = mInSampleRate <= mSampleRate
+ ? 0.5 : 0.5 * mSampleRate / mInSampleRate;
+ fcr *= mPropertyCutoffPercent / 100.;
+ } else {
+ if (mFilterQuality == DYN_HIGH_QUALITY) {
+ // 32b coefficients, 64 length
+ useS32 = true;
+ stopBandAtten = 98.;
+ if (inSampleRate >= mSampleRate * 4) {
+ halfLength = 48;
+ } else if (inSampleRate >= mSampleRate * 2) {
+ halfLength = 40;
+ } else {
+ halfLength = 32;
+ }
+ } else if (mFilterQuality == DYN_LOW_QUALITY) {
+ // 16b coefficients, 16-32 length
+ useS32 = false;
+ stopBandAtten = 80.;
+ if (inSampleRate >= mSampleRate * 4) {
+ halfLength = 24;
+ } else if (inSampleRate >= mSampleRate * 2) {
+ halfLength = 16;
+ } else {
+ halfLength = 8;
+ }
+ if (inSampleRate <= mSampleRate) {
+ tbwCheat = 1.05;
+ } else {
+ tbwCheat = 1.03;
+ }
+ } else { // DYN_MED_QUALITY
+ // 16b coefficients, 32-64 length
+ // note: > 64 length filters with 16b coefs can have quantization noise problems
+ useS32 = false;
+ stopBandAtten = 84.;
+ if (inSampleRate >= mSampleRate * 4) {
+ halfLength = 32;
+ } else if (inSampleRate >= mSampleRate * 2) {
+ halfLength = 24;
+ } else {
+ halfLength = 16;
+ }
+ if (inSampleRate <= mSampleRate) {
+ tbwCheat = 1.03;
+ } else {
+ tbwCheat = 1.01;
+ }
}
}
@@ -390,8 +458,12 @@
// create the filter
mConstants.set(phases, halfLength, inSampleRate, mSampleRate);
- createKaiserFir(mConstants, stopBandAtten,
- inSampleRate, mSampleRate, tbwCheat);
+ if (fcr > 0.) {
+ createKaiserFir(mConstants, stopBandAtten, fcr);
+ } else {
+ createKaiserFir(mConstants, stopBandAtten,
+ inSampleRate, mSampleRate, tbwCheat);
+ }
} // End Kaiser filter
// update phase and state based on the new filter.
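
With the property path above, the cutoff fcr is derived directly from the output/input rate ratio and ro.audio.resampler.psd.cutoff_percent instead of from the Kaiser transition band. A sketch of that computation with simplified names (fcr is normalized so 0.5 is the input Nyquist):

    #include <cstdio>

    // Sketch of the property-selected cutoff above (names simplified, not the real member code).
    static double computeFcr(int inRate, int outRate, int cutoffPercent) {
        double fcr = (inRate <= outRate) ? 0.5                    // upsampling: keep full input band
                                         : 0.5 * outRate / inRate; // downsampling: limit to output Nyquist
        return fcr * cutoffPercent / 100.;
    }

    int main() {
        // ro.audio.resampler.psd.cutoff_percent = 100 is the default
        std::printf("%f\n", computeFcr(44100, 48000, 100));  // 0.500000 (upsample)
        std::printf("%f\n", computeFcr(96000, 48000, 100));  // 0.250000 (2:1 downsample)
        std::printf("%f\n", computeFcr(96000, 48000, 91));   // 0.227500 (91% leaves extra transition room)
        return 0;
    }
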
diff --git a/media/libaudioprocessing/AudioResamplerDyn.h b/media/libaudioprocessing/AudioResamplerDyn.h
index 1840fc7..92144d0 100644
--- a/media/libaudioprocessing/AudioResamplerDyn.h
+++ b/media/libaudioprocessing/AudioResamplerDyn.h
@@ -55,6 +55,39 @@
virtual size_t resample(int32_t* out, size_t outFrameCount,
AudioBufferProvider* provider);
+ // Make available key design criteria for testing
+ int getHalfLength() const {
+ return mConstants.mHalfNumCoefs;
+ }
+
+ const TC *getFilterCoefs() const {
+ return mConstants.mFirCoefs;
+ }
+
+ int getPhases() const {
+ return mConstants.mL;
+ }
+
+ double getStopbandAttenuationDb() const {
+ return mStopbandAttenuationDb;
+ }
+
+ double getPassbandRippleDb() const {
+ return mPassbandRippleDb;
+ }
+
+ double getNormalizedTransitionBandwidth() const {
+ return mNormalizedTransitionBandwidth;
+ }
+
+ double getFilterAttenuation() const {
+ return mFilterAttenuation;
+ }
+
+ double getNormalizedCutoffFrequency() const {
+ return mNormalizedCutoffFrequency;
+ }
+
private:
class Constants { // stores the filter constants.
@@ -112,6 +145,8 @@
void createKaiserFir(Constants &c, double stopBandAtten,
int inSampleRate, int outSampleRate, double tbwCheat);
+ void createKaiserFir(Constants &c, double stopBandAtten, double fcr);
+
template<int CHANNELS, bool LOCKED, int STRIDE>
size_t resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider);
@@ -127,6 +162,38 @@
int32_t mFilterSampleRate; // designed filter sample rate.
src_quality mFilterQuality; // designed filter quality.
void* mCoefBuffer; // if a filter is created, this is not null
+
+ // Property selected design parameters.
+ // This will enable fixed high quality resampling.
+
+ // 32 char PROP_NAME_MAX limit enforced before Android O
+
+ // Use for sample rates greater than or equal to this value.
+ // Set to non-negative to enable, negative to disable.
+ int32_t mPropertyEnableAtSampleRate = 48000;
+ // "ro.audio.resampler.psd.enable_at_samplerate"
+
+ // Specify HALF the resampling filter length.
+ // Set to a value which is a multiple of 4.
+ int32_t mPropertyHalfFilterLength = 32;
+ // "ro.audio.resampler.psd.halflength"
+
+ // Specify the stopband attenuation in positive dB.
+ // Set to a value greater or equal to 20.
+ int32_t mPropertyStopbandAttenuation = 90;
+ // "ro.audio.resampler.psd.stopband"
+
+ // Specify the cutoff frequency as a percentage of Nyquist.
+ // Set to a value between 50 and 100.
+ int32_t mPropertyCutoffPercent = 100;
+ // "ro.audio.resampler.psd.cutoff_percent"
+
+ // Filter creation design parameters, see setSampleRate()
+ double mStopbandAttenuationDb = 0.;
+ double mPassbandRippleDb = 0.;
+ double mNormalizedTransitionBandwidth = 0.;
+ double mFilterAttenuation = 0.;
+ double mNormalizedCutoffFrequency = 0.;
};
} // namespace android
diff --git a/media/libaudioprocessing/AudioResamplerFirGen.h b/media/libaudioprocessing/AudioResamplerFirGen.h
index ad18965..39cafeb 100644
--- a/media/libaudioprocessing/AudioResamplerFirGen.h
+++ b/media/libaudioprocessing/AudioResamplerFirGen.h
@@ -546,8 +546,9 @@
}
wstart += wstep;
}
- // renormalize - this is only needed for integer filter types
- double norm = 1./((1ULL<<(sizeof(T)*8-1))*L);
+ // renormalize - this is needed for integer filter types, use 1 for float or double.
+ constexpr int64_t integralShift = std::is_integral<T>::value ? (sizeof(T) * 8 - 1) : 0;
+ const double norm = 1. / (static_cast<int64_t>(L) << integralShift); // widen L to avoid 32-bit shift overflow
firMin = fmin * norm;
firMax = fmax * norm;
@@ -557,9 +558,12 @@
* evaluates the |H(f)| lowpass band characteristics.
*
* This function tests the lowpass characteristics for the overall polyphase filter,
- * and is used to verify the design. For this case, fp should be set to the
+ * and is used to verify the design.
+ *
+ * For a polyphase filter (L > 1), typically fp should be set to the
* passband normalized frequency from 0 to 0.5 for the overall filter (thus it
* is the designed polyphase bank value / L). Likewise for fs.
+ * Similarly the stopSteps should be L * passSteps for equivalent accuracy.
*
* @param coef is the designed polyphase filter banks
*
@@ -610,6 +614,74 @@
}
/*
+ * Estimate the windowed sinc minimum passband value.
+ *
+ * This is the minimum value for a windowed sinc filter in its passband,
+ * which is identical to the scaling required not to cause overflow of a 0dBFS signal.
+ * The actual value used to attenuate the filter amplitude should be slightly
+ * smaller than this (suggest squaring) as this is just an estimate.
+ *
+ * As a windowed sinc has a passband ripple commensurate with the stopband attenuation
+ * due to Gibb's phenomenon from truncating the sinc, we derive this value from
+ * the design stopbandAttenuationDb (a positive value).
+ */
+static inline double computeWindowedSincMinimumPassbandValue(
+ double stopBandAttenuationDb) {
+ return 1. - pow(10. /* base */, stopBandAttenuationDb * (-1. / 20.));
+}
+
+/*
+ * Compute the windowed sinc passband ripple from stopband attenuation.
+ *
+ * As a windowed sinc has a passband ripple commensurate with the stopband attenuation
+ * due to Gibb's phenomenon from truncating the sinc, we derive this value from
+ * the design stopbandAttenuationDb (a positive value).
+ */
+static inline double computeWindowedSincPassbandRippleDb(
+ double stopBandAttenuationDb) {
+ return -20. * log10(computeWindowedSincMinimumPassbandValue(stopBandAttenuationDb));
+}
+
+/*
+ * Kaiser window Beta value
+ *
+ * Formula 3.2.5, 3.2.7, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48
+ * Formula 7.75, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542
+ *
+ * See also: http://melodi.ee.washington.edu/courses/ee518/notes/lec17.pdf
+ *
+ * Kaiser window and beta parameter
+ *
+ * | 0.1102*(A - 8.7) A > 50
+ * Beta = | 0.5842*(A - 21)^0.4 + 0.07886*(A - 21) 21 < A <= 50
+ * | 0. A <= 21
+ *
+ * with A is the desired stop-band attenuation in positive dBFS
+ *
+ * 30 dB 2.210
+ * 40 dB 3.384
+ * 50 dB 4.538
+ * 60 dB 5.658
+ * 70 dB 6.764
+ * 80 dB 7.865
+ * 90 dB 8.960
+ * 100 dB 10.056
+ *
+ * For some values of stopBandAttenuationDb the function may be computed
+ * at compile time.
+ */
+static inline constexpr double computeBeta(double stopBandAttenuationDb) {
+ if (stopBandAttenuationDb > 50.) {
+ return 0.1102 * (stopBandAttenuationDb - 8.7);
+ }
+ const double offset = stopBandAttenuationDb - 21.;
+ if (offset > 0.) {
+ return 0.5842 * pow(offset, 0.4) + 0.07886 * offset;
+ }
+ return 0.;
+}
+
+/*
* Calculates the overall polyphase filter based on a windowed sinc function.
*
* The windowed sinc is an odd length symmetric filter of exactly L*halfNumCoef*2+1
@@ -642,31 +714,8 @@
template <typename T>
static inline void firKaiserGen(T* coef, int L, int halfNumCoef,
double stopBandAtten, double fcr, double atten) {
- //
- // Formula 3.2.5, 3.2.7, Vaidyanathan, _Multirate Systems and Filter Banks_, p. 48
- // Formula 7.75, Oppenheim and Schafer, _Discrete-time Signal Processing, 3e_, p. 542
- //
- // See also: http://melodi.ee.washington.edu/courses/ee518/notes/lec17.pdf
- //
- // Kaiser window and beta parameter
- //
- // | 0.1102*(A - 8.7) A > 50
- // beta = | 0.5842*(A - 21)^0.4 + 0.07886*(A - 21) 21 <= A <= 50
- // | 0. A < 21
- //
- // with A is the desired stop-band attenuation in dBFS
- //
- // 30 dB 2.210
- // 40 dB 3.384
- // 50 dB 4.538
- // 60 dB 5.658
- // 70 dB 6.764
- // 80 dB 7.865
- // 90 dB 8.960
- // 100 dB 10.056
-
const int N = L * halfNumCoef; // non-negative half
- const double beta = 0.1102 * (stopBandAtten - 8.7); // >= 50dB always
+ const double beta = computeBeta(stopBandAtten);
const double xstep = (2. * M_PI) * fcr / L;
const double xfrac = 1. / N;
const double yscale = atten * L / (I0(beta) * M_PI);
@@ -696,9 +745,9 @@
sg.advance();
}
- if (is_same<T, int16_t>::value) { // int16_t needs noise shaping
+ if (std::is_same<T, int16_t>::value) { // int16_t needs noise shaping
*coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1), err));
- } else if (is_same<T, int32_t>::value) {
+ } else if (std::is_same<T, int32_t>::value) {
*coef++ = static_cast<T>(toint(y, 1ULL<<(sizeof(T)*8-1)));
} else { // assumed float or double
*coef++ = static_cast<T>(y);
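
The Kaiser helpers above are easy to sanity-check numerically; restating the two formulas in a standalone snippet (not calling the AOSP headers) reproduces the 90 dB row of the beta table and the corresponding passband numbers:

    #include <cmath>
    #include <cstdio>

    // Restatement of computeBeta() and computeWindowedSincMinimumPassbandValue() for a quick check.
    static double beta(double A) {
        if (A > 50.) return 0.1102 * (A - 8.7);
        const double offset = A - 21.;
        return offset > 0. ? 0.5842 * std::pow(offset, 0.4) + 0.07886 * offset : 0.;
    }
    static double minPassband(double A) { return 1. - std::pow(10., -A / 20.); }

    int main() {
        const double A = 90.;                          // stopband attenuation in dB
        std::printf("beta = %.3f\n", beta(A));         // 8.959 (table above lists 8.960)
        std::printf("minPass = %.6f\n", minPassband(A));                        // 0.999968
        std::printf("ripple = %.6f dB\n", -20. * std::log10(minPassband(A)));   // ~0.000275 dB
        return 0;
    }
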
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index 862fef6..2d9e1cb 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -183,7 +183,7 @@
mOutFrameSize =
audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask);
status_t status;
- status = EffectBufferHalInterface::mirror(
+ status = mEffectsFactory->mirrorBuffer(
nullptr, mInFrameSize * bufferFrameCount, &mInBuffer);
if (status != 0) {
ALOGE("DownmixerBufferProvider() error %d while creating input buffer", status);
@@ -191,7 +191,7 @@
mEffectsFactory.clear();
return;
}
- status = EffectBufferHalInterface::mirror(
+ status = mEffectsFactory->mirrorBuffer(
nullptr, mOutFrameSize * bufferFrameCount, &mOutBuffer);
if (status != 0) {
ALOGE("DownmixerBufferProvider() error %d while creating output buffer", status);
@@ -376,6 +376,23 @@
memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannelCount);
}
+ClampFloatBufferProvider::ClampFloatBufferProvider(int32_t channelCount, size_t bufferFrameCount) :
+ CopyBufferProvider(
+ channelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT),
+ channelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT),
+ bufferFrameCount),
+ mChannelCount(channelCount)
+{
+ ALOGV("ClampFloatBufferProvider(%p)(%u)", this, channelCount);
+}
+
+void ClampFloatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+ memcpy_to_float_from_float_with_clamping((float*)dst, (const float*)src,
+ frames * mChannelCount,
+ FLOAT_NOMINAL_RANGE_HEADROOM);
+}
+
TimestretchBufferProvider::TimestretchBufferProvider(int32_t channelCount,
audio_format_t format, uint32_t sampleRate, const AudioPlaybackRate &playbackRate) :
mChannelCount(channelCount),
diff --git a/media/libaudioprocessing/OWNERS b/media/libaudioprocessing/OWNERS
new file mode 100644
index 0000000..96d0ea0
--- /dev/null
+++ b/media/libaudioprocessing/OWNERS
@@ -0,0 +1,3 @@
+gkasten@google.com
+hunga@google.com
+rago@google.com
diff --git a/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh b/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh
index 704d095..efef417 100755
--- a/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh
+++ b/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh
@@ -14,8 +14,8 @@
echo "waiting for device"
adb root && adb wait-for-device remount
-adb push $OUT/system/lib/libaudioresampler.so /system/lib
-adb push $OUT/system/lib64/libaudioresampler.so /system/lib64
+adb push $OUT/system/lib/libaudioprocessing.so /system/lib
+adb push $OUT/system/lib64/libaudioprocessing.so /system/lib64
adb push $OUT/data/nativetest/resampler_tests/resampler_tests /data/nativetest/resampler_tests/resampler_tests
adb push $OUT/data/nativetest64/resampler_tests/resampler_tests /data/nativetest64/resampler_tests/resampler_tests
diff --git a/media/libaudioprocessing/tests/resampler_tests.cpp b/media/libaudioprocessing/tests/resampler_tests.cpp
index a23c000..e1623f7 100644
--- a/media/libaudioprocessing/tests/resampler_tests.cpp
+++ b/media/libaudioprocessing/tests/resampler_tests.cpp
@@ -29,6 +29,7 @@
#include <unistd.h>
#include <iostream>
+#include <memory>
#include <utility>
#include <vector>
@@ -37,6 +38,8 @@
#include <media/AudioBufferProvider.h>
#include <media/AudioResampler.h>
+#include "../AudioResamplerDyn.h"
+#include "../AudioResamplerFirGen.h"
#include "test_utils.h"
template <typename T>
@@ -242,6 +245,60 @@
delete resampler;
}
+void testFilterResponse(
+ size_t channels, unsigned inputFreq, unsigned outputFreq)
+{
+ // create resampler
+ using ResamplerType = android::AudioResamplerDyn<float, float, float>;
+ std::unique_ptr<ResamplerType> rdyn(
+ static_cast<ResamplerType *>(
+ android::AudioResampler::create(
+ AUDIO_FORMAT_PCM_FLOAT,
+ channels,
+ outputFreq,
+ android::AudioResampler::DYN_HIGH_QUALITY)));
+ rdyn->setSampleRate(inputFreq);
+
+ // get design parameters
+ const int phases = rdyn->getPhases();
+ const int halfLength = rdyn->getHalfLength();
+ const float *coefs = rdyn->getFilterCoefs();
+ const double fcr = rdyn->getNormalizedCutoffFrequency();
+ const double tbw = rdyn->getNormalizedTransitionBandwidth();
+ const double attenuation = rdyn->getFilterAttenuation();
+ const double stopbandDb = rdyn->getStopbandAttenuationDb();
+ const double passbandDb = rdyn->getPassbandRippleDb();
+ const double fp = fcr - tbw / 2;
+ const double fs = fcr + tbw / 2;
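+ // fp and fs are the normalized passband and stopband edges; they straddle the
+ // cutoff fcr by half the transition bandwidth tbw on either side.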
+
+ printf("inputFreq:%d outputFreq:%d design"
+ " phases:%d halfLength:%d"
+ " fcr:%lf fp:%lf fs:%lf tbw:%lf"
+ " attenuation:%lf stopRipple:%.lf passRipple:%lf"
+ "\n",
+ inputFreq, outputFreq,
+ phases, halfLength,
+ fcr, fp, fs, tbw,
+ attenuation, stopbandDb, passbandDb);
+
+ // verify design parameters
+ constexpr int32_t passSteps = 1000;
+ double passMin, passMax, passRipple, stopMax, stopRipple;
+ android::testFir(coefs, phases, halfLength, fp / phases, fs / phases,
+ passSteps, phases * passSteps /* stopSteps */,
+ passMin, passMax, passRipple,
+ stopMax, stopRipple);
+ printf("inputFreq:%d outputFreq:%d verify"
+ " passMin:%lf passMax:%lf passRipple:%lf stopMax:%lf stopRipple:%lf"
+ "\n",
+ inputFreq, outputFreq,
+ passMin, passMax, passRipple, stopMax, stopRipple);
+
+ ASSERT_GT(stopRipple, 60.); // enough stopband attenuation
+ ASSERT_LT(passRipple, 0.2); // small passband ripple
+ ASSERT_GT(passMin, 0.99); // we do not attenuate the signal (ideally 1.)
+}
+
/* Buffer increment test
*
* We compare a reference output, where we consume and process the entire
@@ -484,3 +541,30 @@
}
}
+TEST(audioflinger_resampler, filterresponse) {
+ std::vector<int> inSampleRates{
+ 8000,
+ 11025,
+ 12000,
+ 16000,
+ 22050,
+ 24000,
+ 32000,
+ 44100,
+ 48000,
+ 88200,
+ 96000,
+ 176400,
+ 192000,
+ };
+ std::vector<int> outSampleRates{
+ 48000,
+ 96000,
+ };
+
+ for (int outSampleRate : outSampleRates) {
+ for (int inSampleRate : inSampleRates) {
+ testFilterResponse(2 /* channels */, inSampleRate, outSampleRate);
+ }
+ }
+}
diff --git a/media/libaudioprocessing/tests/test-mixer.cpp b/media/libaudioprocessing/tests/test-mixer.cpp
index 75dbf91..bc9d2a6 100644
--- a/media/libaudioprocessing/tests/test-mixer.cpp
+++ b/media/libaudioprocessing/tests/test-mixer.cpp
@@ -143,10 +143,6 @@
usage(progname);
return EXIT_FAILURE;
}
- if ((unsigned)argc > AudioMixer::MAX_NUM_TRACKS) {
- fprintf(stderr, "too many tracks: %d > %u", argc, AudioMixer::MAX_NUM_TRACKS);
- return EXIT_FAILURE;
- }
size_t outputFrames = 0;
@@ -246,9 +242,10 @@
for (size_t i = 0; i < providers.size(); ++i) {
//printf("track %d out of %d\n", i, providers.size());
uint32_t channelMask = audio_channel_out_mask_from_count(providers[i].getNumChannels());
- int32_t name = mixer->getTrackName(channelMask,
- formats[i], AUDIO_SESSION_OUTPUT_MIX);
- ALOG_ASSERT(name >= 0);
+ const int name = i;
+ const status_t status = mixer->create(
+ name, channelMask, formats[i], AUDIO_SESSION_OUTPUT_MIX);
+ LOG_ALWAYS_FATAL_IF(status != OK);
names[i] = name;
mixer->setBufferProvider(name, &providers[i]);
mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
@@ -315,9 +312,10 @@
writeFile(outputFilename, outputAddr,
outputSampleRate, outputChannels, outputFrames, useMixerFloat);
if (auxFilename) {
- // Aux buffer is always in q4_27 format for now.
- // memcpy_to_i16_from_q4_27(), but with stereo frame count (not sample count)
- ditherAndClamp((int32_t*)auxAddr, (int32_t*)auxAddr, outputFrames >> 1);
+ // Aux buffer is always in q4_27 format for O and earlier.
+ // memcpy_to_i16_from_q4_27((int16_t*)auxAddr, (const int32_t*)auxAddr, outputFrames);
+ // Aux buffer is always in float format for P.
+ memcpy_to_i16_from_float((int16_t*)auxAddr, (const float*)auxAddr, outputFrames);
writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false);
}
diff --git a/media/libcpustats/OWNERS b/media/libcpustats/OWNERS
new file mode 100644
index 0000000..f9cb567
--- /dev/null
+++ b/media/libcpustats/OWNERS
@@ -0,0 +1 @@
+gkasten@google.com
diff --git a/media/libeffects/OWNERS b/media/libeffects/OWNERS
index 7e3de13..7f9ae81 100644
--- a/media/libeffects/OWNERS
+++ b/media/libeffects/OWNERS
@@ -1,3 +1,4 @@
+hunga@google.com
krocard@google.com
mnaganov@google.com
rago@google.com
diff --git a/media/libeffects/config/Android.bp b/media/libeffects/config/Android.bp
index 4398a91..5fa9da9 100644
--- a/media/libeffects/config/Android.bp
+++ b/media/libeffects/config/Android.bp
@@ -1,10 +1,15 @@
// Effect configuration
-cc_library_shared {
+cc_library {
name: "libeffectsconfig",
vendor_available: true,
srcs: ["src/EffectsConfig.cpp"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
shared_libs: [
"liblog",
"libtinyxml2",
diff --git a/media/libeffects/config/include/media/EffectsConfig.h b/media/libeffects/config/include/media/EffectsConfig.h
index 811730c..55b946f 100644
--- a/media/libeffects/config/include/media/EffectsConfig.h
+++ b/media/libeffects/config/include/media/EffectsConfig.h
@@ -32,8 +32,13 @@
namespace android {
namespace effectsConfig {
-/** Default path of effect configuration file. */
-constexpr char DEFAULT_PATH[] = "/vendor/etc/audio_effects.xml";
+/** Default name of the effect configuration file. Relative to DEFAULT_LOCATIONS. */
+constexpr const char* DEFAULT_NAME = "audio_effects.xml";
+
+/** Default locations of the effect configuration file.
+ * The /vendor partition is the recommended one, the others are deprecated.
+ */
+constexpr const char* DEFAULT_LOCATIONS[] = {"/odm/etc", "/vendor/etc", "/system/etc"};
/** Directories where the effect libraries will be searched for. */
constexpr const char* LD_EFFECT_LIBRARY_PATH[] =
@@ -91,13 +96,16 @@
/** Parsed config, nullptr if the xml lib could not load the file */
std::unique_ptr<Config> parsedConfig;
size_t nbSkippedElement; //< Number of skipped invalid library, effect or processing chain
+ const char* configPath; //< Path to the loaded configuration
};
/** Parses the provided effect configuration.
 * Parsing does not stop at the first invalid element, but continues to the next.
+ * @param[in] path of the configuration file to load;
+ * if nullptr, look for DEFAULT_NAME in DEFAULT_LOCATIONS.
* @see ParsingResult::nbSkippedElement
*/
-ParsingResult parse(const char* path = DEFAULT_PATH);
+ParsingResult parse(const char* path = nullptr);
} // namespace effectsConfig
} // namespace android
diff --git a/media/libeffects/config/src/EffectsConfig.cpp b/media/libeffects/config/src/EffectsConfig.cpp
index 98a37ab..d79501f 100644
--- a/media/libeffects/config/src/EffectsConfig.cpp
+++ b/media/libeffects/config/src/EffectsConfig.cpp
@@ -20,6 +20,7 @@
#include <cstdint>
#include <functional>
#include <string>
+#include <unistd.h>
#include <tinyxml2.h>
#include <log/log.h>
@@ -85,7 +86,7 @@
constexpr std::enable_if<false, Enum> STREAM_NAME_MAP;
/** All output stream types which support effects.
- * This need to be kept in sink with the xsd streamOutputType.
+ * This needs to be kept in sync with the xsd streamOutputType.
*/
template <>
constexpr std::pair<audio_stream_type_t, const char*> STREAM_NAME_MAP<audio_stream_type_t>[] = {
@@ -102,7 +103,7 @@
};
/** All input stream types which support effects.
- * This need to be kept in sink with the xsd streamOutputType.
+ * This needs to be kept in sync with the xsd streamOutputType.
*/
template <>
constexpr std::pair<audio_source_t, const char*> STREAM_NAME_MAP<audio_source_t>[] = {
@@ -142,7 +143,7 @@
}
/** Find an element in a collection by its name.
- * @return nullptr if not found, the ellements address if found.
+ * @return nullptr if not found, otherwise the element's address.
*/
template <class T>
T* findByName(const char* name, std::vector<T>& collection) {
@@ -202,7 +203,7 @@
auto parseProxy = [&xmlEffect, &parseImpl](const char* tag, EffectImpl& proxyLib) {
auto* xmlProxyLib = xmlEffect.FirstChildElement(tag);
if (xmlProxyLib == nullptr) {
- ALOGE("effectProxy must contain a <%s>: %s", tag, dump(*xmlProxyLib));
+ ALOGE("effectProxy must contain a <%s>: %s", tag, dump(xmlEffect));
return false;
}
return parseImpl(*xmlProxyLib, proxyLib);
@@ -249,15 +250,14 @@
return true;
}
-}; // namespace
-
-ParsingResult parse(const char* path) {
+/** Internal version of the public parse(const char* path) with precondition `path != nullptr`. */
+ParsingResult parseWithPath(const char* path) {
XMLDocument doc;
doc.LoadFile(path);
if (doc.Error()) {
ALOGE("Failed to parse %s: Tinyxml2 error (%d): %s", path,
doc.ErrorID(), doc.ErrorStr());
- return {nullptr, 0};
+ return {nullptr, 0, path};
}
auto config = std::make_unique<Config>();
@@ -295,7 +295,29 @@
}
}
}
- return {std::move(config), nbSkippedElements};
+ return {std::move(config), nbSkippedElements, path};
+}
+
+}; // namespace
+
+ParsingResult parse(const char* path) {
+ if (path != nullptr) {
+ return parseWithPath(path);
+ }
+
+ for (std::string location : DEFAULT_LOCATIONS) {
+ std::string defaultPath = location + '/' + DEFAULT_NAME;
+ if (access(defaultPath.c_str(), R_OK) != 0) {
+ continue;
+ }
+ auto result = parseWithPath(defaultPath.c_str());
+ if (result.parsedConfig != nullptr) {
+ return result;
+ }
+ }
+
+ ALOGE("Could not parse effect configuration in any of the default locations.");
+ return {nullptr, 0, nullptr};
}
} // namespace effectsConfig
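
A minimal usage sketch of the new default-location fallback (illustrative only; the
actual call sites in libeffects/libaudiohal are not part of this change):

    // Passing nullptr (or no argument) makes parse() try DEFAULT_NAME under each
    // DEFAULT_LOCATIONS entry (/odm/etc, /vendor/etc, /system/etc) in order.
    auto result = android::effectsConfig::parse(nullptr);
    if (result.parsedConfig == nullptr) {
        ALOGE("No loadable audio_effects.xml found in the default locations");
    } else {
        ALOGI("Loaded effect config from %s, skipped %zu invalid elements",
                result.configPath, result.nbSkippedElement);
    }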
diff --git a/media/libeffects/data/audio_effects.conf b/media/libeffects/data/audio_effects.conf
index 14a171b..dd729c5 100644
--- a/media/libeffects/data/audio_effects.conf
+++ b/media/libeffects/data/audio_effects.conf
@@ -38,6 +38,9 @@
loudness_enhancer {
path /vendor/lib/soundfx/libldnhncr.so
}
+ dynamics_processing {
+ path /vendor/lib/soundfx/libdynproc.so
+ }
}
# Default pre-processing library. Add to audio_effect.conf "libraries" section if
@@ -129,6 +132,10 @@
library loudness_enhancer
uuid fa415329-2034-4bea-b5dc-5b381c8d1e2c
}
+ dynamics_processing {
+ library dynamics_processing
+ uuid e0e6539b-1781-7261-676f-6d7573696340
+ }
}
# Default pre-processing effects. Add to audio_effect.conf "effects" section if
diff --git a/media/libeffects/data/audio_effects.xml b/media/libeffects/data/audio_effects.xml
new file mode 100644
index 0000000..3f85052
--- /dev/null
+++ b/media/libeffects/data/audio_effects.xml
@@ -0,0 +1,102 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<audio_effects_conf version="2.0" xmlns="http://schemas.android.com/audio/audio_effects_conf/v2_0">
+ <!-- List of effect libraries to load.
+ Each library element must contain a "name" attribute and a "path" attribute giving the
+ name of a library .so file in /vendor/lib/soundfx on the target
+
+ If offloadable effects are present, the AOSP library libeffectproxy.so must be listed as
+ well as one library for the SW implementation and one library for the DSP implementation:
+ <library name="proxy" path="libeffectproxy.so"/>
+ <library name="some_fx_sw" path="lib_some_fx_sw.so"/>
+ <library name="some_fx_hw" path="lib_some_fx_hw.so"/>
+
+ If the audio HAL implements support for AOSP software audio pre-processing effects,
+ the following library must be added:
+ <library name="pre_processing" path="libaudiopreprocessing.so"/>
+ -->
+ <libraries>
+ <library name="bundle" path="libbundlewrapper.so"/>
+ <library name="reverb" path="libreverbwrapper.so"/>
+ <library name="visualizer" path="libvisualizer.so"/>
+ <library name="downmix" path="libdownmix.so"/>
+ <library name="loudness_enhancer" path="libldnhncr.so"/>
+ <library name="dynamics_processing" path="libdynproc.so"/>
+ </libraries>
+
+ <!-- list of effects to load.
+ Each "effect" element must contain a "name", "library" and a "uuid" attribute.
+ The value of the "library" element must correspond to the name of one library element in
+ the "libraries" element.
+ The "name" attribute is indicative, only the value of the "uuid" attribute designates
+ the effect.
+ The uuid is the implementation specific UUID as specified by the effect vendor. This is not
+ the generic effect type UUID.
+
+ Offloadable effects are described by an "effectProxy" element which contains one "libsw"
+ element containing the "uuid" and "library" for the SW implementation and one "libhw"
+ element containing the "uuid" and "library" for the DSP implementation.
+ The "uuid" value for the "effectProxy" element must be unique and will override the default
+ uuid in the AOSP proxy effect implementation.
+
+ If the audio HAL implements support for AOSP software audio pre-processing effects,
+ the following effects can be added:
+ <effect name="agc" library="pre_processing" uuid="aa8130e0-66fc-11e0-bad0-0002a5d5c51b"/>
+ <effect name="aec" library="pre_processing" uuid="bb392ec0-8d4d-11e0-a896-0002a5d5c51b"/>
+ <effect name="ns" library="pre_processing" uuid="c06c8400-8e06-11e0-9cb6-0002a5d5c51b"/>
+ -->
+
+ <effects>
+ <effect name="bassboost" library="bundle" uuid="8631f300-72e2-11df-b57e-0002a5d5c51b"/>
+ <effect name="virtualizer" library="bundle" uuid="1d4033c0-8557-11df-9f2d-0002a5d5c51b"/>
+ <effect name="equalizer" library="bundle" uuid="ce772f20-847d-11df-bb17-0002a5d5c51b"/>
+ <effect name="volume" library="bundle" uuid="119341a0-8469-11df-81f9-0002a5d5c51b"/>
+ <effect name="reverb_env_aux" library="reverb" uuid="4a387fc0-8ab3-11df-8bad-0002a5d5c51b"/>
+ <effect name="reverb_env_ins" library="reverb" uuid="c7a511a0-a3bb-11df-860e-0002a5d5c51b"/>
+ <effect name="reverb_pre_aux" library="reverb" uuid="f29a1400-a3bb-11df-8ddc-0002a5d5c51b"/>
+ <effect name="reverb_pre_ins" library="reverb" uuid="172cdf00-a3bc-11df-a72f-0002a5d5c51b"/>
+ <effect name="visualizer" library="visualizer" uuid="d069d9e0-8329-11df-9168-0002a5d5c51b"/>
+ <effect name="downmix" library="downmix" uuid="93f04452-e4fe-41cc-91f9-e475b6d1d69f"/>
+ <effect name="loudness_enhancer" library="loudness_enhancer" uuid="fa415329-2034-4bea-b5dc-5b381c8d1e2c"/>
+ <effect name="dynamics_processing" library="dynamics_processing" uuid="e0e6539b-1781-7261-676f-6d7573696340"/>
+ </effects>
+
+ <!-- Audio pre processor configurations.
+ The pre processor configuration is described in a "preprocess" element and consists in a
+ list of elements each describing pre processor settings for a given use case or "stream".
+ Each stream element has a "type" attribute corresponding to the input source used.
+ Valid types are:
+ "mic", "camcorder", "voice_recognition", "voice_communication"
+ Each "stream" element contains a list of "apply" elements indicating one effect to apply.
+ The effect to apply is designated by its name in the "effects" elements.
+
+ <preprocess>
+ <stream type="voice_communication">
+ <apply effect="aec"/>
+ <apply effect="ns"/>
+ </stream>
+ </preprocess>
+ -->
+
+ <!-- Audio post-processor configurations.
+ The post-processor configuration is described in a "postprocess" element and consists of a
+ list of elements, each describing post-processor settings for a given use case or "stream".
+ Each stream element has a "type" attribute corresponding to the stream type used.
+ Valid types are:
+ "music", "ring", "alarm", "notification", "voice_call"
+ Each "stream" element contains a list of "apply" elements indicating one effect to apply.
+ The effect to apply is designated by its name in the "effects" elements.
+
+ <postprocess>
+ <stream type="music">
+ <apply effect="music_post_proc"/>
+ </stream>
+ <stream type="voice_call">
+ <apply effect="voice_post_proc"/>
+ </stream>
+ <stream type="notification">
+ <apply effect="notification_post_proc"/>
+ </stream>
+ </postprocess>
+ -->
+
+</audio_effects_conf>
diff --git a/media/libeffects/dynamicsproc/Android.mk b/media/libeffects/dynamicsproc/Android.mk
new file mode 100644
index 0000000..7be0c49
--- /dev/null
+++ b/media/libeffects/dynamicsproc/Android.mk
@@ -0,0 +1,43 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+# DynamicsProcessing library
+include $(CLEAR_VARS)
+
+LOCAL_VENDOR_MODULE := true
+
+EIGEN_PATH := external/eigen
+LOCAL_C_INCLUDES += $(EIGEN_PATH)
+
+LOCAL_SRC_FILES:= \
+ EffectDynamicsProcessing.cpp \
+ dsp/DPBase.cpp \
+ dsp/DPFrequency.cpp
+
+LOCAL_CFLAGS+= -O2 -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ liblog \
+
+LOCAL_MODULE_RELATIVE_PATH := soundfx
+LOCAL_MODULE:= libdynproc
+
+LOCAL_HEADER_LIBRARIES := \
+ libaudioeffects
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
new file mode 100644
index 0000000..0b883f1
--- /dev/null
+++ b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
@@ -0,0 +1,1300 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectDP"
+//#define LOG_NDEBUG 0
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <new>
+
+#include <log/log.h>
+
+#include <audio_effects/effect_dynamicsprocessing.h>
+#include <dsp/DPBase.h>
+#include <dsp/DPFrequency.h>
+
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while (false)
+#endif
+
+// union to hold command values
+using value_t = union {
+ int32_t i;
+ float f;
+};
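+// Each 32-bit slot of an effect_param_t parameter/value area is reinterpreted
+// through this union as either an int32_t or a float, depending on the field.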
+
+// effect_handle_t interface implementation for DP effect
+extern const struct effect_interface_s gDPInterface;
+
+// AOSP Dynamics Processing UUID: e0e6539b-1781-7261-676f-6d7573696340
+const effect_descriptor_t gDPDescriptor = {
+ {0x7261676f, 0x6d75, 0x7369, 0x6364, {0x28, 0xe2, 0xfd, 0x3a, 0xc3, 0x9e}}, // type
+ {0xe0e6539b, 0x1781, 0x7261, 0x676f, {0x6d, 0x75, 0x73, 0x69, 0x63, 0x40}}, // uuid
+ EFFECT_CONTROL_API_VERSION,
+ (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_LAST | EFFECT_FLAG_VOLUME_CTRL),
+ 0, // TODO
+ 1,
+ "Dynamics Processing",
+ "The Android Open Source Project",
+};
+
+enum dp_state_e {
+ DYNAMICS_PROCESSING_STATE_UNINITIALIZED,
+ DYNAMICS_PROCESSING_STATE_INITIALIZED,
+ DYNAMICS_PROCESSING_STATE_ACTIVE,
+};
+
+struct DynamicsProcessingContext {
+ const struct effect_interface_s *mItfe;
+ effect_config_t mConfig;
+ uint8_t mState;
+
+ dp_fx::DPBase * mPDynamics; //the effect (or current effect)
+ int32_t mCurrentVariant;
+ float mPreferredFrameDuration;
+};
+
+// The value offset of an effect parameter is computed by rounding up
+// the parameter size to the next 32 bit alignment.
+static inline uint32_t computeParamVOffset(const effect_param_t *p) {
+ return ((p->psize + sizeof(int32_t) - 1) / sizeof(int32_t)) *
+ sizeof(int32_t);
+}
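+// Illustrative example: a parameter area of psize = 5 bytes is padded up to the
+// next int32_t boundary, so the value area starts at voffset = 8; psize = 8 also
+// yields voffset = 8.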
+
+//--- local function prototypes
+int DP_setParameter(DynamicsProcessingContext *pContext,
+ uint32_t paramSize,
+ void *pParam,
+ uint32_t valueSize,
+ void *pValue);
+int DP_getParameter(DynamicsProcessingContext *pContext,
+ uint32_t paramSize,
+ void *pParam,
+ uint32_t *pValueSize,
+ void *pValue);
+int DP_getParameterCmdSize(uint32_t paramSize,
+ void *pParam);
+void DP_expectedParamValueSizes(uint32_t paramSize,
+ void *pParam,
+ bool isSet,
+ uint32_t *pCmdSize,
+ uint32_t *pValueSize);
+//
+//--- Local functions (not directly used by effect interface)
+//
+
+void DP_reset(DynamicsProcessingContext *pContext)
+{
+ ALOGV("> DP_reset(%p)", pContext);
+ if (pContext->mPDynamics != NULL) {
+ pContext->mPDynamics->reset();
+ } else {
+ ALOGE("DP_reset(%p): null DynamicsProcessing", pContext);
+ }
+}
+
+//----------------------------------------------------------------------------
+// DP_setConfig()
+//----------------------------------------------------------------------------
+// Purpose: Set input and output audio configuration.
+//
+// Inputs:
+// pContext: effect engine context
+// pConfig: pointer to effect_config_t structure holding input and output
+// configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+int DP_setConfig(DynamicsProcessingContext *pContext, effect_config_t *pConfig)
+{
+ ALOGV("DP_setConfig(%p)", pContext);
+
+ if (pConfig->inputCfg.samplingRate != pConfig->outputCfg.samplingRate) return -EINVAL;
+ if (pConfig->inputCfg.channels != pConfig->outputCfg.channels) return -EINVAL;
+ if (pConfig->inputCfg.format != pConfig->outputCfg.format) return -EINVAL;
+ if (pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_WRITE &&
+ pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_ACCUMULATE) return -EINVAL;
+ if (pConfig->inputCfg.format != AUDIO_FORMAT_PCM_FLOAT) return -EINVAL;
+
+ pContext->mConfig = *pConfig;
+
+ DP_reset(pContext);
+
+ return 0;
+}
+
+//----------------------------------------------------------------------------
+// DP_getConfig()
+//----------------------------------------------------------------------------
+// Purpose: Get input and output audio configuration.
+//
+// Inputs:
+// pContext: effect engine context
+// pConfig: pointer to effect_config_t structure holding input and output
+// configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+void DP_getConfig(DynamicsProcessingContext *pContext, effect_config_t *pConfig)
+{
+ *pConfig = pContext->mConfig;
+}
+
+//----------------------------------------------------------------------------
+// DP_init()
+//----------------------------------------------------------------------------
+// Purpose: Initialize engine with default configuration.
+//
+// Inputs:
+// pContext: effect engine context
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+int DP_init(DynamicsProcessingContext *pContext)
+{
+ ALOGV("DP_init(%p)", pContext);
+
+ pContext->mItfe = &gDPInterface;
+ pContext->mPDynamics = NULL;
+ pContext->mState = DYNAMICS_PROCESSING_STATE_UNINITIALIZED;
+
+ pContext->mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+ pContext->mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ pContext->mConfig.inputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+ pContext->mConfig.inputCfg.samplingRate = 48000;
+ pContext->mConfig.inputCfg.bufferProvider.getBuffer = NULL;
+ pContext->mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
+ pContext->mConfig.inputCfg.bufferProvider.cookie = NULL;
+ pContext->mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
+ pContext->mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
+ pContext->mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ pContext->mConfig.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+ pContext->mConfig.outputCfg.samplingRate = 48000;
+ pContext->mConfig.outputCfg.bufferProvider.getBuffer = NULL;
+ pContext->mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
+ pContext->mConfig.outputCfg.bufferProvider.cookie = NULL;
+ pContext->mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
+
+ pContext->mCurrentVariant = -1; //none
+ pContext->mPreferredFrameDuration = 0; //none
+
+ DP_setConfig(pContext, &pContext->mConfig);
+ pContext->mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+ return 0;
+}
+
+void DP_changeVariant(DynamicsProcessingContext *pContext, int newVariant) {
+ ALOGV("DP_changeVariant from %d to %d", pContext->mCurrentVariant, newVariant);
+ switch(newVariant) {
+ case VARIANT_FAVOR_FREQUENCY_RESOLUTION: {
+ pContext->mCurrentVariant = VARIANT_FAVOR_FREQUENCY_RESOLUTION;
+ delete pContext->mPDynamics;
+ pContext->mPDynamics = new dp_fx::DPFrequency();
+ break;
+ }
+ default: {
+ ALOGW("DynamicsProcessing variant %d not available for creation", newVariant);
+ break;
+ }
+ } //switch
+}
+
+static inline bool isPowerOf2(unsigned long n) {
+ return (n & (n - 1)) == 0;
+}
+
+void DP_configureVariant(DynamicsProcessingContext *pContext, int newVariant) {
+ ALOGV("DP_configureVariant %d", newVariant);
+ switch(newVariant) {
+ case VARIANT_FAVOR_FREQUENCY_RESOLUTION: {
+ int32_t minBlockSize = (int32_t)dp_fx::DPFrequency::getMinBockSize();
+ int32_t desiredBlock = pContext->mPreferredFrameDuration *
+ pContext->mConfig.inputCfg.samplingRate / 1000.0f;
+ int32_t currentBlock = desiredBlock;
+ ALOGV(" sampling rate: %d, desiredBlock size %0.2f (%d) samples",
+ pContext->mConfig.inputCfg.samplingRate, pContext->mPreferredFrameDuration,
+ desiredBlock);
+ if (desiredBlock < minBlockSize) {
+ currentBlock = minBlockSize;
+ } else if (!isPowerOf2(desiredBlock)) {
+ //find next highest power of 2.
+ currentBlock = 1 << (32 - __builtin_clz(desiredBlock));
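+ // e.g. (illustrative) desiredBlock = 1000 -> __builtin_clz(1000) = 22,
+ // so currentBlock = 1 << 10 = 1024 samples.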
+ }
+ ((dp_fx::DPFrequency*)pContext->mPDynamics)->configure(currentBlock,
+ currentBlock/2,
+ pContext->mConfig.inputCfg.samplingRate);
+ break;
+ }
+ default: {
+ ALOGE("DynamicsProcessing variant %d not available to configure", newVariant);
+ break;
+ }
+ }
+}
+
+//
+//--- Effect Library Interface Implementation
+//
+
+int DPLib_Release(effect_handle_t handle) {
+ DynamicsProcessingContext * pContext = (DynamicsProcessingContext *)handle;
+
+ ALOGV("DPLib_Release %p", handle);
+ if (pContext == NULL) {
+ return -EINVAL;
+ }
+ delete pContext->mPDynamics;
+ delete pContext;
+
+ return 0;
+}
+
+int DPLib_Create(const effect_uuid_t *uuid,
+ int32_t sessionId __unused,
+ int32_t ioId __unused,
+ effect_handle_t *pHandle) {
+ ALOGV("DPLib_Create()");
+
+ if (pHandle == NULL || uuid == NULL) {
+ return -EINVAL;
+ }
+
+ if (memcmp(uuid, &gDPDescriptor.uuid, sizeof(*uuid)) != 0) {
+ return -EINVAL;
+ }
+
+ DynamicsProcessingContext *pContext = new DynamicsProcessingContext;
+ *pHandle = (effect_handle_t)pContext;
+ int ret = DP_init(pContext);
+ if (ret < 0) {
+ ALOGW("DPLib_Create() init failed");
+ DPLib_Release(*pHandle);
+ return ret;
+ }
+
+ ALOGV("DPLib_Create context is %p", pContext);
+ return 0;
+}
+
+int DPLib_GetDescriptor(const effect_uuid_t *uuid,
+ effect_descriptor_t *pDescriptor) {
+
+ if (pDescriptor == NULL || uuid == NULL){
+ ALOGE("DPLib_GetDescriptor() called with NULL pointer");
+ return -EINVAL;
+ }
+
+ if (memcmp(uuid, &gDPDescriptor.uuid, sizeof(*uuid)) == 0) {
+ *pDescriptor = gDPDescriptor;
+ return 0;
+ }
+
+ return -EINVAL;
+} /* end DPLib_GetDescriptor */
+
+//
+//--- Effect Control Interface Implementation
+//
+int DP_process(effect_handle_t self, audio_buffer_t *inBuffer,
+ audio_buffer_t *outBuffer) {
+ DynamicsProcessingContext * pContext = (DynamicsProcessingContext *)self;
+
+ if (pContext == NULL) {
+ ALOGE("DP_process() called with NULL context");
+ return -EINVAL;
+ }
+
+ if (inBuffer == NULL || inBuffer->raw == NULL ||
+ outBuffer == NULL || outBuffer->raw == NULL ||
+ inBuffer->frameCount != outBuffer->frameCount ||
+ inBuffer->frameCount == 0) {
+ ALOGE("inBuffer or outBuffer are NULL or have problems with frame count");
+ return -EINVAL;
+ }
+ if (pContext->mState != DYNAMICS_PROCESSING_STATE_ACTIVE) {
+ ALOGE("mState is not DYNAMICS_PROCESSING_STATE_ACTIVE. Current mState %d",
+ pContext->mState);
+ return -ENODATA;
+ }
+ //if dynamics exist...
+ if (pContext->mPDynamics != NULL) {
+ int32_t channelCount = (int32_t)audio_channel_count_from_out_mask(
+ pContext->mConfig.inputCfg.channels);
+ pContext->mPDynamics->processSamples(inBuffer->f32, inBuffer->f32,
+ inBuffer->frameCount * channelCount);
+
+ if (inBuffer->raw != outBuffer->raw) {
+ if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ for (size_t i = 0; i < outBuffer->frameCount * channelCount; i++) {
+ outBuffer->f32[i] += inBuffer->f32[i];
+ }
+ } else {
+ memcpy(outBuffer->raw, inBuffer->raw,
+ outBuffer->frameCount * channelCount * sizeof(float));
+ }
+ }
+ } else {
+ //do nothing. no effect created yet. warning.
+ ALOGW("Warning: no DynamicsProcessing engine available");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+//helper function
+bool DP_checkSizesInt(uint32_t paramSize, uint32_t valueSize, uint32_t expectedParams,
+ uint32_t expectedValues) {
+ if (paramSize < expectedParams * sizeof(int32_t)) {
+ ALOGE("Invalid paramSize: %u expected %u", paramSize,
+ (uint32_t)(expectedParams * sizeof(int32_t)));
+ return false;
+ }
+ if (valueSize < expectedValues * sizeof(int32_t)) {
+ ALOGE("Invalid valueSize %u expected %u", valueSize,
+ (uint32_t)(expectedValues * sizeof(int32_t)));
+ return false;
+ }
+ return true;
+}
+
+static dp_fx::DPChannel* DP_getChannel(DynamicsProcessingContext *pContext,
+ int32_t channel) {
+ if (pContext->mPDynamics == NULL) {
+ return NULL;
+ }
+ dp_fx::DPChannel *pChannel = pContext->mPDynamics->getChannel(channel);
+ ALOGE_IF(pChannel == NULL, "DPChannel NULL. invalid channel %d", channel);
+ return pChannel;
+}
+
+static dp_fx::DPEq* DP_getEq(DynamicsProcessingContext *pContext, int32_t channel,
+ int32_t eqType) {
+ dp_fx::DPChannel *pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ return NULL;
+ }
+ dp_fx::DPEq *pEq = (eqType == DP_PARAM_PRE_EQ ? pChannel->getPreEq() :
+ (eqType == DP_PARAM_POST_EQ ? pChannel->getPostEq() : NULL));
+ ALOGE_IF(pEq == NULL,"DPEq NULL invalid eq");
+ return pEq;
+}
+
+static dp_fx::DPEqBand* DP_getEqBand(DynamicsProcessingContext *pContext, int32_t channel,
+ int32_t eqType, int32_t band) {
+ dp_fx::DPEq *pEq = DP_getEq(pContext, channel, eqType);
+ if (pEq == NULL) {
+ return NULL;
+ }
+ dp_fx::DPEqBand *pEqBand = pEq->getBand(band);
+ ALOGE_IF(pEqBand == NULL, "DPEqBand NULL. invalid band %d", band);
+ return pEqBand;
+}
+
+static dp_fx::DPMbc* DP_getMbc(DynamicsProcessingContext *pContext, int32_t channel) {
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ return NULL;
+ }
+ dp_fx::DPMbc *pMbc = pChannel->getMbc();
+ ALOGE_IF(pMbc == NULL, "DPMbc NULL invalid MBC");
+ return pMbc;
+}
+
+static dp_fx::DPMbcBand* DP_getMbcBand(DynamicsProcessingContext *pContext, int32_t channel,
+ int32_t band) {
+ dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+ if (pMbc == NULL) {
+ return NULL;
+ }
+ dp_fx::DPMbcBand *pMbcBand = pMbc->getBand(band);
+ ALOGE_IF(pMbcBand == NULL, "pMbcBand NULL. invalid band %d", band);
+ return pMbcBand;
+}
+
+int DP_command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
+ void *pCmdData, uint32_t *replySize, void *pReplyData) {
+
+ DynamicsProcessingContext * pContext = (DynamicsProcessingContext *)self;
+
+ if (pContext == NULL || pContext->mState == DYNAMICS_PROCESSING_STATE_UNINITIALIZED) {
+ ALOGE("DP_command() called with NULL context or uninitialized state.");
+ return -EINVAL;
+ }
+
+ ALOGV("DP_command command %d cmdSize %d",cmdCode, cmdSize);
+ switch (cmdCode) {
+ case EFFECT_CMD_INIT:
+ if (pReplyData == NULL || *replySize != sizeof(int)) {
+ ALOGE("EFFECT_CMD_INIT wrong replyData or repySize");
+ return -EINVAL;
+ }
+ *(int *) pReplyData = DP_init(pContext);
+ break;
+ case EFFECT_CMD_SET_CONFIG:
+ if (pCmdData == NULL || cmdSize != sizeof(effect_config_t)
+ || pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+ ALOGE("EFFECT_CMD_SET_CONFIG error with pCmdData, cmdSize, pReplyData or replySize");
+ return -EINVAL;
+ }
+ *(int *) pReplyData = DP_setConfig(pContext,
+ (effect_config_t *) pCmdData);
+ break;
+ case EFFECT_CMD_GET_CONFIG:
+ if (pReplyData == NULL ||
+ *replySize != sizeof(effect_config_t)) {
+ ALOGE("EFFECT_CMD_GET_CONFIG wrong replyData or repySize");
+ return -EINVAL;
+ }
+ DP_getConfig(pContext, (effect_config_t *)pReplyData);
+ break;
+ case EFFECT_CMD_RESET:
+ DP_reset(pContext);
+ break;
+ case EFFECT_CMD_ENABLE:
+ if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+ ALOGE("EFFECT_CMD_ENABLE wrong replyData or repySize");
+ return -EINVAL;
+ }
+ if (pContext->mState != DYNAMICS_PROCESSING_STATE_INITIALIZED) {
+ ALOGE("EFFECT_CMD_ENABLE state not initialized");
+ *(int *)pReplyData = -ENOSYS;
+ } else {
+ pContext->mState = DYNAMICS_PROCESSING_STATE_ACTIVE;
+ ALOGV("EFFECT_CMD_ENABLE() OK");
+ *(int *)pReplyData = 0;
+ }
+ break;
+ case EFFECT_CMD_DISABLE:
+ if (pReplyData == NULL || replySize == NULL || *replySize != sizeof(int)) {
+ ALOGE("EFFECT_CMD_DISABLE wrong replyData or repySize");
+ return -EINVAL;
+ }
+ if (pContext->mState != DYNAMICS_PROCESSING_STATE_ACTIVE) {
+ ALOGE("EFFECT_CMD_DISABLE state not active");
+ *(int *)pReplyData = -ENOSYS;
+ } else {
+ pContext->mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+ ALOGV("EFFECT_CMD_DISABLE() OK");
+ *(int *)pReplyData = 0;
+ }
+ break;
+ case EFFECT_CMD_GET_PARAM: {
+ if (pCmdData == NULL || pReplyData == NULL || replySize == NULL) {
+ ALOGE("null pCmdData or pReplyData or replySize");
+ return -EINVAL;
+ }
+ effect_param_t *pEffectParam = (effect_param_t *) pCmdData;
+ uint32_t expectedCmdSize = DP_getParameterCmdSize(pEffectParam->psize,
+ pEffectParam->data);
+ if (cmdSize != expectedCmdSize || *replySize < expectedCmdSize) {
+ ALOGE("error cmdSize: %d, expetedCmdSize: %d, replySize: %d",
+ cmdSize, expectedCmdSize, *replySize);
+ return -EINVAL;
+ }
+
+ ALOGVV("DP_command expectedCmdSize: %d", expectedCmdSize);
+ memcpy(pReplyData, pCmdData, expectedCmdSize);
+ effect_param_t *p = (effect_param_t *)pReplyData;
+
+ uint32_t voffset = computeParamVOffset(p);
+
+ p->status = DP_getParameter(pContext,
+ p->psize,
+ p->data,
+ &p->vsize,
+ p->data + voffset);
+ *replySize = sizeof(effect_param_t) + voffset + p->vsize;
+
+ ALOGVV("DP_command replysize %u, status %d" , *replySize, p->status);
+ break;
+ }
+ case EFFECT_CMD_SET_PARAM: {
+ if (pCmdData == NULL ||
+ cmdSize < (sizeof(effect_param_t) + sizeof(int32_t) + sizeof(int32_t)) ||
+ pReplyData == NULL || replySize == NULL || *replySize != sizeof(int32_t)) {
+ ALOGE("\tLVM_ERROR : DynamicsProcessing cmdCode Case: "
+ "EFFECT_CMD_SET_PARAM: ERROR");
+ return -EINVAL;
+ }
+
+ effect_param_t * const p = (effect_param_t *) pCmdData;
+ const uint32_t voffset = computeParamVOffset(p);
+
+ *(int *)pReplyData = DP_setParameter(pContext,
+ p->psize,
+ (void *)p->data,
+ p->vsize,
+ p->data + voffset);
+ break;
+ }
+ case EFFECT_CMD_SET_VOLUME: {
+ ALOGV("EFFECT_CMD_SET_VOLUME");
+ // if pReplyData is NULL, VOL_CTRL is delegated to another effect
+ if (pReplyData == NULL || replySize == NULL || *replySize < ((int)sizeof(int32_t) * 2)) {
+ ALOGV("no VOLUME data to return");
+ break;
+ }
+ if (pCmdData == NULL || cmdSize < ((int)sizeof(uint32_t) * 2)) {
+ ALOGE("\tLVM_ERROR : DynamicsProcessing EFFECT_CMD_SET_VOLUME ERROR");
+ return -EINVAL;
+ }
+
+ const int32_t unityGain = 1 << 24;
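+ // Volumes for EFFECT_CMD_SET_VOLUME are in 8.24 fixed point, so 1 << 24
+ // corresponds to unity gain (0 dB).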
+ //channel count
+ int32_t channelCount = (int32_t)audio_channel_count_from_out_mask(
+ pContext->mConfig.inputCfg.channels);
+ for (int32_t ch = 0; ch < channelCount; ch++) {
+
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, ch);
+ if (pChannel == NULL) {
+ ALOGE("%s EFFECT_CMD_SET_VOLUME invalid channel %d", __func__, ch);
+ return -EINVAL;
+ break;
+ }
+
+ int32_t offset = ch;
+ if (ch > 1) {
+ // FIXME: limited to 2 unique channels. If more channels present, use value for
+ // first channel
+ offset = 0;
+ }
+ const float gain = (float)*((uint32_t *)pCmdData + offset) / unityGain;
+ const float gainDb = linearToDb(gain);
+ ALOGVV("%s EFFECT_CMD_SET_VOLUME channel %d, engine outputlevel %f (%0.2f dB)",
+ __func__, ch, gain, gainDb);
+ pChannel->setOutputGain(gainDb);
+ }
+
+ const int32_t volRet[2] = {unityGain, unityGain}; // Apply no volume before effect.
+ memcpy(pReplyData, volRet, sizeof(volRet));
+ break;
+ }
+ case EFFECT_CMD_SET_DEVICE:
+ case EFFECT_CMD_SET_AUDIO_MODE:
+ break;
+
+ default:
+ ALOGW("DP_command invalid command %d",cmdCode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+//register expected cmd size
+int DP_getParameterCmdSize(uint32_t paramSize,
+ void *pParam) {
+ if (paramSize < sizeof(int32_t)) {
+ return 0;
+ }
+ int32_t param = *(int32_t*)pParam;
+ switch(param) {
+ case DP_PARAM_GET_CHANNEL_COUNT: //paramcmd
+ case DP_PARAM_ENGINE_ARCHITECTURE:
+ //effect + param
+ return (int)(sizeof(effect_param_t) + sizeof(uint32_t));
+ case DP_PARAM_INPUT_GAIN: //paramcmd + param
+ case DP_PARAM_LIMITER:
+ case DP_PARAM_PRE_EQ:
+ case DP_PARAM_POST_EQ:
+ case DP_PARAM_MBC:
+ //effect + param
+ return (int)(sizeof(effect_param_t) + 2 * sizeof(uint32_t));
+ case DP_PARAM_PRE_EQ_BAND:
+ case DP_PARAM_POST_EQ_BAND:
+ case DP_PARAM_MBC_BAND:
+ return (int)(sizeof(effect_param_t) + 3 * sizeof(uint32_t));
+ }
+ return 0;
+}
+
+int DP_getParameter(DynamicsProcessingContext *pContext,
+ uint32_t paramSize,
+ void *pParam,
+ uint32_t *pValueSize,
+ void *pValue) {
+ int status = 0;
+ int32_t *params = (int32_t *)pParam;
+ static_assert(sizeof(float) == sizeof(int32_t) && sizeof(float) == sizeof(value_t) &&
+ alignof(float) == alignof(int32_t) && alignof(float) == alignof(value_t),
+ "Size/alignment mismatch for float/int32_t/value_t");
+ value_t *values = reinterpret_cast<value_t*>(pValue);
+
+ ALOGVV("%s start", __func__);
+#ifdef VERY_VERY_VERBOSE_LOGGING
+ for (size_t i = 0; i < paramSize/sizeof(int32_t); i++) {
+ ALOGVV("Param[%zu] %d", i, params[i]);
+ }
+#endif
+ if (paramSize < sizeof(int32_t)) {
+ ALOGE("%s invalid paramSize: %u", __func__, paramSize);
+ return -EINVAL;
+ }
+ const int32_t command = params[0];
+ switch (command) {
+ case DP_PARAM_GET_CHANNEL_COUNT: {
+ if (!DP_checkSizesInt(paramSize,*pValueSize, 1 /*params*/, 1 /*values*/)) {
+ ALOGE("%s DP_PARAM_GET_CHANNEL_COUNT (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+ *pValueSize = sizeof(uint32_t);
+ *(uint32_t *)pValue = (uint32_t)audio_channel_count_from_out_mask(
+ pContext->mConfig.inputCfg.channels);
+ ALOGVV("%s DP_PARAM_GET_CHANNEL_COUNT channels %d", __func__, *(int32_t *)pValue);
+ break;
+ }
+ case DP_PARAM_ENGINE_ARCHITECTURE: {
+ ALOGVV("engine architecture paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 1 /*params*/, 9 /*values*/)) {
+ ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_ENGINE_ARCHITECTURE };
+// Number[] values = { 0 /*0 variant */,
+// 0.0f /* 1 preferredFrameDuration */,
+// 0 /*2 preEqInUse */,
+// 0 /*3 preEqBandCount */,
+// 0 /*4 mbcInUse */,
+// 0 /*5 mbcBandCount*/,
+// 0 /*6 postEqInUse */,
+// 0 /*7 postEqBandCount */,
+// 0 /*8 limiterInUse */};
+ if (pContext->mPDynamics == NULL) {
+ ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE error mPDynamics is NULL", __func__);
+ status = -EINVAL;
+ break;
+ }
+ values[0].i = pContext->mCurrentVariant;
+ values[1].f = pContext->mPreferredFrameDuration;
+ values[2].i = pContext->mPDynamics->isPreEQInUse();
+ values[3].i = pContext->mPDynamics->getPreEqBandCount();
+ values[4].i = pContext->mPDynamics->isMbcInUse();
+ values[5].i = pContext->mPDynamics->getMbcBandCount();
+ values[6].i = pContext->mPDynamics->isPostEqInUse();
+ values[7].i = pContext->mPDynamics->getPostEqBandCount();
+ values[8].i = pContext->mPDynamics->isLimiterInUse();
+
+ *pValueSize = sizeof(value_t) * 9;
+
+ ALOGVV(" variant %d, preferredFrameDuration: %f, preEqInuse %d, bands %d, mbcinuse %d,"
+ "mbcbands %d, posteqInUse %d, bands %d, limiterinuse %d",
+ values[0].i, values[1].f, values[2].i, values[3].i, values[4].i, values[5].i,
+ values[6].i, values[7].i, values[8].i);
+ break;
+ }
+ case DP_PARAM_INPUT_GAIN: {
+ ALOGVV("engine get PARAM_INPUT_GAIN paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 1 /*values*/)) {
+ ALOGE("%s get PARAM_INPUT_GAIN invalid sizes.", __func__);
+ status = -EINVAL;
+ break;
+ }
+
+ const int32_t channel = params[1];
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ ALOGE("%s get PARAM_INPUT_GAIN invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+ values[0].f = pChannel->getInputGain();
+ *pValueSize = sizeof(value_t) * 1;
+
+ ALOGVV(" channel: %d, input gain %f\n", channel, values[0].f);
+ break;
+ }
+ case DP_PARAM_PRE_EQ:
+ case DP_PARAM_POST_EQ: {
+ ALOGVV("engine get PARAM_*_EQ paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 3 /*values*/)) {
+ ALOGE("%s get PARAM_*_EQ (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = {paramSet == PARAM_PRE_EQ ? PARAM_PRE_EQ : PARAM_POST_EQ,
+// channelIndex};
+// Number[] values = {0 /*0 in use */,
+// 0 /*1 enabled*/,
+// 0 /*2 band count */};
+ const int32_t channel = params[1];
+
+ dp_fx::DPEq *pEq = DP_getEq(pContext, channel, command);
+ if (pEq == NULL) {
+ ALOGE("%s get PARAM_*_EQ invalid eq", __func__);
+ status = -EINVAL;
+ break;
+ }
+ values[0].i = pEq->isInUse();
+ values[1].i = pEq->isEnabled();
+ values[2].i = pEq->getBandCount();
+ *pValueSize = sizeof(value_t) * 3;
+
+ ALOGVV(" %s channel: %d, inUse::%d, enabled:%d, bandCount:%d\n",
+ (command == DP_PARAM_PRE_EQ ? "preEq" : "postEq"), channel,
+ values[0].i, values[1].i, values[2].i);
+ break;
+ }
+ case DP_PARAM_PRE_EQ_BAND:
+ case DP_PARAM_POST_EQ_BAND: {
+ ALOGVV("engine get PARAM_*_EQ_BAND paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 3 /*params*/, 3 /*values*/)) {
+ ALOGE("%s get PARAM_*_EQ_BAND (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = {paramSet,
+// channelIndex,
+// bandIndex};
+// Number[] values = {(eqBand.isEnabled() ? 1 : 0),
+// eqBand.getCutoffFrequency(),
+// eqBand.getGain()};
+ const int32_t channel = params[1];
+ const int32_t band = params[2];
+ int eqCommand = (command == DP_PARAM_PRE_EQ_BAND ? DP_PARAM_PRE_EQ :
+ (command == DP_PARAM_POST_EQ_BAND ? DP_PARAM_POST_EQ : -1));
+
+ dp_fx::DPEqBand *pEqBand = DP_getEqBand(pContext, channel, eqCommand, band);
+ if (pEqBand == NULL) {
+ ALOGE("%s get PARAM_*_EQ_BAND invalid channel %d or band %d", __func__, channel, band);
+ status = -EINVAL;
+ break;
+ }
+
+ values[0].i = pEqBand->isEnabled();
+ values[1].f = pEqBand->getCutoffFrequency();
+ values[2].f = pEqBand->getGain();
+ *pValueSize = sizeof(value_t) * 3;
+
+ ALOGVV("%s channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, gain%f\n",
+ (command == DP_PARAM_PRE_EQ_BAND ? "preEqBand" : "postEqBand"), channel, band,
+ values[0].i, values[1].f, values[2].f);
+ break;
+ }
+ case DP_PARAM_MBC: {
+ ALOGVV("engine get PDP_PARAM_MBC paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 3 /*values*/)) {
+ ALOGE("%s get PDP_PARAM_MBC (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+
+// Number[] params = {PARAM_MBC,
+// channelIndex};
+// Number[] values = {0 /*0 in use */,
+// 0 /*1 enabled*/,
+// 0 /*2 band count */};
+
+ const int32_t channel = params[1];
+
+ dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+ if (pMbc == NULL) {
+ ALOGE("%s get PDP_PARAM_MBC invalid MBC", __func__);
+ status = -EINVAL;
+ break;
+ }
+
+ values[0].i = pMbc->isInUse();
+ values[1].i = pMbc->isEnabled();
+ values[2].i = pMbc->getBandCount();
+ *pValueSize = sizeof(value_t) * 3;
+
+ ALOGVV("DP_PARAM_MBC channel: %d, inUse::%d, enabled:%d, bandCount:%d\n", channel,
+ values[0].i, values[1].i, values[2].i);
+ break;
+ }
+ case DP_PARAM_MBC_BAND: {
+ ALOGVV("engine get DP_PARAM_MBC_BAND paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 3 /*params*/, 11 /*values*/)) {
+ ALOGE("%s get DP_PARAM_MBC_BAND (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = {PARAM_MBC_BAND,
+// channelIndex,
+// bandIndex};
+// Number[] values = {0 /*0 enabled */,
+// 0.0f /*1 cutoffFrequency */,
+// 0.0f /*2 AttackTime */,
+// 0.0f /*3 ReleaseTime */,
+// 0.0f /*4 Ratio */,
+// 0.0f /*5 Threshold */,
+// 0.0f /*6 KneeWidth */,
+// 0.0f /*7 NoiseGateThreshold */,
+// 0.0f /*8 ExpanderRatio */,
+// 0.0f /*9 PreGain */,
+// 0.0f /*10 PostGain*/};
+
+ const int32_t channel = params[1];
+ const int32_t band = params[2];
+
+ dp_fx::DPMbcBand *pMbcBand = DP_getMbcBand(pContext, channel, band);
+ if (pMbcBand == NULL) {
+ ALOGE("%s get PARAM_MBC_BAND invalid channel %d or band %d", __func__, channel, band);
+ status = -EINVAL;
+ break;
+ }
+
+ values[0].i = pMbcBand->isEnabled();
+ values[1].f = pMbcBand->getCutoffFrequency();
+ values[2].f = pMbcBand->getAttackTime();
+ values[3].f = pMbcBand->getReleaseTime();
+ values[4].f = pMbcBand->getRatio();
+ values[5].f = pMbcBand->getThreshold();
+ values[6].f = pMbcBand->getKneeWidth();
+ values[7].f = pMbcBand->getNoiseGateThreshold();
+ values[8].f = pMbcBand->getExpanderRatio();
+ values[9].f = pMbcBand->getPreGain();
+ values[10].f = pMbcBand->getPostGain();
+
+ *pValueSize = sizeof(value_t) * 11;
+ ALOGVV(" mbcBand channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, attackTime:%f,"
+ "releaseTime:%f, ratio:%f, threshold:%f, kneeWidth:%f, noiseGateThreshold:%f,"
+ "expanderRatio:%f, preGain:%f, postGain:%f\n", channel, band, values[0].i,
+ values[1].f, values[2].f, values[3].f, values[4].f, values[5].f, values[6].f,
+ values[7].f, values[8].f, values[9].f, values[10].f);
+ break;
+ }
+ case DP_PARAM_LIMITER: {
+ ALOGVV("engine get DP_PARAM_LIMITER paramsize: %d valuesize %d",paramSize, *pValueSize);
+ if (!DP_checkSizesInt(paramSize, *pValueSize, 2 /*params*/, 8 /*values*/)) {
+ ALOGE("%s DP_PARAM_LIMITER (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+
+ int32_t channel = params[1];
+// Number[] values = {0 /*0 in use (int)*/,
+// 0 /*1 enabled (int)*/,
+// 0 /*2 link group (int)*/,
+// 0.0f /*3 attack time (float)*/,
+// 0.0f /*4 release time (float)*/,
+// 0.0f /*5 ratio (float)*/,
+// 0.0f /*6 threshold (float)*/,
+// 0.0f /*7 post gain(float)*/};
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ ALOGE("%s DP_PARAM_LIMITER invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+ dp_fx::DPLimiter *pLimiter = pChannel->getLimiter();
+ if (pLimiter == NULL) {
+ ALOGE("%s DP_PARAM_LIMITER null LIMITER", __func__);
+ status = -EINVAL;
+ break;
+ }
+ values[0].i = pLimiter->isInUse();
+ values[1].i = pLimiter->isEnabled();
+ values[2].i = pLimiter->getLinkGroup();
+ values[3].f = pLimiter->getAttackTime();
+ values[4].f = pLimiter->getReleaseTime();
+ values[5].f = pLimiter->getRatio();
+ values[6].f = pLimiter->getThreshold();
+ values[7].f = pLimiter->getPostGain();
+
+ *pValueSize = sizeof(value_t) * 8;
+
+ ALOGVV(" Limiter channel: %d, inUse::%d, enabled:%d, linkgroup:%d attackTime:%f,"
+ "releaseTime:%f, ratio:%f, threshold:%f, postGain:%f\n",
+ channel, values[0].i/*inUse*/, values[1].i/*enabled*/, values[2].i/*linkGroup*/,
+ values[3].f/*attackTime*/, values[4].f/*releaseTime*/,
+ values[5].f/*ratio*/, values[6].f/*threshold*/,
+ values[7].f/*postGain*/);
+ break;
+ }
+ default:
+ ALOGE("%s invalid param %d", __func__, params[0]);
+ status = -EINVAL;
+ break;
+ }
+
+ ALOGVV("%s end param: %d, status: %d", __func__, params[0], status);
+ return status;
+} /* end DP_getParameter */
+
+int DP_setParameter(DynamicsProcessingContext *pContext,
+ uint32_t paramSize,
+ void *pParam,
+ uint32_t valueSize,
+ void *pValue) {
+ int status = 0;
+ int32_t *params = (int32_t *)pParam;
+ static_assert(sizeof(float) == sizeof(int32_t) && sizeof(float) == sizeof(value_t) &&
+ alignof(float) == alignof(int32_t) && alignof(float) == alignof(value_t),
+ "Size/alignment mismatch for float/int32_t/value_t");
+ value_t *values = reinterpret_cast<value_t*>(pValue);
+
+ ALOGVV("%s start", __func__);
+ if (paramSize < sizeof(int32_t)) {
+ ALOGE("%s invalid paramSize: %u", __func__, paramSize);
+ return -EINVAL;
+ }
+ const int32_t command = params[0];
+ switch (command) {
+ case DP_PARAM_ENGINE_ARCHITECTURE: {
+ ALOGVV("engine architecture paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 1 /*params*/, 9 /*values*/)) {
+ ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_ENGINE_ARCHITECTURE };
+// Number[] values = { variant /* variant */,
+// preferredFrameDuration,
+// (preEqInUse ? 1 : 0),
+// preEqBandCount,
+// (mbcInUse ? 1 : 0),
+// mbcBandCount,
+// (postEqInUse ? 1 : 0),
+// postEqBandCount,
+// (limiterInUse ? 1 : 0)};
+ const int32_t variant = values[0].i;
+ const float preferredFrameDuration = values[1].f;
+ const int32_t preEqInUse = values[2].i;
+ const int32_t preEqBandCount = values[3].i;
+ const int32_t mbcInUse = values[4].i;
+ const int32_t mbcBandCount = values[5].i;
+ const int32_t postEqInUse = values[6].i;
+ const int32_t postEqBandCount = values[7].i;
+ const int32_t limiterInUse = values[8].i;
+ ALOGVV("variant %d, preEqInuse %d, bands %d, mbcinuse %d, mbcbands %d, posteqInUse %d,"
+ "bands %d, limiterinuse %d", variant, preEqInUse, preEqBandCount, mbcInUse,
+ mbcBandCount, postEqInUse, postEqBandCount, limiterInUse);
+
+ //set variant (instantiate effect)
+ //initArchitecture for effect
+ DP_changeVariant(pContext, variant);
+ if (pContext->mPDynamics == NULL) {
+ ALOGE("%s DP_PARAM_ENGINE_ARCHITECTURE error setting variant %d", __func__, variant);
+ status = -EINVAL;
+ break;
+ }
+ pContext->mPreferredFrameDuration = preferredFrameDuration;
+ pContext->mPDynamics->init((uint32_t)audio_channel_count_from_out_mask(
+ pContext->mConfig.inputCfg.channels),
+ preEqInUse != 0, (uint32_t)preEqBandCount,
+ mbcInUse != 0, (uint32_t)mbcBandCount,
+ postEqInUse != 0, (uint32_t)postEqBandCount,
+ limiterInUse != 0);
+
+ DP_configureVariant(pContext, variant);
+ break;
+ }
+ case DP_PARAM_INPUT_GAIN: {
+ ALOGVV("engine DP_PARAM_INPUT_GAIN paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 1 /*values*/)) {
+ ALOGE("%s DP_PARAM_INPUT_GAIN invalid sizes.", __func__);
+ status = -EINVAL;
+ break;
+ }
+
+ const int32_t channel = params[1];
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ ALOGE("%s DP_PARAM_INPUT_GAIN invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+ const float gain = values[0].f;
+ ALOGVV("%s DP_PARAM_INPUT_GAIN channel %d, level %f", __func__, channel, gain);
+ pChannel->setInputGain(gain);
+ break;
+ }
+ case DP_PARAM_PRE_EQ:
+ case DP_PARAM_POST_EQ: {
+ ALOGVV("engine DP_PARAM_*_EQ paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 3 /*values*/)) {
+ ALOGE("%s DP_PARAM_*_EQ (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = {paramSet,
+// channelIndex};
+// Number[] values = { (eq.isInUse() ? 1 : 0),
+// (eq.isEnabled() ? 1 : 0),
+// bandCount};
+ const int32_t channel = params[1];
+
+ const int32_t enabled = values[1].i;
+ const int32_t bandCount = values[2].i;
+ ALOGVV(" %s channel: %d, inUse::%d, enabled:%d, bandCount:%d\n",
+ (command == DP_PARAM_PRE_EQ ? "preEq" : "postEq"), channel, values[0].i,
+ enabled, bandCount);
+
+ dp_fx::DPEq *pEq = DP_getEq(pContext, channel, command);
+ if (pEq == NULL) {
+ ALOGE("%s set PARAM_*_EQ invalid channel %d or command %d", __func__, channel,
+ command);
+ status = -EINVAL;
+ break;
+ }
+
+ pEq->setEnabled(enabled != 0);
+ //fail if band count is different? maybe.
+ if ((int32_t)pEq->getBandCount() != bandCount) {
+ ALOGW("%s warning, trying to set different bandcount from %d to %d", __func__,
+ pEq->getBandCount(), bandCount);
+ }
+ break;
+ }
+ case DP_PARAM_PRE_EQ_BAND:
+ case DP_PARAM_POST_EQ_BAND: {
+ ALOGVV("engine set PARAM_*_EQ_BAND paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 3 /*params*/, 3 /*values*/)) {
+ ALOGE("%s PARAM_*_EQ_BAND (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] values = { channelIndex,
+// bandIndex,
+// (eqBand.isEnabled() ? 1 : 0),
+// eqBand.getCutoffFrequency(),
+// eqBand.getGain()};
+
+// Number[] params = {paramSet,
+// channelIndex,
+// bandIndex};
+// Number[] values = {(eqBand.isEnabled() ? 1 : 0),
+// eqBand.getCutoffFrequency(),
+// eqBand.getGain()};
+
+ const int32_t channel = params[1];
+ const int32_t band = params[2];
+
+ const int32_t enabled = values[0].i;
+ const float cutoffFrequency = values[1].f;
+ const float gain = values[2].f;
+
+
+ ALOGVV(" %s channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, gain%f\n",
+ (command == DP_PARAM_PRE_EQ_BAND ? "preEqBand" : "postEqBand"), channel, band,
+ enabled, cutoffFrequency, gain);
+
+ int eqCommand = (command == DP_PARAM_PRE_EQ_BAND ? DP_PARAM_PRE_EQ :
+ (command == DP_PARAM_POST_EQ_BAND ? DP_PARAM_POST_EQ : -1));
+ dp_fx::DPEq *pEq = DP_getEq(pContext, channel, eqCommand);
+ if (pEq == NULL) {
+ ALOGE("%s set PARAM_*_EQ_BAND invalid channel %d or command %d", __func__, channel,
+ command);
+ status = -EINVAL;
+ break;
+ }
+
+ dp_fx::DPEqBand eqBand;
+ eqBand.init(enabled != 0, cutoffFrequency, gain);
+ pEq->setBand(band, eqBand);
+ break;
+ }
+ case DP_PARAM_MBC: {
+ ALOGVV("engine DP_PARAM_MBC paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 3 /*values*/)) {
+ ALOGE("%s DP_PARAM_MBC (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_MBC,
+// channelIndex};
+// Number[] values = {(mbc.isInUse() ? 1 : 0),
+// (mbc.isEnabled() ? 1 : 0),
+// bandCount};
+ const int32_t channel = params[1];
+
+ const int32_t enabled = values[1].i;
+ const int32_t bandCount = values[2].i;
+ ALOGVV("MBC channel: %d, inUse::%d, enabled:%d, bandCount:%d\n", channel, values[0].i,
+ enabled, bandCount);
+
+ dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+ if (pMbc == NULL) {
+ ALOGE("%s set DP_PARAM_MBC invalid channel %d ", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+
+ pMbc->setEnabled(enabled != 0);
+ //fail if band count is different? maybe.
+ if ((int32_t)pMbc->getBandCount() != bandCount) {
+ ALOGW("%s warning, trying to set different bandcount from %d to %d", __func__,
+ pMbc->getBandCount(), bandCount);
+ }
+ break;
+ }
+ case DP_PARAM_MBC_BAND: {
+ ALOGVV("engine set DP_PARAM_MBC_BAND paramsize: %d valuesize %d ",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 3 /*params*/, 11 /*values*/)) {
+ ALOGE("%s DP_PARAM_MBC_BAND: (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_MBC_BAND,
+// channelIndex,
+// bandIndex};
+// Number[] values = {(mbcBand.isEnabled() ? 1 : 0),
+// mbcBand.getCutoffFrequency(),
+// mbcBand.getAttackTime(),
+// mbcBand.getReleaseTime(),
+// mbcBand.getRatio(),
+// mbcBand.getThreshold(),
+// mbcBand.getKneeWidth(),
+// mbcBand.getNoiseGateThreshold(),
+// mbcBand.getExpanderRatio(),
+// mbcBand.getPreGain(),
+// mbcBand.getPostGain()};
+
+ const int32_t channel = params[1];
+ const int32_t band = params[2];
+
+ const int32_t enabled = values[0].i;
+ const float cutoffFrequency = values[1].f;
+ const float attackTime = values[2].f;
+ const float releaseTime = values[3].f;
+ const float ratio = values[4].f;
+ const float threshold = values[5].f;
+ const float kneeWidth = values[6].f;
+ const float noiseGateThreshold = values[7].f;
+ const float expanderRatio = values[8].f;
+ const float preGain = values[9].f;
+ const float postGain = values[10].f;
+
+ ALOGVV(" mbcBand channel: %d, band::%d, enabled:%d, cutoffFrequency:%f, attackTime:%f,"
+ "releaseTime:%f, ratio:%f, threshold:%f, kneeWidth:%f, noiseGateThreshold:%f,"
+ "expanderRatio:%f, preGain:%f, postGain:%f\n",
+ channel, band, enabled, cutoffFrequency, attackTime, releaseTime, ratio,
+ threshold, kneeWidth, noiseGateThreshold, expanderRatio, preGain, postGain);
+
+ dp_fx::DPMbc *pMbc = DP_getMbc(pContext, channel);
+ if (pMbc == NULL) {
+ ALOGE("%s set DP_PARAM_MBC_BAND invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+
+ dp_fx::DPMbcBand mbcBand;
+ mbcBand.init(enabled != 0, cutoffFrequency, attackTime, releaseTime, ratio, threshold,
+ kneeWidth, noiseGateThreshold, expanderRatio, preGain, postGain);
+ pMbc->setBand(band, mbcBand);
+ break;
+ }
+ case DP_PARAM_LIMITER: {
+ ALOGVV("engine DP_PARAM_LIMITER paramsize: %d valuesize %d",paramSize, valueSize);
+ if (!DP_checkSizesInt(paramSize, valueSize, 2 /*params*/, 8 /*values*/)) {
+ ALOGE("%s DP_PARAM_LIMITER (cmd %d) invalid sizes.", __func__, command);
+ status = -EINVAL;
+ break;
+ }
+// Number[] params = { PARAM_LIMITER,
+// channelIndex};
+// Number[] values = {(limiter.isInUse() ? 1 : 0),
+// (limiter.isEnabled() ? 1 : 0),
+// limiter.getLinkGroup(),
+// limiter.getAttackTime(),
+// limiter.getReleaseTime(),
+// limiter.getRatio(),
+// limiter.getThreshold(),
+// limiter.getPostGain()};
+
+ const int32_t channel = params[1];
+
+ const int32_t inUse = values[0].i;
+ const int32_t enabled = values[1].i;
+ const int32_t linkGroup = values[2].i;
+ const float attackTime = values[3].f;
+ const float releaseTime = values[4].f;
+ const float ratio = values[5].f;
+ const float threshold = values[6].f;
+ const float postGain = values[7].f;
+
+ ALOGVV(" Limiter channel: %d, inUse::%d, enabled:%d, linkgroup:%d attackTime:%f,"
+ "releaseTime:%f, ratio:%f, threshold:%f, postGain:%f\n", channel, inUse,
+ enabled, linkGroup, attackTime, releaseTime, ratio, threshold, postGain);
+
+ dp_fx::DPChannel * pChannel = DP_getChannel(pContext, channel);
+ if (pChannel == NULL) {
+ ALOGE("%s DP_PARAM_LIMITER invalid channel %d", __func__, channel);
+ status = -EINVAL;
+ break;
+ }
+ dp_fx::DPLimiter limiter;
+ limiter.init(inUse != 0, enabled != 0, linkGroup, attackTime, releaseTime, ratio,
+ threshold, postGain);
+ pChannel->setLimiter(limiter);
+ break;
+ }
+ default:
+ ALOGE("%s invalid param %d", __func__, params[0]);
+ status = -EINVAL;
+ break;
+ }
+
+ ALOGVV("%s end param: %d, status: %d", __func__, params[0], status);
+ return status;
+} /* end DP_setParameter */
+
+/* Effect Control Interface Implementation: get_descriptor */
+int DP_getDescriptor(effect_handle_t self,
+ effect_descriptor_t *pDescriptor)
+{
+ DynamicsProcessingContext * pContext = (DynamicsProcessingContext *) self;
+
+ if (pContext == NULL || pDescriptor == NULL) {
+ ALOGE("DP_getDescriptor() invalid param");
+ return -EINVAL;
+ }
+
+ *pDescriptor = gDPDescriptor;
+
+ return 0;
+} /* end DP_getDescriptor */
+
+
+// effect_handle_t interface implementation for Dynamics Processing effect
+const struct effect_interface_s gDPInterface = {
+ DP_process,
+ DP_command,
+ DP_getDescriptor,
+ NULL,
+};
+
+extern "C" {
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
+ .tag = AUDIO_EFFECT_LIBRARY_TAG,
+ .version = EFFECT_LIBRARY_API_VERSION,
+ .name = "Dynamics Processing Library",
+ .implementor = "The Android Open Source Project",
+ .create_effect = DPLib_Create,
+ .release_effect = DPLib_Release,
+ .get_descriptor = DPLib_GetDescriptor,
+};
+
+}; // extern "C"
+
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/libeffects/dynamicsproc/MODULE_LICENSE_APACHE2
similarity index 100%
rename from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
rename to media/libeffects/dynamicsproc/MODULE_LICENSE_APACHE2
diff --git a/media/libeffects/dynamicsproc/NOTICE b/media/libeffects/dynamicsproc/NOTICE
new file mode 100644
index 0000000..31cc6e9
--- /dev/null
+++ b/media/libeffects/dynamicsproc/NOTICE
@@ -0,0 +1,190 @@
+
+ Copyright (c) 2005-2018, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
diff --git a/media/libeffects/dynamicsproc/dsp/DPBase.cpp b/media/libeffects/dynamicsproc/dsp/DPBase.cpp
new file mode 100644
index 0000000..ac758e0
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPBase.cpp
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DPBase"
+//#define LOG_NDEBUG 0
+
+#include <log/log.h>
+#include "DPBase.h"
+#include "DPFrequency.h"
+
+namespace dp_fx {
+
+DPStage::DPStage() : mInUse(DP_DEFAULT_STAGE_INUSE),
+ mEnabled(DP_DEFAULT_STAGE_ENABLED) {
+}
+
+void DPStage::init(bool inUse, bool enabled) {
+ mInUse = inUse;
+ mEnabled = enabled;
+}
+
+//----
+DPBandStage::DPBandStage() : mBandCount(0) {
+}
+
+void DPBandStage::init(bool inUse, bool enabled, int bandCount) {
+ DPStage::init(inUse, enabled);
+ mBandCount = inUse ? bandCount : 0;
+}
+
+//---
+DPBandBase::DPBandBase() {
+ init(DP_DEFAULT_BAND_ENABLED,
+ DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ);
+}
+
+void DPBandBase::init(bool enabled, float cutoffFrequency){
+ mEnabled = enabled;
+ mCutoffFrequencyHz = cutoffFrequency;
+}
+
+//-----
+DPEqBand::DPEqBand() {
+ init(DP_DEFAULT_BAND_ENABLED,
+ DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ,
+ DP_DEFAULT_GAIN_DB);
+}
+
+void DPEqBand::init(bool enabled, float cutoffFrequency, float gain) {
+ DPBandBase::init(enabled, cutoffFrequency);
+ setGain(gain);
+}
+
+float DPEqBand::getGain() const{
+ return mGainDb;
+}
+
+void DPEqBand::setGain(float gain) {
+ mGainDb = gain;
+}
+
+//------
+DPMbcBand::DPMbcBand() {
+ init(DP_DEFAULT_BAND_ENABLED,
+ DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ,
+ DP_DEFAULT_ATTACK_TIME_MS,
+ DP_DEFAULT_RELEASE_TIME_MS,
+ DP_DEFAULT_RATIO,
+ DP_DEFAULT_THRESHOLD_DB,
+ DP_DEFAULT_KNEE_WIDTH_DB,
+ DP_DEFAULT_NOISE_GATE_THRESHOLD_DB,
+ DP_DEFAULT_EXPANDER_RATIO,
+ DP_DEFAULT_GAIN_DB,
+ DP_DEFAULT_GAIN_DB);
+}
+
+void DPMbcBand::init(bool enabled, float cutoffFrequency, float attackTime, float releaseTime,
+ float ratio, float threshold, float kneeWidth, float noiseGateThreshold,
+ float expanderRatio, float preGain, float postGain) {
+ DPBandBase::init(enabled, cutoffFrequency);
+ setAttackTime(attackTime);
+ setReleaseTime(releaseTime);
+ setRatio(ratio);
+ setThreshold(threshold);
+ setKneeWidth(kneeWidth);
+ setNoiseGateThreshold(noiseGateThreshold);
+ setExpanderRatio(expanderRatio);
+ setPreGain(preGain);
+ setPostGain(postGain);
+}
+
+//------
+DPEq::DPEq() {
+}
+
+void DPEq::init(bool inUse, bool enabled, uint32_t bandCount) {
+ DPBandStage::init(inUse, enabled, bandCount);
+ mBands.resize(getBandCount());
+}
+
+DPEqBand * DPEq::getBand(uint32_t band) {
+ if (band < getBandCount()) {
+ return &mBands[band];
+ }
+ return NULL;
+}
+
+void DPEq::setBand(uint32_t band, DPEqBand &src) {
+ if (band < getBandCount()) {
+ mBands[band] = src;
+ }
+}
+
+//------
+DPMbc::DPMbc() {
+}
+
+void DPMbc::init(bool inUse, bool enabled, uint32_t bandCount) {
+ DPBandStage::init(inUse, enabled, bandCount);
+ if (isInUse()) {
+ mBands.resize(bandCount);
+ } else {
+ mBands.resize(0);
+ }
+}
+
+DPMbcBand * DPMbc::getBand(uint32_t band) {
+ if (band < getBandCount()) {
+ return &mBands[band];
+ }
+ return NULL;
+}
+
+void DPMbc::setBand(uint32_t band, DPMbcBand &src) {
+ if (band < getBandCount()) {
+ mBands[band] = src;
+ }
+}
+
+//------
+DPLimiter::DPLimiter() {
+ init(DP_DEFAULT_STAGE_INUSE,
+ DP_DEFAULT_STAGE_ENABLED,
+ DP_DEFAULT_LINK_GROUP,
+ DP_DEFAULT_ATTACK_TIME_MS,
+ DP_DEFAULT_RELEASE_TIME_MS,
+ DP_DEFAULT_RATIO,
+ DP_DEFAULT_THRESHOLD_DB,
+ DP_DEFAULT_GAIN_DB);
+}
+
+void DPLimiter::init(bool inUse, bool enabled, uint32_t linkGroup, float attackTime, float releaseTime,
+ float ratio, float threshold, float postGain) {
+ DPStage::init(inUse, enabled);
+ setLinkGroup(linkGroup);
+ setAttackTime(attackTime);
+ setReleaseTime(releaseTime);
+ setRatio(ratio);
+ setThreshold(threshold);
+ setPostGain(postGain);
+}
+
+//----
+DPChannel::DPChannel() : mInitialized(false), mInputGainDb(0), mOutputGainDb(0),
+ mPreEqInUse(false), mMbcInUse(false), mPostEqInUse(false), mLimiterInUse(false) {
+}
+
+void DPChannel::init(float inputGain, bool preEqInUse, uint32_t preEqBandCount,
+ bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+ bool limiterInUse) {
+ setInputGain(inputGain);
+ mPreEqInUse = preEqInUse;
+ mMbcInUse = mbcInUse;
+ mPostEqInUse = postEqInUse;
+ mLimiterInUse = limiterInUse;
+
+ mPreEq.init(mPreEqInUse, false, preEqBandCount);
+ mMbc.init(mMbcInUse, false, mbcBandCount);
+ mPostEq.init(mPostEqInUse, false, postEqBandCount);
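+ // These literal values match the DP_DEFAULT_* constants: link group 0, 50 ms attack,
+ // 120 ms release, 2:1 ratio, -30 dB threshold, 0 dB post gain (disabled until configured).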
+ mLimiter.init(mLimiterInUse, false, 0, 50, 120, 2, -30, 0);
+ mInitialized = true;
+}
+
+DPEq* DPChannel::getPreEq() {
+ if (!mInitialized) {
+ return NULL;
+ }
+ return &mPreEq;
+}
+
+DPMbc* DPChannel::getMbc() {
+ if (!mInitialized) {
+ return NULL;
+ }
+ return &mMbc;
+}
+
+DPEq* DPChannel::getPostEq() {
+ if (!mInitialized) {
+ return NULL;
+ }
+ return &mPostEq;
+}
+
+DPLimiter* DPChannel::getLimiter() {
+ if (!mInitialized) {
+ return NULL;
+ }
+ return &mLimiter;
+}
+
+void DPChannel::setLimiter(DPLimiter &limiter) {
+ if (!mInitialized) {
+ return;
+ }
+ mLimiter = limiter;
+}
+
+//----
+DPBase::DPBase() : mInitialized(false), mChannelCount(0), mPreEqInUse(false), mPreEqBandCount(0),
+ mMbcInUse(false), mMbcBandCount(0), mPostEqInUse(false), mPostEqBandCount(0),
+ mLimiterInUse(false) {
+}
+
+void DPBase::init(uint32_t channelCount, bool preEqInUse, uint32_t preEqBandCount,
+ bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+ bool limiterInUse) {
+ ALOGV("DPBase::init");
+ mChannelCount = channelCount;
+ mPreEqInUse = preEqInUse;
+ mPreEqBandCount = preEqBandCount;
+ mMbcInUse = mbcInUse;
+ mMbcBandCount = mbcBandCount;
+ mPostEqInUse = postEqInUse;
+ mPostEqBandCount = postEqBandCount;
+ mLimiterInUse = limiterInUse;
+ mChannel.resize(mChannelCount);
+ for (size_t ch = 0; ch < mChannelCount; ch++) {
+ mChannel[ch].init(0, preEqInUse, preEqBandCount, mbcInUse, mbcBandCount,
+ postEqInUse, postEqBandCount, limiterInUse);
+ }
+ mInitialized = true;
+}
+
+DPChannel* DPBase::getChannel(uint32_t channelIndex) {
+ if (!mInitialized || channelIndex >= mChannel.size()) {
+ return NULL;
+ }
+ return & mChannel[channelIndex];
+}
+
+} //namespace dp_fx
diff --git a/media/libeffects/dynamicsproc/dsp/DPBase.h b/media/libeffects/dynamicsproc/dsp/DPBase.h
new file mode 100644
index 0000000..e74f91d
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPBase.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DPBASE_H_
+#define DPBASE_H_
+
+
+#include <stdint.h>
+#include <cmath>
+#include <vector>
+#include <android/log.h>
+
+namespace dp_fx {
+
+#define DP_DEFAULT_BAND_ENABLED false
+#define DP_DEFAULT_BAND_CUTOFF_FREQUENCY_HZ 1000
+#define DP_DEFAULT_ATTACK_TIME_MS 50
+#define DP_DEFAULT_RELEASE_TIME_MS 120
+#define DP_DEFAULT_RATIO 2
+#define DP_DEFAULT_THRESHOLD_DB -30
+#define DP_DEFAULT_KNEE_WIDTH_DB 0
+#define DP_DEFAULT_NOISE_GATE_THRESHOLD_DB -90
+#define DP_DEFAULT_EXPANDER_RATIO 1
+#define DP_DEFAULT_GAIN_DB 0
+#define DP_DEFAULT_STAGE_INUSE false
+#define DP_DEFAULT_STAGE_ENABLED false
+#define DP_DEFAULT_LINK_GROUP 0
+
+
+
+class DPStage {
+public:
+ DPStage();
+ ~DPStage() = default;
+ void init(bool inUse, bool enabled);
+ bool isInUse() const {
+ return mInUse;
+ }
+ bool isEnabled() const {
+ return mEnabled;
+ }
+ void setEnabled(bool enabled) {
+ mEnabled = enabled;
+ }
+private:
+ bool mInUse;
+ bool mEnabled;
+};
+
+class DPBandStage : public DPStage {
+public:
+ DPBandStage();
+ ~DPBandStage() = default;
+ void init(bool inUse, bool enabled, int bandCount);
+ uint32_t getBandCount() const {
+ return mBandCount;
+ }
+ void setBandCount(uint32_t bandCount) {
+ mBandCount = bandCount;
+ }
+private:
+ uint32_t mBandCount;
+};
+
+class DPBandBase {
+public:
+ DPBandBase();
+ ~DPBandBase() = default;
+ void init(bool enabled, float cutoffFrequency);
+ bool isEnabled() const {
+ return mEnabled;
+ }
+ void setEnabled(bool enabled) {
+ mEnabled = enabled;
+ }
+ float getCutoffFrequency() const {
+ return mCutoffFrequencyHz;
+ }
+ void setCutoffFrequency(float cutoffFrequency) {
+ mCutoffFrequencyHz = cutoffFrequency;
+ }
+private:
+ bool mEnabled;
+ float mCutoffFrequencyHz;
+};
+
+class DPEqBand : public DPBandBase {
+public:
+ DPEqBand();
+ ~DPEqBand() = default;
+ void init(bool enabled, float cutoffFrequency, float gain);
+ float getGain() const;
+ void setGain(float gain);
+private:
+ float mGainDb;
+};
+
+class DPMbcBand : public DPBandBase {
+public:
+ DPMbcBand();
+ ~DPMbcBand() = default;
+ void init(bool enabled, float cutoffFrequency, float attackTime, float releaseTime,
+ float ratio, float threshold, float kneeWidth, float noiseGateThreshold,
+ float expanderRatio, float preGain, float postGain);
+ float getAttackTime() const {
+ return mAttackTimeMs;
+ }
+ void setAttackTime(float attackTime) {
+ mAttackTimeMs = attackTime;
+ }
+ float getReleaseTime() const {
+ return mReleaseTimeMs;
+ }
+ void setReleaseTime(float releaseTime) {
+ mReleaseTimeMs = releaseTime;
+ }
+ float getRatio() const {
+ return mRatio;
+ }
+ void setRatio(float ratio) {
+ mRatio = ratio;
+ }
+ float getThreshold() const {
+ return mThresholdDb;
+ }
+ void setThreshold(float threshold) {
+ mThresholdDb = threshold;
+ }
+ float getKneeWidth() const {
+ return mKneeWidthDb;
+ }
+ void setKneeWidth(float kneeWidth) {
+ mKneeWidthDb = kneeWidth;
+ }
+ float getNoiseGateThreshold() const {
+ return mNoiseGateThresholdDb;
+ }
+ void setNoiseGateThreshold(float noiseGateThreshold) {
+ mNoiseGateThresholdDb = noiseGateThreshold;
+ }
+ float getExpanderRatio() const {
+ return mExpanderRatio;
+ }
+ void setExpanderRatio(float expanderRatio) {
+ mExpanderRatio = expanderRatio;
+ }
+ float getPreGain() const {
+ return mPreGainDb;
+ }
+ void setPreGain(float preGain) {
+ mPreGainDb = preGain;
+ }
+ float getPostGain() const {
+ return mPostGainDb;
+ }
+ void setPostGain(float postGain) {
+ mPostGainDb = postGain;
+ }
+private:
+ float mAttackTimeMs;
+ float mReleaseTimeMs;
+ float mRatio;
+ float mThresholdDb;
+ float mKneeWidthDb;
+ float mNoiseGateThresholdDb;
+ float mExpanderRatio;
+ float mPreGainDb;
+ float mPostGainDb;
+};
+
+class DPEq : public DPBandStage {
+public:
+ DPEq();
+ ~DPEq() = default;
+ void init(bool inUse, bool enabled, uint32_t bandCount);
+ DPEqBand * getBand(uint32_t band);
+ void setBand(uint32_t band, DPEqBand &src);
+private:
+ std::vector<DPEqBand> mBands;
+};
+
+class DPMbc : public DPBandStage {
+public:
+ DPMbc();
+ ~DPMbc() = default;
+ void init(bool inUse, bool enabled, uint32_t bandCount);
+ DPMbcBand * getBand(uint32_t band);
+ void setBand(uint32_t band, DPMbcBand &src);
+private:
+ std::vector<DPMbcBand> mBands;
+};
+
+class DPLimiter : public DPStage {
+public:
+ DPLimiter();
+ ~DPLimiter() = default;
+ void init(bool inUse, bool enabled, uint32_t linkGroup, float attackTime, float releaseTime,
+ float ratio, float threshold, float postGain);
+ uint32_t getLinkGroup() const {
+ return mLinkGroup;
+ }
+ void setLinkGroup(uint32_t linkGroup) {
+ mLinkGroup = linkGroup;
+ }
+ float getAttackTime() const {
+ return mAttackTimeMs;
+ }
+ void setAttackTime(float attackTime) {
+ mAttackTimeMs = attackTime;
+ }
+ float getReleaseTime() const {
+ return mReleaseTimeMs;
+ }
+ void setReleaseTime(float releaseTime) {
+ mReleaseTimeMs = releaseTime;
+ }
+ float getRatio() const {
+ return mRatio;
+ }
+ void setRatio(float ratio) {
+ mRatio = ratio;
+ }
+ float getThreshold() const {
+ return mThresholdDb;
+ }
+ void setThreshold(float threshold) {
+ mThresholdDb = threshold;
+ }
+ float getPostGain() const {
+ return mPostGainDb;
+ }
+ void setPostGain(float postGain) {
+ mPostGainDb = postGain;
+ }
+private:
+ uint32_t mLinkGroup;
+ float mAttackTimeMs;
+ float mReleaseTimeMs;
+ float mRatio;
+ float mThresholdDb;
+ float mPostGainDb;
+};
+
+class DPChannel {
+public:
+ DPChannel();
+ ~DPChannel() = default;
+ void init(float inputGain, bool preEqInUse, uint32_t preEqBandCount,
+ bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+ bool limiterInUse);
+
+ float getInputGain() const {
+ if (!mInitialized) {
+ return 0;
+ }
+ return mInputGainDb;
+ }
+ void setInputGain(float gain) {
+ mInputGainDb = gain;
+ }
+
+ float getOutputGain() const {
+ if (!mInitialized) {
+ return 0;
+ }
+ return mOutputGainDb;
+ }
+ void setOutputGain(float gain) {
+ mOutputGainDb = gain;
+ }
+
+ DPEq* getPreEq();
+ DPMbc* getMbc();
+ DPEq* getPostEq();
+ DPLimiter *getLimiter();
+ void setLimiter(DPLimiter &limiter);
+
+private:
+ bool mInitialized;
+ float mInputGainDb;
+ float mOutputGainDb;
+
+ DPEq mPreEq;
+ DPMbc mMbc;
+ DPEq mPostEq;
+ DPLimiter mLimiter;
+
+ bool mPreEqInUse;
+ bool mMbcInUse;
+ bool mPostEqInUse;
+ bool mLimiterInUse;
+};
+
+class DPBase {
+public:
+ DPBase();
+ virtual ~DPBase() = default;
+
+ void init(uint32_t channelCount, bool preEqInUse, uint32_t preEqBandCount,
+ bool mbcInUse, uint32_t mbcBandCount, bool postEqInUse, uint32_t postEqBandCount,
+ bool limiterInUse);
+ virtual size_t processSamples(const float *in, float *out, size_t samples) = 0;
+ virtual void reset() = 0;
+
+ DPChannel* getChannel(uint32_t channelIndex);
+ uint32_t getChannelCount() const {
+ return mChannelCount;
+ }
+ uint32_t getPreEqBandCount() const {
+ return mPreEqBandCount;
+ }
+ uint32_t getMbcBandCount() const {
+ return mMbcBandCount;
+ }
+ uint32_t getPostEqBandCount() const {
+ return mPostEqBandCount;
+ }
+ bool isPreEQInUse() const {
+ return mPreEqInUse;
+ }
+ bool isMbcInUse() const {
+ return mMbcInUse;
+ }
+ bool isPostEqInUse() const {
+ return mPostEqInUse;
+ }
+ bool isLimiterInUse() const {
+ return mLimiterInUse;
+ }
+
+private:
+ bool mInitialized;
+ //general
+ uint32_t mChannelCount;
+ bool mPreEqInUse;
+ uint32_t mPreEqBandCount;
+ bool mMbcInUse;
+ uint32_t mMbcBandCount;
+ bool mPostEqInUse;
+ uint32_t mPostEqBandCount;
+ bool mLimiterInUse;
+
+ std::vector<DPChannel> mChannel;
+};
+
+} //namespace dp_fx
+
+
+#endif // DPBASE_H_
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
new file mode 100644
index 0000000..d06fd70
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
@@ -0,0 +1,675 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DPFrequency"
+//#define LOG_NDEBUG 0
+
+#include <log/log.h>
+#include "DPFrequency.h"
+#include <algorithm>
+
+namespace dp_fx {
+
+using Eigen::MatrixXd;
+#define MAX_BLOCKSIZE 16384 //For this implementation
+#define MIN_BLOCKSIZE 8
+
+#define CIRCULAR_BUFFER_UPSAMPLE 4 //4 times buffer size
+
+static constexpr float MIN_ENVELOPE = 1e-6f; //-120 dB
+//helper functions
+static inline bool isPowerOf2(unsigned long n) {
+ return (n & (n - 1)) == 0;
+}
+static constexpr float EPSILON = 0.0000001f;
+
+static inline bool isZero(float f) {
+ return fabs(f) <= EPSILON;
+}
+
+template <class T>
+bool compareEquality(T a, T b) {
+ return (a == b);
+}
+
+template <> bool compareEquality<float>(float a, float b) {
+ return isZero(a - b);
+}
+
+//TODO: avoid using macro for estimating change and assignment.
+#define IS_CHANGED(c, a, b) { c |= !compareEquality(a,b); \
+ (a) = (b); }
+
+//ChannelBuffers helper
+void ChannelBuffer::initBuffers(unsigned int blockSize, unsigned int overlapSize,
+ unsigned int halfFftSize, unsigned int samplingRate, DPBase &dpBase) {
+ ALOGV("ChannelBuffer::initBuffers blockSize %d, overlap %d, halfFft %d",
+ blockSize, overlapSize, halfFftSize);
+
+ mSamplingRate = samplingRate;
+ mBlockSize = blockSize;
+
+ cBInput.resize(mBlockSize * CIRCULAR_BUFFER_UPSAMPLE);
+ cBOutput.resize(mBlockSize * CIRCULAR_BUFFER_UPSAMPLE);
+
+ //fill input with half block size...
+ for (unsigned int k = 0; k < mBlockSize/2; k++) {
+ cBInput.write(0);
+ }
+
+ //temp vectors
+ input.resize(mBlockSize);
+ output.resize(mBlockSize);
+ outTail.resize(overlapSize);
+
+ //module vectors
+ mPreEqFactorVector.resize(halfFftSize, 1.0);
+ mPostEqFactorVector.resize(halfFftSize, 1.0);
+
+ mPreEqBands.resize(dpBase.getPreEqBandCount());
+ mMbcBands.resize(dpBase.getMbcBandCount());
+ mPostEqBands.resize(dpBase.getPostEqBandCount());
+ ALOGV("mPreEqBands %zu, mMbcBands %zu, mPostEqBands %zu",mPreEqBands.size(),
+ mMbcBands.size(), mPostEqBands.size());
+
+ DPChannel *pChannel = dpBase.getChannel(0);
+ if (pChannel != nullptr) {
+ mPreEqInUse = pChannel->getPreEq()->isInUse();
+ mMbcInUse = pChannel->getMbc()->isInUse();
+ mPostEqInUse = pChannel->getPostEq()->isInUse();
+ mLimiterInUse = pChannel->getLimiter()->isInUse();
+ }
+
+ mLimiterParams.linkGroup = -1; //no group.
+}
+
+void ChannelBuffer::computeBinStartStop(BandParams &bp, size_t binStart) {
+
+ bp.binStart = binStart;
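+ // each FFT bin spans (mSamplingRate / mBlockSize) Hz, so the cutoff frequency maps to
+ // bin round(freqCutoffHz * mBlockSize / mSamplingRate).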
+ bp.binStop = (int)(0.5 + bp.freqCutoffHz * mBlockSize / mSamplingRate);
+}
+
+//== LinkedLimiters Helper
+void LinkedLimiters::reset() {
+ mGroupsMap.clear();
+}
+
+void LinkedLimiters::update(int32_t group, int index) {
+ mGroupsMap[group].push_back(index);
+}
+
+void LinkedLimiters::remove(int index) {
+ //check all groups and if index is found, remove it.
+ //if group is empty afterwards, remove it.
+ for (auto it = mGroupsMap.begin(); it != mGroupsMap.end(); ) {
+ for (auto itIndex = it->second.begin(); itIndex != it->second.end(); ) {
+ if (*itIndex == index) {
+ itIndex = it->second.erase(itIndex);
+ } else {
+ ++itIndex;
+ }
+ }
+ if (it->second.size() == 0) {
+ it = mGroupsMap.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+//== DPFrequency
+void DPFrequency::reset() {
+}
+
+size_t DPFrequency::getMinBockSize() {
+ return MIN_BLOCKSIZE;
+}
+
+size_t DPFrequency::getMaxBockSize() {
+ return MAX_BLOCKSIZE;
+}
+
+void DPFrequency::configure(size_t blockSize, size_t overlapSize,
+ size_t samplingRate) {
+ ALOGV("configure");
+ mBlockSize = blockSize;
+ if (mBlockSize > MAX_BLOCKSIZE) {
+ mBlockSize = MAX_BLOCKSIZE;
+ } else if (mBlockSize < MIN_BLOCKSIZE) {
+ mBlockSize = MIN_BLOCKSIZE;
+ } else {
+ if (!isPowerOf2(blockSize)) {
+ //find next highest power of 2.
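+ // 1 << (32 - __builtin_clz(n)) is the smallest power of two greater than n;
+ // this branch only runs when blockSize is not already a power of two.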
+ mBlockSize = 1 << (32 - __builtin_clz(blockSize));
+ }
+ }
+
+ mHalfFFTSize = 1 + mBlockSize / 2; //including Nyquist bin
+ mOverlapSize = std::min(overlapSize, mBlockSize/2);
+
+ int channelcount = getChannelCount();
+ mSamplingRate = samplingRate;
+ mChannelBuffers.resize(channelcount);
+ for (int ch = 0; ch < channelcount; ch++) {
+ mChannelBuffers[ch].initBuffers(mBlockSize, mOverlapSize, mHalfFFTSize,
+ mSamplingRate, *this);
+ }
+
+ //effective number of frames processed per second
+ mBlocksPerSecond = (float)mSamplingRate / (mBlockSize - mOverlapSize);
+
+ fill_window(mVWindow, RDSP_WINDOW_HANNING_FLAT_TOP, mBlockSize, mOverlapSize);
+
+ //compute window rms for energy compensation
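+ // Windowing reduces each block's energy by roughly the window RMS; the envelope
+ // estimates in processFirstStages divide by it to approximate the unwindowed level.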
+ mWindowRms = 0;
+ for (size_t i = 0; i < mVWindow.size(); i++) {
+ mWindowRms += mVWindow[i] * mVWindow[i];
+ }
+
+ //Making sure window rms is not zero.
+ mWindowRms = std::max(sqrt(mWindowRms / mVWindow.size()), MIN_ENVELOPE);
+}
+
+void DPFrequency::updateParameters(ChannelBuffer &cb, int channelIndex) {
+ DPChannel *pChannel = getChannel(channelIndex);
+
+ if (pChannel == nullptr) {
+ ALOGE("Error: updateParameters null DPChannel %d", channelIndex);
+ return;
+ }
+
+ //===Input Gain and preEq
+ {
+ bool changed = false;
+ IS_CHANGED(changed, cb.inputGainDb, pChannel->getInputGain());
+ //===EqPre
+ if (cb.mPreEqInUse) {
+ DPEq *pPreEq = pChannel->getPreEq();
+ if (pPreEq == nullptr) {
+ ALOGE("Error: updateParameters null PreEq for channel: %d", channelIndex);
+ return;
+ }
+ IS_CHANGED(changed, cb.mPreEqEnabled, pPreEq->isEnabled());
+ if (cb.mPreEqEnabled) {
+ for (unsigned int b = 0; b < getPreEqBandCount(); b++) {
+ DPEqBand *pEqBand = pPreEq->getBand(b);
+ if (pEqBand == nullptr) {
+ ALOGE("Error: updateParameters null PreEqBand for band %d", b);
+ return; //failed.
+ }
+ ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPreEqBands[b];
+ IS_CHANGED(changed, pEqBandParams->enabled, pEqBand->isEnabled());
+ IS_CHANGED(changed, pEqBandParams->freqCutoffHz,
+ pEqBand->getCutoffFrequency());
+ IS_CHANGED(changed, pEqBandParams->gainDb, pEqBand->getGain());
+ }
+ }
+ }
+
+ if (changed) {
+ float inputGainFactor = dBtoLinear(cb.inputGainDb);
+ if (cb.mPreEqInUse && cb.mPreEqEnabled) {
+ ALOGV("preEq changed, recomputing! channel %d", channelIndex);
+ size_t binNext = 0;
+ for (unsigned int b = 0; b < getPreEqBandCount(); b++) {
+ ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPreEqBands[b];
+
+ //frequency translation
+ cb.computeBinStartStop(*pEqBandParams, binNext);
+ binNext = pEqBandParams->binStop + 1;
+ float factor = dBtoLinear(pEqBandParams->gainDb);
+ if (!pEqBandParams->enabled) {
+ factor = inputGainFactor;
+ }
+ for (size_t k = pEqBandParams->binStart;
+ k <= pEqBandParams->binStop && k < mHalfFFTSize; k++) {
+ cb.mPreEqFactorVector[k] = factor * inputGainFactor;
+ }
+ }
+ } else {
+ ALOGV("only input gain changed, recomputing!");
+ //populate PreEq factor with input gain factor.
+ for (size_t k = 0; k < mHalfFFTSize; k++) {
+ cb.mPreEqFactorVector[k] = inputGainFactor;
+ }
+ }
+ }
+ } //inputGain and preEq
+
+ //===EqPost
+ if (cb.mPostEqInUse) {
+ bool changed = false;
+
+ DPEq *pPostEq = pChannel->getPostEq();
+ if (pPostEq == nullptr) {
+ ALOGE("Error: updateParameters null postEq for channel: %d", channelIndex);
+ return; //failed.
+ }
+ IS_CHANGED(changed, cb.mPostEqEnabled, pPostEq->isEnabled());
+ if (cb.mPostEqEnabled) {
+ for (unsigned int b = 0; b < getPostEqBandCount(); b++) {
+ DPEqBand *pEqBand = pPostEq->getBand(b);
+ if (pEqBand == nullptr) {
+ ALOGE("Error: updateParameters PostEqBand NULL for band %d", b);
+ return; //failed.
+ }
+ ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPostEqBands[b];
+ IS_CHANGED(changed, pEqBandParams->enabled, pEqBand->isEnabled());
+ IS_CHANGED(changed, pEqBandParams->freqCutoffHz,
+ pEqBand->getCutoffFrequency());
+ IS_CHANGED(changed, pEqBandParams->gainDb, pEqBand->getGain());
+ }
+ if (changed) {
+ ALOGV("postEq changed, recomputing! channel %d", channelIndex);
+ size_t binNext = 0;
+ for (unsigned int b = 0; b < getPostEqBandCount(); b++) {
+ ChannelBuffer::EqBandParams *pEqBandParams = &cb.mPostEqBands[b];
+
+ //frequency translation
+ cb.computeBinStartStop(*pEqBandParams, binNext);
+ binNext = pEqBandParams->binStop + 1;
+ float factor = dBtoLinear(pEqBandParams->gainDb);
+ if (!pEqBandParams->enabled) {
+ factor = 1.0;
+ }
+ for (size_t k = pEqBandParams->binStart;
+ k <= pEqBandParams->binStop && k < mHalfFFTSize; k++) {
+ cb.mPostEqFactorVector[k] = factor;
+ }
+ }
+ }
+ } //enabled
+ }
+
+ //===MBC
+ if (cb.mMbcInUse) {
+ DPMbc *pMbc = pChannel->getMbc();
+ if (pMbc == nullptr) {
+ ALOGE("Error: updateParameters Mbc NULL for channel: %d", channelIndex);
+ return;
+ }
+ cb.mMbcEnabled = pMbc->isEnabled();
+ if (cb.mMbcEnabled) {
+ bool changed = false;
+ for (unsigned int b = 0; b < getMbcBandCount(); b++) {
+ DPMbcBand *pMbcBand = pMbc->getBand(b);
+ if (pMbcBand == nullptr) {
+ ALOGE("Error: updateParameters MbcBand NULL for band %d", b);
+ return; //failed.
+ }
+ ChannelBuffer::MbcBandParams *pMbcBandParams = &cb.mMbcBands[b];
+ pMbcBandParams->enabled = pMbcBand->isEnabled();
+ IS_CHANGED(changed, pMbcBandParams->freqCutoffHz,
+ pMbcBand->getCutoffFrequency());
+
+ pMbcBandParams->gainPreDb = pMbcBand->getPreGain();
+ pMbcBandParams->gainPostDb = pMbcBand->getPostGain();
+ pMbcBandParams->attackTimeMs = pMbcBand->getAttackTime();
+ pMbcBandParams->releaseTimeMs = pMbcBand->getReleaseTime();
+ pMbcBandParams->ratio = pMbcBand->getRatio();
+ pMbcBandParams->thresholdDb = pMbcBand->getThreshold();
+ pMbcBandParams->kneeWidthDb = pMbcBand->getKneeWidth();
+ pMbcBandParams->noiseGateThresholdDb = pMbcBand->getNoiseGateThreshold();
+ pMbcBandParams->expanderRatio = pMbcBand->getExpanderRatio();
+
+ }
+
+ if (changed) {
+ ALOGV("mbc changed, recomputing! channel %d", channelIndex);
+ size_t binNext = 0;
+ for (unsigned int b = 0; b < getMbcBandCount(); b++) {
+ ChannelBuffer::MbcBandParams *pMbcBandParams = &cb.mMbcBands[b];
+
+ pMbcBandParams->previousEnvelope = 0;
+
+ //frequency translation
+ cb.computeBinStartStop(*pMbcBandParams, binNext);
+ binNext = pMbcBandParams->binStop + 1;
+ }
+ }
+ }
+ }
+
+ //===Limiter
+ if (cb.mLimiterInUse) {
+ bool changed = false;
+ DPLimiter *pLimiter = pChannel->getLimiter();
+ if (pLimiter == nullptr) {
+ ALOGE("Error: updateParameters Limiter NULL for channel: %d", channelIndex);
+ return;
+ }
+ cb.mLimiterEnabled = pLimiter->isEnabled();
+ if (cb.mLimiterEnabled) {
+ IS_CHANGED(changed, cb.mLimiterParams.linkGroup,
+ (int32_t)pLimiter->getLinkGroup());
+ cb.mLimiterParams.attackTimeMs = pLimiter->getAttackTime();
+ cb.mLimiterParams.releaseTimeMs = pLimiter->getReleaseTime();
+ cb.mLimiterParams.ratio = pLimiter->getRatio();
+ cb.mLimiterParams.thresholdDb = pLimiter->getThreshold();
+ cb.mLimiterParams.postGainDb = pLimiter->getPostGain();
+ }
+
+ if (changed) {
+ ALOGV("limiter changed, recomputing linkGroups for %d", channelIndex);
+ mLinkedLimiters.remove(channelIndex); //in case it was already there.
+ mLinkedLimiters.update(cb.mLimiterParams.linkGroup, channelIndex);
+ }
+ }
+
+ //=== Output Gain
+ cb.outputGainDb = pChannel->getOutputGain();
+}
+
+size_t DPFrequency::processSamples(const float *in, float *out, size_t samples) {
+ const float *pIn = in;
+ float *pOut = out;
+
+ int channelCount = mChannelBuffers.size();
+ if (channelCount < 1) {
+ ALOGW("warning: no Channels ready for processing");
+ return 0;
+ }
+
+ //**Check if parameters have changed and update
+ for (int ch = 0; ch < channelCount; ch++) {
+ updateParameters(mChannelBuffers[ch], ch);
+ }
+
+ //**separate into channels
+ for (size_t k = 0; k < samples; k += channelCount) {
+ for (int ch = 0; ch < channelCount; ch++) {
+ mChannelBuffers[ch].cBInput.write(*pIn++);
+ }
+ }
+
+ //**process all channelBuffers
+ processChannelBuffers(mChannelBuffers);
+
+ //** estimate how much data is available in ALL channels
+ size_t available = mChannelBuffers[0].cBOutput.availableToRead();
+ for (int ch = 1; ch < channelCount; ch++) {
+ available = std::min(available, mChannelBuffers[ch].cBOutput.availableToRead());
+ }
+
+ //** make sure to output just what the buffer can handle
+ if (available > samples/channelCount) {
+ available = samples/channelCount;
+ }
+
+ //**Prepend zeroes if necessary
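+ // (during the first calls the pipeline has not yet produced a full block, so the
+ // shortfall is padded with silence; this is the latency of the block/overlap scheme)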
+ size_t fill = samples - (channelCount * available);
+ for (size_t k = 0; k < fill; k++) {
+ *pOut++ = 0;
+ }
+
+ //**interleave channels
+ for (size_t k = 0; k < available; k++) {
+ for (int ch = 0; ch < channelCount; ch++) {
+ *pOut++ = mChannelBuffers[ch].cBOutput.read();
+ }
+ }
+
+ return samples;
+}
+
+size_t DPFrequency::processChannelBuffers(CBufferVector &channelBuffers) {
+ const int channelCount = channelBuffers.size();
+ size_t processedSamples = 0;
+ size_t processFrames = mBlockSize - mOverlapSize;
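+ // processFrames is the hop size: each pass consumes that many new input samples and
+ // reuses the last mOverlapSize samples of the previous block for overlap-add.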
+
+ size_t available = channelBuffers[0].cBInput.availableToRead();
+ for (int ch = 1; ch < channelCount; ch++) {
+ available = std::min(available, channelBuffers[ch].cBInput.availableToRead());
+ }
+
+ while (available >= processFrames) {
+ //First pass
+ for (int ch = 0; ch < channelCount; ch++) {
+ ChannelBuffer * pCb = &channelBuffers[ch];
+ //move tail of previous
+ std::copy(pCb->input.begin() + processFrames,
+ pCb->input.end(),
+ pCb->input.begin());
+
+ //read new available data
+ for (unsigned int k = 0; k < processFrames; k++) {
+ pCb->input[mOverlapSize + k] = pCb->cBInput.read();
+ }
+ //first stages: fft, preEq, mbc, postEq and start of Limiter
+ processedSamples += processFirstStages(*pCb);
+ }
+
+ //**compute linked limiters and update levels if needed
+ processLinkedLimiters(channelBuffers);
+
+ //final pass.
+ for (int ch = 0; ch < channelCount; ch++) {
+ ChannelBuffer * pCb = &channelBuffers[ch];
+
+ //linked limiter and ifft
+ processLastStages(*pCb);
+
+ //mix tail (and capture new tail)
+ for (unsigned int k = 0; k < mOverlapSize; k++) {
+ pCb->output[k] += pCb->outTail[k];
+ pCb->outTail[k] = pCb->output[processFrames + k]; //new tail
+ }
+
+ //output data
+ for (unsigned int k = 0; k < processFrames; k++) {
+ pCb->cBOutput.write(pCb->output[k]);
+ }
+ }
+ available -= processFrames;
+ }
+ return processedSamples;
+}
+
+size_t DPFrequency::processFirstStages(ChannelBuffer &cb) {
+
+ //##apply window
+ Eigen::Map<Eigen::VectorXf> eWindow(&mVWindow[0], mVWindow.size());
+ Eigen::Map<Eigen::VectorXf> eInput(&cb.input[0], cb.input.size());
+
+ Eigen::VectorXf eWin = eInput.cwiseProduct(eWindow); //apply window
+
+ //##fft
+ //Note: we are using eigen with the default scaling, which ensures that
+ // IFFT( FFT(x) ) = x.
+ // TODO: optimize by using the noscale option, and compensate with dB scale offsets
+ mFftServer.fwd(cb.complexTemp, eWin);
+
+ size_t cSize = cb.complexTemp.size();
+ size_t maxBin = std::min(cSize/2, mHalfFFTSize);
+
+ //== EqPre (always runs)
+ for (size_t k = 0; k < maxBin; k++) {
+ cb.complexTemp[k] *= cb.mPreEqFactorVector[k];
+ }
+
+ //== MBC
+ if (cb.mMbcInUse && cb.mMbcEnabled) {
+ for (size_t band = 0; band < cb.mMbcBands.size(); band++) {
+ ChannelBuffer::MbcBandParams *pMbcBandParams = &cb.mMbcBands[band];
+ float fEnergySum = 0;
+
+ //apply pre gain.
+ float preGainFactor = dBtoLinear(pMbcBandParams->gainPreDb);
+ float preGainSquared = preGainFactor * preGainFactor;
+
+ for (size_t k = pMbcBandParams->binStart; k <= pMbcBandParams->binStop; k++) {
+ fEnergySum += std::norm(cb.complexTemp[k]) * preGainSquared; //mag squared
+ }
+
+ //Eigen FFT is full spectrum, even if the source was real data.
+ // Each half spectrum has half the energy. This is taken into account with the * 2
+ // factor in the energy computations.
+ // energy = sqrt(sum_components_squared) / number_of_points
+ // in here, the fEnergySum is duplicated to account for the second half spectrum,
+ // and the windowRms is used to normalize by the expected energy reduction
+ // caused by the window used (expected for steady state signals)
+ fEnergySum = sqrt(fEnergySum * 2) / (mBlockSize * mWindowRms);
+
+ // updates computed per frame advance.
+ float fTheta = 0.0;
+ float fFAttSec = pMbcBandParams->attackTimeMs / 1000; //in seconds
+ float fFRelSec = pMbcBandParams->releaseTimeMs / 1000; //in seconds
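+ // exp(-1 / (tau * mBlocksPerSecond)) below is the coefficient of a one-pole envelope
+ // smoother with time constant tau, updated once per processed block.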
+
+ if (fEnergySum > pMbcBandParams->previousEnvelope) {
+ fTheta = exp(-1.0 / (fFAttSec * mBlocksPerSecond));
+ } else {
+ fTheta = exp(-1.0 / (fFRelSec * mBlocksPerSecond));
+ }
+
+ float fEnv = (1.0 - fTheta) * fEnergySum + fTheta * pMbcBandParams->previousEnvelope;
+ //preserve for next iteration
+ pMbcBandParams->previousEnvelope = fEnv;
+
+ if (fEnv < MIN_ENVELOPE) {
+ fEnv = MIN_ENVELOPE;
+ }
+ const float envDb = linearToDb(fEnv);
+ float newLevelDb = envDb;
+ //using shorter variables for code clarity
+ const float thresholdDb = pMbcBandParams->thresholdDb;
+ const float ratio = pMbcBandParams->ratio;
+ const float kneeWidthDbHalf = pMbcBandParams->kneeWidthDb / 2;
+ const float noiseGateThresholdDb = pMbcBandParams->noiseGateThresholdDb;
+ const float expanderRatio = pMbcBandParams->expanderRatio;
+
+ //find segment
+ if (envDb > thresholdDb + kneeWidthDbHalf) {
+ //compression segment
+ newLevelDb = envDb + ((1 / ratio) - 1) * (envDb - thresholdDb);
+ } else if (envDb > thresholdDb - kneeWidthDbHalf) {
+ //knee-compression segment
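+ //quadratic soft knee: the gain reduction ramps from 0 dB at (threshold - kneeWidth/2)
+ //up to the full compression slope at (threshold + kneeWidth/2).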
+ float temp = (envDb - thresholdDb + kneeWidthDbHalf);
+ newLevelDb = envDb + ((1 / ratio) - 1) *
+ temp * temp / (kneeWidthDbHalf * 4);
+ } else if (envDb < noiseGateThresholdDb) {
+ //expander segment
+ newLevelDb = noiseGateThresholdDb -
+ expanderRatio * (noiseGateThresholdDb - envDb);
+ }
+
+ float newFactor = dBtoLinear(newLevelDb - envDb);
+
+ //apply post gain.
+ newFactor *= dBtoLinear(pMbcBandParams->gainPostDb);
+
+ //apply to this band
+ for (size_t k = pMbcBandParams->binStart; k <= pMbcBandParams->binStop; k++) {
+ cb.complexTemp[k] *= newFactor;
+ }
+
+ } //end per band process
+
+ } //end MBC
+
+ //== EqPost
+ if (cb.mPostEqInUse && cb.mPostEqEnabled) {
+ for (size_t k = 0; k < maxBin; k++) {
+ cb.complexTemp[k] *= cb.mPostEqFactorVector[k];
+ }
+ }
+
+ //== Limiter. First Pass
+ if (cb.mLimiterInUse && cb.mLimiterEnabled) {
+ float fEnergySum = 0;
+ for (size_t k = 0; k < maxBin; k++) {
+ fEnergySum += std::norm(cb.complexTemp[k]);
+ }
+
+ //see explanation above for energy computation logic
+ fEnergySum = sqrt(fEnergySum * 2) / (mBlockSize * mWindowRms);
+ float fTheta = 0.0;
+ float fFAttSec = cb.mLimiterParams.attackTimeMs / 1000; //in seconds
+ float fFRelSec = cb.mLimiterParams.releaseTimeMs / 1000; //in seconds
+
+ if (fEnergySum > cb.mLimiterParams.previousEnvelope) {
+ fTheta = exp(-1.0 / (fFAttSec * mBlocksPerSecond));
+ } else {
+ fTheta = exp(-1.0 / (fFRelSec * mBlocksPerSecond));
+ }
+
+ float fEnv = (1.0 - fTheta) * fEnergySum + fTheta * cb.mLimiterParams.previousEnvelope;
+ //preserve for next iteration
+ cb.mLimiterParams.previousEnvelope = fEnv;
+
+ const float envDb = linearToDb(fEnv);
+ float newFactorDb = 0;
+ //using shorter variables for code clarity
+ const float thresholdDb = cb.mLimiterParams.thresholdDb;
+ const float ratio = cb.mLimiterParams.ratio;
+
+ if (envDb > thresholdDb) {
+ //limiter segment
+ newFactorDb = ((1 / ratio) - 1) * (envDb - thresholdDb);
+ }
+
+ float newFactor = dBtoLinear(newFactorDb);
+
+ cb.mLimiterParams.newFactor = newFactor;
+
+ } //end Limiter
+ return mBlockSize;
+}
+
+void DPFrequency::processLinkedLimiters(CBufferVector &channelBuffers) {
+
+ const int channelCount = channelBuffers.size();
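+ // Channels in the same link group share the smallest (most attenuating) limiter factor,
+ // so limiting does not shift the balance between linked channels.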
+ for (auto &groupPair : mLinkedLimiters.mGroupsMap) {
+ float minFactor = 1.0;
+ //estimate minfactor for all linked
+ for(int index : groupPair.second) {
+ if (index >= 0 && index < channelCount) {
+ minFactor = std::min(channelBuffers[index].mLimiterParams.newFactor, minFactor);
+ }
+ }
+ //apply minFactor
+ for(int index : groupPair.second) {
+ if (index >= 0 && index < channelCount) {
+ channelBuffers[index].mLimiterParams.linkFactor = minFactor;
+ }
+ }
+ }
+}
+
+size_t DPFrequency::processLastStages(ChannelBuffer &cb) {
+
+ float outputGainFactor = dBtoLinear(cb.outputGainDb);
+ //== Limiter. last Pass
+ if (cb.mLimiterInUse && cb.mLimiterEnabled) {
+ //compute factor, with post-gain
+ float factor = cb.mLimiterParams.linkFactor * dBtoLinear(cb.mLimiterParams.postGainDb);
+ outputGainFactor *= factor;
+ }
+
+ //apply to all if != 1.0
+ if (!compareEquality(outputGainFactor, 1.0f)) {
+ size_t cSize = cb.complexTemp.size();
+ size_t maxBin = std::min(cSize/2, mHalfFFTSize);
+ for (size_t k = 0; k < maxBin; k++) {
+ cb.complexTemp[k] *= outputGainFactor;
+ }
+ }
+
+ //##ifft directly to output.
+ Eigen::Map<Eigen::VectorXf> eOutput(&cb.output[0], cb.output.size());
+ mFftServer.inv(eOutput, cb.complexTemp);
+ return mBlockSize;
+}
+
+} //namespace dp_fx
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.h b/media/libeffects/dynamicsproc/dsp/DPFrequency.h
new file mode 100644
index 0000000..be8771d
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef DPFREQUENCY_H_
+#define DPFREQUENCY_H_
+
+#include <Eigen/Dense>
+#include <unsupported/Eigen/FFT>
+
+#include "RDsp.h"
+#include "SHCircularBuffer.h"
+
+#include "DPBase.h"
+
+
+namespace dp_fx {
+
+using FXBuffer = SHCircularBuffer<float>;
+
+class ChannelBuffer {
+public:
+ FXBuffer cBInput; // Circular Buffer input
+ FXBuffer cBOutput; // Circular Buffer output
+ FloatVec input; // time domain temp vector for input
+ FloatVec output; // time domain temp vector for output
+ FloatVec outTail; // time domain temp vector for output tail (for overlap-add method)
+
+ Eigen::VectorXcf complexTemp; // complex temp vector for frequency domain operations
+
+ //Current parameters
+ float inputGainDb;
+ float outputGainDb;
+ struct BandParams {
+ bool enabled;
+ float freqCutoffHz;
+ size_t binStart;
+ size_t binStop;
+ };
+ struct EqBandParams : public BandParams {
+ float gainDb;
+ };
+ struct MbcBandParams : public BandParams {
+ float gainPreDb;
+ float gainPostDb;
+ float attackTimeMs;
+ float releaseTimeMs;
+ float ratio;
+ float thresholdDb;
+ float kneeWidthDb;
+ float noiseGateThresholdDb;
+ float expanderRatio;
+
+ //Historic values
+ float previousEnvelope;
+ };
+ struct LimiterParams {
+ int32_t linkGroup;
+ float attackTimeMs;
+ float releaseTimeMs;
+ float ratio;
+ float thresholdDb;
+ float postGainDb;
+
+ //Historic values
+ float previousEnvelope;
+ float newFactor;
+ float linkFactor;
+ };
+
+ bool mPreEqInUse;
+ bool mPreEqEnabled;
+ std::vector<EqBandParams> mPreEqBands;
+
+ bool mMbcInUse;
+ bool mMbcEnabled;
+ std::vector<MbcBandParams> mMbcBands;
+
+ bool mPostEqInUse;
+ bool mPostEqEnabled;
+ std::vector<EqBandParams> mPostEqBands;
+
+ bool mLimiterInUse;
+ bool mLimiterEnabled;
+ LimiterParams mLimiterParams;
+ FloatVec mPreEqFactorVector; // temp pre-computed vector to shape spectrum at preEQ stage
+ FloatVec mPostEqFactorVector; // temp pre-computed vector to shape spectrum at postEQ stage
+
+ void initBuffers(unsigned int blockSize, unsigned int overlapSize, unsigned int halfFftSize,
+ unsigned int samplingRate, DPBase &dpBase);
+ void computeBinStartStop(BandParams &bp, size_t binStart);
+private:
+ unsigned int mSamplingRate;
+ unsigned int mBlockSize;
+
+};
+
+using CBufferVector = std::vector<ChannelBuffer>;
+
+using GroupsMap = std::map<int32_t, IntVec>;
+
+class LinkedLimiters {
+public:
+ void reset();
+ void update(int32_t group, int index);
+ void remove(int index);
+ GroupsMap mGroupsMap;
+};
+
+class DPFrequency : public DPBase {
+public:
+ virtual size_t processSamples(const float *in, float *out, size_t samples);
+ virtual void reset();
+ void configure(size_t blockSize, size_t overlapSize, size_t samplingRate);
+ static size_t getMinBockSize();
+ static size_t getMaxBockSize();
+
+private:
+ void updateParameters(ChannelBuffer &cb, int channelIndex);
+ size_t processMono(ChannelBuffer &cb);
+ size_t processOneVector(FloatVec &output, FloatVec &input, ChannelBuffer &cb);
+
+ size_t processChannelBuffers(CBufferVector &channelBuffers);
+ size_t processFirstStages(ChannelBuffer &cb);
+ size_t processLastStages(ChannelBuffer &cb);
+ void processLinkedLimiters(CBufferVector &channelBuffers);
+
+ size_t mBlockSize;
+ size_t mHalfFFTSize;
+ size_t mOverlapSize;
+ size_t mSamplingRate;
+
+ float mBlocksPerSecond;
+
+ CBufferVector mChannelBuffers;
+
+ LinkedLimiters mLinkedLimiters;
+
+ //dsp
+ FloatVec mVWindow; //analysis window coefficients.
+ float mWindowRms;
+ Eigen::FFT<float> mFftServer;
+};
+
+} //namespace dp_fx
+
+#endif // DPFREQUENCY_H_
diff --git a/media/libeffects/dynamicsproc/dsp/RDsp.h b/media/libeffects/dynamicsproc/dsp/RDsp.h
new file mode 100644
index 0000000..cfa1305
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/RDsp.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RDSP_H
+#define RDSP_H
+
+#include <complex>
+#include <log/log.h>
+#include <vector>
+#include <map>
+using FloatVec = std::vector<float>;
+using IntVec = std::vector<int>;
+using ComplexVec = std::vector<std::complex<float>>;
+
+// =======
+// Helper Functions
+// =======
+template <class T>
+static T dBtoLinear(T valueDb) {
+ return pow(10, valueDb / 20.0);
+}
+
+template <class T>
+static T linearToDb(T value) {
+ return 20 * log10(value);
+}
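+// For example, dBtoLinear(20.0f) returns 10 and linearToDb(10.0f) returns 20;
+// the two helpers are inverses of each other.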
+
+// =======
+// DSP window creation
+// =======
+
+#define TWOPI (M_PI * 2)
+
+enum rdsp_window_type {
+ RDSP_WINDOW_RECTANGULAR,
+ RDSP_WINDOW_TRIANGULAR,
+ RDSP_WINDOW_TRIANGULAR_FLAT_TOP,
+ RDSP_WINDOW_HAMMING,
+ RDSP_WINDOW_HAMMING_FLAT_TOP,
+ RDSP_WINDOW_HANNING,
+ RDSP_WINDOW_HANNING_FLAT_TOP,
+};
+
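+// The *_FLAT_TOP variants taper only the first and last "overlap" samples and hold 1.0
+// in between, which suits overlap-add processing where consecutive blocks advance by
+// (size - overlap) samples.
+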
+template <typename T>
+static void fillRectangular(T &v) {
+ const size_t size = v.size();
+ for (size_t i = 0; i < size; i++) {
+ v[i] = 1.0;
+ }
+} //rectangular
+
+template <typename T>
+static void fillTriangular(T &v, size_t overlap) {
+ const size_t size = v.size();
+ //ramp up
+ size_t i = 0;
+ if (overlap > 0) {
+ for (; i < overlap; i++) {
+ v[i] = (2.0 * i + 1) / (2 * overlap);
+ }
+ }
+
+ //flat top
+ for (; i < size - overlap; i++) {
+ v[i] = 1.0;
+ }
+
+ //ramp down
+ if (overlap > 0) {
+ for (; i < size; i++) {
+ v[i] = (2.0 * (size - i) - 1) / (2 * overlap);
+ }
+ }
+} //triangular
+
+template <typename T>
+static void fillHamming(T &v, size_t overlap) {
+ const size_t size = v.size();
+ const size_t twoOverlap = 2 * overlap;
+ //ramp up
+ size_t i = 0;
+ if (overlap > 0) {
+ for (; i < overlap; i++) {
+ v[i] = 0.54 - 0.46 * cos(TWOPI * i / (twoOverlap - 1));
+ }
+ }
+
+ //flat top
+ for (; i < size - overlap; i++) {
+ v[i] = 1.0;
+ }
+
+ //ramp down
+ if (overlap > 0) {
+ for (; i < size; i++) {
+ int k = i - ((int)size - 2 * overlap);
+ v[i] = 0.54 - 0.46 * cos(TWOPI * k / (twoOverlap - 1));
+ }
+ }
+} //hamming
+
+template <typename T>
+static void fillHanning(T &v, size_t overlap) {
+ const size_t size = v.size();
+ const size_t twoOverlap = 2 * overlap;
+ //ramp up
+ size_t i = 0;
+ if (overlap > 0) {
+ for (; i < overlap; i++) {
+ v[i] = 0.5 * (1.0 - cos(TWOPI * i / (twoOverlap - 1)));
+ }
+ }
+
+ //flat top
+ for (; i < size - overlap; i++) {
+ v[i] = 1.0;
+ }
+
+ //ramp down
+ if (overlap > 0) {
+ for (; i < size; i++) {
+ int k = i - ((int)size - 2 * overlap);
+ v[i] = 0.5 * (1.0 - cos(TWOPI * k / (twoOverlap - 1)));
+ }
+ }
+}
+
+template <typename T>
+static void fill_window(T &v, int type, size_t size, size_t overlap) {
+ if (overlap > size / 2) {
+ overlap = size / 2;
+ }
+ v.resize(size);
+
+ switch (type) {
+ case RDSP_WINDOW_RECTANGULAR:
+ fillRectangular(v);
+ break;
+ case RDSP_WINDOW_TRIANGULAR:
+ fillTriangular(v, size / 2);
+ break;
+ case RDSP_WINDOW_TRIANGULAR_FLAT_TOP:
+ fillTriangular(v, overlap);
+ break;
+ case RDSP_WINDOW_HAMMING:
+ fillHamming(v, size / 2);
+ break;
+ case RDSP_WINDOW_HAMMING_FLAT_TOP:
+ fillHamming(v, overlap);
+ break;
+ case RDSP_WINDOW_HANNING:
+ fillHanning(v, size / 2);
+ break;
+ case RDSP_WINDOW_HANNING_FLAT_TOP:
+ fillHanning(v, overlap);
+ break;
+ default:
+ ALOGE("Error: unknown window type %d", type);
+ }
+}
+
+//};
+#endif //RDSP_H
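
Editor's note: fill_window() resizes the vector and fills in the chosen shape; the *_FLAT_TOP variants ramp only over the overlap region and hold 1.0 in the middle, which suits overlap-add processing, while the plain variants force the ramps to cover the whole block (overlap is set to size / 2). A minimal hedged usage sketch (the wrapper function is illustrative, not part of RDsp.h):

    #include "RDsp.h"

    static FloatVec makeAnalysisWindow() {
        // 512-sample Hanning-flat-top window: 128-sample raised-cosine ramps on
        // each side and 256 samples of 1.0 in the middle.
        FloatVec window;
        fill_window(window, RDSP_WINDOW_HANNING_FLAT_TOP, 512 /*size*/, 128 /*overlap*/);
        return window;
    }
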
diff --git a/media/libeffects/dynamicsproc/dsp/SHCircularBuffer.h b/media/libeffects/dynamicsproc/dsp/SHCircularBuffer.h
new file mode 100644
index 0000000..c139cd8
--- /dev/null
+++ b/media/libeffects/dynamicsproc/dsp/SHCircularBuffer.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHCIRCULARBUFFER_H
+#define SHCIRCULARBUFFER_H
+
+#include <log/log.h>
+#include <vector>
+
+template <class T>
+class SHCircularBuffer {
+
+public:
+ SHCircularBuffer() : mReadIndex(0), mWriteIndex(0), mReadAvailable(0) {
+ }
+
+ explicit SHCircularBuffer(size_t maxSize) {
+ resize(maxSize);
+ }
+ void resize(size_t maxSize) {
+ mBuffer.resize(maxSize);
+ mReadIndex = 0;
+ mWriteIndex = 0;
+ mReadAvailable = 0;
+ }
+ inline void write(T value) {
+ if (availableToWrite()) {
+ mBuffer[mWriteIndex++] = value;
+ if (mWriteIndex >= getSize()) {
+ mWriteIndex = 0;
+ }
+ mReadAvailable++;
+ } else {
+ ALOGE("Error: SHCircularBuffer no space to write. allocated size %zu ", getSize());
+ }
+ }
+ inline T read() {
+ T value = T();
+ if (availableToRead()) {
+ value = mBuffer[mReadIndex++];
+ if (mReadIndex >= getSize()) {
+ mReadIndex = 0;
+ }
+ mReadAvailable--;
+ } else {
+ ALOGW("Warning: SHCircularBuffer no data available to read. Default value returned");
+ }
+ return value;
+ }
+ inline size_t availableToRead() const {
+ return mReadAvailable;
+ }
+ inline size_t availableToWrite() const {
+ return getSize() - mReadAvailable;
+ }
+ inline size_t getSize() const {
+ return mBuffer.size();
+ }
+
+private:
+ std::vector<T> mBuffer;
+ size_t mReadIndex;
+ size_t mWriteIndex;
+ size_t mReadAvailable;
+};
+
+
+#endif //SHCIRCULARBUFFER_H
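
Editor's note: SHCircularBuffer is a fixed-capacity FIFO: write() appends while space remains, read() pops in order and returns a default-constructed value (with a warning) on underflow. A minimal hedged usage sketch:

    #include "SHCircularBuffer.h"

    void circularBufferSketch() {
        SHCircularBuffer<float> fifo(4);      // capacity of 4 samples

        fifo.write(1.0f);
        fifo.write(2.0f);
        // availableToRead() == 2, availableToWrite() == 2

        float a = fifo.read();                // 1.0f (FIFO order)
        float b = fifo.read();                // 2.0f
        float c = fifo.read();                // 0.0f, and logs a warning: buffer is empty
        (void)a; (void)b; (void)c;
    }
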
diff --git a/media/libeffects/factory/EffectsXmlConfigLoader.cpp b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
index 438b787..7a7d431 100644
--- a/media/libeffects/factory/EffectsXmlConfigLoader.cpp
+++ b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
@@ -327,7 +327,7 @@
&gSkippedEffects, &gSubEffectList);
ALOGE_IF(result.nbSkippedElement != 0, "%zu errors during loading of configuration: %s",
- result.nbSkippedElement, path ?: effectsConfig::DEFAULT_PATH);
+ result.nbSkippedElement, result.configPath ?: "No config file found");
return result.nbSkippedElement;
}
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Coeffs.h b/media/libeffects/lvm/lib/Bass/src/LVDBE_Coeffs.h
index f32ed30..4ecaf14 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Coeffs.h
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Coeffs.h
@@ -534,246 +534,246 @@
/* Coefficients for centre frequency 55Hz */
#define HPF_Fs8000_Fc55_A0 0.958849f
-#define HPF_Fs8000_Fc55_A1 -1.917698f
+#define HPF_Fs8000_Fc55_A1 (-1.917698f)
#define HPF_Fs8000_Fc55_A2 0.958849f
-#define HPF_Fs8000_Fc55_B1 -1.939001f
+#define HPF_Fs8000_Fc55_B1 (-1.939001f)
#define HPF_Fs8000_Fc55_B2 0.940807f
#define HPF_Fs11025_Fc55_A0 0.966909f
-#define HPF_Fs11025_Fc55_A1 -1.933818f
+#define HPF_Fs11025_Fc55_A1 (-1.933818f)
#define HPF_Fs11025_Fc55_A2 0.966909f
-#define HPF_Fs11025_Fc55_B1 -1.955732f
+#define HPF_Fs11025_Fc55_B1 (-1.955732f)
#define HPF_Fs11025_Fc55_B2 0.956690f
#define HPF_Fs12000_Fc55_A0 0.968650f
-#define HPF_Fs12000_Fc55_A1 -1.937300f
+#define HPF_Fs12000_Fc55_A1 (-1.937300f)
#define HPF_Fs12000_Fc55_A2 0.968650f
-#define HPF_Fs12000_Fc55_B1 -1.959327f
+#define HPF_Fs12000_Fc55_B1 (-1.959327f)
#define HPF_Fs12000_Fc55_B2 0.960138f
#define HPF_Fs16000_Fc55_A0 0.973588f
-#define HPF_Fs16000_Fc55_A1 -1.947176f
+#define HPF_Fs16000_Fc55_A1 (-1.947176f)
#define HPF_Fs16000_Fc55_A2 0.973588f
-#define HPF_Fs16000_Fc55_B1 -1.969494f
+#define HPF_Fs16000_Fc55_B1 (-1.969494f)
#define HPF_Fs16000_Fc55_B2 0.969952f
#define HPF_Fs22050_Fc55_A0 0.977671f
-#define HPF_Fs22050_Fc55_A1 -1.955343f
+#define HPF_Fs22050_Fc55_A1 (-1.955343f)
#define HPF_Fs22050_Fc55_A2 0.977671f
-#define HPF_Fs22050_Fc55_B1 -1.977863f
+#define HPF_Fs22050_Fc55_B1 (-1.977863f)
#define HPF_Fs22050_Fc55_B2 0.978105f
#define HPF_Fs24000_Fc55_A0 0.978551f
-#define HPF_Fs24000_Fc55_A1 -1.957102f
+#define HPF_Fs24000_Fc55_A1 (-1.957102f)
#define HPF_Fs24000_Fc55_A2 0.978551f
-#define HPF_Fs24000_Fc55_B1 -1.979662f
+#define HPF_Fs24000_Fc55_B1 (-1.979662f)
#define HPF_Fs24000_Fc55_B2 0.979866f
#define HPF_Fs32000_Fc55_A0 0.981042f
-#define HPF_Fs32000_Fc55_A1 -1.962084f
+#define HPF_Fs32000_Fc55_A1 (-1.962084f)
#define HPF_Fs32000_Fc55_A2 0.981042f
-#define HPF_Fs32000_Fc55_B1 -1.984746f
+#define HPF_Fs32000_Fc55_B1 (-1.984746f)
#define HPF_Fs32000_Fc55_B2 0.984861f
#define HPF_Fs44100_Fc55_A0 0.983097f
-#define HPF_Fs44100_Fc55_A1 -1.966194f
+#define HPF_Fs44100_Fc55_A1 (-1.966194f)
#define HPF_Fs44100_Fc55_A2 0.983097f
-#define HPF_Fs44100_Fc55_B1 -1.988931f
+#define HPF_Fs44100_Fc55_B1 (-1.988931f)
#define HPF_Fs44100_Fc55_B2 0.988992f
#define HPF_Fs48000_Fc55_A0 0.983539f
-#define HPF_Fs48000_Fc55_A1 -1.967079f
+#define HPF_Fs48000_Fc55_A1 (-1.967079f)
#define HPF_Fs48000_Fc55_A2 0.983539f
-#define HPF_Fs48000_Fc55_B1 -1.989831f
+#define HPF_Fs48000_Fc55_B1 (-1.989831f)
#define HPF_Fs48000_Fc55_B2 0.989882f
#ifdef HIGHER_FS
#define HPF_Fs96000_Fc55_A0 0.986040f
-#define HPF_Fs96000_Fc55_A1 -1.972080f
+#define HPF_Fs96000_Fc55_A1 (-1.972080f)
#define HPF_Fs96000_Fc55_A2 0.986040f
-#define HPF_Fs96000_Fc55_B1 -1.994915f
+#define HPF_Fs96000_Fc55_B1 (-1.994915f)
#define HPF_Fs96000_Fc55_B2 0.994928f
#define HPF_Fs192000_Fc55_A0 0.987294f
-#define HPF_Fs192000_Fc55_A1 -1.974588f
+#define HPF_Fs192000_Fc55_A1 (-1.974588f)
#define HPF_Fs192000_Fc55_A2 0.987294f
-#define HPF_Fs192000_Fc55_B1 -1.997458f
+#define HPF_Fs192000_Fc55_B1 (-1.997458f)
#define HPF_Fs192000_Fc55_B2 0.997461f
#endif
/* Coefficients for centre frequency 66Hz */
#define HPF_Fs8000_Fc66_A0 0.953016f
-#define HPF_Fs8000_Fc66_A1 -1.906032f
+#define HPF_Fs8000_Fc66_A1 (-1.906032f)
#define HPF_Fs8000_Fc66_A2 0.953016f
-#define HPF_Fs8000_Fc66_B1 -1.926810f
+#define HPF_Fs8000_Fc66_B1 (-1.926810f)
#define HPF_Fs8000_Fc66_B2 0.929396f
#define HPF_Fs11025_Fc66_A0 0.962638f
-#define HPF_Fs11025_Fc66_A1 -1.925275f
+#define HPF_Fs11025_Fc66_A1 (-1.925275f)
#define HPF_Fs11025_Fc66_A2 0.962638f
-#define HPF_Fs11025_Fc66_B1 -1.946881f
+#define HPF_Fs11025_Fc66_B1 (-1.946881f)
#define HPF_Fs11025_Fc66_B2 0.948256f
#define HPF_Fs12000_Fc66_A0 0.964718f
-#define HPF_Fs12000_Fc66_A1 -1.929435f
+#define HPF_Fs12000_Fc66_A1 (-1.929435f)
#define HPF_Fs12000_Fc66_A2 0.964718f
-#define HPF_Fs12000_Fc66_B1 -1.951196f
+#define HPF_Fs12000_Fc66_B1 (-1.951196f)
#define HPF_Fs12000_Fc66_B2 0.952359f
#define HPF_Fs16000_Fc66_A0 0.970622f
-#define HPF_Fs16000_Fc66_A1 -1.941244f
+#define HPF_Fs16000_Fc66_A1 (-1.941244f)
#define HPF_Fs16000_Fc66_A2 0.970622f
-#define HPF_Fs16000_Fc66_B1 -1.963394f
+#define HPF_Fs16000_Fc66_B1 (-1.963394f)
#define HPF_Fs16000_Fc66_B2 0.964052f
#define HPF_Fs22050_Fc66_A0 0.975509f
-#define HPF_Fs22050_Fc66_A1 -1.951019f
+#define HPF_Fs22050_Fc66_A1 (-1.951019f)
#define HPF_Fs22050_Fc66_A2 0.975509f
-#define HPF_Fs22050_Fc66_B1 -1.973436f
+#define HPF_Fs22050_Fc66_B1 (-1.973436f)
#define HPF_Fs22050_Fc66_B2 0.973784f
#define HPF_Fs24000_Fc66_A0 0.976563f
-#define HPF_Fs24000_Fc66_A1 -1.953125f
+#define HPF_Fs24000_Fc66_A1 (-1.953125f)
#define HPF_Fs24000_Fc66_A2 0.976563f
-#define HPF_Fs24000_Fc66_B1 -1.975594f
+#define HPF_Fs24000_Fc66_B1 (-1.975594f)
#define HPF_Fs24000_Fc66_B2 0.975889f
#define HPF_Fs32000_Fc66_A0 0.979547f
-#define HPF_Fs32000_Fc66_A1 -1.959093f
+#define HPF_Fs32000_Fc66_A1 (-1.959093f)
#define HPF_Fs32000_Fc66_A2 0.979547f
-#define HPF_Fs32000_Fc66_B1 -1.981695f
+#define HPF_Fs32000_Fc66_B1 (-1.981695f)
#define HPF_Fs32000_Fc66_B2 0.981861f
#define HPF_Fs44100_Fc66_A0 0.982010f
-#define HPF_Fs44100_Fc66_A1 -1.964019f
+#define HPF_Fs44100_Fc66_A1 (-1.964019f)
#define HPF_Fs44100_Fc66_A2 0.982010f
-#define HPF_Fs44100_Fc66_B1 -1.986718f
+#define HPF_Fs44100_Fc66_B1 (-1.986718f)
#define HPF_Fs44100_Fc66_B2 0.986805f
#define HPF_Fs48000_Fc66_A0 0.982540f
-#define HPF_Fs48000_Fc66_A1 -1.965079f
+#define HPF_Fs48000_Fc66_A1 (-1.965079f)
#define HPF_Fs48000_Fc66_A2 0.982540f
-#define HPF_Fs48000_Fc66_B1 -1.987797f
+#define HPF_Fs48000_Fc66_B1 (-1.987797f)
#define HPF_Fs48000_Fc66_B2 0.987871f
#ifdef HIGHER_FS
#define HPF_Fs96000_Fc66_A0 0.985539f
-#define HPF_Fs96000_Fc66_A1 -1.971077f
+#define HPF_Fs96000_Fc66_A1 (-1.971077f)
#define HPF_Fs96000_Fc66_A2 0.985539f
-#define HPF_Fs96000_Fc66_B1 -1.993898f
+#define HPF_Fs96000_Fc66_B1 (-1.993898f)
#define HPF_Fs96000_Fc66_B2 0.993917f
#define HPF_Fs192000_Fc66_A0 0.987043f
-#define HPF_Fs192000_Fc66_A1 -1.974086f
+#define HPF_Fs192000_Fc66_A1 (-1.974086f)
#define HPF_Fs192000_Fc66_A2 0.987043f
-#define HPF_Fs192000_Fc66_B1 -1.996949f
+#define HPF_Fs192000_Fc66_B1 (-1.996949f)
#define HPF_Fs192000_Fc66_B2 0.996954f
#endif
/* Coefficients for centre frequency 78Hz */
#define HPF_Fs8000_Fc78_A0 0.946693f
-#define HPF_Fs8000_Fc78_A1 -1.893387f
+#define HPF_Fs8000_Fc78_A1 (-1.893387f)
#define HPF_Fs8000_Fc78_A2 0.946693f
-#define HPF_Fs8000_Fc78_B1 -1.913517f
+#define HPF_Fs8000_Fc78_B1 (-1.913517f)
#define HPF_Fs8000_Fc78_B2 0.917105f
#define HPF_Fs11025_Fc78_A0 0.957999f
-#define HPF_Fs11025_Fc78_A1 -1.915998f
+#define HPF_Fs11025_Fc78_A1 (-1.915998f)
#define HPF_Fs11025_Fc78_A2 0.957999f
-#define HPF_Fs11025_Fc78_B1 -1.937229f
+#define HPF_Fs11025_Fc78_B1 (-1.937229f)
#define HPF_Fs11025_Fc78_B2 0.939140f
#define HPF_Fs12000_Fc78_A0 0.960446f
-#define HPF_Fs12000_Fc78_A1 -1.920892f
+#define HPF_Fs12000_Fc78_A1 (-1.920892f)
#define HPF_Fs12000_Fc78_A2 0.960446f
-#define HPF_Fs12000_Fc78_B1 -1.942326f
+#define HPF_Fs12000_Fc78_B1 (-1.942326f)
#define HPF_Fs12000_Fc78_B2 0.943944f
#define HPF_Fs16000_Fc78_A0 0.967397f
-#define HPF_Fs16000_Fc78_A1 -1.934794f
+#define HPF_Fs16000_Fc78_A1 (-1.934794f)
#define HPF_Fs16000_Fc78_A2 0.967397f
-#define HPF_Fs16000_Fc78_B1 -1.956740f
+#define HPF_Fs16000_Fc78_B1 (-1.956740f)
#define HPF_Fs16000_Fc78_B2 0.957656f
#define HPF_Fs22050_Fc78_A0 0.973156f
-#define HPF_Fs22050_Fc78_A1 -1.946313f
+#define HPF_Fs22050_Fc78_A1 (-1.946313f)
#define HPF_Fs22050_Fc78_A2 0.973156f
-#define HPF_Fs22050_Fc78_B1 -1.968607f
+#define HPF_Fs22050_Fc78_B1 (-1.968607f)
#define HPF_Fs22050_Fc78_B2 0.969092f
#define HPF_Fs24000_Fc78_A0 0.974398f
-#define HPF_Fs24000_Fc78_A1 -1.948797f
+#define HPF_Fs24000_Fc78_A1 (-1.948797f)
#define HPF_Fs24000_Fc78_A2 0.974398f
-#define HPF_Fs24000_Fc78_B1 -1.971157f
+#define HPF_Fs24000_Fc78_B1 (-1.971157f)
#define HPF_Fs24000_Fc78_B2 0.971568f
#define HPF_Fs32000_Fc78_A0 0.977918f
-#define HPF_Fs32000_Fc78_A1 -1.955836f
+#define HPF_Fs32000_Fc78_A1 (-1.955836f)
#define HPF_Fs32000_Fc78_A2 0.977918f
-#define HPF_Fs32000_Fc78_B1 -1.978367f
+#define HPF_Fs32000_Fc78_B1 (-1.978367f)
#define HPF_Fs32000_Fc78_B2 0.978599f
#define HPF_Fs44100_Fc78_A0 0.980824f
-#define HPF_Fs44100_Fc78_A1 -1.961649f
+#define HPF_Fs44100_Fc78_A1 (-1.961649f)
#define HPF_Fs44100_Fc78_A2 0.980824f
-#define HPF_Fs44100_Fc78_B1 -1.984303f
+#define HPF_Fs44100_Fc78_B1 (-1.984303f)
#define HPF_Fs44100_Fc78_B2 0.984425f
#define HPF_Fs48000_Fc78_A0 0.981450f
-#define HPF_Fs48000_Fc78_A1 -1.962900f
+#define HPF_Fs48000_Fc78_A1 (-1.962900f)
#define HPF_Fs48000_Fc78_A2 0.981450f
-#define HPF_Fs48000_Fc78_B1 -1.985578f
+#define HPF_Fs48000_Fc78_B1 (-1.985578f)
#define HPF_Fs48000_Fc78_B2 0.985681f
#ifdef HIGHER_FS
#define HPF_Fs96000_Fc78_A0 0.984992f
-#define HPF_Fs96000_Fc78_A1 -1.969984f
+#define HPF_Fs96000_Fc78_A1 (-1.969984f)
#define HPF_Fs96000_Fc78_A2 0.984992f
-#define HPF_Fs96000_Fc78_B1 -1.992789f
+#define HPF_Fs96000_Fc78_B1 (-1.992789f)
#define HPF_Fs96000_Fc78_B2 0.992815f
#define HPF_Fs192000_Fc78_A0 0.986769f
-#define HPF_Fs192000_Fc78_A1 -1.973539f
+#define HPF_Fs192000_Fc78_A1 (-1.973539f)
#define HPF_Fs192000_Fc78_A2 0.986769f
-#define HPF_Fs192000_Fc78_B1 -1.996394f
+#define HPF_Fs192000_Fc78_B1 (-1.996394f)
#define HPF_Fs192000_Fc78_B2 0.996401f
#endif
/* Coefficients for centre frequency 90Hz */
#define HPF_Fs8000_Fc90_A0 0.940412f
-#define HPF_Fs8000_Fc90_A1 -1.880825f
+#define HPF_Fs8000_Fc90_A1 (-1.880825f)
#define HPF_Fs8000_Fc90_A2 0.940412f
-#define HPF_Fs8000_Fc90_B1 -1.900231f
+#define HPF_Fs8000_Fc90_B1 (-1.900231f)
#define HPF_Fs8000_Fc90_B2 0.904977f
#define HPF_Fs11025_Fc90_A0 0.953383f
-#define HPF_Fs11025_Fc90_A1 -1.906766f
+#define HPF_Fs11025_Fc90_A1 (-1.906766f)
#define HPF_Fs11025_Fc90_A2 0.953383f
-#define HPF_Fs11025_Fc90_B1 -1.927579f
+#define HPF_Fs11025_Fc90_B1 (-1.927579f)
#define HPF_Fs11025_Fc90_B2 0.930111f
#define HPF_Fs12000_Fc90_A0 0.956193f
-#define HPF_Fs12000_Fc90_A1 -1.912387f
+#define HPF_Fs12000_Fc90_A1 (-1.912387f)
#define HPF_Fs12000_Fc90_A2 0.956193f
-#define HPF_Fs12000_Fc90_B1 -1.933459f
+#define HPF_Fs12000_Fc90_B1 (-1.933459f)
#define HPF_Fs12000_Fc90_B2 0.935603f
#define HPF_Fs16000_Fc90_A0 0.964183f
-#define HPF_Fs16000_Fc90_A1 -1.928365f
+#define HPF_Fs16000_Fc90_A1 (-1.928365f)
#define HPF_Fs16000_Fc90_A2 0.964183f
-#define HPF_Fs16000_Fc90_B1 -1.950087f
+#define HPF_Fs16000_Fc90_B1 (-1.950087f)
#define HPF_Fs16000_Fc90_B2 0.951303f
#define HPF_Fs22050_Fc90_A0 0.970809f
-#define HPF_Fs22050_Fc90_A1 -1.941618f
+#define HPF_Fs22050_Fc90_A1 (-1.941618f)
#define HPF_Fs22050_Fc90_A2 0.970809f
-#define HPF_Fs22050_Fc90_B1 -1.963778f
+#define HPF_Fs22050_Fc90_B1 (-1.963778f)
#define HPF_Fs22050_Fc90_B2 0.964423f
#define HPF_Fs24000_Fc90_A0 0.972239f
-#define HPF_Fs24000_Fc90_A1 -1.944477f
+#define HPF_Fs24000_Fc90_A1 (-1.944477f)
#define HPF_Fs24000_Fc90_A2 0.972239f
-#define HPF_Fs24000_Fc90_B1 -1.966721f
+#define HPF_Fs24000_Fc90_B1 (-1.966721f)
#define HPF_Fs24000_Fc90_B2 0.967266f
#define HPF_Fs32000_Fc90_A0 0.976292f
-#define HPF_Fs32000_Fc90_A1 -1.952584f
+#define HPF_Fs32000_Fc90_A1 (-1.952584f)
#define HPF_Fs32000_Fc90_A2 0.976292f
-#define HPF_Fs32000_Fc90_B1 -1.975040f
+#define HPF_Fs32000_Fc90_B1 (-1.975040f)
#define HPF_Fs32000_Fc90_B2 0.975347f
#define HPF_Fs44100_Fc90_A0 0.979641f
-#define HPF_Fs44100_Fc90_A1 -1.959282f
+#define HPF_Fs44100_Fc90_A1 (-1.959282f)
#define HPF_Fs44100_Fc90_A2 0.979641f
-#define HPF_Fs44100_Fc90_B1 -1.981888f
+#define HPF_Fs44100_Fc90_B1 (-1.981888f)
#define HPF_Fs44100_Fc90_B2 0.982050f
#define HPF_Fs48000_Fc90_A0 0.980362f
-#define HPF_Fs48000_Fc90_A1 -1.960724f
+#define HPF_Fs48000_Fc90_A1 (-1.960724f)
#define HPF_Fs48000_Fc90_A2 0.980362f
-#define HPF_Fs48000_Fc90_B1 -1.983359f
+#define HPF_Fs48000_Fc90_B1 (-1.983359f)
#define HPF_Fs48000_Fc90_B2 0.983497f
#ifdef HIGHER_FS
#define HPF_Fs96000_Fc90_A0 0.984446f
-#define HPF_Fs96000_Fc90_A1 -1.968892f
+#define HPF_Fs96000_Fc90_A1 (-1.968892f)
#define HPF_Fs96000_Fc90_A2 0.984446f
-#define HPF_Fs96000_Fc90_B1 -1.991680f
+#define HPF_Fs96000_Fc90_B1 (-1.991680f)
#define HPF_Fs96000_Fc90_B2 0.991714f
#define HPF_Fs192000_Fc90_A0 0.986496f
-#define HPF_Fs192000_Fc90_A1 -1.972992f
+#define HPF_Fs192000_Fc90_A1 (-1.972992f)
#define HPF_Fs192000_Fc90_A2 0.986496f
-#define HPF_Fs192000_Fc90_B1 -1.995840f
+#define HPF_Fs192000_Fc90_B1 (-1.995840f)
#define HPF_Fs192000_Fc90_B2 0.995848f
#endif
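
Editor's note: the only change in these coefficient tables is wrapping each negative constant in parentheses. This is defensive macro hygiene: the replacement text then behaves as a single parenthesized operand wherever the macro is substituted, which keeps expressions unambiguous for readers and satisfies MISRA-style rules about macro replacement lists. A hedged illustration with made-up names (both forms compile and produce the same value here; only clarity and analyzer friendliness differ):

    #define COEF_BARE   -1.917698f      // two tokens: '-' and '1.917698f'
    #define COEF_PAREN  (-1.917698f)    // one parenthesized operand

    float y1 = 2.0f - COEF_BARE;        // expands to: 2.0f - -1.917698f
    float y2 = 2.0f - COEF_PAREN;       // expands to: 2.0f - (-1.917698f)
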
@@ -786,244 +786,244 @@
/* Coefficients for centre frequency 55Hz */
#define BPF_Fs8000_Fc55_A0 0.009197f
#define BPF_Fs8000_Fc55_A1 0.000000f
-#define BPF_Fs8000_Fc55_A2 -0.009197f
-#define BPF_Fs8000_Fc55_B1 -1.979545f
+#define BPF_Fs8000_Fc55_A2 (-0.009197f)
+#define BPF_Fs8000_Fc55_B1 (-1.979545f)
#define BPF_Fs8000_Fc55_B2 0.981393f
#define BPF_Fs11025_Fc55_A0 0.006691f
#define BPF_Fs11025_Fc55_A1 0.000000f
-#define BPF_Fs11025_Fc55_A2 -0.006691f
-#define BPF_Fs11025_Fc55_B1 -1.985488f
+#define BPF_Fs11025_Fc55_A2 (-0.006691f)
+#define BPF_Fs11025_Fc55_B1 (-1.985488f)
#define BPF_Fs11025_Fc55_B2 0.986464f
#define BPF_Fs12000_Fc55_A0 0.006150f
#define BPF_Fs12000_Fc55_A1 0.000000f
-#define BPF_Fs12000_Fc55_A2 -0.006150f
-#define BPF_Fs12000_Fc55_B1 -1.986733f
+#define BPF_Fs12000_Fc55_A2 (-0.006150f)
+#define BPF_Fs12000_Fc55_B1 (-1.986733f)
#define BPF_Fs12000_Fc55_B2 0.987557f
#define BPF_Fs16000_Fc55_A0 0.004620f
#define BPF_Fs16000_Fc55_A1 0.000000f
-#define BPF_Fs16000_Fc55_A2 -0.004620f
-#define BPF_Fs16000_Fc55_B1 -1.990189f
+#define BPF_Fs16000_Fc55_A2 (-0.004620f)
+#define BPF_Fs16000_Fc55_B1 (-1.990189f)
#define BPF_Fs16000_Fc55_B2 0.990653f
#define BPF_Fs22050_Fc55_A0 0.003357f
#define BPF_Fs22050_Fc55_A1 0.000000f
-#define BPF_Fs22050_Fc55_A2 -0.003357f
-#define BPF_Fs22050_Fc55_B1 -1.992964f
+#define BPF_Fs22050_Fc55_A2 (-0.003357f)
+#define BPF_Fs22050_Fc55_B1 (-1.992964f)
#define BPF_Fs22050_Fc55_B2 0.993209f
#define BPF_Fs24000_Fc55_A0 0.003085f
#define BPF_Fs24000_Fc55_A1 0.000000f
-#define BPF_Fs24000_Fc55_A2 -0.003085f
-#define BPF_Fs24000_Fc55_B1 -1.993552f
+#define BPF_Fs24000_Fc55_A2 (-0.003085f)
+#define BPF_Fs24000_Fc55_B1 (-1.993552f)
#define BPF_Fs24000_Fc55_B2 0.993759f
#define BPF_Fs32000_Fc55_A0 0.002315f
#define BPF_Fs32000_Fc55_A1 0.000000f
-#define BPF_Fs32000_Fc55_A2 -0.002315f
-#define BPF_Fs32000_Fc55_B1 -1.995199f
+#define BPF_Fs32000_Fc55_A2 (-0.002315f)
+#define BPF_Fs32000_Fc55_B1 (-1.995199f)
#define BPF_Fs32000_Fc55_B2 0.995316f
#define BPF_Fs44100_Fc55_A0 0.001681f
#define BPF_Fs44100_Fc55_A1 0.000000f
-#define BPF_Fs44100_Fc55_A2 -0.001681f
-#define BPF_Fs44100_Fc55_B1 -1.996537f
+#define BPF_Fs44100_Fc55_A2 (-0.001681f)
+#define BPF_Fs44100_Fc55_B1 (-1.996537f)
#define BPF_Fs44100_Fc55_B2 0.996599f
#define BPF_Fs48000_Fc55_A0 0.001545f
#define BPF_Fs48000_Fc55_A1 0.000000f
-#define BPF_Fs48000_Fc55_A2 -0.001545f
-#define BPF_Fs48000_Fc55_B1 -1.996823f
+#define BPF_Fs48000_Fc55_A2 (-0.001545f)
+#define BPF_Fs48000_Fc55_B1 (-1.996823f)
#define BPF_Fs48000_Fc55_B2 0.996875f
#ifdef HIGHER_FS
#define BPF_Fs96000_Fc55_A0 0.000762f
#define BPF_Fs96000_Fc55_A1 0.000000f
-#define BPF_Fs96000_Fc55_A2 -0.000762f
-#define BPF_Fs96000_Fc55_B1 -1.998461f
+#define BPF_Fs96000_Fc55_A2 (-0.000762f)
+#define BPF_Fs96000_Fc55_B1 (-1.998461f)
#define BPF_Fs96000_Fc55_B2 0.998477f
#define BPF_Fs192000_Fc55_A0 0.000381f
#define BPF_Fs192000_Fc55_A1 0.000000f
-#define BPF_Fs192000_Fc55_A2 -0.000381f
-#define BPF_Fs192000_Fc55_B1 -1.999234f
+#define BPF_Fs192000_Fc55_A2 (-0.000381f)
+#define BPF_Fs192000_Fc55_B1 (-1.999234f)
#define BPF_Fs192000_Fc55_B2 0.999238f
#endif
/* Coefficients for centre frequency 66Hz */
#define BPF_Fs8000_Fc66_A0 0.012648f
#define BPF_Fs8000_Fc66_A1 0.000000f
-#define BPF_Fs8000_Fc66_A2 -0.012648f
-#define BPF_Fs8000_Fc66_B1 -1.971760f
+#define BPF_Fs8000_Fc66_A2 (-0.012648f)
+#define BPF_Fs8000_Fc66_B1 (-1.971760f)
#define BPF_Fs8000_Fc66_B2 0.974412f
#define BPF_Fs11025_Fc66_A0 0.009209f
#define BPF_Fs11025_Fc66_A1 0.000000f
-#define BPF_Fs11025_Fc66_A2 -0.009209f
-#define BPF_Fs11025_Fc66_B1 -1.979966f
+#define BPF_Fs11025_Fc66_A2 (-0.009209f)
+#define BPF_Fs11025_Fc66_B1 (-1.979966f)
#define BPF_Fs11025_Fc66_B2 0.981368f
#define BPF_Fs12000_Fc66_A0 0.008468f
#define BPF_Fs12000_Fc66_A1 0.000000f
-#define BPF_Fs12000_Fc66_A2 -0.008468f
-#define BPF_Fs12000_Fc66_B1 -1.981685f
+#define BPF_Fs12000_Fc66_A2 (-0.008468f)
+#define BPF_Fs12000_Fc66_B1 (-1.981685f)
#define BPF_Fs12000_Fc66_B2 0.982869f
#define BPF_Fs16000_Fc66_A0 0.006364f
#define BPF_Fs16000_Fc66_A1 0.000000f
-#define BPF_Fs16000_Fc66_A2 -0.006364f
-#define BPF_Fs16000_Fc66_B1 -1.986457f
+#define BPF_Fs16000_Fc66_A2 (-0.006364f)
+#define BPF_Fs16000_Fc66_B1 (-1.986457f)
#define BPF_Fs16000_Fc66_B2 0.987124f
#define BPF_Fs22050_Fc66_A0 0.004626f
#define BPF_Fs22050_Fc66_A1 0.000000f
-#define BPF_Fs22050_Fc66_A2 -0.004626f
-#define BPF_Fs22050_Fc66_B1 -1.990288f
+#define BPF_Fs22050_Fc66_A2 (-0.004626f)
+#define BPF_Fs22050_Fc66_B1 (-1.990288f)
#define BPF_Fs22050_Fc66_B2 0.990641f
#define BPF_Fs24000_Fc66_A0 0.004252f
#define BPF_Fs24000_Fc66_A1 0.000000f
-#define BPF_Fs24000_Fc66_A2 -0.004252f
-#define BPF_Fs24000_Fc66_B1 -1.991100f
+#define BPF_Fs24000_Fc66_A2 (-0.004252f)
+#define BPF_Fs24000_Fc66_B1 (-1.991100f)
#define BPF_Fs24000_Fc66_B2 0.991398f
#define BPF_Fs32000_Fc66_A0 0.003192f
#define BPF_Fs32000_Fc66_A1 0.000000f
-#define BPF_Fs32000_Fc66_A2 -0.003192f
-#define BPF_Fs32000_Fc66_B1 -1.993374f
+#define BPF_Fs32000_Fc66_A2 (-0.003192f)
+#define BPF_Fs32000_Fc66_B1 (-1.993374f)
#define BPF_Fs32000_Fc66_B2 0.993541f
#define BPF_Fs44100_Fc66_A0 0.002318f
#define BPF_Fs44100_Fc66_A1 0.000000f
-#define BPF_Fs44100_Fc66_A2 -0.002318f
-#define BPF_Fs44100_Fc66_B1 -1.995221f
+#define BPF_Fs44100_Fc66_A2 (-0.002318f)
+#define BPF_Fs44100_Fc66_B1 (-1.995221f)
#define BPF_Fs44100_Fc66_B2 0.995309f
#define BPF_Fs48000_Fc66_A0 0.002131f
#define BPF_Fs48000_Fc66_A1 0.000000f
-#define BPF_Fs48000_Fc66_A2 -0.002131f
-#define BPF_Fs48000_Fc66_B1 -1.995615f
+#define BPF_Fs48000_Fc66_A2 (-0.002131f)
+#define BPF_Fs48000_Fc66_B1 (-1.995615f)
#define BPF_Fs48000_Fc66_B2 0.995690f
#ifdef HIGHER_FS
#define BPF_Fs96000_Fc66_A0 0.001055f
#define BPF_Fs96000_Fc66_A1 0.000000f
-#define BPF_Fs96000_Fc66_A2 -0.001055f
-#define BPF_Fs96000_Fc66_B1 -1.997868f
+#define BPF_Fs96000_Fc66_A2 (-0.001055f)
+#define BPF_Fs96000_Fc66_B1 (-1.997868f)
#define BPF_Fs96000_Fc66_B2 0.997891f
#define BPF_Fs192000_Fc66_A0 0.000528f
#define BPF_Fs192000_Fc66_A1 0.000000f
-#define BPF_Fs192000_Fc66_A2 -0.000528f
-#define BPF_Fs192000_Fc66_B1 -1.998939f
+#define BPF_Fs192000_Fc66_A2 (-0.000528f)
+#define BPF_Fs192000_Fc66_B1 (-1.998939f)
#define BPF_Fs192000_Fc66_B2 0.998945f
#endif
/* Coefficients for centre frequency 78Hz */
#define BPF_Fs8000_Fc78_A0 0.018572f
#define BPF_Fs8000_Fc78_A1 0.000000f
-#define BPF_Fs8000_Fc78_A2 -0.018572f
-#define BPF_Fs8000_Fc78_B1 -1.958745f
+#define BPF_Fs8000_Fc78_A2 (-0.018572f)
+#define BPF_Fs8000_Fc78_B1 (-1.958745f)
#define BPF_Fs8000_Fc78_B2 0.962427f
#define BPF_Fs11025_Fc78_A0 0.013545f
#define BPF_Fs11025_Fc78_A1 0.000000f
-#define BPF_Fs11025_Fc78_A2 -0.013545f
-#define BPF_Fs11025_Fc78_B1 -1.970647f
+#define BPF_Fs11025_Fc78_A2 (-0.013545f)
+#define BPF_Fs11025_Fc78_B1 (-1.970647f)
#define BPF_Fs11025_Fc78_B2 0.972596f
#define BPF_Fs12000_Fc78_A0 0.012458f
#define BPF_Fs12000_Fc78_A1 0.000000f
-#define BPF_Fs12000_Fc78_A2 -0.012458f
-#define BPF_Fs12000_Fc78_B1 -1.973148f
+#define BPF_Fs12000_Fc78_A2 (-0.012458f)
+#define BPF_Fs12000_Fc78_B1 (-1.973148f)
#define BPF_Fs12000_Fc78_B2 0.974795f
#define BPF_Fs16000_Fc78_A0 0.009373f
#define BPF_Fs16000_Fc78_A1 0.000000f
-#define BPF_Fs16000_Fc78_A2 -0.009373f
-#define BPF_Fs16000_Fc78_B1 -1.980108f
+#define BPF_Fs16000_Fc78_A2 (-0.009373f)
+#define BPF_Fs16000_Fc78_B1 (-1.980108f)
#define BPF_Fs16000_Fc78_B2 0.981037f
#define BPF_Fs22050_Fc78_A0 0.006819f
#define BPF_Fs22050_Fc78_A1 0.000000f
-#define BPF_Fs22050_Fc78_A2 -0.006819f
-#define BPF_Fs22050_Fc78_B1 -1.985714f
+#define BPF_Fs22050_Fc78_A2 (-0.006819f)
+#define BPF_Fs22050_Fc78_B1 (-1.985714f)
#define BPF_Fs22050_Fc78_B2 0.986204f
#define BPF_Fs24000_Fc78_A0 0.006268f
#define BPF_Fs24000_Fc78_A1 0.000000f
-#define BPF_Fs24000_Fc78_A2 -0.006268f
-#define BPF_Fs24000_Fc78_B1 -1.986904f
+#define BPF_Fs24000_Fc78_A2 (-0.006268f)
+#define BPF_Fs24000_Fc78_B1 (-1.986904f)
#define BPF_Fs24000_Fc78_B2 0.987318f
#define BPF_Fs32000_Fc78_A0 0.004709f
#define BPF_Fs32000_Fc78_A1 0.000000f
-#define BPF_Fs32000_Fc78_A2 -0.004709f
-#define BPF_Fs32000_Fc78_B1 -1.990240f
+#define BPF_Fs32000_Fc78_A2 (-0.004709f)
+#define BPF_Fs32000_Fc78_B1 (-1.990240f)
#define BPF_Fs32000_Fc78_B2 0.990473f
#define BPF_Fs44100_Fc78_A0 0.003421f
#define BPF_Fs44100_Fc78_A1 0.000000f
-#define BPF_Fs44100_Fc78_A2 -0.003421f
-#define BPF_Fs44100_Fc78_B1 -1.992955f
+#define BPF_Fs44100_Fc78_A2 (-0.003421f)
+#define BPF_Fs44100_Fc78_B1 (-1.992955f)
#define BPF_Fs44100_Fc78_B2 0.993078f
#define BPF_Fs48000_Fc78_A0 0.003144f
#define BPF_Fs48000_Fc78_A1 0.000000f
-#define BPF_Fs48000_Fc78_A2 -0.003144f
-#define BPF_Fs48000_Fc78_B1 -1.993535f
+#define BPF_Fs48000_Fc78_A2 (-0.003144f)
+#define BPF_Fs48000_Fc78_B1 (-1.993535f)
#define BPF_Fs48000_Fc78_B2 0.993639f
#ifdef HIGHER_FS
#define BPF_Fs96000_Fc78_A0 0.001555f
#define BPF_Fs96000_Fc78_A1 0.000000f
-#define BPF_Fs96000_Fc78_A2 -0.0015555f
-#define BPF_Fs96000_Fc78_B1 -1.996860f
+#define BPF_Fs96000_Fc78_A2 (-0.0015555f)
+#define BPF_Fs96000_Fc78_B1 (-1.996860f)
#define BPF_Fs96000_Fc78_B2 0.996891f
#define BPF_Fs192000_Fc78_A0 0.000778f
#define BPF_Fs192000_Fc78_A1 0.000000f
-#define BPF_Fs192000_Fc78_A2 -0.000778f
-#define BPF_Fs192000_Fc78_B1 -1.998437f
+#define BPF_Fs192000_Fc78_A2 (-0.000778f)
+#define BPF_Fs192000_Fc78_B1 (-1.998437f)
#define BPF_Fs192000_Fc78_B2 0.998444f
#endif
/* Coefficients for centre frequency 90Hz */
#define BPF_Fs8000_Fc90_A0 0.022760f
#define BPF_Fs8000_Fc90_A1 0.000000f
-#define BPF_Fs8000_Fc90_A2 -0.022760f
-#define BPF_Fs8000_Fc90_B1 -1.949073f
+#define BPF_Fs8000_Fc90_A2 (-0.022760f)
+#define BPF_Fs8000_Fc90_B1 (-1.949073f)
#define BPF_Fs8000_Fc90_B2 0.953953f
#define BPF_Fs11025_Fc90_A0 0.016619f
#define BPF_Fs11025_Fc90_A1 0.000000f
-#define BPF_Fs11025_Fc90_A2 -0.016619f
-#define BPF_Fs11025_Fc90_B1 -1.963791f
+#define BPF_Fs11025_Fc90_A2 (-0.016619f)
+#define BPF_Fs11025_Fc90_B1 (-1.963791f)
#define BPF_Fs11025_Fc90_B2 0.966377f
#define BPF_Fs12000_Fc90_A0 0.015289f
#define BPF_Fs12000_Fc90_A1 0.000000f
-#define BPF_Fs12000_Fc90_A2 -0.015289f
-#define BPF_Fs12000_Fc90_B1 -1.966882f
+#define BPF_Fs12000_Fc90_A2 (-0.015289f)
+#define BPF_Fs12000_Fc90_B1 (-1.966882f)
#define BPF_Fs12000_Fc90_B2 0.969067f
#define BPF_Fs16000_Fc90_A0 0.011511f
#define BPF_Fs16000_Fc90_A1 0.000000f
-#define BPF_Fs16000_Fc90_A2 -0.011511f
-#define BPF_Fs16000_Fc90_B1 -1.975477f
+#define BPF_Fs16000_Fc90_A2 (-0.011511f)
+#define BPF_Fs16000_Fc90_B1 (-1.975477f)
#define BPF_Fs16000_Fc90_B2 0.976711f
#define BPF_Fs22050_Fc90_A0 0.008379f
#define BPF_Fs22050_Fc90_A1 0.000000f
-#define BPF_Fs22050_Fc90_A2 -0.008379f
-#define BPF_Fs22050_Fc90_B1 -1.982395f
+#define BPF_Fs22050_Fc90_A2 (-0.008379f)
+#define BPF_Fs22050_Fc90_B1 (-1.982395f)
#define BPF_Fs22050_Fc90_B2 0.983047f
#define BPF_Fs24000_Fc90_A0 0.007704f
#define BPF_Fs24000_Fc90_A1 0.000000f
-#define BPF_Fs24000_Fc90_A2 -0.007704f
-#define BPF_Fs24000_Fc90_B1 -1.983863f
+#define BPF_Fs24000_Fc90_A2 (-0.007704f)
+#define BPF_Fs24000_Fc90_B1 (-1.983863f)
#define BPF_Fs24000_Fc90_B2 0.984414f
#define BPF_Fs32000_Fc90_A0 0.005789f
#define BPF_Fs32000_Fc90_A1 0.000000f
-#define BPF_Fs32000_Fc90_A2 -0.005789f
-#define BPF_Fs32000_Fc90_B1 -1.987977f
+#define BPF_Fs32000_Fc90_A2 (-0.005789f)
+#define BPF_Fs32000_Fc90_B1 (-1.987977f)
#define BPF_Fs32000_Fc90_B2 0.988288f
#define BPF_Fs44100_Fc90_A0 0.004207f
#define BPF_Fs44100_Fc90_A1 0.000000f
-#define BPF_Fs44100_Fc90_A2 -0.004207f
-#define BPF_Fs44100_Fc90_B1 -1.991324f
+#define BPF_Fs44100_Fc90_A2 (-0.004207f)
+#define BPF_Fs44100_Fc90_B1 (-1.991324f)
#define BPF_Fs44100_Fc90_B2 0.991488f
#define BPF_Fs48000_Fc90_A0 0.003867f
#define BPF_Fs48000_Fc90_A1 0.000000f
-#define BPF_Fs48000_Fc90_A2 -0.003867f
-#define BPF_Fs48000_Fc90_B1 -1.992038f
+#define BPF_Fs48000_Fc90_A2 (-0.003867f)
+#define BPF_Fs48000_Fc90_B1 (-1.992038f)
#define BPF_Fs48000_Fc90_B2 0.992177f
#ifdef HIGHER_FS
#define BPF_Fs96000_Fc90_A0 0.001913f
#define BPF_Fs96000_Fc90_A1 0.000000f
-#define BPF_Fs96000_Fc90_A2 -0.001913f
-#define BPF_Fs96000_Fc90_B1 -1.996134f
+#define BPF_Fs96000_Fc90_A2 (-0.001913f)
+#define BPF_Fs96000_Fc90_B1 (-1.996134f)
#define BPF_Fs96000_Fc90_B2 0.996174f
#define BPF_Fs192000_Fc90_A0 0.000958f
#define BPF_Fs192000_Fc90_A1 0.000000f
-#define BPF_Fs192000_Fc90_A2 -0.000958f
-#define BPF_Fs192000_Fc90_B1 -1.998075f
+#define BPF_Fs192000_Fc90_A2 (-0.000958f)
+#define BPF_Fs192000_Fc90_B1 (-1.998075f)
#define BPF_Fs192000_Fc90_B2 0.998085f
#endif
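
Editor's note: each HPF_*/BPF_* group above is one second-order (biquad) section selected by sample rate and centre frequency. Assuming the usual LVM sign convention, y[n] = A0*x[n] + A1*x[n-1] + A2*x[n-2] - B1*y[n-1] - B2*y[n-2], a direct-form sketch looks like this; the function and state names are illustrative, not from the library:

    // Illustrative direct-form I biquad using the 48 kHz / 55 Hz high-pass section.
    struct BiquadState {
        float x1 = 0.f, x2 = 0.f;   // previous inputs
        float y1 = 0.f, y2 = 0.f;   // previous outputs
    };

    static float hpf55Sample(float x, BiquadState &s) {
        const float y = HPF_Fs48000_Fc55_A0 * x
                      + HPF_Fs48000_Fc55_A1 * s.x1
                      + HPF_Fs48000_Fc55_A2 * s.x2
                      - HPF_Fs48000_Fc55_B1 * s.y1
                      - HPF_Fs48000_Fc55_B2 * s.y2;
        s.x2 = s.x1;  s.x1 = x;
        s.y2 = s.y1;  s.y1 = y;
        return y;
    }
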
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Coeffs.h b/media/libeffects/lvm/lib/Bundle/src/LVM_Coeffs.h
index 353560c..8c04847 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Coeffs.h
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Coeffs.h
@@ -69,55 +69,55 @@
#define HPF_Fs22050_Gain6_B2 0.000000
/* Gain = 7.000000 dB */
#define HPF_Fs22050_Gain7_A0 1.390177
-#define HPF_Fs22050_Gain7_A1 -0.020144
+#define HPF_Fs22050_Gain7_A1 (-0.020144)
#define HPF_Fs22050_Gain7_A2 0.000000
#define HPF_Fs22050_Gain7_B1 0.370033
#define HPF_Fs22050_Gain7_B2 0.000000
/* Gain = 8.000000 dB */
#define HPF_Fs22050_Gain8_A0 1.476219
-#define HPF_Fs22050_Gain8_A1 -0.106187
+#define HPF_Fs22050_Gain8_A1 (-0.106187)
#define HPF_Fs22050_Gain8_A2 0.000000
#define HPF_Fs22050_Gain8_B1 0.370033
#define HPF_Fs22050_Gain8_B2 0.000000
/* Gain = 9.000000 dB */
#define HPF_Fs22050_Gain9_A0 1.572761
-#define HPF_Fs22050_Gain9_A1 -0.202728
+#define HPF_Fs22050_Gain9_A1 (-0.202728)
#define HPF_Fs22050_Gain9_A2 0.000000
#define HPF_Fs22050_Gain9_B1 0.370033
#define HPF_Fs22050_Gain9_B2 0.000000
/* Gain = 10.000000 dB */
#define HPF_Fs22050_Gain10_A0 1.681082
-#define HPF_Fs22050_Gain10_A1 -0.311049
+#define HPF_Fs22050_Gain10_A1 (-0.311049)
#define HPF_Fs22050_Gain10_A2 0.000000
#define HPF_Fs22050_Gain10_B1 0.370033
#define HPF_Fs22050_Gain10_B2 0.000000
/* Gain = 11.000000 dB */
#define HPF_Fs22050_Gain11_A0 1.802620
-#define HPF_Fs22050_Gain11_A1 -0.432588
+#define HPF_Fs22050_Gain11_A1 (-0.432588)
#define HPF_Fs22050_Gain11_A2 0.000000
#define HPF_Fs22050_Gain11_B1 0.370033
#define HPF_Fs22050_Gain11_B2 0.000000
/* Gain = 12.000000 dB */
#define HPF_Fs22050_Gain12_A0 1.938989
-#define HPF_Fs22050_Gain12_A1 -0.568956
+#define HPF_Fs22050_Gain12_A1 (-0.568956)
#define HPF_Fs22050_Gain12_A2 0.000000
#define HPF_Fs22050_Gain12_B1 0.370033
#define HPF_Fs22050_Gain12_B2 0.000000
/* Gain = 13.000000 dB */
#define HPF_Fs22050_Gain13_A0 2.091997
-#define HPF_Fs22050_Gain13_A1 -0.721964
+#define HPF_Fs22050_Gain13_A1 (-0.721964)
#define HPF_Fs22050_Gain13_A2 0.000000
#define HPF_Fs22050_Gain13_B1 0.370033
#define HPF_Fs22050_Gain13_B2 0.000000
/* Gain = 14.000000 dB */
#define HPF_Fs22050_Gain14_A0 2.263674
-#define HPF_Fs22050_Gain14_A1 -0.893641
+#define HPF_Fs22050_Gain14_A1 (-0.893641)
#define HPF_Fs22050_Gain14_A2 0.000000
#define HPF_Fs22050_Gain14_B1 0.370033
#define HPF_Fs22050_Gain14_B2 0.000000
/* Gain = 15.000000 dB */
#define HPF_Fs22050_Gain15_A0 2.456300
-#define HPF_Fs22050_Gain15_A1 -1.086267
+#define HPF_Fs22050_Gain15_A1 (-1.086267)
#define HPF_Fs22050_Gain15_A2 0.000000
#define HPF_Fs22050_Gain15_B1 0.370033
#define HPF_Fs22050_Gain15_B2 0.000000
@@ -148,342 +148,342 @@
#define HPF_Fs24000_Gain4_B2 0.000000
/* Gain = 5.000000 dB */
#define HPF_Fs24000_Gain5_A0 1.284870
-#define HPF_Fs24000_Gain5_A1 -0.016921
+#define HPF_Fs24000_Gain5_A1 (-0.016921)
#define HPF_Fs24000_Gain5_A2 0.000000
#define HPF_Fs24000_Gain5_B1 0.267949
#define HPF_Fs24000_Gain5_B2 0.000000
/* Gain = 6.000000 dB */
#define HPF_Fs24000_Gain6_A0 1.364291
-#define HPF_Fs24000_Gain6_A1 -0.096342
+#define HPF_Fs24000_Gain6_A1 (-0.096342)
#define HPF_Fs24000_Gain6_A2 0.000000
#define HPF_Fs24000_Gain6_B1 0.267949
#define HPF_Fs24000_Gain6_B2 0.000000
/* Gain = 7.000000 dB */
#define HPF_Fs24000_Gain7_A0 1.453403
-#define HPF_Fs24000_Gain7_A1 -0.185454
+#define HPF_Fs24000_Gain7_A1 (-0.185454)
#define HPF_Fs24000_Gain7_A2 0.000000
#define HPF_Fs24000_Gain7_B1 0.267949
#define HPF_Fs24000_Gain7_B2 0.000000
/* Gain = 8.000000 dB */
#define HPF_Fs24000_Gain8_A0 1.553389
-#define HPF_Fs24000_Gain8_A1 -0.285440
+#define HPF_Fs24000_Gain8_A1 (-0.285440)
#define HPF_Fs24000_Gain8_A2 0.000000
#define HPF_Fs24000_Gain8_B1 0.267949
#define HPF_Fs24000_Gain8_B2 0.000000
/* Gain = 9.000000 dB */
#define HPF_Fs24000_Gain9_A0 1.665574
-#define HPF_Fs24000_Gain9_A1 -0.397625
+#define HPF_Fs24000_Gain9_A1 (-0.397625)
#define HPF_Fs24000_Gain9_A2 0.000000
#define HPF_Fs24000_Gain9_B1 0.267949
#define HPF_Fs24000_Gain9_B2 0.000000
/* Gain = 10.000000 dB */
#define HPF_Fs24000_Gain10_A0 1.791449
-#define HPF_Fs24000_Gain10_A1 -0.523499
+#define HPF_Fs24000_Gain10_A1 (-0.523499)
#define HPF_Fs24000_Gain10_A2 0.000000
#define HPF_Fs24000_Gain10_B1 0.267949
#define HPF_Fs24000_Gain10_B2 0.000000
/* Gain = 11.000000 dB */
#define HPF_Fs24000_Gain11_A0 1.932682
-#define HPF_Fs24000_Gain11_A1 -0.664733
+#define HPF_Fs24000_Gain11_A1 (-0.664733)
#define HPF_Fs24000_Gain11_A2 0.000000
#define HPF_Fs24000_Gain11_B1 0.267949
#define HPF_Fs24000_Gain11_B2 0.000000
/* Gain = 12.000000 dB */
#define HPF_Fs24000_Gain12_A0 2.091148
-#define HPF_Fs24000_Gain12_A1 -0.823199
+#define HPF_Fs24000_Gain12_A1 (-0.823199)
#define HPF_Fs24000_Gain12_A2 0.000000
#define HPF_Fs24000_Gain12_B1 0.267949
#define HPF_Fs24000_Gain12_B2 0.000000
/* Gain = 13.000000 dB */
#define HPF_Fs24000_Gain13_A0 2.268950
-#define HPF_Fs24000_Gain13_A1 -1.001001
+#define HPF_Fs24000_Gain13_A1 (-1.001001)
#define HPF_Fs24000_Gain13_A2 0.000000
#define HPF_Fs24000_Gain13_B1 0.267949
#define HPF_Fs24000_Gain13_B2 0.000000
/* Gain = 14.000000 dB */
#define HPF_Fs24000_Gain14_A0 2.468447
-#define HPF_Fs24000_Gain14_A1 -1.200498
+#define HPF_Fs24000_Gain14_A1 (-1.200498)
#define HPF_Fs24000_Gain14_A2 0.000000
#define HPF_Fs24000_Gain14_B1 0.267949
#define HPF_Fs24000_Gain14_B2 0.000000
/* Gain = 15.000000 dB */
#define HPF_Fs24000_Gain15_A0 2.692287
-#define HPF_Fs24000_Gain15_A1 -1.424338
+#define HPF_Fs24000_Gain15_A1 (-1.424338)
#define HPF_Fs24000_Gain15_A2 0.000000
#define HPF_Fs24000_Gain15_B1 0.267949
#define HPF_Fs24000_Gain15_B2 0.000000
/* Coefficients for sample rate 32000Hz */
/* Gain = 1.000000 dB */
#define HPF_Fs32000_Gain1_A0 1.061009
-#define HPF_Fs32000_Gain1_A1 -0.061009
+#define HPF_Fs32000_Gain1_A1 (-0.061009)
#define HPF_Fs32000_Gain1_A2 0.000000
-#define HPF_Fs32000_Gain1_B1 -0.000000
+#define HPF_Fs32000_Gain1_B1 (-0.000000)
#define HPF_Fs32000_Gain1_B2 0.000000
/* Gain = 2.000000 dB */
#define HPF_Fs32000_Gain2_A0 1.129463
-#define HPF_Fs32000_Gain2_A1 -0.129463
+#define HPF_Fs32000_Gain2_A1 (-0.129463)
#define HPF_Fs32000_Gain2_A2 0.000000
-#define HPF_Fs32000_Gain2_B1 -0.000000
+#define HPF_Fs32000_Gain2_B1 (-0.000000)
#define HPF_Fs32000_Gain2_B2 0.000000
/* Gain = 3.000000 dB */
#define HPF_Fs32000_Gain3_A0 1.206267
-#define HPF_Fs32000_Gain3_A1 -0.206267
+#define HPF_Fs32000_Gain3_A1 (-0.206267)
#define HPF_Fs32000_Gain3_A2 0.000000
-#define HPF_Fs32000_Gain3_B1 -0.000000
+#define HPF_Fs32000_Gain3_B1 (-0.000000)
#define HPF_Fs32000_Gain3_B2 0.000000
/* Gain = 4.000000 dB */
#define HPF_Fs32000_Gain4_A0 1.292447
-#define HPF_Fs32000_Gain4_A1 -0.292447
+#define HPF_Fs32000_Gain4_A1 (-0.292447)
#define HPF_Fs32000_Gain4_A2 0.000000
-#define HPF_Fs32000_Gain4_B1 -0.000000
+#define HPF_Fs32000_Gain4_B1 (-0.000000)
#define HPF_Fs32000_Gain4_B2 0.000000
/* Gain = 5.000000 dB */
#define HPF_Fs32000_Gain5_A0 1.389140
-#define HPF_Fs32000_Gain5_A1 -0.389140
+#define HPF_Fs32000_Gain5_A1 (-0.389140)
#define HPF_Fs32000_Gain5_A2 0.000000
-#define HPF_Fs32000_Gain5_B1 -0.000000
+#define HPF_Fs32000_Gain5_B1 (-0.000000)
#define HPF_Fs32000_Gain5_B2 0.000000
/* Gain = 6.000000 dB */
#define HPF_Fs32000_Gain6_A0 1.497631
-#define HPF_Fs32000_Gain6_A1 -0.497631
+#define HPF_Fs32000_Gain6_A1 (-0.497631)
#define HPF_Fs32000_Gain6_A2 0.000000
-#define HPF_Fs32000_Gain6_B1 -0.000000
+#define HPF_Fs32000_Gain6_B1 (-0.000000)
#define HPF_Fs32000_Gain6_B2 0.000000
/* Gain = 7.000000 dB */
#define HPF_Fs32000_Gain7_A0 1.619361
-#define HPF_Fs32000_Gain7_A1 -0.619361
+#define HPF_Fs32000_Gain7_A1 (-0.619361)
#define HPF_Fs32000_Gain7_A2 0.000000
-#define HPF_Fs32000_Gain7_B1 -0.000000
+#define HPF_Fs32000_Gain7_B1 (-0.000000)
#define HPF_Fs32000_Gain7_B2 0.000000
/* Gain = 8.000000 dB */
#define HPF_Fs32000_Gain8_A0 1.755943
-#define HPF_Fs32000_Gain8_A1 -0.755943
+#define HPF_Fs32000_Gain8_A1 (-0.755943)
#define HPF_Fs32000_Gain8_A2 0.000000
-#define HPF_Fs32000_Gain8_B1 -0.000000
+#define HPF_Fs32000_Gain8_B1 (-0.000000)
#define HPF_Fs32000_Gain8_B2 0.000000
/* Gain = 9.000000 dB */
#define HPF_Fs32000_Gain9_A0 1.909191
-#define HPF_Fs32000_Gain9_A1 -0.909191
+#define HPF_Fs32000_Gain9_A1 (-0.909191)
#define HPF_Fs32000_Gain9_A2 0.000000
-#define HPF_Fs32000_Gain9_B1 -0.000000
+#define HPF_Fs32000_Gain9_B1 (-0.000000)
#define HPF_Fs32000_Gain9_B2 0.000000
/* Gain = 10.000000 dB */
#define HPF_Fs32000_Gain10_A0 2.081139
-#define HPF_Fs32000_Gain10_A1 -1.081139
+#define HPF_Fs32000_Gain10_A1 (-1.081139)
#define HPF_Fs32000_Gain10_A2 0.000000
-#define HPF_Fs32000_Gain10_B1 -0.000000
+#define HPF_Fs32000_Gain10_B1 (-0.000000)
#define HPF_Fs32000_Gain10_B2 0.000000
/* Gain = 11.000000 dB */
#define HPF_Fs32000_Gain11_A0 2.274067
-#define HPF_Fs32000_Gain11_A1 -1.274067
+#define HPF_Fs32000_Gain11_A1 (-1.274067)
#define HPF_Fs32000_Gain11_A2 0.000000
-#define HPF_Fs32000_Gain11_B1 -0.000000
+#define HPF_Fs32000_Gain11_B1 (-0.000000)
#define HPF_Fs32000_Gain11_B2 0.000000
/* Gain = 12.000000 dB */
#define HPF_Fs32000_Gain12_A0 2.490536
-#define HPF_Fs32000_Gain12_A1 -1.490536
+#define HPF_Fs32000_Gain12_A1 (-1.490536)
#define HPF_Fs32000_Gain12_A2 0.000000
-#define HPF_Fs32000_Gain12_B1 -0.000000
+#define HPF_Fs32000_Gain12_B1 (-0.000000)
#define HPF_Fs32000_Gain12_B2 0.000000
/* Gain = 13.000000 dB */
#define HPF_Fs32000_Gain13_A0 2.733418
-#define HPF_Fs32000_Gain13_A1 -1.733418
+#define HPF_Fs32000_Gain13_A1 (-1.733418)
#define HPF_Fs32000_Gain13_A2 0.000000
-#define HPF_Fs32000_Gain13_B1 -0.000000
+#define HPF_Fs32000_Gain13_B1 (-0.000000)
#define HPF_Fs32000_Gain13_B2 0.000000
/* Gain = 14.000000 dB */
#define HPF_Fs32000_Gain14_A0 3.005936
-#define HPF_Fs32000_Gain14_A1 -2.005936
+#define HPF_Fs32000_Gain14_A1 (-2.005936)
#define HPF_Fs32000_Gain14_A2 0.000000
-#define HPF_Fs32000_Gain14_B1 -0.000000
+#define HPF_Fs32000_Gain14_B1 (-0.000000)
#define HPF_Fs32000_Gain14_B2 0.000000
/* Gain = 15.000000 dB */
#define HPF_Fs32000_Gain15_A0 3.311707
-#define HPF_Fs32000_Gain15_A1 -2.311707
+#define HPF_Fs32000_Gain15_A1 (-2.311707)
#define HPF_Fs32000_Gain15_A2 0.000000
-#define HPF_Fs32000_Gain15_B1 -0.000000
+#define HPF_Fs32000_Gain15_B1 (-0.000000)
#define HPF_Fs32000_Gain15_B2 0.000000
/* Coefficients for sample rate 44100Hz */
/* Gain = 1.000000 dB */
#define HPF_Fs44100_Gain1_A0 1.074364
-#define HPF_Fs44100_Gain1_A1 -0.293257
+#define HPF_Fs44100_Gain1_A1 (-0.293257)
#define HPF_Fs44100_Gain1_A2 0.000000
-#define HPF_Fs44100_Gain1_B1 -0.218894
+#define HPF_Fs44100_Gain1_B1 (-0.218894)
#define HPF_Fs44100_Gain1_B2 0.000000
/* Gain = 2.000000 dB */
#define HPF_Fs44100_Gain2_A0 1.157801
-#define HPF_Fs44100_Gain2_A1 -0.376695
+#define HPF_Fs44100_Gain2_A1 (-0.376695)
#define HPF_Fs44100_Gain2_A2 0.000000
-#define HPF_Fs44100_Gain2_B1 -0.218894
+#define HPF_Fs44100_Gain2_B1 (-0.218894)
#define HPF_Fs44100_Gain2_B2 0.000000
/* Gain = 3.000000 dB */
#define HPF_Fs44100_Gain3_A0 1.251420
-#define HPF_Fs44100_Gain3_A1 -0.470313
+#define HPF_Fs44100_Gain3_A1 (-0.470313)
#define HPF_Fs44100_Gain3_A2 0.000000
-#define HPF_Fs44100_Gain3_B1 -0.218894
+#define HPF_Fs44100_Gain3_B1 (-0.218894)
#define HPF_Fs44100_Gain3_B2 0.000000
/* Gain = 4.000000 dB */
#define HPF_Fs44100_Gain4_A0 1.356461
-#define HPF_Fs44100_Gain4_A1 -0.575355
+#define HPF_Fs44100_Gain4_A1 (-0.575355)
#define HPF_Fs44100_Gain4_A2 0.000000
-#define HPF_Fs44100_Gain4_B1 -0.218894
+#define HPF_Fs44100_Gain4_B1 (-0.218894)
#define HPF_Fs44100_Gain4_B2 0.000000
/* Gain = 5.000000 dB */
#define HPF_Fs44100_Gain5_A0 1.474320
-#define HPF_Fs44100_Gain5_A1 -0.693213
+#define HPF_Fs44100_Gain5_A1 (-0.693213)
#define HPF_Fs44100_Gain5_A2 0.000000
-#define HPF_Fs44100_Gain5_B1 -0.218894
+#define HPF_Fs44100_Gain5_B1 (-0.218894)
#define HPF_Fs44100_Gain5_B2 0.000000
/* Gain = 6.000000 dB */
#define HPF_Fs44100_Gain6_A0 1.606559
-#define HPF_Fs44100_Gain6_A1 -0.825453
+#define HPF_Fs44100_Gain6_A1 (-0.825453)
#define HPF_Fs44100_Gain6_A2 0.000000
-#define HPF_Fs44100_Gain6_B1 -0.218894
+#define HPF_Fs44100_Gain6_B1 (-0.218894)
#define HPF_Fs44100_Gain6_B2 0.000000
/* Gain = 7.000000 dB */
#define HPF_Fs44100_Gain7_A0 1.754935
-#define HPF_Fs44100_Gain7_A1 -0.973828
+#define HPF_Fs44100_Gain7_A1 (-0.973828)
#define HPF_Fs44100_Gain7_A2 0.000000
-#define HPF_Fs44100_Gain7_B1 -0.218894
+#define HPF_Fs44100_Gain7_B1 (-0.218894)
#define HPF_Fs44100_Gain7_B2 0.000000
/* Gain = 8.000000 dB */
#define HPF_Fs44100_Gain8_A0 1.921414
-#define HPF_Fs44100_Gain8_A1 -1.140308
+#define HPF_Fs44100_Gain8_A1 (-1.140308)
#define HPF_Fs44100_Gain8_A2 0.000000
-#define HPF_Fs44100_Gain8_B1 -0.218894
+#define HPF_Fs44100_Gain8_B1 (-0.218894)
#define HPF_Fs44100_Gain8_B2 0.000000
/* Gain = 9.000000 dB */
#define HPF_Fs44100_Gain9_A0 2.108208
-#define HPF_Fs44100_Gain9_A1 -1.327101
+#define HPF_Fs44100_Gain9_A1 (-1.327101)
#define HPF_Fs44100_Gain9_A2 0.000000
-#define HPF_Fs44100_Gain9_B1 -0.218894
+#define HPF_Fs44100_Gain9_B1 (-0.218894)
#define HPF_Fs44100_Gain9_B2 0.000000
/* Gain = 10.000000 dB */
#define HPF_Fs44100_Gain10_A0 2.317793
-#define HPF_Fs44100_Gain10_A1 -1.536687
+#define HPF_Fs44100_Gain10_A1 (-1.536687)
#define HPF_Fs44100_Gain10_A2 0.000000
-#define HPF_Fs44100_Gain10_B1 -0.218894
+#define HPF_Fs44100_Gain10_B1 (-0.218894)
#define HPF_Fs44100_Gain10_B2 0.000000
/* Gain = 11.000000 dB */
#define HPF_Fs44100_Gain11_A0 2.552952
-#define HPF_Fs44100_Gain11_A1 -1.771846
+#define HPF_Fs44100_Gain11_A1 (-1.771846)
#define HPF_Fs44100_Gain11_A2 0.000000
-#define HPF_Fs44100_Gain11_B1 -0.218894
+#define HPF_Fs44100_Gain11_B1 (-0.218894)
#define HPF_Fs44100_Gain11_B2 0.000000
/* Gain = 12.000000 dB */
#define HPF_Fs44100_Gain12_A0 2.816805
-#define HPF_Fs44100_Gain12_A1 -2.035698
+#define HPF_Fs44100_Gain12_A1 (-2.035698)
#define HPF_Fs44100_Gain12_A2 0.000000
-#define HPF_Fs44100_Gain12_B1 -0.218894
+#define HPF_Fs44100_Gain12_B1 (-0.218894)
#define HPF_Fs44100_Gain12_B2 0.000000
/* Gain = 13.000000 dB */
#define HPF_Fs44100_Gain13_A0 3.112852
-#define HPF_Fs44100_Gain13_A1 -2.331746
+#define HPF_Fs44100_Gain13_A1 (-2.331746)
#define HPF_Fs44100_Gain13_A2 0.000000
-#define HPF_Fs44100_Gain13_B1 -0.218894
+#define HPF_Fs44100_Gain13_B1 (-0.218894)
#define HPF_Fs44100_Gain13_B2 0.000000
/* Gain = 14.000000 dB */
#define HPF_Fs44100_Gain14_A0 3.445023
-#define HPF_Fs44100_Gain14_A1 -2.663916
+#define HPF_Fs44100_Gain14_A1 (-2.663916)
#define HPF_Fs44100_Gain14_A2 0.000000
-#define HPF_Fs44100_Gain14_B1 -0.218894
+#define HPF_Fs44100_Gain14_B1 (-0.218894)
#define HPF_Fs44100_Gain14_B2 0.000000
/* Gain = 15.000000 dB */
#define HPF_Fs44100_Gain15_A0 3.817724
-#define HPF_Fs44100_Gain15_A1 -3.036618
+#define HPF_Fs44100_Gain15_A1 (-3.036618)
#define HPF_Fs44100_Gain15_A2 0.000000
-#define HPF_Fs44100_Gain15_B1 -0.218894
+#define HPF_Fs44100_Gain15_B1 (-0.218894)
#define HPF_Fs44100_Gain15_B2 0.000000
/* Coefficients for sample rate 48000Hz */
/* Gain = 1.000000 dB */
#define HPF_Fs48000_Gain1_A0 1.077357
-#define HPF_Fs48000_Gain1_A1 -0.345306
+#define HPF_Fs48000_Gain1_A1 (-0.345306)
#define HPF_Fs48000_Gain1_A2 0.000000
-#define HPF_Fs48000_Gain1_B1 -0.267949
+#define HPF_Fs48000_Gain1_B1 (-0.267949)
#define HPF_Fs48000_Gain1_B2 0.000000
/* Gain = 2.000000 dB */
#define HPF_Fs48000_Gain2_A0 1.164152
-#define HPF_Fs48000_Gain2_A1 -0.432101
+#define HPF_Fs48000_Gain2_A1 (-0.432101)
#define HPF_Fs48000_Gain2_A2 0.000000
-#define HPF_Fs48000_Gain2_B1 -0.267949
+#define HPF_Fs48000_Gain2_B1 (-0.267949)
#define HPF_Fs48000_Gain2_B2 0.000000
/* Gain = 3.000000 dB */
#define HPF_Fs48000_Gain3_A0 1.261538
-#define HPF_Fs48000_Gain3_A1 -0.529488
+#define HPF_Fs48000_Gain3_A1 (-0.529488)
#define HPF_Fs48000_Gain3_A2 0.000000
-#define HPF_Fs48000_Gain3_B1 -0.267949
+#define HPF_Fs48000_Gain3_B1 (-0.267949)
#define HPF_Fs48000_Gain3_B2 0.000000
/* Gain = 4.000000 dB */
#define HPF_Fs48000_Gain4_A0 1.370807
-#define HPF_Fs48000_Gain4_A1 -0.638757
+#define HPF_Fs48000_Gain4_A1 (-0.638757)
#define HPF_Fs48000_Gain4_A2 0.000000
-#define HPF_Fs48000_Gain4_B1 -0.267949
+#define HPF_Fs48000_Gain4_B1 (-0.267949)
#define HPF_Fs48000_Gain4_B2 0.000000
/* Gain = 5.000000 dB */
#define HPF_Fs48000_Gain5_A0 1.493409
-#define HPF_Fs48000_Gain5_A1 -0.761359
+#define HPF_Fs48000_Gain5_A1 (-0.761359)
#define HPF_Fs48000_Gain5_A2 0.000000
-#define HPF_Fs48000_Gain5_B1 -0.267949
+#define HPF_Fs48000_Gain5_B1 (-0.267949)
#define HPF_Fs48000_Gain5_B2 0.000000
/* Gain = 6.000000 dB */
#define HPF_Fs48000_Gain6_A0 1.630971
-#define HPF_Fs48000_Gain6_A1 -0.898920
+#define HPF_Fs48000_Gain6_A1 (-0.898920)
#define HPF_Fs48000_Gain6_A2 0.000000
-#define HPF_Fs48000_Gain6_B1 -0.267949
+#define HPF_Fs48000_Gain6_B1 (-0.267949)
#define HPF_Fs48000_Gain6_B2 0.000000
/* Gain = 7.000000 dB */
#define HPF_Fs48000_Gain7_A0 1.785318
-#define HPF_Fs48000_Gain7_A1 -1.053267
+#define HPF_Fs48000_Gain7_A1 (-1.053267)
#define HPF_Fs48000_Gain7_A2 0.000000
-#define HPF_Fs48000_Gain7_B1 -0.267949
+#define HPF_Fs48000_Gain7_B1 (-0.267949)
#define HPF_Fs48000_Gain7_B2 0.000000
/* Gain = 8.000000 dB */
#define HPF_Fs48000_Gain8_A0 1.958498
-#define HPF_Fs48000_Gain8_A1 -1.226447
+#define HPF_Fs48000_Gain8_A1 (-1.226447)
#define HPF_Fs48000_Gain8_A2 0.000000
-#define HPF_Fs48000_Gain8_B1 -0.267949
+#define HPF_Fs48000_Gain8_B1 (-0.267949)
#define HPF_Fs48000_Gain8_B2 0.000000
/* Gain = 9.000000 dB */
#define HPF_Fs48000_Gain9_A0 2.152809
-#define HPF_Fs48000_Gain9_A1 -1.420758
+#define HPF_Fs48000_Gain9_A1 (-1.420758)
#define HPF_Fs48000_Gain9_A2 0.000000
-#define HPF_Fs48000_Gain9_B1 -0.267949
+#define HPF_Fs48000_Gain9_B1 (-0.267949)
#define HPF_Fs48000_Gain9_B2 0.000000
/* Gain = 10.000000 dB */
#define HPF_Fs48000_Gain10_A0 2.370829
-#define HPF_Fs48000_Gain10_A1 -1.638778
+#define HPF_Fs48000_Gain10_A1 (-1.638778)
#define HPF_Fs48000_Gain10_A2 0.000000
-#define HPF_Fs48000_Gain10_B1 -0.267949
+#define HPF_Fs48000_Gain10_B1 (-0.267949)
#define HPF_Fs48000_Gain10_B2 0.000000
/* Gain = 11.000000 dB */
#define HPF_Fs48000_Gain11_A0 2.615452
-#define HPF_Fs48000_Gain11_A1 -1.883401
+#define HPF_Fs48000_Gain11_A1 (-1.883401)
#define HPF_Fs48000_Gain11_A2 0.000000
-#define HPF_Fs48000_Gain11_B1 -0.267949
+#define HPF_Fs48000_Gain11_B1 (-0.267949)
#define HPF_Fs48000_Gain11_B2 0.000000
/* Gain = 12.000000 dB */
#define HPF_Fs48000_Gain12_A0 2.889924
-#define HPF_Fs48000_Gain12_A1 -2.157873
+#define HPF_Fs48000_Gain12_A1 (-2.157873)
#define HPF_Fs48000_Gain12_A2 0.000000
-#define HPF_Fs48000_Gain12_B1 -0.267949
+#define HPF_Fs48000_Gain12_B1 (-0.267949)
#define HPF_Fs48000_Gain12_B2 0.000000
/* Gain = 13.000000 dB */
#define HPF_Fs48000_Gain13_A0 3.197886
-#define HPF_Fs48000_Gain13_A1 -2.465835
+#define HPF_Fs48000_Gain13_A1 (-2.465835)
#define HPF_Fs48000_Gain13_A2 0.000000
-#define HPF_Fs48000_Gain13_B1 -0.267949
+#define HPF_Fs48000_Gain13_B1 (-0.267949)
#define HPF_Fs48000_Gain13_B2 0.000000
/* Gain = 14.000000 dB */
#define HPF_Fs48000_Gain14_A0 3.543425
-#define HPF_Fs48000_Gain14_A1 -2.811374
+#define HPF_Fs48000_Gain14_A1 (-2.811374)
#define HPF_Fs48000_Gain14_A2 0.000000
-#define HPF_Fs48000_Gain14_B1 -0.267949
+#define HPF_Fs48000_Gain14_B1 (-0.267949)
#define HPF_Fs48000_Gain14_B2 0.000000
/* Gain = 15.000000 dB */
#define HPF_Fs48000_Gain15_A0 3.931127
-#define HPF_Fs48000_Gain15_A1 -3.199076
+#define HPF_Fs48000_Gain15_A1 (-3.199076)
#define HPF_Fs48000_Gain15_A2 0.000000
-#define HPF_Fs48000_Gain15_B1 -0.267949
+#define HPF_Fs48000_Gain15_B1 (-0.267949)
#define HPF_Fs48000_Gain15_B2 0.000000
#ifdef HIGHER_FS
@@ -491,185 +491,185 @@
/* Coefficients for sample rate 96000Hz */
/* Gain = 1.000000 dB */
#define HPF_Fs96000_Gain1_A0 1.096233
-#define HPF_Fs96000_Gain1_A1 -0.673583
+#define HPF_Fs96000_Gain1_A1 (-0.673583)
#define HPF_Fs96000_Gain1_A2 0.000000
-#define HPF_Fs96000_Gain1_B1 -0.577350
+#define HPF_Fs96000_Gain1_B1 (-0.577350)
#define HPF_Fs96000_Gain1_B2 0.000000
/* Gain = 2.000000 dB */
#define HPF_Fs96000_Gain2_A0 1.204208
-#define HPF_Fs96000_Gain2_A1 -0.781558
+#define HPF_Fs96000_Gain2_A1 (-0.781558)
#define HPF_Fs96000_Gain2_A2 0.000000
-#define HPF_Fs96000_Gain2_B1 -0.577350
+#define HPF_Fs96000_Gain2_B1 (-0.577350)
#define HPF_Fs96000_Gain2_B2 0.000000
/* Gain = 3.000000 dB */
#define HPF_Fs96000_Gain3_A0 1.325358
-#define HPF_Fs96000_Gain3_A1 -0.902708
+#define HPF_Fs96000_Gain3_A1 (-0.902708)
#define HPF_Fs96000_Gain3_A2 0.000000
-#define HPF_Fs96000_Gain3_B1 -0.577350
+#define HPF_Fs96000_Gain3_B1 (-0.577350)
#define HPF_Fs96000_Gain3_B2 0.000000
/* Gain = 4.000000 dB */
#define HPF_Fs96000_Gain4_A0 1.461291
-#define HPF_Fs96000_Gain4_A1 -1.038641
+#define HPF_Fs96000_Gain4_A1 (-1.038641)
#define HPF_Fs96000_Gain4_A2 0.000000
-#define HPF_Fs96000_Gain4_B1 -0.577350
+#define HPF_Fs96000_Gain4_B1 (-0.577350)
#define HPF_Fs96000_Gain4_B2 0.000000
/* Gain = 5.000000 dB */
#define HPF_Fs96000_Gain5_A0 1.613810
-#define HPF_Fs96000_Gain5_A1 -1.191160
+#define HPF_Fs96000_Gain5_A1 (-1.191160)
#define HPF_Fs96000_Gain5_A2 0.000000
-#define HPF_Fs96000_Gain5_B1 -0.577350
+#define HPF_Fs96000_Gain5_B1 (-0.577350)
#define HPF_Fs96000_Gain5_B2 0.000000
/* Gain = 6.000000 dB */
#define HPF_Fs96000_Gain6_A0 1.784939
-#define HPF_Fs96000_Gain6_A1 -1.362289
+#define HPF_Fs96000_Gain6_A1 (-1.362289)
#define HPF_Fs96000_Gain6_A2 0.000000
-#define HPF_Fs96000_Gain6_B1 -0.577350
+#define HPF_Fs96000_Gain6_B1 (-0.577350)
#define HPF_Fs96000_Gain6_B2 0.000000
/* Gain = 7.000000 dB */
#define HPF_Fs96000_Gain7_A0 1.976949
-#define HPF_Fs96000_Gain7_A1 -1.554299
+#define HPF_Fs96000_Gain7_A1 (-1.554299)
#define HPF_Fs96000_Gain7_A2 0.000000
-#define HPF_Fs96000_Gain7_B1 -0.577350
+#define HPF_Fs96000_Gain7_B1 (-0.577350)
#define HPF_Fs96000_Gain7_B2 0.000000
/* Gain = 8.000000 dB */
#define HPF_Fs96000_Gain8_A0 2.192387
-#define HPF_Fs96000_Gain8_A1 -1.769738
+#define HPF_Fs96000_Gain8_A1 (-1.769738)
#define HPF_Fs96000_Gain8_A2 0.000000
-#define HPF_Fs96000_Gain8_B1 -0.577350
+#define HPF_Fs96000_Gain8_B1 (-0.577350)
#define HPF_Fs96000_Gain8_B2 0.000000
/* Gain = 9.000000 dB */
#define HPF_Fs96000_Gain9_A0 2.434113
-#define HPF_Fs96000_Gain9_A1 -2.011464
+#define HPF_Fs96000_Gain9_A1 (-2.011464)
#define HPF_Fs96000_Gain9_A2 0.000000
-#define HPF_Fs96000_Gain9_B1 -0.577350
+#define HPF_Fs96000_Gain9_B1 (-0.577350)
#define HPF_Fs96000_Gain9_B2 0.000000
/* Gain = 10.000000 dB */
#define HPF_Fs96000_Gain10_A0 2.705335
-#define HPF_Fs96000_Gain10_A1 -2.282685
+#define HPF_Fs96000_Gain10_A1 (-2.282685)
#define HPF_Fs96000_Gain10_A2 0.000000
-#define HPF_Fs96000_Gain10_B1 -0.577350
+#define HPF_Fs96000_Gain10_B1 (-0.577350)
#define HPF_Fs96000_Gain10_B2 0.000000
/* Gain = 11.000000 dB */
#define HPF_Fs96000_Gain11_A0 3.009650
-#define HPF_Fs96000_Gain11_A1 -2.587000
+#define HPF_Fs96000_Gain11_A1 (-2.587000)
#define HPF_Fs96000_Gain11_A2 0.000000
-#define HPF_Fs96000_Gain11_B1 -0.577350
+#define HPF_Fs96000_Gain11_B1 (-0.577350)
#define HPF_Fs96000_Gain11_B2 0.000000
/* Gain = 12.000000 dB */
#define HPF_Fs96000_Gain12_A0 3.351097
-#define HPF_Fs96000_Gain12_A1 -2.928447
+#define HPF_Fs96000_Gain12_A1 (-2.928447)
#define HPF_Fs96000_Gain12_A2 0.000000
-#define HPF_Fs96000_Gain12_B1 -0.577350
+#define HPF_Fs96000_Gain12_B1 (-0.577350)
#define HPF_Fs96000_Gain12_B2 0.000000
/* Gain = 13.000000 dB */
#define HPF_Fs96000_Gain13_A0 3.734207
-#define HPF_Fs96000_Gain13_A1 -3.311558
+#define HPF_Fs96000_Gain13_A1 (-3.311558)
#define HPF_Fs96000_Gain13_A2 0.000000
-#define HPF_Fs96000_Gain13_B1 -0.577350
+#define HPF_Fs96000_Gain13_B1 (-0.577350)
#define HPF_Fs96000_Gain13_B2 0.000000
/* Gain = 14.000000 dB */
#define HPF_Fs96000_Gain14_A0 4.164064
-#define HPF_Fs96000_Gain14_A1 -3.741414
+#define HPF_Fs96000_Gain14_A1 (-3.741414)
#define HPF_Fs96000_Gain14_A2 0.000000
-#define HPF_Fs96000_Gain14_B1 -0.577350
+#define HPF_Fs96000_Gain14_B1 (-0.577350)
#define HPF_Fs96000_Gain14_B2 0.000000
/* Gain = 15.000000 dB */
#define HPF_Fs96000_Gain15_A0 4.646371
-#define HPF_Fs96000_Gain15_A1 -4.223721
+#define HPF_Fs96000_Gain15_A1 (-4.223721)
#define HPF_Fs96000_Gain15_A2 0.000000
-#define HPF_Fs96000_Gain15_B1 -0.577350
+#define HPF_Fs96000_Gain15_B1 (-0.577350)
#define HPF_Fs96000_Gain15_B2 0.000000
/* Coefficients for sample rate 192000Hz */
/* Gain = 1.000000 dB */
#define HPF_Fs192000_Gain1_A0 1.107823
-#define HPF_Fs192000_Gain1_A1 -0.875150
+#define HPF_Fs192000_Gain1_A1 (-0.875150)
#define HPF_Fs192000_Gain1_A2 0.000000
-#define HPF_Fs192000_Gain1_B1 -0.767327
+#define HPF_Fs192000_Gain1_B1 (-0.767327)
#define HPF_Fs192000_Gain1_B2 0.000000
/* Gain = 2.000000 dB */
#define HPF_Fs192000_Gain2_A0 1.228803
-#define HPF_Fs192000_Gain2_A1 -0.996130
+#define HPF_Fs192000_Gain2_A1 (-0.996130)
#define HPF_Fs192000_Gain2_A2 0.000000
-#define HPF_Fs192000_Gain2_B1 -0.767327
+#define HPF_Fs192000_Gain2_B1 (-0.767327)
#define HPF_Fs192000_Gain2_B2 0.000000
/* Gain = 3.000000 dB */
#define HPF_Fs192000_Gain3_A0 1.364544
-#define HPF_Fs192000_Gain3_A1 -1.131871
+#define HPF_Fs192000_Gain3_A1 (-1.131871)
#define HPF_Fs192000_Gain3_A2 0.000000
-#define HPF_Fs192000_Gain3_B1 -0.767327
+#define HPF_Fs192000_Gain3_B1 (-0.767327)
#define HPF_Fs192000_Gain3_B2 0.000000
/* Gain = 4.000000 dB */
#define HPF_Fs192000_Gain4_A0 1.516849
-#define HPF_Fs192000_Gain4_A1 -1.284176
+#define HPF_Fs192000_Gain4_A1 (-1.284176)
#define HPF_Fs192000_Gain4_A2 0.000000
-#define HPF_Fs192000_Gain4_B1 -0.767327
+#define HPF_Fs192000_Gain4_B1 (-0.767327)
#define HPF_Fs192000_Gain4_B2 0.000000
/* Gain = 5.000000 dB */
#define HPF_Fs192000_Gain5_A0 1.687737
-#define HPF_Fs192000_Gain5_A1 -1.455064
+#define HPF_Fs192000_Gain5_A1 (-1.455064)
#define HPF_Fs192000_Gain5_A2 0.000000
-#define HPF_Fs192000_Gain5_B1 -0.767327
+#define HPF_Fs192000_Gain5_B1 (-0.767327)
#define HPF_Fs192000_Gain5_B2 0.000000
/* Gain = 6.000000 dB */
#define HPF_Fs192000_Gain6_A0 1.879477
-#define HPF_Fs192000_Gain6_A1 -1.646804
+#define HPF_Fs192000_Gain6_A1 (-1.646804)
#define HPF_Fs192000_Gain6_A2 0.000000
-#define HPF_Fs192000_Gain6_B1 -0.767327
+#define HPF_Fs192000_Gain6_B1 (-0.767327)
#define HPF_Fs192000_Gain6_B2 0.000000
/* Gain = 7.000000 dB */
#define HPF_Fs192000_Gain7_A0 2.094613
-#define HPF_Fs192000_Gain7_A1 -1.861940
+#define HPF_Fs192000_Gain7_A1 (-1.861940)
#define HPF_Fs192000_Gain7_A2 0.000000
-#define HPF_Fs192000_Gain7_B1 -0.767327
+#define HPF_Fs192000_Gain7_B1 (-0.767327)
#define HPF_Fs192000_Gain7_B2 0.000000
/* Gain = 8.000000 dB */
#define HPF_Fs192000_Gain8_A0 2.335999
-#define HPF_Fs192000_Gain8_A1 -2.103326
+#define HPF_Fs192000_Gain8_A1 (-2.103326)
#define HPF_Fs192000_Gain8_A2 0.000000
-#define HPF_Fs192000_Gain8_B1 -0.767327
+#define HPF_Fs192000_Gain8_B1 (-0.767327)
#define HPF_Fs192000_Gain8_B2 0.000000
/* Gain = 9.000000 dB */
#define HPF_Fs192000_Gain9_A0 2.606839
-#define HPF_Fs192000_Gain9_A1 -2.374166
+#define HPF_Fs192000_Gain9_A1 (-2.374166)
#define HPF_Fs192000_Gain9_A2 0.000000
-#define HPF_Fs192000_Gain9_B1 -0.767327
+#define HPF_Fs192000_Gain9_B1 (-0.767327)
#define HPF_Fs192000_Gain9_B2 0.000000
/* Gain = 10.000000 dB */
#define HPF_Fs192000_Gain10_A0 2.910726
-#define HPF_Fs192000_Gain10_A1 -2.678053
+#define HPF_Fs192000_Gain10_A1 (-2.678053)
#define HPF_Fs192000_Gain10_A2 0.000000
-#define HPF_Fs192000_Gain10_B1 -0.767327
+#define HPF_Fs192000_Gain10_B1 (-0.767327)
#define HPF_Fs192000_Gain10_B2 0.000000
/* Gain = 11.000000 dB */
#define HPF_Fs192000_Gain11_A0 3.251693
-#define HPF_Fs192000_Gain11_A1 -3.019020
+#define HPF_Fs192000_Gain11_A1 (-3.019020)
#define HPF_Fs192000_Gain11_A2 0.000000
-#define HPF_Fs192000_Gain11_B1 -0.767327
+#define HPF_Fs192000_Gain11_B1 (-0.767327)
#define HPF_Fs192000_Gain11_B2 0.000000
/* Gain = 12.000000 dB */
#define HPF_Fs192000_Gain12_A0 3.634264
-#define HPF_Fs192000_Gain12_A1 -3.401591
+#define HPF_Fs192000_Gain12_A1 (-3.401591)
#define HPF_Fs192000_Gain12_A2 0.000000
-#define HPF_Fs192000_Gain12_B1 -0.767327
+#define HPF_Fs192000_Gain12_B1 (-0.767327)
#define HPF_Fs192000_Gain12_B2 0.000000
/* Gain = 13.000000 dB */
#define HPF_Fs192000_Gain13_A0 4.063516
-#define HPF_Fs192000_Gain13_A1 -3.830843
+#define HPF_Fs192000_Gain13_A1 (-3.830843)
#define HPF_Fs192000_Gain13_A2 0.000000
-#define HPF_Fs192000_Gain13_B1 -0.767327
+#define HPF_Fs192000_Gain13_B1 (-0.767327)
#define HPF_Fs192000_Gain13_B2 0.000000
/* Gain = 14.000000 dB */
#define HPF_Fs192000_Gain14_A0 4.545145
-#define HPF_Fs192000_Gain14_A1 -4.312472
+#define HPF_Fs192000_Gain14_A1 (-4.312472)
#define HPF_Fs192000_Gain14_A2 0.000000
-#define HPF_Fs192000_Gain14_B1 -0.767327
+#define HPF_Fs192000_Gain14_B1 (-0.767327)
#define HPF_Fs192000_Gain14_B2 0.000000
/* Gain = 15.000000 dB */
#define HPF_Fs192000_Gain15_A0 5.085542
-#define HPF_Fs192000_Gain15_A1 -4.852868
+#define HPF_Fs192000_Gain15_A1 (-4.852868)
#define HPF_Fs192000_Gain15_A2 0.000000
-#define HPF_Fs192000_Gain15_B1 -0.767327
+#define HPF_Fs192000_Gain15_B1 (-0.767327)
#define HPF_Fs192000_Gain15_B2 0.000000
#endif
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
index cb15b60..ea16072 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
@@ -44,9 +44,6 @@
#define LVM_MAXINT_8 127 /* Maximum positive integer size */
#define LVM_MAXINT_16 32767
-#ifdef BUILD_FLOAT
-#define LVM_MAXFLOAT 1.0f
-#endif
#define LVM_MAXINT_32 2147483647
#define LVM_MAXENUM 2147483647
@@ -99,8 +96,32 @@
typedef uint32_t LVM_UINT32; /* Unsigned 32-bit word */
#ifdef BUILD_FLOAT
-typedef float LVM_FLOAT; /* single precission floating point*/
-#endif
+
+#define LVM_MAXFLOAT 1.f
+
+typedef float LVM_FLOAT; /* single precision floating point */
+
+// If NATIVE_FLOAT_BUFFER is defined, we expose effects in floating point format;
+// otherwise we expose 16-bit integer buffers and translate to float for the effect libraries.
+// Hence, NATIVE_FLOAT_BUFFER should only be enabled under BUILD_FLOAT compilation.
+
+#define NATIVE_FLOAT_BUFFER
+
+#endif // BUILD_FLOAT
+
+// Select whether we expose int16_t or float buffers.
+#ifdef NATIVE_FLOAT_BUFFER
+
+#define EFFECT_BUFFER_FORMAT AUDIO_FORMAT_PCM_FLOAT
+typedef float effect_buffer_t;
+
+#else // NATIVE_FLOAT_BUFFER
+
+#define EFFECT_BUFFER_FORMAT AUDIO_FORMAT_PCM_16_BIT
+typedef int16_t effect_buffer_t;
+
+#endif // NATIVE_FLOAT_BUFFER
+
/****************************************************************************************/
/* */
/* Standard Enumerated types */
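The LVM_Types.h change above selects which buffer format the wrapper exposes: normalized float samples when NATIVE_FLOAT_BUFFER is defined, int16 otherwise, with conversion done by the audio_utils primitives used later in this patch. The following is a minimal standalone sketch of that scaling; the helper names are local to the example and are not the audio_utils API (the patch itself uses memcpy_to_float_from_i16() / memcpy_to_i16_from_float()).

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical local helpers mirroring the int16 <-> float scaling.
static void i16ToFloat(const int16_t *src, float *dst, size_t n) {
    for (size_t i = 0; i < n; ++i)
        dst[i] = src[i] / 32768.f;                    // Q15 full scale maps to just under 1.0f
}

static void floatToI16(const float *src, int16_t *dst, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        float s = src[i] * 32768.f;
        s = std::min(std::max(s, -32768.f), 32767.f); // saturate before narrowing
        dst[i] = static_cast<int16_t>(s);
    }
}

int main() {
    const std::vector<int16_t> in = {0, 16384, -32768, 32767};
    std::vector<float> asFloat(in.size());
    std::vector<int16_t> back(in.size());
    i16ToFloat(in.data(), asFloat.data(), in.size());
    floatToI16(asFloat.data(), back.data(), back.size());
    for (size_t i = 0; i < in.size(); ++i)
        std::printf("%6d -> %+.6f -> %6d\n", in[i], asFloat[i], back[i]);
    return 0;
}

With NATIVE_FLOAT_BUFFER enabled, no such conversion is needed and the effect library consumes the float buffers directly.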
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Coeffs.h b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Coeffs.h
index f0deb6c..42ea46f 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Coeffs.h
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Coeffs.h
@@ -26,21 +26,21 @@
/* */
/************************************************************************************/
#ifdef BUILD_FLOAT
-#define LVEQNB_Gain_Neg15_dB -0.822172f
-#define LVEQNB_Gain_Neg14_dB -0.800474f
-#define LVEQNB_Gain_Neg13_dB -0.776128f
-#define LVEQNB_Gain_Neg12_dB -0.748811f
-#define LVEQNB_Gain_Neg11_dB -0.718162f
-#define LVEQNB_Gain_Neg10_dB -0.683772f
-#define LVEQNB_Gain_Neg9_dB -0.645187f
-#define LVEQNB_Gain_Neg8_dB -0.601893f
-#define LVEQNB_Gain_Neg7_dB -0.553316f
-#define LVEQNB_Gain_Neg6_dB -0.498813f
-#define LVEQNB_Gain_Neg5_dB -0.437659f
-#define LVEQNB_Gain_Neg4_dB -0.369043f
-#define LVEQNB_Gain_Neg3_dB -0.292054f
-#define LVEQNB_Gain_Neg2_dB -0.205672f
-#define LVEQNB_Gain_Neg1_dB -0.108749f
+#define LVEQNB_Gain_Neg15_dB (-0.822172f)
+#define LVEQNB_Gain_Neg14_dB (-0.800474f)
+#define LVEQNB_Gain_Neg13_dB (-0.776128f)
+#define LVEQNB_Gain_Neg12_dB (-0.748811f)
+#define LVEQNB_Gain_Neg11_dB (-0.718162f)
+#define LVEQNB_Gain_Neg10_dB (-0.683772f)
+#define LVEQNB_Gain_Neg9_dB (-0.645187f)
+#define LVEQNB_Gain_Neg8_dB (-0.601893f)
+#define LVEQNB_Gain_Neg7_dB (-0.553316f)
+#define LVEQNB_Gain_Neg6_dB (-0.498813f)
+#define LVEQNB_Gain_Neg5_dB (-0.437659f)
+#define LVEQNB_Gain_Neg4_dB (-0.369043f)
+#define LVEQNB_Gain_Neg3_dB (-0.292054f)
+#define LVEQNB_Gain_Neg2_dB (-0.205672f)
+#define LVEQNB_Gain_Neg1_dB (-0.108749f)
#define LVEQNB_Gain_0_dB 0.000000f
#define LVEQNB_Gain_1_dB 0.122018f
#define LVEQNB_Gain_2_dB 0.258925f
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
index c290aec..7b0f341 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
@@ -430,7 +430,15 @@
}
- if(bChange){
+ // During operating mode transition, there is a race condition where the mode
+ // is still LVEQNB_ON, but the effect is considered disabled in the upper layers.
+ // modeChange handles this special race condition.
+ const int /* bool */ modeChange = pParams->OperatingMode != OperatingModeSave
+ || (OperatingModeSave == LVEQNB_ON
+ && pInstance->bInOperatingModeTransition
+ && LVC_Mixer_GetTarget(&pInstance->BypassMixer.MixerStream[0]) == 0);
+
+ if (bChange || modeChange) {
/*
* If the sample rate has changed clear the history
@@ -462,8 +470,7 @@
LVEQNB_SetCoefficients(pInstance); /* Instance pointer */
}
- if(pParams->OperatingMode != OperatingModeSave)
- {
+ if (modeChange) {
if(pParams->OperatingMode == LVEQNB_ON)
{
#ifdef BUILD_FLOAT
@@ -479,6 +486,8 @@
else
{
/* Stay on the ON operating mode until the transition is done */
+ // This may introduce a state race condition if the effect is enabled again
+ // while in transition. This is fixed in the modeChange logic.
pInstance->Params.OperatingMode = LVEQNB_ON;
#ifdef BUILD_FLOAT
LVC_Mixer_SetTarget(&pInstance->BypassMixer.MixerStream[0], 0.0f);
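The LVEQNB_Control.c hunk above folds the bypass-transition race into a single predicate. Below is a small, self-contained restatement of that condition for illustration; the types are simplified stand-ins, and the real code reads the mixer target via LVC_Mixer_GetTarget().

#include <cstdio>

enum OperatingMode { EQNB_OFF, EQNB_ON };

// A parameter update counts as a mode change either when the requested mode
// differs from the saved one, or when the effect is nominally ON but already
// ramping toward bypass (mixer target at 0) and gets re-enabled mid-transition.
static bool isModeChange(OperatingMode requested, OperatingMode saved,
                         bool inTransition, float bypassMixerTarget) {
    return requested != saved
            || (saved == EQNB_ON && inTransition && bypassMixerTarget == 0.f);
}

int main() {
    // Enabled again while still ramping down to bypass: treated as a change.
    std::printf("%d\n", isModeChange(EQNB_ON, EQNB_ON, true, 0.f));   // 1
    // Steady-state ON, no transition in progress: not a change.
    std::printf("%d\n", isModeChange(EQNB_ON, EQNB_ON, false, 1.f));  // 0
    return 0;
}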
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
index 4f5221a..0c2fe53 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
@@ -27,127 +27,127 @@
#ifdef BUILD_FLOAT
/* Stereo Enhancer coefficients for 8000 Hz sample rate, scaled with 0.161258 */
#define CS_MIDDLE_8000_A0 0.227720
-#define CS_MIDDLE_8000_A1 -0.215125
+#define CS_MIDDLE_8000_A1 (-0.215125)
#define CS_MIDDLE_8000_A2 0.000000
-#define CS_MIDDLE_8000_B1 -0.921899
+#define CS_MIDDLE_8000_B1 (-0.921899)
#define CS_MIDDLE_8000_B2 0.000000
#define CS_MIDDLE_8000_SCALE 15
#define CS_SIDE_8000_A0 0.611441
-#define CS_SIDE_8000_A1 -0.380344
-#define CS_SIDE_8000_A2 -0.231097
-#define CS_SIDE_8000_B1 -0.622470
-#define CS_SIDE_8000_B2 -0.130759
+#define CS_SIDE_8000_A1 (-0.380344)
+#define CS_SIDE_8000_A2 (-0.231097)
+#define CS_SIDE_8000_B1 (-0.622470)
+#define CS_SIDE_8000_B2 (-0.130759)
#define CS_SIDE_8000_SCALE 15
/* Stereo Enhancer coefficients for 11025Hz sample rate, scaled with 0.162943 */
#define CS_MIDDLE_11025_A0 0.230838
-#define CS_MIDDLE_11025_A1 -0.221559
+#define CS_MIDDLE_11025_A1 (-0.221559)
#define CS_MIDDLE_11025_A2 0.000000
-#define CS_MIDDLE_11025_B1 -0.943056
+#define CS_MIDDLE_11025_B1 (-0.943056)
#define CS_MIDDLE_11025_B2 0.000000
#define CS_MIDDLE_11025_SCALE 15
#define CS_SIDE_11025_A0 0.557372
-#define CS_SIDE_11025_A1 -0.391490
-#define CS_SIDE_11025_A2 -0.165881
-#define CS_SIDE_11025_B1 -0.880608
+#define CS_SIDE_11025_A1 (-0.391490)
+#define CS_SIDE_11025_A2 (-0.165881)
+#define CS_SIDE_11025_B1 (-0.880608)
#define CS_SIDE_11025_B2 0.032397
#define CS_SIDE_11025_SCALE 15
/* Stereo Enhancer coefficients for 12000Hz sample rate, scaled with 0.162191 */
#define CS_MIDDLE_12000_A0 0.229932
-#define CS_MIDDLE_12000_A1 -0.221436
+#define CS_MIDDLE_12000_A1 (-0.221436)
#define CS_MIDDLE_12000_A2 0.000000
-#define CS_MIDDLE_12000_B1 -0.947616
+#define CS_MIDDLE_12000_B1 (-0.947616)
#define CS_MIDDLE_12000_B2 0.000000
#define CS_MIDDLE_12000_SCALE 15
#define CS_SIDE_12000_A0 0.558398
-#define CS_SIDE_12000_A1 -0.392211
-#define CS_SIDE_12000_A2 -0.166187
-#define CS_SIDE_12000_B1 -0.892550
+#define CS_SIDE_12000_A1 (-0.392211)
+#define CS_SIDE_12000_A2 (-0.166187)
+#define CS_SIDE_12000_B1 (-0.892550)
#define CS_SIDE_12000_B2 0.032856
#define CS_SIDE_12000_SCALE 15
/* Stereo Enhancer coefficients for 16000Hz sample rate, scaled with 0.162371 */
#define CS_MIDDLE_16000_A0 0.230638
-#define CS_MIDDLE_16000_A1 -0.224232
+#define CS_MIDDLE_16000_A1 (-0.224232)
#define CS_MIDDLE_16000_A2 0.000000
-#define CS_MIDDLE_16000_B1 -0.960550
+#define CS_MIDDLE_16000_B1 (-0.960550)
#define CS_MIDDLE_16000_B2 0.000000
#define CS_MIDDLE_16000_SCALE 15
#define CS_SIDE_16000_A0 0.499695
-#define CS_SIDE_16000_A1 -0.355543
-#define CS_SIDE_16000_A2 -0.144152
-#define CS_SIDE_16000_B1 -1.050788
+#define CS_SIDE_16000_A1 (-0.355543)
+#define CS_SIDE_16000_A2 (-0.144152)
+#define CS_SIDE_16000_B1 (-1.050788)
#define CS_SIDE_16000_B2 0.144104
#define CS_SIDE_16000_SCALE 14
/* Stereo Enhancer coefficients for 22050Hz sample rate, scaled with 0.160781 */
#define CS_MIDDLE_22050_A0 0.228749
-#define CS_MIDDLE_22050_A1 -0.224128
+#define CS_MIDDLE_22050_A1 (-0.224128)
#define CS_MIDDLE_22050_A2 0.000000
-#define CS_MIDDLE_22050_B1 -0.971262
+#define CS_MIDDLE_22050_B1 (-0.971262)
#define CS_MIDDLE_22050_B2 0.000000
#define CS_MIDDLE_22050_SCALE 15
#define CS_SIDE_22050_A0 0.440112
-#define CS_SIDE_22050_A1 -0.261096
-#define CS_SIDE_22050_A2 -0.179016
-#define CS_SIDE_22050_B1 -1.116786
+#define CS_SIDE_22050_A1 (-0.261096)
+#define CS_SIDE_22050_A2 (-0.179016)
+#define CS_SIDE_22050_B1 (-1.116786)
#define CS_SIDE_22050_B2 0.182507
#define CS_SIDE_22050_SCALE 14
/* Stereo Enhancer coefficients for 24000Hz sample rate, scaled with 0.161882 */
#define CS_MIDDLE_24000_A0 0.230395
-#define CS_MIDDLE_24000_A1 -0.226117
+#define CS_MIDDLE_24000_A1 (-0.226117)
#define CS_MIDDLE_24000_A2 0.000000
-#define CS_MIDDLE_24000_B1 -0.973573
+#define CS_MIDDLE_24000_B1 (-0.973573)
#define CS_MIDDLE_24000_B2 0.000000
#define CS_MIDDLE_24000_SCALE 15
#define CS_SIDE_24000_A0 0.414770
-#define CS_SIDE_24000_A1 -0.287182
-#define CS_SIDE_24000_A2 -0.127588
-#define CS_SIDE_24000_B1 -1.229648
+#define CS_SIDE_24000_A1 (-0.287182)
+#define CS_SIDE_24000_A2 (-0.127588)
+#define CS_SIDE_24000_B1 (-1.229648)
#define CS_SIDE_24000_B2 0.282177
#define CS_SIDE_24000_SCALE 14
/* Stereo Enhancer coefficients for 32000Hz sample rate, scaled with 0.160322 */
#define CS_MIDDLE_32000_A0 0.228400
-#define CS_MIDDLE_32000_A1 -0.225214
+#define CS_MIDDLE_32000_A1 (-0.225214)
#define CS_MIDDLE_32000_A2 0.000000
-#define CS_MIDDLE_32000_B1 -0.980126
+#define CS_MIDDLE_32000_B1 (-0.980126)
#define CS_MIDDLE_32000_B2 0.000000
#define CS_MIDDLE_32000_SCALE 15
#define CS_SIDE_32000_A0 0.364579
-#define CS_SIDE_32000_A1 -0.207355
-#define CS_SIDE_32000_A2 -0.157224
-#define CS_SIDE_32000_B1 -1.274231
+#define CS_SIDE_32000_A1 (-0.207355)
+#define CS_SIDE_32000_A2 (-0.157224)
+#define CS_SIDE_32000_B1 (-1.274231)
#define CS_SIDE_32000_B2 0.312495
#define CS_SIDE_32000_SCALE 14
/* Stereo Enhancer coefficients for 44100Hz sample rate, scaled with 0.163834 */
#define CS_MIDDLE_44100_A0 0.233593
-#define CS_MIDDLE_44100_A1 -0.231225
+#define CS_MIDDLE_44100_A1 (-0.231225)
#define CS_MIDDLE_44100_A2 0.000000
-#define CS_MIDDLE_44100_B1 -0.985545
+#define CS_MIDDLE_44100_B1 (-0.985545)
#define CS_MIDDLE_44100_B2 0.000000
#define CS_MIDDLE_44100_SCALE 15
#define CS_SIDE_44100_A0 0.284573
-#define CS_SIDE_44100_A1 -0.258910
-#define CS_SIDE_44100_A2 -0.025662
-#define CS_SIDE_44100_B1 -1.572248
+#define CS_SIDE_44100_A1 (-0.258910)
+#define CS_SIDE_44100_A2 (-0.025662)
+#define CS_SIDE_44100_B1 (-1.572248)
#define CS_SIDE_44100_B2 0.588399
#define CS_SIDE_44100_SCALE 14
/* Stereo Enhancer coefficients for 48000Hz sample rate, scaled with 0.164402 */
#define CS_MIDDLE_48000_A0 0.234445
-#define CS_MIDDLE_48000_A1 -0.232261
+#define CS_MIDDLE_48000_A1 (-0.232261)
#define CS_MIDDLE_48000_A2 0.000000
-#define CS_MIDDLE_48000_B1 -0.986713
+#define CS_MIDDLE_48000_B1 (-0.986713)
#define CS_MIDDLE_48000_B2 0.000000
#define CS_MIDDLE_48000_SCALE 15
#define CS_SIDE_48000_A0 0.272606
-#define CS_SIDE_48000_A1 -0.266952
-#define CS_SIDE_48000_A2 -0.005654
-#define CS_SIDE_48000_B1 -1.617141
+#define CS_SIDE_48000_A1 (-0.266952)
+#define CS_SIDE_48000_A2 (-0.005654)
+#define CS_SIDE_48000_B1 (-1.617141)
#define CS_SIDE_48000_B2 0.630405
#define CS_SIDE_48000_SCALE 14
@@ -155,31 +155,31 @@
/* Stereo Enhancer coefficients for 96000Hz sample rate, scaled with 0.165*/
/* high pass filter with cutoff frequency 102.18 Hz*/
#define CS_MIDDLE_96000_A0 0.235532
-#define CS_MIDDLE_96000_A1 -0.234432
+#define CS_MIDDLE_96000_A1 (-0.234432)
#define CS_MIDDLE_96000_A2 0.000000
-#define CS_MIDDLE_96000_B1 -0.993334
+#define CS_MIDDLE_96000_B1 (-0.993334)
#define CS_MIDDLE_96000_B2 0.000000
#define CS_MIDDLE_96000_SCALE 15
/* bandpass filter with fc1 270 and fc2 3703, designed using 2nd order butterworth */
#define CS_SIDE_96000_A0 0.016727
#define CS_SIDE_96000_A1 0.000000
-#define CS_SIDE_96000_A2 -0.016727
-#define CS_SIDE_96000_B1 -1.793372
+#define CS_SIDE_96000_A2 (-0.016727)
+#define CS_SIDE_96000_B1 (-1.793372)
#define CS_SIDE_96000_B2 0.797236
#define CS_SIDE_96000_SCALE 14
/* Stereo Enhancer coefficients for 192000Hz sample rate, scaled with 0.1689*/
#define CS_MIDDLE_192000_A0 0.241219
-#define CS_MIDDLE_192000_A1 -0.240656
+#define CS_MIDDLE_192000_A1 (-0.240656)
#define CS_MIDDLE_192000_A2 0.000000
-#define CS_MIDDLE_192000_B1 -0.996661
+#define CS_MIDDLE_192000_B1 (-0.996661)
#define CS_MIDDLE_192000_B2 0.000000
#define CS_MIDDLE_192000_SCALE 15
/* bandpass filter with fc1 270 and fc2 3703, designed using 2nd order butterworth */
#define CS_SIDE_192000_A0 0.008991
-#define CS_SIDE_192000_A1 -0.000000
-#define CS_SIDE_192000_A2 -0.008991
-#define CS_SIDE_192000_B1 -1.892509
+#define CS_SIDE_192000_A1 (-0.000000)
+#define CS_SIDE_192000_A2 (-0.008991)
+#define CS_SIDE_192000_B1 (-1.892509)
#define CS_SIDE_192000_B2 0.893524
#define CS_SIDE_192000_SCALE 14
#endif
@@ -203,74 +203,74 @@
/* Reverb coefficients for 8000 Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_8000_A0 0.667271
-#define CS_REVERB_8000_A1 -0.667271
+#define CS_REVERB_8000_A1 (-0.667271)
#define CS_REVERB_8000_A2 0.000000
-#define CS_REVERB_8000_B1 -0.668179
+#define CS_REVERB_8000_B1 (-0.668179)
#define CS_REVERB_8000_B2 0.000000
#define CS_REVERB_8000_SCALE 15
/* Reverb coefficients for 11025Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_11025_A0 0.699638
-#define CS_REVERB_11025_A1 -0.699638
+#define CS_REVERB_11025_A1 (-0.699638)
#define CS_REVERB_11025_A2 0.000000
-#define CS_REVERB_11025_B1 -0.749096
+#define CS_REVERB_11025_B1 (-0.749096)
#define CS_REVERB_11025_B2 0.000000
#define CS_REVERB_11025_SCALE 15
/* Reverb coefficients for 12000Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_12000_A0 0.706931
-#define CS_REVERB_12000_A1 -0.706931
+#define CS_REVERB_12000_A1 (-0.706931)
#define CS_REVERB_12000_A2 0.000000
-#define CS_REVERB_12000_B1 -0.767327
+#define CS_REVERB_12000_B1 (-0.767327)
#define CS_REVERB_12000_B2 0.000000
#define CS_REVERB_12000_SCALE 15
/* Reverb coefficients for 16000Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_16000_A0 0.728272
-#define CS_REVERB_16000_A1 -0.728272
+#define CS_REVERB_16000_A1 (-0.728272)
#define CS_REVERB_16000_A2 0.000000
-#define CS_REVERB_16000_B1 -0.820679
+#define CS_REVERB_16000_B1 (-0.820679)
#define CS_REVERB_16000_B2 0.000000
#define CS_REVERB_16000_SCALE 15
/* Reverb coefficients for 22050Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_22050_A0 0.516396
#define CS_REVERB_22050_A1 0.000000
-#define CS_REVERB_22050_A2 -0.516396
-#define CS_REVERB_22050_B1 -0.518512
-#define CS_REVERB_22050_B2 -0.290990
+#define CS_REVERB_22050_A2 (-0.516396)
+#define CS_REVERB_22050_B1 (-0.518512)
+#define CS_REVERB_22050_B2 (-0.290990)
#define CS_REVERB_22050_SCALE 15
/* Reverb coefficients for 24000Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_24000_A0 0.479565
#define CS_REVERB_24000_A1 0.000000
-#define CS_REVERB_24000_A2 -0.479565
-#define CS_REVERB_24000_B1 -0.637745
-#define CS_REVERB_24000_B2 -0.198912
+#define CS_REVERB_24000_A2 (-0.479565)
+#define CS_REVERB_24000_B1 (-0.637745)
+#define CS_REVERB_24000_B2 (-0.198912)
#define CS_REVERB_24000_SCALE 15
/* Reverb coefficients for 32000Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_32000_A0 0.380349
#define CS_REVERB_32000_A1 0.000000
-#define CS_REVERB_32000_A2 -0.380349
-#define CS_REVERB_32000_B1 -0.950873
+#define CS_REVERB_32000_A2 (-0.380349)
+#define CS_REVERB_32000_B1 (-0.950873)
#define CS_REVERB_32000_B2 0.049127
#define CS_REVERB_32000_SCALE 15
/* Reverb coefficients for 44100Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_44100_A0 0.297389
#define CS_REVERB_44100_A1 0.000000
-#define CS_REVERB_44100_A2 -0.297389
-#define CS_REVERB_44100_B1 -1.200423
+#define CS_REVERB_44100_A2 (-0.297389)
+#define CS_REVERB_44100_B1 (-1.200423)
#define CS_REVERB_44100_B2 0.256529
#define CS_REVERB_44100_SCALE 14
/* Reverb coefficients for 48000Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_48000_A0 0.278661
#define CS_REVERB_48000_A1 0.000000
-#define CS_REVERB_48000_A2 -0.278661
-#define CS_REVERB_48000_B1 -1.254993
+#define CS_REVERB_48000_A2 (-0.278661)
+#define CS_REVERB_48000_B1 (-1.254993)
#define CS_REVERB_48000_B2 0.303347
#define CS_REVERB_48000_SCALE 14
@@ -279,8 +279,8 @@
/* Band pass filter with fc1=500 and fc2=8000*/
#define CS_REVERB_96000_A0 0.1602488
#define CS_REVERB_96000_A1 0.000000
-#define CS_REVERB_96000_A2 -0.1602488
-#define CS_REVERB_96000_B1 -1.585413
+#define CS_REVERB_96000_A2 (-0.1602488)
+#define CS_REVERB_96000_B1 (-1.585413)
#define CS_REVERB_96000_B2 0.599377
#define CS_REVERB_96000_SCALE 14
@@ -288,8 +288,8 @@
/* Band pass filter with fc1=500 and fc2=8000*/
#define CS_REVERB_192000_A0 0.0878369
#define CS_REVERB_192000_A1 0.000000
-#define CS_REVERB_192000_A2 -0.0878369
-#define CS_REVERB_192000_B1 -1.7765764
+#define CS_REVERB_192000_A2 (-0.0878369)
+#define CS_REVERB_192000_B1 (-1.7765764)
#define CS_REVERB_192000_B2 0.7804076
#define CS_REVERB_192000_SCALE 14
@@ -312,163 +312,163 @@
/* Equaliser coefficients for 8000 Hz sample rate, \
CS scaled with 1.038497 and CSEX scaled with 0.775480 */
#define CS_EQUALISER_8000_A0 1.263312
-#define CS_EQUALISER_8000_A1 -0.601748
-#define CS_EQUALISER_8000_A2 -0.280681
-#define CS_EQUALISER_8000_B1 -0.475865
-#define CS_EQUALISER_8000_B2 -0.408154
+#define CS_EQUALISER_8000_A1 (-0.601748)
+#define CS_EQUALISER_8000_A2 (-0.280681)
+#define CS_EQUALISER_8000_B1 (-0.475865)
+#define CS_EQUALISER_8000_B2 (-0.408154)
#define CS_EQUALISER_8000_SCALE 14
#define CSEX_EQUALISER_8000_A0 0.943357
-#define CSEX_EQUALISER_8000_A1 -0.449345
-#define CSEX_EQUALISER_8000_A2 -0.209594
-#define CSEX_EQUALISER_8000_B1 -0.475865
-#define CSEX_EQUALISER_8000_B2 -0.408154
+#define CSEX_EQUALISER_8000_A1 (-0.449345)
+#define CSEX_EQUALISER_8000_A2 (-0.209594)
+#define CSEX_EQUALISER_8000_B1 (-0.475865)
+#define CSEX_EQUALISER_8000_B2 (-0.408154)
#define CSEX_EQUALISER_8000_SCALE 15
/* Equaliser coefficients for 11025Hz sample rate, \
CS scaled with 1.027761 and CSEX scaled with 0.767463 */
#define CS_EQUALISER_11025_A0 1.101145
#define CS_EQUALISER_11025_A1 0.139020
-#define CS_EQUALISER_11025_A2 -0.864423
+#define CS_EQUALISER_11025_A2 (-0.864423)
#define CS_EQUALISER_11025_B1 0.024541
-#define CS_EQUALISER_11025_B2 -0.908930
+#define CS_EQUALISER_11025_B2 (-0.908930)
#define CS_EQUALISER_11025_SCALE 14
#define CSEX_EQUALISER_11025_A0 0.976058
-#define CSEX_EQUALISER_11025_A1 -0.695326
-#define CSEX_EQUALISER_11025_A2 -0.090809
-#define CSEX_EQUALISER_11025_B1 -0.610594
-#define CSEX_EQUALISER_11025_B2 -0.311149
+#define CSEX_EQUALISER_11025_A1 (-0.695326)
+#define CSEX_EQUALISER_11025_A2 (-0.090809)
+#define CSEX_EQUALISER_11025_B1 (-0.610594)
+#define CSEX_EQUALISER_11025_B2 (-0.311149)
#define CSEX_EQUALISER_11025_SCALE 15
/* Equaliser coefficients for 12000Hz sample rate, \
CS scaled with 1.032521 and CSEX scaled with 0.771017 */
#define CS_EQUALISER_12000_A0 1.276661
-#define CS_EQUALISER_12000_A1 -1.017519
-#define CS_EQUALISER_12000_A2 -0.044128
-#define CS_EQUALISER_12000_B1 -0.729616
-#define CS_EQUALISER_12000_B2 -0.204532
+#define CS_EQUALISER_12000_A1 (-1.017519)
+#define CS_EQUALISER_12000_A2 (-0.044128)
+#define CS_EQUALISER_12000_B1 (-0.729616)
+#define CS_EQUALISER_12000_B2 (-0.204532)
#define CS_EQUALISER_12000_SCALE 14
#define CSEX_EQUALISER_12000_A0 1.007095
-#define CSEX_EQUALISER_12000_A1 -0.871912
+#define CSEX_EQUALISER_12000_A1 (-0.871912)
#define CSEX_EQUALISER_12000_A2 0.023232
-#define CSEX_EQUALISER_12000_B1 -0.745857
-#define CSEX_EQUALISER_12000_B2 -0.189171
+#define CSEX_EQUALISER_12000_B1 (-0.745857)
+#define CSEX_EQUALISER_12000_B2 (-0.189171)
#define CSEX_EQUALISER_12000_SCALE 14
/* Equaliser coefficients for 16000Hz sample rate, \
CS scaled with 1.031378 and CSEX scaled with 0.770164 */
#define CS_EQUALISER_16000_A0 1.281629
-#define CS_EQUALISER_16000_A1 -1.075872
-#define CS_EQUALISER_16000_A2 -0.041365
-#define CS_EQUALISER_16000_B1 -0.725239
-#define CS_EQUALISER_16000_B2 -0.224358
+#define CS_EQUALISER_16000_A1 (-1.075872)
+#define CS_EQUALISER_16000_A2 (-0.041365)
+#define CS_EQUALISER_16000_B1 (-0.725239)
+#define CS_EQUALISER_16000_B2 (-0.224358)
#define CS_EQUALISER_16000_SCALE 14
#define CSEX_EQUALISER_16000_A0 1.081091
-#define CSEX_EQUALISER_16000_A1 -0.867183
-#define CSEX_EQUALISER_16000_A2 -0.070247
-#define CSEX_EQUALISER_16000_B1 -0.515121
-#define CSEX_EQUALISER_16000_B2 -0.425893
+#define CSEX_EQUALISER_16000_A1 (-0.867183)
+#define CSEX_EQUALISER_16000_A2 (-0.070247)
+#define CSEX_EQUALISER_16000_B1 (-0.515121)
+#define CSEX_EQUALISER_16000_B2 (-0.425893)
#define CSEX_EQUALISER_16000_SCALE 14
/* Equaliser coefficients for 22050Hz sample rate, \
CS scaled with 1.041576 and CSEX scaled with 0.777779 */
#define CS_EQUALISER_22050_A0 1.388605
-#define CS_EQUALISER_22050_A1 -1.305799
+#define CS_EQUALISER_22050_A1 (-1.305799)
#define CS_EQUALISER_22050_A2 0.039922
-#define CS_EQUALISER_22050_B1 -0.719494
-#define CS_EQUALISER_22050_B2 -0.243245
+#define CS_EQUALISER_22050_B1 (-0.719494)
+#define CS_EQUALISER_22050_B2 (-0.243245)
#define CS_EQUALISER_22050_SCALE 14
#define CSEX_EQUALISER_22050_A0 1.272910
-#define CSEX_EQUALISER_22050_A1 -1.341014
+#define CSEX_EQUALISER_22050_A1 (-1.341014)
#define CSEX_EQUALISER_22050_A2 0.167462
-#define CSEX_EQUALISER_22050_B1 -0.614219
-#define CSEX_EQUALISER_22050_B2 -0.345384
+#define CSEX_EQUALISER_22050_B1 (-0.614219)
+#define CSEX_EQUALISER_22050_B2 (-0.345384)
#define CSEX_EQUALISER_22050_SCALE 14
/* Equaliser coefficients for 24000Hz sample rate, \
CS scaled with 1.034495 and CSEX scaled with 0.772491 */
#define CS_EQUALISER_24000_A0 1.409832
-#define CS_EQUALISER_24000_A1 -1.456506
+#define CS_EQUALISER_24000_A1 (-1.456506)
#define CS_EQUALISER_24000_A2 0.151410
-#define CS_EQUALISER_24000_B1 -0.804201
-#define CS_EQUALISER_24000_B2 -0.163783
+#define CS_EQUALISER_24000_B1 (-0.804201)
+#define CS_EQUALISER_24000_B2 (-0.163783)
#define CS_EQUALISER_24000_SCALE 14
#define CSEX_EQUALISER_24000_A0 1.299198
-#define CSEX_EQUALISER_24000_A1 -1.452447
+#define CSEX_EQUALISER_24000_A1 (-1.452447)
#define CSEX_EQUALISER_24000_A2 0.240489
-#define CSEX_EQUALISER_24000_B1 -0.669303
-#define CSEX_EQUALISER_24000_B2 -0.294984
+#define CSEX_EQUALISER_24000_B1 (-0.669303)
+#define CSEX_EQUALISER_24000_B2 (-0.294984)
#define CSEX_EQUALISER_24000_SCALE 14
/* Equaliser coefficients for 32000Hz sample rate, \
CS scaled with 1.044559 and CSEX scaled with 0.780006 */
#define CS_EQUALISER_32000_A0 1.560988
-#define CS_EQUALISER_32000_A1 -1.877724
+#define CS_EQUALISER_32000_A1 (-1.877724)
#define CS_EQUALISER_32000_A2 0.389741
-#define CS_EQUALISER_32000_B1 -0.907410
-#define CS_EQUALISER_32000_B2 -0.070489
+#define CS_EQUALISER_32000_B1 (-0.907410)
+#define CS_EQUALISER_32000_B2 (-0.070489)
#define CS_EQUALISER_32000_SCALE 14
#define CSEX_EQUALISER_32000_A0 1.785049
-#define CSEX_EQUALISER_32000_A1 -2.233497
+#define CSEX_EQUALISER_32000_A1 (-2.233497)
#define CSEX_EQUALISER_32000_A2 0.526431
-#define CSEX_EQUALISER_32000_B1 -0.445939
-#define CSEX_EQUALISER_32000_B2 -0.522446
+#define CSEX_EQUALISER_32000_B1 (-0.445939)
+#define CSEX_EQUALISER_32000_B2 (-0.522446)
#define CSEX_EQUALISER_32000_SCALE 13
/* Equaliser coefficients for 44100Hz sample rate, \
CS scaled with 1.022170 and CSEX scaled with 0.763288 */
#define CS_EQUALISER_44100_A0 1.623993
-#define CS_EQUALISER_44100_A1 -2.270743
+#define CS_EQUALISER_44100_A1 (-2.270743)
#define CS_EQUALISER_44100_A2 0.688829
-#define CS_EQUALISER_44100_B1 -1.117190
+#define CS_EQUALISER_44100_B1 (-1.117190)
#define CS_EQUALISER_44100_B2 0.130208
#define CS_EQUALISER_44100_SCALE 13
#define CSEX_EQUALISER_44100_A0 2.028315
-#define CSEX_EQUALISER_44100_A1 -2.882459
+#define CSEX_EQUALISER_44100_A1 (-2.882459)
#define CSEX_EQUALISER_44100_A2 0.904535
-#define CSEX_EQUALISER_44100_B1 -0.593308
-#define CSEX_EQUALISER_44100_B2 -0.385816
+#define CSEX_EQUALISER_44100_B1 (-0.593308)
+#define CSEX_EQUALISER_44100_B2 (-0.385816)
#define CSEX_EQUALISER_44100_SCALE 13
/* Equaliser coefficients for 48000Hz sample rate, \
CS scaled with 1.018635 and CSEX scaled with 0.760648 */
#define CS_EQUALISER_48000_A0 1.641177
-#define CS_EQUALISER_48000_A1 -2.364687
+#define CS_EQUALISER_48000_A1 (-2.364687)
#define CS_EQUALISER_48000_A2 0.759910
-#define CS_EQUALISER_48000_B1 -1.166774
+#define CS_EQUALISER_48000_B1 (-1.166774)
#define CS_EQUALISER_48000_B2 0.178074
#define CS_EQUALISER_48000_SCALE 13
#define CSEX_EQUALISER_48000_A0 2.099655
-#define CSEX_EQUALISER_48000_A1 -3.065220
+#define CSEX_EQUALISER_48000_A1 (-3.065220)
#define CSEX_EQUALISER_48000_A2 1.010417
-#define CSEX_EQUALISER_48000_B1 -0.634021
-#define CSEX_EQUALISER_48000_B2 -0.347332
+#define CSEX_EQUALISER_48000_B1 (-0.634021)
+#define CSEX_EQUALISER_48000_B2 (-0.347332)
#define CSEX_EQUALISER_48000_SCALE 13
#ifdef HIGHER_FS
#define CS_EQUALISER_96000_A0 1.784497
-#define CS_EQUALISER_96000_A1 -3.001435
+#define CS_EQUALISER_96000_A1 (-3.001435)
#define CS_EQUALISER_96000_A2 1.228422
-#define CS_EQUALISER_96000_B1 -1.477804
+#define CS_EQUALISER_96000_B1 (-1.477804)
#define CS_EQUALISER_96000_B2 0.481369
#define CS_EQUALISER_96000_SCALE 13
#define CSEX_EQUALISER_96000_A0 2.7573
-#define CSEX_EQUALISER_96000_A1 -4.6721
+#define CSEX_EQUALISER_96000_A1 (-4.6721)
#define CSEX_EQUALISER_96000_A2 1.9317
-#define CSEX_EQUALISER_96000_B1 -0.971718
-#define CSEX_EQUALISER_96000_B2 -0.021216
+#define CSEX_EQUALISER_96000_B1 (-0.971718)
+#define CSEX_EQUALISER_96000_B2 (-0.021216)
#define CSEX_EQUALISER_96000_SCALE 13
#define CS_EQUALISER_192000_A0 1.889582
-#define CS_EQUALISER_192000_A1 -3.456140
+#define CS_EQUALISER_192000_A1 (-3.456140)
#define CS_EQUALISER_192000_A2 1.569864
-#define CS_EQUALISER_192000_B1 -1.700798
+#define CS_EQUALISER_192000_B1 (-1.700798)
#define CS_EQUALISER_192000_B2 0.701824
#define CS_EQUALISER_192000_SCALE 13
#define CSEX_EQUALISER_192000_A0 3.4273
-#define CSEX_EQUALISER_192000_A1 -6.2936
+#define CSEX_EQUALISER_192000_A1 (-6.2936)
#define CSEX_EQUALISER_192000_A2 2.8720
-#define CSEX_EQUALISER_192000_B1 -1.31074
+#define CSEX_EQUALISER_192000_B1 (-1.31074)
#define CSEX_EQUALISER_192000_B2 0.31312
#define CSEX_EQUALISER_192000_SCALE 13
#endif
diff --git a/media/libeffects/lvm/wrapper/Android.mk b/media/libeffects/lvm/wrapper/Android.mk
index f106aae..341dbc2 100644
--- a/media/libeffects/lvm/wrapper/Android.mk
+++ b/media/libeffects/lvm/wrapper/Android.mk
@@ -1,5 +1,8 @@
LOCAL_PATH:= $(call my-dir)
+# The wrapper -DBUILD_FLOAT needs to match
+# the lvm library -DBUILD_FLOAT.
+
# music bundle wrapper
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
@@ -20,15 +23,17 @@
LOCAL_STATIC_LIBRARIES += libmusicbundle
LOCAL_SHARED_LIBRARIES := \
- liblog \
+ libaudioutils \
libcutils \
- libdl
+ libdl \
+ liblog \
LOCAL_C_INCLUDES += \
$(LOCAL_PATH)/Bundle \
$(LOCAL_PATH)/../lib/Common/lib/ \
$(LOCAL_PATH)/../lib/Bundle/lib/ \
- $(call include-path-for, audio-effects)
+ $(call include-path-for, audio-effects) \
+ $(call include-path-for, audio-utils) \
LOCAL_HEADER_LIBRARIES += libhardware_headers
include $(BUILD_SHARED_LIBRARY)
@@ -53,15 +58,20 @@
LOCAL_STATIC_LIBRARIES += libreverb
LOCAL_SHARED_LIBRARIES := \
- liblog \
+ libaudioutils \
libcutils \
- libdl
+ libdl \
+ liblog \
LOCAL_C_INCLUDES += \
$(LOCAL_PATH)/Reverb \
$(LOCAL_PATH)/../lib/Common/lib/ \
$(LOCAL_PATH)/../lib/Reverb/lib/ \
- $(call include-path-for, audio-effects)
+ $(call include-path-for, audio-effects) \
+ $(call include-path-for, audio-utils) \
LOCAL_HEADER_LIBRARIES += libhardware_headers
+
+LOCAL_SANITIZE := integer_overflow
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 94d4516..04c2692 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -27,6 +27,7 @@
#include <stdlib.h>
#include <string.h>
+#include <audio_utils/primitives.h>
#include <log/log.h>
#include "EffectBundle.h"
@@ -63,16 +64,6 @@
}\
}
-
-static inline int16_t clamp16(int32_t sample)
-{
- // check overflow for both positive and negative values:
- // all bits above short range must me equal to sign bit
- if ((sample>>15) ^ (sample>>31))
- sample = 0x7FFF ^ (sample>>31);
- return sample;
-}
-
// Namespaces
namespace android {
namespace {
@@ -304,7 +295,7 @@
pContext->pBundledContext->SamplesToExitCountVirt = 0;
pContext->pBundledContext->SamplesToExitCountBb = 0;
pContext->pBundledContext->SamplesToExitCountEq = 0;
-#ifdef BUILD_FLOAT
+#if defined(BUILD_FLOAT) && !defined(NATIVE_FLOAT_BUFFER)
pContext->pBundledContext->pInputBuffer = NULL;
pContext->pBundledContext->pOutputBuffer = NULL;
#endif
@@ -475,13 +466,9 @@
if (pContext->pBundledContext->workBuffer != NULL) {
free(pContext->pBundledContext->workBuffer);
}
-#ifdef BUILD_FLOAT
- if (pContext->pBundledContext->pInputBuffer != NULL) {
- free(pContext->pBundledContext->pInputBuffer);
- }
- if (pContext->pBundledContext->pOutputBuffer != NULL) {
- free(pContext->pBundledContext->pOutputBuffer);
- }
+#if defined(BUILD_FLOAT) && !defined(NATIVE_FLOAT_BUFFER)
+ free(pContext->pBundledContext->pInputBuffer);
+ free(pContext->pBundledContext->pOutputBuffer);
#endif
delete pContext->pBundledContext;
pContext->pBundledContext = LVM_NULL;
@@ -554,7 +541,7 @@
pContext->config.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
pContext->config.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
- pContext->config.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pContext->config.inputCfg.format = EFFECT_BUFFER_FORMAT;
pContext->config.inputCfg.samplingRate = 44100;
pContext->config.inputCfg.bufferProvider.getBuffer = NULL;
pContext->config.inputCfg.bufferProvider.releaseBuffer = NULL;
@@ -562,7 +549,7 @@
pContext->config.inputCfg.mask = EFFECT_CONFIG_ALL;
pContext->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
pContext->config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
- pContext->config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pContext->config.outputCfg.format = EFFECT_BUFFER_FORMAT;
pContext->config.outputCfg.samplingRate = 44100;
pContext->config.outputCfg.bufferProvider.getBuffer = NULL;
pContext->config.outputCfg.bufferProvider.releaseBuffer = NULL;
@@ -739,47 +726,6 @@
return 0;
} /* end LvmBundle_init */
-#ifdef BUILD_FLOAT
-/**********************************************************************************
- FUNCTION INT16LTOFLOAT
-***********************************************************************************/
-// Todo: need to write function descriptor
-static void Int16ToFloat(const LVM_INT16 *src, LVM_FLOAT *dst, size_t n) {
- size_t ii;
- src += n-1;
- dst += n-1;
- for (ii = n; ii != 0; ii--) {
- *dst = ((LVM_FLOAT)((LVM_INT16)*src)) / 32768.0f;
- src--;
- dst--;
- }
- return;
-}
-/**********************************************************************************
- FUNCTION FLOATTOINT16_SAT
-***********************************************************************************/
-// Todo : Need to write function descriptor
-static void FloatToInt16_SAT(const LVM_FLOAT *src, LVM_INT16 *dst, size_t n) {
- size_t ii;
- LVM_INT32 temp;
-
- src += n-1;
- dst += n-1;
- for (ii = n; ii != 0; ii--) {
- temp = (LVM_INT32)((*src) * 32768.0f);
- if (temp >= 32767) {
- *dst = 32767;
- } else if (temp <= -32768) {
- *dst = -32768;
- } else {
- *dst = (LVM_INT16)temp;
- }
- src--;
- dst--;
- }
- return;
-}
-#endif
//----------------------------------------------------------------------------
// LvmBundle_process()
//----------------------------------------------------------------------------
@@ -787,8 +733,8 @@
// Apply LVM Bundle effects
//
// Inputs:
-// pIn: pointer to stereo 16 bit input data
-// pOut: pointer to stereo 16 bit output data
+// pIn: pointer to stereo float or 16 bit input data
+// pOut: pointer to stereo float or 16 bit output data
// frameCount: Frames to process
// pContext: effect engine context
// strength strength to be applied
@@ -798,44 +744,37 @@
//
//----------------------------------------------------------------------------
#ifdef BUILD_FLOAT
-int LvmBundle_process(LVM_INT16 *pIn,
- LVM_INT16 *pOut,
+int LvmBundle_process(effect_buffer_t *pIn,
+ effect_buffer_t *pOut,
int frameCount,
EffectContext *pContext){
-
- //LVM_ControlParams_t ActiveParams; /* Current control Parameters */
LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
- LVM_INT16 *pOutTmp;
- LVM_FLOAT *pInputBuff;
- LVM_FLOAT *pOutputBuff;
-
- if (pContext->pBundledContext->pInputBuffer == NULL ||
+ effect_buffer_t *pOutTmp;
+#ifndef NATIVE_FLOAT_BUFFER
+ if (pContext->pBundledContext->pInputBuffer == nullptr ||
pContext->pBundledContext->frameCount < frameCount) {
- if (pContext->pBundledContext->pInputBuffer != NULL) {
- free(pContext->pBundledContext->pInputBuffer);
- }
- pContext->pBundledContext->pInputBuffer = (LVM_FLOAT *)malloc(frameCount * \
- sizeof(LVM_FLOAT) * FCC_2);
+ free(pContext->pBundledContext->pInputBuffer);
+ pContext->pBundledContext->pInputBuffer =
+ (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * FCC_2);
}
- if (pContext->pBundledContext->pOutputBuffer == NULL ||
+ if (pContext->pBundledContext->pOutputBuffer == nullptr ||
pContext->pBundledContext->frameCount < frameCount) {
- if (pContext->pBundledContext->pOutputBuffer != NULL) {
- free(pContext->pBundledContext->pOutputBuffer);
- }
- pContext->pBundledContext->pOutputBuffer = (LVM_FLOAT *)malloc(frameCount * \
- sizeof(LVM_FLOAT) * FCC_2);
+ free(pContext->pBundledContext->pOutputBuffer);
+ pContext->pBundledContext->pOutputBuffer =
+ (LVM_FLOAT *)calloc(frameCount, sizeof(LVM_FLOAT) * FCC_2);
}
- if ((pContext->pBundledContext->pInputBuffer == NULL) ||
- (pContext->pBundledContext->pOutputBuffer == NULL)) {
- ALOGV("LVM_ERROR : LvmBundle_process memory allocation for float buffer's failed");
+ if (pContext->pBundledContext->pInputBuffer == nullptr ||
+ pContext->pBundledContext->pOutputBuffer == nullptr) {
+ ALOGE("LVM_ERROR : LvmBundle_process memory allocation for float buffer's failed");
return -EINVAL;
}
- pInputBuff = pContext->pBundledContext->pInputBuffer;
- pOutputBuff = pContext->pBundledContext->pOutputBuffer;
+ LVM_FLOAT * const pInputBuff = pContext->pBundledContext->pInputBuffer;
+ LVM_FLOAT * const pOutputBuff = pContext->pBundledContext->pOutputBuffer;
+#endif
if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE){
pOutTmp = pOut;
@@ -845,7 +784,7 @@
free(pContext->pBundledContext->workBuffer);
}
pContext->pBundledContext->workBuffer =
- (LVM_INT16 *)calloc(frameCount, sizeof(LVM_INT16) * FCC_2);
+ (effect_buffer_t *)calloc(frameCount, sizeof(effect_buffer_t) * FCC_2);
if (pContext->pBundledContext->workBuffer == NULL) {
return -ENOMEM;
}
@@ -857,43 +796,61 @@
return -EINVAL;
}
- #ifdef LVM_PCM
- fwrite(pIn, frameCount*sizeof(LVM_INT16) * FCC_2, 1, pContext->pBundledContext->PcmInPtr);
+#ifdef LVM_PCM
+ fwrite(pIn,
+ frameCount*sizeof(effect_buffer_t) * FCC_2, 1, pContext->pBundledContext->PcmInPtr);
fflush(pContext->pBundledContext->PcmInPtr);
- #endif
+#endif
+#ifndef NATIVE_FLOAT_BUFFER
/* Converting input data from fixed point to float point */
- Int16ToFloat(pIn, pInputBuff, frameCount * 2);
+ memcpy_to_float_from_i16(pInputBuff, pIn, frameCount * FCC_2);
/* Process the samples */
LvmStatus = LVM_Process(pContext->pBundledContext->hInstance, /* Instance handle */
pInputBuff, /* Input buffer */
pOutputBuff, /* Output buffer */
(LVM_UINT16)frameCount, /* Number of samples to read */
- 0); /* Audo Time */
+ 0); /* Audio Time */
+ /* Converting output data from float point to fixed point */
+ memcpy_to_i16_from_float(pOutTmp, pOutputBuff, frameCount * FCC_2);
+
+#else
+ /* Process the samples */
+ LvmStatus = LVM_Process(pContext->pBundledContext->hInstance, /* Instance handle */
+ pIn, /* Input buffer */
+ pOutTmp, /* Output buffer */
+ (LVM_UINT16)frameCount, /* Number of samples to read */
+ 0); /* Audio Time */
+#endif
LVM_ERROR_CHECK(LvmStatus, "LVM_Process", "LvmBundle_process")
if(LvmStatus != LVM_SUCCESS) return -EINVAL;
- /* Converting output data from float point to fixed point */
- FloatToInt16_SAT(pOutputBuff, pOutTmp, (LVM_UINT16)frameCount * 2);
- #ifdef LVM_PCM
- fwrite(pOutTmp, frameCount*sizeof(LVM_INT16) * FCC_2, 1, pContext->pBundledContext->PcmOutPtr);
+#ifdef LVM_PCM
+ fwrite(pOutTmp,
+ frameCount*sizeof(effect_buffer_t) * FCC_2, 1, pContext->pBundledContext->PcmOutPtr);
fflush(pContext->pBundledContext->PcmOutPtr);
- #endif
+#endif
if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE){
- for (int i = 0; i < frameCount * 2; i++){
+ for (int i = 0; i < frameCount * FCC_2; i++) {
+#ifndef NATIVE_FLOAT_BUFFER
pOut[i] = clamp16((LVM_INT32)pOut[i] + (LVM_INT32)pOutTmp[i]);
+#else
+ pOut[i] = pOut[i] + pOutTmp[i];
+#endif
}
}
return 0;
} /* end LvmBundle_process */
-#else
+
+#else // BUILD_FLOAT
+
int LvmBundle_process(LVM_INT16 *pIn,
LVM_INT16 *pOut,
int frameCount,
- EffectContext *pContext){
+ EffectContext *pContext) {
LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
LVM_INT16 *pOutTmp;
@@ -906,7 +863,7 @@
free(pContext->pBundledContext->workBuffer);
}
pContext->pBundledContext->workBuffer =
- (LVM_INT16 *)calloc(frameCount, sizeof(LVM_INT16) * 2);
+ (effect_buffer_t *)calloc(frameCount, sizeof(effect_buffer_t) * FCC_2);
if (pContext->pBundledContext->workBuffer == NULL) {
return -ENOMEM;
}
@@ -918,10 +875,11 @@
return -EINVAL;
}
- #ifdef LVM_PCM
- fwrite(pIn, frameCount*sizeof(LVM_INT16)*2, 1, pContext->pBundledContext->PcmInPtr);
+#ifdef LVM_PCM
+ fwrite(pIn, frameCount * sizeof(*pIn) * FCC_2,
+ 1 /* nmemb */, pContext->pBundledContext->PcmInPtr);
fflush(pContext->pBundledContext->PcmInPtr);
- #endif
+#endif
//ALOGV("Calling LVM_Process");
@@ -930,15 +888,16 @@
pIn, /* Input buffer */
pOutTmp, /* Output buffer */
(LVM_UINT16)frameCount, /* Number of samples to read */
- 0); /* Audo Time */
+ 0); /* Audio Time */
LVM_ERROR_CHECK(LvmStatus, "LVM_Process", "LvmBundle_process")
if(LvmStatus != LVM_SUCCESS) return -EINVAL;
- #ifdef LVM_PCM
- fwrite(pOutTmp, frameCount*sizeof(LVM_INT16)*2, 1, pContext->pBundledContext->PcmOutPtr);
+#ifdef LVM_PCM
+ fwrite(pOutTmp, frameCount * sizeof(*pOutTmp) * FCC_2,
+ 1 /* nmemb */, pContext->pBundledContext->PcmOutPtr);
fflush(pContext->pBundledContext->PcmOutPtr);
- #endif
+#endif
if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE){
for (int i=0; i<frameCount*2; i++){
@@ -947,7 +906,8 @@
}
return 0;
} /* end LvmBundle_process */
-#endif
+
+#endif // BUILD_FLOAT
//----------------------------------------------------------------------------
// EqualizerUpdateActiveParams()
@@ -1281,8 +1241,7 @@
CHECK_ARG(pConfig->inputCfg.channels == AUDIO_CHANNEL_OUT_STEREO);
CHECK_ARG(pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE
|| pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
- CHECK_ARG(pConfig->inputCfg.format == AUDIO_FORMAT_PCM_16_BIT);
-
+ CHECK_ARG(pConfig->inputCfg.format == EFFECT_BUFFER_FORMAT);
pContext->config = *pConfig;
switch (pConfig->inputCfg.samplingRate) {
@@ -3354,10 +3313,17 @@
pContext->pBundledContext->NumberEffectsCalled = 0;
/* Process all the available frames, block processing is
handled internalLY by the LVM bundle */
- processStatus = android::LvmBundle_process( (LVM_INT16 *)inBuffer->raw,
- (LVM_INT16 *)outBuffer->raw,
- outBuffer->frameCount,
- pContext);
+#ifdef NATIVE_FLOAT_BUFFER
+ processStatus = android::LvmBundle_process(inBuffer->f32,
+ outBuffer->f32,
+ outBuffer->frameCount,
+ pContext);
+#else
+ processStatus = android::LvmBundle_process(inBuffer->s16,
+ outBuffer->s16,
+ outBuffer->frameCount,
+ pContext);
+#endif
if (processStatus != 0){
ALOGV("\tLVM_ERROR : LvmBundle_process returned error %d", processStatus);
if (status == 0) {
@@ -3369,14 +3335,19 @@
//ALOGV("\tEffect_process Not Calling process with %d effects enabled, %d called: Effect %d",
//pContext->pBundledContext->NumberEffectsEnabled,
//pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
- // 2 is for stereo input
+
if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- for (size_t i=0; i < outBuffer->frameCount*2; i++){
- outBuffer->s16[i] =
- clamp16((LVM_INT32)outBuffer->s16[i] + (LVM_INT32)inBuffer->s16[i]);
+ for (size_t i = 0; i < outBuffer->frameCount * FCC_2; ++i){
+#ifdef NATIVE_FLOAT_BUFFER
+ outBuffer->f32[i] += inBuffer->f32[i];
+#else
+ outBuffer->s16[i] = clamp16((LVM_INT32)outBuffer->s16[i] + inBuffer->s16[i]);
+#endif
}
} else if (outBuffer->raw != inBuffer->raw) {
- memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount*sizeof(LVM_INT16)*2);
+ memcpy(outBuffer->raw,
+ inBuffer->raw,
+ outBuffer->frameCount * sizeof(effect_buffer_t) * FCC_2);
}
}
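The ACCUMULATE handling in the EffectBundle.cpp hunks above differs by buffer format: with NATIVE_FLOAT_BUFFER the wrapper simply adds the float samples, while the 16-bit path sums in 32 bits and saturates with clamp16() (the local helper removed above, which the wrapper now takes from the audio_utils primitives header it includes). A standalone sketch of the two behaviours:

#include <cstdint>
#include <cstdio>

// Same saturation idiom as the clamp16() removed from EffectBundle.cpp.
static inline int16_t clamp16(int32_t sample) {
    if ((sample >> 15) ^ (sample >> 31))        // any bit above the 16-bit range differs from the sign bit
        sample = 0x7FFF ^ (sample >> 31);       // saturate to 32767 or -32768
    return static_cast<int16_t>(sample);
}

int main() {
    // int16 path: 30000 + 10000 would overflow int16_t, so the sum saturates.
    int16_t out16 = 30000, in16 = 10000;
    out16 = clamp16(static_cast<int32_t>(out16) + in16);
    std::printf("int16 accumulate: %d\n", out16);    // 32767

    // float path: samples are simply added; no clamping happens at this stage.
    float outF = 0.9f, inF = 0.3f;
    outF += inF;
    std::printf("float accumulate: %f\n", outF);     // 1.2
    return 0;
}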
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index 291383a..6bf045d 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -95,7 +95,7 @@
int SamplesToExitCountEq;
int SamplesToExitCountBb;
int SamplesToExitCountVirt;
- LVM_INT16 *workBuffer;
+ effect_buffer_t *workBuffer;
int frameCount;
int32_t bandGaindB[FIVEBAND_NUMBANDS];
int volume;
@@ -103,10 +103,10 @@
FILE *PcmInPtr;
FILE *PcmOutPtr;
#endif
- #ifdef BUILD_FLOAT
+#if defined(BUILD_FLOAT) && !defined(NATIVE_FLOAT_BUFFER)
LVM_FLOAT *pInputBuffer;
LVM_FLOAT *pOutputBuffer;
- #endif
+#endif
};
/* SessionContext : One session */
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index ee9406d..e1c03f9 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -27,6 +27,7 @@
#include <stdlib.h>
#include <string.h>
+#include <audio_utils/primitives.h>
#include <log/log.h>
#include "EffectReverb.h"
@@ -135,6 +136,12 @@
&gInsertPresetReverbDescriptor
};
+#ifdef BUILD_FLOAT
+typedef float process_buffer_t; // process in float
+#else
+typedef int32_t process_buffer_t; // process in Q4_27
+#endif // BUILD_FLOAT
+
struct ReverbContext{
const struct effect_interface_s *itfe;
effect_config_t config;
@@ -152,8 +159,8 @@
FILE *PcmOutPtr;
#endif
LVM_Fs_en SampleRate;
- LVM_INT32 *InFrames32;
- LVM_INT32 *OutFrames32;
+ process_buffer_t *InFrames;
+ process_buffer_t *OutFrames;
size_t bufferSizeIn;
size_t bufferSizeOut;
bool auxiliary;
@@ -262,7 +269,7 @@
*pHandle = (effect_handle_t)pContext;
- #ifdef LVM_PCM
+#ifdef LVM_PCM
pContext->PcmInPtr = NULL;
pContext->PcmOutPtr = NULL;
@@ -273,19 +280,15 @@
(pContext->PcmOutPtr == NULL)){
return -EINVAL;
}
- #endif
+#endif
+ int channels = audio_channel_count_from_out_mask(pContext->config.inputCfg.channels);
// Allocate memory for reverb process (*2 is for STEREO)
-#ifdef BUILD_FLOAT
- pContext->bufferSizeIn = LVREV_MAX_FRAME_SIZE * sizeof(float) * 2;
- pContext->bufferSizeOut = pContext->bufferSizeIn;
-#else
- pContext->bufferSizeIn = LVREV_MAX_FRAME_SIZE * sizeof(LVM_INT32) * 2;
- pContext->bufferSizeOut = pContext->bufferSizeIn;
-#endif
- pContext->InFrames32 = (LVM_INT32 *)malloc(pContext->bufferSizeIn);
- pContext->OutFrames32 = (LVM_INT32 *)malloc(pContext->bufferSizeOut);
+ pContext->bufferSizeIn = LVREV_MAX_FRAME_SIZE * sizeof(process_buffer_t) * channels;
+ pContext->bufferSizeOut = LVREV_MAX_FRAME_SIZE * sizeof(process_buffer_t) * FCC_2;
+ pContext->InFrames = (process_buffer_t *)calloc(pContext->bufferSizeIn, 1 /* size */);
+ pContext->OutFrames = (process_buffer_t *)calloc(pContext->bufferSizeOut, 1 /* size */);
ALOGV("\tEffectCreate %p, size %zu", pContext, sizeof(ReverbContext));
ALOGV("\tEffectCreate end\n");
@@ -305,8 +308,8 @@
fclose(pContext->PcmInPtr);
fclose(pContext->PcmOutPtr);
#endif
- free(pContext->InFrames32);
- free(pContext->OutFrames32);
+ free(pContext->InFrames);
+ free(pContext->OutFrames);
pContext->bufferSizeIn = 0;
pContext->bufferSizeOut = 0;
Reverb_free(pContext);
@@ -344,114 +347,6 @@
} \
}
-#if 0
-//----------------------------------------------------------------------------
-// MonoTo2I_32()
-//----------------------------------------------------------------------------
-// Purpose:
-// Convert MONO to STEREO
-//
-//----------------------------------------------------------------------------
-
-void MonoTo2I_32( const LVM_INT32 *src,
- LVM_INT32 *dst,
- LVM_INT16 n)
-{
- LVM_INT16 ii;
- src += (n-1);
- dst += ((n*2)-1);
-
- for (ii = n; ii != 0; ii--)
- {
- *dst = *src;
- dst--;
-
- *dst = *src;
- dst--;
- src--;
- }
-
- return;
-}
-
-//----------------------------------------------------------------------------
-// From2iToMono_32()
-//----------------------------------------------------------------------------
-// Purpose:
-// Convert STEREO to MONO
-//
-//----------------------------------------------------------------------------
-
-void From2iToMono_32( const LVM_INT32 *src,
- LVM_INT32 *dst,
- LVM_INT16 n)
-{
- LVM_INT16 ii;
- LVM_INT32 Temp;
-
- for (ii = n; ii != 0; ii--)
- {
- Temp = (*src>>1);
- src++;
-
- Temp +=(*src>>1);
- src++;
-
- *dst = Temp;
- dst++;
- }
-
- return;
-}
-#endif
-
-#ifdef BUILD_FLOAT
-/**********************************************************************************
- FUNCTION INT16LTOFLOAT
-***********************************************************************************/
-// Todo: need to write function descriptor
-static void Int16ToFloat(const LVM_INT16 *src, LVM_FLOAT *dst, size_t n) {
- size_t ii;
- src += n-1;
- dst += n-1;
- for (ii = n; ii != 0; ii--) {
- *dst = ((LVM_FLOAT)((LVM_INT16)*src)) / 32768.0f;
- src--;
- dst--;
- }
- return;
-}
-/**********************************************************************************
- FUNCTION FLOATTOINT16_SAT
-***********************************************************************************/
-// Todo : Need to write function descriptor
-static void FloatToInt16_SAT(const LVM_FLOAT *src, LVM_INT16 *dst, size_t n) {
- size_t ii;
- LVM_INT32 temp;
-
- for (ii = 0; ii < n; ii++) {
- temp = (LVM_INT32)((*src) * 32768.0f);
- if (temp >= 32767) {
- *dst = 32767;
- } else if (temp <= -32768) {
- *dst = -32768;
- } else {
- *dst = (LVM_INT16)temp;
- }
- src++;
- dst++;
- }
- return;
-}
-#endif
-
-static inline int16_t clamp16(int32_t sample)
-{
- if ((sample>>15) ^ (sample>>31))
- sample = 0x7FFF ^ (sample>>31);
- return sample;
-}
-
//----------------------------------------------------------------------------
// process()
//----------------------------------------------------------------------------
@@ -459,8 +354,8 @@
// Apply the Reverb
//
// Inputs:
-// pIn: pointer to stereo/mono 16 bit input data
-// pOut: pointer to stereo 16 bit output data
+// pIn: pointer to stereo/mono float or 16 bit input data
+// pOut: pointer to stereo float or 16 bit output data
// frameCount: Frames to process
// pContext: effect engine context
// strength strength to be applied
@@ -469,116 +364,107 @@
// pOut: pointer to updated stereo 16 bit output data
//
//----------------------------------------------------------------------------
-
-int process( LVM_INT16 *pIn,
- LVM_INT16 *pOut,
+int process( effect_buffer_t *pIn,
+ effect_buffer_t *pOut,
int frameCount,
ReverbContext *pContext){
- LVM_INT16 samplesPerFrame = 1;
+ int channels = audio_channel_count_from_out_mask(pContext->config.inputCfg.channels);
LVREV_ReturnStatus_en LvmStatus = LVREV_SUCCESS; /* Function call status */
- LVM_INT16 *OutFrames16;
-#ifdef BUILD_FLOAT
- LVM_FLOAT *pInputBuff;
- LVM_FLOAT *pOutputBuff;
-#endif
-#ifdef BUILD_FLOAT
- if (pContext->InFrames32 == NULL ||
- pContext->bufferSizeIn < frameCount * sizeof(float) * 2) {
- if (pContext->InFrames32 != NULL) {
- free(pContext->InFrames32);
- }
- pContext->bufferSizeIn = frameCount * sizeof(float) * 2;
- pContext->InFrames32 = (LVM_INT32 *)malloc(pContext->bufferSizeIn);
- }
- if (pContext->OutFrames32 == NULL ||
- pContext->bufferSizeOut < frameCount * sizeof(float) * 2) {
- if (pContext->OutFrames32 != NULL) {
- free(pContext->OutFrames32);
- }
- pContext->bufferSizeOut = frameCount * sizeof(float) * 2;
- pContext->OutFrames32 = (LVM_INT32 *)malloc(pContext->bufferSizeOut);
- }
- pInputBuff = (float *)pContext->InFrames32;
- pOutputBuff = (float *)pContext->OutFrames32;
-#endif
// Check that the input is either mono or stereo
- if (pContext->config.inputCfg.channels == AUDIO_CHANNEL_OUT_STEREO) {
- samplesPerFrame = 2;
- } else if (pContext->config.inputCfg.channels != AUDIO_CHANNEL_OUT_MONO) {
- ALOGV("\tLVREV_ERROR : process invalid PCM format");
+ if (!(channels == 1 || channels == FCC_2) ) {
+ ALOGE("\tLVREV_ERROR : process invalid PCM format");
return -EINVAL;
}
- OutFrames16 = (LVM_INT16 *)pContext->OutFrames32;
+#ifdef BUILD_FLOAT
+ size_t inSize = frameCount * sizeof(process_buffer_t) * channels;
+ size_t outSize = frameCount * sizeof(process_buffer_t) * FCC_2;
+ if (pContext->InFrames == NULL ||
+ pContext->bufferSizeIn < inSize) {
+ free(pContext->InFrames);
+ pContext->bufferSizeIn = inSize;
+ pContext->InFrames = (process_buffer_t *)calloc(1, pContext->bufferSizeIn);
+ }
+ if (pContext->OutFrames == NULL ||
+ pContext->bufferSizeOut < outSize) {
+ free(pContext->OutFrames);
+ pContext->bufferSizeOut = outSize;
+ pContext->OutFrames = (process_buffer_t *)calloc(1, pContext->bufferSizeOut);
+ }
+
+#ifndef NATIVE_FLOAT_BUFFER
+ effect_buffer_t * const OutFrames16 = (effect_buffer_t *)pContext->OutFrames;
+#endif
+#endif
// Check for NULL pointers
- if((pContext->InFrames32 == NULL)||(pContext->OutFrames32 == NULL)){
- ALOGV("\tLVREV_ERROR : process failed to allocate memory for temporary buffers ");
+ if ((pContext->InFrames == NULL) || (pContext->OutFrames == NULL)) {
+ ALOGE("\tLVREV_ERROR : process failed to allocate memory for temporary buffers ");
return -EINVAL;
}
- #ifdef LVM_PCM
- fwrite(pIn, frameCount*sizeof(LVM_INT16)*samplesPerFrame, 1, pContext->PcmInPtr);
+#ifdef LVM_PCM
+ fwrite(pIn, frameCount * sizeof(*pIn) * channels, 1 /* nmemb */, pContext->PcmInPtr);
fflush(pContext->PcmInPtr);
- #endif
+#endif
if (pContext->preset && pContext->nextPreset != pContext->curPreset) {
Reverb_LoadPreset(pContext);
}
- // Convert to Input 32 bits
if (pContext->auxiliary) {
#ifdef BUILD_FLOAT
- Int16ToFloat(pIn, pInputBuff, frameCount * samplesPerFrame);
+#ifdef NATIVE_FLOAT_BUFFER
+ static_assert(std::is_same<decltype(*pIn), decltype(*pContext->InFrames)>::value,
+ "pIn and InFrames must be same type");
+ memcpy(pContext->InFrames, pIn, frameCount * channels * sizeof(*pIn));
#else
- for(int i=0; i<frameCount*samplesPerFrame; i++){
- pContext->InFrames32[i] = (LVM_INT32)pIn[i]<<8;
+ memcpy_to_float_from_i16(
+ pContext->InFrames, pIn, frameCount * channels);
+#endif
+#else //no BUILD_FLOAT
+ for (int i = 0; i < frameCount * channels; i++) {
+ pContext->InFrames[i] = (process_buffer_t)pIn[i]<<8;
}
#endif
} else {
// insert reverb input is always stereo
for (int i = 0; i < frameCount; i++) {
-#ifndef BUILD_FLOAT
- pContext->InFrames32[2*i] = (pIn[2*i] * REVERB_SEND_LEVEL) >> 4; // <<8 + >>12
- pContext->InFrames32[2*i+1] = (pIn[2*i+1] * REVERB_SEND_LEVEL) >> 4; // <<8 + >>12
+#ifdef BUILD_FLOAT
+#ifdef NATIVE_FLOAT_BUFFER
+ pContext->InFrames[2 * i] = (process_buffer_t)pIn[2 * i] * REVERB_SEND_LEVEL;
+ pContext->InFrames[2 * i + 1] = (process_buffer_t)pIn[2 * i + 1] * REVERB_SEND_LEVEL;
#else
- pInputBuff[2 * i] = (LVM_FLOAT)pIn[2 * i] * REVERB_SEND_LEVEL / 32768.0f;
- pInputBuff[2 * i + 1] = (LVM_FLOAT)pIn[2 * i + 1] * REVERB_SEND_LEVEL / 32768.0f;
+ pContext->InFrames[2 * i] =
+ (process_buffer_t)pIn[2 * i] * REVERB_SEND_LEVEL / 32768.0f;
+ pContext->InFrames[2 * i + 1] =
+ (process_buffer_t)pIn[2 * i + 1] * REVERB_SEND_LEVEL / 32768.0f;
+#endif
+#else
+ pContext->InFrames[2*i] = (pIn[2*i] * REVERB_SEND_LEVEL) >> 4; // <<8 + >>12
+ pContext->InFrames[2*i+1] = (pIn[2*i+1] * REVERB_SEND_LEVEL) >> 4; // <<8 + >>12
#endif
}
}
if (pContext->preset && pContext->curPreset == REVERB_PRESET_NONE) {
-#ifdef BUILD_FLOAT
- memset(pOutputBuff, 0, frameCount * sizeof(LVM_FLOAT) * 2); //always stereo here
-#else
- memset(pContext->OutFrames32, 0, frameCount * sizeof(LVM_INT32) * 2); //always stereo here
-#endif
+ memset(pContext->OutFrames, 0,
+ frameCount * sizeof(*pContext->OutFrames) * FCC_2); //always stereo here
} else {
if(pContext->bEnabled == LVM_FALSE && pContext->SamplesToExitCount > 0) {
-#ifdef BUILD_FLOAT
- memset(pInputBuff, 0, frameCount * sizeof(LVM_FLOAT) * samplesPerFrame);
-#else
- memset(pContext->InFrames32,0,frameCount * sizeof(LVM_INT32) * samplesPerFrame);
-#endif
- ALOGV("\tZeroing %d samples per frame at the end of call", samplesPerFrame);
+ memset(pContext->InFrames, 0,
+ frameCount * sizeof(*pContext->OutFrames) * channels);
+ ALOGV("\tZeroing %d samples per frame at the end of call", channels);
}
/* Process the samples, producing a stereo output */
-#ifdef BUILD_FLOAT
LvmStatus = LVREV_Process(pContext->hInstance, /* Instance handle */
- pInputBuff, /* Input buffer */
- pOutputBuff, /* Output buffer */
+ pContext->InFrames, /* Input buffer */
+ pContext->OutFrames, /* Output buffer */
frameCount); /* Number of samples to read */
-#else
- LvmStatus = LVREV_Process(pContext->hInstance, /* Instance handle */
- pContext->InFrames32, /* Input buffer */
- pContext->OutFrames32, /* Output buffer */
- frameCount); /* Number of samples to read */
-#endif
- }
+ }
LVM_ERROR_CHECK(LvmStatus, "LVREV_Process", "process")
if(LvmStatus != LVREV_SUCCESS) return -EINVAL;
@@ -586,55 +472,87 @@
// Convert to 16 bits
if (pContext->auxiliary) {
#ifdef BUILD_FLOAT
- FloatToInt16_SAT(pOutputBuff, OutFrames16, (size_t)frameCount * 2);
-#else
- for (int i=0; i < frameCount*2; i++) { //always stereo here
- OutFrames16[i] = clamp16(pContext->OutFrames32[i]>>8);
- }
+ // nothing to do here
+#ifndef NATIVE_FLOAT_BUFFER
+ // pContext->OutFrames and OutFrames16 point to the same buffer
+ // make sure the float to int conversion happens in the right order.
+ memcpy_to_i16_from_float(OutFrames16, pContext->OutFrames,
+ (size_t)frameCount * FCC_2);
#endif
- } else {
-#ifdef BUILD_FLOAT
- for (int i = 0; i < frameCount * 2; i++) {//always stereo here
- //pOutputBuff and OutFrames16 point to the same buffer, so better to
- //accumulate in pInputBuff, which is available
- pInputBuff[i] = pOutputBuff[i] + (LVM_FLOAT)pIn[i] / 32768.0f;
- }
-
- FloatToInt16_SAT(pInputBuff, OutFrames16, (size_t)frameCount * 2);
#else
- for (int i=0; i < frameCount*2; i++) { //always stereo here
- OutFrames16[i] = clamp16((pContext->OutFrames32[i]>>8) + (LVM_INT32)pIn[i]);
- }
+ memcpy_to_i16_from_q4_27(OutFrames16, pContext->OutFrames, (size_t)frameCount * FCC_2);
+#endif
+ } else {
+#ifdef BUILD_FLOAT
+#ifdef NATIVE_FLOAT_BUFFER
+ for (int i = 0; i < frameCount * FCC_2; i++) { // always stereo here
+ // Mix with dry input
+ pContext->OutFrames[i] += pIn[i];
+ }
+#else
+ for (int i = 0; i < frameCount * FCC_2; i++) { // always stereo here
+ // pContext->OutFrames and OutFrames16 point to the same buffer,
+ // so make sure the float to int conversion happens in the right order.
+ pContext->OutFrames[i] += (process_buffer_t)pIn[i] / 32768.0f;
+ }
+ memcpy_to_i16_from_float(OutFrames16, pContext->OutFrames,
+ (size_t)frameCount * FCC_2);
+#endif
+#else
+ for (int i=0; i < frameCount * FCC_2; i++) { // always stereo here
+ OutFrames16[i] = clamp16((pContext->OutFrames[i]>>8) + (process_buffer_t)pIn[i]);
+ }
#endif
// apply volume with ramp if needed
if ((pContext->leftVolume != pContext->prevLeftVolume ||
pContext->rightVolume != pContext->prevRightVolume) &&
pContext->volumeMode == REVERB_VOLUME_RAMP) {
+#if defined (BUILD_FLOAT) && defined (NATIVE_FLOAT_BUFFER)
+ // FIXME: still using int16 volumes.
+ // For reference: REVERB_UNIT_VOLUME (0x1000) // 1.0 in 4.12 format
+ float vl = (float)pContext->prevLeftVolume / 4096;
+ float incl = (((float)pContext->leftVolume / 4096) - vl) / frameCount;
+ float vr = (float)pContext->prevRightVolume / 4096;
+ float incr = (((float)pContext->rightVolume / 4096) - vr) / frameCount;
+
+ for (int i = 0; i < frameCount; i++) {
+ pContext->OutFrames[FCC_2 * i] *= vl;
+ pContext->OutFrames[FCC_2 * i + 1] *= vr;
+
+ vl += incl;
+ vr += incr;
+ }
+#else
LVM_INT32 vl = (LVM_INT32)pContext->prevLeftVolume << 16;
LVM_INT32 incl = (((LVM_INT32)pContext->leftVolume << 16) - vl) / frameCount;
LVM_INT32 vr = (LVM_INT32)pContext->prevRightVolume << 16;
LVM_INT32 incr = (((LVM_INT32)pContext->rightVolume << 16) - vr) / frameCount;
for (int i = 0; i < frameCount; i++) {
- OutFrames16[2*i] =
+ OutFrames16[FCC_2 * i] =
clamp16((LVM_INT32)((vl >> 16) * OutFrames16[2*i]) >> 12);
- OutFrames16[2*i+1] =
+ OutFrames16[FCC_2 * i + 1] =
clamp16((LVM_INT32)((vr >> 16) * OutFrames16[2*i+1]) >> 12);
vl += incl;
vr += incr;
}
-
+#endif
pContext->prevLeftVolume = pContext->leftVolume;
pContext->prevRightVolume = pContext->rightVolume;
} else if (pContext->volumeMode != REVERB_VOLUME_OFF) {
if (pContext->leftVolume != REVERB_UNIT_VOLUME ||
pContext->rightVolume != REVERB_UNIT_VOLUME) {
for (int i = 0; i < frameCount; i++) {
- OutFrames16[2*i] =
+#if defined(BUILD_FLOAT) && defined(NATIVE_FLOAT_BUFFER)
+ pContext->OutFrames[FCC_2 * i] *= ((float)pContext->leftVolume / 4096);
+ pContext->OutFrames[FCC_2 * i + 1] *= ((float)pContext->rightVolume / 4096);
+#else
+ OutFrames16[FCC_2 * i] =
clamp16((LVM_INT32)(pContext->leftVolume * OutFrames16[2*i]) >> 12);
- OutFrames16[2*i+1] =
+ OutFrames16[FCC_2 * i + 1] =
clamp16((LVM_INT32)(pContext->rightVolume * OutFrames16[2*i+1]) >> 12);
+#endif
}
}
pContext->prevLeftVolume = pContext->leftVolume;
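// --- Editor's sketch (not part of the patch): the float volume ramp used in the
// --- NATIVE_FLOAT_BUFFER path above, isolated for clarity. Volumes are assumed to be
// --- Q4.12 fixed point, where REVERB_UNIT_VOLUME (0x1000) corresponds to a gain of 1.0.
static void rampStereoVolume(float* out, int frameCount,
                             int16_t prevLeftQ12, int16_t leftQ12,
                             int16_t prevRightQ12, int16_t rightQ12) {
    float vl = prevLeftQ12 / 4096.0f;                          // starting left gain
    float vr = prevRightQ12 / 4096.0f;                         // starting right gain
    const float incl = (leftQ12 / 4096.0f - vl) / frameCount;  // per-frame increment
    const float incr = (rightQ12 / 4096.0f - vr) / frameCount;
    for (int i = 0; i < frameCount; i++) {                     // interleaved stereo frames
        out[2 * i]     *= vl;
        out[2 * i + 1] *= vr;
        vl += incl;
        vr += incr;
    }
}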
@@ -643,20 +561,25 @@
}
}
- #ifdef LVM_PCM
- fwrite(OutFrames16, frameCount*sizeof(LVM_INT16)*2, 1, pContext->PcmOutPtr);
+#ifdef LVM_PCM
+ fwrite(pContext->OutFrames, frameCount * sizeof(*pContext->OutFrames) * FCC_2,
+ 1 /* nmemb */, pContext->PcmOutPtr);
fflush(pContext->PcmOutPtr);
- #endif
+#endif
// Accumulate if required
if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE){
//ALOGV("\tBuffer access is ACCUMULATE");
- for (int i=0; i<frameCount*2; i++){ //always stereo here
+ for (int i = 0; i < frameCount * FCC_2; i++) { // always stereo here
+#ifndef NATIVE_FLOAT_BUFFER
pOut[i] = clamp16((int32_t)pOut[i] + (int32_t)OutFrames16[i]);
+#else
+ pOut[i] += pContext->OutFrames[i];
+#endif
}
}else{
//ALOGV("\tBuffer access is WRITE");
- memcpy(pOut, OutFrames16, frameCount*sizeof(LVM_INT16)*2);
+ memcpy(pOut, pContext->OutFrames, frameCount * sizeof(*pOut) * FCC_2);
}
return 0;
@@ -733,8 +656,7 @@
CHECK_ARG(pConfig->outputCfg.channels == AUDIO_CHANNEL_OUT_STEREO);
CHECK_ARG(pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_WRITE
|| pConfig->outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
- CHECK_ARG(pConfig->inputCfg.format == AUDIO_FORMAT_PCM_16_BIT);
-
+ CHECK_ARG(pConfig->inputCfg.format == EFFECT_BUFFER_FORMAT);
//ALOGV("\tReverb_setConfig calling memcpy");
pContext->config = *pConfig;
@@ -847,8 +769,7 @@
} else {
pContext->config.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
}
-
- pContext->config.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pContext->config.inputCfg.format = EFFECT_BUFFER_FORMAT;
pContext->config.inputCfg.samplingRate = 44100;
pContext->config.inputCfg.bufferProvider.getBuffer = NULL;
pContext->config.inputCfg.bufferProvider.releaseBuffer = NULL;
@@ -856,7 +777,7 @@
pContext->config.inputCfg.mask = EFFECT_CONFIG_ALL;
pContext->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
pContext->config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
- pContext->config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ pContext->config.outputCfg.format = EFFECT_BUFFER_FORMAT;
pContext->config.outputCfg.samplingRate = 44100;
pContext->config.outputCfg.bufferProvider.getBuffer = NULL;
pContext->config.outputCfg.bufferProvider.releaseBuffer = NULL;
@@ -1889,6 +1810,10 @@
if (param != REVERB_PARAM_PRESET) {
return -EINVAL;
}
+ if (vsize < (int)sizeof(uint16_t)) {
+ android_errorWriteLog(0x534e4554, "67647856");
+ return -EINVAL;
+ }
uint16_t preset = *(uint16_t *)pValue;
ALOGV("set REVERB_PARAM_PRESET, preset %d", preset);
@@ -2027,14 +1952,22 @@
}
//ALOGV("\tReverb_process() Calling process with %d frames", outBuffer->frameCount);
/* Process all the available frames, block processing is handled internally by the LVM bundle */
- status = process( (LVM_INT16 *)inBuffer->raw,
- (LVM_INT16 *)outBuffer->raw,
- outBuffer->frameCount,
- pContext);
+#if defined (BUILD_FLOAT) && defined (NATIVE_FLOAT_BUFFER)
+ status = process( inBuffer->f32,
+ outBuffer->f32,
+ outBuffer->frameCount,
+ pContext);
+#else
+ status = process( inBuffer->s16,
+ outBuffer->s16,
+ outBuffer->frameCount,
+ pContext);
+#endif
if (pContext->bEnabled == LVM_FALSE) {
if (pContext->SamplesToExitCount > 0) {
- pContext->SamplesToExitCount -= outBuffer->frameCount;
+ // signed - unsigned will trigger integer overflow if result becomes negative.
+ pContext->SamplesToExitCount -= (ssize_t)outBuffer->frameCount;
} else {
status = -ENODATA;
}
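// --- Editor's note: the hunks above rely on process_buffer_t and EFFECT_BUFFER_FORMAT,
// --- whose definitions are not shown in this diff. A hedged sketch of how such a pair
// --- is typically selected at compile time (an assumption about the header, not its
// --- actual contents):
#if defined(BUILD_FLOAT) && defined(NATIVE_FLOAT_BUFFER)
typedef float process_buffer_t;                        // float samples end to end
#define EFFECT_BUFFER_FORMAT AUDIO_FORMAT_PCM_FLOAT
#elif defined(BUILD_FLOAT)
typedef float process_buffer_t;                        // float internally, int16 at the edges
#define EFFECT_BUFFER_FORMAT AUDIO_FORMAT_PCM_16_BIT
#else
typedef LVM_INT32 process_buffer_t;                    // legacy fixed-point path
#define EFFECT_BUFFER_FORMAT AUDIO_FORMAT_PCM_16_BIT
#endif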
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 4b131a7..01f014f 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -22,11 +22,12 @@
#include <stdio.h>
#include <binder/IMemory.h>
+#include <binder/MemoryDealer.h>
#include <drm/drm_framework_common.h>
#include <media/IDataSource.h>
#include <media/mediametadataretriever.h>
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaSource.h>
#include <private/media/VideoFrame.h>
#include <utils/Log.h>
#include <utils/RefBase.h>
@@ -139,6 +140,11 @@
// have been caught above.
CHECK(offset >= mCachedOffset);
+ off64_t resultOffset;
+ if (__builtin_add_overflow(offset, size, &resultOffset)) {
+ return ERROR_IO;
+ }
+
if (size == 0) {
return 0;
}
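// --- Editor's note: the new check above rejects reads whose end offset would overflow
// --- off64_t. A minimal illustration of the same pattern (the helper name is
// --- hypothetical, not from the patch):
static bool readEndFits(off64_t offset, size_t size) {
    off64_t end;
    // __builtin_add_overflow returns true when the sum wraps, so a false result
    // means offset + size is representable in off64_t.
    return !__builtin_add_overflow(offset, static_cast<off64_t>(size), &end);
}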
@@ -265,19 +271,49 @@
/////////////////////////////////////////////////////////////////////////
+struct HeifDecoderImpl::DecodeThread : public Thread {
+ explicit DecodeThread(HeifDecoderImpl *decoder) : mDecoder(decoder) {}
+
+private:
+ HeifDecoderImpl* mDecoder;
+
+ bool threadLoop();
+
+ DISALLOW_EVIL_CONSTRUCTORS(DecodeThread);
+};
+
+bool HeifDecoderImpl::DecodeThread::threadLoop() {
+ return mDecoder->decodeAsync();
+}
+
+/////////////////////////////////////////////////////////////////////////
+
HeifDecoderImpl::HeifDecoderImpl() :
// output color format should always be set via setOutputColor(), in case
// it's not, default to HAL_PIXEL_FORMAT_RGB_565.
mOutputColor(HAL_PIXEL_FORMAT_RGB_565),
mCurScanline(0),
- mFrameDecoded(false) {
+ mWidth(0),
+ mHeight(0),
+ mFrameDecoded(false),
+ mHasImage(false),
+ mHasVideo(false),
+ mAvailableLines(0),
+ mNumSlices(1),
+ mSliceHeight(0),
+ mAsyncDecodeDone(false) {
}
HeifDecoderImpl::~HeifDecoderImpl() {
+ if (mThread != nullptr) {
+ mThread->join();
+ }
}
bool HeifDecoderImpl::init(HeifStream* stream, HeifFrameInfo* frameInfo) {
mFrameDecoded = false;
+ mFrameMemory.clear();
+
sp<HeifDataSource> dataSource = new HeifDataSource(stream);
if (!dataSource->init()) {
return false;
@@ -285,7 +321,7 @@
mDataSource = dataSource;
mRetriever = new MediaMetadataRetriever();
- status_t err = mRetriever->setDataSource(mDataSource, "video/mp4");
+ status_t err = mRetriever->setDataSource(mDataSource, "image/heif");
if (err != OK) {
ALOGE("failed to set data source!");
@@ -295,21 +331,28 @@
}
ALOGV("successfully set data source.");
+ const char* hasImage = mRetriever->extractMetadata(METADATA_KEY_HAS_IMAGE);
const char* hasVideo = mRetriever->extractMetadata(METADATA_KEY_HAS_VIDEO);
- if (!hasVideo || strcasecmp(hasVideo, "yes")) {
- ALOGE("no video: %s", hasVideo ? hasVideo : "null");
- return false;
+
+ mHasImage = hasImage && !strcasecmp(hasImage, "yes");
+ mHasVideo = hasVideo && !strcasecmp(hasVideo, "yes");
+ sp<IMemory> sharedMem;
+ if (mHasImage) {
+ // image index < 0 to retrieve primary image
+ sharedMem = mRetriever->getImageAtIndex(
+ -1, mOutputColor, true /*metaOnly*/);
+ } else if (mHasVideo) {
+ sharedMem = mRetriever->getFrameAtTime(0,
+ MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
+ mOutputColor, true /*metaOnly*/);
}
- mFrameMemory = mRetriever->getFrameAtTime(0,
- IMediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
- mOutputColor, true /*metaOnly*/);
- if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
+ if (sharedMem == nullptr || sharedMem->pointer() == nullptr) {
ALOGE("getFrameAtTime: videoFrame is a nullptr");
return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->pointer());
ALOGV("Meta dimension %dx%d, display %dx%d, angle %d, iccSize %d",
videoFrame->mWidth,
@@ -321,13 +364,21 @@
if (frameInfo != nullptr) {
frameInfo->set(
- videoFrame->mDisplayWidth,
- videoFrame->mDisplayHeight,
+ videoFrame->mWidth,
+ videoFrame->mHeight,
videoFrame->mRotationAngle,
videoFrame->mBytesPerPixel,
videoFrame->mIccSize,
videoFrame->getFlattenedIccData());
}
+ mWidth = videoFrame->mWidth;
+ mHeight = videoFrame->mHeight;
+ if (mHasImage && videoFrame->mTileHeight >= 512 && mWidth >= 3000 && mHeight >= 2000 ) {
+ // Try decoding in slices only if the image has tiles and is big enough.
+ mSliceHeight = videoFrame->mTileHeight;
+ mNumSlices = (videoFrame->mHeight + mSliceHeight - 1) / mSliceHeight;
+ ALOGV("mSliceHeight %u, mNumSlices %zu", mSliceHeight, mNumSlices);
+ }
return true;
}
@@ -360,6 +411,36 @@
return false;
}
+bool HeifDecoderImpl::decodeAsync() {
+ for (size_t i = 1; i < mNumSlices; i++) {
+ ALOGV("decodeAsync(): decoding slice %zu", i);
+ size_t top = i * mSliceHeight;
+ size_t bottom = (i + 1) * mSliceHeight;
+ if (bottom > mHeight) {
+ bottom = mHeight;
+ }
+ sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
+ -1, mOutputColor, 0, top, mWidth, bottom);
+ {
+ Mutex::Autolock autolock(mLock);
+
+ if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+ mAsyncDecodeDone = true;
+ mScanlineReady.signal();
+ break;
+ }
+ mFrameMemory = frameMemory;
+ mAvailableLines = bottom;
+ ALOGV("decodeAsync(): available lines %zu", mAvailableLines);
+ mScanlineReady.signal();
+ }
+ }
+ // Aggressive clear to avoid holding on to resources
+ mRetriever.clear();
+ mDataSource.clear();
+ return false;
+}
+
bool HeifDecoderImpl::decode(HeifFrameInfo* frameInfo) {
// reset scanline pointer
mCurScanline = 0;
@@ -368,17 +449,64 @@
return true;
}
- mFrameMemory = mRetriever->getFrameAtTime(0,
- IMediaSource::ReadOptions::SEEK_PREVIOUS_SYNC, mOutputColor);
+ // See if we want to decode in slices to allow the client to start
+ // scanline processing in parallel with the decode. If this fails,
+ // we fall back to decoding the full frame.
+ if (mHasImage && mNumSlices > 1) {
+ // get first slice and metadata
+ sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
+ -1, mOutputColor, 0, 0, mWidth, mSliceHeight);
+
+ if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+ ALOGE("decode: metadata is a nullptr");
+ return false;
+ }
+
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(frameMemory->pointer());
+
+ if (frameInfo != nullptr) {
+ frameInfo->set(
+ videoFrame->mWidth,
+ videoFrame->mHeight,
+ videoFrame->mRotationAngle,
+ videoFrame->mBytesPerPixel,
+ videoFrame->mIccSize,
+ videoFrame->getFlattenedIccData());
+ }
+
+ mFrameMemory = frameMemory;
+ mAvailableLines = mSliceHeight;
+ mThread = new DecodeThread(this);
+ if (mThread->run("HeifDecode", ANDROID_PRIORITY_FOREGROUND) == OK) {
+ mFrameDecoded = true;
+ return true;
+ }
+
+ // Fallback to decode without slicing
+ mThread.clear();
+ mNumSlices = 1;
+ mSliceHeight = 0;
+ mAvailableLines = 0;
+ mFrameMemory.clear();
+ }
+
+ if (mHasImage) {
+ // image index < 0 to retrieve primary image
+ mFrameMemory = mRetriever->getImageAtIndex(-1, mOutputColor);
+ } else if (mHasVideo) {
+ mFrameMemory = mRetriever->getFrameAtTime(0,
+ MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC, mOutputColor);
+ }
+
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
- ALOGE("getFrameAtTime: videoFrame is a nullptr");
+ ALOGE("decode: videoFrame is a nullptr");
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
if (videoFrame->mSize == 0 ||
mFrameMemory->size() < videoFrame->getFlattenedSize()) {
- ALOGE("getFrameAtTime: videoFrame size is invalid");
+ ALOGE("decode: videoFrame size is invalid");
return false;
}
@@ -393,8 +521,8 @@
if (frameInfo != nullptr) {
frameInfo->set(
- videoFrame->mDisplayWidth,
- videoFrame->mDisplayHeight,
+ videoFrame->mWidth,
+ videoFrame->mHeight,
videoFrame->mRotationAngle,
videoFrame->mBytesPerPixel,
videoFrame->mIccSize,
@@ -402,36 +530,45 @@
}
mFrameDecoded = true;
- // Aggressive clear to avoid holding on to resources
+ // Aggressively clear to avoid holding on to resources
mRetriever.clear();
mDataSource.clear();
return true;
}
-bool HeifDecoderImpl::getScanline(uint8_t* dst) {
+bool HeifDecoderImpl::getScanlineInner(uint8_t* dst) {
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
return false;
}
VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
- if (mCurScanline >= videoFrame->mDisplayHeight) {
- ALOGE("no more scanline available");
- return false;
- }
uint8_t* src = videoFrame->getFlattenedData() + videoFrame->mRowBytes * mCurScanline++;
- memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mDisplayWidth);
+ memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mWidth);
return true;
}
-size_t HeifDecoderImpl::skipScanlines(size_t count) {
- if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
- return 0;
+bool HeifDecoderImpl::getScanline(uint8_t* dst) {
+ if (mCurScanline >= mHeight) {
+ ALOGE("no more scanline available");
+ return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+ if (mNumSlices > 1) {
+ Mutex::Autolock autolock(mLock);
+
+ while (!mAsyncDecodeDone && mCurScanline >= mAvailableLines) {
+ mScanlineReady.wait(mLock);
+ }
+ return (mCurScanline < mAvailableLines) ? getScanlineInner(dst) : false;
+ }
+
+ return getScanlineInner(dst);
+}
+
+size_t HeifDecoderImpl::skipScanlines(size_t count) {
uint32_t oldScanline = mCurScanline;
mCurScanline += count;
- if (mCurScanline > videoFrame->mDisplayHeight) {
- mCurScanline = videoFrame->mDisplayHeight;
+ if (mCurScanline > mHeight) {
+ mCurScanline = mHeight;
}
return (mCurScanline > oldScanline) ? (mCurScanline - oldScanline) : 0;
}
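// --- Editor's sketch (assumption, simplified from the slice logic above): the decode
// --- thread publishes how many scanlines are ready, and getScanline() blocks on the
// --- Condition until that count covers the requested line or the async decode has
// --- finished. Requires <utils/Mutex.h> and <utils/Condition.h>.
struct SliceProgress {
    android::Mutex lock;
    android::Condition ready;
    size_t availableLines = 0;
    bool done = false;

    void publish(size_t lines) {                  // producer (decode thread)
        android::Mutex::Autolock autolock(lock);
        availableLines = lines;
        ready.signal();
    }
    bool waitForLine(size_t line) {               // consumer (getScanline caller)
        android::Mutex::Autolock autolock(lock);
        while (!done && line >= availableLines) {
            ready.wait(lock);
        }
        return line < availableLines;
    }
};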
diff --git a/media/libheif/HeifDecoderImpl.h b/media/libheif/HeifDecoderImpl.h
index c2e4ff3..528ee3b 100644
--- a/media/libheif/HeifDecoderImpl.h
+++ b/media/libheif/HeifDecoderImpl.h
@@ -19,6 +19,8 @@
#include "include/HeifDecoderAPI.h"
#include <system/graphics.h>
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
#include <utils/RefBase.h>
namespace android {
@@ -49,12 +51,30 @@
size_t skipScanlines(size_t count) override;
private:
+ struct DecodeThread;
+
sp<IDataSource> mDataSource;
sp<MediaMetadataRetriever> mRetriever;
sp<IMemory> mFrameMemory;
android_pixel_format_t mOutputColor;
size_t mCurScanline;
+ uint32_t mWidth;
+ uint32_t mHeight;
bool mFrameDecoded;
+ bool mHasImage;
+ bool mHasVideo;
+
+ // Slice decoding only
+ Mutex mLock;
+ Condition mScanlineReady;
+ sp<DecodeThread> mThread;
+ size_t mAvailableLines;
+ size_t mNumSlices;
+ uint32_t mSliceHeight;
+ bool mAsyncDecodeDone;
+
+ bool decodeAsync();
+ bool getScanlineInner(uint8_t* dst);
};
} // namespace android
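// --- Editor's note (sketch, not part of the patch): DecodeThread follows the libutils
// --- Thread contract, where threadLoop() returning false ends the loop after a single
// --- pass, so decodeAsync() runs at most once per decode() call. 'doSinglePass' below
// --- is a hypothetical stand-in for that work. Requires <utils/Thread.h>.
struct OneShotThread : public android::Thread {
    bool threadLoop() override {
        doSinglePass();       // perform the one unit of background work
        return false;         // returning false stops the thread after this iteration
    }
    void doSinglePass() { /* ... */ }
};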
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 8580f57..1b3a1be 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -3,10 +3,12 @@
vendor_available: true,
export_include_dirs: ["include"],
header_libs:[
+ "libgui_headers",
"libstagefright_headers",
"media_plugin_headers",
],
export_header_lib_headers: [
+ "libgui_headers",
"libstagefright_headers",
"media_plugin_headers",
],
@@ -18,13 +20,13 @@
vndk: {
enabled: true,
},
- srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
+ srcs: ["AudioParameter.cpp", "TypeConverter.cpp", "TimeCheck.cpp"],
cflags: [
"-Werror",
"-Wno-error=deprecated-declarations",
"-Wall",
],
- shared_libs: ["libutils", "liblog", "libgui"],
+ shared_libs: ["libutils", "liblog"],
header_libs: [
"libmedia_headers",
"libaudioclient_headers",
@@ -33,22 +35,22 @@
clang: true,
}
-// TODO(b/35449087): merge back with libmedia when OMX implementatoins
-// no longer use aidl wrappers (or remove OMX component form libmedia)
-cc_defaults {
- name: "libmedia_omx_defaults",
+cc_library_shared {
+ name: "libmedia_omx",
+ vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
+ double_loadable: true,
srcs: [
"aidl/android/IGraphicBufferSource.aidl",
"aidl/android/IOMXBufferSource.aidl",
"IMediaCodecList.cpp",
- "IMediaCodecService.cpp",
"IOMX.cpp",
- "IOMXStore.cpp",
"MediaCodecBuffer.cpp",
"MediaCodecInfo.cpp",
- "MediaDefs.cpp",
"OMXBuffer.cpp",
"omx/1.0/WGraphicBufferSource.cpp",
"omx/1.0/WOmx.cpp",
@@ -63,18 +65,13 @@
},
shared_libs: [
- "android.hidl.memory@1.0",
"android.hidl.token@1.0-utils",
"android.hardware.media.omx@1.0",
- "android.hardware.media@1.0",
- "libbase",
"libbinder",
"libcutils",
"libgui",
"libhidlbase",
- "libhidlmemory",
"libhidltransport",
- "libhwbinder",
"liblog",
"libstagefright_foundation",
"libui",
@@ -82,11 +79,8 @@
],
export_shared_lib_headers: [
- "android.hidl.memory@1.0",
"android.hidl.token@1.0-utils",
"android.hardware.media.omx@1.0",
- "android.hardware.media@1.0",
- "libhidlmemory",
"libstagefright_foundation",
"libui",
],
@@ -121,24 +115,46 @@
},
}
-cc_library_shared {
- name: "libmedia_omx",
- vendor_available: true,
- vndk: {
- enabled: true,
- },
- double_loadable: true,
+cc_library_static {
+ name: "libmedia_midiiowrapper",
- defaults: ["libmedia_omx_defaults"],
+ srcs: ["MidiIoWrapper.cpp"],
+
+ static_libs: [
+ "libsonivox",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
}
-cc_library_shared {
+filegroup {
+ name: "mediaupdateservice_aidl",
+ srcs: [
+ "aidl/android/media/IMediaExtractorUpdateService.aidl",
+ ],
+}
+
+cc_library {
name: "libmedia",
- defaults: ["libmedia_omx_defaults"],
srcs: [
+ ":mediaupdateservice_aidl",
"IDataSource.cpp",
- "IHDCP.cpp",
"BufferingSettings.cpp",
"mediaplayer.cpp",
"IMediaHTTPConnection.cpp",
@@ -162,7 +178,6 @@
"IMediaMetadataRetriever.cpp",
"mediametadataretriever.cpp",
"MidiDeviceInfo.cpp",
- "MidiIoWrapper.cpp",
"JetPlayer.cpp",
"MediaScanner.cpp",
"MediaScannerClient.cpp",
@@ -175,8 +190,21 @@
"StringArray.cpp",
],
+ aidl: {
+ local_include_dirs: ["aidl"],
+ export_aidl_headers: true,
+ },
+
+ header_libs: [
+ "libstagefright_headers",
+ ],
+
+ export_header_lib_headers: [
+ "libstagefright_headers",
+ ],
+
shared_libs: [
- "libui",
+ "android.hidl.token@1.0-utils",
"liblog",
"libcutils",
"libutils",
@@ -187,36 +215,92 @@
"libexpat",
"libcamera_client",
"libstagefright_foundation",
+ "libmediaextractor",
"libgui",
"libdl",
"libaudioutils",
"libaudioclient",
- "libmedia_helper",
- "libmediadrm",
- "libmediametrics",
- "libbase",
- "libhidlbase",
- "libhidltransport",
- "libhwbinder",
- "libhidlmemory",
- "android.hidl.memory@1.0",
- "android.hardware.graphics.common@1.0",
- "android.hardware.graphics.bufferqueue@1.0",
+ "libmedia_omx",
],
export_shared_lib_headers: [
+ "libaudioclient",
"libbinder",
"libicuuc",
"libicui18n",
"libsonivox",
- "libmediadrm",
- "libmedia_helper",
- "android.hidl.memory@1.0",
+ "libmedia_omx",
],
- // for memory heap analysis
static_libs: [
- "libc_malloc_debug_backtrace",
+ "libc_malloc_debug_backtrace", // for memory heap analysis
+ "libmedia_midiiowrapper",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ version_script: "exports.lds",
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+}
+
+cc_library {
+ name: "libmedia_player2_util",
+
+ srcs: [
+ "BufferingSettings.cpp",
+ "DataSourceDesc.cpp",
+ "IDataSource.cpp",
+ "IMediaExtractor.cpp",
+ "IMediaExtractorService.cpp",
+ "IMediaSource.cpp",
+ "MediaCodecBuffer.cpp",
+ "MediaUtils.cpp",
+ "Metadata.cpp",
+ "NdkWrapper.cpp",
+ ],
+
+ shared_libs: [
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libmediaextractor",
+ "libmediandk",
+ "libnativewindow",
+ "libstagefright_foundation",
+ "libui",
+ "libutils",
+ ],
+
+ export_shared_lib_headers: [
+ "libbinder",
+ "libmediandk",
+ ],
+
+ header_libs: [
+ "media_plugin_headers",
+ ],
+
+ static_libs: [
+ "libstagefright_rtsp",
+ "libstagefright_timedtext",
],
export_include_dirs: [
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 65fc70b..034f7c2 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -34,6 +34,8 @@
const char * const AudioParameter::keyScreenState = AUDIO_PARAMETER_KEY_SCREEN_STATE;
const char * const AudioParameter::keyBtNrec = AUDIO_PARAMETER_KEY_BT_NREC;
const char * const AudioParameter::keyHwAvSync = AUDIO_PARAMETER_HW_AV_SYNC;
+const char * const AudioParameter::keyPresentationId = AUDIO_PARAMETER_STREAM_PRESENTATION_ID;
+const char * const AudioParameter::keyProgramId = AUDIO_PARAMETER_STREAM_PROGRAM_ID;
const char * const AudioParameter::keyMonoOutput = AUDIO_PARAMETER_MONO_OUTPUT;
const char * const AudioParameter::keyStreamHwAvSync = AUDIO_PARAMETER_STREAM_HW_AV_SYNC;
const char * const AudioParameter::keyStreamConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
@@ -45,6 +47,8 @@
const char * const AudioParameter::valueOn = AUDIO_PARAMETER_VALUE_ON;
const char * const AudioParameter::valueOff = AUDIO_PARAMETER_VALUE_OFF;
const char * const AudioParameter::valueListSeparator = AUDIO_PARAMETER_VALUE_LIST_SEPARATOR;
+const char * const AudioParameter::keyReconfigA2dp = AUDIO_PARAMETER_RECONFIG_A2DP;
+const char * const AudioParameter::keyReconfigA2dpSupported = AUDIO_PARAMETER_A2DP_RECONFIG_SUPPORTED;
AudioParameter::AudioParameter(const String8& keyValuePairs)
{
diff --git a/media/libmedia/BufferingSettings.cpp b/media/libmedia/BufferingSettings.cpp
index a69497e..271a238 100644
--- a/media/libmedia/BufferingSettings.cpp
+++ b/media/libmedia/BufferingSettings.cpp
@@ -23,43 +23,16 @@
namespace android {
-// static
-bool BufferingSettings::IsValidBufferingMode(int mode) {
- return (mode >= BUFFERING_MODE_NONE && mode < BUFFERING_MODE_COUNT);
-}
-
-// static
-bool BufferingSettings::IsTimeBasedBufferingMode(int mode) {
- return (mode == BUFFERING_MODE_TIME_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
-}
-
-// static
-bool BufferingSettings::IsSizeBasedBufferingMode(int mode) {
- return (mode == BUFFERING_MODE_SIZE_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
-}
-
BufferingSettings::BufferingSettings()
- : mInitialBufferingMode(BUFFERING_MODE_NONE),
- mRebufferingMode(BUFFERING_MODE_NONE),
- mInitialWatermarkMs(kNoWatermark),
- mInitialWatermarkKB(kNoWatermark),
- mRebufferingWatermarkLowMs(kNoWatermark),
- mRebufferingWatermarkHighMs(kNoWatermark),
- mRebufferingWatermarkLowKB(kNoWatermark),
- mRebufferingWatermarkHighKB(kNoWatermark) { }
+ : mInitialMarkMs(kNoMark),
+ mResumePlaybackMarkMs(kNoMark) { }
status_t BufferingSettings::readFromParcel(const Parcel* parcel) {
if (parcel == nullptr) {
return BAD_VALUE;
}
- mInitialBufferingMode = (BufferingMode)parcel->readInt32();
- mRebufferingMode = (BufferingMode)parcel->readInt32();
- mInitialWatermarkMs = parcel->readInt32();
- mInitialWatermarkKB = parcel->readInt32();
- mRebufferingWatermarkLowMs = parcel->readInt32();
- mRebufferingWatermarkHighMs = parcel->readInt32();
- mRebufferingWatermarkLowKB = parcel->readInt32();
- mRebufferingWatermarkHighKB = parcel->readInt32();
+ mInitialMarkMs = parcel->readInt32();
+ mResumePlaybackMarkMs = parcel->readInt32();
return OK;
}
@@ -68,26 +41,17 @@
if (parcel == nullptr) {
return BAD_VALUE;
}
- parcel->writeInt32(mInitialBufferingMode);
- parcel->writeInt32(mRebufferingMode);
- parcel->writeInt32(mInitialWatermarkMs);
- parcel->writeInt32(mInitialWatermarkKB);
- parcel->writeInt32(mRebufferingWatermarkLowMs);
- parcel->writeInt32(mRebufferingWatermarkHighMs);
- parcel->writeInt32(mRebufferingWatermarkLowKB);
- parcel->writeInt32(mRebufferingWatermarkHighKB);
+ parcel->writeInt32(mInitialMarkMs);
+ parcel->writeInt32(mResumePlaybackMarkMs);
return OK;
}
String8 BufferingSettings::toString() const {
String8 s;
- s.appendFormat("initialMode(%d), rebufferingMode(%d), "
- "initialMarks(%d ms, %d KB), rebufferingMarks(%d, %d)ms, (%d, %d)KB",
- mInitialBufferingMode, mRebufferingMode,
- mInitialWatermarkMs, mInitialWatermarkKB,
- mRebufferingWatermarkLowMs, mRebufferingWatermarkHighMs,
- mRebufferingWatermarkLowKB, mRebufferingWatermarkHighKB);
+ s.appendFormat(
+ "initialMarks(%d ms), resumePlaybackMarks(%d ms)",
+ mInitialMarkMs, mResumePlaybackMarkMs);
return s;
}
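// --- Editor's sketch (hypothetical caller, not part of the patch): with the
// --- simplification above, a BufferingSettings round-trips as just two int32 marks.
// --- Assumes the marks are public int32 fields, as the constructor above suggests.
static void exampleBufferingRoundTrip() {
    android::BufferingSettings settings;
    settings.mInitialMarkMs = 2000;          // start playback after ~2 s of buffering
    settings.mResumePlaybackMarkMs = 5000;   // resume after rebuffering at ~5 s
    android::Parcel parcel;
    settings.writeToParcel(&parcel);
    parcel.setDataPosition(0);               // rewind before reading back
    android::BufferingSettings decoded;
    decoded.readFromParcel(&parcel);         // decoded now mirrors 'settings'
}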
diff --git a/media/libmedia/DataSourceDesc.cpp b/media/libmedia/DataSourceDesc.cpp
new file mode 100644
index 0000000..b7ccbce
--- /dev/null
+++ b/media/libmedia/DataSourceDesc.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DataSourceDesc"
+
+#include <media/DataSource.h>
+#include <media/DataSourceDesc.h>
+#include <media/MediaHTTPService.h>
+
+namespace android {
+
+static const int64_t kLongMax = 0x7ffffffffffffffL;
+
+DataSourceDesc::DataSourceDesc()
+ : mType(TYPE_NONE),
+ mFDOffset(0),
+ mFDLength(kLongMax),
+ mId(0),
+ mStartPositionMs(0),
+ mEndPositionMs(0) {
+}
+
+} // namespace android
diff --git a/media/libmedia/IHDCP.cpp b/media/libmedia/IHDCP.cpp
deleted file mode 100644
index a46017f..0000000
--- a/media/libmedia/IHDCP.cpp
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "IHDCP"
-#include <utils/Log.h>
-
-#include <binder/Parcel.h>
-#include <media/IHDCP.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/foundation/ADebug.h>
-
-namespace android {
-
-enum {
- OBSERVER_NOTIFY = IBinder::FIRST_CALL_TRANSACTION,
- HDCP_SET_OBSERVER,
- HDCP_INIT_ASYNC,
- HDCP_SHUTDOWN_ASYNC,
- HDCP_GET_CAPS,
- HDCP_ENCRYPT,
- HDCP_ENCRYPT_NATIVE,
- HDCP_DECRYPT,
-};
-
-struct BpHDCPObserver : public BpInterface<IHDCPObserver> {
- explicit BpHDCPObserver(const sp<IBinder> &impl)
- : BpInterface<IHDCPObserver>(impl) {
- }
-
- virtual void notify(
- int msg, int ext1, int ext2, const Parcel *obj) {
- Parcel data, reply;
- data.writeInterfaceToken(IHDCPObserver::getInterfaceDescriptor());
- data.writeInt32(msg);
- data.writeInt32(ext1);
- data.writeInt32(ext2);
- if (obj && obj->dataSize() > 0) {
- data.appendFrom(const_cast<Parcel *>(obj), 0, obj->dataSize());
- }
- remote()->transact(OBSERVER_NOTIFY, data, &reply, IBinder::FLAG_ONEWAY);
- }
-};
-
-IMPLEMENT_META_INTERFACE(HDCPObserver, "android.hardware.IHDCPObserver");
-
-struct BpHDCP : public BpInterface<IHDCP> {
- explicit BpHDCP(const sp<IBinder> &impl)
- : BpInterface<IHDCP>(impl) {
- }
-
- virtual status_t setObserver(const sp<IHDCPObserver> &observer) {
- Parcel data, reply;
- data.writeInterfaceToken(IHDCP::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(observer));
- remote()->transact(HDCP_SET_OBSERVER, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t initAsync(const char *host, unsigned port) {
- Parcel data, reply;
- data.writeInterfaceToken(IHDCP::getInterfaceDescriptor());
- data.writeCString(host);
- data.writeInt32(port);
- remote()->transact(HDCP_INIT_ASYNC, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t shutdownAsync() {
- Parcel data, reply;
- data.writeInterfaceToken(IHDCP::getInterfaceDescriptor());
- remote()->transact(HDCP_SHUTDOWN_ASYNC, data, &reply);
- return reply.readInt32();
- }
-
- virtual uint32_t getCaps() {
- Parcel data, reply;
- data.writeInterfaceToken(IHDCP::getInterfaceDescriptor());
- remote()->transact(HDCP_GET_CAPS, data, &reply);
- return reply.readInt32();
- }
-
- virtual status_t encrypt(
- const void *inData, size_t size, uint32_t streamCTR,
- uint64_t *outInputCTR, void *outData) {
- Parcel data, reply;
- data.writeInterfaceToken(IHDCP::getInterfaceDescriptor());
- data.writeInt32(size);
- data.write(inData, size);
- data.writeInt32(streamCTR);
- remote()->transact(HDCP_ENCRYPT, data, &reply);
-
- status_t err = reply.readInt32();
-
- if (err != OK) {
- *outInputCTR = 0;
-
- return err;
- }
-
- *outInputCTR = reply.readInt64();
- reply.read(outData, size);
-
- return err;
- }
-
- virtual status_t encryptNative(
- const sp<GraphicBuffer> &graphicBuffer,
- size_t offset, size_t size, uint32_t streamCTR,
- uint64_t *outInputCTR, void *outData) {
- Parcel data, reply;
- data.writeInterfaceToken(IHDCP::getInterfaceDescriptor());
- data.write(*graphicBuffer);
- data.writeInt32(offset);
- data.writeInt32(size);
- data.writeInt32(streamCTR);
- remote()->transact(HDCP_ENCRYPT_NATIVE, data, &reply);
-
- status_t err = reply.readInt32();
-
- if (err != OK) {
- *outInputCTR = 0;
- return err;
- }
-
- *outInputCTR = reply.readInt64();
- reply.read(outData, size);
-
- return err;
- }
-
- virtual status_t decrypt(
- const void *inData, size_t size,
- uint32_t streamCTR, uint64_t inputCTR,
- void *outData) {
- Parcel data, reply;
- data.writeInterfaceToken(IHDCP::getInterfaceDescriptor());
- data.writeInt32(size);
- data.write(inData, size);
- data.writeInt32(streamCTR);
- data.writeInt64(inputCTR);
- remote()->transact(HDCP_DECRYPT, data, &reply);
-
- status_t err = reply.readInt32();
-
- if (err != OK) {
- return err;
- }
-
- reply.read(outData, size);
-
- return err;
- }
-};
-
-IMPLEMENT_META_INTERFACE(HDCP, "android.hardware.IHDCP");
-
-status_t BnHDCPObserver::onTransact(
- uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
- switch (code) {
- case OBSERVER_NOTIFY:
- {
- CHECK_INTERFACE(IHDCPObserver, data, reply);
-
- int msg = data.readInt32();
- int ext1 = data.readInt32();
- int ext2 = data.readInt32();
-
- Parcel obj;
- if (data.dataAvail() > 0) {
- obj.appendFrom(
- const_cast<Parcel *>(&data),
- data.dataPosition(),
- data.dataAvail());
- }
-
- notify(msg, ext1, ext2, &obj);
-
- return OK;
- }
-
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-status_t BnHDCP::onTransact(
- uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
- switch (code) {
- case HDCP_SET_OBSERVER:
- {
- CHECK_INTERFACE(IHDCP, data, reply);
-
- sp<IHDCPObserver> observer =
- interface_cast<IHDCPObserver>(data.readStrongBinder());
-
- reply->writeInt32(setObserver(observer));
- return OK;
- }
-
- case HDCP_INIT_ASYNC:
- {
- CHECK_INTERFACE(IHDCP, data, reply);
-
- const char *host = data.readCString();
- unsigned port = data.readInt32();
-
- reply->writeInt32(initAsync(host, port));
- return OK;
- }
-
- case HDCP_SHUTDOWN_ASYNC:
- {
- CHECK_INTERFACE(IHDCP, data, reply);
-
- reply->writeInt32(shutdownAsync());
- return OK;
- }
-
- case HDCP_GET_CAPS:
- {
- CHECK_INTERFACE(IHDCP, data, reply);
-
- reply->writeInt32(getCaps());
- return OK;
- }
-
- case HDCP_ENCRYPT:
- {
- CHECK_INTERFACE(IHDCP, data, reply);
-
- size_t size = data.readInt32();
- void *inData = NULL;
- // watch out for overflow
- if (size <= SIZE_MAX / 2) {
- inData = malloc(2 * size);
- }
- if (inData == NULL) {
- reply->writeInt32(ERROR_OUT_OF_RANGE);
- return OK;
- }
-
- void *outData = (uint8_t *)inData + size;
-
- status_t err = data.read(inData, size);
- if (err != OK) {
- free(inData);
- reply->writeInt32(err);
- return OK;
- }
-
- uint32_t streamCTR = data.readInt32();
- uint64_t inputCTR;
- err = encrypt(inData, size, streamCTR, &inputCTR, outData);
-
- reply->writeInt32(err);
-
- if (err == OK) {
- reply->writeInt64(inputCTR);
- reply->write(outData, size);
- }
-
- free(inData);
- inData = outData = NULL;
-
- return OK;
- }
-
- case HDCP_ENCRYPT_NATIVE:
- {
- CHECK_INTERFACE(IHDCP, data, reply);
-
- sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
- data.read(*graphicBuffer);
- size_t offset = data.readInt32();
- size_t size = data.readInt32();
- uint32_t streamCTR = data.readInt32();
- void *outData = NULL;
- uint64_t inputCTR;
-
- status_t err = ERROR_OUT_OF_RANGE;
-
- outData = malloc(size);
-
- if (outData != NULL) {
- err = encryptNative(graphicBuffer, offset, size,
- streamCTR, &inputCTR, outData);
- }
-
- reply->writeInt32(err);
-
- if (err == OK) {
- reply->writeInt64(inputCTR);
- reply->write(outData, size);
- }
-
- free(outData);
- outData = NULL;
-
- return OK;
- }
-
- case HDCP_DECRYPT:
- {
- CHECK_INTERFACE(IHDCP, data, reply);
-
- size_t size = data.readInt32();
- size_t bufSize = 2 * size;
-
- // watch out for overflow
- void *inData = NULL;
- if (bufSize > size) {
- inData = malloc(bufSize);
- }
-
- if (inData == NULL) {
- reply->writeInt32(ERROR_OUT_OF_RANGE);
- return OK;
- }
-
- void *outData = (uint8_t *)inData + size;
-
- data.read(inData, size);
-
- uint32_t streamCTR = data.readInt32();
- uint64_t inputCTR = data.readInt64();
- status_t err = decrypt(inData, size, streamCTR, inputCTR, outData);
-
- reply->writeInt32(err);
-
- if (err == OK) {
- reply->write(outData, size);
- }
-
- free(inData);
- inData = outData = NULL;
-
- return OK;
- }
-
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-} // namespace android
diff --git a/media/libmedia/IMediaCodecService.cpp b/media/libmedia/IMediaCodecService.cpp
deleted file mode 100644
index adfa93d..0000000
--- a/media/libmedia/IMediaCodecService.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
-**
-** Copyright 2015, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#define LOG_TAG "IMediaCodecService"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <binder/Parcel.h>
-#include <media/IMediaCodecService.h>
-
-namespace android {
-
-enum {
- GET_OMX = IBinder::FIRST_CALL_TRANSACTION,
- GET_OMX_STORE
-};
-
-class BpMediaCodecService : public BpInterface<IMediaCodecService>
-{
-public:
- explicit BpMediaCodecService(const sp<IBinder>& impl)
- : BpInterface<IMediaCodecService>(impl)
- {
- }
-
- virtual sp<IOMX> getOMX() {
- Parcel data, reply;
- data.writeInterfaceToken(IMediaCodecService::getInterfaceDescriptor());
- remote()->transact(GET_OMX, data, &reply);
- return interface_cast<IOMX>(reply.readStrongBinder());
- }
-
- virtual sp<IOMXStore> getOMXStore() {
- Parcel data, reply;
- data.writeInterfaceToken(IMediaCodecService::getInterfaceDescriptor());
- remote()->transact(GET_OMX_STORE, data, &reply);
- return interface_cast<IOMXStore>(reply.readStrongBinder());
- }
-
-};
-
-IMPLEMENT_META_INTERFACE(MediaCodecService, "android.media.IMediaCodecService");
-
-// ----------------------------------------------------------------------
-
-status_t BnMediaCodecService::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- switch (code) {
-
- case GET_OMX: {
- CHECK_INTERFACE(IMediaCodecService, data, reply);
- sp<IOMX> omx = getOMX();
- reply->writeStrongBinder(IInterface::asBinder(omx));
- return NO_ERROR;
- }
- case GET_OMX_STORE: {
- CHECK_INTERFACE(IMediaCodecService, data, reply);
- sp<IOMXStore> omxStore = getOMXStore();
- reply->writeStrongBinder(IInterface::asBinder(omxStore));
- return NO_ERROR;
- }
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-} // namespace android
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index a8a7b82..e9a6230 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -35,12 +35,9 @@
GETTRACKMETADATA,
GETMETADATA,
FLAGS,
- GETDRMTRACKINFO,
SETMEDIACAS,
- SETUID,
NAME,
- GETMETRICS,
- RELEASE,
+ GETMETRICS
};
class BpMediaExtractor : public BpInterface<IMediaExtractor> {
@@ -113,11 +110,6 @@
return 0;
}
- virtual char* getDrmTrackInfo(size_t trackID __unused, int *len __unused) {
- ALOGV("getDrmTrackInfo NOT IMPLEMENTED");
- return NULL;
- }
-
virtual status_t setMediaCas(const HInterfaceToken &casToken) {
ALOGV("setMediaCas");
@@ -132,21 +124,10 @@
return reply.readInt32();
}
- virtual void setUID(uid_t uid __unused) {
- ALOGV("setUID NOT IMPLEMENTED");
- }
-
virtual const char * name() {
ALOGV("name NOT IMPLEMENTED");
return NULL;
}
-
- virtual void release() {
- ALOGV("release");
- Parcel data, reply;
- data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
- remote()->transact(RELEASE, data, &reply);
- }
};
IMPLEMENT_META_INTERFACE(MediaExtractor, "android.media.IMediaExtractor");
@@ -224,12 +205,6 @@
reply->writeInt32(setMediaCas(casToken));
return OK;
}
- case RELEASE: {
- ALOGV("release");
- CHECK_INTERFACE(IMediaExtractor, data, reply);
- release();
- return OK;
- }
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IMediaExtractorService.cpp b/media/libmedia/IMediaExtractorService.cpp
index 7c0d08d..d7533ca 100644
--- a/media/libmedia/IMediaExtractorService.cpp
+++ b/media/libmedia/IMediaExtractorService.cpp
@@ -23,7 +23,7 @@
#include <sys/types.h>
#include <binder/Parcel.h>
#include <media/IMediaExtractorService.h>
-#include <media/stagefright/MediaExtractor.h>
+#include <media/MediaExtractor.h>
namespace android {
diff --git a/media/libmedia/IMediaHTTPService.cpp b/media/libmedia/IMediaHTTPService.cpp
index 062a07a..74d8ee8 100644
--- a/media/libmedia/IMediaHTTPService.cpp
+++ b/media/libmedia/IMediaHTTPService.cpp
@@ -34,7 +34,7 @@
: BpInterface<IMediaHTTPService>(impl) {
}
- virtual sp<IMediaHTTPConnection> makeHTTPConnection() {
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() {
Parcel data, reply;
data.writeInterfaceToken(
IMediaHTTPService::getInterfaceDescriptor());
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 5ea2e8b..590ba1a 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -68,6 +68,9 @@
SET_DATA_SOURCE_FD,
SET_DATA_SOURCE_CALLBACK,
GET_FRAME_AT_TIME,
+ GET_IMAGE_AT_INDEX,
+ GET_IMAGE_RECT_AT_INDEX,
+ GET_FRAME_AT_INDEX,
EXTRACT_ALBUM_ART,
EXTRACT_METADATA,
};
@@ -164,6 +167,80 @@
return interface_cast<IMemory>(reply.readStrongBinder());
}
+ sp<IMemory> getImageAtIndex(int index, int colorFormat, bool metaOnly, bool thumbnail)
+ {
+ ALOGV("getImageAtIndex: index %d, colorFormat(%d) metaOnly(%d) thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+ data.writeInt32(index);
+ data.writeInt32(colorFormat);
+ data.writeInt32(metaOnly);
+ data.writeInt32(thumbnail);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ sendSchedPolicy(data);
+#endif
+ remote()->transact(GET_IMAGE_AT_INDEX, data, &reply);
+ status_t ret = reply.readInt32();
+ if (ret != NO_ERROR) {
+ return NULL;
+ }
+ return interface_cast<IMemory>(reply.readStrongBinder());
+ }
+
+ sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom)
+ {
+ ALOGV("getImageRectAtIndex: index %d, colorFormat(%d) rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+ data.writeInt32(index);
+ data.writeInt32(colorFormat);
+ data.writeInt32(left);
+ data.writeInt32(top);
+ data.writeInt32(right);
+ data.writeInt32(bottom);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ sendSchedPolicy(data);
+#endif
+ remote()->transact(GET_IMAGE_RECT_AT_INDEX, data, &reply);
+ status_t ret = reply.readInt32();
+ if (ret != NO_ERROR) {
+ return NULL;
+ }
+ return interface_cast<IMemory>(reply.readStrongBinder());
+ }
+
+ status_t getFrameAtIndex(std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly)
+ {
+ ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
+ frameIndex, numFrames, colorFormat, metaOnly);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+ data.writeInt32(frameIndex);
+ data.writeInt32(numFrames);
+ data.writeInt32(colorFormat);
+ data.writeInt32(metaOnly);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ sendSchedPolicy(data);
+#endif
+ remote()->transact(GET_FRAME_AT_INDEX, data, &reply);
+ status_t ret = reply.readInt32();
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+ int retNumFrames = reply.readInt32();
+ if (retNumFrames < numFrames) {
+ numFrames = retNumFrames;
+ }
+ for (int i = 0; i < numFrames; i++) {
+ frames->push_back(interface_cast<IMemory>(reply.readStrongBinder()));
+ }
+ return OK;
+ }
+
sp<IMemory> extractAlbumArt()
{
Parcel data, reply;
@@ -300,6 +377,83 @@
#endif
return NO_ERROR;
} break;
+ case GET_IMAGE_AT_INDEX: {
+ CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+ int index = data.readInt32();
+ int colorFormat = data.readInt32();
+ bool metaOnly = (data.readInt32() != 0);
+ bool thumbnail = (data.readInt32() != 0);
+ ALOGV("getImageAtIndex: index(%d), colorFormat(%d), metaOnly(%d), thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ setSchedPolicy(data);
+#endif
+ sp<IMemory> bitmap = getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
+ if (bitmap != 0) { // Don't send NULL across the binder interface
+ reply->writeInt32(NO_ERROR);
+ reply->writeStrongBinder(IInterface::asBinder(bitmap));
+ } else {
+ reply->writeInt32(UNKNOWN_ERROR);
+ }
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ restoreSchedPolicy();
+#endif
+ return NO_ERROR;
+ } break;
+
+ case GET_IMAGE_RECT_AT_INDEX: {
+ CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+ int index = data.readInt32();
+ int colorFormat = data.readInt32();
+ int left = data.readInt32();
+ int top = data.readInt32();
+ int right = data.readInt32();
+ int bottom = data.readInt32();
+ ALOGV("getImageRectAtIndex: index(%d), colorFormat(%d), rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ setSchedPolicy(data);
+#endif
+ sp<IMemory> bitmap = getImageRectAtIndex(
+ index, colorFormat, left, top, right, bottom);
+ if (bitmap != 0) { // Don't send NULL across the binder interface
+ reply->writeInt32(NO_ERROR);
+ reply->writeStrongBinder(IInterface::asBinder(bitmap));
+ } else {
+ reply->writeInt32(UNKNOWN_ERROR);
+ }
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ restoreSchedPolicy();
+#endif
+ return NO_ERROR;
+ } break;
+
+ case GET_FRAME_AT_INDEX: {
+ CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+ int frameIndex = data.readInt32();
+ int numFrames = data.readInt32();
+ int colorFormat = data.readInt32();
+ bool metaOnly = (data.readInt32() != 0);
+ ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
+ frameIndex, numFrames, colorFormat, metaOnly);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ setSchedPolicy(data);
+#endif
+ std::vector<sp<IMemory> > frames;
+ status_t err = getFrameAtIndex(
+ &frames, frameIndex, numFrames, colorFormat, metaOnly);
+ reply->writeInt32(err);
+ if (OK == err) {
+ reply->writeInt32(frames.size());
+ for (size_t i = 0; i < frames.size(); i++) {
+ reply->writeStrongBinder(IInterface::asBinder(frames[i]));
+ }
+ }
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ restoreSchedPolicy();
+#endif
+ return NO_ERROR;
+ } break;
case EXTRACT_ALBUM_ART: {
CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 3996227..e2eccdd 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -35,6 +35,8 @@
namespace android {
+using media::VolumeShaper;
+
enum {
DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
SET_DATA_SOURCE_URL,
@@ -42,7 +44,7 @@
SET_DATA_SOURCE_STREAM,
SET_DATA_SOURCE_CALLBACK,
SET_BUFFERING_SETTINGS,
- GET_DEFAULT_BUFFERING_SETTINGS,
+ GET_BUFFERING_SETTINGS,
PREPARE_ASYNC,
START,
STOP,
@@ -56,6 +58,7 @@
GET_CURRENT_POSITION,
GET_DURATION,
RESET,
+ NOTIFY_AT,
SET_AUDIO_STREAM_TYPE,
SET_LOOPING,
SET_VOLUME,
@@ -75,6 +78,10 @@
// Modular DRM
PREPARE_DRM,
RELEASE_DRM,
+ // AudioRouting
+ SET_OUTPUT_DEVICE,
+ GET_ROUTED_DEVICE_ID,
+ ENABLE_AUDIO_DEVICE_CALLBACK,
};
// ModDrm helpers
@@ -177,14 +184,14 @@
return reply.readInt32();
}
- status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */)
+ status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */)
{
if (buffering == nullptr) {
return BAD_VALUE;
}
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
- remote()->transact(GET_DEFAULT_BUFFERING_SETTINGS, data, &reply);
+ remote()->transact(GET_BUFFERING_SETTINGS, data, &reply);
status_t err = reply.readInt32();
if (err == OK) {
err = buffering->readFromParcel(&reply);
@@ -326,6 +333,15 @@
return reply.readInt32();
}
+ status_t notifyAt(int64_t mediaTimeUs)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ data.writeInt64(mediaTimeUs);
+ remote()->transact(NOTIFY_AT, data, &reply);
+ return reply.readInt32();
+ }
+
status_t setAudioStreamType(audio_stream_type_t stream)
{
Parcel data, reply;
@@ -509,7 +525,7 @@
return nullptr;
}
sp<VolumeShaper::State> state = new VolumeShaper::State();
- status = state->readFromParcel(reply);
+ status = state->readFromParcel(&reply);
if (status != NO_ERROR) {
return nullptr;
}
@@ -547,6 +563,59 @@
return reply.readInt32();
}
+
+ status_t setOutputDevice(audio_port_handle_t deviceId)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+ data.writeInt32(deviceId);
+
+ status_t status = remote()->transact(SET_OUTPUT_DEVICE, data, &reply);
+ if (status != OK) {
+ ALOGE("setOutputDevice: binder call failed: %d", status);
+ return status;
+ }
+
+ return reply.readInt32();
+ }
+
+ status_t getRoutedDeviceId(audio_port_handle_t* deviceId)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(GET_ROUTED_DEVICE_ID, data, &reply);
+ if (status != OK) {
+ ALOGE("getRoutedDeviceid: binder call failed: %d", status);
+ *deviceId = AUDIO_PORT_HANDLE_NONE;
+ return status;
+ }
+
+ status = reply.readInt32();
+ if (status != NO_ERROR) {
+ *deviceId = AUDIO_PORT_HANDLE_NONE;
+ } else {
+ *deviceId = reply.readInt32();
+ }
+ return status;
+ }
+
+ status_t enableAudioDeviceCallback(bool enabled)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+ data.writeBool(enabled);
+
+ status_t status = remote()->transact(ENABLE_AUDIO_DEVICE_CALLBACK, data, &reply);
+ if (status != OK) {
+ ALOGE("enableAudioDeviceCallback: binder call failed: %d, %d", enabled, status);
+ return status;
+ }
+
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer");
@@ -631,10 +700,10 @@
reply->writeInt32(setBufferingSettings(buffering));
return NO_ERROR;
} break;
- case GET_DEFAULT_BUFFERING_SETTINGS: {
+ case GET_BUFFERING_SETTINGS: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
BufferingSettings buffering;
- status_t err = getDefaultBufferingSettings(&buffering);
+ status_t err = getBufferingSettings(&buffering);
reply->writeInt32(err);
if (err == OK) {
buffering.writeToParcel(reply);
@@ -744,6 +813,11 @@
reply->writeInt32(reset());
return NO_ERROR;
} break;
+ case NOTIFY_AT: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ reply->writeInt32(notifyAt(data.readInt64()));
+ return NO_ERROR;
+ } break;
case SET_AUDIO_STREAM_TYPE: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
reply->writeInt32(setAudioStreamType((audio_stream_type_t) data.readInt32()));
@@ -851,14 +925,14 @@
status_t status = data.readInt32(&present);
if (status == NO_ERROR && present != 0) {
configuration = new VolumeShaper::Configuration();
- status = configuration->readFromParcel(data);
+ status = configuration->readFromParcel(&data);
}
if (status == NO_ERROR) {
status = data.readInt32(&present);
}
if (status == NO_ERROR && present != 0) {
operation = new VolumeShaper::Operation();
- status = operation->readFromParcel(data);
+ status = operation->readFromParcel(&data);
}
if (status == NO_ERROR) {
status = (status_t)applyVolumeShaper(configuration, operation);
@@ -899,6 +973,41 @@
reply->writeInt32(result);
return OK;
}
+
+ // AudioRouting
+ case SET_OUTPUT_DEVICE: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ int deviceId;
+ status_t status = data.readInt32(&deviceId);
+ if (status == NO_ERROR) {
+ reply->writeInt32(setOutputDevice(deviceId));
+ } else {
+ reply->writeInt32(BAD_VALUE);
+ }
+ return NO_ERROR;
+ }
+ case GET_ROUTED_DEVICE_ID: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ audio_port_handle_t deviceId;
+ status_t ret = getRoutedDeviceId(&deviceId);
+ reply->writeInt32(ret);
+ if (ret == NO_ERROR) {
+ reply->writeInt32(deviceId);
+ }
+ return NO_ERROR;
+ } break;
+ case ENABLE_AUDIO_DEVICE_CALLBACK: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ bool enabled;
+ status_t status = data.readBool(&enabled);
+ if (status == NO_ERROR) {
+ reply->writeInt32(enableAudioDeviceCallback(enabled));
+ } else {
+ reply->writeInt32(BAD_VALUE);
+ }
+ return NO_ERROR;
+ } break;
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index a01852c..aca7ad9 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -20,7 +20,6 @@
#include <binder/Parcel.h>
#include <binder/IMemory.h>
-#include <media/IHDCP.h>
#include <media/IMediaCodecList.h>
#include <media/IMediaHTTPService.h>
#include <media/IMediaPlayerService.h>
@@ -39,8 +38,6 @@
CREATE = IBinder::FIRST_CALL_TRANSACTION,
CREATE_MEDIA_RECORDER,
CREATE_METADATA_RETRIEVER,
- GET_OMX,
- MAKE_HDCP,
ADD_BATTERY_DATA,
PULL_BATTERY_DATA,
LISTEN_FOR_REMOTE_DISPLAY,
@@ -83,21 +80,6 @@
return interface_cast<IMediaRecorder>(reply.readStrongBinder());
}
- virtual sp<IOMX> getOMX() {
- Parcel data, reply;
- data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
- remote()->transact(GET_OMX, data, &reply);
- return interface_cast<IOMX>(reply.readStrongBinder());
- }
-
- virtual sp<IHDCP> makeHDCP(bool createEncryptionModule) {
- Parcel data, reply;
- data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
- data.writeInt32(createEncryptionModule);
- remote()->transact(MAKE_HDCP, data, &reply);
- return interface_cast<IHDCP>(reply.readStrongBinder());
- }
-
virtual void addBatteryData(uint32_t params) {
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
@@ -161,19 +143,6 @@
reply->writeStrongBinder(IInterface::asBinder(retriever));
return NO_ERROR;
} break;
- case GET_OMX: {
- CHECK_INTERFACE(IMediaPlayerService, data, reply);
- sp<IOMX> omx = getOMX();
- reply->writeStrongBinder(IInterface::asBinder(omx));
- return NO_ERROR;
- } break;
- case MAKE_HDCP: {
- CHECK_INTERFACE(IMediaPlayerService, data, reply);
- bool createEncryptionModule = data.readInt32();
- sp<IHDCP> hdcp = makeHDCP(createEncryptionModule);
- reply->writeStrongBinder(IInterface::asBinder(hdcp));
- return NO_ERROR;
- } break;
case ADD_BATTERY_DATA: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
uint32_t params = data.readInt32();
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 5282352..b2c91c4 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -61,6 +61,10 @@
PAUSE,
RESUME,
GET_METRICS,
+ SET_INPUT_DEVICE,
+ GET_ROUTED_DEVICE_ID,
+ ENABLE_AUDIO_DEVICE_CALLBACK,
+ GET_ACTIVE_MICROPHONES,
};
@@ -337,6 +341,72 @@
remote()->transact(RELEASE, data, &reply);
return reply.readInt32();
}
+
+ status_t setInputDevice(audio_port_handle_t deviceId)
+ {
+ ALOGV("setInputDevice");
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ data.writeInt32(deviceId);
+
+ status_t status = remote()->transact(SET_INPUT_DEVICE, data, &reply);
+ if (status != OK) {
+ ALOGE("setInputDevice binder call failed: %d", status);
+ return status;
+ }
+ return reply.readInt32();
+ }
+
+ audio_port_handle_t getRoutedDeviceId(audio_port_handle_t *deviceId)
+ {
+ ALOGV("getRoutedDeviceId");
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(GET_ROUTED_DEVICE_ID, data, &reply);
+ if (status != OK) {
+ ALOGE("getRoutedDeviceid binder call failed: %d", status);
+ *deviceId = AUDIO_PORT_HANDLE_NONE;
+ return status;
+ }
+
+ status = reply.readInt32();
+ if (status != NO_ERROR) {
+ *deviceId = AUDIO_PORT_HANDLE_NONE;
+ } else {
+ *deviceId = reply.readInt32();
+ }
+ return status;
+ }
+
+ status_t enableAudioDeviceCallback(bool enabled)
+ {
+ ALOGV("enableAudioDeviceCallback");
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ data.writeBool(enabled);
+ status_t status = remote()->transact(ENABLE_AUDIO_DEVICE_CALLBACK, data, &reply);
+ if (status != OK) {
+ ALOGE("enableAudioDeviceCallback binder call failed: %d, %d", enabled, status);
+ return status;
+ }
+ return reply.readInt32();
+ }
+
+ status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones)
+ {
+ ALOGV("getActiveMicrophones");
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ status_t status = remote()->transact(GET_ACTIVE_MICROPHONES, data, &reply);
+ if (status != OK
+ || (status = (status_t)reply.readInt32()) != NO_ERROR) {
+ return status;
+ }
+ status = reply.readParcelableVector(activeMicrophones);
+ return status;
+ }
+
};
IMPLEMENT_META_INTERFACE(MediaRecorder, "android.media.IMediaRecorder");
@@ -543,6 +613,54 @@
}
return NO_ERROR;
} break;
+ case SET_INPUT_DEVICE: {
+ ALOGV("SET_INPUT_DEVICE");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ audio_port_handle_t deviceId;
+ status_t status = data.readInt32(&deviceId);
+ if (status == NO_ERROR) {
+ reply->writeInt32(setInputDevice(deviceId));
+ } else {
+ reply->writeInt32(BAD_VALUE);
+ }
+ return NO_ERROR;
+ } break;
+ case GET_ROUTED_DEVICE_ID: {
+ ALOGV("GET_ROUTED_DEVICE_ID");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ audio_port_handle_t deviceId;
+ status_t status = getRoutedDeviceId(&deviceId);
+ reply->writeInt32(status);
+ if (status == NO_ERROR) {
+ reply->writeInt32(deviceId);
+ }
+ return NO_ERROR;
+ } break;
+ case ENABLE_AUDIO_DEVICE_CALLBACK: {
+ ALOGV("ENABLE_AUDIO_DEVICE_CALLBACK");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ bool enabled;
+ status_t status = data.readBool(&enabled);
+ if (status == NO_ERROR) {
+ reply->writeInt32(enableAudioDeviceCallback(enabled));
+ } else {
+ reply->writeInt32(BAD_VALUE);
+ }
+ return NO_ERROR;
+ } break;
+ case GET_ACTIVE_MICROPHONES: {
+ ALOGV("GET_ACTIVE_MICROPHONES");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ std::vector<media::MicrophoneInfo> activeMicrophones;
+ status_t status = getActiveMicrophones(&activeMicrophones);
+ reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+ reply->writeParcelableVector(activeMicrophones);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index 724b3a0..f185fd4 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -26,7 +26,7 @@
#include <media/IMediaSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaSource.h>
+#include <media/MediaSource.h>
#include <media/stagefright/MetaData.h>
namespace android {
@@ -113,8 +113,9 @@
return NULL;
}
- virtual status_t read(MediaBuffer **buffer, const ReadOptions *options) {
- Vector<MediaBuffer *> buffers;
+ virtual status_t read(MediaBufferBase **buffer,
+ const MediaSource::ReadOptions *options) {
+ Vector<MediaBufferBase *> buffers;
status_t ret = readMultiple(&buffers, 1 /* maxNumBuffers */, options);
*buffer = buffers.size() == 0 ? nullptr : buffers[0];
ALOGV("read status %d, bufferCount %u, sinceStop %u",
@@ -123,7 +124,8 @@
}
virtual status_t readMultiple(
- Vector<MediaBuffer *> *buffers, uint32_t maxNumBuffers, const ReadOptions *options) {
+ Vector<MediaBufferBase *> *buffers, uint32_t maxNumBuffers,
+ const MediaSource::ReadOptions *options) {
ALOGV("readMultiple");
if (buffers == NULL || !buffers->isEmpty()) {
return BAD_VALUE;
@@ -169,13 +171,13 @@
size_t length = reply.readInt32();
buf = new RemoteMediaBufferWrapper(mem);
buf->set_range(offset, length);
- buf->meta_data()->updateFromParcel(reply);
+ buf->meta_data().updateFromParcel(reply);
} else { // INLINE_BUFFER
int32_t len = reply.readInt32();
ALOGV("INLINE_BUFFER status %d and len %d", ret, len);
buf = new MediaBuffer(len);
reply.read(buf->data(), len);
- buf->meta_data()->updateFromParcel(reply);
+ buf->meta_data().updateFromParcel(reply);
}
buffers->push_back(buf);
++bufferCount;
@@ -210,11 +212,6 @@
return remote()->transact(PAUSE, data, &reply);
}
- virtual status_t setBuffers(const Vector<MediaBuffer *> & buffers __unused) {
- ALOGV("setBuffers NOT IMPLEMENTED");
- return ERROR_UNSUPPORTED; // default
- }
-
private:
uint32_t mBuffersSinceStop; // Buffer tracking variable
@@ -330,7 +327,7 @@
}
// Get read options, if any.
- ReadOptions opts;
+ MediaSource::ReadOptions opts;
uint32_t len;
const bool useOptions =
data.readUint32(&len) == NO_ERROR
@@ -344,7 +341,7 @@
uint32_t bufferCount = 0;
for (; bufferCount < maxNumBuffers; ++bufferCount, ++mBuffersSinceStop) {
MediaBuffer *buf = nullptr;
- ret = read(&buf, useOptions ? &opts : nullptr);
+ ret = read((MediaBufferBase **)&buf, useOptions ? &opts : nullptr);
opts.clearNonPersistent(); // Remove options that only apply to first buffer.
if (ret != NO_ERROR || buf == nullptr) {
break;
@@ -367,7 +364,7 @@
} else {
ALOGD("Large buffer %zu without IMemory!", length);
ret = mGroup->acquire_buffer(
- &transferBuf, false /* nonBlocking */, length);
+ (MediaBufferBase **)&transferBuf, false /* nonBlocking */, length);
if (ret != OK
|| transferBuf == nullptr
|| transferBuf->mMemory == nullptr) {
@@ -411,7 +408,7 @@
}
reply->writeInt32(offset);
reply->writeInt32(length);
- buf->meta_data()->writeToParcel(*reply);
+ buf->meta_data().writeToParcel(*reply);
transferBuf->addRemoteRefcount(1);
if (transferBuf != buf) {
transferBuf->release(); // release local ref
@@ -424,7 +421,7 @@
buf, buf->mMemory->size(), length);
reply->writeInt32(INLINE_BUFFER);
reply->writeByteArray(length, (uint8_t*)buf->data() + offset);
- buf->meta_data()->writeToParcel(*reply);
+ buf->meta_data().writeToParcel(*reply);
inlineTransferSize += length;
if (inlineTransferSize > kInlineMaxTransfer) {
maxNumBuffers = 0; // stop readMultiple if inline transfer is too large.
@@ -449,58 +446,5 @@
}
}
-////////////////////////////////////////////////////////////////////////////////
-
-IMediaSource::ReadOptions::ReadOptions() {
- reset();
-}
-
-void IMediaSource::ReadOptions::reset() {
- mOptions = 0;
- mSeekTimeUs = 0;
- mLatenessUs = 0;
- mNonBlocking = false;
-}
-
-void IMediaSource::ReadOptions::setNonBlocking() {
- mNonBlocking = true;
-}
-
-void IMediaSource::ReadOptions::clearNonBlocking() {
- mNonBlocking = false;
-}
-
-bool IMediaSource::ReadOptions::getNonBlocking() const {
- return mNonBlocking;
-}
-
-void IMediaSource::ReadOptions::setSeekTo(int64_t time_us, SeekMode mode) {
- mOptions |= kSeekTo_Option;
- mSeekTimeUs = time_us;
- mSeekMode = mode;
-}
-
-void IMediaSource::ReadOptions::clearSeekTo() {
- mOptions &= ~kSeekTo_Option;
- mSeekTimeUs = 0;
- mSeekMode = SEEK_CLOSEST_SYNC;
-}
-
-bool IMediaSource::ReadOptions::getSeekTo(
- int64_t *time_us, SeekMode *mode) const {
- *time_us = mSeekTimeUs;
- *mode = mSeekMode;
- return (mOptions & kSeekTo_Option) != 0;
-}
-
-void IMediaSource::ReadOptions::setLateBy(int64_t lateness_us) {
- mLatenessUs = lateness_us;
-}
-
-int64_t IMediaSource::ReadOptions::getLateBy() const {
- return mLatenessUs;
-}
-
-
} // namespace android
diff --git a/media/libmedia/IOMXStore.cpp b/media/libmedia/IOMXStore.cpp
deleted file mode 100644
index 4948f1a..0000000
--- a/media/libmedia/IOMXStore.cpp
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Copyright (c) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "IOMXStore"
-
-#include <utils/Log.h>
-
-#include <media/IOMX.h>
-#include <media/IOMXStore.h>
-#include <android/hardware/media/omx/1.0/IOmxStore.h>
-
-#include <binder/IInterface.h>
-#include <binder/IBinder.h>
-#include <binder/Parcel.h>
-
-#include <vector>
-#include <string>
-
-namespace android {
-
-namespace {
-
-enum {
- CONNECT = IBinder::FIRST_CALL_TRANSACTION,
- LIST_SERVICE_ATTRIBUTES,
- GET_NODE_PREFIX,
- LIST_ROLES,
- GET_OMX,
-};
-
-// Forward declarations of std::vector<T> <-> Parcel conversion funcitons that
-// depend on writeToParcel() and readToParcel() for T <-> Parcel.
-
-template <typename T>
-status_t writeToParcel(const std::vector<T>& v, Parcel* p);
-
-template <typename T>
-status_t readFromParcel(std::vector<T>* v, const Parcel& p);
-
-// std::string <-> Parcel
-
-status_t writeToParcel(const std::string& s, Parcel* p) {
- if (s.size() > INT32_MAX) {
- return BAD_VALUE;
- }
- return p->writeByteArray(
- s.size(), reinterpret_cast<const uint8_t*>(s.c_str()));
-}
-
-status_t readFromParcel(std::string* s, const Parcel& p) {
- int32_t len;
- status_t status = p.readInt32(&len);
- if (status != NO_ERROR) {
- return status;
- } else if ((len < 0) || (static_cast<uint64_t>(len) > SIZE_MAX)) {
- return BAD_VALUE;
- }
- s->resize(len);
- if (len == 0) {
- return NO_ERROR;
- }
- return p.read(static_cast<void*>(&s->front()), static_cast<size_t>(len));
-}
-
-// IOMXStore::Attribute <-> Parcel
-
-status_t writeToParcel(const IOMXStore::Attribute& a, Parcel* p) {
- status_t status = writeToParcel(a.key, p);
- if (status != NO_ERROR) {
- return status;
- }
- return writeToParcel(a.value, p);
-}
-
-status_t readFromParcel(IOMXStore::Attribute* a, const Parcel& p) {
- status_t status = readFromParcel(&(a->key), p);
- if (status != NO_ERROR) {
- return status;
- }
- return readFromParcel(&(a->value), p);
-}
-
-// IOMXStore::NodeInfo <-> Parcel
-
-status_t writeToParcel(const IOMXStore::NodeInfo& n, Parcel* p) {
- status_t status = writeToParcel(n.name, p);
- if (status != NO_ERROR) {
- return status;
- }
- status = writeToParcel(n.owner, p);
- if (status != NO_ERROR) {
- return status;
- }
- return writeToParcel(n.attributes, p);
-}
-
-status_t readFromParcel(IOMXStore::NodeInfo* n, const Parcel& p) {
- status_t status = readFromParcel(&(n->name), p);
- if (status != NO_ERROR) {
- return status;
- }
- status = readFromParcel(&(n->owner), p);
- if (status != NO_ERROR) {
- return status;
- }
- return readFromParcel(&(n->attributes), p);
-}
-
-// IOMXStore::RoleInfo <-> Parcel
-
-status_t writeToParcel(const IOMXStore::RoleInfo& r, Parcel* p) {
- status_t status = writeToParcel(r.role, p);
- if (status != NO_ERROR) {
- return status;
- }
- status = writeToParcel(r.type, p);
- if (status != NO_ERROR) {
- return status;
- }
- status = p->writeBool(r.isEncoder);
- if (status != NO_ERROR) {
- return status;
- }
- status = p->writeBool(r.preferPlatformNodes);
- if (status != NO_ERROR) {
- return status;
- }
- return writeToParcel(r.nodes, p);
-}
-
-status_t readFromParcel(IOMXStore::RoleInfo* r, const Parcel& p) {
- status_t status = readFromParcel(&(r->role), p);
- if (status != NO_ERROR) {
- return status;
- }
- status = readFromParcel(&(r->type), p);
- if (status != NO_ERROR) {
- return status;
- }
- status = p.readBool(&(r->isEncoder));
- if (status != NO_ERROR) {
- return status;
- }
- status = p.readBool(&(r->preferPlatformNodes));
- if (status != NO_ERROR) {
- return status;
- }
- return readFromParcel(&(r->nodes), p);
-}
-
-// std::vector<NodeInfo> <-> Parcel
-// std::vector<RoleInfo> <-> Parcel
-
-template <typename T>
-status_t writeToParcel(const std::vector<T>& v, Parcel* p) {
- status_t status = p->writeVectorSize(v);
- if (status != NO_ERROR) {
- return status;
- }
- for (const T& x : v) {
- status = writeToParcel(x, p);
- if (status != NO_ERROR) {
- return status;
- }
- }
- return NO_ERROR;
-}
-
-template <typename T>
-status_t readFromParcel(std::vector<T>* v, const Parcel& p) {
- status_t status = p.resizeOutVector(v);
- if (status != NO_ERROR) {
- return status;
- }
- for (T& x : *v) {
- status = readFromParcel(&x, p);
- if (status != NO_ERROR) {
- return status;
- }
- }
- return NO_ERROR;
-}
-
-} // unnamed namespace
-
-////////////////////////////////////////////////////////////////////////////////
-
-class BpOMXStore : public BpInterface<IOMXStore> {
-public:
- explicit BpOMXStore(const sp<IBinder> &impl)
- : BpInterface<IOMXStore>(impl) {
- }
-
- status_t listServiceAttributes(
- std::vector<Attribute>* attributes) override {
- Parcel data, reply;
- status_t status;
- status = data.writeInterfaceToken(IOMXStore::getInterfaceDescriptor());
- if (status != NO_ERROR) {
- return status;
- }
- status = remote()->transact(LIST_SERVICE_ATTRIBUTES, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- return readFromParcel(attributes, reply);
- }
-
- status_t getNodePrefix(std::string* prefix) override {
- Parcel data, reply;
- status_t status;
- status = data.writeInterfaceToken(IOMXStore::getInterfaceDescriptor());
- if (status != NO_ERROR) {
- return status;
- }
- status = remote()->transact(GET_NODE_PREFIX, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- return readFromParcel(prefix, reply);
- }
-
- status_t listRoles(std::vector<RoleInfo>* roleList) override {
- Parcel data, reply;
- status_t status;
- status = data.writeInterfaceToken(IOMXStore::getInterfaceDescriptor());
- if (status != NO_ERROR) {
- return status;
- }
- status = remote()->transact(LIST_ROLES, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- return readFromParcel(roleList, reply);
- }
-
- status_t getOmx(const std::string& name, sp<IOMX>* omx) override {
- Parcel data, reply;
- status_t status;
- status = data.writeInterfaceToken(IOMXStore::getInterfaceDescriptor());
- if (status != NO_ERROR) {
- return status;
- }
- status = writeToParcel(name, &data);
- if (status != NO_ERROR) {
- return status;
- }
- status = remote()->transact(GET_OMX, data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- return reply.readStrongBinder(omx);
- }
-
-};
-
-IMPLEMENT_META_INTERFACE(OMXStore, "android.hardware.IOMXStore");
-
-////////////////////////////////////////////////////////////////////////////////
-
-#define CHECK_OMX_INTERFACE(interface, data, reply) \
- do { if (!(data).enforceInterface(interface::getInterfaceDescriptor())) { \
- ALOGW("Call incorrectly routed to " #interface); \
- return PERMISSION_DENIED; \
- } } while (0)
-
-status_t BnOMXStore::onTransact(
- uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
- switch (code) {
- case LIST_SERVICE_ATTRIBUTES: {
- CHECK_OMX_INTERFACE(IOMXStore, data, reply);
- status_t status;
- std::vector<Attribute> attributes;
-
- status = listServiceAttributes(&attributes);
- if (status != NO_ERROR) {
- ALOGE("listServiceAttributes() fails with status %d",
- static_cast<int>(status));
- return NO_ERROR;
- }
- status = writeToParcel(attributes, reply);
- if (status != NO_ERROR) {
- ALOGE("listServiceAttributes() fails to send reply");
- return NO_ERROR;
- }
- return NO_ERROR;
- }
- case GET_NODE_PREFIX: {
- CHECK_OMX_INTERFACE(IOMXStore, data, reply);
- status_t status;
- std::string prefix;
-
- status = getNodePrefix(&prefix);
- if (status != NO_ERROR) {
- ALOGE("getNodePrefix() fails with status %d",
- static_cast<int>(status));
- return NO_ERROR;
- }
- status = writeToParcel(prefix, reply);
- if (status != NO_ERROR) {
- ALOGE("getNodePrefix() fails to send reply");
- return NO_ERROR;
- }
- return NO_ERROR;
- }
- case LIST_ROLES: {
- CHECK_OMX_INTERFACE(IOMXStore, data, reply);
- status_t status;
- std::vector<RoleInfo> roleList;
-
- status = listRoles(&roleList);
- if (status != NO_ERROR) {
- ALOGE("listRoles() fails with status %d",
- static_cast<int>(status));
- return NO_ERROR;
- }
- status = writeToParcel(roleList, reply);
- if (status != NO_ERROR) {
- ALOGE("listRoles() fails to send reply");
- return NO_ERROR;
- }
- return NO_ERROR;
- }
- case GET_OMX: {
- CHECK_OMX_INTERFACE(IOMXStore, data, reply);
- status_t status;
- std::string name;
- sp<IOMX> omx;
-
- status = readFromParcel(&name, data);
- if (status != NO_ERROR) {
- ALOGE("getOmx() fails to retrieve name");
- return NO_ERROR;
- }
- status = getOmx(name, &omx);
- if (status != NO_ERROR) {
- ALOGE("getOmx() fails with status %d",
- static_cast<int>(status));
- return NO_ERROR;
- }
- status = reply->writeStrongBinder(IInterface::asBinder(omx));
- if (status != NO_ERROR) {
- ALOGE("getOmx() fails to send reply");
- return NO_ERROR;
- }
- return NO_ERROR;
- }
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-} // namespace android
diff --git a/media/libmedia/IStreamSource.cpp b/media/libmedia/IStreamSource.cpp
index ba0a272..e11bc74 100644
--- a/media/libmedia/IStreamSource.cpp
+++ b/media/libmedia/IStreamSource.cpp
@@ -20,24 +20,13 @@
#include <media/IStreamSource.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/MediaKeys.h>
#include <binder/IMemory.h>
#include <binder/Parcel.h>
namespace android {
-// static
-const char *const IStreamListener::kKeyResumeAtPTS = "resume-at-PTS";
-
-// static
-const char *const IStreamListener::kKeyDiscontinuityMask = "discontinuity-mask";
-
-// static
-const char *const IStreamListener::kKeyMediaTimeUs = "media-time-us";
-
-// static
-const char *const IStreamListener::kKeyRecentMediaTimeUs = "recent-media-time-us";
-
enum {
// IStreamSource
SET_LISTENER = IBinder::FIRST_CALL_TRANSACTION,
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 34deb59..0d3c1ba 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -36,6 +36,7 @@
mPaused(false),
mMaxTracks(maxTracks),
mEasData(NULL),
+ mIoWrapper(NULL),
mTrackBufferSize(trackBufferSize)
{
ALOGV("JetPlayer constructor");
@@ -50,7 +51,6 @@
{
ALOGV("~JetPlayer");
release();
-
}
//-------------------------------------------------------------------------------------------------
@@ -138,7 +138,8 @@
JET_Shutdown(mEasData);
EAS_Shutdown(mEasData);
}
- mIoWrapper.clear();
+ delete mIoWrapper;
+ mIoWrapper = NULL;
if (mAudioTrack != 0) {
mAudioTrack->stop();
mAudioTrack->flush();
@@ -329,6 +330,7 @@
Mutex::Autolock lock(mMutex);
+ delete mIoWrapper;
mIoWrapper = new MidiIoWrapper(path);
EAS_RESULT result = JET_OpenFile(mEasData, mIoWrapper->getLocator());
@@ -347,6 +349,7 @@
Mutex::Autolock lock(mMutex);
+ delete mIoWrapper;
mIoWrapper = new MidiIoWrapper(fd, offset, length);
EAS_RESULT result = JET_OpenFile(mEasData, mIoWrapper->getLocator());
diff --git a/media/libmedia/MediaCodecBuffer.cpp b/media/libmedia/MediaCodecBuffer.cpp
index 59d6164..68ae3ea 100644
--- a/media/libmedia/MediaCodecBuffer.cpp
+++ b/media/libmedia/MediaCodecBuffer.cpp
@@ -21,15 +21,13 @@
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/MediaBufferBase.h>
namespace android {
MediaCodecBuffer::MediaCodecBuffer(const sp<AMessage> &format, const sp<ABuffer> &buffer)
: mMeta(new AMessage),
mFormat(format),
- mBuffer(buffer),
- mMediaBufferBase(nullptr) {
+ mBuffer(buffer) {
}
// ABuffer-like interface
@@ -58,20 +56,6 @@
return OK;
}
-MediaBufferBase *MediaCodecBuffer::getMediaBufferBase() {
- if (mMediaBufferBase != NULL) {
- mMediaBufferBase->add_ref();
- }
- return mMediaBufferBase;
-}
-
-void MediaCodecBuffer::setMediaBufferBase(MediaBufferBase *mediaBuffer) {
- if (mMediaBufferBase != NULL) {
- mMediaBufferBase->release();
- }
- mMediaBufferBase = mediaBuffer;
-}
-
sp<AMessage> MediaCodecBuffer::meta() {
return mMeta;
}
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index a570ffe..5308e1c 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -141,6 +141,10 @@
return mIsEncoder;
}
+uint32_t MediaCodecInfo::rank() const {
+ return mRank;
+}
+
void MediaCodecInfo::getSupportedMimes(Vector<AString> *mimes) const {
mimes->clear();
for (size_t ix = 0; ix < mCaps.size(); ix++) {
@@ -170,10 +174,12 @@
AString name = AString::FromParcel(parcel);
AString owner = AString::FromParcel(parcel);
bool isEncoder = static_cast<bool>(parcel.readInt32());
+ uint32_t rank = parcel.readUint32();
sp<MediaCodecInfo> info = new MediaCodecInfo;
info->mName = name;
info->mOwner = owner;
info->mIsEncoder = isEncoder;
+ info->mRank = rank;
size_t size = static_cast<size_t>(parcel.readInt32());
for (size_t i = 0; i < size; i++) {
AString mime = AString::FromParcel(parcel);
@@ -191,6 +197,7 @@
mName.writeToParcel(parcel);
mOwner.writeToParcel(parcel);
parcel->writeInt32(mIsEncoder);
+ parcel->writeUint32(mRank);
parcel->writeInt32(mCaps.size());
for (size_t i = 0; i < mCaps.size(); i++) {
mCaps.keyAt(i).writeToParcel(parcel);
@@ -210,7 +217,7 @@
return -1;
}
-MediaCodecInfo::MediaCodecInfo() {
+MediaCodecInfo::MediaCodecInfo() : mRank(0x100) {
}
void MediaCodecInfoWriter::setName(const char* name) {
@@ -225,6 +232,10 @@
mInfo->mIsEncoder = isEncoder;
}
+void MediaCodecInfoWriter::setRank(uint32_t rank) {
+ mInfo->mRank = rank;
+}
+
std::unique_ptr<MediaCodecInfo::CapabilitiesWriter>
MediaCodecInfoWriter::addMime(const char *mime) {
ssize_t ix = mInfo->getCapabilityIndex(mime);
diff --git a/media/libmedia/MediaDefs.cpp b/media/libmedia/MediaDefs.cpp
deleted file mode 100644
index 544a6ae..0000000
--- a/media/libmedia/MediaDefs.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/MediaDefs.h>
-
-namespace android {
-
-const char *MEDIA_MIMETYPE_IMAGE_JPEG = "image/jpeg";
-
-const char *MEDIA_MIMETYPE_VIDEO_VP8 = "video/x-vnd.on2.vp8";
-const char *MEDIA_MIMETYPE_VIDEO_VP9 = "video/x-vnd.on2.vp9";
-const char *MEDIA_MIMETYPE_VIDEO_AVC = "video/avc";
-const char *MEDIA_MIMETYPE_VIDEO_HEVC = "video/hevc";
-const char *MEDIA_MIMETYPE_VIDEO_MPEG4 = "video/mp4v-es";
-const char *MEDIA_MIMETYPE_VIDEO_H263 = "video/3gpp";
-const char *MEDIA_MIMETYPE_VIDEO_MPEG2 = "video/mpeg2";
-const char *MEDIA_MIMETYPE_VIDEO_RAW = "video/raw";
-const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION = "video/dolby-vision";
-const char *MEDIA_MIMETYPE_VIDEO_SCRAMBLED = "video/scrambled";
-
-const char *MEDIA_MIMETYPE_AUDIO_AMR_NB = "audio/3gpp";
-const char *MEDIA_MIMETYPE_AUDIO_AMR_WB = "audio/amr-wb";
-const char *MEDIA_MIMETYPE_AUDIO_MPEG = "audio/mpeg";
-const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I = "audio/mpeg-L1";
-const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II = "audio/mpeg-L2";
-const char *MEDIA_MIMETYPE_AUDIO_MIDI = "audio/midi";
-const char *MEDIA_MIMETYPE_AUDIO_AAC = "audio/mp4a-latm";
-const char *MEDIA_MIMETYPE_AUDIO_QCELP = "audio/qcelp";
-const char *MEDIA_MIMETYPE_AUDIO_VORBIS = "audio/vorbis";
-const char *MEDIA_MIMETYPE_AUDIO_OPUS = "audio/opus";
-const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW = "audio/g711-alaw";
-const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW = "audio/g711-mlaw";
-const char *MEDIA_MIMETYPE_AUDIO_RAW = "audio/raw";
-const char *MEDIA_MIMETYPE_AUDIO_FLAC = "audio/flac";
-const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts";
-const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
-const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
-const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
-const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
-
-const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
-const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
-const char *MEDIA_MIMETYPE_CONTAINER_OGG = "application/ogg";
-const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA = "video/x-matroska";
-const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS = "video/mp2ts";
-const char *MEDIA_MIMETYPE_CONTAINER_AVI = "video/avi";
-const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS = "video/mp2p";
-
-const char *MEDIA_MIMETYPE_TEXT_3GPP = "text/3gpp-tt";
-const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
-const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
-const char *MEDIA_MIMETYPE_TEXT_CEA_608 = "text/cea-608";
-const char *MEDIA_MIMETYPE_TEXT_CEA_708 = "text/cea-708";
-const char *MEDIA_MIMETYPE_DATA_TIMED_ID3 = "application/x-id3v4";
-
-} // namespace android
diff --git a/media/libmedia/MidiIoWrapper.cpp b/media/libmedia/MidiIoWrapper.cpp
index 4e5d67f..5ca3b48 100644
--- a/media/libmedia/MidiIoWrapper.cpp
+++ b/media/libmedia/MidiIoWrapper.cpp
@@ -38,6 +38,7 @@
mFd = open(path, O_RDONLY | O_LARGEFILE);
mBase = 0;
mLength = lseek(mFd, 0, SEEK_END);
+ mDataSource = nullptr;
}
MidiIoWrapper::MidiIoWrapper(int fd, off64_t offset, int64_t size) {
@@ -45,9 +46,10 @@
mFd = fd < 0 ? -1 : dup(fd);
mBase = offset;
mLength = size;
+ mDataSource = nullptr;
}
-MidiIoWrapper::MidiIoWrapper(const sp<DataSource> &source) {
+MidiIoWrapper::MidiIoWrapper(DataSourceBase *source) {
ALOGV("MidiIoWrapper(DataSource)");
mFd = -1;
mDataSource = source;
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
new file mode 100644
index 0000000..272bc30
--- /dev/null
+++ b/media/libmedia/NdkWrapper.cpp
@@ -0,0 +1,1306 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkWrapper"
+
+#include <media/NdkWrapper.h>
+
+#include <android/native_window.h>
+#include <log/log.h>
+#include <media/NdkMediaCodec.h>
+#include <media/NdkMediaCrypto.h>
+#include <media/NdkMediaDrm.h>
+#include <media/NdkMediaFormat.h>
+#include <media/NdkMediaExtractor.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <utils/Errors.h>
+
+// TODO: remove forward declaration when AMediaExtractor_disconnect is officially added to NDK
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+media_status_t AMediaExtractor_disconnect(AMediaExtractor *);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+namespace android {
+
+static const size_t kAESBlockSize = 16; // AES_BLOCK_SIZE
+
+static const char *AMediaFormatKeyGroupInt32[] = {
+ AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR,
+ AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR,
+ AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION,
+ AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL,
+ AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL,
+ AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT,
+ AMEDIAFORMAT_KEY_AAC_PROFILE,
+ AMEDIAFORMAT_KEY_AAC_SBR_MODE,
+ AMEDIAFORMAT_KEY_AUDIO_SESSION_ID,
+ AMEDIAFORMAT_KEY_BITRATE_MODE,
+ AMEDIAFORMAT_KEY_BIT_RATE,
+ AMEDIAFORMAT_KEY_CAPTURE_RATE,
+ AMEDIAFORMAT_KEY_CHANNEL_COUNT,
+ AMEDIAFORMAT_KEY_CHANNEL_MASK,
+ AMEDIAFORMAT_KEY_COLOR_FORMAT,
+ AMEDIAFORMAT_KEY_COLOR_RANGE,
+ AMEDIAFORMAT_KEY_COLOR_STANDARD,
+ AMEDIAFORMAT_KEY_COLOR_TRANSFER,
+ AMEDIAFORMAT_KEY_COMPLEXITY,
+ AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL,
+ AMEDIAFORMAT_KEY_GRID_COLUMNS,
+ AMEDIAFORMAT_KEY_GRID_ROWS,
+ AMEDIAFORMAT_KEY_HEIGHT,
+ AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD,
+ AMEDIAFORMAT_KEY_IS_ADTS,
+ AMEDIAFORMAT_KEY_IS_AUTOSELECT,
+ AMEDIAFORMAT_KEY_IS_DEFAULT,
+ AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE,
+ AMEDIAFORMAT_KEY_LATENCY,
+ AMEDIAFORMAT_KEY_LEVEL,
+ AMEDIAFORMAT_KEY_MAX_HEIGHT,
+ AMEDIAFORMAT_KEY_MAX_INPUT_SIZE,
+ AMEDIAFORMAT_KEY_MAX_WIDTH,
+ AMEDIAFORMAT_KEY_PCM_ENCODING,
+ AMEDIAFORMAT_KEY_PRIORITY,
+ AMEDIAFORMAT_KEY_PROFILE,
+ AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP,
+ AMEDIAFORMAT_KEY_ROTATION,
+ AMEDIAFORMAT_KEY_SAMPLE_RATE,
+ AMEDIAFORMAT_KEY_SLICE_HEIGHT,
+ AMEDIAFORMAT_KEY_STRIDE,
+ AMEDIAFORMAT_KEY_TRACK_ID,
+ AMEDIAFORMAT_KEY_WIDTH,
+ AMEDIAFORMAT_KEY_DISPLAY_HEIGHT,
+ AMEDIAFORMAT_KEY_DISPLAY_WIDTH,
+ AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID,
+ AMEDIAFORMAT_KEY_TILE_HEIGHT,
+ AMEDIAFORMAT_KEY_TILE_WIDTH,
+ AMEDIAFORMAT_KEY_TRACK_INDEX,
+};
+
+static const char *AMediaFormatKeyGroupInt64[] = {
+ AMEDIAFORMAT_KEY_DURATION,
+ AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER,
+ AMEDIAFORMAT_KEY_TIME_US,
+};
+
+static const char *AMediaFormatKeyGroupString[] = {
+ AMEDIAFORMAT_KEY_LANGUAGE,
+ AMEDIAFORMAT_KEY_MIME,
+ AMEDIAFORMAT_KEY_TEMPORAL_LAYERING,
+};
+
+static const char *AMediaFormatKeyGroupBuffer[] = {
+ AMEDIAFORMAT_KEY_HDR_STATIC_INFO,
+ AMEDIAFORMAT_KEY_SEI,
+ AMEDIAFORMAT_KEY_MPEG_USER_DATA,
+};
+
+static const char *AMediaFormatKeyGroupCsd[] = {
+ AMEDIAFORMAT_KEY_CSD_0,
+ AMEDIAFORMAT_KEY_CSD_1,
+ AMEDIAFORMAT_KEY_CSD_2,
+};
+
+static const char *AMediaFormatKeyGroupRect[] = {
+ AMEDIAFORMAT_KEY_DISPLAY_CROP,
+};
+
+static const char *AMediaFormatKeyGroupFloatInt32[] = {
+ AMEDIAFORMAT_KEY_FRAME_RATE,
+ AMEDIAFORMAT_KEY_I_FRAME_INTERVAL,
+ AMEDIAFORMAT_KEY_OPERATING_RATE,
+};
+
+static status_t translateErrorCode(media_status_t err) {
+ if (err == AMEDIA_OK) {
+ return OK;
+ } else if (err == AMEDIA_ERROR_END_OF_STREAM) {
+ return ERROR_END_OF_STREAM;
+ } else if (err == AMEDIA_ERROR_IO) {
+ return ERROR_IO;
+ } else if (err == AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
+ return -EAGAIN;
+ }
+
+ ALOGE("ndk error code: %d", err);
+ return UNKNOWN_ERROR;
+}
+
+static int32_t translateActionCode(int32_t actionCode) {
+ if (AMediaCodecActionCode_isTransient(actionCode)) {
+ return ACTION_CODE_TRANSIENT;
+ } else if (AMediaCodecActionCode_isRecoverable(actionCode)) {
+ return ACTION_CODE_RECOVERABLE;
+ }
+ return ACTION_CODE_FATAL;
+}
+
+static CryptoPlugin::Mode translateToCryptoPluginMode(cryptoinfo_mode_t mode) {
+ CryptoPlugin::Mode ret = CryptoPlugin::kMode_Unencrypted;
+ switch (mode) {
+ case AMEDIACODECRYPTOINFO_MODE_AES_CTR: {
+ ret = CryptoPlugin::kMode_AES_CTR;
+ break;
+ }
+
+ case AMEDIACODECRYPTOINFO_MODE_AES_WV: {
+ ret = CryptoPlugin::kMode_AES_WV;
+ break;
+ }
+
+ case AMEDIACODECRYPTOINFO_MODE_AES_CBC: {
+ ret = CryptoPlugin::kMode_AES_CBC;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static cryptoinfo_mode_t translateToCryptoInfoMode(CryptoPlugin::Mode mode) {
+ cryptoinfo_mode_t ret = AMEDIACODECRYPTOINFO_MODE_CLEAR;
+ switch (mode) {
+ case CryptoPlugin::kMode_AES_CTR: {
+ ret = AMEDIACODECRYPTOINFO_MODE_AES_CTR;
+ break;
+ }
+
+ case CryptoPlugin::kMode_AES_WV: {
+ ret = AMEDIACODECRYPTOINFO_MODE_AES_WV;
+ break;
+ }
+
+ case CryptoPlugin::kMode_AES_CBC: {
+ ret = AMEDIACODECRYPTOINFO_MODE_AES_CBC;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+//////////// AMediaFormatWrapper
+// static
+sp<AMediaFormatWrapper> AMediaFormatWrapper::Create(const sp<AMessage> &message) {
+ sp<AMediaFormatWrapper> aMediaFormat = new AMediaFormatWrapper();
+
+ for (size_t i = 0; i < message->countEntries(); ++i) {
+ AMessage::Type valueType;
+ const char *key = message->getEntryNameAt(i, &valueType);
+
+ switch (valueType) {
+ case AMessage::kTypeInt32: {
+ int32_t val;
+ if (!message->findInt32(key, &val)) {
+ ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
+ continue;
+ }
+ aMediaFormat->setInt32(key, val);
+ break;
+ }
+
+ case AMessage::kTypeInt64: {
+ int64_t val;
+ if (!message->findInt64(key, &val)) {
+ ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
+ continue;
+ }
+ aMediaFormat->setInt64(key, val);
+ break;
+ }
+
+ case AMessage::kTypeFloat: {
+ float val;
+ if (!message->findFloat(key, &val)) {
+ ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
+ continue;
+ }
+ aMediaFormat->setFloat(key, val);
+ break;
+ }
+
+ case AMessage::kTypeDouble: {
+ double val;
+ if (!message->findDouble(key, &val)) {
+ ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
+ continue;
+ }
+ aMediaFormat->setDouble(key, val);
+ break;
+ }
+
+ case AMessage::kTypeSize: {
+ size_t val;
+ if (!message->findSize(key, &val)) {
+ ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
+ continue;
+ }
+ aMediaFormat->setSize(key, val);
+ break;
+ }
+
+ case AMessage::kTypeRect: {
+ int32_t left, top, right, bottom;
+ if (!message->findRect(key, &left, &top, &right, &bottom)) {
+ ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
+ continue;
+ }
+ aMediaFormat->setRect(key, left, top, right, bottom);
+ break;
+ }
+
+ case AMessage::kTypeString: {
+ AString val;
+ if (!message->findString(key, &val)) {
+ ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
+ continue;
+ }
+ aMediaFormat->setString(key, val);
+ break;
+ }
+
+ case AMessage::kTypeBuffer: {
+ sp<ABuffer> val;
+ if (!message->findBuffer(key, &val)) {
+ ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
+ continue;
+ }
+ aMediaFormat->setBuffer(key, val->data(), val->size());
+ break;
+ }
+
+ default: {
+ break;
+ }
+ }
+ }
+
+ return aMediaFormat;
+}
+
+AMediaFormatWrapper::AMediaFormatWrapper() {
+ mAMediaFormat = AMediaFormat_new();
+}
+
+AMediaFormatWrapper::AMediaFormatWrapper(AMediaFormat *aMediaFormat)
+ : mAMediaFormat(aMediaFormat) {
+}
+
+AMediaFormatWrapper::~AMediaFormatWrapper() {
+ release();
+}
+
+status_t AMediaFormatWrapper::release() {
+ if (mAMediaFormat != NULL) {
+ media_status_t err = AMediaFormat_delete(mAMediaFormat);
+ mAMediaFormat = NULL;
+ return translateErrorCode(err);
+ }
+ return OK;
+}
+
+AMediaFormat *AMediaFormatWrapper::getAMediaFormat() const {
+ return mAMediaFormat;
+}
+
+sp<AMessage> AMediaFormatWrapper::toAMessage() const {
+ sp<AMessage> msg;
+ writeToAMessage(msg);
+ return msg;
+}
+
+void AMediaFormatWrapper::writeToAMessage(sp<AMessage> &msg) const {
+ if (mAMediaFormat == NULL) {
+ msg = NULL;
+ }
+
+ if (msg == NULL) {
+ msg = new AMessage;
+ }
+ for (auto& key : AMediaFormatKeyGroupInt32) {
+ int32_t val;
+ if (getInt32(key, &val)) {
+ msg->setInt32(key, val);
+ }
+ }
+ for (auto& key : AMediaFormatKeyGroupInt64) {
+ int64_t val;
+ if (getInt64(key, &val)) {
+ msg->setInt64(key, val);
+ }
+ }
+ for (auto& key : AMediaFormatKeyGroupString) {
+ AString val;
+ if (getString(key, &val)) {
+ msg->setString(key, val);
+ }
+ }
+ for (auto& key : AMediaFormatKeyGroupBuffer) {
+ void *data;
+ size_t size;
+ if (getBuffer(key, &data, &size)) {
+ sp<ABuffer> buffer = ABuffer::CreateAsCopy(data, size);
+ msg->setBuffer(key, buffer);
+ }
+ }
+ for (auto& key : AMediaFormatKeyGroupCsd) {
+ void *data;
+ size_t size;
+ if (getBuffer(key, &data, &size)) {
+ sp<ABuffer> buffer = ABuffer::CreateAsCopy(data, size);
+ buffer->meta()->setInt32(AMEDIAFORMAT_KEY_CSD, 1);
+ buffer->meta()->setInt64(AMEDIAFORMAT_KEY_TIME_US, 0);
+ msg->setBuffer(key, buffer);
+ }
+ }
+ for (auto& key : AMediaFormatKeyGroupRect) {
+ int32_t left, top, right, bottom;
+ if (getRect(key, &left, &top, &right, &bottom)) {
+ msg->setRect(key, left, top, right, bottom);
+ }
+ }
+ for (auto& key : AMediaFormatKeyGroupFloatInt32) {
+ float valFloat;
+ if (getFloat(key, &valFloat)) {
+ msg->setFloat(key, valFloat);
+ } else {
+ int32_t valInt32;
+ if (getInt32(key, &valInt32)) {
+ msg->setFloat(key, (float)valInt32);
+ }
+ }
+ }
+}
+
+const char* AMediaFormatWrapper::toString() const {
+ if (mAMediaFormat == NULL) {
+ return NULL;
+ }
+ return AMediaFormat_toString(mAMediaFormat);
+}
+
+bool AMediaFormatWrapper::getInt32(const char *name, int32_t *out) const {
+ if (mAMediaFormat == NULL) {
+ return false;
+ }
+ return AMediaFormat_getInt32(mAMediaFormat, name, out);
+}
+
+bool AMediaFormatWrapper::getInt64(const char *name, int64_t *out) const {
+ if (mAMediaFormat == NULL) {
+ return false;
+ }
+ return AMediaFormat_getInt64(mAMediaFormat, name, out);
+}
+
+bool AMediaFormatWrapper::getFloat(const char *name, float *out) const {
+ if (mAMediaFormat == NULL) {
+ return false;
+ }
+ return AMediaFormat_getFloat(mAMediaFormat, name, out);
+}
+
+bool AMediaFormatWrapper::getDouble(const char *name, double *out) const {
+ if (mAMediaFormat == NULL) {
+ return false;
+ }
+ return AMediaFormat_getDouble(mAMediaFormat, name, out);
+}
+
+bool AMediaFormatWrapper::getSize(const char *name, size_t *out) const {
+ if (mAMediaFormat == NULL) {
+ return false;
+ }
+ return AMediaFormat_getSize(mAMediaFormat, name, out);
+}
+
+bool AMediaFormatWrapper::getRect(
+ const char *name, int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) const {
+ if (mAMediaFormat == NULL) {
+ return false;
+ }
+ return AMediaFormat_getRect(mAMediaFormat, name, left, top, right, bottom);
+}
+
+bool AMediaFormatWrapper::getBuffer(const char *name, void** data, size_t *outSize) const {
+ if (mAMediaFormat == NULL) {
+ return false;
+ }
+ return AMediaFormat_getBuffer(mAMediaFormat, name, data, outSize);
+}
+
+bool AMediaFormatWrapper::getString(const char *name, AString *out) const {
+ if (mAMediaFormat == NULL) {
+ return false;
+ }
+ const char *outChar = NULL;
+ bool ret = AMediaFormat_getString(mAMediaFormat, name, &outChar);
+ if (ret) {
+ *out = AString(outChar);
+ }
+ return ret;
+}
+
+void AMediaFormatWrapper::setInt32(const char* name, int32_t value) {
+ if (mAMediaFormat != NULL) {
+ AMediaFormat_setInt32(mAMediaFormat, name, value);
+ }
+}
+
+void AMediaFormatWrapper::setInt64(const char* name, int64_t value) {
+ if (mAMediaFormat != NULL) {
+ AMediaFormat_setInt64(mAMediaFormat, name, value);
+ }
+}
+
+void AMediaFormatWrapper::setFloat(const char* name, float value) {
+ if (mAMediaFormat != NULL) {
+ AMediaFormat_setFloat(mAMediaFormat, name, value);
+ }
+}
+
+void AMediaFormatWrapper::setDouble(const char* name, double value) {
+ if (mAMediaFormat != NULL) {
+ AMediaFormat_setDouble(mAMediaFormat, name, value);
+ }
+}
+
+void AMediaFormatWrapper::setSize(const char* name, size_t value) {
+ if (mAMediaFormat != NULL) {
+ AMediaFormat_setSize(mAMediaFormat, name, value);
+ }
+}
+
+void AMediaFormatWrapper::setRect(
+ const char* name, int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ if (mAMediaFormat != NULL) {
+ AMediaFormat_setRect(mAMediaFormat, name, left, top, right, bottom);
+ }
+}
+
+void AMediaFormatWrapper::setString(const char* name, const AString &value) {
+ if (mAMediaFormat != NULL) {
+ AMediaFormat_setString(mAMediaFormat, name, value.c_str());
+ }
+}
+
+void AMediaFormatWrapper::setBuffer(const char* name, void* data, size_t size) {
+ if (mAMediaFormat != NULL) {
+ AMediaFormat_setBuffer(mAMediaFormat, name, data, size);
+ }
+}
+
+
+//////////// ANativeWindowWrapper
+ANativeWindowWrapper::ANativeWindowWrapper(ANativeWindow *aNativeWindow)
+ : mANativeWindow(aNativeWindow) {
+ if (aNativeWindow != NULL) {
+ ANativeWindow_acquire(aNativeWindow);
+ }
+}
+
+ANativeWindowWrapper::~ANativeWindowWrapper() {
+ release();
+}
+
+status_t ANativeWindowWrapper::release() {
+ if (mANativeWindow != NULL) {
+ ANativeWindow_release(mANativeWindow);
+ mANativeWindow = NULL;
+ }
+ return OK;
+}
+
+ANativeWindow *ANativeWindowWrapper::getANativeWindow() const {
+ return mANativeWindow;
+}
+
+
+//////////// AMediaDrmWrapper
+AMediaDrmWrapper::AMediaDrmWrapper(const uint8_t uuid[16]) {
+ mAMediaDrm = AMediaDrm_createByUUID(uuid);
+}
+
+AMediaDrmWrapper::AMediaDrmWrapper(AMediaDrm *aMediaDrm)
+ : mAMediaDrm(aMediaDrm) {
+}
+
+AMediaDrmWrapper::~AMediaDrmWrapper() {
+ release();
+}
+
+status_t AMediaDrmWrapper::release() {
+ if (mAMediaDrm != NULL) {
+ AMediaDrm_release(mAMediaDrm);
+ mAMediaDrm = NULL;
+ }
+ return OK;
+}
+
+AMediaDrm *AMediaDrmWrapper::getAMediaDrm() const {
+ return mAMediaDrm;
+}
+
+// static
+bool AMediaDrmWrapper::isCryptoSchemeSupported(
+ const uint8_t uuid[16],
+ const char *mimeType) {
+ return AMediaDrm_isCryptoSchemeSupported(uuid, mimeType);
+}
+
+
+//////////// AMediaCryptoWrapper
+AMediaCryptoWrapper::AMediaCryptoWrapper(
+ const uint8_t uuid[16], const void *initData, size_t initDataSize) {
+ mAMediaCrypto = AMediaCrypto_new(uuid, initData, initDataSize);
+}
+
+AMediaCryptoWrapper::AMediaCryptoWrapper(AMediaCrypto *aMediaCrypto)
+ : mAMediaCrypto(aMediaCrypto) {
+}
+
+AMediaCryptoWrapper::~AMediaCryptoWrapper() {
+ release();
+}
+
+status_t AMediaCryptoWrapper::release() {
+ if (mAMediaCrypto != NULL) {
+ AMediaCrypto_delete(mAMediaCrypto);
+ mAMediaCrypto = NULL;
+ }
+ return OK;
+}
+
+AMediaCrypto *AMediaCryptoWrapper::getAMediaCrypto() const {
+ return mAMediaCrypto;
+}
+
+bool AMediaCryptoWrapper::isCryptoSchemeSupported(const uint8_t uuid[16]) {
+ if (mAMediaCrypto == NULL) {
+ return false;
+ }
+ return AMediaCrypto_isCryptoSchemeSupported(uuid);
+}
+
+bool AMediaCryptoWrapper::requiresSecureDecoderComponent(const char *mime) {
+ if (mAMediaCrypto == NULL) {
+ return false;
+ }
+ return AMediaCrypto_requiresSecureDecoderComponent(mime);
+}
+
+
+//////////// AMediaCodecCryptoInfoWrapper
+// static
+sp<AMediaCodecCryptoInfoWrapper> AMediaCodecCryptoInfoWrapper::Create(MetaDataBase &meta) {
+
+ uint32_t type;
+ const void *crypteddata;
+ size_t cryptedsize;
+
+ if (!meta.findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
+ return NULL;
+ }
+
+ int numSubSamples = cryptedsize / sizeof(size_t);
+
+ if (numSubSamples <= 0) {
+ ALOGE("Create: INVALID numSubSamples: %d", numSubSamples);
+ return NULL;
+ }
+
+ const void *cleardata;
+ size_t clearsize;
+ if (meta.findData(kKeyPlainSizes, &type, &cleardata, &clearsize)) {
+ if (clearsize != cryptedsize) {
+ // The two must be of the same length.
+ ALOGE("Create: mismatch cryptedsize: %zu != clearsize: %zu", cryptedsize, clearsize);
+ return NULL;
+ }
+ }
+
+ const void *key;
+ size_t keysize;
+ if (meta.findData(kKeyCryptoKey, &type, &key, &keysize)) {
+ if (keysize != kAESBlockSize) {
+ // Keys must be 16 bytes in length.
+ ALOGE("Create: Keys must be %zu bytes in length: %zu", kAESBlockSize, keysize);
+ return NULL;
+ }
+ }
+
+ const void *iv;
+ size_t ivsize;
+ if (meta.findData(kKeyCryptoIV, &type, &iv, &ivsize)) {
+ if (ivsize != kAESBlockSize) {
+ // IVs must be 16 bytes in length.
+ ALOGE("Create: IV must be %zu bytes in length: %zu", kAESBlockSize, ivsize);
+ return NULL;
+ }
+ }
+
+ int32_t mode;
+ if (!meta.findInt32(kKeyCryptoMode, &mode)) {
+ mode = CryptoPlugin::kMode_AES_CTR;
+ }
+
+ return new AMediaCodecCryptoInfoWrapper(
+ numSubSamples,
+ (uint8_t*) key,
+ (uint8_t*) iv,
+ (CryptoPlugin::Mode)mode,
+ (size_t*) cleardata,
+ (size_t*) crypteddata);
+}
+
+AMediaCodecCryptoInfoWrapper::AMediaCodecCryptoInfoWrapper(
+ int numsubsamples,
+ uint8_t key[16],
+ uint8_t iv[16],
+ CryptoPlugin::Mode mode,
+ size_t *clearbytes,
+ size_t *encryptedbytes) {
+ mAMediaCodecCryptoInfo =
+ AMediaCodecCryptoInfo_new(numsubsamples,
+ key,
+ iv,
+ translateToCryptoInfoMode(mode),
+ clearbytes,
+ encryptedbytes);
+}
+
+AMediaCodecCryptoInfoWrapper::AMediaCodecCryptoInfoWrapper(
+ AMediaCodecCryptoInfo *aMediaCodecCryptoInfo)
+ : mAMediaCodecCryptoInfo(aMediaCodecCryptoInfo) {
+}
+
+AMediaCodecCryptoInfoWrapper::~AMediaCodecCryptoInfoWrapper() {
+ release();
+}
+
+status_t AMediaCodecCryptoInfoWrapper::release() {
+ if (mAMediaCodecCryptoInfo != NULL) {
+ media_status_t err = AMediaCodecCryptoInfo_delete(mAMediaCodecCryptoInfo);
+ mAMediaCodecCryptoInfo = NULL;
+ return translateErrorCode(err);
+ }
+ return OK;
+}
+
+AMediaCodecCryptoInfo *AMediaCodecCryptoInfoWrapper::getAMediaCodecCryptoInfo() const {
+ return mAMediaCodecCryptoInfo;
+}
+
+void AMediaCodecCryptoInfoWrapper::setPattern(CryptoPlugin::Pattern *pattern) {
+ if (mAMediaCodecCryptoInfo == NULL || pattern == NULL) {
+ return;
+ }
+ cryptoinfo_pattern_t ndkPattern = {(int32_t)pattern->mEncryptBlocks,
+ (int32_t)pattern->mSkipBlocks };
+ return AMediaCodecCryptoInfo_setPattern(mAMediaCodecCryptoInfo, &ndkPattern);
+}
+
+size_t AMediaCodecCryptoInfoWrapper::getNumSubSamples() {
+ if (mAMediaCodecCryptoInfo == NULL) {
+ return 0;
+ }
+ return AMediaCodecCryptoInfo_getNumSubSamples(mAMediaCodecCryptoInfo);
+}
+
+status_t AMediaCodecCryptoInfoWrapper::getKey(uint8_t *dst) {
+ if (mAMediaCodecCryptoInfo == NULL) {
+ return DEAD_OBJECT;
+ }
+ if (dst == NULL) {
+ return BAD_VALUE;
+ }
+ return translateErrorCode(
+ AMediaCodecCryptoInfo_getKey(mAMediaCodecCryptoInfo, dst));
+}
+
+status_t AMediaCodecCryptoInfoWrapper::getIV(uint8_t *dst) {
+ if (mAMediaCodecCryptoInfo == NULL) {
+ return DEAD_OBJECT;
+ }
+ if (dst == NULL) {
+ return BAD_VALUE;
+ }
+ return translateErrorCode(
+ AMediaCodecCryptoInfo_getIV(mAMediaCodecCryptoInfo, dst));
+}
+
+CryptoPlugin::Mode AMediaCodecCryptoInfoWrapper::getMode() {
+ if (mAMediaCodecCryptoInfo == NULL) {
+ return CryptoPlugin::kMode_Unencrypted;
+ }
+ return translateToCryptoPluginMode(
+ AMediaCodecCryptoInfo_getMode(mAMediaCodecCryptoInfo));
+}
+
+status_t AMediaCodecCryptoInfoWrapper::getClearBytes(size_t *dst) {
+ if (mAMediaCodecCryptoInfo == NULL) {
+ return DEAD_OBJECT;
+ }
+ if (dst == NULL) {
+ return BAD_VALUE;
+ }
+ return translateErrorCode(
+ AMediaCodecCryptoInfo_getClearBytes(mAMediaCodecCryptoInfo, dst));
+}
+
+status_t AMediaCodecCryptoInfoWrapper::getEncryptedBytes(size_t *dst) {
+ if (mAMediaCodecCryptoInfo == NULL) {
+ return DEAD_OBJECT;
+ }
+ if (dst == NULL) {
+ return BAD_VALUE;
+ }
+ return translateErrorCode(
+ AMediaCodecCryptoInfo_getEncryptedBytes(mAMediaCodecCryptoInfo, dst));
+}
+
+
+//////////// AMediaCodecWrapper
+// static
+sp<AMediaCodecWrapper> AMediaCodecWrapper::CreateCodecByName(const AString &name) {
+ AMediaCodec *aMediaCodec = AMediaCodec_createCodecByName(name.c_str());
+ return new AMediaCodecWrapper(aMediaCodec);
+}
+
+// static
+sp<AMediaCodecWrapper> AMediaCodecWrapper::CreateDecoderByType(const AString &mimeType) {
+ AMediaCodec *aMediaCodec = AMediaCodec_createDecoderByType(mimeType.c_str());
+ return new AMediaCodecWrapper(aMediaCodec);
+}
+
+// static
+void AMediaCodecWrapper::OnInputAvailableCB(
+ AMediaCodec * /* aMediaCodec */,
+ void *userdata,
+ int32_t index) {
+ ALOGV("OnInputAvailableCB: index(%d)", index);
+ sp<AMessage> msg = sp<AMessage>((AMessage *)userdata)->dup();
+ msg->setInt32("callbackID", CB_INPUT_AVAILABLE);
+ msg->setInt32("index", index);
+ msg->post();
+}
+
+// static
+void AMediaCodecWrapper::OnOutputAvailableCB(
+ AMediaCodec * /* aMediaCodec */,
+ void *userdata,
+ int32_t index,
+ AMediaCodecBufferInfo *bufferInfo) {
+ ALOGV("OnOutputAvailableCB: index(%d), (%d, %d, %lld, 0x%x)",
+ index, bufferInfo->offset, bufferInfo->size,
+ (long long)bufferInfo->presentationTimeUs, bufferInfo->flags);
+ sp<AMessage> msg = sp<AMessage>((AMessage *)userdata)->dup();
+ msg->setInt32("callbackID", CB_OUTPUT_AVAILABLE);
+ msg->setInt32("index", index);
+ msg->setSize("offset", (size_t)(bufferInfo->offset));
+ msg->setSize("size", (size_t)(bufferInfo->size));
+ msg->setInt64("timeUs", bufferInfo->presentationTimeUs);
+ msg->setInt32("flags", (int32_t)(bufferInfo->flags));
+ msg->post();
+}
+
+// static
+void AMediaCodecWrapper::OnFormatChangedCB(
+ AMediaCodec * /* aMediaCodec */,
+ void *userdata,
+ AMediaFormat *format) {
+ sp<AMediaFormatWrapper> formatWrapper = new AMediaFormatWrapper(format);
+ sp<AMessage> outputFormat = formatWrapper->toAMessage();
+ ALOGV("OnFormatChangedCB: format(%s)", outputFormat->debugString().c_str());
+
+ sp<AMessage> msg = sp<AMessage>((AMessage *)userdata)->dup();
+ msg->setInt32("callbackID", CB_OUTPUT_FORMAT_CHANGED);
+ msg->setMessage("format", outputFormat);
+ msg->post();
+}
+
+// static
+void AMediaCodecWrapper::OnErrorCB(
+ AMediaCodec * /* aMediaCodec */,
+ void *userdata,
+ media_status_t err,
+ int32_t actionCode,
+ const char *detail) {
+ ALOGV("OnErrorCB: err(%d), actionCode(%d), detail(%s)", err, actionCode, detail);
+ sp<AMessage> msg = sp<AMessage>((AMessage *)userdata)->dup();
+ msg->setInt32("callbackID", CB_ERROR);
+ msg->setInt32("err", translateErrorCode(err));
+ msg->setInt32("actionCode", translateActionCode(actionCode));
+ msg->setString("detail", detail);
+ msg->post();
+}
+
+AMediaCodecWrapper::AMediaCodecWrapper(AMediaCodec *aMediaCodec)
+ : mAMediaCodec(aMediaCodec) {
+}
+
+AMediaCodecWrapper::~AMediaCodecWrapper() {
+ release();
+}
+
+status_t AMediaCodecWrapper::release() {
+ if (mAMediaCodec != NULL) {
+ AMediaCodecOnAsyncNotifyCallback aCB = {};
+ AMediaCodec_setAsyncNotifyCallback(mAMediaCodec, aCB, NULL);
+ mCallback = NULL;
+
+ media_status_t err = AMediaCodec_delete(mAMediaCodec);
+ mAMediaCodec = NULL;
+ return translateErrorCode(err);
+ }
+ return OK;
+}
+
+AMediaCodec *AMediaCodecWrapper::getAMediaCodec() const {
+ return mAMediaCodec;
+}
+
+status_t AMediaCodecWrapper::getName(AString *outComponentName) const {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ char *name = NULL;
+ media_status_t err = AMediaCodec_getName(mAMediaCodec, &name);
+ if (err != AMEDIA_OK) {
+ return translateErrorCode(err);
+ }
+
+ *outComponentName = AString(name);
+ AMediaCodec_releaseName(mAMediaCodec, name);
+ return OK;
+}
+
+status_t AMediaCodecWrapper::configure(
+ const sp<AMediaFormatWrapper> &format,
+ const sp<ANativeWindowWrapper> &nww,
+ const sp<AMediaCryptoWrapper> &crypto,
+ uint32_t flags) {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+
+ media_status_t err = AMediaCodec_configure(
+ mAMediaCodec,
+ format->getAMediaFormat(),
+ (nww == NULL ? NULL : nww->getANativeWindow()),
+ crypto == NULL ? NULL : crypto->getAMediaCrypto(),
+ flags);
+
+ return translateErrorCode(err);
+}
+
+status_t AMediaCodecWrapper::setCallback(const sp<AMessage> &callback) {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+
+ mCallback = callback;
+
+ AMediaCodecOnAsyncNotifyCallback aCB = {
+ OnInputAvailableCB,
+ OnOutputAvailableCB,
+ OnFormatChangedCB,
+ OnErrorCB
+ };
+
+ return translateErrorCode(
+ AMediaCodec_setAsyncNotifyCallback(mAMediaCodec, aCB, callback.get()));
+}
+
+status_t AMediaCodecWrapper::releaseCrypto() {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(AMediaCodec_releaseCrypto(mAMediaCodec));
+}
+
+status_t AMediaCodecWrapper::start() {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(AMediaCodec_start(mAMediaCodec));
+}
+
+status_t AMediaCodecWrapper::stop() {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(AMediaCodec_stop(mAMediaCodec));
+}
+
+status_t AMediaCodecWrapper::flush() {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(AMediaCodec_flush(mAMediaCodec));
+}
+
+uint8_t* AMediaCodecWrapper::getInputBuffer(size_t idx, size_t *out_size) {
+ if (mAMediaCodec == NULL) {
+ return NULL;
+ }
+ return AMediaCodec_getInputBuffer(mAMediaCodec, idx, out_size);
+}
+
+uint8_t* AMediaCodecWrapper::getOutputBuffer(size_t idx, size_t *out_size) {
+ if (mAMediaCodec == NULL) {
+ return NULL;
+ }
+ return AMediaCodec_getOutputBuffer(mAMediaCodec, idx, out_size);
+}
+
+status_t AMediaCodecWrapper::queueInputBuffer(
+ size_t idx,
+ size_t offset,
+ size_t size,
+ uint64_t time,
+ uint32_t flags) {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(
+ AMediaCodec_queueInputBuffer(mAMediaCodec, idx, offset, size, time, flags));
+}
+
+status_t AMediaCodecWrapper::queueSecureInputBuffer(
+ size_t idx,
+ size_t offset,
+ sp<AMediaCodecCryptoInfoWrapper> &codecCryptoInfo,
+ uint64_t time,
+ uint32_t flags) {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(
+ AMediaCodec_queueSecureInputBuffer(
+ mAMediaCodec,
+ idx,
+ offset,
+ codecCryptoInfo->getAMediaCodecCryptoInfo(),
+ time,
+ flags));
+}
+
+sp<AMediaFormatWrapper> AMediaCodecWrapper::getOutputFormat() {
+ if (mAMediaCodec == NULL) {
+ return NULL;
+ }
+ return new AMediaFormatWrapper(AMediaCodec_getOutputFormat(mAMediaCodec));
+}
+
+sp<AMediaFormatWrapper> AMediaCodecWrapper::getInputFormat() {
+ if (mAMediaCodec == NULL) {
+ return NULL;
+ }
+ return new AMediaFormatWrapper(AMediaCodec_getInputFormat(mAMediaCodec));
+}
+
+status_t AMediaCodecWrapper::releaseOutputBuffer(size_t idx, bool render) {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(
+ AMediaCodec_releaseOutputBuffer(mAMediaCodec, idx, render));
+}
+
+status_t AMediaCodecWrapper::setOutputSurface(const sp<ANativeWindowWrapper> &nww) {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(
+ AMediaCodec_setOutputSurface(mAMediaCodec,
+ (nww == NULL ? NULL : nww->getANativeWindow())));
+}
+
+status_t AMediaCodecWrapper::releaseOutputBufferAtTime(size_t idx, int64_t timestampNs) {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(
+ AMediaCodec_releaseOutputBufferAtTime(mAMediaCodec, idx, timestampNs));
+}
+
+status_t AMediaCodecWrapper::setParameters(const sp<AMediaFormatWrapper> ¶ms) {
+ if (mAMediaCodec == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(
+ AMediaCodec_setParameters(mAMediaCodec, params->getAMediaFormat()));
+}
+
+//////////// AMediaExtractorWrapper
+
+AMediaExtractorWrapper::AMediaExtractorWrapper(AMediaExtractor *aMediaExtractor)
+ : mAMediaExtractor(aMediaExtractor) {
+}
+
+AMediaExtractorWrapper::~AMediaExtractorWrapper() {
+ release();
+}
+
+status_t AMediaExtractorWrapper::release() {
+ if (mAMediaExtractor != NULL) {
+ media_status_t err = AMediaExtractor_delete(mAMediaExtractor);
+ mAMediaExtractor = NULL;
+ return translateErrorCode(err);
+ }
+ return OK;
+}
+
+status_t AMediaExtractorWrapper::disconnect() {
+ if (mAMediaExtractor != NULL) {
+ media_status_t err = AMediaExtractor_disconnect(mAMediaExtractor);
+ return translateErrorCode(err);
+ }
+ return DEAD_OBJECT;
+}
+
+AMediaExtractor *AMediaExtractorWrapper::getAMediaExtractor() const {
+ return mAMediaExtractor;
+}
+
+status_t AMediaExtractorWrapper::setDataSource(int fd, off64_t offset, off64_t length) {
+ if (mAMediaExtractor == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(AMediaExtractor_setDataSourceFd(
+ mAMediaExtractor, fd, offset, length));
+}
+
+status_t AMediaExtractorWrapper::setDataSource(const char *location) {
+ if (mAMediaExtractor == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(AMediaExtractor_setDataSource(mAMediaExtractor, location));
+}
+
+status_t AMediaExtractorWrapper::setDataSource(AMediaDataSource *source) {
+ if (mAMediaExtractor == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(AMediaExtractor_setDataSourceCustom(mAMediaExtractor, source));
+}
+
+size_t AMediaExtractorWrapper::getTrackCount() {
+ if (mAMediaExtractor == NULL) {
+ return 0;
+ }
+ return AMediaExtractor_getTrackCount(mAMediaExtractor);
+}
+
+sp<AMediaFormatWrapper> AMediaExtractorWrapper::getFormat() {
+ if (mAMediaExtractor == NULL) {
+ return NULL;
+ }
+ return new AMediaFormatWrapper(AMediaExtractor_getFileFormat(mAMediaExtractor));
+}
+
+sp<AMediaFormatWrapper> AMediaExtractorWrapper::getTrackFormat(size_t idx) {
+ if (mAMediaExtractor == NULL) {
+ return NULL;
+ }
+ return new AMediaFormatWrapper(AMediaExtractor_getTrackFormat(mAMediaExtractor, idx));
+}
+
+status_t AMediaExtractorWrapper::selectTrack(size_t idx) {
+ if (mAMediaExtractor == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(AMediaExtractor_selectTrack(mAMediaExtractor, idx));
+}
+
+status_t AMediaExtractorWrapper::unselectTrack(size_t idx) {
+ if (mAMediaExtractor == NULL) {
+ return DEAD_OBJECT;
+ }
+ return translateErrorCode(AMediaExtractor_unselectTrack(mAMediaExtractor, idx));
+}
+
+status_t AMediaExtractorWrapper::selectSingleTrack(size_t idx) {
+ if (mAMediaExtractor == NULL) {
+ return DEAD_OBJECT;
+ }
+ for (size_t i = 0; i < AMediaExtractor_getTrackCount(mAMediaExtractor); ++i) {
+ if (i == idx) {
+ media_status_t err = AMediaExtractor_selectTrack(mAMediaExtractor, i);
+ if (err != AMEDIA_OK) {
+ return translateErrorCode(err);
+ }
+ } else {
+ media_status_t err = AMediaExtractor_unselectTrack(mAMediaExtractor, i);
+ if (err != AMEDIA_OK) {
+ return translateErrorCode(err);
+ }
+ }
+ }
+ return OK;
+}
+
+ssize_t AMediaExtractorWrapper::readSampleData(const sp<ABuffer> &buffer) {
+ if (mAMediaExtractor == NULL) {
+ return -1;
+ }
+ return AMediaExtractor_readSampleData(mAMediaExtractor, buffer->data(), buffer->capacity());
+}
+
+ssize_t AMediaExtractorWrapper::getSampleSize() {
+ if (mAMediaExtractor == NULL) {
+ return 0;
+ }
+ return AMediaExtractor_getSampleSize(mAMediaExtractor);
+}
+
+uint32_t AMediaExtractorWrapper::getSampleFlags() {
+ if (mAMediaExtractor == NULL) {
+ return 0;
+ }
+ return AMediaExtractor_getSampleFlags(mAMediaExtractor);
+}
+
+int AMediaExtractorWrapper::getSampleTrackIndex() {
+ if (mAMediaExtractor == NULL) {
+ return -1;
+ }
+ return AMediaExtractor_getSampleTrackIndex(mAMediaExtractor);
+}
+
+int64_t AMediaExtractorWrapper::getSampleTime() {
+ if (mAMediaExtractor == NULL) {
+ return -1;
+ }
+ return AMediaExtractor_getSampleTime(mAMediaExtractor);
+}
+
+status_t AMediaExtractorWrapper::getSampleFormat(sp<AMediaFormatWrapper> &formatWrapper) {
+ if (mAMediaExtractor == NULL) {
+ return DEAD_OBJECT;
+ }
+ AMediaFormat *format = AMediaFormat_new();
+ formatWrapper = new AMediaFormatWrapper(format);
+ return translateErrorCode(AMediaExtractor_getSampleFormat(mAMediaExtractor, format));
+}
+
+int64_t AMediaExtractorWrapper::getCachedDuration() {
+ if (mAMediaExtractor == NULL) {
+ return -1;
+ }
+ return AMediaExtractor_getCachedDuration(mAMediaExtractor);
+}
+
+bool AMediaExtractorWrapper::advance() {
+ if (mAMediaExtractor == NULL) {
+ return false;
+ }
+ return AMediaExtractor_advance(mAMediaExtractor);
+}
+
+status_t AMediaExtractorWrapper::seekTo(int64_t seekPosUs, MediaSource::ReadOptions::SeekMode mode) {
+ if (mAMediaExtractor == NULL) {
+ return DEAD_OBJECT;
+ }
+
+ SeekMode aMode;
+ switch (mode) {
+ case MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC: {
+ aMode = AMEDIAEXTRACTOR_SEEK_PREVIOUS_SYNC;
+ break;
+ }
+ case MediaSource::ReadOptions::SEEK_NEXT_SYNC: {
+ aMode = AMEDIAEXTRACTOR_SEEK_NEXT_SYNC;
+ break;
+ }
+ default: {
+ aMode = AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC;
+ break;
+ }
+ }
+ return translateErrorCode(AMediaExtractor_seekTo(mAMediaExtractor, seekPosUs, aMode));
+}
+
+PsshInfo* AMediaExtractorWrapper::getPsshInfo() {
+ if (mAMediaExtractor == NULL) {
+ return NULL;
+ }
+ return AMediaExtractor_getPsshInfo(mAMediaExtractor);
+}
+
+sp<AMediaCodecCryptoInfoWrapper> AMediaExtractorWrapper::getSampleCryptoInfo() {
+ if (mAMediaExtractor == NULL) {
+ return NULL;
+ }
+ return new AMediaCodecCryptoInfoWrapper(AMediaExtractor_getSampleCryptoInfo(mAMediaExtractor));
+}
+
+ssize_t AMediaDataSourceWrapper::AMediaDataSourceWrapper_getSize(void *userdata) {
+ DataSource *source = static_cast<DataSource *>(userdata);
+ off64_t size = -1;
+ source->getSize(&size);
+ return size;
+}
+
+ssize_t AMediaDataSourceWrapper::AMediaDataSourceWrapper_readAt(void *userdata, off64_t offset, void * buf, size_t size) {
+ DataSource *source = static_cast<DataSource *>(userdata);
+ return source->readAt(offset, buf, size);
+}
+
+void AMediaDataSourceWrapper::AMediaDataSourceWrapper_close(void *userdata) {
+ DataSource *source = static_cast<DataSource *>(userdata);
+ source->close();
+}
+
+AMediaDataSourceWrapper::AMediaDataSourceWrapper(const sp<DataSource> &dataSource)
+ : mDataSource(dataSource),
+ mAMediaDataSource(AMediaDataSource_new()) {
+ ALOGV("setDataSource (source: %p)", dataSource.get());
+ AMediaDataSource_setUserdata(mAMediaDataSource, dataSource.get());
+ AMediaDataSource_setReadAt(mAMediaDataSource, AMediaDataSourceWrapper_readAt);
+ AMediaDataSource_setGetSize(mAMediaDataSource, AMediaDataSourceWrapper_getSize);
+ AMediaDataSource_setClose(mAMediaDataSource, AMediaDataSourceWrapper_close);
+}
+
+AMediaDataSourceWrapper::~AMediaDataSourceWrapper() {
+ if (mAMediaDataSource == NULL) {
+ return;
+ }
+ AMediaDataSource_delete(mAMediaDataSource);
+ mAMediaDataSource = NULL;
+}
+
+AMediaDataSource* AMediaDataSourceWrapper::getAMediaDataSource() {
+ return mAMediaDataSource;
+}
+
+} // namespace android
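The data-source and extractor wrappers above are meant to compose: AMediaDataSourceWrapper bridges a framework DataSource to the NDK AMediaDataSource callbacks, and the resulting handle is passed to AMediaExtractorWrapper::setDataSource(). A minimal usage sketch, not part of this change, assuming the AMediaExtractorWrapper constructor that takes an AMediaExtractor* (declared earlier in this patch) and an already-constructed framework DataSource:

    // Illustrative only: wire a framework DataSource into the NDK extractor path.
    sp<DataSource> source = /* obtained elsewhere */ nullptr;
    sp<AMediaDataSourceWrapper> dataSrc = new AMediaDataSourceWrapper(source);
    sp<AMediaExtractorWrapper> extractor =
            new AMediaExtractorWrapper(AMediaExtractor_new());
    if (extractor->setDataSource(dataSrc->getAMediaDataSource()) == OK) {
        extractor->selectSingleTrack(0 /* track index */);
        sp<ABuffer> buffer = new ABuffer(1024 * 1024);
        ssize_t bytes = extractor->readSampleData(buffer);  // one sample into |buffer|
        // ... consume |bytes|, then extractor->advance() for the next sample.
    }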
diff --git a/media/libmedia/TimeCheck.cpp b/media/libmedia/TimeCheck.cpp
new file mode 100644
index 0000000..dab5d4f
--- /dev/null
+++ b/media/libmedia/TimeCheck.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <media/TimeCheck.h>
+
+namespace android {
+
+/* static */
+sp<TimeCheck::TimeCheckThread> TimeCheck::getTimeCheckThread()
+{
+ static sp<TimeCheck::TimeCheckThread> sTimeCheckThread = new TimeCheck::TimeCheckThread();
+ return sTimeCheckThread;
+}
+
+TimeCheck::TimeCheck(const char *tag, uint32_t timeoutMs)
+ : mEndTimeNs(getTimeCheckThread()->startMonitoring(tag, timeoutMs))
+{
+}
+
+TimeCheck::~TimeCheck() {
+ getTimeCheckThread()->stopMonitoring(mEndTimeNs);
+}
+
+TimeCheck::TimeCheckThread::~TimeCheckThread()
+{
+ AutoMutex _l(mMutex);
+ requestExit();
+ mMonitorRequests.clear();
+ mCond.signal();
+}
+
+nsecs_t TimeCheck::TimeCheckThread::startMonitoring(const char *tag, uint32_t timeoutMs) {
+ Mutex::Autolock _l(mMutex);
+ nsecs_t endTimeNs = systemTime() + milliseconds(timeoutMs);
+ for (; mMonitorRequests.indexOfKey(endTimeNs) >= 0; ++endTimeNs);
+ mMonitorRequests.add(endTimeNs, tag);
+ mCond.signal();
+ return endTimeNs;
+}
+
+void TimeCheck::TimeCheckThread::stopMonitoring(nsecs_t endTimeNs) {
+ Mutex::Autolock _l(mMutex);
+ mMonitorRequests.removeItem(endTimeNs);
+ mCond.signal();
+}
+
+bool TimeCheck::TimeCheckThread::threadLoop()
+{
+ status_t status = TIMED_OUT;
+ const char *tag = "<unspecified>";
+ {
+ AutoMutex _l(mMutex);
+
+ if (exitPending()) {
+ return false;
+ }
+
+ nsecs_t endTimeNs = INT64_MAX;
+ // KeyedVector mMonitorRequests is ordered so take first entry as next timeout
+ if (mMonitorRequests.size() != 0) {
+ endTimeNs = mMonitorRequests.keyAt(0);
+ tag = mMonitorRequests.valueAt(0);
+ }
+
+ const nsecs_t waitTimeNs = endTimeNs - systemTime();
+ if (waitTimeNs > 0) {
+ status = mCond.waitRelative(mMutex, waitTimeNs);
+ }
+ }
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "TimeCheck timeout for %s", tag);
+ return true;
+}
+
+}; // namespace android
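TimeCheck is an RAII watchdog built on the singleton TimeCheckThread: constructing one registers an end time via startMonitoring(), destroying it removes the entry via stopMonitoring(), and if the deadline expires first threadLoop() aborts the process through LOG_ALWAYS_FATAL_IF with the offending tag. A minimal usage sketch (tag and timeout are illustrative, not taken from this patch):

    #include <media/TimeCheck.h>

    void doPotentiallyHangingCall() {
        // Abort the process if this scope takes longer than 5 seconds.
        android::TimeCheck check("doPotentiallyHangingCall", 5000 /* timeoutMs */);
        // ... blocking work, e.g. a call into a misbehaving HAL ...
    }   // ~TimeCheck() stops monitoring here.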
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index e6c8f9c..a3db754 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -55,6 +55,8 @@
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_PROXY),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HEARING_AID),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ECHO_CANCELLER),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DEFAULT),
// STUB must be after DEFAULT, so the latter is picked up by toString first.
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_STUB),
@@ -115,6 +117,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT_PCM),
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ),
MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_VOIP_RX),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_INCALL_MUSIC),
TERMINATOR
};
@@ -128,6 +131,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_MMAP_NOIRQ),
MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_VOIP_TX),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_AV_SYNC),
TERMINATOR
};
@@ -154,6 +158,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LD),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V2),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ELD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_XHE),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_MAIN),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_LC),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_SSR),
@@ -164,6 +169,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_LD),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_HE_V2),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_ELD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_XHE),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_VORBIS),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V1),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V2),
@@ -194,6 +200,11 @@
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX_HD),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC4),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LDAC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_E_AC3_JOC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_1_0),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_2_0),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_2_1),
TERMINATOR
};
@@ -203,12 +214,24 @@
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT1),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT0POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT1POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_3POINT0POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_3POINT1POINT2),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD_BACK),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD_SIDE),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_SURROUND),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_PENTA),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1_BACK),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1_SIDE),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1POINT4),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_6POINT1),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1POINT4),
TERMINATOR
};
@@ -219,6 +242,11 @@
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_6),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_2POINT0POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_2POINT1POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_3POINT0POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_3POINT1POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_5POINT1),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_CALL_MONO),
@@ -277,6 +305,16 @@
TERMINATOR
};
+template<>
+const AudioContentTypeConverter::Table AudioContentTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_UNKNOWN),
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_SPEECH),
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_MUSIC),
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_MOVIE),
+ MAKE_STRING_FROM_ENUM(AUDIO_CONTENT_TYPE_SONIFICATION),
+ TERMINATOR
+};
+
template <>
const UsageTypeConverter::Table UsageTypeConverter::mTable[] = {
MAKE_STRING_FROM_ENUM(AUDIO_USAGE_UNKNOWN),
@@ -295,8 +333,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_SONIFICATION),
MAKE_STRING_FROM_ENUM(AUDIO_USAGE_GAME),
MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VIRTUAL_SOURCE),
- MAKE_STRING_FROM_ENUM(AUDIO_USAGE_CNT),
- MAKE_STRING_FROM_ENUM(AUDIO_USAGE_MAX),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANT),
TERMINATOR
};
@@ -312,8 +349,6 @@
MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_COMMUNICATION),
MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_REMOTE_SUBMIX),
MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_UNPROCESSED),
- MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_CNT),
- MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_MAX),
MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_FM_TUNER),
MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_HOTWORD),
TERMINATOR
diff --git a/media/libmedia/aidl/android/media/IMediaExtractorUpdateService.aidl b/media/libmedia/aidl/android/media/IMediaExtractorUpdateService.aidl
new file mode 100644
index 0000000..57b1bc9
--- /dev/null
+++ b/media/libmedia/aidl/android/media/IMediaExtractorUpdateService.aidl
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * Service to reload extractor plugins when an update package is installed or uninstalled.
+ * @hide
+ */
+interface IMediaExtractorUpdateService {
+ void loadPlugins(@utf8InCpp String apkPath);
+}
diff --git a/media/libmedia/exports.lds b/media/libmedia/exports.lds
new file mode 100644
index 0000000..904a7f7
--- /dev/null
+++ b/media/libmedia/exports.lds
@@ -0,0 +1,7 @@
+{
+ global:
+ *;
+ local:
+ _ZN7android13MidiIoWrapper*;
+ _ZTVN7android13MidiIoWrapperE*;
+};
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libmedia/include/media/BufferProviders.h
index 9d026f6..d6a9cfb 100644
--- a/media/libmedia/include/media/BufferProviders.h
+++ b/media/libmedia/include/media/BufferProviders.h
@@ -161,6 +161,17 @@
const audio_format_t mOutputFormat;
};
+// ClampFloatBufferProvider derives from CopyBufferProvider to clamp float samples to within -3 dB
+class ClampFloatBufferProvider : public CopyBufferProvider {
+public:
+ ClampFloatBufferProvider(int32_t channelCount,
+ size_t bufferFrameCount);
+ virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+ const uint32_t mChannelCount;
+};
+
// TimestretchBufferProvider derives from PassthruBufferProvider for time stretching
class TimestretchBufferProvider : public PassthruBufferProvider {
public:
diff --git a/media/libmedia/include/media/BufferingSettings.h b/media/libmedia/include/media/BufferingSettings.h
index e812d2a..d2a3e40 100644
--- a/media/libmedia/include/media/BufferingSettings.h
+++ b/media/libmedia/include/media/BufferingSettings.h
@@ -21,45 +21,14 @@
namespace android {
-enum BufferingMode : int {
- // Do not support buffering.
- BUFFERING_MODE_NONE = 0,
- // Support only time based buffering.
- BUFFERING_MODE_TIME_ONLY = 1,
- // Support only size based buffering.
- BUFFERING_MODE_SIZE_ONLY = 2,
- // Support both time and size based buffering, time based calculation precedes size based.
- // Size based calculation will be used only when time information is not available for
- // the stream.
- BUFFERING_MODE_TIME_THEN_SIZE = 3,
- // Number of modes.
- BUFFERING_MODE_COUNT = 4,
-};
-
struct BufferingSettings : public Parcelable {
- static const int kNoWatermark = -1;
+ static const int kNoMark = -1;
- static bool IsValidBufferingMode(int mode);
- static bool IsTimeBasedBufferingMode(int mode);
- static bool IsSizeBasedBufferingMode(int mode);
+ int mInitialMarkMs;
- BufferingMode mInitialBufferingMode; // for prepare
- BufferingMode mRebufferingMode; // for playback
-
- int mInitialWatermarkMs; // time based
- int mInitialWatermarkKB; // size based
-
- // When cached data is below this mark, playback will be paused for buffering
- // till data reach |mRebufferingWatermarkHighMs| or end of stream.
- int mRebufferingWatermarkLowMs;
- // When cached data is above this mark, buffering will be paused.
- int mRebufferingWatermarkHighMs;
-
- // When cached data is below this mark, playback will be paused for buffering
- // till data reach |mRebufferingWatermarkHighKB| or end of stream.
- int mRebufferingWatermarkLowKB;
- // When cached data is above this mark, buffering will be paused.
- int mRebufferingWatermarkHighKB;
+ // When cached data is above this mark, playback will be resumed if it has been paused
+ // due to low cached data.
+ int mResumePlaybackMarkMs;
BufferingSettings();
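With the buffering modes and size-based watermarks removed, a client now supplies just two time-based marks. A hedged sketch of how they might be filled in (values and the |player| handle are illustrative; setBufferingSettings() on IMediaPlayer appears later in this patch):

    BufferingSettings settings;
    settings.mInitialMarkMs = 15000;         // initial buffering target, in milliseconds
    settings.mResumePlaybackMarkMs = 5000;   // resume playback once this much is re-buffered
    player->setBufferingSettings(settings);  // |player| is an sp<IMediaPlayer>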
diff --git a/media/libmedia/include/media/CodecServiceRegistrant.h b/media/libmedia/include/media/CodecServiceRegistrant.h
new file mode 100644
index 0000000..e0af781
--- /dev/null
+++ b/media/libmedia/include/media/CodecServiceRegistrant.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC_SERVICE_REGISTRANT_H_
+
+#define CODEC_SERVICE_REGISTRANT_H_
+
+typedef void (*RegisterCodecServicesFunc)();
+
+#endif // CODEC_SERVICE_REGISTRANT_H_
diff --git a/media/libmedia/include/media/CounterMetric.h b/media/libmedia/include/media/CounterMetric.h
new file mode 100644
index 0000000..b53470d
--- /dev/null
+++ b/media/libmedia/include/media/CounterMetric.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_COUNTER_METRIC_H_
+#define ANDROID_COUNTER_METRIC_H_
+
+#include <functional>
+#include <map>
+#include <string>
+
+#include <media/MediaAnalyticsItem.h>
+#include <utils/Log.h>
+
+namespace android {
+
+
+// The CounterMetric class is used to hold counts of operations or events.
+// A CounterMetric can break down counts by a dimension specified by the
+// application. E.g. an application may want to track counts broken out by
+// error code or the size of some parameter.
+//
+// Example:
+//
+// CounterMetric<status_t> workCounter("workCounterName", "result_status");
+//
+// status_t err = DoWork();
+//
+// // Increments the number of times called with the given error code.
+// workCounter.Increment(err);
+//
+// std::map<int, int64_t> values;
+// workCounter.ExportValues(
+// [&] (int attribute_value, int64_t value) {
+// values[attribute_value] = value;
+// });
+//
+// // Do something with the exported stat.
+//
+template<typename AttributeType>
+class CounterMetric {
+ public:
+ // Instantiate the counter with the given metric name and attribute name.
+ // |attribute_name| labels the dimension the counts are broken out by.
+ CounterMetric(
+ const std::string& metric_name,
+ const std::string& attribute_name)
+ : metric_name_(metric_name),
+ attribute_name_(attribute_name) {}
+
+ // Increment the count of times the operation occurred with this
+ // combination of attributes.
+ void Increment(AttributeType attribute) {
+ if (values_.find(attribute) == values_.end()) {
+ values_[attribute] = 1;
+ } else {
+ values_[attribute] = values_[attribute] + 1;
+ }
+ };
+
+ // Export the metrics to the provided |function|. Each value for Attribute
+ // has a separate count. As such, |function| will be called once per value
+ // of Attribute.
+ void ExportValues(
+ std::function<void (const AttributeType&,
+ const int64_t count)> function) const {
+ for (auto it = values_.begin(); it != values_.end(); it++) {
+ function(it->first, it->second);
+ }
+ }
+
+ const std::string& metric_name() const { return metric_name_; };
+
+ private:
+ const std::string metric_name_;
+ const std::string attribute_name_;
+ std::map<AttributeType, int64_t> values_;
+};
+
+} // namespace android
+
+#endif // ANDROID_COUNTER_METRIC_H_
diff --git a/media/libmedia/include/media/CryptoHal.h b/media/libmedia/include/media/CryptoHal.h
index a5d8b43..ff8789d 100644
--- a/media/libmedia/include/media/CryptoHal.h
+++ b/media/libmedia/include/media/CryptoHal.h
@@ -20,14 +20,16 @@
#include <android/hardware/drm/1.0/ICryptoFactory.h>
#include <android/hardware/drm/1.0/ICryptoPlugin.h>
+#include <android/hardware/drm/1.1/ICryptoFactory.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <utils/KeyedVector.h>
#include <utils/threads.h>
-using ::android::hardware::drm::V1_0::ICryptoFactory;
-using ::android::hardware::drm::V1_0::ICryptoPlugin;
-using ::android::hardware::drm::V1_0::SharedBuffer;
+namespace drm = ::android::hardware::drm;
+using drm::V1_0::ICryptoFactory;
+using drm::V1_0::ICryptoPlugin;
+using drm::V1_0::SharedBuffer;
class IMemoryHeap;
@@ -79,7 +81,20 @@
*/
status_t mInitCheck;
- KeyedVector<int32_t, uint32_t> mHeapBases;
+ struct HeapBase {
+ HeapBase() : mBufferId(0), mSize(0) {}
+ HeapBase(uint32_t bufferId, size_t size) :
+ mBufferId(bufferId), mSize(size) {}
+
+ uint32_t getBufferId() const {return mBufferId;}
+ size_t getSize() const {return mSize;}
+
+ private:
+ uint32_t mBufferId;
+ size_t mSize;
+ };
+
+ KeyedVector<int32_t, HeapBase> mHeapBases;
uint32_t mNextBufferId;
int32_t mHeapSeqNum;
diff --git a/media/libmedia/include/media/DataSourceDesc.h b/media/libmedia/include/media/DataSourceDesc.h
new file mode 100644
index 0000000..c190261
--- /dev/null
+++ b/media/libmedia/include/media/DataSourceDesc.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_DATASOURCEDESC_H
+#define ANDROID_DATASOURCEDESC_H
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/RefBase.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class DataSource;
+struct MediaHTTPService;
+
+// A holder describing the data source a player should read from: a URL (with
+// optional MediaHTTPService and headers), a file descriptor range, or a
+// callback DataSource, plus an id and playback start/end positions.
+struct DataSourceDesc : public RefBase {
+public:
+ enum {
+ /* No data source has been set yet */
+ TYPE_NONE = 0,
+ /* data source is type of MediaDataSource */
+ TYPE_CALLBACK = 1,
+ /* data source is type of FileDescriptor */
+ TYPE_FD = 2,
+ /* data source is type of Url */
+ TYPE_URL = 3,
+ };
+
+ DataSourceDesc();
+
+ int mType;
+
+ sp<MediaHTTPService> mHttpService;
+ String8 mUrl;
+ KeyedVector<String8, String8> mHeaders;
+
+ int mFD;
+ int64_t mFDOffset;
+ int64_t mFDLength;
+
+ sp<DataSource> mCallbackSource;
+
+ int64_t mId;
+ int64_t mStartPositionMs;
+ int64_t mEndPositionMs;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(DataSourceDesc);
+};
+
+}; // namespace android
+
+#endif // ANDROID_DATASOURCEDESC_H
diff --git a/media/libmedia/include/media/DrmHal.h b/media/libmedia/include/media/DrmHal.h
index 5d25e4d..f267f76 100644
--- a/media/libmedia/include/media/DrmHal.h
+++ b/media/libmedia/include/media/DrmHal.h
@@ -18,19 +18,24 @@
#define DRM_HAL_H_
+#include <android/hardware/drm/1.0/IDrmFactory.h>
#include <android/hardware/drm/1.0/IDrmPlugin.h>
#include <android/hardware/drm/1.0/IDrmPluginListener.h>
-#include <android/hardware/drm/1.0/IDrmFactory.h>
+#include <android/hardware/drm/1.1/IDrmFactory.h>
+#include <android/hardware/drm/1.1/IDrmPlugin.h>
-#include <media/IDrm.h>
-#include <media/IDrmClient.h>
+#include <media/MediaAnalyticsItem.h>
+#include <mediadrm/DrmMetrics.h>
+#include <mediadrm/IDrm.h>
+#include <mediadrm/IDrmClient.h>
#include <utils/threads.h>
-using ::android::hardware::drm::V1_0::EventType;
-using ::android::hardware::drm::V1_0::IDrmFactory;
-using ::android::hardware::drm::V1_0::IDrmPlugin;
-using ::android::hardware::drm::V1_0::IDrmPluginListener;
-using ::android::hardware::drm::V1_0::KeyStatus;
+namespace drm = ::android::hardware::drm;
+using drm::V1_0::EventType;
+using drm::V1_0::IDrmFactory;
+using drm::V1_0::IDrmPlugin;
+using drm::V1_0::IDrmPluginListener;
+using drm::V1_0::KeyStatus;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
@@ -59,7 +64,8 @@
virtual status_t destroyPlugin();
- virtual status_t openSession(Vector<uint8_t> &sessionId);
+ virtual status_t openSession(DrmPlugin::SecurityLevel level,
+ Vector<uint8_t> &sessionId);
virtual status_t closeSession(Vector<uint8_t> const &sessionId);
@@ -93,10 +99,19 @@
Vector<uint8_t> &wrappedKey);
virtual status_t getSecureStops(List<Vector<uint8_t>> &secureStops);
+ virtual status_t getSecureStopIds(List<Vector<uint8_t>> &secureStopIds);
virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
- virtual status_t releaseAllSecureStops();
+ virtual status_t removeSecureStop(Vector<uint8_t> const &ssid);
+ virtual status_t removeAllSecureStops();
+
+ virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
+ DrmPlugin::HdcpLevel *maxLevel) const;
+ virtual status_t getNumberOfSessions(uint32_t *currentSessions,
+ uint32_t *maxSessions) const;
+ virtual status_t getSecurityLevel(Vector<uint8_t> const &sessionId,
+ DrmPlugin::SecurityLevel *level) const;
virtual status_t getPropertyString(String8 const &name, String8 &value ) const;
virtual status_t getPropertyByteArray(String8 const &name,
@@ -104,6 +119,7 @@
virtual status_t setPropertyString(String8 const &name, String8 const &value ) const;
virtual status_t setPropertyByteArray(String8 const &name,
Vector<uint8_t> const &value ) const;
+ virtual status_t getMetrics(os::PersistableBundle *metrics);
virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
String8 const &algorithm);
@@ -165,9 +181,15 @@
const Vector<sp<IDrmFactory>> mFactories;
sp<IDrmPlugin> mPlugin;
+ sp<drm::V1_1::IDrmPlugin> mPluginV1_1;
+ String8 mAppPackageName;
+
+ // Mutable to allow modification within GetPropertyByteArray.
+ mutable MediaDrmMetrics mMetrics;
Vector<Vector<uint8_t>> mOpenSessions;
void closeOpenSessions();
+ void cleanup();
/**
* mInitCheck is:
@@ -183,7 +205,8 @@
void writeByteArray(Parcel &obj, const hidl_vec<uint8_t>& array);
- void reportMetrics() const;
+ void reportPluginMetrics() const;
+ void reportFrameworkMetrics() const;
status_t getPropertyStringInternal(String8 const &name, String8 &value) const;
status_t getPropertyByteArrayInternal(String8 const &name,
Vector<uint8_t> &value) const;
diff --git a/media/libmedia/include/media/DrmMetrics.h b/media/libmedia/include/media/DrmMetrics.h
new file mode 100644
index 0000000..261998f
--- /dev/null
+++ b/media/libmedia/include/media/DrmMetrics.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_METRICS_H_
+#define DRM_METRICS_H_
+
+#include <map>
+
+#include <android/hardware/drm/1.0/types.h>
+#include <android/hardware/drm/1.1/types.h>
+#include <binder/PersistableBundle.h>
+#include <media/CounterMetric.h>
+#include <media/EventMetric.h>
+
+namespace android {
+
+/**
+ * This class contains the definition of metrics captured within MediaDrm.
+ * It also contains a method for exporting all of the metrics to a
+ * PersistableBundle.
+ */
+class MediaDrmMetrics {
+ public:
+ explicit MediaDrmMetrics();
+ virtual ~MediaDrmMetrics() {};
+ // Count of openSession calls.
+ CounterMetric<status_t> mOpenSessionCounter;
+ // Count of closeSession calls.
+ CounterMetric<status_t> mCloseSessionCounter;
+ // Count and timing of getKeyRequest calls.
+ EventMetric<status_t> mGetKeyRequestTimeUs;
+ // Count and timing of provideKeyResponse calls.
+ EventMetric<status_t> mProvideKeyResponseTimeUs;
+ // Count of getProvisionRequest calls.
+ CounterMetric<status_t> mGetProvisionRequestCounter;
+ // Count of provideProvisionResponse calls.
+ CounterMetric<status_t> mProvideProvisionResponseCounter;
+
+ // Count of key status events broken out by status type.
+ CounterMetric<::android::hardware::drm::V1_0::KeyStatusType>
+ mKeyStatusChangeCounter;
+ // Count of events broken out by event type
+ CounterMetric<::android::hardware::drm::V1_0::EventType> mEventCounter;
+
+ // Count getPropertyByteArray calls to retrieve the device unique id.
+ CounterMetric<status_t> mGetDeviceUniqueIdCounter;
+
+ // Adds a session start time record.
+ void SetSessionStart(const Vector<uint8_t>& sessionId);
+
+ // Adds a session end time record.
+ void SetSessionEnd(const Vector<uint8_t>& sessionId);
+
+ // The app package name identifies the application that is using this
+ // MediaDrm instance. It is held here for convenience and is not serialized
+ // or exported with the metrics.
+ void SetAppPackageName(const String8& appPackageName) { mAppPackageName = appPackageName; }
+ const String8& GetAppPackageName() { return mAppPackageName; }
+
+ // Export the metrics to a PersistableBundle.
+ void Export(os::PersistableBundle* metricsBundle);
+
+ // Get the serialized metrics. Metrics are formatted as a serialized
+ // DrmFrameworkMetrics proto. If there is a failure serializing the metrics,
+ // this returns an error. The parameter |serializedMetrics| is owned by the
+ // caller and must not be null.
+ status_t GetSerializedMetrics(std::string* serializedMetrics);
+
+ // Converts the DRM plugin metrics to a PersistableBundle. All of the metrics
+ // found in |pluginMetrics| are added to the |metricsBundle| parameter.
+ // |metricsBundle| is owned by the caller and must not be null.
+ //
+ // Each item in the pluginMetrics vector is added as a new PersistableBundle. E.g.
+ // DrmMetricGroup {
+ // metrics[0] {
+ // name: "buf_copy"
+ // attributes[0] {
+ // name: "size"
+ // type: INT64_TYPE
+ // int64Value: 1024
+ // }
+ // values[0] {
+ // componentName: "operation_count"
+ // type: INT64_TYPE
+ // int64Value: 75
+ // }
+ // values[1] {
+ // componentName: "average_time_seconds"
+ // type: DOUBLE_TYPE
+ // doubleValue: 0.00000042
+ // }
+ // }
+ // }
+ //
+ // becomes
+ //
+ // metricsBundle {
+ // "0": (PersistableBundle) {
+ // "attributes" : (PersistableBundle) {
+ // "size" : (int64) 1024
+ // }
+ // "operation_count" : (int64) 75
+ // "average_time_seconds" : (double) 0.00000042
+ // }
+ // }
+ //
+ static status_t HidlMetricsToBundle(
+ const hardware::hidl_vec<hardware::drm::V1_1::DrmMetricGroup>& pluginMetrics,
+ os::PersistableBundle* metricsBundle);
+
+ protected:
+ // This is visible for testing only.
+ virtual int64_t GetCurrentTimeMs();
+
+ private:
+ // Session lifetimes. A pair of values representing the milliseconds since
+ // epoch, UTC. The first value is the start time, the second is the end time.
+ std::map<std::string, std::pair<int64_t, int64_t>> mSessionLifespans;
+
+ String8 mAppPackageName;
+};
+
+} // namespace android
+
+#endif // DRM_METRICS_H_
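MediaDrmMetrics is the aggregation point that DrmHal feeds: individual counters and event metrics are bumped as HAL calls complete, session lifetimes are recorded, and getMetrics() ultimately returns everything to the app as a PersistableBundle. A hedged sketch of the intended flow, using only the members declared above (the surrounding values are illustrative):

    MediaDrmMetrics metrics;
    metrics.SetAppPackageName(String8("com.example.player"));  // hypothetical package

    Vector<uint8_t> sessionId;                   // as returned by openSession()
    metrics.mOpenSessionCounter.Increment(OK);   // one successful openSession call
    metrics.SetSessionStart(sessionId);
    // ... playback ...
    metrics.SetSessionEnd(sessionId);
    metrics.mCloseSessionCounter.Increment(OK);

    os::PersistableBundle bundle;
    metrics.Export(&bundle);                     // flatten all metrics for the caller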
diff --git a/media/libmedia/include/media/EventMetric.h b/media/libmedia/include/media/EventMetric.h
new file mode 100644
index 0000000..dbb736a
--- /dev/null
+++ b/media/libmedia/include/media/EventMetric.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_EVENT_METRIC_H_
+#define ANDROID_EVENT_METRIC_H_
+
+#include <media/MediaAnalyticsItem.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+// This is a simple holder for the statistics recorded in EventMetric.
+struct EventStatistics {
+ // The count of times the event occurred.
+ int64_t count;
+
+ // The minimum and maximum values recorded in the Record method.
+ double min;
+ double max;
+
+ // The average (mean) of all values recorded.
+ double mean;
+ // The sum of squared deviation. Variance can be calculated from
+ // this value.
+ // var = sum_squared_deviation / count;
+ double sum_squared_deviation;
+};
+
+// The EventMetric class is used to accumulate stats about an event over time.
+// A common use case is to track clock timings for a method call or operation.
+// An EventMetric can break down stats by a dimension specified by the
+// application. E.g. an application may want to track counts broken out by
+// error code or the size of some parameter.
+//
+// Example:
+//
+// struct C {
+// status_t DoWork() {
+// unsigned long start_time = now();
+// status_t result;
+//
+// // DO WORK and determine result;
+//
+// work_event_.Record(now() - start_time, result);
+//
+// return result;
+// }
+// EventMetric<status_t> work_event_;
+// };
+//
+// C c;
+// c.DoWork();
+//
+// std::map<int, double> average_times;
+// c.work_event_.ExportValues(
+//     [&] (const status_t& attribute_value, const EventStatistics& stats) {
+//       average_times[attribute_value] = stats.mean;
+//     });
+// // Do something with the exported stat.
+//
+template<typename AttributeType>
+class EventMetric {
+ public:
+ // Instantiate the metric with the given metric name and attribute name.
+ // |attribute_name| labels the dimension the statistics are broken out by.
+ EventMetric(
+ const std::string& metric_name,
+ const std::string& attribute_name)
+ : metric_name_(metric_name),
+ attribute_name_(attribute_name) {}
+
+ // Record one occurrence of the event, folding |value| into the running
+ // statistics kept for the given attribute.
+ void Record(double value, AttributeType attribute) {
+ if (values_.find(attribute) != values_.end()) {
+ EventStatistics* stats = values_[attribute].get();
+ // Using the method of provisional means (Welford). The count is bumped
+ // first so the mean update divides by the number of samples that now
+ // includes |value|.
+ stats->count++;
+ double deviation = value - stats->mean;
+ stats->mean = stats->mean + (deviation / stats->count);
+ stats->sum_squared_deviation =
+ stats->sum_squared_deviation + (deviation * (value - stats->mean));
+
+ stats->min = stats->min < value ? stats->min : value;
+ stats->max = stats->max > value ? stats->max : value;
+ } else {
+ std::unique_ptr<EventStatistics> stats =
+ std::make_unique<EventStatistics>();
+ stats->count = 1;
+ stats->min = value;
+ stats->max = value;
+ stats->mean = value;
+ stats->sum_squared_deviation = 0;
+ values_[attribute] = std::move(stats);
+ }
+ };
+
+ // Export the metrics to the provided |function|. Each value for Attribute
+ // has a separate set of stats. As such, |function| will be called once per
+ // value of Attribute.
+ void ExportValues(
+ std::function<void (const AttributeType&,
+ const EventStatistics&)> function) const {
+ for (auto it = values_.begin(); it != values_.end(); it++) {
+ function(it->first, *(it->second));
+ }
+ }
+
+ const std::string& metric_name() const { return metric_name_; };
+
+ private:
+ const std::string metric_name_;
+ const std::string attribute_name_;
+ std::map<AttributeType, std::unique_ptr<struct EventStatistics>> values_;
+};
+
+// The EventTimer is a supporting class for EventMetric instances that are used
+// to time methods. The EventTimer starts a timer when it enters scope and
+// records the elapsed time when it exits scope.
+//
+// Example:
+//
+// EventMetric<int> my_metric;
+//
+// {
+// EventTimer<int> my_timer(&my_metric);
+// // Set the attribute to associate with this timing.
+// my_timer.SetAttribute(42);
+//
+// // Do some work that you want to time.
+//
+// } // The EventTimer destructor will record the timing in my_metric;
+//
+template<typename AttributeType>
+class EventTimer {
+ public:
+ explicit EventTimer(EventMetric<AttributeType>* metric)
+ :start_time_(systemTime()), metric_(metric) {
+ }
+
+ virtual ~EventTimer() {
+ if (metric_) {
+ metric_->Record(ns2us(systemTime() - start_time_), attribute_);
+ }
+ }
+
+ // Set the attribute to associate with this timing. E.g. this can be used to
+ // record the return code from the work that was timed.
+ void SetAttribute(const AttributeType& attribute) {
+ attribute_ = attribute;
+ }
+
+ protected:
+ // Visible for testing only.
+ nsecs_t start_time_;
+
+ private:
+ EventMetric<AttributeType>* metric_;
+ AttributeType attribute_;
+};
+
+} // namespace android
+
+#endif // ANDROID_EVENT_METRIC_H_
diff --git a/media/libmedia/include/media/IDrm.h b/media/libmedia/include/media/IDrm.h
index a57e372..8e9eb3a 100644
--- a/media/libmedia/include/media/IDrm.h
+++ b/media/libmedia/include/media/IDrm.h
@@ -15,9 +15,11 @@
*/
#include <binder/IInterface.h>
+#include <binder/PersistableBundle.h>
#include <media/stagefright/foundation/ABase.h>
#include <media/drm/DrmAPI.h>
-#include <media/IDrmClient.h>
+#include <media/MediaAnalyticsItem.h>
+#include <mediadrm/IDrmClient.h>
#ifndef ANDROID_IDRM_H_
@@ -39,7 +41,8 @@
virtual status_t destroyPlugin() = 0;
- virtual status_t openSession(Vector<uint8_t> &sessionId) = 0;
+ virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
+ Vector<uint8_t> &sessionId) = 0;
virtual status_t closeSession(Vector<uint8_t> const &sessionId) = 0;
@@ -73,10 +76,20 @@
Vector<uint8_t> &wrappedKey) = 0;
virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) = 0;
+ virtual status_t getSecureStopIds(List<Vector<uint8_t> > &secureStopIds) = 0;
virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) = 0;
virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) = 0;
- virtual status_t releaseAllSecureStops() = 0;
+ virtual status_t removeSecureStop(Vector<uint8_t> const &ssid) = 0;
+ virtual status_t removeAllSecureStops() = 0;
+
+ virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
+ DrmPlugin::HdcpLevel *maxLevel)
+ const = 0;
+ virtual status_t getNumberOfSessions(uint32_t *currentSessions,
+ uint32_t *maxSessions) const = 0;
+ virtual status_t getSecurityLevel(Vector<uint8_t> const &sessionId,
+ DrmPlugin::SecurityLevel *level) const = 0;
virtual status_t getPropertyString(String8 const &name, String8 &value) const = 0;
virtual status_t getPropertyByteArray(String8 const &name,
@@ -86,6 +99,8 @@
virtual status_t setPropertyByteArray(String8 const &name,
Vector<uint8_t> const &value) const = 0;
+ virtual status_t getMetrics(os::PersistableBundle *metrics) = 0;
+
virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
String8 const &algorithm) = 0;
diff --git a/media/libmedia/include/media/IHDCP.h b/media/libmedia/include/media/IHDCP.h
deleted file mode 100644
index 352561e..0000000
--- a/media/libmedia/include/media/IHDCP.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <binder/IInterface.h>
-#include <media/hardware/HDCPAPI.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <ui/GraphicBuffer.h>
-
-namespace android {
-
-struct IHDCPObserver : public IInterface {
- DECLARE_META_INTERFACE(HDCPObserver);
-
- virtual void notify(
- int msg, int ext1, int ext2, const Parcel *obj) = 0;
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(IHDCPObserver);
-};
-
-struct IHDCP : public IInterface {
- DECLARE_META_INTERFACE(HDCP);
-
- // Called to specify the observer that receives asynchronous notifications
- // from the HDCP implementation to signal completion/failure of asynchronous
- // operations (such as initialization) or out of band events.
- virtual status_t setObserver(const sp<IHDCPObserver> &observer) = 0;
-
- // Request to setup an HDCP session with the specified host listening
- // on the specified port.
- virtual status_t initAsync(const char *host, unsigned port) = 0;
-
- // Request to shutdown the active HDCP session.
- virtual status_t shutdownAsync() = 0;
-
- // Returns the capability bitmask of this HDCP session.
- // Possible return values (please refer to HDCAPAPI.h):
- // HDCP_CAPS_ENCRYPT: mandatory, meaning the HDCP module can encrypt
- // from an input byte-array buffer to an output byte-array buffer
- // HDCP_CAPS_ENCRYPT_NATIVE: the HDCP module supports encryption from
- // a native buffer to an output byte-array buffer. The format of the
- // input native buffer is specific to vendor's encoder implementation.
- // It is the same format as that used by the encoder when
- // "storeMetaDataInBuffers" extension is enabled on its output port.
- virtual uint32_t getCaps() = 0;
-
- // ENCRYPTION only:
- // Encrypt data according to the HDCP spec. "size" bytes of data are
- // available at "inData" (virtual address), "size" may not be a multiple
- // of 128 bits (16 bytes). An equal number of encrypted bytes should be
- // written to the buffer at "outData" (virtual address).
- // This operation is to be synchronous, i.e. this call does not return
- // until outData contains size bytes of encrypted data.
- // streamCTR will be assigned by the caller (to 0 for the first PES stream,
- // 1 for the second and so on)
- // inputCTR _will_be_maintained_by_the_callee_ for each PES stream.
- virtual status_t encrypt(
- const void *inData, size_t size, uint32_t streamCTR,
- uint64_t *outInputCTR, void *outData) = 0;
-
- // Encrypt data according to the HDCP spec. "size" bytes of data starting
- // at location "offset" are available in "buffer" (buffer handle). "size"
- // may not be a multiple of 128 bits (16 bytes). An equal number of
- // encrypted bytes should be written to the buffer at "outData" (virtual
- // address). This operation is to be synchronous, i.e. this call does not
- // return until outData contains size bytes of encrypted data.
- // streamCTR will be assigned by the caller (to 0 for the first PES stream,
- // 1 for the second and so on)
- // inputCTR _will_be_maintained_by_the_callee_ for each PES stream.
- virtual status_t encryptNative(
- const sp<GraphicBuffer> &graphicBuffer,
- size_t offset, size_t size, uint32_t streamCTR,
- uint64_t *outInputCTR, void *outData) = 0;
-
- // DECRYPTION only:
- // Decrypt data according to the HDCP spec.
- // "size" bytes of encrypted data are available at "inData"
- // (virtual address), "size" may not be a multiple of 128 bits (16 bytes).
- // An equal number of decrypted bytes should be written to the buffer
- // at "outData" (virtual address).
- // This operation is to be synchronous, i.e. this call does not return
- // until outData contains size bytes of decrypted data.
- // Both streamCTR and inputCTR will be provided by the caller.
- virtual status_t decrypt(
- const void *inData, size_t size,
- uint32_t streamCTR, uint64_t inputCTR,
- void *outData) = 0;
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(IHDCP);
-};
-
-struct BnHDCPObserver : public BnInterface<IHDCPObserver> {
- virtual status_t onTransact(
- uint32_t code, const Parcel &data, Parcel *reply,
- uint32_t flags = 0);
-};
-
-struct BnHDCP : public BnInterface<IHDCP> {
- virtual status_t onTransact(
- uint32_t code, const Parcel &data, Parcel *reply,
- uint32_t flags = 0);
-};
-
-} // namespace android
-
-
diff --git a/media/libmedia/include/media/IMediaCodecService.h b/media/libmedia/include/media/IMediaCodecService.h
deleted file mode 100644
index 59fb1c0..0000000
--- a/media/libmedia/include/media/IMediaCodecService.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IMEDIACODECSERVICE_H
-#define ANDROID_IMEDIACODECSERVICE_H
-
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
-#include <binder/Parcel.h>
-#include <media/IDataSource.h>
-#include <media/IOMX.h>
-#include <media/IOMXStore.h>
-
-namespace android {
-
-class IMediaCodecService: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(MediaCodecService);
-
- virtual sp<IOMX> getOMX() = 0;
- virtual sp<IOMXStore> getOMXStore() = 0;
-};
-
-class BnMediaCodecService: public BnInterface<IMediaCodecService>
-{
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
- uint32_t flags = 0);
-};
-
-} // namespace android
-
-#endif // ANDROID_IMEDIACODECSERVICE_H
diff --git a/media/libmedia/include/media/IMediaExtractor.h b/media/libmedia/include/media/IMediaExtractor.h
index 0ac7673..75e4ee2 100644
--- a/media/libmedia/include/media/IMediaExtractor.h
+++ b/media/libmedia/include/media/IMediaExtractor.h
@@ -18,8 +18,8 @@
#define IMEDIA_EXTRACTOR_BASE_H_
+#include <media/DataSource.h>
#include <media/IMediaSource.h>
-#include <media/stagefright/DataSource.h>
#include <vector>
namespace android {
@@ -60,16 +60,9 @@
// CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
virtual uint32_t flags() const = 0;
- // for DRM
- virtual char* getDrmTrackInfo(size_t trackID, int *len) = 0;
-
virtual status_t setMediaCas(const HInterfaceToken &casToken) = 0;
- virtual void setUID(uid_t uid) = 0;
-
virtual const char * name() = 0;
-
- virtual void release() = 0;
};
diff --git a/media/libmedia/include/media/IMediaHTTPConnection.h b/media/libmedia/include/media/IMediaHTTPConnection.h
index 2a63eb7..0fb6bb1 100644
--- a/media/libmedia/include/media/IMediaHTTPConnection.h
+++ b/media/libmedia/include/media/IMediaHTTPConnection.h
@@ -19,16 +19,15 @@
#define I_MEDIA_HTTP_CONNECTION_H_
#include <binder/IInterface.h>
+#include <media/MediaHTTPConnection.h>
#include <media/stagefright/foundation/ABase.h>
#include <utils/KeyedVector.h>
namespace android {
-struct IMediaHTTPConnection;
-
/** MUST stay in sync with IMediaHTTPConnection.aidl */
-struct IMediaHTTPConnection : public IInterface {
+struct IMediaHTTPConnection : public MediaHTTPConnection, public IInterface {
DECLARE_META_INTERFACE(MediaHTTPConnection);
virtual bool connect(
diff --git a/media/libmedia/include/media/IMediaHTTPService.h b/media/libmedia/include/media/IMediaHTTPService.h
index f66d6c8..e948b78 100644
--- a/media/libmedia/include/media/IMediaHTTPService.h
+++ b/media/libmedia/include/media/IMediaHTTPService.h
@@ -19,18 +19,19 @@
#define I_MEDIA_HTTP_SERVICE_H_
#include <binder/IInterface.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABase.h>
namespace android {
-struct IMediaHTTPConnection;
+struct MediaHTTPConnection;
/** MUST stay in sync with IMediaHTTPService.aidl */
-struct IMediaHTTPService : public IInterface {
+struct IMediaHTTPService : public MediaHTTPService, public IInterface {
DECLARE_META_INTERFACE(MediaHTTPService);
- virtual sp<IMediaHTTPConnection> makeHTTPConnection() = 0;
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() = 0;
private:
DISALLOW_EVIL_CONSTRUCTORS(IMediaHTTPService);
diff --git a/media/libmedia/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
index ea95161..c6f422d 100644
--- a/media/libmedia/include/media/IMediaMetadataRetriever.h
+++ b/media/libmedia/include/media/IMediaMetadataRetriever.h
@@ -44,6 +44,13 @@
const sp<IDataSource>& dataSource, const char *mime) = 0;
virtual sp<IMemory> getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
+ virtual sp<IMemory> getImageAtIndex(
+ int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) = 0;
+ virtual status_t getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
diff --git a/media/libmedia/include/media/IMediaPlayer.h b/media/libmedia/include/media/IMediaPlayer.h
index e5a98dd..97a998e 100644
--- a/media/libmedia/include/media/IMediaPlayer.h
+++ b/media/libmedia/include/media/IMediaPlayer.h
@@ -23,7 +23,7 @@
#include <utils/KeyedVector.h>
#include <system/audio.h>
-#include <media/IMediaSource.h>
+#include <media/MediaSource.h>
#include <media/VolumeShaper.h>
// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
@@ -42,7 +42,7 @@
struct AVSyncSettings;
struct BufferingSettings;
-typedef IMediaSource::ReadOptions::SeekMode MediaPlayerSeekMode;
+typedef MediaSource::ReadOptions::SeekMode MediaPlayerSeekMode;
class IMediaPlayer: public IInterface
{
@@ -61,7 +61,7 @@
virtual status_t setDataSource(const sp<IDataSource>& source) = 0;
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
- virtual status_t getDefaultBufferingSettings(
+ virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) = 0;
virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
virtual status_t prepareAsync() = 0;
@@ -79,6 +79,7 @@
MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) = 0;
virtual status_t getCurrentPosition(int* msec) = 0;
virtual status_t getDuration(int* msec) = 0;
+ virtual status_t notifyAt(int64_t mediaTimeUs) = 0;
virtual status_t reset() = 0;
virtual status_t setAudioStreamType(audio_stream_type_t type) = 0;
virtual status_t setLooping(int loop) = 0;
@@ -91,10 +92,10 @@
virtual status_t getRetransmitEndpoint(struct sockaddr_in* endpoint) = 0;
virtual status_t setNextPlayer(const sp<IMediaPlayer>& next) = 0;
- virtual VolumeShaper::Status applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation) = 0;
- virtual sp<VolumeShaper::State> getVolumeShaperState(int id) = 0;
+ virtual media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation) = 0;
+ virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id) = 0;
// Modular DRM
virtual status_t prepareDrm(const uint8_t uuid[16],
@@ -130,6 +131,11 @@
virtual status_t getMetadata(bool update_only,
bool apply_filter,
Parcel *metadata) = 0;
+
+ // AudioRouting
+ virtual status_t setOutputDevice(audio_port_handle_t deviceId) = 0;
+ virtual status_t getRoutedDeviceId(audio_port_handle_t *deviceId) = 0;
+ virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/media/libmedia/include/media/IMediaPlayerService.h b/media/libmedia/include/media/IMediaPlayerService.h
index f21bb3a..217de14 100644
--- a/media/libmedia/include/media/IMediaPlayerService.h
+++ b/media/libmedia/include/media/IMediaPlayerService.h
@@ -31,7 +31,6 @@
namespace android {
-struct IHDCP;
class IMediaCodecList;
struct IMediaHTTPService;
class IMediaRecorder;
@@ -49,8 +48,6 @@
virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client,
audio_session_t audioSessionId = AUDIO_SESSION_ALLOCATE) = 0;
- virtual sp<IOMX> getOMX() = 0;
- virtual sp<IHDCP> makeHDCP(bool createEncryptionModule) = 0;
virtual sp<IMediaCodecList> getCodecList() const = 0;
// Connects to a remote display.
diff --git a/media/libmedia/include/media/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
index 9d0341a..379000e 100644
--- a/media/libmedia/include/media/IMediaRecorder.h
+++ b/media/libmedia/include/media/IMediaRecorder.h
@@ -19,6 +19,9 @@
#define ANDROID_IMEDIARECORDER_H
#include <binder/IInterface.h>
+#include <media/MicrophoneInfo.h>
+#include <system/audio.h>
+#include <vector>
namespace android {
@@ -64,6 +67,13 @@
virtual status_t release() = 0;
virtual status_t setInputSurface(const sp<PersistentSurface>& surface) = 0;
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() = 0;
+
+ virtual status_t setInputDevice(audio_port_handle_t deviceId) = 0;
+ virtual status_t getRoutedDeviceId(audio_port_handle_t *deviceId) = 0;
+ virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
+ virtual status_t getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
+
};
// ----------------------------------------------------------------------------
diff --git a/media/libmedia/include/media/IMediaSource.h b/media/libmedia/include/media/IMediaSource.h
index 2bde782..7a4b1b9 100644
--- a/media/libmedia/include/media/IMediaSource.h
+++ b/media/libmedia/include/media/IMediaSource.h
@@ -22,13 +22,12 @@
#include <binder/IInterface.h>
#include <binder/IMemory.h>
+#include <media/MediaSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaErrors.h>
namespace android {
-struct MediaSource;
-class MetaData;
class MediaBufferGroup;
class IMediaSource : public IInterface {
@@ -55,51 +54,6 @@
// Returns the format of the data output by this media source.
virtual sp<MetaData> getFormat() = 0;
- // Options that modify read() behaviour. The default is to
- // a) not request a seek
- // b) not be late, i.e. lateness_us = 0
- struct ReadOptions {
- enum SeekMode : int32_t {
- SEEK_PREVIOUS_SYNC,
- SEEK_NEXT_SYNC,
- SEEK_CLOSEST_SYNC,
- SEEK_CLOSEST,
- };
-
- ReadOptions();
-
- // Reset everything back to defaults.
- void reset();
-
- void setSeekTo(int64_t time_us, SeekMode mode = SEEK_CLOSEST_SYNC);
- void clearSeekTo();
- bool getSeekTo(int64_t *time_us, SeekMode *mode) const;
-
- // TODO: remove this if unused.
- void setLateBy(int64_t lateness_us);
- int64_t getLateBy() const;
-
- void setNonBlocking();
- void clearNonBlocking();
- bool getNonBlocking() const;
-
- // Used to clear all non-persistent options for multiple buffer reads.
- void clearNonPersistent() {
- clearSeekTo();
- }
-
- private:
- enum Options {
- kSeekTo_Option = 1,
- };
-
- uint32_t mOptions;
- int64_t mSeekTimeUs;
- SeekMode mSeekMode;
- int64_t mLatenessUs;
- bool mNonBlocking;
- } __attribute__((packed)); // sent through Binder
-
// Returns a new buffer of data. Call blocks until a
// buffer is available, an error is encountered or the end of the stream
// is reached.
@@ -110,7 +64,8 @@
//
// TODO: consider removing read() in favor of readMultiple().
virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL) = 0;
+ MediaBufferBase **buffer,
+ const MediaSource::ReadOptions *options = NULL) = 0;
// Returns a vector of new buffers of data, where the new buffers are added
// to the end of the vector.
@@ -125,8 +80,8 @@
// ReadOptions may be specified. Persistent options apply to all reads;
// non-persistent options (e.g. seek) apply only to the first read.
virtual status_t readMultiple(
- Vector<MediaBuffer *> *buffers, uint32_t maxNumBuffers = 1,
- const ReadOptions *options = nullptr) = 0;
+ Vector<MediaBufferBase *> *buffers, uint32_t maxNumBuffers = 1,
+ const MediaSource::ReadOptions *options = nullptr) = 0;
// Returns true if |readMultiple| is supported, otherwise false.
virtual bool supportReadMultiple() = 0;
@@ -139,14 +94,6 @@
// until a subsequent read-with-seek. Currently only supported by
// OMXCodec.
virtual status_t pause() = 0;
-
- // The consumer of this media source requests that the given buffers
- // are to be returned exclusively in response to read calls.
- // This will be called after a successful start() and before the
- // first read() call.
- // Callee assumes ownership of the buffers if no error is returned.
- virtual status_t setBuffers(const Vector<MediaBuffer *> & /* buffers */) = 0;
-
};
class BnMediaSource: public BnInterface<IMediaSource>
@@ -161,14 +108,10 @@
return ERROR_UNSUPPORTED;
}
- virtual status_t setBuffers(const Vector<MediaBuffer *> & /* buffers */) {
- return ERROR_UNSUPPORTED;
- }
-
// TODO: Implement this for local media sources.
virtual status_t readMultiple(
- Vector<MediaBuffer *> * /* buffers */, uint32_t /* maxNumBuffers = 1 */,
- const ReadOptions * /* options = nullptr */) {
+ Vector<MediaBufferBase *> * /* buffers */, uint32_t /* maxNumBuffers = 1 */,
+ const MediaSource::ReadOptions * /* options = nullptr */) {
return ERROR_UNSUPPORTED;
}
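
A minimal caller-side sketch of the updated read() signature, assuming an sp<IMediaSource> obtained elsewhere and the shared MediaSource::ReadOptions introduced above:

#include <media/IMediaSource.h>
#include <media/MediaSource.h>
#include <media/stagefright/MediaBuffer.h>

namespace android {

// Hypothetical helper: seek to timeUs and read one buffer from the source.
static status_t readAt(const sp<IMediaSource> &source,
                       int64_t timeUs, MediaBufferBase **out) {
    MediaSource::ReadOptions options;
    options.setSeekTo(timeUs, MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
    // read() blocks until a buffer is available, an error occurs, or EOS.
    return source->read(out, &options);
}

}  // namespace android
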
diff --git a/media/libmedia/include/media/IOMXStore.h b/media/libmedia/include/media/IOMXStore.h
deleted file mode 100644
index 628db70..0000000
--- a/media/libmedia/include/media/IOMXStore.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IOMXSTORE_H_
-
-#define ANDROID_IOMXSTORE_H_
-
-#include <media/IOMX.h>
-#include <android/hardware/media/omx/1.0/IOmxStore.h>
-
-#include <binder/IInterface.h>
-#include <binder/IBinder.h>
-
-#include <vector>
-#include <string>
-
-namespace android {
-
-using hardware::media::omx::V1_0::IOmxStore;
-
-class IOMXStore : public IInterface {
-public:
- DECLARE_META_INTERFACE(OMXStore);
-
- struct Attribute {
- std::string key;
- std::string value;
- };
-
- struct NodeInfo {
- std::string name;
- std::string owner;
- std::vector<Attribute> attributes;
- };
-
- struct RoleInfo {
- std::string role;
- std::string type;
- bool isEncoder;
- bool preferPlatformNodes;
- std::vector<NodeInfo> nodes;
- };
-
- virtual status_t listServiceAttributes(
- std::vector<Attribute>* attributes) = 0;
-
- virtual status_t getNodePrefix(std::string* prefix) = 0;
-
- virtual status_t listRoles(std::vector<RoleInfo>* roleList) = 0;
-
- virtual status_t getOmx(const std::string& name, sp<IOMX>* omx) = 0;
-};
-
-
-////////////////////////////////////////////////////////////////////////////////
-
-class BnOMXStore : public BnInterface<IOMXStore> {
-public:
- virtual status_t onTransact(
- uint32_t code, const Parcel &data, Parcel *reply,
- uint32_t flags = 0);
-};
-
-} // namespace android
-
-#endif // ANDROID_IOMX_H_
diff --git a/media/libmedia/include/media/IStreamSource.h b/media/libmedia/include/media/IStreamSource.h
index 4a6aafd..c08c3e8 100644
--- a/media/libmedia/include/media/IStreamSource.h
+++ b/media/libmedia/include/media/IStreamSource.h
@@ -54,40 +54,6 @@
virtual void queueBuffer(size_t index, size_t size) = 0;
- // When signalling a discontinuity you can optionally
- // specify an int64_t PTS timestamp in "msg".
- // If present, rendering of data following the discontinuity
- // will be suppressed until media time reaches this timestamp.
- static const char *const kKeyResumeAtPTS;
-
- // When signalling a discontinuity you can optionally
- // specify the type(s) of discontinuity, i.e. if the
- // audio format has changed, the video format has changed,
- // time has jumped or any combination thereof.
- // To do so, include a non-zero int32_t value
- // under the key "kKeyDiscontinuityMask" when issuing the DISCONTINUITY
- // command.
- // If there is a change in audio/video format, The new logical stream
- // must start with proper codec initialization
- // information for playback to continue, i.e. SPS and PPS in the case
- // of AVC video etc.
- // If this key is not present, only a time discontinuity is assumed.
- // The value should be a bitmask of values from
- // ATSParser::DiscontinuityType.
- static const char *const kKeyDiscontinuityMask;
-
- // Optionally signalled as part of a discontinuity that includes
- // DISCONTINUITY_TIME. It indicates the media time (in us) to be associated
- // with the next PTS occuring in the stream. The value is of type int64_t.
- static const char *const kKeyMediaTimeUs;
-
- // Optionally signalled as part of a discontinuity that includes
- // DISCONTINUITY_TIME. It indicates the media time (in us) of a recent
- // sample from the same content, and is used as a hint for the parser to
- // handle PTS wraparound. This is required when a new parser is created
- // to continue parsing content from the same timeline.
- static const char *const kKeyRecentMediaTimeUs;
-
virtual void issueCommand(
Command cmd, bool synchronous, const sp<AMessage> &msg = NULL) = 0;
};
diff --git a/media/libmedia/include/media/JAudioAttributes.h b/media/libmedia/include/media/JAudioAttributes.h
new file mode 100644
index 0000000..fb11435
--- /dev/null
+++ b/media/libmedia/include/media/JAudioAttributes.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_JAUDIOATTRIBUTES_H
+#define ANDROID_JAUDIOATTRIBUTES_H
+
+#include <jni.h>
+#include <system/audio.h>
+
+namespace android {
+
+class JAudioAttributes {
+public:
+ /* Creates a Java AudioAttributes object. */
+ static jobject createAudioAttributesObj(JNIEnv *env,
+ const audio_attributes_t* pAttributes,
+ audio_stream_type_t streamType) {
+
+ jclass jBuilderCls = env->FindClass("android/media/AudioAttributes$Builder");
+ jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+ jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
+ if (pAttributes != NULL) {
+ // If pAttributes is not null, streamType is ignored.
+ jmethodID jSetUsage = env->GetMethodID(
+ jBuilderCls, "setUsage", "(I)Landroid/media/AudioAttributes$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetUsage, pAttributes->usage);
+
+ jmethodID jSetContentType = env->GetMethodID(jBuilderCls, "setContentType",
+ "(I)Landroid/media/AudioAttributes$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetContentType,
+ pAttributes->content_type);
+
+ // TODO: Java AudioAttributes.Builder.setCapturePreset() is systemApi and hidden.
+ // Can we use this method?
+// jmethodID jSetCapturePreset = env->GetMethodID(jBuilderCls, "setCapturePreset",
+// "(I)Landroid/media/AudioAttributes$Builder;");
+// jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetCapturePreset,
+// pAttributes->source);
+
+ jmethodID jSetFlags = env->GetMethodID(jBuilderCls, "setFlags",
+ "(I)Landroid/media/AudioAttributes$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetFlags, pAttributes->flags);
+
+ // TODO: Handle the 'tags' (char[] to HashSet<String>).
+ // How to parse the char[]? Is there any example of it?
+ // Also, the addTags() method is hidden.
+ } else {
+ // Call AudioAttributes.Builder.setLegacyStreamType().build()
+ jmethodID jSetLegacyStreamType = env->GetMethodID(jBuilderCls, "setLegacyStreamType",
+ "(I)Landroid/media/AudioAttributes$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetLegacyStreamType, streamType);
+ }
+
+ jmethodID jBuild = env->GetMethodID(jBuilderCls, "build",
+ "()Landroid/media/AudioAttributes;");
+ return env->CallObjectMethod(jBuilderObj, jBuild);
+ }
+
+};
+
+} // namespace android
+
+#endif // ANDROID_JAUDIOATTRIBUTES_H
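
A hedged JNI-side sketch of the helper above; `env` and `attr` are assumed to come from the calling JNI function, and error checking of the builder calls is omitted here just as it is in the header:

#include <media/JAudioAttributes.h>
#include <system/audio.h>

// Hypothetical JNI helper: falls back to the legacy stream type when attr is null.
static jobject makeJavaAudioAttributes(JNIEnv *env, const audio_attributes_t *attr) {
    return android::JAudioAttributes::createAudioAttributesObj(
            env, attr, AUDIO_STREAM_MUSIC /* used only when attr == NULL */);
}
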
diff --git a/media/libmedia/include/media/JAudioFormat.h b/media/libmedia/include/media/JAudioFormat.h
new file mode 100644
index 0000000..00abdff
--- /dev/null
+++ b/media/libmedia/include/media/JAudioFormat.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_JAUDIOFORMAT_H
+#define ANDROID_JAUDIOFORMAT_H
+
+#include <android_media_AudioFormat.h>
+#include <jni.h>
+
+namespace android {
+
+class JAudioFormat {
+public:
+ /* Creates a Java AudioFormat object. */
+ static jobject createAudioFormatObj(JNIEnv *env,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask) {
+
+ jclass jBuilderCls = env->FindClass("android/media/AudioFormat$Builder");
+ jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+ jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
+ if (sampleRate == 0) {
+ jclass jAudioFormatCls = env->FindClass("android/media/AudioFormat");
+ jfieldID jSampleRateUnspecified =
+ env->GetStaticFieldID(jAudioFormatCls, "SAMPLE_RATE_UNSPECIFIED", "I");
+ sampleRate = env->GetStaticIntField(jAudioFormatCls, jSampleRateUnspecified);
+ }
+
+ jmethodID jSetEncoding = env->GetMethodID(jBuilderCls, "setEncoding",
+ "(I)Landroid/media/AudioFormat$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetEncoding,
+ audioFormatFromNative(format));
+
+ jmethodID jSetSampleRate = env->GetMethodID(jBuilderCls, "setSampleRate",
+ "(I)Landroid/media/AudioFormat$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetSampleRate, sampleRate);
+
+ jmethodID jSetChannelMask = env->GetMethodID(jBuilderCls, "setChannelMask",
+ "(I)Landroid/media/AudioFormat$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetChannelMask,
+ outChannelMaskFromNative(channelMask));
+
+ jmethodID jBuild = env->GetMethodID(jBuilderCls, "build", "()Landroid/media/AudioFormat;");
+ return env->CallObjectMethod(jBuilderObj, jBuild);
+ }
+
+};
+
+} // namespace android
+
+#endif // ANDROID_JAUDIOFORMAT_H
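
A similar sketch for JAudioFormat, with illustrative PCM parameters:

#include <media/JAudioFormat.h>
#include <system/audio.h>

// Hypothetical helper: wrap a 48 kHz stereo 16-bit PCM config in a Java AudioFormat.
static jobject makeJavaAudioFormat(JNIEnv *env) {
    return android::JAudioFormat::createAudioFormatObj(
            env, 48000 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO);
}
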
diff --git a/media/libmedia/include/media/JetPlayer.h b/media/libmedia/include/media/JetPlayer.h
index 63d1980..bb569bc 100644
--- a/media/libmedia/include/media/JetPlayer.h
+++ b/media/libmedia/include/media/JetPlayer.h
@@ -87,7 +87,7 @@
int mMaxTracks; // max number of MIDI tracks, usually 32
EAS_DATA_HANDLE mEasData;
- sp<MidiIoWrapper> mIoWrapper;
+ MidiIoWrapper* mIoWrapper;
EAS_PCM* mAudioBuffer;// EAS renders the MIDI data into this buffer,
sp<AudioTrack> mAudioTrack; // and we play it in this audio track
int mTrackBufferSize;
diff --git a/media/libmedia/include/media/MediaBufferHolder.h b/media/libmedia/include/media/MediaBufferHolder.h
new file mode 100644
index 0000000..f9dfdf5
--- /dev/null
+++ b/media/libmedia/include/media/MediaBufferHolder.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_BUFFER_HOLDER_H_
+
+#define MEDIA_BUFFER_HOLDER_H_
+
+#include <media/stagefright/MediaBuffer.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct MediaBufferHolder : public RefBase {
+ MediaBufferHolder(MediaBufferBase* buffer)
+ : mMediaBuffer(buffer) {
+ if (mMediaBuffer != nullptr) {
+ mMediaBuffer->add_ref();
+ }
+ }
+
+ virtual ~MediaBufferHolder() {
+ if (mMediaBuffer != nullptr) {
+ mMediaBuffer->release();
+ }
+ }
+
+ MediaBufferBase* mediaBuffer() { return mMediaBuffer; }
+
+private:
+ MediaBufferBase* const mMediaBuffer;
+};
+
+} // android
+
+#endif // MEDIA_BUFFER_HOLDER_H_
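
A short sketch of the intended ref-counting behaviour, assuming a caller that owns a MediaBufferBase and wants to park it in an sp<>-managed container:

#include <media/MediaBufferHolder.h>

namespace android {

// Hypothetical usage: the holder takes an extra reference in its constructor
// and drops it in its destructor, so the buffer stays alive while any sp<>
// to the holder exists.
static sp<MediaBufferHolder> parkBuffer(MediaBufferBase *buffer) {
    sp<MediaBufferHolder> holder = new MediaBufferHolder(buffer);  // add_ref()
    return holder;  // when the last sp<> goes away, release() is called
}

}  // namespace android
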
diff --git a/media/libmedia/include/media/MediaCodecBuffer.h b/media/libmedia/include/media/MediaCodecBuffer.h
index 501c00b..2c16fba 100644
--- a/media/libmedia/include/media/MediaCodecBuffer.h
+++ b/media/libmedia/include/media/MediaCodecBuffer.h
@@ -50,9 +50,6 @@
size_t offset() const;
// Default implementation calls ABuffer::setRange() and returns OK.
virtual status_t setRange(size_t offset, size_t size);
- // TODO: These can be removed if we finish replacing all MediaBuffer's.
- MediaBufferBase *getMediaBufferBase();
- void setMediaBufferBase(MediaBufferBase *mediaBuffer);
// TODO: Specify each field for meta/format.
sp<AMessage> meta();
@@ -66,7 +63,6 @@
const sp<AMessage> mMeta;
sp<AMessage> mFormat;
const sp<ABuffer> mBuffer;
- MediaBufferBase *mMediaBufferBase;
};
} // namespace android
diff --git a/media/libmedia/include/media/MediaCodecInfo.h b/media/libmedia/include/media/MediaCodecInfo.h
index ab2cd24..b3777d3 100644
--- a/media/libmedia/include/media/MediaCodecInfo.h
+++ b/media/libmedia/include/media/MediaCodecInfo.h
@@ -170,6 +170,7 @@
* Currently, this is the "instance name" of the IOmx service.
*/
const char *getOwnerName() const;
+ uint32_t rank() const;
/**
* Serialization over Binder
@@ -182,6 +183,7 @@
AString mOwner;
bool mIsEncoder;
KeyedVector<AString, sp<Capabilities> > mCaps;
+ uint32_t mRank;
ssize_t getCapabilityIndex(const char *mime) const;
@@ -252,6 +254,13 @@
* @return `true` if `mime` is removed; `false` if `mime` is not found.
*/
bool removeMime(const char* mime);
+ /**
+ * Set rank of the codec. MediaCodecList will stable-sort the list according
+ * to rank in non-descending order.
+ *
+ * @param rank The rank of the component.
+ */
+ void setRank(uint32_t rank);
private:
/**
* The associated `MediaCodecInfo`.
diff --git a/media/libmedia/include/media/MediaDefs.h b/media/libmedia/include/media/MediaDefs.h
deleted file mode 100644
index 7f17013..0000000
--- a/media/libmedia/include/media/MediaDefs.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_DEFS_H_
-
-#define MEDIA_DEFS_H_
-
-namespace android {
-
-extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
-
-extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
-extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
-extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
-extern const char *MEDIA_MIMETYPE_VIDEO_H263;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
-extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
-extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
-extern const char *MEDIA_MIMETYPE_VIDEO_SCRAMBLED;
-
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG; // layer III
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
-extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
-extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
-extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
-extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
-extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
-extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
-extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
-
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
-extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
-extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
-extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
-
-extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
-extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
-extern const char *MEDIA_MIMETYPE_TEXT_VTT;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
-extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
-
-// These are values exported to JAVA API that need to be in sync with
-// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
-// they are not defined in frameworks/av, so defining them here.
-enum AudioEncoding {
- kAudioEncodingPcm16bit = 2,
- kAudioEncodingPcm8bit = 3,
- kAudioEncodingPcmFloat = 4,
-};
-
-} // namespace android
-
-#endif // MEDIA_DEFS_H_
diff --git a/media/libmedia/include/media/MediaHTTPConnection.h b/media/libmedia/include/media/MediaHTTPConnection.h
new file mode 100644
index 0000000..17813a2
--- /dev/null
+++ b/media/libmedia/include/media/MediaHTTPConnection.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_HTTP_CONNECTION_H_
+
+#define MEDIA_HTTP_CONNECTION_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+
+namespace android {
+
+struct MediaHTTPConnection : public virtual RefBase {
+ MediaHTTPConnection() {}
+
+ virtual bool connect(
+ const char *uri, const KeyedVector<String8, String8> *headers) = 0;
+
+ virtual void disconnect() = 0;
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
+ virtual off64_t getSize() = 0;
+ virtual status_t getMIMEType(String8 *mimeType) = 0;
+ virtual status_t getUri(String8 *uri) = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(MediaHTTPConnection);
+};
+
+} // namespace android
+
+#endif // MEDIA_HTTP_CONNECTION_H_
diff --git a/media/libmedia/include/media/MediaHTTPService.h b/media/libmedia/include/media/MediaHTTPService.h
new file mode 100644
index 0000000..6e9f125
--- /dev/null
+++ b/media/libmedia/include/media/MediaHTTPService.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_HTTP_SERVICE_H_
+
+#define MEDIA_HTTP_SERVICE_H_
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct MediaHTTPConnection;
+
+struct MediaHTTPService : public virtual RefBase {
+ MediaHTTPService() {}
+
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(MediaHTTPService);
+};
+
+} // namespace android
+
+#endif // MEDIA_HTTP_SERVICE_H_
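
A stub implementation sketch showing the shape of the two interfaces above; every method simply reports the operation as unsupported, so this is illustrative only:

#include <media/MediaHTTPConnection.h>
#include <media/MediaHTTPService.h>
#include <media/stagefright/MediaErrors.h>

namespace android {

struct StubHTTPConnection : public MediaHTTPConnection {
    virtual bool connect(const char *, const KeyedVector<String8, String8> *) { return false; }
    virtual void disconnect() {}
    virtual ssize_t readAt(off64_t, void *, size_t) { return ERROR_UNSUPPORTED; }
    virtual off64_t getSize() { return -1; }
    virtual status_t getMIMEType(String8 *) { return ERROR_UNSUPPORTED; }
    virtual status_t getUri(String8 *) { return ERROR_UNSUPPORTED; }
};

struct StubHTTPService : public MediaHTTPService {
    virtual sp<MediaHTTPConnection> makeHTTPConnection() {
        return new StubHTTPConnection();
    }
};

}  // namespace android
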
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index 257002d..98d300f 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -22,6 +22,7 @@
#include <media/mediametadataretriever.h>
#include <media/mediascanner.h>
#include <private/media/VideoFrame.h>
+#include <media/stagefright/MediaErrors.h>
namespace android {
@@ -41,27 +42,20 @@
const KeyedVector<String8, String8> *headers = NULL) = 0;
virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
- virtual status_t setDataSource(const sp<DataSource>& source, const char *mime) = 0;
- virtual VideoFrame* getFrameAtTime(
+ virtual status_t setDataSource(const sp<DataSource>& source, const char *mime) = 0;
+ virtual sp<IMemory> getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
+ virtual sp<IMemory> getImageAtIndex(
+ int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) = 0;
+ virtual status_t getFrameAtIndex(
+ std::vector<sp<IMemory> >* frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
virtual MediaAlbumArt* extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
-// MediaMetadataRetrieverInterface
-class MediaMetadataRetrieverInterface : public MediaMetadataRetrieverBase
-{
-public:
- MediaMetadataRetrieverInterface() {}
-
- virtual ~MediaMetadataRetrieverInterface() {}
- virtual VideoFrame* getFrameAtTime(
- int64_t /*timeUs*/, int /*option*/, int /*colorFormat*/, bool /*metaOnly*/)
- { return NULL; }
- virtual MediaAlbumArt* extractAlbumArt() { return NULL; }
- virtual const char* extractMetadata(int /*keyCode*/) { return NULL; }
-};
-
}; // namespace android
#endif // ANDROID_MEDIAMETADATARETRIEVERINTERFACE_H
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
index 40dd9f9..5340dde 100644
--- a/media/libmedia/include/media/MediaRecorderBase.h
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -18,10 +18,14 @@
#define MEDIA_RECORDER_BASE_H_
+#include <media/AudioSystem.h>
+#include <media/MicrophoneInfo.h>
#include <media/mediarecorder.h>
#include <system/audio.h>
+#include <vector>
+
namespace android {
class ICameraRecordingProxy;
@@ -62,6 +66,13 @@
virtual status_t dump(int fd, const Vector<String16>& args) const = 0;
virtual status_t setInputSurface(const sp<PersistentSurface>& surface) = 0;
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const = 0;
+ virtual status_t setInputDevice(audio_port_handle_t deviceId) = 0;
+ virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId) = 0;
+ virtual void setAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback) = 0;
+ virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
+ virtual status_t getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
+
protected:
diff --git a/media/libmedia/include/media/MediaResource.h b/media/libmedia/include/media/MediaResource.h
index 1957a45..e1fdb9b 100644
--- a/media/libmedia/include/media/MediaResource.h
+++ b/media/libmedia/include/media/MediaResource.h
@@ -29,7 +29,8 @@
kUnspecified = 0,
kSecureCodec,
kNonSecureCodec,
- kGraphicMemory
+ kGraphicMemory,
+ kCpuBoost,
};
enum SubType {
diff --git a/media/libmedia/include/media/MidiIoWrapper.h b/media/libmedia/include/media/MidiIoWrapper.h
index e6f8cf7..b5e565e 100644
--- a/media/libmedia/include/media/MidiIoWrapper.h
+++ b/media/libmedia/include/media/MidiIoWrapper.h
@@ -19,15 +19,15 @@
#include <libsonivox/eas_types.h>
-#include "media/stagefright/DataSource.h"
+#include <media/DataSourceBase.h>
namespace android {
-class MidiIoWrapper : public RefBase {
+class MidiIoWrapper {
public:
- MidiIoWrapper(const char *path);
- MidiIoWrapper(int fd, off64_t offset, int64_t size);
- MidiIoWrapper(const sp<DataSource> &source);
+ explicit MidiIoWrapper(const char *path);
+ explicit MidiIoWrapper(int fd, off64_t offset, int64_t size);
+ explicit MidiIoWrapper(DataSourceBase *source);
~MidiIoWrapper();
@@ -40,7 +40,7 @@
int mFd;
off64_t mBase;
int64_t mLength;
- sp<DataSource> mDataSource;
+ DataSourceBase *mDataSource;
EAS_FILE mEasFile;
};
diff --git a/media/libmedia/include/media/NdkWrapper.h b/media/libmedia/include/media/NdkWrapper.h
new file mode 100644
index 0000000..c97d171
--- /dev/null
+++ b/media/libmedia/include/media/NdkWrapper.h
@@ -0,0 +1,369 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NDK_WRAPPER_H_
+
+#define NDK_WRAPPER_H_
+
+#include <media/DataSource.h>
+#include <media/MediaSource.h>
+#include <media/NdkMediaDataSource.h>
+#include <media/NdkMediaError.h>
+#include <media/NdkMediaExtractor.h>
+#include <media/hardware/CryptoAPI.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+struct AMediaCodec;
+struct AMediaCodecBufferInfo;
+struct AMediaCodecCryptoInfo;
+struct AMediaCrypto;
+struct AMediaDrm;
+struct AMediaFormat;
+struct AMediaExtractor;
+struct ANativeWindow;
+struct PsshInfo;
+
+namespace android {
+
+struct AMessage;
+class MetaData;
+
+struct AMediaFormatWrapper : public RefBase {
+
+ static sp<AMediaFormatWrapper> Create(const sp<AMessage> &message);
+
+ AMediaFormatWrapper();
+ AMediaFormatWrapper(AMediaFormat *aMediaFormat);
+
+ // the returned AMediaFormat is still owned by this wrapper.
+ AMediaFormat *getAMediaFormat() const;
+
+ sp<AMessage> toAMessage() const;
+ void writeToAMessage(sp<AMessage>&) const;
+ const char* toString() const;
+
+ status_t release();
+
+ bool getInt32(const char *name, int32_t *out) const;
+ bool getInt64(const char *name, int64_t *out) const;
+ bool getFloat(const char *name, float *out) const;
+ bool getDouble(const char *name, double *out) const;
+ bool getSize(const char *name, size_t *out) const;
+ bool getRect(const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) const;
+ bool getBuffer(const char *name, void** data, size_t *outsize) const;
+ bool getString(const char *name, AString *out) const;
+
+ void setInt32(const char* name, int32_t value);
+ void setInt64(const char* name, int64_t value);
+ void setFloat(const char* name, float value);
+ void setDouble(const char *name, double value);
+ void setSize(const char* name, size_t value);
+ void setRect(const char* name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom);
+ void setString(const char* name, const AString &value);
+ void setBuffer(const char* name, void* data, size_t size);
+
+protected:
+ virtual ~AMediaFormatWrapper();
+
+private:
+ AMediaFormat *mAMediaFormat;
+
+ DISALLOW_EVIL_CONSTRUCTORS(AMediaFormatWrapper);
+};
+
+struct ANativeWindowWrapper : public RefBase {
+ ANativeWindowWrapper(ANativeWindow *aNativeWindow);
+
+ // the returned ANativeWindow is still owned by this wrapper.
+ ANativeWindow *getANativeWindow() const;
+
+ status_t release();
+
+protected:
+ virtual ~ANativeWindowWrapper();
+
+private:
+ ANativeWindow *mANativeWindow;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ANativeWindowWrapper);
+};
+
+struct AMediaDrmWrapper : public RefBase {
+ AMediaDrmWrapper(const uint8_t uuid[16]);
+ AMediaDrmWrapper(AMediaDrm *aMediaDrm);
+
+ // the returned AMediaDrm is still owned by this wrapper.
+ AMediaDrm *getAMediaDrm() const;
+
+ status_t release();
+
+ static bool isCryptoSchemeSupported(const uint8_t uuid[16], const char *mimeType);
+
+protected:
+ virtual ~AMediaDrmWrapper();
+
+private:
+ AMediaDrm *mAMediaDrm;
+
+ DISALLOW_EVIL_CONSTRUCTORS(AMediaDrmWrapper);
+};
+
+struct AMediaCryptoWrapper : public RefBase {
+ AMediaCryptoWrapper(const uint8_t uuid[16],
+ const void *initData,
+ size_t initDataSize);
+ AMediaCryptoWrapper(AMediaCrypto *aMediaCrypto);
+
+ // the returned AMediaCrypto is still owned by this wrapper.
+ AMediaCrypto *getAMediaCrypto() const;
+
+ status_t release();
+
+ bool isCryptoSchemeSupported(const uint8_t uuid[16]);
+
+ bool requiresSecureDecoderComponent(const char *mime);
+
+protected:
+ virtual ~AMediaCryptoWrapper();
+
+private:
+ AMediaCrypto *mAMediaCrypto;
+
+ DISALLOW_EVIL_CONSTRUCTORS(AMediaCryptoWrapper);
+};
+
+struct AMediaCodecCryptoInfoWrapper : public RefBase {
+ static sp<AMediaCodecCryptoInfoWrapper> Create(MetaDataBase &meta);
+
+ AMediaCodecCryptoInfoWrapper(int numsubsamples,
+ uint8_t key[16],
+ uint8_t iv[16],
+ CryptoPlugin::Mode mode,
+ size_t *clearbytes,
+ size_t *encryptedbytes);
+ AMediaCodecCryptoInfoWrapper(AMediaCodecCryptoInfo *aMediaCodecCryptoInfo);
+
+ // the returned AMediaCryptoInfo is still owned by this wrapper.
+ AMediaCodecCryptoInfo *getAMediaCodecCryptoInfo() const;
+
+ status_t release();
+
+ void setPattern(CryptoPlugin::Pattern *pattern);
+
+ size_t getNumSubSamples();
+
+ status_t getKey(uint8_t *dst);
+
+ status_t getIV(uint8_t *dst);
+
+ CryptoPlugin::Mode getMode();
+
+ status_t getClearBytes(size_t *dst);
+
+ status_t getEncryptedBytes(size_t *dst);
+
+protected:
+ virtual ~AMediaCodecCryptoInfoWrapper();
+
+private:
+ AMediaCodecCryptoInfo *mAMediaCodecCryptoInfo;
+
+ DISALLOW_EVIL_CONSTRUCTORS(AMediaCodecCryptoInfoWrapper);
+};
+
+struct AMediaCodecWrapper : public RefBase {
+ enum {
+ CB_INPUT_AVAILABLE = 1,
+ CB_OUTPUT_AVAILABLE = 2,
+ CB_ERROR = 3,
+ CB_OUTPUT_FORMAT_CHANGED = 4,
+ };
+
+ static sp<AMediaCodecWrapper> CreateCodecByName(const AString &name);
+ static sp<AMediaCodecWrapper> CreateDecoderByType(const AString &mimeType);
+
+ static void OnInputAvailableCB(AMediaCodec *codec,
+ void *userdata,
+ int32_t index);
+ static void OnOutputAvailableCB(AMediaCodec *codec,
+ void *userdata,
+ int32_t index,
+ AMediaCodecBufferInfo *bufferInfo);
+ static void OnFormatChangedCB(AMediaCodec *codec,
+ void *userdata,
+ AMediaFormat *format);
+ static void OnErrorCB(AMediaCodec *codec,
+ void *userdata,
+ media_status_t err,
+ int32_t actionCode,
+ const char *detail);
+
+ AMediaCodecWrapper(AMediaCodec *aMediaCodec);
+
+ // the returned AMediaCodec is still owned by this wrapper.
+ AMediaCodec *getAMediaCodec() const;
+
+ status_t release();
+
+ status_t getName(AString* outComponentName) const;
+
+ status_t configure(
+ const sp<AMediaFormatWrapper> &format,
+ const sp<ANativeWindowWrapper> &nww,
+ const sp<AMediaCryptoWrapper> &crypto,
+ uint32_t flags);
+
+ status_t setCallback(const sp<AMessage> &callback);
+
+ status_t releaseCrypto();
+
+ status_t start();
+ status_t stop();
+ status_t flush();
+
+ uint8_t* getInputBuffer(size_t idx, size_t *out_size);
+ uint8_t* getOutputBuffer(size_t idx, size_t *out_size);
+
+ status_t queueInputBuffer(
+ size_t idx,
+ size_t offset,
+ size_t size,
+ uint64_t time,
+ uint32_t flags);
+
+ status_t queueSecureInputBuffer(
+ size_t idx,
+ size_t offset,
+ sp<AMediaCodecCryptoInfoWrapper> &codecCryptoInfo,
+ uint64_t time,
+ uint32_t flags);
+
+ sp<AMediaFormatWrapper> getOutputFormat();
+ sp<AMediaFormatWrapper> getInputFormat();
+
+ status_t releaseOutputBuffer(size_t idx, bool render);
+
+ status_t setOutputSurface(const sp<ANativeWindowWrapper> &nww);
+
+ status_t releaseOutputBufferAtTime(size_t idx, int64_t timestampNs);
+
+ status_t setParameters(const sp<AMediaFormatWrapper> &params);
+
+protected:
+ virtual ~AMediaCodecWrapper();
+
+private:
+ AMediaCodec *mAMediaCodec;
+
+ sp<AMessage> mCallback;
+
+ DISALLOW_EVIL_CONSTRUCTORS(AMediaCodecWrapper);
+};
+
+struct AMediaExtractorWrapper : public RefBase {
+
+ AMediaExtractorWrapper(AMediaExtractor *aMediaExtractor);
+
+ // the returned AMediaExtractor is still owned by this wrapper.
+ AMediaExtractor *getAMediaExtractor() const;
+
+ status_t release();
+
+ status_t disconnect();
+
+ status_t setDataSource(int fd, off64_t offset, off64_t length);
+
+ status_t setDataSource(const char *location);
+
+ status_t setDataSource(AMediaDataSource *);
+
+ size_t getTrackCount();
+
+ sp<AMediaFormatWrapper> getFormat();
+
+ sp<AMediaFormatWrapper> getTrackFormat(size_t idx);
+
+ status_t selectTrack(size_t idx);
+
+ status_t unselectTrack(size_t idx);
+
+ status_t selectSingleTrack(size_t idx);
+
+ ssize_t readSampleData(const sp<ABuffer> &buffer);
+
+ ssize_t getSampleSize();
+
+ uint32_t getSampleFlags();
+
+ int getSampleTrackIndex();
+
+ int64_t getSampleTime();
+
+ status_t getSampleFormat(sp<AMediaFormatWrapper> &formatWrapper);
+
+ int64_t getCachedDuration();
+
+ bool advance();
+
+ status_t seekTo(int64_t seekPosUs, MediaSource::ReadOptions::SeekMode mode);
+
+ // the returned PsshInfo is still owned by this wrapper.
+ PsshInfo* getPsshInfo();
+
+ sp<AMediaCodecCryptoInfoWrapper> getSampleCryptoInfo();
+
+protected:
+ virtual ~AMediaExtractorWrapper();
+
+private:
+ AMediaExtractor *mAMediaExtractor;
+
+ DISALLOW_EVIL_CONSTRUCTORS(AMediaExtractorWrapper);
+};
+
+struct AMediaDataSourceWrapper : public RefBase {
+
+ static status_t translate_error(media_status_t err);
+
+ static ssize_t AMediaDataSourceWrapper_getSize(void *userdata);
+
+ static ssize_t AMediaDataSourceWrapper_readAt(void *userdata, off64_t offset, void * buf, size_t size);
+
+ static void AMediaDataSourceWrapper_close(void *userdata);
+
+ AMediaDataSourceWrapper(const sp<DataSource> &dataSource);
+
+ AMediaDataSource *getAMediaDataSource();
+
+protected:
+ virtual ~AMediaDataSourceWrapper();
+
+private:
+ sp<DataSource> mDataSource;
+
+ AMediaDataSource *mAMediaDataSource;
+
+ DISALLOW_EVIL_CONSTRUCTORS(AMediaDataSourceWrapper);
+};
+
+} // namespace android
+
+#endif // NDK_WRAPPER_H_
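
A hedged sketch of how these wrappers might be used; AMediaExtractor_new() is the public NDK constructor, and everything else follows the declarations above:

#include <media/NdkMediaExtractor.h>
#include <media/NdkWrapper.h>

namespace android {

// Hypothetical helper: wrap a freshly created NDK extractor and point it at a
// file descriptor range.
static sp<AMediaExtractorWrapper> openExtractor(int fd, off64_t offset, off64_t length) {
    sp<AMediaExtractorWrapper> extractor =
            new AMediaExtractorWrapper(AMediaExtractor_new());
    if (extractor->setDataSource(fd, offset, length) != OK) {
        return NULL;
    }
    // Tracks can now be inspected via getTrackCount() / getTrackFormat().
    return extractor;
}

}  // namespace android
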
diff --git a/media/libmedia/include/media/OMXBuffer.h b/media/libmedia/include/media/OMXBuffer.h
index 3e84858..9c9f5e7 100644
--- a/media/libmedia/include/media/OMXBuffer.h
+++ b/media/libmedia/include/media/OMXBuffer.h
@@ -91,6 +91,7 @@
private:
friend struct OMXNodeInstance;
+ friend struct C2OMXNode;
// This is needed temporarily for OMX HIDL transition.
friend inline bool (::android::hardware::media::omx::V1_0::implementation::
diff --git a/media/libmedia/include/media/PluginMetricsReporting.h b/media/libmedia/include/media/PluginMetricsReporting.h
index 4a5a363..e00bd43 100644
--- a/media/libmedia/include/media/PluginMetricsReporting.h
+++ b/media/libmedia/include/media/PluginMetricsReporting.h
@@ -20,13 +20,13 @@
#include <utils/Errors.h>
#include <utils/String8.h>
-#include <utils/Vector.h>
namespace android {
-status_t reportDrmPluginMetrics(const Vector<uint8_t>& serializedMetrics,
+status_t reportDrmPluginMetrics(const std::string& b64EncodedMetrics,
const String8& vendorName,
- const String8& description);
+ const String8& description,
+ const String8& appPackageName);
} // namespace android
diff --git a/media/libmedia/include/media/TimeCheck.h b/media/libmedia/include/media/TimeCheck.h
new file mode 100644
index 0000000..6c5f656
--- /dev/null
+++ b/media/libmedia/include/media/TimeCheck.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_TIME_CHECK_H
+#define ANDROID_TIME_CHECK_H
+
+#include <utils/KeyedVector.h>
+#include <utils/Thread.h>
+
+
+namespace android {
+
+// A class monitoring execution time for a code block (scoped variable) and causing an assert
+// if it exceeds a certain time
+
+class TimeCheck {
+public:
+
+ // The default timeout is chosen to be less than system server watchdog timeout
+ static constexpr uint32_t kDefaultTimeOutMs = 5000;
+
+ TimeCheck(const char *tag, uint32_t timeoutMs = kDefaultTimeOutMs);
+ ~TimeCheck();
+
+private:
+
+ class TimeCheckThread : public Thread {
+ public:
+
+ TimeCheckThread() {}
+ virtual ~TimeCheckThread() override;
+
+ nsecs_t startMonitoring(const char *tag, uint32_t timeoutMs);
+ void stopMonitoring(nsecs_t endTimeNs);
+
+ private:
+
+ // RefBase
+ virtual void onFirstRef() override { run("TimeCheckThread", PRIORITY_URGENT_AUDIO); }
+
+ // Thread
+ virtual bool threadLoop() override;
+
+ Condition mCond;
+ Mutex mMutex;
+ // Using the end time in ns as the key is OK because the risk is low that two
+ // entries are added such that <add time> + <timeout> is identical for both.
+ KeyedVector< nsecs_t, const char*> mMonitorRequests;
+ };
+
+ static sp<TimeCheckThread> getTimeCheckThread();
+
+ const nsecs_t mEndTimeNs;
+};
+
+}; // namespace android
+
+#endif // ANDROID_TIME_CHECK_H
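
Usage sketch for the scoped watchdog, assuming it guards a hypothetical audio binder call:

#include <media/TimeCheck.h>

// The TimeCheck instance starts monitoring on construction and stops when it
// goes out of scope; if the scope takes longer than the timeout, the monitor
// thread asserts.
void someAudioBinderCall() {
    android::TimeCheck check("someAudioBinderCall");  // default 5 second timeout
    // ... perform the work being monitored ...
}
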
diff --git a/media/libmedia/include/media/TypeConverter.h b/media/libmedia/include/media/TypeConverter.h
index 84e22b1..86f0d4c 100644
--- a/media/libmedia/include/media/TypeConverter.h
+++ b/media/libmedia/include/media/TypeConverter.h
@@ -80,6 +80,11 @@
typedef audio_mode_t Type;
typedef Vector<Type> Collection;
};
+struct AudioContentTraits
+{
+ typedef audio_content_type_t Type;
+ typedef Vector<Type> Collection;
+};
struct UsageTraits
{
typedef audio_usage_t Type;
@@ -226,6 +231,7 @@
typedef TypeConverter<GainModeTraits> GainModeConverter;
typedef TypeConverter<StreamTraits> StreamTypeConverter;
typedef TypeConverter<AudioModeTraits> AudioModeConverter;
+typedef TypeConverter<AudioContentTraits> AudioContentTypeConverter;
typedef TypeConverter<UsageTraits> UsageTypeConverter;
typedef TypeConverter<SourceTraits> SourceTypeConverter;
@@ -240,6 +246,7 @@
template<> const GainModeConverter::Table GainModeConverter::mTable[];
template<> const StreamTypeConverter::Table StreamTypeConverter::mTable[];
template<> const AudioModeConverter::Table AudioModeConverter::mTable[];
+template<> const AudioContentTypeConverter::Table AudioContentTypeConverter::mTable[];
template<> const UsageTypeConverter::Table UsageTypeConverter::mTable[];
template<> const SourceTypeConverter::Table SourceTypeConverter::mTable[];
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index 65c266b..cdef637 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -59,6 +59,15 @@
METADATA_KEY_LOCATION = 23,
METADATA_KEY_VIDEO_ROTATION = 24,
METADATA_KEY_CAPTURE_FRAMERATE = 25,
+ METADATA_KEY_HAS_IMAGE = 26,
+ METADATA_KEY_IMAGE_COUNT = 27,
+ METADATA_KEY_IMAGE_PRIMARY = 28,
+ METADATA_KEY_IMAGE_WIDTH = 29,
+ METADATA_KEY_IMAGE_HEIGHT = 30,
+ METADATA_KEY_IMAGE_ROTATION = 31,
+ METADATA_KEY_VIDEO_FRAME_COUNT = 32,
+ METADATA_KEY_EXIF_OFFSET = 33,
+ METADATA_KEY_EXIF_LENGTH = 34,
// Add more here...
};
@@ -80,6 +89,13 @@
const sp<IDataSource>& dataSource, const char *mime = NULL);
sp<IMemory> getFrameAtTime(int64_t timeUs, int option,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
+ sp<IMemory> getImageAtIndex(int index,
+ int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false, bool thumbnail = false);
+ sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom);
+ status_t getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
+ int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
sp<IMemory> extractAlbumArt();
const char* extractMetadata(int keyCode);
diff --git a/media/libmedia/include/media/mediaplayer.h b/media/libmedia/include/media/mediaplayer.h
index 623c374..2335c5a 100644
--- a/media/libmedia/include/media/mediaplayer.h
+++ b/media/libmedia/include/media/mediaplayer.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_MEDIAPLAYER_H
#define ANDROID_MEDIAPLAYER_H
+#include <media/mediaplayer_common.h>
+
#include <arpa/inet.h>
#include <binder/IMemory.h>
@@ -50,12 +52,15 @@
MEDIA_PAUSED = 7,
MEDIA_STOPPED = 8,
MEDIA_SKIPPED = 9,
+ MEDIA_NOTIFY_TIME = 98,
MEDIA_TIMED_TEXT = 99,
MEDIA_ERROR = 100,
MEDIA_INFO = 200,
MEDIA_SUBTITLE_DATA = 201,
MEDIA_META_DATA = 202,
MEDIA_DRM_INFO = 210,
+ MEDIA_TIME_DISCONTINUITY = 211,
+ MEDIA_AUDIO_ROUTING_CHANGED = 10000,
};
// Generic error codes for the media player framework. Errors are fatal, the
@@ -186,16 +191,6 @@
INVOKE_ID_GET_SELECTED_TRACK = 7
};
-// Keep MEDIA_TRACK_TYPE_* in sync with MediaPlayer.java.
-enum media_track_type {
- MEDIA_TRACK_TYPE_UNKNOWN = 0,
- MEDIA_TRACK_TYPE_VIDEO = 1,
- MEDIA_TRACK_TYPE_AUDIO = 2,
- MEDIA_TRACK_TYPE_TIMEDTEXT = 3,
- MEDIA_TRACK_TYPE_SUBTITLE = 4,
- MEDIA_TRACK_TYPE_METADATA = 5,
-};
-
// ----------------------------------------------------------------------------
// ref-counted object for callbacks
class MediaPlayerListener: virtual public RefBase
@@ -225,7 +220,6 @@
status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer);
status_t setListener(const sp<MediaPlayerListener>& listener);
- status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */);
status_t setBufferingSettings(const BufferingSettings& buffering);
status_t prepare();
@@ -245,6 +239,7 @@
status_t seekTo(
int msec,
MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
+ status_t notifyAt(int64_t mediaTimeUs);
status_t getCurrentPosition(int *msec);
status_t getDuration(int *msec);
status_t reset();
@@ -266,13 +261,17 @@
status_t setRetransmitEndpoint(const char* addrString, uint16_t port);
status_t setNextMediaPlayer(const sp<MediaPlayer>& player);
- VolumeShaper::Status applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation);
- sp<VolumeShaper::State> getVolumeShaperState(int id);
+ media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation);
+ sp<media::VolumeShaper::State> getVolumeShaperState(int id);
// Modular DRM
status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId);
status_t releaseDrm();
+ // AudioRouting
+ status_t setOutputDevice(audio_port_handle_t deviceId);
+ audio_port_handle_t getRoutedDeviceId();
+ status_t enableAudioDeviceCallback(bool enabled);
private:
void clear_l();
@@ -309,7 +308,6 @@
float mSendLevel;
struct sockaddr_in mRetransmitEndpoint;
bool mRetransmitEndpointValid;
- BufferingSettings mCurrentBufferingSettings;
};
}; // namespace android
diff --git a/media/libmedia/include/media/mediaplayer_common.h b/media/libmedia/include/media/mediaplayer_common.h
new file mode 100644
index 0000000..d5a0135
--- /dev/null
+++ b/media/libmedia/include/media/mediaplayer_common.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIAPLAYER_COMMON_H
+#define ANDROID_MEDIAPLAYER_COMMON_H
+
+namespace android {
+
+// Keep MEDIA_TRACK_TYPE_* in sync with MediaPlayer.java.
+enum media_track_type {
+ MEDIA_TRACK_TYPE_UNKNOWN = 0,
+ MEDIA_TRACK_TYPE_VIDEO = 1,
+ MEDIA_TRACK_TYPE_AUDIO = 2,
+ MEDIA_TRACK_TYPE_TIMEDTEXT = 3,
+ MEDIA_TRACK_TYPE_SUBTITLE = 4,
+ MEDIA_TRACK_TYPE_METADATA = 5,
+};
+
+}; // namespace android
+
+#endif // ANDROID_MEDIAPLAYER_COMMON_H
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index 071e7a1..d8b0fe7 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -24,6 +24,7 @@
#include <utils/Errors.h>
#include <media/IMediaRecorderClient.h>
#include <media/IMediaDeathNotifier.h>
+#include <media/MicrophoneInfo.h>
namespace android {
@@ -77,6 +78,9 @@
/* VP8/VORBIS data in a WEBM container */
OUTPUT_FORMAT_WEBM = 9,
+ /* HEIC data in a HEIF container */
+ OUTPUT_FORMAT_HEIF = 10,
+
OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
};
@@ -138,6 +142,8 @@
MEDIA_RECORDER_TRACK_EVENT_ERROR = 100,
MEDIA_RECORDER_TRACK_EVENT_INFO = 101,
MEDIA_RECORDER_TRACK_EVENT_LIST_END = 1000,
+
+ MEDIA_RECORDER_AUDIO_ROUTING_CHANGED = 10000,
};
/*
@@ -250,6 +256,10 @@
status_t setInputSurface(const sp<PersistentSurface>& surface);
sp<IGraphicBufferProducer> querySurfaceMediaSourceFromMediaServer();
status_t getMetrics(Parcel *reply);
+ status_t setInputDevice(audio_port_handle_t deviceId);
+ status_t getRoutedDeviceId(audio_port_handle_t *deviceId);
+ status_t enableAudioDeviceCallback(bool enabled);
+ status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
private:
void doCleanUp();
diff --git a/media/libmedia/include/media/omx/1.0/Conversion.h b/media/libmedia/include/media/omx/1.0/Conversion.h
index 9816fe1..3700a23 100644
--- a/media/libmedia/include/media/omx/1.0/Conversion.h
+++ b/media/libmedia/include/media/omx/1.0/Conversion.h
@@ -20,11 +20,11 @@
#include <vector>
#include <list>
+#include <cinttypes>
#include <unistd.h>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
-#include <hidlmemory/mapping.h>
#include <binder/Binder.h>
#include <binder/Status.h>
@@ -35,8 +35,9 @@
#include <media/OMXFenceParcelable.h>
#include <media/OMXBuffer.h>
#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaErrors.h>
+#include <gui/IGraphicBufferProducer.h>
-#include <android/hidl/memory/1.0/IMemory.h>
#include <android/hardware/media/omx/1.0/types.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
@@ -199,26 +200,6 @@
}
/**
- * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
- * calls.
- *
- * \param[in] t The source `Return<Status>`.
- * \return The corresponding `status_t`.
- *
- * This function first check if \p t has a transport error. If it does, then the
- * return value is the transport error code. Otherwise, the return value is
- * converted from `Status` contained inside \p t.
- *
- * Note:
- * - This `Status` is omx-specific. It is defined in `types.hal`.
- * - The name of this function is not `convert`.
- */
-// convert: Status -> status_t
-inline status_t toStatusT(Return<Status> const& t) {
- return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
-}
-
-/**
* \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
*
* \param[in] t The source `Return<void>`.
@@ -237,7 +218,47 @@
*/
// convert: Status -> status_t
inline status_t toStatusT(Status const& t) {
- return static_cast<status_t>(t);
+ switch (t) {
+ case Status::NO_ERROR:
+ case Status::NAME_NOT_FOUND:
+ case Status::WOULD_BLOCK:
+ case Status::NO_MEMORY:
+ case Status::ALREADY_EXISTS:
+ case Status::NO_INIT:
+ case Status::BAD_VALUE:
+ case Status::DEAD_OBJECT:
+ case Status::INVALID_OPERATION:
+ case Status::TIMED_OUT:
+ case Status::ERROR_UNSUPPORTED:
+ case Status::UNKNOWN_ERROR:
+ case Status::RELEASE_ALL_BUFFERS:
+ return static_cast<status_t>(t);
+ case Status::BUFFER_NEEDS_REALLOCATION:
+ return NOT_ENOUGH_DATA;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(t));
+ return static_cast<status_t>(t);
+ }
+}
+
+/**
+ * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
+ * calls.
+ *
+ * \param[in] t The source `Return<Status>`.
+ * \return The corresponding `status_t`.
+ *
+ * This function first checks if \p t has a transport error. If it does, then the
+ * return value is the transport error code. Otherwise, the return value is
+ * converted from `Status` contained inside \p t.
+ *
+ * Note:
+ * - This `Status` is omx-specific. It is defined in `types.hal`.
+ * - The name of this function is not `convert`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Return<Status> const& t) {
+ return t.isOk() ? toStatusT(static_cast<Status>(t)) : UNKNOWN_ERROR;
}
/**
@@ -248,7 +269,28 @@
*/
// convert: status_t -> Status
inline Status toStatus(status_t l) {
- return static_cast<Status>(l);
+ switch (l) {
+ case NO_ERROR:
+ case NAME_NOT_FOUND:
+ case WOULD_BLOCK:
+ case NO_MEMORY:
+ case ALREADY_EXISTS:
+ case NO_INIT:
+ case BAD_VALUE:
+ case DEAD_OBJECT:
+ case INVALID_OPERATION:
+ case TIMED_OUT:
+ case ERROR_UNSUPPORTED:
+ case UNKNOWN_ERROR:
+ case IGraphicBufferProducer::RELEASE_ALL_BUFFERS:
+ case IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION:
+ return static_cast<Status>(l);
+ case NOT_ENOUGH_DATA:
+ return Status::BUFFER_NEEDS_REALLOCATION;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(l));
+ return static_cast<Status>(l);
+ }
}
/**
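
An illustration of the mapping above, assuming it is compiled in the same namespace as Conversion.h so that Status, toStatusT() and toStatus() resolve as declared; BUFFER_NEEDS_REALLOCATION has no status_t counterpart and round-trips through NOT_ENOUGH_DATA:

// Illustrative only; values follow the switch statements added above.
inline void illustrateStatusMapping() {
    status_t l = toStatusT(Status::BUFFER_NEEDS_REALLOCATION);  // == NOT_ENOUGH_DATA
    Status s = toStatus(NOT_ENOUGH_DATA);  // == Status::BUFFER_NEEDS_REALLOCATION
    (void)l;
    (void)s;
}
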
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 7d27d57..e61b04d 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -154,6 +154,45 @@
return mRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
}
+sp<IMemory> MediaMetadataRetriever::getImageAtIndex(
+ int index, int colorFormat, bool metaOnly, bool thumbnail) {
+ ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
+ Mutex::Autolock _l(mLock);
+ if (mRetriever == 0) {
+ ALOGE("retriever is not initialized");
+ return NULL;
+ }
+ return mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
+}
+
+sp<IMemory> MediaMetadataRetriever::getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) {
+ ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+ Mutex::Autolock _l(mLock);
+ if (mRetriever == 0) {
+ ALOGE("retriever is not initialized");
+ return NULL;
+ }
+ return mRetriever->getImageRectAtIndex(
+ index, colorFormat, left, top, right, bottom);
+}
+
+status_t MediaMetadataRetriever::getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
+ frameIndex, numFrames, colorFormat, metaOnly);
+ Mutex::Autolock _l(mLock);
+ if (mRetriever == 0) {
+ ALOGE("retriever is not initialized");
+ return INVALID_OPERATION;
+ }
+ return mRetriever->getFrameAtIndex(
+ frames, frameIndex, numFrames, colorFormat, metaOnly);
+}
+
const char* MediaMetadataRetriever::extractMetadata(int keyCode)
{
ALOGV("extractMetadata(%d)", keyCode);
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index b976721..26908e5 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -48,6 +48,8 @@
namespace android {
+using media::VolumeShaper;
+
MediaPlayer::MediaPlayer()
{
ALOGV("constructor");
@@ -137,10 +139,8 @@
mPlayer = player;
if (player != 0) {
mCurrentState = MEDIA_PLAYER_INITIALIZED;
- player->getDefaultBufferingSettings(&mCurrentBufferingSettings);
err = NO_ERROR;
} else {
- mCurrentBufferingSettings = BufferingSettings();
ALOGE("Unable to create media player");
}
}
@@ -247,17 +247,6 @@
return mPlayer->setVideoSurfaceTexture(bufferProducer);
}
-status_t MediaPlayer::getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */)
-{
- ALOGV("getDefaultBufferingSettings");
-
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return NO_INIT;
- }
- return mPlayer->getDefaultBufferingSettings(buffering);
-}
-
status_t MediaPlayer::getBufferingSettings(BufferingSettings* buffering /* nonnull */)
{
ALOGV("getBufferingSettings");
@@ -266,8 +255,7 @@
if (mPlayer == 0) {
return NO_INIT;
}
- *buffering = mCurrentBufferingSettings;
- return NO_ERROR;
+ return mPlayer->getBufferingSettings(buffering);
}
status_t MediaPlayer::setBufferingSettings(const BufferingSettings& buffering)
@@ -278,11 +266,7 @@
if (mPlayer == 0) {
return NO_INIT;
}
- status_t err = mPlayer->setBufferingSettings(buffering);
- if (err == NO_ERROR) {
- mCurrentBufferingSettings = buffering;
- }
- return err;
+ return mPlayer->setBufferingSettings(buffering);
}
// must call with lock held
@@ -608,6 +592,15 @@
return result;
}
+status_t MediaPlayer::notifyAt(int64_t mediaTimeUs)
+{
+ Mutex::Autolock _l(mLock);
+ if (mPlayer != 0) {
+ return mPlayer->notifyAt(mediaTimeUs);
+ }
+ return INVALID_OPERATION;
+}
+
status_t MediaPlayer::reset_l()
{
mLoop = false;
@@ -625,7 +618,6 @@
// setDataSource has to be called again to create a
// new mediaplayer.
mPlayer = 0;
- mCurrentBufferingSettings = BufferingSettings();
return ret;
}
clear_l();
@@ -649,8 +641,12 @@
status_t MediaPlayer::reset()
{
ALOGV("reset");
+ mLockThreadId = getThreadId();
Mutex::Autolock _l(mLock);
- return reset_l();
+ status_t result = reset_l();
+ mLockThreadId = 0;
+
+ return result;
}
status_t MediaPlayer::setAudioStreamType(audio_stream_type_t type)
@@ -860,7 +856,7 @@
// this will deadlock.
//
// The threadId hack below works around this for the care of prepare,
- // seekTo and start within the same process.
+ // seekTo, start, and reset within the same process.
// FIXME: Remember, this is a hack, it's not even a hack that is applied
// consistently for all use-cases, this needs to be revisited.
if (mLockThreadId != getThreadId()) {
@@ -944,6 +940,9 @@
mVideoWidth = ext1;
mVideoHeight = ext2;
break;
+ case MEDIA_NOTIFY_TIME:
+ ALOGV("Received notify time message");
+ break;
case MEDIA_TIMED_TEXT:
ALOGV("Received timed text message");
break;
@@ -1076,4 +1075,39 @@
return status;
}
+status_t MediaPlayer::setOutputDevice(audio_port_handle_t deviceId)
+{
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGV("setOutputDevice: player not init");
+ return NO_INIT;
+ }
+ return mPlayer->setOutputDevice(deviceId);
+}
+
+audio_port_handle_t MediaPlayer::getRoutedDeviceId()
+{
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGV("getRoutedDeviceId: player not init");
+ return AUDIO_PORT_HANDLE_NONE;
+ }
+ audio_port_handle_t deviceId;
+ status_t status = mPlayer->getRoutedDeviceId(&deviceId);
+ if (status != NO_ERROR) {
+ return AUDIO_PORT_HANDLE_NONE;
+ }
+ return deviceId;
+}
+
+status_t MediaPlayer::enableAudioDeviceCallback(bool enabled)
+{
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGV("addAudioDeviceCallback: player not init");
+ return NO_INIT;
+ }
+ return mPlayer->enableAudioDeviceCallback(enabled);
+}
+
} // namespace android
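
A hedged client sketch of the new routing calls; `deviceId` would normally come from an audio port enumeration, which is outside the scope of this change:

#include <media/mediaplayer.h>

using namespace android;

// Hypothetical helper: opt into routing callbacks, request a preferred output
// device, and read back the device actually in use.
static void configureRouting(const sp<MediaPlayer> &player, audio_port_handle_t deviceId) {
    player->enableAudioDeviceCallback(true);   // delivered as MEDIA_AUDIO_ROUTING_CHANGED
    player->setOutputDevice(deviceId);
    audio_port_handle_t routed = player->getRoutedDeviceId();
    (void)routed;  // AUDIO_PORT_HANDLE_NONE until playback actually routes
}
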
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 4405930..721a043 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -792,4 +792,52 @@
notify(MEDIA_RECORDER_EVENT_ERROR, MEDIA_ERROR_SERVER_DIED, 0);
}
+status_t MediaRecorder::setInputDevice(audio_port_handle_t deviceId)
+{
+ ALOGV("setInputDevice");
+
+ if (mMediaRecorder == NULL) {
+ ALOGE("media recorder is not initialized yet");
+ return INVALID_OPERATION;
+ }
+ return mMediaRecorder->setInputDevice(deviceId);
+}
+
+status_t MediaRecorder::getRoutedDeviceId(audio_port_handle_t* deviceId)
+{
+ ALOGV("getRoutedDeviceId");
+
+ if (mMediaRecorder == NULL) {
+ ALOGE("media recorder is not initialized yet");
+ return INVALID_OPERATION;
+ }
+ status_t status = mMediaRecorder->getRoutedDeviceId(deviceId);
+ if (status != NO_ERROR) {
+ *deviceId = AUDIO_PORT_HANDLE_NONE;
+ }
+ return status;
+}
+
+status_t MediaRecorder::enableAudioDeviceCallback(bool enabled)
+{
+ ALOGV("enableAudioDeviceCallback");
+
+ if (mMediaRecorder == NULL) {
+ ALOGE("media recorder is not initialized yet");
+ return INVALID_OPERATION;
+ }
+ return mMediaRecorder->enableAudioDeviceCallback(enabled);
+}
+
+status_t MediaRecorder::getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones)
+{
+ ALOGV("getActiveMicrophones");
+
+ if (mMediaRecorder == NULL) {
+ ALOGE("media recorder is not initialized yet");
+ return INVALID_OPERATION;
+ }
+ return mMediaRecorder->getActiveMicrophones(activeMicrophones);
+}
+
} // namespace android
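MediaRecorder gains the matching input-side calls (setInputDevice, getRoutedDeviceId, enableAudioDeviceCallback) plus getActiveMicrophones(). A short sketch assuming the post-patch <media/mediarecorder.h>; dumpActiveMics is a hypothetical helper and the MicrophoneInfo include path is assumed:

    #define LOG_TAG "MicRoutingDemo"
    #include <utils/Log.h>

    #include <vector>
    #include <media/mediarecorder.h>
    #include <media/MicrophoneInfo.h>
    #include <system/audio.h>

    using namespace android;

    // Hypothetical helper: report routing info for an initialized recorder.
    static void dumpActiveMics(const sp<MediaRecorder>& recorder) {
        std::vector<media::MicrophoneInfo> mics;
        if (recorder->getActiveMicrophones(&mics) != NO_ERROR) {
            return;  // INVALID_OPERATION before the recorder is initialized
        }
        ALOGI("%zu active microphone(s)", mics.size());

        audio_port_handle_t routed;
        if (recorder->getRoutedDeviceId(&routed) == NO_ERROR) {
            // On failure the new code resets *deviceId to AUDIO_PORT_HANDLE_NONE.
            ALOGI("routed input device: %d", routed);
        }
    }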
diff --git a/media/libmedia/omx/1.0/WOmxNode.cpp b/media/libmedia/omx/1.0/WOmxNode.cpp
index 0b40e8d..2cd8b76 100644
--- a/media/libmedia/omx/1.0/WOmxNode.cpp
+++ b/media/libmedia/omx/1.0/WOmxNode.cpp
@@ -151,7 +151,8 @@
hidl_handle const& outNativeHandle) {
fnStatus = toStatusT(status);
*buffer = outBuffer;
- *native_handle = NativeHandle::create(
+ *native_handle = outNativeHandle.getNativeHandle() == nullptr ?
+ nullptr : NativeHandle::create(
native_handle_clone(outNativeHandle), true);
}));
return transStatus == NO_ERROR ? fnStatus : transStatus;
diff --git a/media/libmediaextractor/Android.bp b/media/libmediaextractor/Android.bp
new file mode 100644
index 0000000..b9b47cd
--- /dev/null
+++ b/media/libmediaextractor/Android.bp
@@ -0,0 +1,50 @@
+cc_library {
+ name: "libmediaextractor",
+
+ include_dirs: [
+ "frameworks/av/include",
+ "frameworks/av/media/libmediaextractor/include",
+ ],
+
+ export_include_dirs: ["include"],
+
+ cflags: [
+ "-Wno-multichar",
+ "-Werror",
+ "-Wall",
+ ],
+
+ shared_libs: [
+ "libbinder",
+ "libstagefright_foundation",
+ "libutils",
+ "libcutils",
+ "liblog",
+ ],
+
+ srcs: [
+ "DataSourceBase.cpp",
+ "MediaBuffer.cpp",
+ "MediaBufferBase.cpp",
+ "MediaBufferGroup.cpp",
+ "MediaSource.cpp",
+ "MediaTrack.cpp",
+ "MediaExtractor.cpp",
+ "MetaData.cpp",
+ "MetaDataBase.cpp",
+ "VorbisComment.cpp",
+ ],
+
+ clang: true,
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+}
diff --git a/media/libmediaextractor/DataSourceBase.cpp b/media/libmediaextractor/DataSourceBase.cpp
new file mode 100644
index 0000000..8f47ee5
--- /dev/null
+++ b/media/libmediaextractor/DataSourceBase.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DataSourceBase"
+
+#include <media/DataSourceBase.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/String8.h>
+
+namespace android {
+
+bool DataSourceBase::getUInt16(off64_t offset, uint16_t *x) {
+ *x = 0;
+
+ uint8_t byte[2];
+ if (readAt(offset, byte, 2) != 2) {
+ return false;
+ }
+
+ *x = (byte[0] << 8) | byte[1];
+
+ return true;
+}
+
+bool DataSourceBase::getUInt24(off64_t offset, uint32_t *x) {
+ *x = 0;
+
+ uint8_t byte[3];
+ if (readAt(offset, byte, 3) != 3) {
+ return false;
+ }
+
+ *x = (byte[0] << 16) | (byte[1] << 8) | byte[2];
+
+ return true;
+}
+
+bool DataSourceBase::getUInt32(off64_t offset, uint32_t *x) {
+ *x = 0;
+
+ uint32_t tmp;
+ if (readAt(offset, &tmp, 4) != 4) {
+ return false;
+ }
+
+ *x = ntohl(tmp);
+
+ return true;
+}
+
+bool DataSourceBase::getUInt64(off64_t offset, uint64_t *x) {
+ *x = 0;
+
+ uint64_t tmp;
+ if (readAt(offset, &tmp, 8) != 8) {
+ return false;
+ }
+
+ *x = ntoh64(tmp);
+
+ return true;
+}
+
+bool DataSourceBase::getUInt16Var(off64_t offset, uint16_t *x, size_t size) {
+ if (size == 2) {
+ return getUInt16(offset, x);
+ }
+ if (size == 1) {
+ uint8_t tmp;
+ if (readAt(offset, &tmp, 1) == 1) {
+ *x = tmp;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool DataSourceBase::getUInt32Var(off64_t offset, uint32_t *x, size_t size) {
+ if (size == 4) {
+ return getUInt32(offset, x);
+ }
+ if (size == 2) {
+ uint16_t tmp;
+ if (getUInt16(offset, &tmp)) {
+ *x = tmp;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool DataSourceBase::getUInt64Var(off64_t offset, uint64_t *x, size_t size) {
+ if (size == 8) {
+ return getUInt64(offset, x);
+ }
+ if (size == 4) {
+ uint32_t tmp;
+ if (getUInt32(offset, &tmp)) {
+ *x = tmp;
+ return true;
+ }
+ }
+ return false;
+}
+
+status_t DataSourceBase::getSize(off64_t *size) {
+ *size = 0;
+
+ return ERROR_UNSUPPORTED;
+}
+
+bool DataSourceBase::getUri(char *uriString __unused, size_t bufferSize __unused) {
+ return false;
+}
+
+} // namespace android
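DataSourceBase is the minimal, non-refcounted read interface the new extractor library is built on; the helpers above layer big-endian integer reads over readAt(). A sketch of a trivial in-memory implementation, for illustration only (MemorySource is hypothetical):

    #include <string.h>
    #include <media/DataSourceBase.h>
    #include <utils/Errors.h>

    using namespace android;

    // Hypothetical in-memory source, just enough to exercise the helpers above.
    struct MemorySource : public DataSourceBase {
        MemorySource(const void *data, size_t size) : mData(data), mSize(size) {}

        status_t initCheck() const override { return OK; }

        ssize_t readAt(off64_t offset, void *data, size_t size) override {
            if (offset < 0 || (size_t)offset >= mSize) {
                return 0;  // at or past EOF: not an error, just zero bytes
            }
            size_t avail = mSize - (size_t)offset;
            size_t n = size < avail ? size : avail;
            memcpy(data, (const uint8_t *)mData + offset, n);
            return (ssize_t)n;
        }

        status_t getSize(off64_t *size) override {
            *size = (off64_t)mSize;
            return OK;
        }

    private:
        const void *mData;
        size_t mSize;
    };

    // Usage: big-endian reads through the convenience helpers.
    //   const uint8_t bytes[] = { 0x00, 0x00, 0x00, 0x2A };
    //   MemorySource src(bytes, sizeof(bytes));
    //   uint32_t v;
    //   bool ok = src.getUInt32(0, &v);   // ok == true, v == 42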
diff --git a/media/libmediaextractor/MediaBuffer.cpp b/media/libmediaextractor/MediaBuffer.cpp
new file mode 100644
index 0000000..39f8d6e
--- /dev/null
+++ b/media/libmediaextractor/MediaBuffer.cpp
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaBuffer"
+#include <utils/Log.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+/* static */
+std::atomic_int_least32_t MediaBuffer::mUseSharedMemory(0);
+
+MediaBuffer::MediaBuffer(void *data, size_t size)
+ : mObserver(NULL),
+ mRefCount(0),
+ mData(data),
+ mSize(size),
+ mRangeOffset(0),
+ mRangeLength(size),
+ mOwnsData(false),
+ mMetaData(new MetaData),
+ mOriginal(NULL) {
+}
+
+MediaBuffer::MediaBuffer(size_t size)
+ : mObserver(NULL),
+ mRefCount(0),
+ mData(NULL),
+ mSize(size),
+ mRangeOffset(0),
+ mRangeLength(size),
+ mOwnsData(true),
+ mMetaData(new MetaData),
+ mOriginal(NULL) {
+ if (size < kSharedMemThreshold
+ || std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
+ mData = malloc(size);
+ } else {
+ ALOGV("creating memoryDealer");
+ sp<MemoryDealer> memoryDealer =
+ new MemoryDealer(size + sizeof(SharedControl), "MediaBuffer");
+ mMemory = memoryDealer->allocate(size + sizeof(SharedControl));
+ if (mMemory == NULL) {
+ ALOGW("Failed to allocate shared memory, trying regular allocation!");
+ mData = malloc(size);
+ if (mData == NULL) {
+ ALOGE("Out of memory");
+ }
+ } else {
+ getSharedControl()->clear();
+ mData = (uint8_t *)mMemory->pointer() + sizeof(SharedControl);
+ ALOGV("Allocated shared mem buffer of size %zu @ %p", size, mData);
+ }
+ }
+}
+
+MediaBuffer::MediaBuffer(const sp<ABuffer> &buffer)
+ : mObserver(NULL),
+ mRefCount(0),
+ mData(buffer->data()),
+ mSize(buffer->size()),
+ mRangeOffset(0),
+ mRangeLength(mSize),
+ mBuffer(buffer),
+ mOwnsData(false),
+ mMetaData(new MetaData),
+ mOriginal(NULL) {
+}
+
+void MediaBuffer::release() {
+ if (mObserver == NULL) {
+ // Legacy contract for MediaBuffer without a MediaBufferGroup.
+ CHECK_EQ(mRefCount, 0);
+ delete this;
+ return;
+ }
+
+ int prevCount = __sync_fetch_and_sub(&mRefCount, 1);
+ if (prevCount == 1) {
+ if (mObserver == NULL) {
+ delete this;
+ return;
+ }
+
+ mObserver->signalBufferReturned(this);
+ }
+ CHECK(prevCount > 0);
+}
+
+void MediaBuffer::claim() {
+ CHECK(mObserver != NULL);
+ CHECK_EQ(mRefCount, 1);
+
+ mRefCount = 0;
+}
+
+void MediaBuffer::add_ref() {
+ (void) __sync_fetch_and_add(&mRefCount, 1);
+}
+
+void *MediaBuffer::data() const {
+ return mData;
+}
+
+size_t MediaBuffer::size() const {
+ return mSize;
+}
+
+size_t MediaBuffer::range_offset() const {
+ return mRangeOffset;
+}
+
+size_t MediaBuffer::range_length() const {
+ return mRangeLength;
+}
+
+void MediaBuffer::set_range(size_t offset, size_t length) {
+ if (offset + length > mSize) {
+ ALOGE("offset = %zu, length = %zu, mSize = %zu", offset, length, mSize);
+ }
+ CHECK(offset + length <= mSize);
+
+ mRangeOffset = offset;
+ mRangeLength = length;
+}
+
+MetaDataBase& MediaBuffer::meta_data() {
+ return *mMetaData;
+}
+
+void MediaBuffer::reset() {
+ mMetaData->clear();
+ set_range(0, mSize);
+}
+
+MediaBuffer::~MediaBuffer() {
+ CHECK(mObserver == NULL);
+
+ if (mOwnsData && mData != NULL && mMemory == NULL) {
+ free(mData);
+ mData = NULL;
+ }
+
+ if (mOriginal != NULL) {
+ mOriginal->release();
+ mOriginal = NULL;
+ }
+
+ if (mMemory.get() != nullptr) {
+ getSharedControl()->setDeadObject();
+ }
+ delete mMetaData;
+}
+
+void MediaBuffer::setObserver(MediaBufferObserver *observer) {
+ CHECK(observer == NULL || mObserver == NULL);
+ mObserver = observer;
+}
+
+MediaBufferBase *MediaBuffer::clone() {
+ MediaBuffer *buffer = new MediaBuffer(mData, mSize);
+ buffer->set_range(mRangeOffset, mRangeLength);
+ buffer->mMetaData = new MetaDataBase(*mMetaData);
+
+ add_ref();
+ buffer->mOriginal = this;
+
+ return buffer;
+}
+
+} // namespace android
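For a MediaBuffer that is not owned by a MediaBufferGroup, the code above enforces the legacy contract: release() on an observer-less buffer requires a zero refcount and deletes the object. A small sketch of that standalone lifecycle; heap allocation is used here because mUseSharedMemory defaults to 0:

    #include <media/stagefright/MediaBuffer.h>
    #include <media/stagefright/MetaData.h>

    using namespace android;

    // Standalone (observer-less) MediaBuffer lifecycle sketch.
    static void standaloneBuffer() {
        MediaBuffer *buffer = new MediaBuffer(1024);
        if (buffer->data() == nullptr) {
            delete buffer;                              // malloc failed, nothing usable
            return;
        }
        buffer->set_range(0, 512);                      // only the first half is valid payload
        buffer->meta_data().setInt64(kKeyTime, 0);      // presentation timestamp, 0us

        // No observer was set, so release() checks refcount == 0 and deletes the buffer.
        buffer->release();
    }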
diff --git a/media/libmediaextractor/MediaBufferBase.cpp b/media/libmediaextractor/MediaBufferBase.cpp
new file mode 100644
index 0000000..a553289
--- /dev/null
+++ b/media/libmediaextractor/MediaBufferBase.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaBufferBase"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferBase.h>
+
+namespace android {
+
+//static
+MediaBufferBase *MediaBufferBase::Create(size_t size) {
+ return new (std::nothrow) MediaBuffer(size);
+}
+
+} // android
diff --git a/media/libmediaextractor/MediaBufferGroup.cpp b/media/libmediaextractor/MediaBufferGroup.cpp
new file mode 100644
index 0000000..2a8dd41
--- /dev/null
+++ b/media/libmediaextractor/MediaBufferGroup.cpp
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaBufferGroup"
+#include <utils/Log.h>
+
+#include <list>
+
+#include <binder/MemoryDealer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <utils/threads.h>
+
+namespace android {
+
+// std::min is not constexpr in C++11
+template<typename T>
+constexpr T MIN(const T &a, const T &b) { return a <= b ? a : b; }
+
+// MediaBufferGroup may create shared memory buffers at a
+// smaller threshold than an isolated new MediaBuffer.
+static const size_t kSharedMemoryThreshold = MIN(
+ (size_t)MediaBuffer::kSharedMemThreshold, (size_t)(4 * 1024));
+
+struct MediaBufferGroup::InternalData {
+ Mutex mLock;
+ Condition mCondition;
+ size_t mGrowthLimit; // Do not automatically grow group larger than this.
+ std::list<MediaBufferBase *> mBuffers;
+};
+
+MediaBufferGroup::MediaBufferGroup(size_t growthLimit)
+ : mInternal(new InternalData()) {
+ mInternal->mGrowthLimit = growthLimit;
+}
+
+MediaBufferGroup::MediaBufferGroup(size_t buffers, size_t buffer_size, size_t growthLimit)
+ : mInternal(new InternalData()) {
+ mInternal->mGrowthLimit = growthLimit;
+
+ if (mInternal->mGrowthLimit > 0 && buffers > mInternal->mGrowthLimit) {
+ ALOGW("Preallocated buffers %zu > growthLimit %zu, increasing growthLimit",
+ buffers, mInternal->mGrowthLimit);
+ mInternal->mGrowthLimit = buffers;
+ }
+
+ if (buffer_size >= kSharedMemoryThreshold) {
+ ALOGD("creating MemoryDealer");
+ // Using a single MemoryDealer is efficient for a group of shared memory objects.
+ // This loop guarantees that we use shared memory (no fallback to malloc).
+
+ size_t alignment = MemoryDealer::getAllocationAlignment();
+ size_t augmented_size = buffer_size + sizeof(MediaBuffer::SharedControl);
+ size_t total = (augmented_size + alignment - 1) / alignment * alignment * buffers;
+ sp<MemoryDealer> memoryDealer = new MemoryDealer(total, "MediaBufferGroup");
+
+ for (size_t i = 0; i < buffers; ++i) {
+ sp<IMemory> mem = memoryDealer->allocate(augmented_size);
+ if (mem.get() == nullptr || mem->pointer() == nullptr) {
+ ALOGW("Only allocated %zu shared buffers of size %zu", i, buffer_size);
+ break;
+ }
+ MediaBuffer *buffer = new MediaBuffer(mem);
+ buffer->getSharedControl()->clear();
+ add_buffer(buffer);
+ }
+ return;
+ }
+
+ // Non-shared memory allocation.
+ for (size_t i = 0; i < buffers; ++i) {
+ MediaBuffer *buffer = new MediaBuffer(buffer_size);
+ if (buffer->data() == nullptr) {
+ delete buffer; // don't call release, it's not properly formed
+ ALOGW("Only allocated %zu malloc buffers of size %zu", i, buffer_size);
+ break;
+ }
+ add_buffer(buffer);
+ }
+}
+
+MediaBufferGroup::~MediaBufferGroup() {
+ for (MediaBufferBase *buffer : mInternal->mBuffers) {
+ if (buffer->refcount() != 0) {
+ const int localRefcount = buffer->localRefcount();
+ const int remoteRefcount = buffer->remoteRefcount();
+
+ // Fatal if we have a local refcount.
+ LOG_ALWAYS_FATAL_IF(localRefcount != 0,
+ "buffer(%p) localRefcount %d != 0, remoteRefcount %d",
+ buffer, localRefcount, remoteRefcount);
+
+ // Log an error if we have a remaining remote refcount,
+ // as the remote process may have died or may have inappropriate behavior.
+ // The shared memory associated with the MediaBuffer will
+ // automatically be reclaimed when there are no remaining fds
+ // associated with it.
+ ALOGE("buffer(%p) has residual remoteRefcount %d",
+ buffer, remoteRefcount);
+ }
+ // gracefully delete.
+ buffer->setObserver(nullptr);
+ buffer->release();
+ }
+ delete mInternal;
+}
+
+void MediaBufferGroup::add_buffer(MediaBufferBase *buffer) {
+ Mutex::Autolock autoLock(mInternal->mLock);
+
+ // if we're above our growth limit, release buffers if we can
+ for (auto it = mInternal->mBuffers.begin();
+ mInternal->mGrowthLimit > 0
+ && mInternal->mBuffers.size() >= mInternal->mGrowthLimit
+ && it != mInternal->mBuffers.end();) {
+ if ((*it)->refcount() == 0) {
+ (*it)->setObserver(nullptr);
+ (*it)->release();
+ it = mInternal->mBuffers.erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ buffer->setObserver(this);
+ mInternal->mBuffers.emplace_back(buffer);
+}
+
+bool MediaBufferGroup::has_buffers() {
+ if (mInternal->mBuffers.size() < mInternal->mGrowthLimit) {
+ return true; // We can add more buffers internally.
+ }
+ for (MediaBufferBase *buffer : mInternal->mBuffers) {
+ if (buffer->refcount() == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+status_t MediaBufferGroup::acquire_buffer(
+ MediaBufferBase **out, bool nonBlocking, size_t requestedSize) {
+ Mutex::Autolock autoLock(mInternal->mLock);
+ for (;;) {
+ size_t smallest = requestedSize;
+ MediaBufferBase *buffer = nullptr;
+ auto free = mInternal->mBuffers.end();
+ for (auto it = mInternal->mBuffers.begin(); it != mInternal->mBuffers.end(); ++it) {
+ if ((*it)->refcount() == 0) {
+ const size_t size = (*it)->size();
+ if (size >= requestedSize) {
+ buffer = *it;
+ break;
+ }
+ if (size < smallest) {
+ smallest = size; // always free the smallest buf
+ free = it;
+ }
+ }
+ }
+ if (buffer == nullptr
+ && (free != mInternal->mBuffers.end()
+ || mInternal->mBuffers.size() < mInternal->mGrowthLimit)) {
+ // We alloc before we free so failure leaves group unchanged.
+ const size_t allocateSize = requestedSize < SIZE_MAX / 3 * 2 /* NB: ordering */ ?
+ requestedSize * 3 / 2 : requestedSize;
+ buffer = new MediaBuffer(allocateSize);
+ if (buffer->data() == nullptr) {
+ ALOGE("Allocation failure for size %zu", allocateSize);
+ delete buffer; // Invalid alloc, prefer not to call release.
+ buffer = nullptr;
+ } else {
+ buffer->setObserver(this);
+ if (free != mInternal->mBuffers.end()) {
+ ALOGV("reallocate buffer, requested size %zu vs available %zu",
+ requestedSize, (*free)->size());
+ (*free)->setObserver(nullptr);
+ (*free)->release();
+ *free = buffer; // in-place replace
+ } else {
+ ALOGV("allocate buffer, requested size %zu", requestedSize);
+ mInternal->mBuffers.emplace_back(buffer);
+ }
+ }
+ }
+ if (buffer != nullptr) {
+ buffer->add_ref();
+ buffer->reset();
+ *out = buffer;
+ return OK;
+ }
+ if (nonBlocking) {
+ *out = nullptr;
+ return WOULD_BLOCK;
+ }
+ // All buffers are in use, block until one of them is returned.
+ mInternal->mCondition.wait(mInternal->mLock);
+ }
+ // Never gets here.
+}
+
+size_t MediaBufferGroup::buffers() const {
+ return mInternal->mBuffers.size();
+}
+
+void MediaBufferGroup::signalBufferReturned(MediaBufferBase *) {
+ Mutex::Autolock autoLock(mInternal->mLock);
+ mInternal->mCondition.signal();
+}
+
+} // namespace android
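acquire_buffer() hands out the first free buffer that is large enough, grows the group up to growthLimit, and otherwise blocks (or returns WOULD_BLOCK in non-blocking mode) until a release() signals the condition. A usage sketch with hypothetical sizes:

    #include <media/stagefright/MediaBufferGroup.h>
    #include <media/stagefright/MediaBufferBase.h>
    #include <utils/Errors.h>

    using namespace android;

    // Hypothetical producer loop: preallocate a few buffers and reuse them as
    // the consumer releases each one back to the group.
    static void produce(size_t frames) {
        // 4 buffers of 8 KiB each; growth limited to 4, so acquire_buffer()
        // waits once all of them are in flight.
        MediaBufferGroup group(4 /* buffers */, 8192 /* buffer_size */, 4 /* growthLimit */);

        for (size_t i = 0; i < frames; ++i) {
            MediaBufferBase *buffer = nullptr;
            if (group.acquire_buffer(&buffer, false /* nonBlocking */, 8192) != OK) {
                break;
            }
            // ... fill buffer->data(), then buffer->set_range(...) ...
            buffer->release();  // refcount drops to 0, signalBufferReturned() wakes waiters
        }
    }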
diff --git a/media/libmediaextractor/MediaExtractor.cpp b/media/libmediaextractor/MediaExtractor.cpp
new file mode 100644
index 0000000..a6b3dc9
--- /dev/null
+++ b/media/libmediaextractor/MediaExtractor.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaExtractor"
+#include <utils/Log.h>
+#include <pwd.h>
+
+#include <media/MediaExtractor.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+MediaExtractor::MediaExtractor() {
+ if (!LOG_NDEBUG) {
+ uid_t uid = getuid();
+ struct passwd *pw = getpwuid(uid);
+ ALOGV("extractor created in uid: %d (%s)", getuid(), pw->pw_name);
+ }
+}
+
+MediaExtractor::~MediaExtractor() {}
+
+uint32_t MediaExtractor::flags() const {
+ return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_PAUSE | CAN_SEEK;
+}
+
+} // namespace android
diff --git a/media/libmediaextractor/MediaSource.cpp b/media/libmediaextractor/MediaSource.cpp
new file mode 100644
index 0000000..5bbd3d8
--- /dev/null
+++ b/media/libmediaextractor/MediaSource.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/MediaSource.h>
+#include <media/IMediaSource.h>
+
+namespace android {
+
+MediaSource::MediaSource() {}
+
+MediaSource::~MediaSource() {}
+
+////////////////////////////////////////////////////////////////////////////////
+
+} // namespace android
diff --git a/media/libmediaextractor/MediaTrack.cpp b/media/libmediaextractor/MediaTrack.cpp
new file mode 100644
index 0000000..4963f58
--- /dev/null
+++ b/media/libmediaextractor/MediaTrack.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/MediaTrack.h>
+
+namespace android {
+
+MediaTrack::MediaTrack() {}
+
+MediaTrack::~MediaTrack() {}
+
+////////////////////////////////////////////////////////////////////////////////
+
+MediaTrack::ReadOptions::ReadOptions() {
+ reset();
+}
+
+void MediaTrack::ReadOptions::reset() {
+ mOptions = 0;
+ mSeekTimeUs = 0;
+ mNonBlocking = false;
+}
+
+void MediaTrack::ReadOptions::setNonBlocking() {
+ mNonBlocking = true;
+}
+
+void MediaTrack::ReadOptions::clearNonBlocking() {
+ mNonBlocking = false;
+}
+
+bool MediaTrack::ReadOptions::getNonBlocking() const {
+ return mNonBlocking;
+}
+
+void MediaTrack::ReadOptions::setSeekTo(int64_t time_us, SeekMode mode) {
+ mOptions |= kSeekTo_Option;
+ mSeekTimeUs = time_us;
+ mSeekMode = mode;
+}
+
+void MediaTrack::ReadOptions::clearSeekTo() {
+ mOptions &= ~kSeekTo_Option;
+ mSeekTimeUs = 0;
+ mSeekMode = SEEK_CLOSEST_SYNC;
+}
+
+bool MediaTrack::ReadOptions::getSeekTo(
+ int64_t *time_us, SeekMode *mode) const {
+ *time_us = mSeekTimeUs;
+ *mode = mSeekMode;
+ return (mOptions & kSeekTo_Option) != 0;
+}
+
+} // namespace android
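ReadOptions is the small value type extractors receive on every read(); the sketch below shows the seek round trip as a track implementation would see it (the values are arbitrary):

    #include <media/MediaTrack.h>

    using namespace android;

    // Sketch: request a sync-frame seek, then inspect the options the way an
    // extractor's read() implementation would.
    static void seekExample() {
        MediaTrack::ReadOptions options;
        options.setSeekTo(5000000 /* 5s in us */,
                          MediaTrack::ReadOptions::SEEK_PREVIOUS_SYNC);

        int64_t timeUs;
        MediaTrack::ReadOptions::SeekMode mode;
        if (options.getSeekTo(&timeUs, &mode)) {
            // timeUs == 5000000, mode == SEEK_PREVIOUS_SYNC
        }

        options.clearSeekTo();       // back to "no seek requested"
        options.setNonBlocking();    // ask read() to return WOULD_BLOCK instead of waiting
    }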
diff --git a/media/libmediaextractor/MetaData.cpp b/media/libmediaextractor/MetaData.cpp
new file mode 100644
index 0000000..1d0a607
--- /dev/null
+++ b/media/libmediaextractor/MetaData.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MetaData"
+#include <inttypes.h>
+#include <binder/Parcel.h>
+#include <utils/KeyedVector.h>
+#include <utils/Log.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+
+MetaData::MetaData() {
+}
+
+MetaData::MetaData(const MetaData &from)
+ : MetaDataBase(from) {
+}
+MetaData::MetaData(const MetaDataBase &from)
+ : MetaDataBase(from) {
+}
+
+MetaData::~MetaData() {
+}
+
+/* static */
+sp<MetaData> MetaData::createFromParcel(const Parcel &parcel) {
+
+ sp<MetaData> meta = new MetaData();
+ meta->updateFromParcel(parcel);
+ return meta;
+}
+
+} // namespace android
+
diff --git a/media/libmediaextractor/MetaDataBase.cpp b/media/libmediaextractor/MetaDataBase.cpp
new file mode 100644
index 0000000..bfea6f1
--- /dev/null
+++ b/media/libmediaextractor/MetaDataBase.cpp
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MetaDataBase"
+#include <inttypes.h>
+#include <binder/Parcel.h>
+#include <utils/KeyedVector.h>
+#include <utils/Log.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MetaDataBase.h>
+
+namespace android {
+
+struct MetaDataBase::typed_data {
+ typed_data();
+ ~typed_data();
+
+ typed_data(const MetaDataBase::typed_data &);
+ typed_data &operator=(const MetaDataBase::typed_data &);
+
+ void clear();
+ void setData(uint32_t type, const void *data, size_t size);
+ void getData(uint32_t *type, const void **data, size_t *size) const;
+ // may include hexdump of binary data if verbose=true
+ String8 asString(bool verbose) const;
+
+private:
+ uint32_t mType;
+ size_t mSize;
+
+ union {
+ void *ext_data;
+ float reservoir;
+ } u;
+
+ bool usesReservoir() const {
+ return mSize <= sizeof(u.reservoir);
+ }
+
+ void *allocateStorage(size_t size);
+ void freeStorage();
+
+ void *storage() {
+ return usesReservoir() ? &u.reservoir : u.ext_data;
+ }
+
+ const void *storage() const {
+ return usesReservoir() ? &u.reservoir : u.ext_data;
+ }
+};
+
+struct MetaDataBase::Rect {
+ int32_t mLeft, mTop, mRight, mBottom;
+};
+
+
+struct MetaDataBase::MetaDataInternal {
+ KeyedVector<uint32_t, MetaDataBase::typed_data> mItems;
+};
+
+
+MetaDataBase::MetaDataBase()
+ : mInternalData(new MetaDataInternal()) {
+}
+
+MetaDataBase::MetaDataBase(const MetaDataBase &from)
+ : mInternalData(new MetaDataInternal()) {
+ mInternalData->mItems = from.mInternalData->mItems;
+}
+
+MetaDataBase& MetaDataBase::operator = (const MetaDataBase &rhs) {
+ this->mInternalData->mItems = rhs.mInternalData->mItems;
+ return *this;
+}
+
+MetaDataBase::~MetaDataBase() {
+ clear();
+ delete mInternalData;
+}
+
+void MetaDataBase::clear() {
+ mInternalData->mItems.clear();
+}
+
+bool MetaDataBase::remove(uint32_t key) {
+ ssize_t i = mInternalData->mItems.indexOfKey(key);
+
+ if (i < 0) {
+ return false;
+ }
+
+ mInternalData->mItems.removeItemsAt(i);
+
+ return true;
+}
+
+bool MetaDataBase::setCString(uint32_t key, const char *value) {
+ return setData(key, TYPE_C_STRING, value, strlen(value) + 1);
+}
+
+bool MetaDataBase::setInt32(uint32_t key, int32_t value) {
+ return setData(key, TYPE_INT32, &value, sizeof(value));
+}
+
+bool MetaDataBase::setInt64(uint32_t key, int64_t value) {
+ return setData(key, TYPE_INT64, &value, sizeof(value));
+}
+
+bool MetaDataBase::setFloat(uint32_t key, float value) {
+ return setData(key, TYPE_FLOAT, &value, sizeof(value));
+}
+
+bool MetaDataBase::setPointer(uint32_t key, void *value) {
+ return setData(key, TYPE_POINTER, &value, sizeof(value));
+}
+
+bool MetaDataBase::setRect(
+ uint32_t key,
+ int32_t left, int32_t top,
+ int32_t right, int32_t bottom) {
+ Rect r;
+ r.mLeft = left;
+ r.mTop = top;
+ r.mRight = right;
+ r.mBottom = bottom;
+
+ return setData(key, TYPE_RECT, &r, sizeof(r));
+}
+
+/**
+ * Note that the returned pointer becomes invalid when additional metadata is set.
+ */
+bool MetaDataBase::findCString(uint32_t key, const char **value) const {
+ uint32_t type;
+ const void *data;
+ size_t size;
+ if (!findData(key, &type, &data, &size) || type != TYPE_C_STRING) {
+ return false;
+ }
+
+ *value = (const char *)data;
+
+ return true;
+}
+
+bool MetaDataBase::findInt32(uint32_t key, int32_t *value) const {
+ uint32_t type = 0;
+ const void *data;
+ size_t size;
+ if (!findData(key, &type, &data, &size) || type != TYPE_INT32) {
+ return false;
+ }
+
+ CHECK_EQ(size, sizeof(*value));
+
+ *value = *(int32_t *)data;
+
+ return true;
+}
+
+bool MetaDataBase::findInt64(uint32_t key, int64_t *value) const {
+ uint32_t type = 0;
+ const void *data;
+ size_t size;
+ if (!findData(key, &type, &data, &size) || type != TYPE_INT64) {
+ return false;
+ }
+
+ CHECK_EQ(size, sizeof(*value));
+
+ *value = *(int64_t *)data;
+
+ return true;
+}
+
+bool MetaDataBase::findFloat(uint32_t key, float *value) const {
+ uint32_t type = 0;
+ const void *data;
+ size_t size;
+ if (!findData(key, &type, &data, &size) || type != TYPE_FLOAT) {
+ return false;
+ }
+
+ CHECK_EQ(size, sizeof(*value));
+
+ *value = *(float *)data;
+
+ return true;
+}
+
+bool MetaDataBase::findPointer(uint32_t key, void **value) const {
+ uint32_t type = 0;
+ const void *data;
+ size_t size;
+ if (!findData(key, &type, &data, &size) || type != TYPE_POINTER) {
+ return false;
+ }
+
+ CHECK_EQ(size, sizeof(*value));
+
+ *value = *(void **)data;
+
+ return true;
+}
+
+bool MetaDataBase::findRect(
+ uint32_t key,
+ int32_t *left, int32_t *top,
+ int32_t *right, int32_t *bottom) const {
+ uint32_t type = 0;
+ const void *data;
+ size_t size;
+ if (!findData(key, &type, &data, &size) || type != TYPE_RECT) {
+ return false;
+ }
+
+ CHECK_EQ(size, sizeof(Rect));
+
+ const Rect *r = (const Rect *)data;
+ *left = r->mLeft;
+ *top = r->mTop;
+ *right = r->mRight;
+ *bottom = r->mBottom;
+
+ return true;
+}
+
+bool MetaDataBase::setData(
+ uint32_t key, uint32_t type, const void *data, size_t size) {
+ bool overwrote_existing = true;
+
+ ssize_t i = mInternalData->mItems.indexOfKey(key);
+ if (i < 0) {
+ typed_data item;
+ i = mInternalData->mItems.add(key, item);
+
+ overwrote_existing = false;
+ }
+
+ typed_data &item = mInternalData->mItems.editValueAt(i);
+
+ item.setData(type, data, size);
+
+ return overwrote_existing;
+}
+
+bool MetaDataBase::findData(uint32_t key, uint32_t *type,
+ const void **data, size_t *size) const {
+ ssize_t i = mInternalData->mItems.indexOfKey(key);
+
+ if (i < 0) {
+ return false;
+ }
+
+ const typed_data &item = mInternalData->mItems.valueAt(i);
+
+ item.getData(type, data, size);
+
+ return true;
+}
+
+bool MetaDataBase::hasData(uint32_t key) const {
+ ssize_t i = mInternalData->mItems.indexOfKey(key);
+
+ if (i < 0) {
+ return false;
+ }
+
+ return true;
+}
+
+MetaDataBase::typed_data::typed_data()
+ : mType(0),
+ mSize(0) {
+}
+
+MetaDataBase::typed_data::~typed_data() {
+ clear();
+}
+
+MetaDataBase::typed_data::typed_data(const typed_data &from)
+ : mType(from.mType),
+ mSize(0) {
+
+ void *dst = allocateStorage(from.mSize);
+ if (dst) {
+ memcpy(dst, from.storage(), mSize);
+ }
+}
+
+MetaDataBase::typed_data &MetaDataBase::typed_data::operator=(
+ const MetaDataBase::typed_data &from) {
+ if (this != &from) {
+ clear();
+ mType = from.mType;
+ void *dst = allocateStorage(from.mSize);
+ if (dst) {
+ memcpy(dst, from.storage(), mSize);
+ }
+ }
+
+ return *this;
+}
+
+void MetaDataBase::typed_data::clear() {
+ freeStorage();
+
+ mType = 0;
+}
+
+void MetaDataBase::typed_data::setData(
+ uint32_t type, const void *data, size_t size) {
+ clear();
+
+ mType = type;
+
+ void *dst = allocateStorage(size);
+ if (dst) {
+ memcpy(dst, data, size);
+ }
+}
+
+void MetaDataBase::typed_data::getData(
+ uint32_t *type, const void **data, size_t *size) const {
+ *type = mType;
+ *size = mSize;
+ *data = storage();
+}
+
+void *MetaDataBase::typed_data::allocateStorage(size_t size) {
+ mSize = size;
+
+ if (usesReservoir()) {
+ return &u.reservoir;
+ }
+
+ u.ext_data = malloc(mSize);
+ if (u.ext_data == NULL) {
+ ALOGE("Couldn't allocate %zu bytes for item", size);
+ mSize = 0;
+ }
+ return u.ext_data;
+}
+
+void MetaDataBase::typed_data::freeStorage() {
+ if (!usesReservoir()) {
+ if (u.ext_data) {
+ free(u.ext_data);
+ u.ext_data = NULL;
+ }
+ }
+
+ mSize = 0;
+}
+
+String8 MetaDataBase::typed_data::asString(bool verbose) const {
+ String8 out;
+ const void *data = storage();
+ switch(mType) {
+ case TYPE_NONE:
+ out = String8::format("no type, size %zu)", mSize);
+ break;
+ case TYPE_C_STRING:
+ out = String8::format("(char*) %s", (const char *)data);
+ break;
+ case TYPE_INT32:
+ out = String8::format("(int32_t) %d", *(int32_t *)data);
+ break;
+ case TYPE_INT64:
+ out = String8::format("(int64_t) %" PRId64, *(int64_t *)data);
+ break;
+ case TYPE_FLOAT:
+ out = String8::format("(float) %f", *(float *)data);
+ break;
+ case TYPE_POINTER:
+ out = String8::format("(void*) %p", *(void **)data);
+ break;
+ case TYPE_RECT:
+ {
+ const Rect *r = (const Rect *)data;
+ out = String8::format("Rect(%d, %d, %d, %d)",
+ r->mLeft, r->mTop, r->mRight, r->mBottom);
+ break;
+ }
+
+ default:
+ out = String8::format("(unknown type %d, size %zu)", mType, mSize);
+ if (verbose && mSize <= 48) { // if it's less than three lines of hex data, dump it
+ AString foo;
+ hexdump(data, mSize, 0, &foo);
+ out.append("\n");
+ out.append(foo.c_str());
+ }
+ break;
+ }
+ return out;
+}
+
+static void MakeFourCCString(uint32_t x, char *s) {
+ s[0] = x >> 24;
+ s[1] = (x >> 16) & 0xff;
+ s[2] = (x >> 8) & 0xff;
+ s[3] = x & 0xff;
+ s[4] = '\0';
+}
+
+String8 MetaDataBase::toString() const {
+ String8 s;
+ for (int i = mInternalData->mItems.size(); --i >= 0;) {
+ int32_t key = mInternalData->mItems.keyAt(i);
+ char cc[5];
+ MakeFourCCString(key, cc);
+ const typed_data &item = mInternalData->mItems.valueAt(i);
+ s.appendFormat("%s: %s", cc, item.asString(false).string());
+ if (i != 0) {
+ s.append(", ");
+ }
+ }
+ return s;
+}
+
+void MetaDataBase::dumpToLog() const {
+ for (int i = mInternalData->mItems.size(); --i >= 0;) {
+ int32_t key = mInternalData->mItems.keyAt(i);
+ char cc[5];
+ MakeFourCCString(key, cc);
+ const typed_data &item = mInternalData->mItems.valueAt(i);
+ ALOGI("%s: %s", cc, item.asString(true /* verbose */).string());
+ }
+}
+
+status_t MetaDataBase::writeToParcel(Parcel &parcel) {
+ status_t ret;
+ size_t numItems = mInternalData->mItems.size();
+ ret = parcel.writeUint32(uint32_t(numItems));
+ if (ret) {
+ return ret;
+ }
+ for (size_t i = 0; i < numItems; i++) {
+ int32_t key = mInternalData->mItems.keyAt(i);
+ const typed_data &item = mInternalData->mItems.valueAt(i);
+ uint32_t type;
+ const void *data;
+ size_t size;
+ item.getData(&type, &data, &size);
+ ret = parcel.writeInt32(key);
+ if (ret) {
+ return ret;
+ }
+ ret = parcel.writeUint32(type);
+ if (ret) {
+ return ret;
+ }
+ if (type == TYPE_NONE) {
+ android::Parcel::WritableBlob blob;
+ ret = parcel.writeUint32(static_cast<uint32_t>(size));
+ if (ret) {
+ return ret;
+ }
+ ret = parcel.writeBlob(size, false, &blob);
+ if (ret) {
+ return ret;
+ }
+ memcpy(blob.data(), data, size);
+ blob.release();
+ } else {
+ ret = parcel.writeByteArray(size, (uint8_t*)data);
+ if (ret) {
+ return ret;
+ }
+ }
+ }
+ return OK;
+}
+
+status_t MetaDataBase::updateFromParcel(const Parcel &parcel) {
+ uint32_t numItems;
+ if (parcel.readUint32(&numItems) == OK) {
+
+ for (size_t i = 0; i < numItems; i++) {
+ int32_t key;
+ uint32_t type;
+ uint32_t size;
+ status_t ret = parcel.readInt32(&key);
+ ret |= parcel.readUint32(&type);
+ ret |= parcel.readUint32(&size);
+ if (ret != OK) {
+ break;
+ }
+ // copy data from Blob, which may be inline in Parcel storage,
+ // then advance position
+ if (type == TYPE_NONE) {
+ android::Parcel::ReadableBlob blob;
+ ret = parcel.readBlob(size, &blob);
+ if (ret != OK) {
+ break;
+ }
+ setData(key, type, blob.data(), size);
+ blob.release();
+ } else {
+ // copy data directly from Parcel storage, then advance position
+ setData(key, type, parcel.readInplace(size), size);
+ }
+ }
+
+ return OK;
+ }
+ ALOGW("no metadata in parcel");
+ return UNKNOWN_ERROR;
+}
+
+} // namespace android
+
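MetaDataBase stores each item inline when it fits the 4-byte reservoir and on the heap otherwise, keyed by fourcc-style keys. A set/find sketch (the key choices are illustrative):

    #include <media/stagefright/MetaDataBase.h>
    #include <media/stagefright/MetaData.h>

    using namespace android;

    // Typical set/find round trips on a MetaDataBase.
    static void metaExample() {
        MetaDataBase meta;
        meta.setInt32(kKeyWidth, 1920);          // 4 bytes: stored inline in the reservoir
        meta.setInt64(kKeyDuration, 10000000);   // 8 bytes: stored in heap-allocated ext_data
        meta.setCString(kKeyMIMEType, "video/avc");

        int32_t width;
        if (meta.findInt32(kKeyWidth, &width)) {
            // width == 1920
        }

        const char *mime;
        if (meta.findCString(kKeyMIMEType, &mime)) {
            // NOTE: "mime" points into the item's storage and is invalidated by
            // further set*() calls, as the comment on findCString() warns.
        }

        meta.remove(kKeyWidth);                  // returns false if the key is absent
    }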
diff --git a/media/libmediaextractor/VorbisComment.cpp b/media/libmediaextractor/VorbisComment.cpp
new file mode 100644
index 0000000..fabaf51
--- /dev/null
+++ b/media/libmediaextractor/VorbisComment.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VorbisComment"
+#include <utils/Log.h>
+
+#include "media/VorbisComment.h"
+
+#include <media/stagefright/foundation/base64.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/MetaDataBase.h>
+
+namespace android {
+
+static void extractAlbumArt(
+ MetaDataBase *fileMeta, const void *data, size_t size) {
+ ALOGV("extractAlbumArt from '%s'", (const char *)data);
+
+ sp<ABuffer> flacBuffer = decodeBase64(AString((const char *)data, size));
+ if (flacBuffer == NULL) {
+ ALOGE("malformed base64 encoded data.");
+ return;
+ }
+
+ size_t flacSize = flacBuffer->size();
+ uint8_t *flac = flacBuffer->data();
+ ALOGV("got flac of size %zu", flacSize);
+
+ uint32_t picType;
+ uint32_t typeLen;
+ uint32_t descLen;
+ uint32_t dataLen;
+ char type[128];
+
+ if (flacSize < 8) {
+ return;
+ }
+
+ picType = U32_AT(flac);
+
+ if (picType != 3) {
+ // This is not a front cover.
+ return;
+ }
+
+ typeLen = U32_AT(&flac[4]);
+ if (typeLen > sizeof(type) - 1) {
+ return;
+ }
+
+ // we've already checked above that flacSize >= 8
+ if (flacSize - 8 < typeLen) {
+ return;
+ }
+
+ memcpy(type, &flac[8], typeLen);
+ type[typeLen] = '\0';
+
+ ALOGV("picType = %d, type = '%s'", picType, type);
+
+ if (!strcmp(type, "-->")) {
+ // This is not inline cover art, but an external url instead.
+ return;
+ }
+
+ if (flacSize < 32 || flacSize - 32 < typeLen) {
+ return;
+ }
+
+ descLen = U32_AT(&flac[8 + typeLen]);
+ if (flacSize - 32 - typeLen < descLen) {
+ return;
+ }
+
+ dataLen = U32_AT(&flac[8 + typeLen + 4 + descLen + 16]);
+
+ // we've already checked above that (flacSize - 32 - typeLen - descLen) >= 0
+ if (flacSize - 32 - typeLen - descLen < dataLen) {
+ return;
+ }
+
+ ALOGV("got image data, %zu trailing bytes",
+ flacSize - 32 - typeLen - descLen - dataLen);
+
+ fileMeta->setData(
+ kKeyAlbumArt, 0, &flac[8 + typeLen + 4 + descLen + 20], dataLen);
+
+ fileMeta->setCString(kKeyAlbumArtMIME, type);
+}
+
+void parseVorbisComment(
+ MetaDataBase *fileMeta, const char *comment, size_t commentLength)
+{
+ struct {
+ const char *const mTag;
+ uint32_t mKey;
+ } kMap[] = {
+ { "TITLE", kKeyTitle },
+ { "ARTIST", kKeyArtist },
+ { "ALBUMARTIST", kKeyAlbumArtist },
+ { "ALBUM ARTIST", kKeyAlbumArtist },
+ { "COMPILATION", kKeyCompilation },
+ { "ALBUM", kKeyAlbum },
+ { "COMPOSER", kKeyComposer },
+ { "GENRE", kKeyGenre },
+ { "AUTHOR", kKeyAuthor },
+ { "TRACKNUMBER", kKeyCDTrackNumber },
+ { "DISCNUMBER", kKeyDiscNumber },
+ { "DATE", kKeyDate },
+ { "YEAR", kKeyYear },
+ { "LYRICIST", kKeyWriter },
+ { "METADATA_BLOCK_PICTURE", kKeyAlbumArt },
+ { "ANDROID_LOOP", kKeyAutoLoop },
+ };
+
+ for (size_t j = 0; j < sizeof(kMap) / sizeof(kMap[0]); ++j) {
+ size_t tagLen = strlen(kMap[j].mTag);
+ if (!strncasecmp(kMap[j].mTag, comment, tagLen)
+ && comment[tagLen] == '=') {
+ if (kMap[j].mKey == kKeyAlbumArt) {
+ extractAlbumArt(
+ fileMeta,
+ &comment[tagLen + 1],
+ commentLength - tagLen - 1);
+ } else if (kMap[j].mKey == kKeyAutoLoop) {
+ if (!strcasecmp(&comment[tagLen + 1], "true")) {
+ fileMeta->setInt32(kKeyAutoLoop, true);
+ }
+ } else {
+ fileMeta->setCString(kMap[j].mKey, &comment[tagLen + 1]);
+ }
+ }
+ }
+
+}
+
+} // namespace android
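parseVorbisComment() maps raw "TAG=value" comment strings onto metadata keys, with special handling for base64 album art and the ANDROID_LOOP flag. A sketch of the call as an Ogg/FLAC parser might issue it, assuming the accompanying media/VorbisComment.h header:

    #include <string.h>
    #include <media/VorbisComment.h>
    #include <media/stagefright/MetaDataBase.h>

    using namespace android;

    // Feed a couple of raw Vorbis comment strings into a metadata object.
    static void vorbisExample() {
        MetaDataBase fileMeta;

        const char *title = "TITLE=Some Song";
        parseVorbisComment(&fileMeta, title, strlen(title));

        const char *loop = "ANDROID_LOOP=true";
        parseVorbisComment(&fileMeta, loop, strlen(loop));

        const char *value;
        if (fileMeta.findCString(kKeyTitle, &value)) {
            // value == "Some Song"
        }
        int32_t autoLoop;
        if (fileMeta.findInt32(kKeyAutoLoop, &autoLoop)) {
            // autoLoop != 0
        }
    }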
diff --git a/media/libmediaextractor/include/media/DataSource.h b/media/libmediaextractor/include/media/DataSource.h
new file mode 100644
index 0000000..0e59f39
--- /dev/null
+++ b/media/libmediaextractor/include/media/DataSource.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DATA_SOURCE_H_
+
+#define DATA_SOURCE_H_
+
+#include <sys/types.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/DataSourceBase.h>
+#include <media/IDataSource.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include <drm/DrmManagerClient.h>
+
+
+namespace android {
+
+class String8;
+
+class DataSource : public DataSourceBase, public virtual RefBase {
+public:
+ DataSource() {}
+
+ // returns a pointer to IDataSource if it is wrapped.
+ virtual sp<IDataSource> getIDataSource() const {
+ return nullptr;
+ }
+
+ virtual String8 toString() {
+ return String8("<unspecified>");
+ }
+
+ virtual status_t reconnectAtOffset(off64_t /*offset*/) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+
+ // for DRM
+ virtual sp<DecryptHandle> DrmInitialization(const char * /*mime*/ = NULL) {
+ return NULL;
+ }
+
+ virtual String8 getUri() {
+ return String8();
+ }
+
+ virtual bool getUri(char *uriString, size_t bufferSize) final {
+ int ret = snprintf(uriString, bufferSize, "%s", getUri().c_str());
+ return ret >= 0 && static_cast<size_t>(ret) < bufferSize;
+ }
+
+ virtual String8 getMIMEType() const {
+ return String8("application/octet-stream");
+ }
+
+protected:
+ virtual ~DataSource() {}
+
+private:
+ DataSource(const DataSource &);
+ DataSource &operator=(const DataSource &);
+};
+
+} // namespace android
+
+#endif // DATA_SOURCE_H_
diff --git a/media/libmediaextractor/include/media/DataSourceBase.h b/media/libmediaextractor/include/media/DataSourceBase.h
new file mode 100644
index 0000000..51993da
--- /dev/null
+++ b/media/libmediaextractor/include/media/DataSourceBase.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DATA_SOURCE_BASE_H_
+
+#define DATA_SOURCE_BASE_H_
+
+#include <sys/types.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+class String8;
+
+class DataSourceBase {
+public:
+ enum Flags {
+ kWantsPrefetching = 1,
+ kStreamedFromLocalHost = 2,
+ kIsCachingDataSource = 4,
+ kIsHTTPBasedSource = 8,
+ kIsLocalFileSource = 16,
+ };
+
+ DataSourceBase() {}
+
+ virtual status_t initCheck() const = 0;
+
+ // Returns the number of bytes read, or -1 on failure. It's not an error if
+ // this returns zero; it just means the given offset is equal to, or
+ // beyond, the end of the source.
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
+
+ // Convenience methods:
+ bool getUInt16(off64_t offset, uint16_t *x);
+ bool getUInt24(off64_t offset, uint32_t *x); // 3 byte int, returned as a 32-bit int
+ bool getUInt32(off64_t offset, uint32_t *x);
+ bool getUInt64(off64_t offset, uint64_t *x);
+
+ // read either int<N> or int<2N> into a uint<2N>_t, size is the int size in bytes.
+ bool getUInt16Var(off64_t offset, uint16_t *x, size_t size);
+ bool getUInt32Var(off64_t offset, uint32_t *x, size_t size);
+ bool getUInt64Var(off64_t offset, uint64_t *x, size_t size);
+
+ // May return ERROR_UNSUPPORTED.
+ virtual status_t getSize(off64_t *size);
+
+ virtual bool getUri(char *uriString, size_t bufferSize);
+
+ virtual uint32_t flags() {
+ return 0;
+ }
+
+ virtual void close() {};
+
+protected:
+ virtual ~DataSourceBase() {}
+
+private:
+ DataSourceBase(const DataSourceBase &);
+ DataSourceBase &operator=(const DataSourceBase &);
+};
+
+} // namespace android
+
+#endif // DATA_SOURCE_BASE_H_
diff --git a/media/libmediaextractor/include/media/ExtractorUtils.h b/media/libmediaextractor/include/media/ExtractorUtils.h
new file mode 100644
index 0000000..22f9349
--- /dev/null
+++ b/media/libmediaextractor/include/media/ExtractorUtils.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EXTRACTOR_UTILS_H_
+
+#define EXTRACTOR_UTILS_H_
+
+#include <memory>
+
+namespace android {
+
+template <class T>
+std::unique_ptr<T[]> heapbuffer(size_t size) {
+ return std::unique_ptr<T[]>(new (std::nothrow) T[size]);
+}
+
+} // namespace android
+
+#endif // EXTRACTOR_UTILS_H_
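heapbuffer<T>() wraps new (std::nothrow) T[size] in a unique_ptr so extractors can allocate attacker-influenced sizes without an exception path. A sketch (readChunk is hypothetical):

    #include <media/DataSourceBase.h>
    #include <media/ExtractorUtils.h>

    using namespace android;

    // Hypothetical helper: read "size" bytes at "offset" into a heap buffer,
    // failing cleanly if the allocation or the read falls short.
    static bool readChunk(DataSourceBase *source, off64_t offset, size_t size) {
        std::unique_ptr<uint8_t[]> buffer = heapbuffer<uint8_t>(size);
        if (buffer.get() == nullptr) {
            return false;  // allocation failed, size was probably bogus
        }
        return source->readAt(offset, buffer.get(), size) == (ssize_t)size;
    }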
diff --git a/media/libmediaextractor/include/media/MediaExtractor.h b/media/libmediaextractor/include/media/MediaExtractor.h
new file mode 100644
index 0000000..4ba98da
--- /dev/null
+++ b/media/libmediaextractor/include/media/MediaExtractor.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_EXTRACTOR_H_
+
+#define MEDIA_EXTRACTOR_H_
+
+#include <stdio.h>
+#include <vector>
+
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class DataSourceBase;
+class MetaDataBase;
+struct MediaTrack;
+
+
+class ExtractorAllocTracker {
+public:
+ ExtractorAllocTracker() {
+ ALOGD("extractor allocated: %p", this);
+ }
+ virtual ~ExtractorAllocTracker() {
+ ALOGD("extractor freed: %p", this);
+ }
+};
+
+
+class MediaExtractor
+// : public ExtractorAllocTracker
+{
+public:
+ virtual ~MediaExtractor();
+ virtual size_t countTracks() = 0;
+ virtual MediaTrack *getTrack(size_t index) = 0;
+
+ enum GetTrackMetaDataFlags {
+ kIncludeExtensiveMetaData = 1
+ };
+ virtual status_t getTrackMetaData(
+ MetaDataBase& meta,
+ size_t index, uint32_t flags = 0) = 0;
+
+ // Return container specific meta-data. The default implementation
+ // returns an empty metadata object.
+ virtual status_t getMetaData(MetaDataBase& meta) = 0;
+
+ enum Flags {
+ CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
+ CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
+ CAN_PAUSE = 4,
+ CAN_SEEK = 8, // the "seek bar"
+ };
+
+ // If subclasses do _not_ override this, the default is
+ // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
+ virtual uint32_t flags() const;
+
+ virtual status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
+ return INVALID_OPERATION;
+ }
+
+ virtual const char * name() { return "<unspecified>"; }
+
+ typedef MediaExtractor* (*CreatorFunc)(
+ DataSourceBase *source, void *meta);
+ typedef void (*FreeMetaFunc)(void *meta);
+
+ // The sniffer can optionally fill in an opaque object, "meta", that helps
+ // the corresponding extractor initialize its state without duplicating
+ // effort already exerted by the sniffer. If "freeMeta" is given, it will be
+ // called against the opaque object when it is no longer used.
+ typedef CreatorFunc (*SnifferFunc)(
+ DataSourceBase *source, float *confidence,
+ void **meta, FreeMetaFunc *freeMeta);
+
+ typedef struct {
+ const uint8_t b[16];
+ } uuid_t;
+
+ typedef struct {
+ // version number of this structure
+ const uint32_t def_version;
+
+ // A unique identifier for this extractor.
+ // See below for a convenience macro to create this from a string.
+ uuid_t extractor_uuid;
+
+ // Version number of this extractor. When two extractors with the same
+ // uuid are encountered, the one with the largest version number will
+ // be used.
+ const uint32_t extractor_version;
+
+ // a human readable name
+ const char *extractor_name;
+
+ // the sniffer function
+ const SnifferFunc sniff;
+ } ExtractorDef;
+
+ static const uint32_t EXTRACTORDEF_VERSION = 1;
+
+ typedef ExtractorDef (*GetExtractorDef)();
+
+protected:
+ MediaExtractor();
+
+private:
+ MediaExtractor(const MediaExtractor &);
+ MediaExtractor &operator=(const MediaExtractor &);
+};
+
+// purposely not defined anywhere so that this will fail to link if
+// expressions below are not evaluated at compile time
+int invalid_uuid_string(const char *);
+
+template <typename T, size_t N>
+constexpr uint8_t _digitAt_(const T (&s)[N], const size_t n) {
+ return s[n] >= '0' && s[n] <= '9' ? s[n] - '0'
+ : s[n] >= 'a' && s[n] <= 'f' ? s[n] - 'a' + 10
+ : s[n] >= 'A' && s[n] <= 'F' ? s[n] - 'A' + 10
+ : invalid_uuid_string("uuid: bad digits");
+}
+
+template <typename T, size_t N>
+constexpr uint8_t _hexByteAt_(const T (&s)[N], size_t n) {
+ return (_digitAt_(s, n) << 4) + _digitAt_(s, n + 1);
+}
+
+constexpr bool _assertIsDash_(char c) {
+ return c == '-' ? true : invalid_uuid_string("Wrong format");
+}
+
+template <size_t N>
+constexpr MediaExtractor::uuid_t constUUID(const char (&s) [N]) {
+ static_assert(N == 37, "uuid: wrong length");
+ return
+ _assertIsDash_(s[8]),
+ _assertIsDash_(s[13]),
+ _assertIsDash_(s[18]),
+ _assertIsDash_(s[23]),
+ MediaExtractor::uuid_t {{
+ _hexByteAt_(s, 0),
+ _hexByteAt_(s, 2),
+ _hexByteAt_(s, 4),
+ _hexByteAt_(s, 6),
+ _hexByteAt_(s, 9),
+ _hexByteAt_(s, 11),
+ _hexByteAt_(s, 14),
+ _hexByteAt_(s, 16),
+ _hexByteAt_(s, 19),
+ _hexByteAt_(s, 21),
+ _hexByteAt_(s, 24),
+ _hexByteAt_(s, 26),
+ _hexByteAt_(s, 28),
+ _hexByteAt_(s, 30),
+ _hexByteAt_(s, 32),
+ _hexByteAt_(s, 34),
+ }};
+}
+// Convenience macro to create a uuid_t from a string literal, which should
+// be formatted as "12345678-1234-1234-1234-123456789abc", as generated by
+// e.g. https://www.uuidgenerator.net/ or the 'uuidgen' linux command.
+// Hex digits may be upper or lower case.
+//
+// The macro call is otherwise equivalent to specifying the structure directly
+// (e.g. UUID("7d613858-5837-4a38-84c5-332d1cddee27") is the same as
+// {{0x7d, 0x61, 0x38, 0x58, 0x58, 0x37, 0x4a, 0x38,
+// 0x84, 0xc5, 0x33, 0x2d, 0x1c, 0xdd, 0xee, 0x27}})
+
+#define UUID(str) []{ constexpr MediaExtractor::uuid_t uuid = constUUID(str); return uuid; }()
+
+
+
+} // namespace android
+
+#endif // MEDIA_EXTRACTOR_H_
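ExtractorDef, SnifferFunc, and the UUID() macro together describe how an extractor advertises itself. A hypothetical plugin skeleton assuming the post-patch header; "MYFM", MyExtractor, and the GETEXTRACTORDEF symbol name are invented for illustration, and the UUID is the example value from the comment above:

    #include <string.h>
    #include <media/DataSourceBase.h>
    #include <media/MediaExtractor.h>

    using namespace android;

    // Stand-in creator; a real plugin would return a concrete MediaExtractor here.
    static MediaExtractor *CreateMyExtractor(DataSourceBase * /*source*/, void * /*meta*/) {
        return nullptr;
    }

    // Sniffer: look at the first bytes, report confidence, hand back the creator.
    static MediaExtractor::CreatorFunc SniffMy(
            DataSourceBase *source, float *confidence,
            void **meta, MediaExtractor::FreeMetaFunc *freeMeta) {
        uint8_t header[4];
        if (source->readAt(0, header, 4) != 4 || memcmp(header, "MYFM", 4) != 0) {
            return nullptr;          // not our container
        }
        *confidence = 0.6f;
        *meta = nullptr;             // nothing precomputed to hand to the creator
        *freeMeta = nullptr;
        return CreateMyExtractor;
    }

    // Matches the GetExtractorDef typedef above; the exported symbol name is assumed.
    MediaExtractor::ExtractorDef GETEXTRACTORDEF() {
        return {
            MediaExtractor::EXTRACTORDEF_VERSION,
            UUID("7d613858-5837-4a38-84c5-332d1cddee27"),
            1,                       // extractor_version
            "My Format Extractor",
            SniffMy,
        };
    }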
diff --git a/media/libmediaextractor/include/media/MediaSource.h b/media/libmediaextractor/include/media/MediaSource.h
new file mode 100644
index 0000000..73c4703
--- /dev/null
+++ b/media/libmediaextractor/include/media/MediaSource.h
@@ -0,0 +1,102 @@
+/*
+
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_SOURCE_H_
+
+#define MEDIA_SOURCE_H_
+
+#include <sys/types.h>
+
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/RefBase.h>
+
+#include <media/MediaTrack.h>
+
+namespace android {
+
+class MediaBuffer;
+
+struct MediaSource : public virtual RefBase {
+ MediaSource();
+
+ // To be called before any other methods on this object, except
+ // getFormat().
+ virtual status_t start(MetaData *params = NULL) = 0;
+
+ // Any blocking read call returns immediately with a result of NO_INIT.
+ // It is an error to call any methods other than start after this call
+ // returns. Any buffers the object may be holding onto at the time of
+ // the stop() call are released.
+ // Also, it is imperative that any buffers output by this object and
+ // held onto by callers be released before a call to stop() !!!
+ virtual status_t stop() = 0;
+
+ // Returns the format of the data output by this media source.
+ virtual sp<MetaData> getFormat() = 0;
+
+ // Options that modify read() behaviour. The default is to
+ // a) not request a seek
+ // b) not be late, i.e. lateness_us = 0
+ typedef MediaTrack::ReadOptions ReadOptions;
+
+ // Returns a new buffer of data. Call blocks until a
+ // buffer is available, an error is encountered or the end of the stream
+ // is reached.
+ // End of stream is signalled by a result of ERROR_END_OF_STREAM.
+ // A result of INFO_FORMAT_CHANGED indicates that the format of this
+ // MediaSource has changed mid-stream, the client can continue reading
+ // but should be prepared for buffers of the new configuration.
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL) = 0;
+
+ // Causes this source to suspend pulling data from its upstream source
+ // until a subsequent read-with-seek. This is currently not supported
+ // as such by any source. E.g. MediaCodecSource does not suspend its
+ // upstream source, and instead discards upstream data while paused.
+ virtual status_t pause() {
+ return ERROR_UNSUPPORTED;
+ }
+
+ // The consumer of this media source requests the source stops sending
+ // buffers with timestamp larger than or equal to stopTimeUs. stopTimeUs
+ // must be in the same time base as the startTime passed in start(). If
+ // source does not support this request, ERROR_UNSUPPORTED will be returned.
+ // If stopTimeUs is invalid, BAD_VALUE will be returned. This could be
+ // called at any time even before source starts and it could be called
+ // multiple times. Setting stopTimeUs to be -1 will effectively cancel the stopTimeUs
+ // set previously. If stopTimeUs is larger than or equal to last buffer's timestamp,
+ // source will start to drop buffer when it gets a buffer with timestamp larger
+ // than or equal to stopTimeUs. If stopTimeUs is smaller than or equal to last
+ // buffer's timestamp, source will drop all the incoming buffers immediately.
+ // After setting stopTimeUs, source may still stop sending buffers with timestamp
+ // less than stopTimeUs if it is stopped by the consumer.
+ virtual status_t setStopTimeUs(int64_t /* stopTimeUs */) {
+ return ERROR_UNSUPPORTED;
+ }
+
+protected:
+ virtual ~MediaSource();
+
+private:
+ MediaSource(const MediaSource &);
+ MediaSource &operator=(const MediaSource &);
+};
+
+} // namespace android
+
+#endif // MEDIA_SOURCE_H_
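
A minimal consumer loop for the interface above, as an illustrative sketch (the concrete MediaSource implementation behind `source` is assumed, not part of this change):

    #include <media/MediaSource.h>
    #include <media/stagefright/MediaBufferBase.h>

    using namespace android;

    static void drainSource(const sp<MediaSource> &source) {
        if (source->start() != OK) return;      // must be called before read()
        MediaBufferBase *buffer = nullptr;
        status_t err;
        while ((err = source->read(&buffer)) == OK) {
            // ... consume buffer->range_length() bytes starting at
            //     (uint8_t *)buffer->data() + buffer->range_offset() ...
            buffer->release();                  // release before stop()
            buffer = nullptr;
        }
        // err is ERROR_END_OF_STREAM on normal completion, or
        // INFO_FORMAT_CHANGED if the output format changed mid-stream.
        source->stop();
    }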
diff --git a/media/libmediaextractor/include/media/MediaTrack.h b/media/libmediaextractor/include/media/MediaTrack.h
new file mode 100644
index 0000000..adea61a
--- /dev/null
+++ b/media/libmediaextractor/include/media/MediaTrack.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_SOURCE_BASE_H_
+
+#define MEDIA_SOURCE_BASE_H_
+
+#include <sys/types.h>
+
+#include <binder/IMemory.h>
+#include <binder/MemoryDealer.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/Log.h>
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+class MediaBufferBase;
+
+class SourceBaseAllocTracker {
+public:
+ SourceBaseAllocTracker() {
+ ALOGD("sourcebase allocated: %p", this);
+ }
+ virtual ~SourceBaseAllocTracker() {
+ ALOGD("sourcebase freed: %p", this);
+ }
+};
+
+struct MediaTrack
+// : public SourceBaseAllocTracker
+{
+ MediaTrack();
+
+ // To be called before any other methods on this object, except
+ // getFormat().
+ virtual status_t start(MetaDataBase *params = NULL) = 0;
+
+ // Any blocking read call returns immediately with a result of NO_INIT.
+ // It is an error to call any methods other than start after this call
+ // returns. Any buffers the object may be holding onto at the time of
+ // the stop() call are released.
+ // Also, it is imperative that any buffers output by this object and
+ // held onto by callers be released before a call to stop() !!!
+ virtual status_t stop() = 0;
+
+ // Returns the format of the data output by this media track.
+ virtual status_t getFormat(MetaDataBase& format) = 0;
+
+ // Options that modify read() behaviour. The default is to
+ // a) not request a seek
+ // b) not be late, i.e. lateness_us = 0
+ struct ReadOptions {
+ enum SeekMode : int32_t {
+ SEEK_PREVIOUS_SYNC,
+ SEEK_NEXT_SYNC,
+ SEEK_CLOSEST_SYNC,
+ SEEK_CLOSEST,
+ SEEK_FRAME_INDEX,
+ };
+
+ ReadOptions();
+
+ // Reset everything back to defaults.
+ void reset();
+
+ void setSeekTo(int64_t time_us, SeekMode mode = SEEK_CLOSEST_SYNC);
+ void clearSeekTo();
+ bool getSeekTo(int64_t *time_us, SeekMode *mode) const;
+
+ void setNonBlocking();
+ void clearNonBlocking();
+ bool getNonBlocking() const;
+
+ // Used to clear all non-persistent options for multiple buffer reads.
+ void clearNonPersistent() {
+ clearSeekTo();
+ }
+
+ private:
+ enum Options {
+ kSeekTo_Option = 1,
+ };
+
+ uint32_t mOptions;
+ int64_t mSeekTimeUs;
+ SeekMode mSeekMode;
+ bool mNonBlocking;
+ } __attribute__((packed)); // sent through Binder
+
+ // Returns a new buffer of data. Call blocks until a
+ // buffer is available, an error is encountered, or the end of the stream
+ // is reached.
+ // End of stream is signalled by a result of ERROR_END_OF_STREAM.
+ // A result of INFO_FORMAT_CHANGED indicates that the format of this
+ // MediaSource has changed mid-stream, the client can continue reading
+ // but should be prepared for buffers of the new configuration.
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL) = 0;
+
+ virtual ~MediaTrack();
+
+private:
+ MediaTrack(const MediaTrack &);
+ MediaTrack &operator=(const MediaTrack &);
+};
+
+} // namespace android
+
+#endif // MEDIA_SOURCE_BASE_H_
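
An illustrative sketch of the ReadOptions seek flow, assuming `track` points at some concrete MediaTrack implementation:

    using android::MediaTrack;
    using android::MediaBufferBase;

    MediaTrack::ReadOptions options;
    options.setSeekTo(5 * 1000000LL /* 5 s in usecs */,
                      MediaTrack::ReadOptions::SEEK_CLOSEST_SYNC);

    MediaBufferBase *buffer = nullptr;
    android::status_t err = track->read(&buffer, &options);
    options.clearNonPersistent();       // drops the one-shot seek request
    if (err == android::OK) {
        // ... decode buffer ...
        buffer->release();
    }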
diff --git a/media/libmediaextractor/include/media/VorbisComment.h b/media/libmediaextractor/include/media/VorbisComment.h
new file mode 100644
index 0000000..8ba3295
--- /dev/null
+++ b/media/libmediaextractor/include/media/VorbisComment.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef VORBIS_COMMENT_H_
+#define VORBIS_COMMENT_H_
+
+namespace android {
+
+class MetaDataBase;
+
+void parseVorbisComment(
+ MetaDataBase *fileMeta, const char *comment, size_t commentLength);
+
+} // namespace android
+
+#endif // VORBIS_COMMENT_H_
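
An illustrative call, assuming a KEY=value comment string already extracted from the stream (the tag value here is made up):

    #include <media/VorbisComment.h>
    #include <media/stagefright/MetaDataBase.h>
    #include <string.h>

    android::MetaDataBase fileMeta;
    const char *comment = "ARTIST=Example Artist";
    android::parseVorbisComment(&fileMeta, comment, strlen(comment));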
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
new file mode 100644
index 0000000..f944d51
--- /dev/null
+++ b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_BUFFER_H_
+
+#define MEDIA_BUFFER_H_
+
+#include <atomic>
+#include <list>
+
+#include <pthread.h>
+
+#include <binder/MemoryDealer.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <media/stagefright/MediaBufferBase.h>
+
+namespace android {
+
+struct ABuffer;
+class MediaBuffer;
+class MediaBufferObserver;
+class MetaDataBase;
+
+class MediaBuffer : public MediaBufferBase {
+public:
+ // allocations larger than or equal to this will use shared memory.
+ static const size_t kSharedMemThreshold = 64 * 1024;
+
+ // The underlying data remains the responsibility of the caller!
+ MediaBuffer(void *data, size_t size);
+
+ explicit MediaBuffer(size_t size);
+
+ explicit MediaBuffer(const sp<ABuffer> &buffer);
+
+ MediaBuffer(const sp<IMemory> &mem) :
+ MediaBuffer((uint8_t *)mem->pointer() + sizeof(SharedControl), mem->size()) {
+ // delegate and override mMemory
+ mMemory = mem;
+ }
+
+ // If MediaBufferGroup is set, decrement the local reference count;
+ // if the local reference count drops to 0, return the buffer to the
+ // associated MediaBufferGroup.
+ //
+ // If no MediaBufferGroup is set, the local reference count must be zero
+ // when called, whereupon the MediaBuffer is deleted.
+ virtual void release();
+
+ // Increments the local reference count.
+ // Use only when MediaBufferGroup is set.
+ virtual void add_ref();
+
+ virtual void *data() const;
+ virtual size_t size() const;
+
+ virtual size_t range_offset() const;
+ virtual size_t range_length() const;
+
+ virtual void set_range(size_t offset, size_t length);
+
+ MetaDataBase& meta_data();
+
+ // Clears meta data and resets the range to the full extent.
+ virtual void reset();
+
+ virtual void setObserver(MediaBufferObserver *group);
+
+ // Returns a clone of this MediaBuffer increasing its reference count.
+ // The clone references the same data but has its own range and
+ // MetaData.
+ virtual MediaBufferBase *clone();
+
+ // sum of localRefcount() and remoteRefcount()
+ virtual int refcount() const {
+ return localRefcount() + remoteRefcount();
+ }
+
+ virtual int localRefcount() const {
+ return mRefCount;
+ }
+
+ virtual int remoteRefcount() const {
+ if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
+ int32_t remoteRefcount =
+ reinterpret_cast<SharedControl *>(mMemory->pointer())->getRemoteRefcount();
+ // Sanity check so that remoteRefcount() is non-negative.
+ return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
+ }
+
+ // returns old value
+ int addRemoteRefcount(int32_t value) {
+ if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
+ return reinterpret_cast<SharedControl *>(mMemory->pointer())->addRemoteRefcount(value);
+ }
+
+ bool isDeadObject() const {
+ return isDeadObject(mMemory);
+ }
+
+ static bool isDeadObject(const sp<IMemory> &memory) {
+ if (memory.get() == nullptr || memory->pointer() == nullptr) return false;
+ return reinterpret_cast<SharedControl *>(memory->pointer())->isDeadObject();
+ }
+
+ // Sticky on enabling of shared memory MediaBuffers. By default we don't use
+ // shared memory for MediaBuffers, but we enable this for those processes
+ // that export MediaBuffers.
+ static void useSharedMemory() {
+ std::atomic_store_explicit(
+ &mUseSharedMemory, (int_least32_t)1, std::memory_order_seq_cst);
+ }
+
+protected:
+ // true if MediaBuffer is observed (part of a MediaBufferGroup).
+ inline bool isObserved() const {
+ return mObserver != nullptr;
+ }
+
+ virtual ~MediaBuffer();
+
+ sp<IMemory> mMemory;
+
+private:
+ friend class MediaBufferGroup;
+ friend class OMXDecoder;
+ friend class BnMediaSource;
+ friend class BpMediaSource;
+
+ // For use by OMXDecoder: the reference count must be 1; drops the
+ // reference count to 0 without signalling the observer.
+ void claim();
+
+ MediaBufferObserver *mObserver;
+ int mRefCount;
+
+ void *mData;
+ size_t mSize, mRangeOffset, mRangeLength;
+ sp<ABuffer> mBuffer;
+
+ bool mOwnsData;
+
+ MetaDataBase* mMetaData;
+
+ MediaBuffer *mOriginal;
+
+ static std::atomic_int_least32_t mUseSharedMemory;
+
+ MediaBuffer(const MediaBuffer &);
+ MediaBuffer &operator=(const MediaBuffer &);
+
+ // SharedControl block at the start of IMemory.
+ struct SharedControl {
+ enum {
+ FLAG_DEAD_OBJECT = (1 << 0),
+ };
+
+ // returns old value
+ inline int32_t addRemoteRefcount(int32_t value) {
+ return std::atomic_fetch_add_explicit(
+ &mRemoteRefcount, (int_least32_t)value, std::memory_order_seq_cst);
+ }
+
+ inline int32_t getRemoteRefcount() const {
+ return std::atomic_load_explicit(&mRemoteRefcount, std::memory_order_seq_cst);
+ }
+
+ inline void setRemoteRefcount(int32_t value) {
+ std::atomic_store_explicit(
+ &mRemoteRefcount, (int_least32_t)value, std::memory_order_seq_cst);
+ }
+
+ inline bool isDeadObject() const {
+ return (std::atomic_load_explicit(
+ &mFlags, std::memory_order_seq_cst) & FLAG_DEAD_OBJECT) != 0;
+ }
+
+ inline void setDeadObject() {
+ (void)std::atomic_fetch_or_explicit(
+ &mFlags, (int_least32_t)FLAG_DEAD_OBJECT, std::memory_order_seq_cst);
+ }
+
+ inline void clear() {
+ std::atomic_store_explicit(
+ &mFlags, (int_least32_t)0, std::memory_order_seq_cst);
+ std::atomic_store_explicit(
+ &mRemoteRefcount, (int_least32_t)0, std::memory_order_seq_cst);
+ }
+
+ private:
+ // Caution: atomic_int_fast32_t is 64 bits on LP64.
+ std::atomic_int_least32_t mFlags;
+ std::atomic_int_least32_t mRemoteRefcount;
+ int32_t unused[6] __attribute__((__unused__)); // additional buffer space
+ };
+
+ inline SharedControl *getSharedControl() const {
+ return reinterpret_cast<SharedControl *>(mMemory->pointer());
+ }
+};
+
+} // namespace android
+
+#endif // MEDIA_BUFFER_H_
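
An illustrative sketch of a standalone (unobserved) MediaBuffer; `payload` and `payloadSize` are assumed to exist, and <string.h> is needed for memcpy:

    android::MediaBuffer *buf = new android::MediaBuffer(4096 /* bytes */);
    memcpy(buf->data(), payload, payloadSize);
    buf->set_range(0, payloadSize);                       // valid region within data()
    buf->meta_data().setInt64(android::kKeyTime, 0);      // presentation time, usecs
    buf->release();   // no MediaBufferGroup set, so this deletes the buffer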
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
new file mode 100644
index 0000000..6c8d94a
--- /dev/null
+++ b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_BUFFER_BASE_H_
+
+#define MEDIA_BUFFER_BASE_H_
+
+namespace android {
+
+class MediaBufferBase;
+class MetaDataBase;
+
+class MediaBufferObserver {
+public:
+ MediaBufferObserver() {}
+ virtual ~MediaBufferObserver() {}
+
+ virtual void signalBufferReturned(MediaBufferBase *buffer) = 0;
+
+private:
+ MediaBufferObserver(const MediaBufferObserver &);
+ MediaBufferObserver &operator=(const MediaBufferObserver &);
+};
+
+class MediaBufferBase {
+public:
+ static MediaBufferBase *Create(size_t size);
+
+ // If MediaBufferGroup is set, decrement the local reference count;
+ // if the local reference count drops to 0, return the buffer to the
+ // associated MediaBufferGroup.
+ //
+ // If no MediaBufferGroup is set, the local reference count must be zero
+ // when called, whereupon the MediaBuffer is deleted.
+ virtual void release() = 0;
+
+ // Increments the local reference count.
+ // Use only when MediaBufferGroup is set.
+ virtual void add_ref() = 0;
+
+ virtual void *data() const = 0;
+ virtual size_t size() const = 0;
+
+ virtual size_t range_offset() const = 0;
+ virtual size_t range_length() const = 0;
+
+ virtual void set_range(size_t offset, size_t length) = 0;
+
+ virtual MetaDataBase& meta_data() = 0;
+
+ // Clears meta data and resets the range to the full extent.
+ virtual void reset() = 0;
+
+ virtual void setObserver(MediaBufferObserver *group) = 0;
+
+ // Returns a clone of this MediaBufferBase increasing its reference
+ // count. The clone references the same data but has its own range and
+ // MetaData.
+ virtual MediaBufferBase *clone() = 0;
+
+ virtual int refcount() const = 0;
+
+ virtual int localRefcount() const = 0;
+ virtual int remoteRefcount() const = 0;
+
+ virtual ~MediaBufferBase() {};
+};
+
+} // namespace android
+
+#endif // MEDIA_BUFFER_BASE_H_
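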
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h b/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
new file mode 100644
index 0000000..75d5df7
--- /dev/null
+++ b/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_BUFFER_GROUP_H_
+
+#define MEDIA_BUFFER_GROUP_H_
+
+#include <list>
+
+#include <media/stagefright/MediaBufferBase.h>
+#include <utils/Errors.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class MediaBufferBase;
+
+class MediaBufferGroup : public MediaBufferObserver {
+public:
+ MediaBufferGroup(size_t growthLimit = 0);
+
+ // create a media buffer group with preallocated buffers
+ MediaBufferGroup(size_t buffers, size_t buffer_size, size_t growthLimit = 0);
+
+ ~MediaBufferGroup();
+
+ void add_buffer(MediaBufferBase *buffer);
+
+ bool has_buffers();
+
+ // If nonBlocking is false, it blocks until a buffer is available and
+ // passes it to the caller in *buffer, while returning OK.
+ // The returned buffer will have a reference count of 1.
+ // If nonBlocking is true and a buffer is not immediately available,
+ // buffer is set to NULL and it returns WOULD_BLOCK.
+ // If requestedSize is 0, any free MediaBuffer will be returned.
+ // If requestedSize is > 0, the returned MediaBuffer should have buffer
+ // size of at least requestedSize.
+ status_t acquire_buffer(
+ MediaBufferBase **buffer, bool nonBlocking = false, size_t requestedSize = 0);
+
+ size_t buffers() const;
+
+ // If buffer is nullptr, have acquire_buffer() check for remote release.
+ virtual void signalBufferReturned(MediaBufferBase *buffer);
+
+private:
+ struct InternalData;
+ InternalData *mInternal;
+
+ MediaBufferGroup(const MediaBufferGroup &);
+ MediaBufferGroup &operator=(const MediaBufferGroup &);
+};
+
+} // namespace android
+
+#endif // MEDIA_BUFFER_GROUP_H_
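
An illustrative sketch of the group interface above, with preallocated buffers:

    android::MediaBufferGroup group(4 /* buffers */, 8192 /* bytes each */);

    android::MediaBufferBase *buffer = nullptr;
    if (group.acquire_buffer(&buffer) == android::OK) {  // blocking; refcount == 1
        // ... fill buffer ...
        buffer->release();                                // returns it to the group
    }

    // Non-blocking variant: may set buffer to NULL and return WOULD_BLOCK.
    android::status_t err = group.acquire_buffer(&buffer, true /* nonBlocking */);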
diff --git a/media/libmediaextractor/include/media/stagefright/MetaData.h b/media/libmediaextractor/include/media/stagefright/MetaData.h
new file mode 100644
index 0000000..f625358
--- /dev/null
+++ b/media/libmediaextractor/include/media/stagefright/MetaData.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef META_DATA_H_
+
+#define META_DATA_H_
+
+#include <sys/types.h>
+
+#include <stdint.h>
+
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <media/stagefright/MetaDataBase.h>
+
+namespace android {
+
+class MetaData final : public MetaDataBase, public RefBase {
+public:
+ MetaData();
+ MetaData(const MetaData &from);
+ MetaData(const MetaDataBase &from);
+
+protected:
+ virtual ~MetaData();
+
+private:
+ friend class BnMediaSource;
+ friend class BpMediaSource;
+ friend class BpMediaExtractor;
+ static sp<MetaData> createFromParcel(const Parcel &parcel);
+};
+
+} // namespace android
+
+#endif // META_DATA_H_
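
Since MetaData layers RefBase on top of MetaDataBase, instances are normally held through sp<>; an illustrative sketch (the MIME string and sample rate are just examples):

    android::sp<android::MetaData> meta = new android::MetaData;
    meta->setCString(android::kKeyMIMEType, "audio/mp4a-latm");
    meta->setInt32(android::kKeySampleRate, 48000);
    // e.g. handed to MediaSource::start(meta.get())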
diff --git a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
new file mode 100644
index 0000000..dfe34e8
--- /dev/null
+++ b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef META_DATA_BASE_H_
+
+#define META_DATA_BASE_H_
+
+#include <sys/types.h>
+
+#include <stdint.h>
+
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// The following keys map to int32_t data unless indicated otherwise.
+enum {
+ kKeyMIMEType = 'mime', // cstring
+ kKeyWidth = 'widt', // int32_t, image pixel
+ kKeyHeight = 'heig', // int32_t, image pixel
+ kKeyDisplayWidth = 'dWid', // int32_t, display/presentation
+ kKeyDisplayHeight = 'dHgt', // int32_t, display/presentation
+ kKeySARWidth = 'sarW', // int32_t, sampleAspectRatio width
+ kKeySARHeight = 'sarH', // int32_t, sampleAspectRatio height
+ kKeyThumbnailWidth = 'thbW', // int32_t, thumbnail width
+ kKeyThumbnailHeight = 'thbH', // int32_t, thumbnail height
+
+ // a rectangle, if absent assumed to be (0, 0, width - 1, height - 1)
+ kKeyCropRect = 'crop',
+
+ kKeyRotation = 'rotA', // int32_t (angle in degrees)
+ kKeyIFramesInterval = 'ifiv', // int32_t
+ kKeyStride = 'strd', // int32_t
+ kKeySliceHeight = 'slht', // int32_t
+ kKeyChannelCount = '#chn', // int32_t
+ kKeyChannelMask = 'chnm', // int32_t
+ kKeySampleRate = 'srte', // int32_t (audio sampling rate Hz)
+ kKeyPcmEncoding = 'PCMe', // int32_t (audio encoding enum)
+ kKeyFrameRate = 'frmR', // int32_t (video frame rate fps)
+ kKeyBitRate = 'brte', // int32_t (bps)
+ kKeyMaxBitRate = 'mxBr', // int32_t (bps)
+ kKeyStreamHeader = 'stHd', // raw data
+ kKeyESDS = 'esds', // raw data
+ kKeyAACProfile = 'aacp', // int32_t
+ kKeyAVCC = 'avcc', // raw data
+ kKeyHVCC = 'hvcc', // raw data
+ kKeyThumbnailHVCC = 'thvc', // raw data
+ kKeyD263 = 'd263', // raw data
+ kKeyVorbisInfo = 'vinf', // raw data
+ kKeyVorbisBooks = 'vboo', // raw data
+ kKeyOpusHeader = 'ohdr', // raw data
+ kKeyOpusCodecDelay = 'ocod', // uint64_t (codec delay in ns)
+ kKeyOpusSeekPreRoll = 'ospr', // uint64_t (seek preroll in ns)
+ kKeyFlacMetadata = 'flMd', // raw data
+ kKeyVp9CodecPrivate = 'vp9p', // raw data (vp9 csd information)
+ kKeyWantsNALFragments = 'NALf',
+ kKeyIsSyncFrame = 'sync', // int32_t (bool)
+ kKeyIsCodecConfig = 'conf', // int32_t (bool)
+ kKeyIsMuxerData = 'muxd', // int32_t (bool)
+ kKeyTime = 'time', // int64_t (usecs)
+ kKeyDecodingTime = 'decT', // int64_t (decoding timestamp in usecs)
+ kKeyNTPTime = 'ntpT', // uint64_t (ntp-timestamp)
+ kKeyTargetTime = 'tarT', // int64_t (usecs)
+ kKeyDriftTime = 'dftT', // int64_t (usecs)
+ kKeyAnchorTime = 'ancT', // int64_t (usecs)
+ kKeyDuration = 'dura', // int64_t (usecs)
+ kKeyPixelFormat = 'pixf', // int32_t
+ kKeyColorFormat = 'colf', // int32_t
+ kKeyColorSpace = 'cols', // int32_t
+ kKeyPlatformPrivate = 'priv', // pointer
+ kKeyDecoderComponent = 'decC', // cstring
+ kKeyBufferID = 'bfID',
+ kKeyMaxInputSize = 'inpS',
+ kKeyMaxWidth = 'maxW',
+ kKeyMaxHeight = 'maxH',
+ kKeyThumbnailTime = 'thbT', // int64_t (usecs)
+ kKeyTrackID = 'trID',
+ kKeyIsDRM = 'idrm', // int32_t (bool)
+ kKeyEncoderDelay = 'encd', // int32_t (frames)
+ kKeyEncoderPadding = 'encp', // int32_t (frames)
+
+ kKeyAlbum = 'albu', // cstring
+ kKeyArtist = 'arti', // cstring
+ kKeyAlbumArtist = 'aart', // cstring
+ kKeyComposer = 'comp', // cstring
+ kKeyGenre = 'genr', // cstring
+ kKeyTitle = 'titl', // cstring
+ kKeyYear = 'year', // cstring
+ kKeyAlbumArt = 'albA', // compressed image data
+ kKeyAlbumArtMIME = 'alAM', // cstring
+ kKeyAuthor = 'auth', // cstring
+ kKeyCDTrackNumber = 'cdtr', // cstring
+ kKeyDiscNumber = 'dnum', // cstring
+ kKeyDate = 'date', // cstring
+ kKeyWriter = 'writ', // cstring
+ kKeyCompilation = 'cpil', // cstring
+ kKeyLocation = 'loc ', // cstring
+ kKeyTimeScale = 'tmsl', // int32_t
+ kKeyCaptureFramerate = 'capF', // float (capture fps)
+
+ // video profile and level
+ kKeyVideoProfile = 'vprf', // int32_t
+ kKeyVideoLevel = 'vlev', // int32_t
+
+ // Set this key to enable authoring files in 64-bit offset
+ kKey64BitFileOffset = 'fobt', // int32_t (bool)
+ kKey2ByteNalLength = '2NAL', // int32_t (bool)
+
+ // Identify the file output format for authoring
+ // Please see <media/mediarecorder.h> for the supported
+ // file output formats.
+ kKeyFileType = 'ftyp', // int32_t
+
+ // Track authoring progress status
+ // kKeyTrackTimeStatus is used to track progress in elapsed time
+ kKeyTrackTimeStatus = 'tktm', // int64_t
+
+ kKeyRealTimeRecording = 'rtrc', // bool (int32_t)
+ kKeyNumBuffers = 'nbbf', // int32_t
+
+ // Ogg files can be tagged to be automatically looping...
+ kKeyAutoLoop = 'autL', // bool (int32_t)
+
+ kKeyValidSamples = 'valD', // int32_t
+
+ kKeyIsUnreadable = 'unre', // bool (int32_t)
+
+ // An indication that a video buffer has been rendered.
+ kKeyRendered = 'rend', // bool (int32_t)
+
+ // The language code for this media
+ kKeyMediaLanguage = 'lang', // cstring
+
+ // To store the timed text format data
+ kKeyTextFormatData = 'text', // raw data
+
+ kKeyRequiresSecureBuffers = 'secu', // bool (int32_t)
+
+ kKeyIsADTS = 'adts', // bool (int32_t)
+ kKeyAACAOT = 'aaot', // int32_t
+
+ // If a MediaBuffer's data represents (at least partially) encrypted
+ // data, the following fields aid in decryption.
+ // The data can be thought of as pairs of plain and encrypted data
+ // fragments, i.e. plain and encrypted data alternate.
+ // The first fragment is by convention plain data (if that's not the
+ // case, simply specify plain fragment size of 0).
+ // kKeyEncryptedSizes and kKeyPlainSizes each map to an array of
+ // size_t values. The sum total of all size_t values of both arrays
+ // must equal the amount of data (i.e. MediaBuffer's range_length()).
+ // If both arrays are present, they must be of the same size.
+ // If only encrypted sizes are present it is assumed that all
+ // plain sizes are 0, i.e. all fragments are encrypted.
+ // To programmatically set these arrays, use the MetaDataBase::setData API, i.e.
+ // const size_t encSizes[];
+ // meta->setData(
+ // kKeyEncryptedSizes, 0 /* type */, encSizes, sizeof(encSizes));
+ // A plain sizes array by itself makes no sense.
+ kKeyEncryptedSizes = 'encr', // size_t[]
+ kKeyPlainSizes = 'plai', // size_t[]
+ kKeyCryptoKey = 'cryK', // uint8_t[16]
+ kKeyCryptoIV = 'cryI', // uint8_t[16]
+ kKeyCryptoMode = 'cryM', // int32_t
+
+ kKeyCryptoDefaultIVSize = 'cryS', // int32_t
+
+ kKeyPssh = 'pssh', // raw data
+ kKeyCASystemID = 'caid', // int32_t
+ kKeyCASessionID = 'seid', // raw data
+
+ kKeyEncryptedByteBlock = 'cblk', // uint8_t
+ kKeySkipByteBlock = 'sblk', // uint8_t
+
+ // Please see MediaFormat.KEY_IS_AUTOSELECT.
+ kKeyTrackIsAutoselect = 'auto', // bool (int32_t)
+ // Please see MediaFormat.KEY_IS_DEFAULT.
+ kKeyTrackIsDefault = 'dflt', // bool (int32_t)
+ // Similar to MediaFormat.KEY_IS_FORCED_SUBTITLE but pertains to av tracks as well.
+ kKeyTrackIsForced = 'frcd', // bool (int32_t)
+
+ // H264 supplemental enhancement information offsets/sizes
+ kKeySEI = 'sei ', // raw data
+
+ // MPEG user data offsets
+ kKeyMpegUserData = 'mpud', // size_t[]
+
+ // Size of NALU length in mkv/mp4
+ kKeyNalLengthSize = 'nals', // int32_t
+
+ // HDR related
+ kKeyHdrStaticInfo = 'hdrS', // HDRStaticInfo
+
+ // color aspects
+ kKeyColorRange = 'cRng', // int32_t, color range, value defined by ColorAspects.Range
+ kKeyColorPrimaries = 'cPrm', // int32_t,
+ // color Primaries, value defined by ColorAspects.Primaries
+ kKeyTransferFunction = 'tFun', // int32_t,
+ // transfer Function, value defined by ColorAspects.Transfer.
+ kKeyColorMatrix = 'cMtx', // int32_t,
+ // color Matrix, value defined by ColorAspects.MatrixCoeffs.
+ kKeyTemporalLayerId = 'iLyr', // int32_t, temporal layer-id. 0-based (0 => base layer)
+ kKeyTemporalLayerCount = 'cLyr', // int32_t, number of temporal layers encoded
+
+ kKeyTileWidth = 'tilW', // int32_t, HEIF tile width
+ kKeyTileHeight = 'tilH', // int32_t, HEIF tile height
+ kKeyGridRows = 'grdR', // int32_t, HEIF grid rows
+ kKeyGridCols = 'grdC', // int32_t, HEIF grid columns
+ kKeyIccProfile = 'prof', // raw data, ICC profile data
+ kKeyIsPrimaryImage = 'prim', // bool (int32_t), image track is the primary image
+ kKeyFrameCount = 'nfrm', // int32_t, total number of frames in the video track
+ kKeyExifOffset = 'exof', // int64_t, Exif data offset
+ kKeyExifSize = 'exsz', // int64_t, Exif data size
+ kKeyIsExif = 'exif', // bool (int32_t) buffer contains exif data block
+};
+
+enum {
+ kTypeESDS = 'esds',
+ kTypeAVCC = 'avcc',
+ kTypeHVCC = 'hvcc',
+ kTypeD263 = 'd263',
+};
+
+enum {
+ kCryptoModeUnencrypted = 0,
+ kCryptoModeAesCtr = 1,
+ kCryptoModeAesCbc = 2,
+};
+
+class Parcel;
+
+class MetaDataBase {
+public:
+ MetaDataBase();
+ MetaDataBase(const MetaDataBase &from);
+ MetaDataBase& operator = (const MetaDataBase &);
+
+ virtual ~MetaDataBase();
+
+ enum Type {
+ TYPE_NONE = 'none',
+ TYPE_C_STRING = 'cstr',
+ TYPE_INT32 = 'in32',
+ TYPE_INT64 = 'in64',
+ TYPE_FLOAT = 'floa',
+ TYPE_POINTER = 'ptr ',
+ TYPE_RECT = 'rect',
+ };
+
+ void clear();
+ bool remove(uint32_t key);
+
+ bool setCString(uint32_t key, const char *value);
+ bool setInt32(uint32_t key, int32_t value);
+ bool setInt64(uint32_t key, int64_t value);
+ bool setFloat(uint32_t key, float value);
+ bool setPointer(uint32_t key, void *value);
+
+ bool setRect(
+ uint32_t key,
+ int32_t left, int32_t top,
+ int32_t right, int32_t bottom);
+
+ bool findCString(uint32_t key, const char **value) const;
+ bool findInt32(uint32_t key, int32_t *value) const;
+ bool findInt64(uint32_t key, int64_t *value) const;
+ bool findFloat(uint32_t key, float *value) const;
+ bool findPointer(uint32_t key, void **value) const;
+
+ bool findRect(
+ uint32_t key,
+ int32_t *left, int32_t *top,
+ int32_t *right, int32_t *bottom) const;
+
+ bool setData(uint32_t key, uint32_t type, const void *data, size_t size);
+
+ bool findData(uint32_t key, uint32_t *type,
+ const void **data, size_t *size) const;
+
+ bool hasData(uint32_t key) const;
+
+ String8 toString() const;
+ void dumpToLog() const;
+
+private:
+ friend class BpMediaSource;
+ friend class BnMediaSource;
+ friend class BnMediaExtractor;
+ friend class MetaData;
+
+ struct typed_data;
+ struct Rect;
+ struct MetaDataInternal;
+ MetaDataInternal *mInternalData;
+ status_t writeToParcel(Parcel &parcel);
+ status_t updateFromParcel(const Parcel &parcel);
+};
+
+} // namespace android
+
+#endif // META_DATA_BASE_H_
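
An illustrative sketch of the typed key/value API declared above (the values are arbitrary):

    android::MetaDataBase meta;
    meta.setCString(android::kKeyMIMEType, "video/avc");
    meta.setInt32(android::kKeyWidth, 1920);
    meta.setInt32(android::kKeyHeight, 1080);
    meta.setInt64(android::kKeyDuration, 10 * 1000000LL);   // usecs

    int32_t width = 0;
    if (meta.findInt32(android::kKeyWidth, &width)) {
        // width == 1920
    }

    // Raw-data keys pair a type tag with an opaque blob, e.g.
    // meta.setData(android::kKeyAVCC, android::kTypeAVCC, csdData, csdSize);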
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 15dac59..07e124b 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -1,4 +1,6 @@
-cc_library_shared {
+// TODO: change it back to cc_library_shared when there is a way to
+// expose media metrics as stable API.
+cc_library {
name: "libmediametrics",
srcs: [
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index f968c09..135c9b6 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -29,8 +29,6 @@
#include <utils/SortedVector.h>
#include <utils/threads.h>
-#include <media/stagefright/foundation/AString.h>
-
#include <binder/IServiceManager.h>
#include <media/IMediaAnalyticsService.h>
#include <media/MediaAnalyticsItem.h>
@@ -62,7 +60,7 @@
mPkgVersionCode(0),
mSessionID(MediaAnalyticsItem::SessionIDNone),
mTimestamp(0),
- mFinalized(0),
+ mFinalized(1),
mPropCount(0), mPropSize(0), mProps(NULL)
{
mKey = MediaAnalyticsItem::kKeyNone;
@@ -74,7 +72,7 @@
mPkgVersionCode(0),
mSessionID(MediaAnalyticsItem::SessionIDNone),
mTimestamp(0),
- mFinalized(0),
+ mFinalized(1),
mPropCount(0), mPropSize(0), mProps(NULL)
{
if (DEBUG_ALLOCATIONS) {
@@ -139,16 +137,6 @@
return dst;
}
-// so clients can send intermediate values to be overlaid later
-MediaAnalyticsItem &MediaAnalyticsItem::setFinalized(bool value) {
- mFinalized = value;
- return *this;
-}
-
-bool MediaAnalyticsItem::getFinalized() const {
- return mFinalized;
-}
-
MediaAnalyticsItem &MediaAnalyticsItem::setSessionID(MediaAnalyticsItem::SessionID_t id) {
mSessionID = id;
return *this;
@@ -205,21 +193,17 @@
return mUid;
}
-MediaAnalyticsItem &MediaAnalyticsItem::setPkgName(AString pkgName) {
+MediaAnalyticsItem &MediaAnalyticsItem::setPkgName(const std::string &pkgName) {
mPkgName = pkgName;
return *this;
}
-AString MediaAnalyticsItem::getPkgName() const {
- return mPkgName;
-}
-
-MediaAnalyticsItem &MediaAnalyticsItem::setPkgVersionCode(int32_t pkgVersionCode) {
+MediaAnalyticsItem &MediaAnalyticsItem::setPkgVersionCode(int64_t pkgVersionCode) {
mPkgVersionCode = pkgVersionCode;
return *this;
}
-int32_t MediaAnalyticsItem::getPkgVersionCode() const {
+int64_t MediaAnalyticsItem::getPkgVersionCode() const {
return mPkgVersionCode;
}
@@ -264,12 +248,17 @@
}
void MediaAnalyticsItem::Prop::setName(const char *name, size_t len) {
- mNameLen = len;
+ free((void *)mName);
mName = (const char *) malloc(len+1);
+ LOG_ALWAYS_FATAL_IF(mName == NULL,
+ "failed malloc() for property '%s' (len %zu)",
+ name, len);
memcpy ((void *)mName, name, len+1);
+ mNameLen = len;
}
-// used only as part of a storing operation
+// consider this "find-or-allocate".
+// caller validates type and uses clearPropValue() accordingly
MediaAnalyticsItem::Prop *MediaAnalyticsItem::allocateProp(const char *name) {
size_t len = strlen(name);
size_t i = findPropIndex(name, len);
@@ -279,13 +268,14 @@
prop = &mProps[i];
} else {
if (i == mPropSize) {
- growProps();
- // XXX: verify success
+ if (growProps() == false) {
+ ALOGE("failed allocation for new props");
+ return NULL;
+ }
}
i = mPropCount++;
prop = &mProps[i];
prop->setName(name, len);
- prop->mType = kTypeNone; // make caller set type info
}
return prop;
@@ -312,41 +302,59 @@
// set the values
void MediaAnalyticsItem::setInt32(MediaAnalyticsItem::Attr name, int32_t value) {
Prop *prop = allocateProp(name);
- prop->mType = kTypeInt32;
- prop->u.int32Value = value;
+ if (prop != NULL) {
+ clearPropValue(prop);
+ prop->mType = kTypeInt32;
+ prop->u.int32Value = value;
+ }
}
void MediaAnalyticsItem::setInt64(MediaAnalyticsItem::Attr name, int64_t value) {
Prop *prop = allocateProp(name);
- prop->mType = kTypeInt64;
- prop->u.int64Value = value;
+ if (prop != NULL) {
+ clearPropValue(prop);
+ prop->mType = kTypeInt64;
+ prop->u.int64Value = value;
+ }
}
void MediaAnalyticsItem::setDouble(MediaAnalyticsItem::Attr name, double value) {
Prop *prop = allocateProp(name);
- prop->mType = kTypeDouble;
- prop->u.doubleValue = value;
+ if (prop != NULL) {
+ clearPropValue(prop);
+ prop->mType = kTypeDouble;
+ prop->u.doubleValue = value;
+ }
}
void MediaAnalyticsItem::setCString(MediaAnalyticsItem::Attr name, const char *value) {
Prop *prop = allocateProp(name);
// any old value will be gone
- prop->mType = kTypeCString;
- prop->u.CStringValue = strdup(value);
+ if (prop != NULL) {
+ clearPropValue(prop);
+ prop->mType = kTypeCString;
+ prop->u.CStringValue = strdup(value);
+ }
}
void MediaAnalyticsItem::setRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
Prop *prop = allocateProp(name);
- prop->mType = kTypeRate;
- prop->u.rate.count = count;
- prop->u.rate.duration = duration;
+ if (prop != NULL) {
+ clearPropValue(prop);
+ prop->mType = kTypeRate;
+ prop->u.rate.count = count;
+ prop->u.rate.duration = duration;
+ }
}
// find/add/set fused into a single operation
void MediaAnalyticsItem::addInt32(MediaAnalyticsItem::Attr name, int32_t value) {
Prop *prop = allocateProp(name);
+ if (prop == NULL) {
+ return;
+ }
switch (prop->mType) {
case kTypeInt32:
prop->u.int32Value += value;
@@ -361,6 +369,9 @@
void MediaAnalyticsItem::addInt64(MediaAnalyticsItem::Attr name, int64_t value) {
Prop *prop = allocateProp(name);
+ if (prop == NULL) {
+ return;
+ }
switch (prop->mType) {
case kTypeInt64:
prop->u.int64Value += value;
@@ -375,6 +386,9 @@
void MediaAnalyticsItem::addRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
Prop *prop = allocateProp(name);
+ if (prop == NULL) {
+ return;
+ }
switch (prop->mType) {
case kTypeRate:
prop->u.rate.count += count;
@@ -391,6 +405,9 @@
void MediaAnalyticsItem::addDouble(MediaAnalyticsItem::Attr name, double value) {
Prop *prop = allocateProp(name);
+ if (prop == NULL) {
+ return;
+ }
switch (prop->mType) {
case kTypeDouble:
prop->u.doubleValue += value;
@@ -577,6 +594,9 @@
// fix any pointers that we blindly copied, so we have our own copies
if (dst->mName) {
void *p = malloc(dst->mNameLen + 1);
+ LOG_ALWAYS_FATAL_IF(p == NULL,
+ "failed malloc() duping property '%s' (len %zu)",
+ dst->mName, dst->mNameLen);
memcpy (p, src->mName, dst->mNameLen + 1);
dst->mName = (const char *) p;
}
@@ -585,7 +605,7 @@
}
}
-void MediaAnalyticsItem::growProps(int increment)
+bool MediaAnalyticsItem::growProps(int increment)
{
if (increment <= 0) {
increment = kGrowProps;
@@ -599,6 +619,10 @@
}
mProps = ni;
mPropSize = nsize;
+ return true;
+ } else {
+ ALOGW("MediaAnalyticsItem::growProps fails");
+ return false;
}
}
@@ -612,9 +636,12 @@
mPid = data.readInt32();
mUid = data.readInt32();
mPkgName = data.readCString();
- mPkgVersionCode = data.readInt32();
+ mPkgVersionCode = data.readInt64();
mSessionID = data.readInt64();
+ // We no longer pay attention to user setting of finalized, BUT it's
+ // still part of the wire packet -- so read & discard.
mFinalized = data.readInt32();
+ mFinalized = 1;
mTimestamp = data.readInt64();
int count = data.readInt32();
@@ -659,7 +686,7 @@
data->writeInt32(mPid);
data->writeInt32(mUid);
data->writeCString(mPkgName.c_str());
- data->writeInt32(mPkgVersionCode);
+ data->writeInt64(mPkgVersionCode);
data->writeInt64(mSessionID);
data->writeInt32(mFinalized);
data->writeInt64(mTimestamp);
@@ -699,11 +726,11 @@
}
-AString MediaAnalyticsItem::toString() {
- return toString(-1);
+std::string MediaAnalyticsItem::toString() {
+ return toString(PROTO_LAST);
}
-AString MediaAnalyticsItem::toString(int version) {
+std::string MediaAnalyticsItem::toString(int version) {
// v0 : released with 'o'
// v1 : bug fix (missing pid/finalized separator),
@@ -716,7 +743,7 @@
version = PROTO_LAST;
}
- AString result;
+ std::string result;
char buffer[512];
if (version == PROTO_V0) {
@@ -738,7 +765,7 @@
if (version >= PROTO_V1) {
result.append(mPkgName);
- snprintf(buffer, sizeof(buffer), ":%d:", mPkgVersionCode);
+ snprintf(buffer, sizeof(buffer), ":%" PRId64 ":", mPkgVersionCode);
result.append(buffer);
}
@@ -813,7 +840,7 @@
bool MediaAnalyticsItem::selfrecord(bool forcenew) {
if (DEBUG_API) {
- AString p = this->toString();
+ std::string p = this->toString();
ALOGD("selfrecord of: %s [forcenew=%d]", p.c_str(), forcenew);
}
@@ -822,13 +849,13 @@
if (svc != NULL) {
MediaAnalyticsItem::SessionID_t newid = svc->submit(this, forcenew);
if (newid == SessionIDInvalid) {
- AString p = this->toString();
+ std::string p = this->toString();
ALOGW("Failed to record: %s [forcenew=%d]", p.c_str(), forcenew);
return false;
}
return true;
} else {
- AString p = this->toString();
+ std::string p = this->toString();
ALOGW("Unable to record: %s [forcenew=%d]", p.c_str(), forcenew);
return false;
}
@@ -956,39 +983,30 @@
mSessionID = incoming->mSessionID;
}
- // we always take the more recent 'finalized' value
- setFinalized(incoming->getFinalized());
-
// for each attribute from 'incoming', resolve appropriately
int nattr = incoming->mPropCount;
for (int i = 0 ; i < nattr; i++ ) {
Prop *iprop = &incoming->mProps[i];
- Prop *oprop = findProp(iprop->mName);
const char *p = iprop->mName;
size_t len = strlen(p);
- char semantic = p[len-1];
+
+ // should ignore a zero length name...
+ if (len == 0) {
+ continue;
+ }
+
+ Prop *oprop = findProp(iprop->mName);
if (oprop == NULL) {
// no oprop, so we insert the new one
oprop = allocateProp(p);
- copyProp(oprop, iprop);
- } else {
- // merge iprop into oprop
- switch (semantic) {
- case '<': // first aka keep old)
- /* nop */
- break;
-
- default: // default is 'last'
- case '>': // last (aka keep new)
- copyProp(oprop, iprop);
- break;
-
- case '+': /* sum */
- // XXX validate numeric types, sum in place
- break;
-
+ if (oprop != NULL) {
+ copyProp(oprop, iprop);
+ } else {
+ ALOGW("dropped property '%s'", iprop->mName);
}
+ } else {
+ copyProp(oprop, iprop);
}
}
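
An illustrative client-side sketch of the API touched above; the "codec" key and attribute names are made up, and the calls follow the std::string-based signatures introduced by this change:

    android::MediaAnalyticsItem *item = new android::MediaAnalyticsItem("codec");
    item->setPkgName("com.example.app");
    item->setInt32("width", 1920);
    item->setInt64("durationUs", 10 * 1000000LL);
    item->addInt64("frames", 300);            // find/add/set fused into one call
    std::string summary = item->toString();   // std::string now, not AString
    bool recorded = item->selfrecord(false /* forcenew */);
    delete item;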
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index dd7452f..263cde7 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -18,6 +18,7 @@
#define ANDROID_MEDIA_MEDIAANALYTICSITEM_H
#include <cutils/properties.h>
+#include <string>
#include <sys/types.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
@@ -25,13 +26,10 @@
#include <utils/StrongPointer.h>
#include <utils/Timers.h>
-#include <media/stagefright/foundation/AString.h>
-
namespace android {
-
-
class IMediaAnalyticsService;
+class Parcel;
// the class interface
//
@@ -66,7 +64,7 @@
// values can be "component/component"
// basic values: "video", "audio", "drm"
// XXX: need to better define the format
- typedef AString Key;
+ typedef std::string Key;
static const Key kKeyNone; // ""
static const Key kKeyAny; // "*"
@@ -91,10 +89,6 @@
MediaAnalyticsItem(Key);
~MediaAnalyticsItem();
- // so clients can send intermediate values to be overlaid later
- MediaAnalyticsItem &setFinalized(bool);
- bool getFinalized() const;
-
// SessionID ties multiple submissions for same key together
// so that if video "height" and "width" are known at one point
// and "framerate" is only known later, they can be be brought
@@ -170,18 +164,18 @@
MediaAnalyticsItem &setUid(uid_t);
uid_t getUid() const;
- MediaAnalyticsItem &setPkgName(AString);
- AString getPkgName() const;
+ MediaAnalyticsItem &setPkgName(const std::string &pkgName);
+ std::string getPkgName() const { return mPkgName; }
- MediaAnalyticsItem &setPkgVersionCode(int32_t);
- int32_t getPkgVersionCode() const;
+ MediaAnalyticsItem &setPkgVersionCode(int64_t);
+ int64_t getPkgVersionCode() const;
// our serialization code for binder calls
int32_t writeToParcel(Parcel *);
int32_t readFromParcel(const Parcel&);
- AString toString();
- AString toString(int version);
+ std::string toString();
+ std::string toString(int version);
// are we collecting analytics data
static bool isEnabled();
@@ -204,8 +198,8 @@
// to help validate that A doesn't mess with B's records
pid_t mPid;
uid_t mUid;
- AString mPkgName;
- int32_t mPkgVersionCode;
+ std::string mPkgName;
+ int64_t mPkgVersionCode;
// let's reuse a binder connection
static sp<IMediaAnalyticsService> sAnalyticsService;
@@ -243,7 +237,7 @@
enum {
kGrowProps = 10
};
- void growProps(int increment = kGrowProps);
+ bool growProps(int increment = kGrowProps);
size_t findPropIndex(const char *name, size_t len);
Prop *findProp(const char *name);
Prop *allocateProp(const char *name);
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
new file mode 100644
index 0000000..1fa8789
--- /dev/null
+++ b/media/libmediaplayer2/Android.bp
@@ -0,0 +1,79 @@
+cc_library_headers {
+ name: "libmediaplayer2_headers",
+ vendor_available: true,
+ export_include_dirs: ["include"],
+}
+
+cc_library {
+ name: "libmediaplayer2",
+
+ srcs: [
+ "JAudioTrack.cpp",
+ "MediaPlayer2AudioOutput.cpp",
+ "mediaplayer2.cpp",
+ ],
+
+ shared_libs: [
+ "libandroid_runtime",
+ "libaudioclient",
+ "libbinder",
+ "libcutils",
+ "libgui",
+ "liblog",
+ "libmedia_omx",
+ "libmedia_player2_util",
+ "libmediaextractor",
+ "libstagefright_foundation",
+ "libui",
+ "libutils",
+
+ "libcrypto",
+ "libmediadrm",
+ "libmediametrics",
+ "libmediandk",
+ "libmediautils",
+ "libmemunreachable",
+ "libnativewindow",
+ "libpowermanager",
+ "libstagefright_httplive",
+ "libstagefright_player2",
+ ],
+
+ export_shared_lib_headers: [
+ "libaudioclient",
+ "libbinder",
+ "libmedia_omx",
+ ],
+
+ header_libs: [
+ "media_plugin_headers",
+ ],
+
+ static_libs: [
+ "libmedia_helper",
+ "libstagefright_nuplayer2",
+ "libstagefright_rtsp",
+ "libstagefright_timedtext",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+}
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
new file mode 100644
index 0000000..ac0cc57
--- /dev/null
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -0,0 +1,688 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JAudioTrack"
+
+#include "media/JAudioAttributes.h"
+#include "media/JAudioFormat.h"
+#include "mediaplayer2/JAudioTrack.h"
+
+#include <android_media_AudioErrors.h>
+#include <android_runtime/AndroidRuntime.h>
+
+namespace android {
+
+// TODO: Store Java class/methodID as a member variable in the class.
+// TODO: Add NULL && Exception checks after every JNI call.
+JAudioTrack::JAudioTrack( // < Usages of the arguments are below >
+ audio_stream_type_t streamType, // AudioAttributes
+ uint32_t sampleRate, // AudioFormat && bufferSizeInBytes
+ audio_format_t format, // AudioFormat && bufferSizeInBytes
+ audio_channel_mask_t channelMask, // AudioFormat && bufferSizeInBytes
+ callback_t cbf, // Offload
+ void* user, // Offload
+ size_t frameCount, // bufferSizeInBytes
+ audio_session_t sessionId, // AudioTrack
+ const audio_attributes_t* pAttributes, // AudioAttributes
+ float maxRequiredSpeed) { // bufferSizeInBytes
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
+ mAudioTrackCls = (jclass) env->NewGlobalRef(jAudioTrackCls);
+
+ maxRequiredSpeed = std::min(std::max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
+
+ int bufferSizeInBytes = 0;
+ if (sampleRate == 0 || frameCount > 0) {
+ // Manually calculate buffer size.
+ bufferSizeInBytes = audio_channel_count_from_out_mask(channelMask)
+ * audio_bytes_per_sample(format) * (frameCount > 0 ? frameCount : 1);
+ } else if (sampleRate > 0) {
+ // Call Java AudioTrack::getMinBufferSize().
+ jmethodID jGetMinBufferSize =
+ env->GetStaticMethodID(mAudioTrackCls, "getMinBufferSize", "(III)I");
+ bufferSizeInBytes = env->CallStaticIntMethod(mAudioTrackCls, jGetMinBufferSize,
+ sampleRate, outChannelMaskFromNative(channelMask), audioFormatFromNative(format));
+ }
+ bufferSizeInBytes = (int) (bufferSizeInBytes * maxRequiredSpeed);
+
+ // Create a Java AudioTrack object through its Builder.
+ jclass jBuilderCls = env->FindClass("android/media/AudioTrack$Builder");
+ jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+ jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
+ jmethodID jSetAudioAttributes = env->GetMethodID(jBuilderCls, "setAudioAttributes",
+ "(Landroid/media/AudioAttributes;)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetAudioAttributes,
+ JAudioAttributes::createAudioAttributesObj(env, pAttributes, streamType));
+
+ jmethodID jSetAudioFormat = env->GetMethodID(jBuilderCls, "setAudioFormat",
+ "(Landroid/media/AudioFormat;)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetAudioFormat,
+ JAudioFormat::createAudioFormatObj(env, sampleRate, format, channelMask));
+
+ jmethodID jSetBufferSizeInBytes = env->GetMethodID(jBuilderCls, "setBufferSizeInBytes",
+ "(I)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetBufferSizeInBytes, bufferSizeInBytes);
+
+ // We only use streaming mode of Java AudioTrack.
+ jfieldID jModeStream = env->GetStaticFieldID(mAudioTrackCls, "MODE_STREAM", "I");
+ jint transferMode = env->GetStaticIntField(mAudioTrackCls, jModeStream);
+ jmethodID jSetTransferMode = env->GetMethodID(jBuilderCls, "setTransferMode",
+ "(I)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetTransferMode,
+ transferMode /* Java AudioTrack::MODE_STREAM */);
+
+ if (sessionId != 0) {
+ jmethodID jSetSessionId = env->GetMethodID(jBuilderCls, "setSessionId",
+ "(I)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetSessionId, sessionId);
+ }
+
+ if (cbf != NULL) {
+ jmethodID jSetOffloadedPlayback = env->GetMethodID(jBuilderCls, "setOffloadedPlayback",
+ "(Z)Landroid/media/AudioTrack$Builder;");
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetOffloadedPlayback, true);
+ mFlags = AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+ }
+
+ jmethodID jBuild = env->GetMethodID(jBuilderCls, "build", "()Landroid/media/AudioTrack;");
+ mAudioTrackObj = env->CallObjectMethod(jBuilderObj, jBuild);
+
+ if (cbf != NULL) {
+ // Set offload mode callback
+ jobject jStreamEventCallbackObj = createStreamEventCallback(cbf, user);
+ jobject jExecutorObj = createCallbackExecutor();
+ jmethodID jSetStreamEventCallback = env->GetMethodID(
+ jAudioTrackCls,
+ "setStreamEventCallback",
+ "(Ljava/util/concurrent/Executor;Landroid/media/AudioTrack$StreamEventCallback;)V");
+ env->CallVoidMethod(
+ mAudioTrackObj, jSetStreamEventCallback, jExecutorObj, jStreamEventCallbackObj);
+ }
+}
+
+JAudioTrack::~JAudioTrack() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ env->DeleteGlobalRef(mAudioTrackCls);
+}
+
+size_t JAudioTrack::frameCount() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetBufferSizeInFrames = env->GetMethodID(
+ mAudioTrackCls, "getBufferSizeInFrames", "()I");
+ return env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
+}
+
+size_t JAudioTrack::channelCount() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
+ return env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
+}
+
+uint32_t JAudioTrack::latency() {
+ // TODO: Currently hard-coded as returning zero.
+ return 0;
+}
+
+status_t JAudioTrack::getPosition(uint32_t *position) {
+ if (position == NULL) {
+ return BAD_VALUE;
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetPlaybackHeadPosition = env->GetMethodID(
+ mAudioTrackCls, "getPlaybackHeadPosition", "()I");
+ *position = env->CallIntMethod(mAudioTrackObj, jGetPlaybackHeadPosition);
+
+ return NO_ERROR;
+}
+
+bool JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
+ jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
+
+ jfieldID jFramePosition = env->GetFieldID(jAudioTimeStampCls, "framePosition", "J");
+ jfieldID jNanoTime = env->GetFieldID(jAudioTimeStampCls, "nanoTime", "J");
+
+ jmethodID jGetTimestamp = env->GetMethodID(mAudioTrackCls,
+ "getTimestamp", "(Landroid/media/AudioTimestamp)B");
+ bool success = env->CallBooleanMethod(mAudioTrackObj, jGetTimestamp, jAudioTimeStampObj);
+
+ if (!success) {
+ return false;
+ }
+
+ long long framePosition = env->GetLongField(jAudioTimeStampObj, jFramePosition);
+ long long nanoTime = env->GetLongField(jAudioTimeStampObj, jNanoTime);
+
+ struct timespec ts;
+ const long long secondToNano = 1000000000LL; // 1E9
+ ts.tv_sec = nanoTime / secondToNano;
+ ts.tv_nsec = nanoTime % secondToNano;
+ timestamp.mTime = ts;
+ timestamp.mPosition = (uint32_t) framePosition;
+
+ return true;
+}
+
+status_t JAudioTrack::getTimestamp(ExtendedTimestamp *timestamp __unused) {
+ // TODO: Implement this after appropriate Java AudioTrack method is available.
+ return NO_ERROR;
+}
+
+status_t JAudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate) {
+ // TODO: existing native AudioTrack returns INVALID_OPERATION on offload/direct/fast tracks.
+ // Should we do the same thing?
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
+ jmethodID jPlaybackParamsCtor = env->GetMethodID(jPlaybackParamsCls, "<init>", "()V");
+ jobject jPlaybackParamsObj = env->NewObject(jPlaybackParamsCls, jPlaybackParamsCtor);
+
+ jmethodID jSetAudioFallbackMode = env->GetMethodID(
+ jPlaybackParamsCls, "setAudioFallbackMode", "(I)Landroid/media/PlaybackParams;");
+ jPlaybackParamsObj = env->CallObjectMethod(
+ jPlaybackParamsObj, jSetAudioFallbackMode, playbackRate.mFallbackMode);
+
+ jmethodID jSetAudioStretchMode = env->GetMethodID(
+ jPlaybackParamsCls, "setAudioStretchMode", "(I)Landroid/media/PlaybackParams;");
+ jPlaybackParamsObj = env->CallObjectMethod(
+ jPlaybackParamsObj, jSetAudioStretchMode, playbackRate.mStretchMode);
+
+ jmethodID jSetPitch = env->GetMethodID(
+ jPlaybackParamsCls, "setPitch", "(F)Landroid/media/PlaybackParams;");
+ jPlaybackParamsObj = env->CallObjectMethod(jPlaybackParamsObj, jSetPitch, playbackRate.mPitch);
+
+ jmethodID jSetSpeed = env->GetMethodID(
+ jPlaybackParamsCls, "setSpeed", "(F)Landroid/media/PlaybackParams;");
+ jPlaybackParamsObj = env->CallObjectMethod(jPlaybackParamsObj, jSetSpeed, playbackRate.mSpeed);
+
+
+ // Set this Java PlaybackParams object into Java AudioTrack.
+ jmethodID jSetPlaybackParams = env->GetMethodID(
+ mAudioTrackCls, "setPlaybackParams", "(Landroid/media/PlaybackParams;)V");
+ env->CallVoidMethod(mAudioTrackObj, jSetPlaybackParams, jPlaybackParamsObj);
+ // TODO: Should we catch the Java IllegalArgumentException?
+
+ return NO_ERROR;
+}
+
+const AudioPlaybackRate JAudioTrack::getPlaybackRate() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ jmethodID jGetPlaybackParams = env->GetMethodID(
+ mAudioTrackCls, "getPlaybackParams", "()Landroid/media/PlaybackParams;");
+ jobject jPlaybackParamsObj = env->CallObjectMethod(mAudioTrackObj, jGetPlaybackParams);
+
+ AudioPlaybackRate playbackRate;
+ jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
+
+ jmethodID jGetAudioFallbackMode = env->GetMethodID(
+ jPlaybackParamsCls, "getAudioFallbackMode", "()I");
+ // TODO: Should we enable passing AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT?
+ // The enum is internal only, so it is not defined in PlaybackParams.java.
+ // TODO: Is this the right way to convert an int to an enum?
+ playbackRate.mFallbackMode = static_cast<AudioTimestretchFallbackMode>(
+ env->CallIntMethod(jPlaybackParamsObj, jGetAudioFallbackMode));
+
+ jmethodID jGetAudioStretchMode = env->GetMethodID(
+ jPlaybackParamsCls, "getAudioStretchMode", "()I");
+ playbackRate.mStretchMode = static_cast<AudioTimestretchStretchMode>(
+ env->CallIntMethod(jPlaybackParamsObj, jGetAudioStretchMode));
+
+ jmethodID jGetPitch = env->GetMethodID(jPlaybackParamsCls, "getPitch", "()F");
+ playbackRate.mPitch = env->CallFloatMethod(jPlaybackParamsObj, jGetPitch);
+
+ jmethodID jGetSpeed = env->GetMethodID(jPlaybackParamsCls, "getSpeed", "()F");
+ playbackRate.mSpeed = env->CallFloatMethod(jPlaybackParamsObj, jGetSpeed);
+
+ return playbackRate;
+}
+
+media::VolumeShaper::Status JAudioTrack::applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation) {
+
+ jobject jConfigurationObj = createVolumeShaperConfigurationObj(configuration);
+ jobject jOperationObj = createVolumeShaperOperationObj(operation);
+
+ if (jConfigurationObj == NULL || jOperationObj == NULL) {
+ return media::VolumeShaper::Status(BAD_VALUE);
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ jmethodID jCreateVolumeShaper = env->GetMethodID(mAudioTrackCls, "createVolumeShaper",
+ "(Landroid/media/VolumeShaper$Configuration;)Landroid/media/VolumeShaper;");
+ jobject jVolumeShaperObj = env->CallObjectMethod(
+ mAudioTrackObj, jCreateVolumeShaper, jConfigurationObj);
+
+ jclass jVolumeShaperCls = env->FindClass("android/media/VolumeShaper");
+ jmethodID jApply = env->GetMethodID(jVolumeShaperCls, "apply",
+ "(Landroid/media/VolumeShaper$Operation;)V");
+ env->CallVoidMethod(jVolumeShaperObj, jApply, jOperationObj);
+
+ return media::VolumeShaper::Status(NO_ERROR);
+}
+
+status_t JAudioTrack::setAuxEffectSendLevel(float level) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jSetAuxEffectSendLevel = env->GetMethodID(
+ mAudioTrackCls, "setAuxEffectSendLevel", "(F)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jSetAuxEffectSendLevel, level);
+ return javaToNativeStatus(result);
+}
+
+status_t JAudioTrack::attachAuxEffect(int effectId) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jAttachAuxEffect = env->GetMethodID(mAudioTrackCls, "attachAuxEffect", "(I)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jAttachAuxEffect, effectId);
+ return javaToNativeStatus(result);
+}
+
+status_t JAudioTrack::setVolume(float left, float right) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ // TODO: Java setStereoVolume is deprecated. Do we really need this method?
+ jmethodID jSetStereoVolume = env->GetMethodID(mAudioTrackCls, "setStereoVolume", "(FF)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jSetStereoVolume, left, right);
+ return javaToNativeStatus(result);
+}
+
+status_t JAudioTrack::setVolume(float volume) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jSetVolume = env->GetMethodID(mAudioTrackCls, "setVolume", "(F)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jSetVolume, volume);
+ return javaToNativeStatus(result);
+}
+
+status_t JAudioTrack::start() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jPlay = env->GetMethodID(mAudioTrackCls, "play", "()V");
+ // TODO: Should we catch the Java IllegalStateException from play()?
+ env->CallVoidMethod(mAudioTrackObj, jPlay);
+ return NO_ERROR;
+}
+
+ssize_t JAudioTrack::write(const void* buffer, size_t size, bool blocking) {
+ if (buffer == NULL) {
+ return BAD_VALUE;
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
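+    // Copy the native buffer into a Java byte array and wrap it in a ByteBuffer,
+    // since the Java AudioTrack write() overload used below consumes a ByteBuffer.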
+ jbyteArray jAudioData = env->NewByteArray(size);
+ env->SetByteArrayRegion(jAudioData, 0, size, (jbyte *) buffer);
+
+ jclass jByteBufferCls = env->FindClass("java/nio/ByteBuffer");
+ jmethodID jWrap = env->GetStaticMethodID(jByteBufferCls, "wrap", "([B)Ljava/nio/ByteBuffer;");
+ jobject jByteBufferObj = env->CallStaticObjectMethod(jByteBufferCls, jWrap, jAudioData);
+
+ int writeMode = 0;
+ if (blocking) {
+ jfieldID jWriteBlocking = env->GetStaticFieldID(mAudioTrackCls, "WRITE_BLOCKING", "I");
+ writeMode = env->GetStaticIntField(mAudioTrackCls, jWriteBlocking);
+ } else {
+ jfieldID jWriteNonBlocking = env->GetStaticFieldID(
+ mAudioTrackCls, "WRITE_NON_BLOCKING", "I");
+ writeMode = env->GetStaticIntField(mAudioTrackCls, jWriteNonBlocking);
+ }
+
+ jmethodID jWrite = env->GetMethodID(mAudioTrackCls, "write", "(Ljava/nio/ByteBuffer;II)I");
+ int result = env->CallIntMethod(mAudioTrackObj, jWrite, jByteBufferObj, size, writeMode);
+
+ if (result >= 0) {
+ return result;
+ } else {
+ return javaToNativeStatus(result);
+ }
+}
+
+void JAudioTrack::stop() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jStop = env->GetMethodID(mAudioTrackCls, "stop", "()V");
+ env->CallVoidMethod(mAudioTrackObj, jStop);
+ // TODO: Should we catch IllegalStateException?
+}
+
+// TODO: Is this the right implementation?
+bool JAudioTrack::stopped() const {
+ return !isPlaying();
+}
+
+void JAudioTrack::flush() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jFlush = env->GetMethodID(mAudioTrackCls, "flush", "()V");
+ env->CallVoidMethod(mAudioTrackObj, jFlush);
+}
+
+void JAudioTrack::pause() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jPause = env->GetMethodID(mAudioTrackCls, "pause", "()V");
+ env->CallVoidMethod(mAudioTrackObj, jPause);
+ // TODO: Should we catch IllegalStateException?
+}
+
+bool JAudioTrack::isPlaying() const {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetPlayState = env->GetMethodID(mAudioTrackCls, "getPlayState", "()I");
+ int currentPlayState = env->CallIntMethod(mAudioTrackObj, jGetPlayState);
+
+ // TODO: In Java AudioTrack, there is no STOPPING state.
+    // This means that while stopping, isPlaying() will return different values in the two classes:
+    // - in the existing native AudioTrack: true
+    // - in JAudioTrack: false
+    // If this is not acceptable, also modify the implementation of stopped().
+ jfieldID jPlayStatePlaying = env->GetStaticFieldID(mAudioTrackCls, "PLAYSTATE_PLAYING", "I");
+ int statePlaying = env->GetStaticIntField(mAudioTrackCls, jPlayStatePlaying);
+ return currentPlayState == statePlaying;
+}
+
+uint32_t JAudioTrack::getSampleRate() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetSampleRate = env->GetMethodID(mAudioTrackCls, "getSampleRate", "()I");
+ return env->CallIntMethod(mAudioTrackObj, jGetSampleRate);
+}
+
+status_t JAudioTrack::getBufferDurationInUs(int64_t *duration) {
+ if (duration == nullptr) {
+ return BAD_VALUE;
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetBufferSizeInFrames = env->GetMethodID(
+ mAudioTrackCls, "getBufferSizeInFrames", "()I");
+ int bufferSizeInFrames = env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
+
+ const double secondToMicro = 1000000LL; // 1E6
+ int sampleRate = JAudioTrack::getSampleRate();
+ float speed = JAudioTrack::getPlaybackRate().mSpeed;
+
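+    // The buffer drains at (sampleRate * speed) frames per second, so its duration in
+    // microseconds is bufferSizeInFrames * 1E6 / (sampleRate * speed).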
+ *duration = (int64_t) (bufferSizeInFrames * secondToMicro / (sampleRate * speed));
+ return NO_ERROR;
+}
+
+audio_format_t JAudioTrack::format() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
+ int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
+ return audioFormatToNative(javaFormat);
+}
+
+status_t JAudioTrack::dump(int fd, const Vector<String16>& args __unused) const
+{
+ String8 result;
+
+ result.append(" JAudioTrack::dump\n");
+
+ // TODO: Remove logs that includes unavailable information from below.
+// result.appendFormat(" status(%d), state(%d), session Id(%d), flags(%#x)\n",
+// mStatus, mState, mSessionId, mFlags);
+// result.appendFormat(" stream type(%d), left - right volume(%f, %f)\n",
+// (mStreamType == AUDIO_STREAM_DEFAULT) ?
+// audio_attributes_to_stream_type(&mAttributes) : mStreamType,
+// mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
+// result.appendFormat(" format(%#x), channel mask(%#x), channel count(%u)\n",
+// format(), mChannelMask, channelCount());
+// result.appendFormat(" sample rate(%u), original sample rate(%u), speed(%f)\n",
+// getSampleRate(), mOriginalSampleRate, mPlaybackRate.mSpeed);
+// result.appendFormat(" frame count(%zu), req. frame count(%zu)\n",
+// frameCount(), mReqFrameCount);
+// result.appendFormat(" notif. frame count(%u), req. notif. frame count(%u),"
+// " req. notif. per buff(%u)\n",
+// mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq);
+// result.appendFormat(" latency (%d), selected device Id(%d), routed device Id(%d)\n",
+// latency(), mSelectedDeviceId, getRoutedDeviceId());
+// result.appendFormat(" output(%d) AF latency (%u) AF frame count(%zu) AF SampleRate(%u)\n",
+// mOutput, mAfLatency, mAfFrameCount, mAfSampleRate);
+ ::write(fd, result.string(), result.size());
+ return NO_ERROR;
+}
+
+audio_port_handle_t JAudioTrack::getRoutedDeviceId() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetRoutedDevice = env->GetMethodID(mAudioTrackCls, "getRoutedDevice",
+ "()Landroid/media/AudioDeviceInfo;");
+ jobject jAudioDeviceInfoObj = env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
+ if (env->IsSameObject(jAudioDeviceInfoObj, NULL)) {
+ return AUDIO_PORT_HANDLE_NONE;
+ }
+
+    jclass jAudioDeviceInfoCls = env->FindClass("android/media/AudioDeviceInfo");
+ jmethodID jGetId = env->GetMethodID(jAudioDeviceInfoCls, "getId", "()I");
+ jint routedDeviceId = env->CallIntMethod(jAudioDeviceInfoObj, jGetId);
+ return routedDeviceId;
+}
+
+audio_session_t JAudioTrack::getAudioSessionId() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jmethodID jGetAudioSessionId = env->GetMethodID(mAudioTrackCls, "getAudioSessionId", "()I");
+ jint sessionId = env->CallIntMethod(mAudioTrackObj, jGetAudioSessionId);
+ return (audio_session_t) sessionId;
+}
+
+status_t JAudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jclass jMP2ImplCls = env->FindClass("android/media/MediaPlayer2Impl");
+    // The helper is invoked as a static method, so look it up with GetStaticMethodID.
+    jmethodID jSetAudioOutputDeviceById = env->GetStaticMethodID(
+            jMP2ImplCls, "setAudioOutputDeviceById", "(Landroid/media/AudioTrack;I)Z");
+    jboolean result = env->CallStaticBooleanMethod(
+            jMP2ImplCls, jSetAudioOutputDeviceById, mAudioTrackObj, deviceId);
+    return result ? NO_ERROR : BAD_VALUE;
+}
+
+status_t JAudioTrack::pendingDuration(int32_t *msec) {
+ if (msec == nullptr) {
+ return BAD_VALUE;
+ }
+
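+    // A pending duration is only meaningful for linear PCM that is not an HW A/V sync stream.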
+ bool isPurePcmData = audio_is_linear_pcm(format()) && (getFlags() & AUDIO_FLAG_HW_AV_SYNC) == 0;
+ if (!isPurePcmData) {
+ return INVALID_OPERATION;
+ }
+
+ // TODO: Need to know the difference btw. client and server time.
+    // Once getTimestamp(ExtendedTimestamp) is ready, un-comment the code below and modify it
+    // appropriately.
+ // (copied from AudioTrack.cpp)
+
+// ExtendedTimestamp ets;
+// ExtendedTimestamp::LOCATION location = ExtendedTimestamp::LOCATION_SERVER;
+// if (getTimestamp_l(&ets) == OK && ets.mTimeNs[location] > 0) {
+// int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
+// - ets.mPosition[location];
+// if (diff < 0) {
+// *msec = 0;
+// } else {
+// // ms is the playback time by frames
+// int64_t ms = (int64_t)((double)diff * 1000 /
+// ((double)mSampleRate * mPlaybackRate.mSpeed));
+// // clockdiff is the timestamp age (negative)
+// int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
+// ets.mTimeNs[location]
+// + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
+// - systemTime(SYSTEM_TIME_MONOTONIC);
+//
+// //ALOGV("ms: %lld clockdiff: %lld", (long long)ms, (long long)clockdiff);
+// static const int NANOS_PER_MILLIS = 1000000;
+// *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
+// }
+// return NO_ERROR;
+// }
+
+    // The timestamp-based computation above is not implemented yet; report zero pending
+    // duration rather than leaving *msec uninitialized.
+    *msec = 0;
+    return NO_ERROR;
+}
+
+status_t JAudioTrack::addAudioDeviceCallback(
+ const sp<AudioSystem::AudioDeviceCallback>& callback __unused) {
+ // TODO: Implement this after appropriate Java AudioTrack method is available.
+ return NO_ERROR;
+}
+
+status_t JAudioTrack::removeAudioDeviceCallback(
+ const sp<AudioSystem::AudioDeviceCallback>& callback __unused) {
+ // TODO: Implement this after appropriate Java AudioTrack method is available.
+ return NO_ERROR;
+}
+
+/////////////////////////////////////////////////////////////
+/// Private method begins ///
+/////////////////////////////////////////////////////////////
+
+jobject JAudioTrack::createVolumeShaperConfigurationObj(
+ const sp<media::VolumeShaper::Configuration>& config) {
+
+ // TODO: Java VolumeShaper's setId() / setOptionFlags() are hidden.
+ if (config == NULL || config->getType() == media::VolumeShaper::Configuration::TYPE_ID) {
+ return NULL;
+ }
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ // Referenced "android_media_VolumeShaper.h".
+ jfloatArray xarray = nullptr;
+ jfloatArray yarray = nullptr;
+ if (config->getType() == media::VolumeShaper::Configuration::TYPE_SCALE) {
+ // convert curve arrays
+ xarray = env->NewFloatArray(config->size());
+ yarray = env->NewFloatArray(config->size());
+ float * const x = env->GetFloatArrayElements(xarray, nullptr /* isCopy */);
+ float * const y = env->GetFloatArrayElements(yarray, nullptr /* isCopy */);
+ float *xptr = x, *yptr = y;
+ for (const auto &pt : *config.get()) {
+ *xptr++ = pt.first;
+ *yptr++ = pt.second;
+ }
+ env->ReleaseFloatArrayElements(xarray, x, 0 /* mode */);
+ env->ReleaseFloatArrayElements(yarray, y, 0 /* mode */);
+ }
+
+ jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Configuration$Builder");
+ jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+ jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
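+    // Each Builder setter returns the Builder, so chain the calls through the returned object.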
+    jmethodID jSetDuration = env->GetMethodID(jBuilderCls, "setDuration",
+            "(J)Landroid/media/VolumeShaper$Configuration$Builder;");
+    jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetDuration, (jlong) config->getDurationMs());
+
+ jmethodID jSetInterpolatorType = env->GetMethodID(jBuilderCls, "setInterpolatorType",
+ "(I)Landroid/media/VolumeShaper$Configuration$Builder;");
+    jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetInterpolatorType,
+ config->getInterpolatorType());
+
+ jmethodID jSetCurve = env->GetMethodID(jBuilderCls, "setCurve",
+ "([F[F)Landroid/media/VolumeShaper$Configuration$Builder;");
+    jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetCurve, xarray, yarray);
+
+ jmethodID jBuild = env->GetMethodID(jBuilderCls, "build",
+ "()Landroid/media/VolumeShaper$Configuration;");
+ return env->CallObjectMethod(jBuilderObj, jBuild);
+}
+
+jobject JAudioTrack::createVolumeShaperOperationObj(
+ const sp<media::VolumeShaper::Operation>& operation) {
+
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+
+ jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Operation$Builder");
+ jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
+ jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+
+ // Set XOffset
+ jmethodID jSetXOffset = env->GetMethodID(jBuilderCls, "setXOffset",
+ "(F)Landroid/media/VolumeShaper$Operation$Builder;");
+    jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetXOffset, operation->getXOffset());
+
+ int32_t flags = operation->getFlags();
+
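+    // Map each native operation flag onto the corresponding Java Builder call.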
+ if (operation->getReplaceId() >= 0) {
+        jmethodID jReplace = env->GetMethodID(jBuilderCls, "replace",
+                "(IZ)Landroid/media/VolumeShaper$Operation$Builder;");
+        bool join = (flags & media::VolumeShaper::Operation::FLAG_JOIN) != 0;
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jReplace, operation->getReplaceId(), join);
+ }
+
+    if (flags & media::VolumeShaper::Operation::FLAG_REVERSE) {
+        jmethodID jReverse = env->GetMethodID(jBuilderCls, "reverse",
+                "()Landroid/media/VolumeShaper$Operation$Builder;");
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jReverse);
+    }
+
+    // TODO: VolumeShaper Javadoc says "Do not call terminate() directly". Can we call this?
+    if (flags & media::VolumeShaper::Operation::FLAG_TERMINATE) {
+        jmethodID jTerminate = env->GetMethodID(jBuilderCls, "terminate",
+                "()Landroid/media/VolumeShaper$Operation$Builder;");
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jTerminate);
+    }
+
+    if (flags & media::VolumeShaper::Operation::FLAG_DELAY) {
+        jmethodID jDefer = env->GetMethodID(jBuilderCls, "defer",
+                "()Landroid/media/VolumeShaper$Operation$Builder;");
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jDefer);
+    }
+
+    if (flags & media::VolumeShaper::Operation::FLAG_CREATE_IF_NECESSARY) {
+        jmethodID jCreateIfNeeded = env->GetMethodID(jBuilderCls, "createIfNeeded",
+                "()Landroid/media/VolumeShaper$Operation$Builder;");
+        jBuilderObj = env->CallObjectMethod(jBuilderObj, jCreateIfNeeded);
+    }
+
+ // TODO: Handle error case (can it be NULL?)
+ jmethodID jBuild = env->GetMethodID(jBuilderCls, "build",
+ "()Landroid/media/VolumeShaper$Operation;");
+ return env->CallObjectMethod(jBuilderObj, jBuild);
+}
+
+jobject JAudioTrack::createStreamEventCallback(callback_t cbf, void* user) {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jclass jCallbackCls = env->FindClass("android/media/MediaPlayer2Impl$StreamEventCallback");
+ jmethodID jCallbackCtor = env->GetMethodID(jCallbackCls, "<init>", "(JJJ)V");
+    // The constructor takes three jlong fields, so cast the native pointers explicitly.
+    jobject jCallbackObj = env->NewObject(jCallbackCls, jCallbackCtor,
+            reinterpret_cast<jlong>(this), reinterpret_cast<jlong>(cbf),
+            reinterpret_cast<jlong>(user));
+ return jCallbackObj;
+}
+
+jobject JAudioTrack::createCallbackExecutor() {
+ JNIEnv *env = AndroidRuntime::getJNIEnv();
+ jclass jExecutorsCls = env->FindClass("java/util/concurrent/Executors");
+ jmethodID jNewSingleThreadExecutor = env->GetStaticMethodID(jExecutorsCls,
+ "newSingleThreadExecutor", "()Ljava/util/concurrent/ExecutorService;");
+ jobject jSingleThreadExecutorObj =
+ env->CallStaticObjectMethod(jExecutorsCls, jNewSingleThreadExecutor);
+ return jSingleThreadExecutorObj;
+}
+
+status_t JAudioTrack::javaToNativeStatus(int javaStatus) {
+ switch (javaStatus) {
+ case AUDIO_JAVA_SUCCESS:
+ return NO_ERROR;
+ case AUDIO_JAVA_BAD_VALUE:
+ return BAD_VALUE;
+ case AUDIO_JAVA_INVALID_OPERATION:
+ return INVALID_OPERATION;
+ case AUDIO_JAVA_PERMISSION_DENIED:
+ return PERMISSION_DENIED;
+ case AUDIO_JAVA_NO_INIT:
+ return NO_INIT;
+ case AUDIO_JAVA_WOULD_BLOCK:
+ return WOULD_BLOCK;
+ case AUDIO_JAVA_DEAD_OBJECT:
+ return DEAD_OBJECT;
+ default:
+ return UNKNOWN_ERROR;
+ }
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
new file mode 100644
index 0000000..a8e1d1f
--- /dev/null
+++ b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
@@ -0,0 +1,727 @@
+/*
+**
+** Copyright 2018, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaPlayer2AudioOutput"
+#include <mediaplayer2/MediaPlayer2AudioOutput.h>
+
+#include <cutils/properties.h> // for property_get
+#include <utils/Log.h>
+
+#include <media/AudioPolicyHelper.h>
+#include <media/AudioTrack.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace {
+
+const float kMaxRequiredSpeed = 8.0f; // for PCM tracks allow up to 8x speedup.
+
+} // anonymous namespace
+
+namespace android {
+
+// TODO: Find real cause of Audio/Video delay in PV framework and remove this workaround
+/* static */ int MediaPlayer2AudioOutput::mMinBufferCount = 4;
+/* static */ bool MediaPlayer2AudioOutput::mIsOnEmulator = false;
+
+status_t MediaPlayer2AudioOutput::dump(int fd, const Vector<String16>& args) const {
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ result.append(" MediaPlayer2AudioOutput\n");
+ snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n",
+ mStreamType, mLeftVolume, mRightVolume);
+ result.append(buffer);
+ snprintf(buffer, 255, " msec per frame(%f), latency (%d)\n",
+ mMsecsPerFrame, (mTrack != 0) ? mTrack->latency() : -1);
+ result.append(buffer);
+ snprintf(buffer, 255, " aux effect id(%d), send level (%f)\n",
+ mAuxEffectId, mSendLevel);
+ result.append(buffer);
+
+ ::write(fd, result.string(), result.size());
+ if (mTrack != 0) {
+ mTrack->dump(fd, args);
+ }
+ return NO_ERROR;
+}
+
+MediaPlayer2AudioOutput::MediaPlayer2AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
+ const audio_attributes_t* attr, const sp<AudioSystem::AudioDeviceCallback>& deviceCallback)
+ : mCallback(NULL),
+ mCallbackCookie(NULL),
+ mCallbackData(NULL),
+ mStreamType(AUDIO_STREAM_MUSIC),
+ mLeftVolume(1.0),
+ mRightVolume(1.0),
+ mPlaybackRate(AUDIO_PLAYBACK_RATE_DEFAULT),
+ mSampleRateHz(0),
+ mMsecsPerFrame(0),
+ mFrameSize(0),
+ mSessionId(sessionId),
+ mUid(uid),
+ mPid(pid),
+ mSendLevel(0.0),
+ mAuxEffectId(0),
+ mFlags(AUDIO_OUTPUT_FLAG_NONE),
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mDeviceCallbackEnabled(false),
+ mDeviceCallback(deviceCallback) {
+ ALOGV("MediaPlayer2AudioOutput(%d)", sessionId);
+ if (attr != NULL) {
+ mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
+ if (mAttributes != NULL) {
+ memcpy(mAttributes, attr, sizeof(audio_attributes_t));
+ mStreamType = audio_attributes_to_stream_type(attr);
+ }
+ } else {
+ mAttributes = NULL;
+ }
+
+ setMinBufferCount();
+}
+
+MediaPlayer2AudioOutput::~MediaPlayer2AudioOutput() {
+ close();
+ free(mAttributes);
+ delete mCallbackData;
+}
+
+//static
+void MediaPlayer2AudioOutput::setMinBufferCount() {
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("ro.kernel.qemu", value, 0)) {
+ mIsOnEmulator = true;
+ mMinBufferCount = 12; // to prevent systematic buffer underrun for emulator
+ }
+}
+
+// static
+bool MediaPlayer2AudioOutput::isOnEmulator() {
+ setMinBufferCount(); // benign race wrt other threads
+ return mIsOnEmulator;
+}
+
+// static
+int MediaPlayer2AudioOutput::getMinBufferCount() {
+ setMinBufferCount(); // benign race wrt other threads
+ return mMinBufferCount;
+}
+
+ssize_t MediaPlayer2AudioOutput::bufferSize() const {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return NO_INIT;
+ }
+ return mTrack->frameCount() * mFrameSize;
+}
+
+ssize_t MediaPlayer2AudioOutput::frameCount() const {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return NO_INIT;
+ }
+ return mTrack->frameCount();
+}
+
+ssize_t MediaPlayer2AudioOutput::channelCount() const {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return NO_INIT;
+ }
+ return mTrack->channelCount();
+}
+
+ssize_t MediaPlayer2AudioOutput::frameSize() const {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return NO_INIT;
+ }
+ return mFrameSize;
+}
+
+uint32_t MediaPlayer2AudioOutput::latency () const {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return 0;
+ }
+ return mTrack->latency();
+}
+
+float MediaPlayer2AudioOutput::msecsPerFrame() const {
+ Mutex::Autolock lock(mLock);
+ return mMsecsPerFrame;
+}
+
+status_t MediaPlayer2AudioOutput::getPosition(uint32_t *position) const {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return NO_INIT;
+ }
+ return mTrack->getPosition(position);
+}
+
+status_t MediaPlayer2AudioOutput::getTimestamp(AudioTimestamp &ts) const {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return NO_INIT;
+ }
+ return mTrack->getTimestamp(ts);
+}
+
+// TODO: Remove unnecessary calls to getPlayedOutDurationUs()
+// as it acquires locks and may query the audio driver.
+//
+// Some calls could conceivably retrieve extrapolated data instead of
+// accessing getTimestamp() or getPosition() every time a data buffer with
+// a media time is received.
+//
+// Calculate duration of played samples if played at normal rate (i.e., 1.0).
+int64_t MediaPlayer2AudioOutput::getPlayedOutDurationUs(int64_t nowUs) const {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0 || mSampleRateHz == 0) {
+ return 0;
+ }
+
+ uint32_t numFramesPlayed;
+ int64_t numFramesPlayedAtUs;
+ AudioTimestamp ts;
+
+ status_t res = mTrack->getTimestamp(ts);
+ if (res == OK) { // case 1: mixing audio tracks and offloaded tracks.
+ numFramesPlayed = ts.mPosition;
+ numFramesPlayedAtUs = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
+ //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
+ } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
+ numFramesPlayed = 0;
+ numFramesPlayedAtUs = nowUs;
+ //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
+ // numFramesPlayed, (long long)numFramesPlayedAtUs);
+ } else { // case 3: transitory at new track or audio fast tracks.
+ res = mTrack->getPosition(&numFramesPlayed);
+ CHECK_EQ(res, (status_t)OK);
+ numFramesPlayedAtUs = nowUs;
+ numFramesPlayedAtUs += 1000LL * mTrack->latency() / 2; /* XXX */
+ //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
+ }
+
+ // CHECK_EQ(numFramesPlayed & (1 << 31), 0); // can't be negative until 12.4 hrs, test
+ // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
+ int64_t durationUs = (int64_t)((int32_t)numFramesPlayed * 1000000LL / mSampleRateHz)
+ + nowUs - numFramesPlayedAtUs;
+ if (durationUs < 0) {
+ // Occurs when numFramesPlayed position is very small and the following:
+ // (1) In case 1, the time nowUs is computed before getTimestamp() is called and
+ // numFramesPlayedAtUs is greater than nowUs by time more than numFramesPlayed.
+ // (2) In case 3, using getPosition and adding mAudioSink->latency() to
+ // numFramesPlayedAtUs, by a time amount greater than numFramesPlayed.
+ //
+ // Both of these are transitory conditions.
+ ALOGV("getPlayedOutDurationUs: negative duration %lld set to zero", (long long)durationUs);
+ durationUs = 0;
+ }
+ ALOGV("getPlayedOutDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
+ (long long)durationUs, (long long)nowUs,
+ numFramesPlayed, (long long)numFramesPlayedAtUs);
+ return durationUs;
+}
+
+status_t MediaPlayer2AudioOutput::getFramesWritten(uint32_t *frameswritten) const {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return NO_INIT;
+ }
+ ExtendedTimestamp ets;
+ status_t status = mTrack->getTimestamp(&ets);
+ if (status == OK || status == WOULD_BLOCK) {
+ *frameswritten = (uint32_t)ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT];
+ }
+ return status;
+}
+
+status_t MediaPlayer2AudioOutput::setParameters(const String8& keyValuePairs) {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return NO_INIT;
+ }
+ return mTrack->setParameters(keyValuePairs);
+}
+
+String8 MediaPlayer2AudioOutput::getParameters(const String8& keys) {
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return String8::empty();
+ }
+ return mTrack->getParameters(keys);
+}
+
+void MediaPlayer2AudioOutput::setAudioAttributes(const audio_attributes_t * attributes) {
+ Mutex::Autolock lock(mLock);
+ if (attributes == NULL) {
+ free(mAttributes);
+ mAttributes = NULL;
+ } else {
+ if (mAttributes == NULL) {
+ mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
+ }
+ memcpy(mAttributes, attributes, sizeof(audio_attributes_t));
+ mStreamType = audio_attributes_to_stream_type(attributes);
+ }
+}
+
+void MediaPlayer2AudioOutput::setAudioStreamType(audio_stream_type_t streamType) {
+ Mutex::Autolock lock(mLock);
+ // do not allow direct stream type modification if attributes have been set
+ if (mAttributes == NULL) {
+ mStreamType = streamType;
+ }
+}
+
+void MediaPlayer2AudioOutput::close_l() {
+ mTrack.clear();
+}
+
+status_t MediaPlayer2AudioOutput::open(
+ uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+ audio_format_t format, int bufferCount,
+ AudioCallback cb, void *cookie,
+ audio_output_flags_t flags,
+ const audio_offload_info_t *offloadInfo,
+ bool doNotReconnect,
+ uint32_t suggestedFrameCount) {
+ ALOGV("open(%u, %d, 0x%x, 0x%x, %d, %d 0x%x)", sampleRate, channelCount, channelMask,
+ format, bufferCount, mSessionId, flags);
+
+ // offloading is only supported in callback mode for now.
+ // offloadInfo must be present if offload flag is set
+ if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) &&
+ ((cb == NULL) || (offloadInfo == NULL))) {
+ return BAD_VALUE;
+ }
+
+ // compute frame count for the AudioTrack internal buffer
+ size_t frameCount;
+ if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ frameCount = 0; // AudioTrack will get frame count from AudioFlinger
+ } else {
+ // try to estimate the buffer processing fetch size from AudioFlinger.
+ // framesPerBuffer is approximate and generally correct, except when it's not :-).
+ uint32_t afSampleRate;
+ size_t afFrameCount;
+ if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
+ return NO_INIT;
+ }
+ if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
+ return NO_INIT;
+ }
+ const size_t framesPerBuffer =
+ (unsigned long long)sampleRate * afFrameCount / afSampleRate;
+
+ if (bufferCount == 0) {
+ // use suggestedFrameCount
+ bufferCount = (suggestedFrameCount + framesPerBuffer - 1) / framesPerBuffer;
+ }
+        // Check argument bufferCount against the minimum buffer count
+ if (bufferCount != 0 && bufferCount < mMinBufferCount) {
+ ALOGV("bufferCount (%d) increased to %d", bufferCount, mMinBufferCount);
+ bufferCount = mMinBufferCount;
+ }
+ // if frameCount is 0, then AudioTrack will get frame count from AudioFlinger
+ // which will be the minimum size permitted.
+ frameCount = bufferCount * framesPerBuffer;
+ }
+
+ if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
+ channelMask = audio_channel_out_mask_from_count(channelCount);
+ if (0 == channelMask) {
+ ALOGE("open() error, can\'t derive mask for %d audio channels", channelCount);
+ return NO_INIT;
+ }
+ }
+
+ Mutex::Autolock lock(mLock);
+ mCallback = cb;
+ mCallbackCookie = cookie;
+
+ sp<AudioTrack> t;
+ CallbackData *newcbd = NULL;
+
+ ALOGV("creating new AudioTrack");
+
+ if (mCallback != NULL) {
+ newcbd = new CallbackData(this);
+ t = new AudioTrack(
+ mStreamType,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ CallbackWrapper,
+ newcbd,
+ 0, // notification frames
+ mSessionId,
+ AudioTrack::TRANSFER_CALLBACK,
+ offloadInfo,
+ mUid,
+ mPid,
+ mAttributes,
+ doNotReconnect,
+ 1.0f, // default value for maxRequiredSpeed
+ mSelectedDeviceId);
+ } else {
+ // TODO: Due to buffer memory concerns, we use a max target playback speed
+ // based on mPlaybackRate at the time of open (instead of kMaxRequiredSpeed),
+ // also clamping the target speed to 1.0 <= targetSpeed <= kMaxRequiredSpeed.
+ const float targetSpeed =
+ std::min(std::max(mPlaybackRate.mSpeed, 1.0f), kMaxRequiredSpeed);
+ ALOGW_IF(targetSpeed != mPlaybackRate.mSpeed,
+ "track target speed:%f clamped from playback speed:%f",
+ targetSpeed, mPlaybackRate.mSpeed);
+ t = new AudioTrack(
+ mStreamType,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ NULL, // callback
+ NULL, // user data
+ 0, // notification frames
+ mSessionId,
+ AudioTrack::TRANSFER_DEFAULT,
+ NULL, // offload info
+ mUid,
+ mPid,
+ mAttributes,
+ doNotReconnect,
+ targetSpeed,
+ mSelectedDeviceId);
+ }
+
+ if ((t == 0) || (t->initCheck() != NO_ERROR)) {
+ ALOGE("Unable to create audio track");
+ delete newcbd;
+ // t goes out of scope, so reference count drops to zero
+ return NO_INIT;
+ } else {
+ // successful AudioTrack initialization implies a legacy stream type was generated
+ // from the audio attributes
+ mStreamType = t->streamType();
+ }
+
+ CHECK((t != NULL) && ((mCallback == NULL) || (newcbd != NULL)));
+
+ mCallbackData = newcbd;
+ ALOGV("setVolume");
+ t->setVolume(mLeftVolume, mRightVolume);
+
+ mSampleRateHz = sampleRate;
+ mFlags = flags;
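+    // Wall-clock milliseconds consumed per source frame at the current playback speed.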
+ mMsecsPerFrame = 1E3f / (mPlaybackRate.mSpeed * sampleRate);
+ mFrameSize = t->frameSize();
+ mTrack = t;
+
+ return updateTrack_l();
+}
+
+status_t MediaPlayer2AudioOutput::updateTrack_l() {
+ if (mTrack == NULL) {
+ return NO_ERROR;
+ }
+
+ status_t res = NO_ERROR;
+ // Note some output devices may give us a direct track even though we don't specify it.
+ // Example: Line application b/17459982.
+ if ((mTrack->getFlags()
+ & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) == 0) {
+ res = mTrack->setPlaybackRate(mPlaybackRate);
+ if (res == NO_ERROR) {
+ mTrack->setAuxEffectSendLevel(mSendLevel);
+ res = mTrack->attachAuxEffect(mAuxEffectId);
+ }
+ }
+ mTrack->setOutputDevice(mSelectedDeviceId);
+ if (mDeviceCallbackEnabled) {
+ mTrack->addAudioDeviceCallback(mDeviceCallback.promote());
+ }
+ ALOGV("updateTrack_l() DONE status %d", res);
+ return res;
+}
+
+status_t MediaPlayer2AudioOutput::start() {
+ ALOGV("start");
+ Mutex::Autolock lock(mLock);
+ if (mCallbackData != NULL) {
+ mCallbackData->endTrackSwitch();
+ }
+ if (mTrack != 0) {
+ mTrack->setVolume(mLeftVolume, mRightVolume);
+ mTrack->setAuxEffectSendLevel(mSendLevel);
+ status_t status = mTrack->start();
+ return status;
+ }
+ return NO_INIT;
+}
+
+ssize_t MediaPlayer2AudioOutput::write(const void* buffer, size_t size, bool blocking) {
+ Mutex::Autolock lock(mLock);
+ LOG_ALWAYS_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
+
+ //ALOGV("write(%p, %u)", buffer, size);
+ if (mTrack != 0) {
+ return mTrack->write(buffer, size, blocking);
+ }
+ return NO_INIT;
+}
+
+void MediaPlayer2AudioOutput::stop() {
+ ALOGV("stop");
+ Mutex::Autolock lock(mLock);
+ if (mTrack != 0) {
+ mTrack->stop();
+ }
+}
+
+void MediaPlayer2AudioOutput::flush() {
+ ALOGV("flush");
+ Mutex::Autolock lock(mLock);
+ if (mTrack != 0) {
+ mTrack->flush();
+ }
+}
+
+void MediaPlayer2AudioOutput::pause() {
+ ALOGV("pause");
+ Mutex::Autolock lock(mLock);
+ if (mTrack != 0) {
+ mTrack->pause();
+ }
+}
+
+void MediaPlayer2AudioOutput::close() {
+ ALOGV("close");
+ sp<AudioTrack> track;
+ {
+ Mutex::Autolock lock(mLock);
+ track = mTrack;
+ close_l(); // clears mTrack
+ }
+ // destruction of the track occurs outside of mutex.
+}
+
+void MediaPlayer2AudioOutput::setVolume(float left, float right) {
+ ALOGV("setVolume(%f, %f)", left, right);
+ Mutex::Autolock lock(mLock);
+ mLeftVolume = left;
+ mRightVolume = right;
+ if (mTrack != 0) {
+ mTrack->setVolume(left, right);
+ }
+}
+
+status_t MediaPlayer2AudioOutput::setPlaybackRate(const AudioPlaybackRate &rate) {
+ ALOGV("setPlaybackRate(%f %f %d %d)",
+ rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ // remember rate so that we can set it when the track is opened
+ mPlaybackRate = rate;
+ return OK;
+ }
+ status_t res = mTrack->setPlaybackRate(rate);
+ if (res != NO_ERROR) {
+ return res;
+ }
+ // rate.mSpeed is always greater than 0 if setPlaybackRate succeeded
+ CHECK_GT(rate.mSpeed, 0.f);
+ mPlaybackRate = rate;
+ if (mSampleRateHz != 0) {
+ mMsecsPerFrame = 1E3f / (rate.mSpeed * mSampleRateHz);
+ }
+ return res;
+}
+
+status_t MediaPlayer2AudioOutput::getPlaybackRate(AudioPlaybackRate *rate) {
+    ALOGV("getPlaybackRate");
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return NO_INIT;
+ }
+ *rate = mTrack->getPlaybackRate();
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2AudioOutput::setAuxEffectSendLevel(float level) {
+ ALOGV("setAuxEffectSendLevel(%f)", level);
+ Mutex::Autolock lock(mLock);
+ mSendLevel = level;
+ if (mTrack != 0) {
+ return mTrack->setAuxEffectSendLevel(level);
+ }
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2AudioOutput::attachAuxEffect(int effectId) {
+ ALOGV("attachAuxEffect(%d)", effectId);
+ Mutex::Autolock lock(mLock);
+ mAuxEffectId = effectId;
+ if (mTrack != 0) {
+ return mTrack->attachAuxEffect(effectId);
+ }
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2AudioOutput::setOutputDevice(audio_port_handle_t deviceId) {
+ ALOGV("setOutputDevice(%d)", deviceId);
+ Mutex::Autolock lock(mLock);
+ mSelectedDeviceId = deviceId;
+ if (mTrack != 0) {
+ return mTrack->setOutputDevice(deviceId);
+ }
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2AudioOutput::getRoutedDeviceId(audio_port_handle_t* deviceId) {
+ ALOGV("getRoutedDeviceId");
+ Mutex::Autolock lock(mLock);
+ if (mTrack != 0) {
+ mRoutedDeviceId = mTrack->getRoutedDeviceId();
+ }
+ *deviceId = mRoutedDeviceId;
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2AudioOutput::enableAudioDeviceCallback(bool enabled) {
+ ALOGV("enableAudioDeviceCallback, %d", enabled);
+ Mutex::Autolock lock(mLock);
+ mDeviceCallbackEnabled = enabled;
+ if (mTrack != 0) {
+ status_t status;
+ if (enabled) {
+ status = mTrack->addAudioDeviceCallback(mDeviceCallback.promote());
+ } else {
+ status = mTrack->removeAudioDeviceCallback(mDeviceCallback.promote());
+ }
+ return status;
+ }
+ return NO_ERROR;
+}
+
+// static
+void MediaPlayer2AudioOutput::CallbackWrapper(
+ int event, void *cookie, void *info) {
+ //ALOGV("callbackwrapper");
+ CallbackData *data = (CallbackData*)cookie;
+ // lock to ensure we aren't caught in the middle of a track switch.
+ data->lock();
+ MediaPlayer2AudioOutput *me = data->getOutput();
+ AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
+ if (me == NULL) {
+ // no output set, likely because the track was scheduled to be reused
+ // by another player, but the format turned out to be incompatible.
+ data->unlock();
+ if (buffer != NULL) {
+ buffer->size = 0;
+ }
+ return;
+ }
+
+ switch(event) {
+ case AudioTrack::EVENT_MORE_DATA: {
+ size_t actualSize = (*me->mCallback)(
+ me, buffer->raw, buffer->size, me->mCallbackCookie,
+ CB_EVENT_FILL_BUFFER);
+
+ // Log when no data is returned from the callback.
+ // (1) We may have no data (especially with network streaming sources).
+ // (2) We may have reached the EOS and the audio track is not stopped yet.
+ // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
+ // NuPlayer2Renderer will return zero when it doesn't have data (it doesn't block to fill).
+ //
+ // This is a benign busy-wait, with the next data request generated 10 ms or more later;
+ // nevertheless for power reasons, we don't want to see too many of these.
+
+ ALOGV_IF(actualSize == 0 && buffer->size > 0, "callbackwrapper: empty buffer returned");
+
+ buffer->size = actualSize;
+ } break;
+
+ case AudioTrack::EVENT_STREAM_END:
+ // currently only occurs for offloaded callbacks
+ ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
+ (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
+ me->mCallbackCookie, CB_EVENT_STREAM_END);
+ break;
+
+ case AudioTrack::EVENT_NEW_IAUDIOTRACK :
+ ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
+ (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
+ me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
+ break;
+
+ case AudioTrack::EVENT_UNDERRUN:
+ // This occurs when there is no data available, typically
+ // when there is a failure to supply data to the AudioTrack. It can also
+ // occur in non-offloaded mode when the audio device comes out of standby.
+ //
+ // If an AudioTrack underruns it outputs silence. Since this happens suddenly
+ // it may sound like an audible pop or glitch.
+ //
+ // The underrun event is sent once per track underrun; the condition is reset
+ // when more data is sent to the AudioTrack.
+ ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
+ break;
+
+ default:
+ ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
+ }
+
+ data->unlock();
+}
+
+audio_session_t MediaPlayer2AudioOutput::getSessionId() const
+{
+ Mutex::Autolock lock(mLock);
+ return mSessionId;
+}
+
+uint32_t MediaPlayer2AudioOutput::getSampleRate() const
+{
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return 0;
+ }
+ return mTrack->getSampleRate();
+}
+
+int64_t MediaPlayer2AudioOutput::getBufferDurationInUs() const
+{
+ Mutex::Autolock lock(mLock);
+ if (mTrack == 0) {
+ return 0;
+ }
+ int64_t duration;
+ if (mTrack->getBufferDurationInUs(&duration) != OK) {
+ return 0;
+ }
+ return duration;
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h b/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
new file mode 100644
index 0000000..301825b
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
@@ -0,0 +1,419 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_JAUDIOTRACK_H
+#define ANDROID_JAUDIOTRACK_H
+
+#include <jni.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/AudioSystem.h>
+#include <media/VolumeShaper.h>
+#include <system/audio.h>
+#include <utils/Errors.h>
+
+#include <media/AudioTimestamp.h> // It has dependency on audio.h/Errors.h, but doesn't
+                                  // include them itself. Therefore it is included here last.
+
+namespace android {
+
+class JAudioTrack {
+public:
+
+ /* Events used by AudioTrack callback function (callback_t).
+ * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
+ */
+ enum event_type {
+ EVENT_MORE_DATA = 0, // Request to write more data to buffer.
+ EVENT_NEW_IAUDIOTRACK = 6, // IAudioTrack was re-created, either due to re-routing and
+ // voluntary invalidation by mediaserver, or mediaserver crash.
+ EVENT_STREAM_END = 7, // Sent after all the buffers queued in AF and HW are played
+ // back (after stop is called) for an offloaded track.
+ };
+
+ class Buffer
+ {
+ public:
+ size_t mSize; // input/output in bytes.
+ void* mData; // pointer to the audio data.
+ };
+
+ /* As a convenience, if a callback is supplied, a handler thread
+ * is automatically created with the appropriate priority. This thread
+ * invokes the callback when a new buffer becomes available or various conditions occur.
+ *
+ * Parameters:
+ *
+ * event: type of event notified (see enum AudioTrack::event_type).
+ * user: Pointer to context for use by the callback receiver.
+ * info: Pointer to optional parameter according to event type:
+ * - EVENT_MORE_DATA: pointer to JAudioTrack::Buffer struct. The callback must not
+ * write more bytes than indicated by 'size' field and update 'size' if fewer bytes
+ * are written.
+ * - EVENT_NEW_IAUDIOTRACK: unused.
+ * - EVENT_STREAM_END: unused.
+ */
+
+ typedef void (*callback_t)(int event, void* user, void *info);
+
+ /* Creates an JAudioTrack object for non-offload mode.
+ * Once created, the track needs to be started before it can be used.
+ * Unspecified values are set to appropriate default values.
+ *
+ * Parameters:
+ *
+ * streamType: Select the type of audio stream this track is attached to
+ * (e.g. AUDIO_STREAM_MUSIC).
+ * sampleRate: Data source sampling rate in Hz. Zero means to use the sink sample rate.
+ * A non-zero value must be specified if AUDIO_OUTPUT_FLAG_DIRECT is set.
+ * 0 will not work with current policy implementation for direct output
+ * selection where an exact match is needed for sampling rate.
+ * (TODO: Check direct output after flags can be used in Java AudioTrack.)
+ * format: Audio format. For mixed tracks, any PCM format supported by server is OK.
+ * For direct and offloaded tracks, the possible format(s) depends on the
+ * output sink.
+ * (TODO: How can we check whether a format is supported?)
+ * channelMask: Channel mask, such that audio_is_output_channel(channelMask) is true.
+ * cbf: Callback function. If not null, this function is called periodically
+ * to provide new data and inform of marker, position updates, etc.
+ * user: Context for use by the callback receiver.
+ * frameCount: Minimum size of track PCM buffer in frames. This defines the
+ * application's contribution to the latency of the track.
+ * The actual size selected by the JAudioTrack could be larger if the
+ * requested size is not compatible with current audio HAL configuration.
+ * Zero means to use a default value.
+ * sessionId: Specific session ID, or zero to use default.
+ * pAttributes: If not NULL, supersedes streamType for use case selection.
+ * maxRequiredSpeed: For PCM tracks, this creates an appropriate buffer size that will allow
+ * maxRequiredSpeed playback. Values less than 1.0f and greater than
+ * AUDIO_TIMESTRETCH_SPEED_MAX will be clamped. For non-PCM tracks
+ * and direct or offloaded tracks, this parameter is ignored.
+ * (TODO: Handle this after offload / direct track is supported.)
+ *
+ * TODO: Revive removed arguments after offload mode is supported.
+ */
+ JAudioTrack(audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ callback_t cbf,
+ void* user,
+ size_t frameCount = 0,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ const audio_attributes_t* pAttributes = NULL,
+ float maxRequiredSpeed = 1.0f);
+
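+    /* A minimal usage sketch (illustrative only; the argument values are hypothetical):
+     *
+     *   JAudioTrack track(AUDIO_STREAM_MUSIC, 48000, AUDIO_FORMAT_PCM_16_BIT,
+     *                     AUDIO_CHANNEL_OUT_STEREO, NULL, NULL);
+     *   track.start();
+     *   track.write(pcmData, pcmSizeInBytes);  // blocking write of interleaved PCM
+     *   track.stop();
+     */
+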
+ /*
+ // Q. May be used in AudioTrack.setPreferredDevice(AudioDeviceInfo)?
+ audio_port_handle_t selectedDeviceId,
+
+ // TODO: No place to use these values.
+ int32_t notificationFrames,
+ const audio_offload_info_t *offloadInfo,
+ */
+
+ virtual ~JAudioTrack();
+
+ size_t frameCount();
+ size_t channelCount();
+
+ /* Returns this track's estimated latency in milliseconds.
+ * This includes the latency due to AudioTrack buffer size, AudioMixer (if any)
+ * and audio hardware driver.
+ */
+ uint32_t latency();
+
+ /* Return the total number of frames played since playback start.
+ * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
+ * It is reset to zero by flush(), reload(), and stop().
+ *
+ * Parameters:
+ *
+ * position: Address where to return play head position.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - BAD_VALUE: position is NULL
+ */
+ status_t getPosition(uint32_t *position);
+
+ // TODO: Does this comment apply same to Java AudioTrack::getTimestamp?
+ // Changed the return type from status_t to bool, since Java AudioTrack::getTimestamp returns
+ // boolean. Will Java getTimestampWithStatus() be public?
+ /* Poll for a timestamp on demand.
+ * Use if EVENT_NEW_TIMESTAMP is not delivered often enough for your needs,
+ * or if you need to get the most recent timestamp outside of the event callback handler.
+ * Caution: calling this method too often may be inefficient;
+ * if you need a high resolution mapping between frame position and presentation time,
+ * consider implementing that at application level, based on the low resolution timestamps.
+ * Returns true if timestamp is valid.
+ * The timestamp parameter is undefined on return, if false is returned.
+ */
+ bool getTimestamp(AudioTimestamp& timestamp);
+
+    // TODO: This doc is just copied from AudioTrack.h. Revise it after implementation.
+ /* Return the extended timestamp, with additional timebase info and improved drain behavior.
+ *
+ * This is similar to the AudioTrack.java API:
+ * getTimestamp(@NonNull AudioTimestamp timestamp, @AudioTimestamp.Timebase int timebase)
+ *
+ * Some differences between this method and the getTimestamp(AudioTimestamp& timestamp) method
+ *
+ * 1. stop() by itself does not reset the frame position.
+ * A following start() resets the frame position to 0.
+ * 2. flush() by itself does not reset the frame position.
+ * The frame position advances by the number of frames flushed,
+ * when the first frame after flush reaches the audio sink.
+ * 3. BOOTTIME clock offsets are provided to help synchronize with
+ * non-audio streams, e.g. sensor data.
+ * 4. Position is returned with 64 bits of resolution.
+ *
+ * Parameters:
+ * timestamp: A pointer to the caller allocated ExtendedTimestamp.
+ *
+ * Returns NO_ERROR on success; timestamp is filled with valid data.
+ * BAD_VALUE if timestamp is NULL.
+ * WOULD_BLOCK if called immediately after start() when the number
+ * of frames consumed is less than the
+ * overall hardware latency to physical output. In WOULD_BLOCK cases,
+ * one might poll again, or use getPosition(), or use 0 position and
+ * current time for the timestamp.
+ * If WOULD_BLOCK is returned, the timestamp is still
+ * modified with the LOCATION_CLIENT portion filled.
+ * DEAD_OBJECT if AudioFlinger dies or the output device changes and
+ * the track cannot be automatically restored.
+ * The application needs to recreate the AudioTrack
+ * because the audio device changed or AudioFlinger died.
+ * This typically occurs for direct or offloaded tracks
+ * or if mDoNotReconnect is true.
+ * INVALID_OPERATION if called on a offloaded or direct track.
+ * Use getTimestamp(AudioTimestamp& timestamp) instead.
+ */
+ status_t getTimestamp(ExtendedTimestamp *timestamp);
+
+ /* Set source playback rate for timestretch
+ * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
+ * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
+ *
+ * AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX
+ * AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX
+ *
+ * Speed increases the playback rate of media, but does not alter pitch.
+ * Pitch increases the "tonal frequency" of media, but does not affect the playback rate.
+ */
+ status_t setPlaybackRate(const AudioPlaybackRate &playbackRate);
+
+ /* Return current playback rate */
+ const AudioPlaybackRate getPlaybackRate();
+
+ /* Sets the volume shaper object */
+ media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation);
+
+ /* Set the send level for this track. An auxiliary effect should be attached
+ * to the track with attachEffect(). Level must be >= 0.0 and <= 1.0.
+ */
+ status_t setAuxEffectSendLevel(float level);
+
+ /* Attach track auxiliary output to specified effect. Use effectId = 0
+ * to detach track from effect.
+ *
+ * Parameters:
+ *
+ * effectId: effectId obtained from AudioEffect::id().
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: The effect is not an auxiliary effect.
+ * - BAD_VALUE: The specified effect ID is invalid.
+ */
+ status_t attachAuxEffect(int effectId);
+
+ /* Set volume for this track, mostly used for games' sound effects
+ * left and right volumes. Levels must be >= 0.0 and <= 1.0.
+ * This is the older API. New applications should use setVolume(float) when possible.
+ */
+ status_t setVolume(float left, float right);
+
+ /* Set volume for all channels. This is the preferred API for new applications,
+ * especially for multi-channel content.
+ */
+ status_t setVolume(float volume);
+
+ // TODO: Does this comment equally apply to the Java AudioTrack::play()?
+ /* After it's created the track is not active. Call start() to
+ * make it active. If set, the callback will start being called.
+ * If the track was previously paused, volume is ramped up over the first mix buffer.
+ */
+ status_t start();
+
+    // TODO: Does this comment still apply? It seems not. (obtainBuffer, AudioFlinger, ...)
+ /* As a convenience we provide a write() interface to the audio buffer.
+ * Input parameter 'size' is in byte units.
+ * This is implemented on top of obtainBuffer/releaseBuffer. For best
+ * performance use callbacks. Returns actual number of bytes written >= 0,
+ * or one of the following negative status codes:
+ * INVALID_OPERATION AudioTrack is configured for static buffer or streaming mode
+ * BAD_VALUE size is invalid
+ * WOULD_BLOCK when obtainBuffer() returns same, or
+ * AudioTrack was stopped during the write
+ * DEAD_OBJECT when AudioFlinger dies or the output device changes and
+ * the track cannot be automatically restored.
+ * The application needs to recreate the AudioTrack
+ * because the audio device changed or AudioFlinger died.
+ * This typically occurs for direct or offload tracks
+ * or if mDoNotReconnect is true.
+ * or any other error code returned by IAudioTrack::start() or restoreTrack_l().
+ * Default behavior is to only return when all data has been transferred. Set 'blocking' to
+ * false for the method to return immediately without waiting to try multiple times to write
+ * the full content of the buffer.
+ */
+ ssize_t write(const void* buffer, size_t size, bool blocking = true);
+
+ // TODO: Does this comment equally apply to the Java AudioTrack::stop()?
+ /* Stop a track.
+ * In static buffer mode, the track is stopped immediately.
+ * In streaming mode, the callback will cease being called. Note that obtainBuffer() still
+ * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK.
+ * In streaming mode the stop does not occur immediately: any data remaining in the buffer
+ * is first drained, mixed, and output, and only then is the track marked as stopped.
+ */
+ void stop();
+ bool stopped() const;
+
+ // TODO: Does this comment equally apply to the Java AudioTrack::flush()?
+ /* Flush a stopped or paused track. All previously buffered data is discarded immediately.
+ * This has the effect of draining the buffers without mixing or output.
+ * Flush is intended for streaming mode, for example before switching to non-contiguous content.
+ * This function is a no-op if the track is not stopped or paused, or uses a static buffer.
+ */
+ void flush();
+
+ // TODO: Does this comment equally apply to the Java AudioTrack::pause()?
+ // At least we are not using obtainBuffer.
+ /* Pause a track. After pause, the callback will cease being called and
+ * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works
+ * and will fill up buffers until the pool is exhausted.
+ * Volume is ramped down over the next mix buffer following the pause request,
+ * and then the track is marked as paused. It can be resumed with ramp up by start().
+ */
+ void pause();
+
+ bool isPlaying() const;
+
+ /* Return current source sample rate in Hz.
+ * If specified as zero in constructor, this will be the sink sample rate.
+ */
+ uint32_t getSampleRate();
+
+ /* Returns the buffer duration in microseconds at current playback rate. */
+ status_t getBufferDurationInUs(int64_t *duration);
+
+ audio_format_t format();
+
+ /*
+ * Dumps the state of an audio track.
+ * Not a general-purpose API; intended only for use by media player service to dump its tracks.
+ */
+ status_t dump(int fd, const Vector<String16>& args) const;
+
+ /* Returns the ID of the audio device actually used by the output to which this AudioTrack is
+ * attached. When the AudioTrack is inactive, it will return AUDIO_PORT_HANDLE_NONE.
+ */
+ audio_port_handle_t getRoutedDeviceId();
+
+ /* Returns the ID of the audio session this AudioTrack belongs to. */
+ audio_session_t getAudioSessionId();
+
+ /* Selects the audio device to use for output of this AudioTrack. A value of
+ * AUDIO_PORT_HANDLE_NONE indicates default routing.
+ *
+ * Parameters:
+ * The device ID of the selected device (as returned by the AudioDevicesManager API).
+ *
+ * Returned value:
+ * - NO_ERROR: successful operation
+ * - BAD_VALUE: failed to find the valid output device with given device Id.
+ */
+ status_t setOutputDevice(audio_port_handle_t deviceId);
+
+ // TODO: Add AUDIO_OUTPUT_FLAG_DIRECT when it is possible to check.
+ // TODO: Add AUDIO_FLAG_HW_AV_SYNC when it is possible to check.
+ /* Returns the flags */
+ audio_output_flags_t getFlags() const { return mFlags; }
+
+ /* Obtain the pending duration in milliseconds for playback of pure PCM data remaining in
+ * AudioTrack.
+ *
+ * Returns NO_ERROR if successful.
+ * INVALID_OPERATION if the AudioTrack does not contain pure PCM data.
+ * BAD_VALUE if msec is nullptr.
+ */
+ status_t pendingDuration(int32_t *msec);
+
+ /* Adds an AudioDeviceCallback. The caller will be notified when the audio device to which this
+ * AudioTrack is routed is updated.
+ * Replaces any previously installed callback.
+ *
+ * Parameters:
+ *
+ * callback: The callback interface
+ *
+ * Returns NO_ERROR if successful.
+ * INVALID_OPERATION if the same callback is already installed.
+     * NO_INIT or PERMISSION_DENIED if AudioFlinger service is not reachable
+ * BAD_VALUE if the callback is NULL
+ */
+ status_t addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+ /* Removes an AudioDeviceCallback.
+ *
+ * Parameters:
+ *
+ * callback: The callback interface
+ *
+ * Returns NO_ERROR if successful.
+ * INVALID_OPERATION if the callback is not installed
+ * BAD_VALUE if the callback is NULL
+ */
+ status_t removeAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+private:
+ audio_output_flags_t mFlags;
+
+ jclass mAudioTrackCls;
+ jobject mAudioTrackObj;
+
+ /* Creates a Java VolumeShaper.Configuration object from VolumeShaper::Configuration */
+ jobject createVolumeShaperConfigurationObj(
+ const sp<media::VolumeShaper::Configuration>& config);
+
+ /* Creates a Java VolumeShaper.Operation object from VolumeShaper::Operation */
+ jobject createVolumeShaperOperationObj(
+ const sp<media::VolumeShaper::Operation>& operation);
+
+ /* Creates a Java StreamEventCallback object */
+ jobject createStreamEventCallback(callback_t cbf, void* user);
+
+ /* Creates a Java Executor object for running a callback */
+ jobject createCallbackExecutor();
+
+ status_t javaToNativeStatus(int javaStatus);
+};
+
+}; // namespace android
+
+#endif // ANDROID_JAUDIOTRACK_H
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
new file mode 100644
index 0000000..5d5b8e4
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
@@ -0,0 +1,191 @@
+/*
+**
+** Copyright 2018, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_MEDIAPLAYER2AUDIOOUTPUT_H
+#define ANDROID_MEDIAPLAYER2AUDIOOUTPUT_H
+
+#include <mediaplayer2/MediaPlayer2Interface.h>
+
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+class AudioTrack;
+
+class MediaPlayer2AudioOutput : public MediaPlayer2Interface::AudioSink
+{
+ class CallbackData;
+
+public:
+ MediaPlayer2AudioOutput(audio_session_t sessionId,
+ uid_t uid,
+ int pid,
+ const audio_attributes_t * attr,
+ const sp<AudioSystem::AudioDeviceCallback>& deviceCallback);
+ virtual ~MediaPlayer2AudioOutput();
+
+ virtual bool ready() const {
+ return mTrack != 0;
+ }
+ virtual ssize_t bufferSize() const;
+ virtual ssize_t frameCount() const;
+ virtual ssize_t channelCount() const;
+ virtual ssize_t frameSize() const;
+ virtual uint32_t latency() const;
+ virtual float msecsPerFrame() const;
+ virtual status_t getPosition(uint32_t *position) const;
+ virtual status_t getTimestamp(AudioTimestamp &ts) const;
+ virtual int64_t getPlayedOutDurationUs(int64_t nowUs) const;
+ virtual status_t getFramesWritten(uint32_t *frameswritten) const;
+ virtual audio_session_t getSessionId() const;
+ virtual uint32_t getSampleRate() const;
+ virtual int64_t getBufferDurationInUs() const;
+
+ virtual status_t open(
+ uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+ audio_format_t format, int bufferCount,
+ AudioCallback cb, void *cookie,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ const audio_offload_info_t *offloadInfo = NULL,
+ bool doNotReconnect = false,
+ uint32_t suggestedFrameCount = 0);
+
+ virtual status_t start();
+ virtual ssize_t write(const void* buffer, size_t size, bool blocking = true);
+ virtual void stop();
+ virtual void flush();
+ virtual void pause();
+ virtual void close();
+ void setAudioStreamType(audio_stream_type_t streamType);
+ virtual audio_stream_type_t getAudioStreamType() const {
+ return mStreamType;
+ }
+ void setAudioAttributes(const audio_attributes_t * attributes);
+
+ void setVolume(float left, float right);
+ virtual status_t setPlaybackRate(const AudioPlaybackRate& rate);
+ virtual status_t getPlaybackRate(AudioPlaybackRate* rate /* nonnull */);
+
+ status_t setAuxEffectSendLevel(float level);
+ status_t attachAuxEffect(int effectId);
+ virtual status_t dump(int fd, const Vector<String16>& args) const;
+
+ static bool isOnEmulator();
+ static int getMinBufferCount();
+ virtual bool needsTrailingPadding() {
+ return true;
+ // TODO: return correct value.
+ //return mNextOutput == NULL;
+ }
+ virtual status_t setParameters(const String8& keyValuePairs);
+ virtual String8 getParameters(const String8& keys);
+
+ // AudioRouting
+ virtual status_t setOutputDevice(audio_port_handle_t deviceId);
+ virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
+ virtual status_t enableAudioDeviceCallback(bool enabled);
+
+private:
+ static void setMinBufferCount();
+ static void CallbackWrapper(int event, void *me, void *info);
+ void deleteRecycledTrack_l();
+ void close_l();
+ status_t updateTrack_l();
+
+ sp<AudioTrack> mTrack;
+ AudioCallback mCallback;
+ void * mCallbackCookie;
+ CallbackData * mCallbackData;
+ audio_stream_type_t mStreamType;
+ audio_attributes_t * mAttributes;
+ float mLeftVolume;
+ float mRightVolume;
+ AudioPlaybackRate mPlaybackRate;
+ uint32_t mSampleRateHz; // sample rate of the content, as set in open()
+ float mMsecsPerFrame;
+ size_t mFrameSize;
+ audio_session_t mSessionId;
+ uid_t mUid;
+ int mPid;
+ float mSendLevel;
+ int mAuxEffectId;
+ audio_output_flags_t mFlags;
+ audio_port_handle_t mSelectedDeviceId;
+ audio_port_handle_t mRoutedDeviceId;
+ bool mDeviceCallbackEnabled;
+ wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+ mutable Mutex mLock;
+
+ // static variables below not protected by mutex
+ static bool mIsOnEmulator;
+ static int mMinBufferCount; // 12 for emulator; otherwise 4
+
+ // CallbackData is what is passed to the AudioTrack as the "user" data.
+ // We need to be able to target this to a different Output on the fly,
+ // so we can't use the Output itself for this.
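+ // Illustrative only (assumed usage, based on the methods below): handing the
+ // callback data over to a new output would look roughly like
+ //     if (data->tryBeginTrackSwitch()) {
+ //         data->setOutput(newOutput);
+ //         data->endTrackSwitch();
+ //     }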
+ class CallbackData {
+ friend MediaPlayer2AudioOutput;
+ public:
+ explicit CallbackData(MediaPlayer2AudioOutput *cookie) {
+ mData = cookie;
+ mSwitching = false;
+ }
+ MediaPlayer2AudioOutput *getOutput() const {
+ return mData;
+ }
+ void setOutput(MediaPlayer2AudioOutput* newcookie) {
+ mData = newcookie;
+ }
+ // lock/unlock are used by the callback before accessing the payload of this object
+ void lock() const {
+ mLock.lock();
+ }
+ void unlock() const {
+ mLock.unlock();
+ }
+
+ // tryBeginTrackSwitch/endTrackSwitch are used when the CallbackData is handed over
+ // to the next sink.
+
+ // tryBeginTrackSwitch() returns true only if it obtains the lock.
+ bool tryBeginTrackSwitch() {
+ LOG_ALWAYS_FATAL_IF(mSwitching, "tryBeginTrackSwitch() already called");
+ if (mLock.tryLock() != OK) {
+ return false;
+ }
+ mSwitching = true;
+ return true;
+ }
+ void endTrackSwitch() {
+ if (mSwitching) {
+ mLock.unlock();
+ }
+ mSwitching = false;
+ }
+
+ private:
+ MediaPlayer2AudioOutput *mData;
+ mutable Mutex mLock; // a recursive mutex might make this unnecessary.
+ bool mSwitching;
+ DISALLOW_EVIL_CONSTRUCTORS(CallbackData);
+ };
+};
+
+}; // namespace android
+
+#endif // ANDROID_MEDIAPLAYER2AUDIOOUTPUT_H
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
new file mode 100644
index 0000000..02bf891
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIAPLAYER2INTERFACE_H
+#define ANDROID_MEDIAPLAYER2INTERFACE_H
+
+#ifdef __cplusplus
+
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <utils/RefBase.h>
+
+#include <media/AVSyncSettings.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/AudioSystem.h>
+#include <media/AudioTimestamp.h>
+#include <media/BufferingSettings.h>
+#include <media/Metadata.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <mediaplayer2/MediaPlayer2Types.h>
+
+// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
+// global, and not in android::
+struct sockaddr_in;
+
+namespace android {
+
+struct DataSourceDesc;
+class Parcel;
+struct ANativeWindowWrapper;
+
+#define DEFAULT_AUDIOSINK_BUFFERCOUNT 4
+#define DEFAULT_AUDIOSINK_BUFFERSIZE 1200
+#define DEFAULT_AUDIOSINK_SAMPLERATE 44100
+
+// when the channel mask isn't known, use the channel count to derive a mask in AudioSink::open()
+#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
+
+// duration below which we do not allow deep audio buffering
+#define AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US 5000000
+
+class MediaPlayer2InterfaceListener: public RefBase
+{
+public:
+ virtual void notify(int64_t srcId, int msg, int ext1, int ext2, const Parcel *obj) = 0;
+};
+
+class MediaPlayer2Interface : public AHandler {
+public:
+ // AudioSink: abstraction layer for audio output
+ class AudioSink : public RefBase {
+ public:
+ enum cb_event_t {
+ CB_EVENT_FILL_BUFFER, // Request to write more data to buffer.
+ CB_EVENT_STREAM_END, // Sent after all the buffers queued in AF and HW are played
+ // back (after stop is called)
+ CB_EVENT_TEAR_DOWN // The AudioTrack was invalidated due to use case change:
+ // Need to re-evaluate offloading options
+ };
+
+ // Callback returns the number of bytes actually written to the buffer.
+ typedef size_t (*AudioCallback)(
+ AudioSink *audioSink, void *buffer, size_t size, void *cookie, cb_event_t event);
+
+ virtual ~AudioSink() {}
+ virtual bool ready() const = 0; // audio output is open and ready
+ virtual ssize_t bufferSize() const = 0;
+ virtual ssize_t frameCount() const = 0;
+ virtual ssize_t channelCount() const = 0;
+ virtual ssize_t frameSize() const = 0;
+ virtual uint32_t latency() const = 0;
+ virtual float msecsPerFrame() const = 0;
+ virtual status_t getPosition(uint32_t *position) const = 0;
+ virtual status_t getTimestamp(AudioTimestamp &ts) const = 0;
+ virtual int64_t getPlayedOutDurationUs(int64_t nowUs) const = 0;
+ virtual status_t getFramesWritten(uint32_t *frameswritten) const = 0;
+ virtual audio_session_t getSessionId() const = 0;
+ virtual audio_stream_type_t getAudioStreamType() const = 0;
+ virtual uint32_t getSampleRate() const = 0;
+ virtual int64_t getBufferDurationInUs() const = 0;
+
+ // If no callback is specified, use the "write" API below to submit
+ // audio data.
+ virtual status_t open(
+ uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+ audio_format_t format=AUDIO_FORMAT_PCM_16_BIT,
+ int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT,
+ AudioCallback cb = NULL,
+ void *cookie = NULL,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ const audio_offload_info_t *offloadInfo = NULL,
+ bool doNotReconnect = false,
+ uint32_t suggestedFrameCount = 0) = 0;
+
+ virtual status_t start() = 0;
+
+ /* Input parameter |size| is in byte units stored in |buffer|.
+ * Data is copied over and actual number of bytes written (>= 0)
+ * is returned, or no data is copied and a negative status code
+ * is returned (even when |blocking| is true).
+ * When |blocking| is false, AudioSink will immediately return after
+ * part of or full |buffer| is copied over.
+ * When |blocking| is true, AudioSink will wait to copy the entire
+ * buffer, unless an error occurs or the copy operation is
+ * prematurely stopped.
+ */
+ virtual ssize_t write(const void* buffer, size_t size, bool blocking = true) = 0;
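+
+ // Illustrative only (assumed caller-side pattern): with |blocking| == false a
+ // producer typically resubmits the unwritten remainder, e.g.
+ //     ssize_t n = sink->write(data + offset, size - offset, false);
+ //     if (n < 0) break;       // negative values are status_t errors
+ //     offset += n;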
+
+ virtual void stop() = 0;
+ virtual void flush() = 0;
+ virtual void pause() = 0;
+ virtual void close() = 0;
+
+ virtual status_t setPlaybackRate(const AudioPlaybackRate& rate) = 0;
+ virtual status_t getPlaybackRate(AudioPlaybackRate* rate /* nonnull */) = 0;
+ virtual bool needsTrailingPadding() {
+ return true;
+ }
+
+ virtual status_t setParameters(const String8& /* keyValuePairs */) {
+ return NO_ERROR;
+ }
+ virtual String8 getParameters(const String8& /* keys */) {
+ return String8::empty();
+ }
+
+ // AudioRouting
+ virtual status_t setOutputDevice(audio_port_handle_t deviceId);
+ virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
+ virtual status_t enableAudioDeviceCallback(bool enabled);
+ };
+
+ MediaPlayer2Interface() : mListener(NULL) { }
+ virtual ~MediaPlayer2Interface() { }
+ virtual status_t initCheck() = 0;
+
+ virtual void setAudioSink(const sp<AudioSink>& audioSink) {
+ mAudioSink = audioSink;
+ }
+
+ virtual status_t setDataSource(const sp<DataSourceDesc> &dsd) = 0;
+
+ virtual status_t prepareNextDataSource(const sp<DataSourceDesc> &dsd) = 0;
+
+ virtual status_t playNextDataSource(int64_t srcId) = 0;
+
+ // pass the buffered native window to the media player service
+ virtual status_t setVideoSurfaceTexture(const sp<ANativeWindowWrapper>& nww) = 0;
+
+ virtual status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */) {
+ *buffering = BufferingSettings();
+ return OK;
+ }
+ virtual status_t setBufferingSettings(const BufferingSettings& /* buffering */) {
+ return OK;
+ }
+
+ virtual status_t prepareAsync() = 0;
+ virtual status_t start() = 0;
+ virtual status_t stop() = 0;
+ virtual status_t pause() = 0;
+ virtual bool isPlaying() = 0;
+ virtual status_t setPlaybackSettings(const AudioPlaybackRate& rate) {
+ // by default, players only support setting rate to the default
+ if (!isAudioPlaybackRateEqual(rate, AUDIO_PLAYBACK_RATE_DEFAULT)) {
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+ virtual status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) {
+ *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+ return OK;
+ }
+ virtual status_t setSyncSettings(const AVSyncSettings& sync, float /* videoFps */) {
+ // By default, players only support setting sync source to default; all other sync
+ // settings are ignored. There is no requirement for getters to return set values.
+ if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+ virtual status_t getSyncSettings(
+ AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */) {
+ *sync = AVSyncSettings();
+ *videoFps = -1.f;
+ return OK;
+ }
+ virtual status_t seekTo(
+ int64_t msec, MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) = 0;
+ virtual status_t getCurrentPosition(int64_t *msec) = 0;
+ virtual status_t getDuration(int64_t *msec) = 0;
+ virtual status_t reset() = 0;
+ virtual status_t notifyAt(int64_t /* mediaTimeUs */) {
+ return INVALID_OPERATION;
+ }
+ virtual status_t setLooping(int loop) = 0;
+ virtual status_t setParameter(int key, const Parcel &request) = 0;
+ virtual status_t getParameter(int key, Parcel *reply) = 0;
+
+ // Invoke a generic method on the player by using opaque parcels
+ // for the request and reply.
+ //
+ // @param request Parcel that is positioned at the start of the
+ // data sent by the java layer.
+ // @param[out] reply Parcel to hold the reply data. Cannot be null.
+ // @return OK if the call was successful.
+ virtual status_t invoke(const Parcel& request, Parcel *reply) = 0;
+
+ // The Client in the MetadataPlayerService calls this method on
+ // the native player to retrieve all or a subset of metadata.
+ //
+ // @param ids SortedList of metadata IDs to be fetched. If empty, all
+ // the known metadata should be returned.
+ // @param[inout] records Parcel where the player appends its metadata.
+ // @return OK if the call was successful.
+ virtual status_t getMetadata(const media::Metadata::Filter& /* ids */,
+ Parcel* /* records */) {
+ return INVALID_OPERATION;
+ };
+
+ void setListener(const sp<MediaPlayer2InterfaceListener> &listener) {
+ Mutex::Autolock autoLock(mListenerLock);
+ mListener = listener;
+ }
+
+ void sendEvent(int64_t srcId, int msg, int ext1=0, int ext2=0, const Parcel *obj=NULL) {
+ sp<MediaPlayer2InterfaceListener> listener;
+ {
+ Mutex::Autolock autoLock(mListenerLock);
+ listener = mListener;
+ }
+
+ if (listener) {
+ listener->notify(srcId, msg, ext1, ext2, obj);
+ }
+ }
+
+ virtual status_t dump(int /* fd */, const Vector<String16>& /* args */) const {
+ return INVALID_OPERATION;
+ }
+
+ virtual void onMessageReceived(const sp<AMessage> & /* msg */) override { }
+
+ // Modular DRM
+ virtual status_t prepareDrm(const uint8_t /* uuid */[16],
+ const Vector<uint8_t>& /* drmSessionId */) {
+ return INVALID_OPERATION;
+ }
+ virtual status_t releaseDrm() {
+ return INVALID_OPERATION;
+ }
+
+protected:
+ sp<AudioSink> mAudioSink;
+
+private:
+ Mutex mListenerLock;
+ sp<MediaPlayer2InterfaceListener> mListener;
+};
+
+}; // namespace android
+
+#endif // __cplusplus
+
+
+#endif // ANDROID_MEDIAPLAYER2INTERFACE_H
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
new file mode 100644
index 0000000..3905b55
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIAPLAYER2_TYPES_H
+#define ANDROID_MEDIAPLAYER2_TYPES_H
+
+#include <media/mediaplayer_common.h>
+
+#include <media/MediaSource.h>
+
+namespace android {
+
+typedef MediaSource::ReadOptions::SeekMode MediaPlayer2SeekMode;
+
+enum media2_event_type {
+ MEDIA2_NOP = 0, // interface test message
+ MEDIA2_PREPARED = 1,
+ MEDIA2_PLAYBACK_COMPLETE = 2,
+ MEDIA2_BUFFERING_UPDATE = 3,
+ MEDIA2_SEEK_COMPLETE = 4,
+ MEDIA2_SET_VIDEO_SIZE = 5,
+ MEDIA2_STARTED = 6,
+ MEDIA2_PAUSED = 7,
+ MEDIA2_STOPPED = 8,
+ MEDIA2_SKIPPED = 9,
+ MEDIA2_NOTIFY_TIME = 98,
+ MEDIA2_TIMED_TEXT = 99,
+ MEDIA2_ERROR = 100,
+ MEDIA2_INFO = 200,
+ MEDIA2_SUBTITLE_DATA = 201,
+ MEDIA2_META_DATA = 202,
+ MEDIA2_DRM_INFO = 210,
+ MEDIA2_AUDIO_ROUTING_CHANGED = 10000,
+};
+
+// Generic error codes for the media player framework. Errors are fatal;
+// playback must abort.
+//
+// Errors are communicated back to the client using the
+// MediaPlayer2Listener::notify method defined in mediaplayer2.h.
+// In this situation, 'notify' is invoked with the following:
+// 'msg' is set to MEDIA2_ERROR.
+// 'ext1' should be a value from the enum media2_error_type.
+// 'ext2' contains an implementation dependent error code to provide
+// more details. Should default to 0 when not used.
+//
+// The codes are distributed as follows:
+// 0xx: Reserved
+// 1xx: Android Player errors. Something went wrong inside the MediaPlayer2.
+// 2xx: Media errors (e.g Codec not supported). There is a problem with the
+// media itself.
+// 3xx: Runtime errors. Some extraordinary condition arose making the playback
+// impossible.
+//
+enum media2_error_type {
+ // 0xx
+ MEDIA2_ERROR_UNKNOWN = 1,
+ // 1xx
+ // MEDIA2_ERROR_SERVER_DIED = 100,
+ // 2xx
+ MEDIA2_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK = 200,
+ // 3xx
+ MEDIA2_ERROR_FAILED_TO_SET_DATA_SOURCE = 300,
+};
+
+
+// Info and warning codes for the media player framework. These are non-fatal;
+// playback continues, but there might be some user-visible issues.
+//
+// Info and warning messages are communicated back to the client using the
+// MediaPlayer2Listener::notify method defined in mediaplayer2.h. In this
+// situation, 'notify' is invoked with the following:
+// 'msg' is set to MEDIA2_INFO.
+// 'ext1' should be a value from the enum media2_info_type.
+// 'ext2' contains an implementation dependent info code to provide
+// more details. Should default to 0 when not used.
+//
+// The codes are distributed as follows:
+// 0xx: Reserved
+// 7xx: Android Player info/warning (e.g player lagging behind.)
+// 8xx: Media info/warning (e.g media badly interleaved.)
+//
+enum media2_info_type {
+ // 0xx
+ MEDIA2_INFO_UNKNOWN = 1,
+ // The player was started because it was used as the next player for another
+ // player, which just completed playback
+ MEDIA2_INFO_STARTED_AS_NEXT = 2,
+ // The player just pushed the very first video frame for rendering
+ MEDIA2_INFO_VIDEO_RENDERING_START = 3,
+ // The player just pushed the very first audio frame for rendering
+ MEDIA2_INFO_AUDIO_RENDERING_START = 4,
+ // The player just completed the playback of this data source
+ MEDIA2_INFO_PLAYBACK_COMPLETE = 5,
+ // The player just completed the playback of the full play list
+ MEDIA2_INFO_PLAYLIST_END = 6,
+
+ // 1xx
+ // The player just prepared a data source.
+ MEDIA2_INFO_PREPARED = 100,
+ // The player just completed a call play().
+ MEDIA2_INFO_COMPLETE_CALL_PLAY = 101,
+ // The player just completed a call pause().
+ MEDIA2_INFO_COMPLETE_CALL_PAUSE = 102,
+ // The player just completed a call seekTo.
+ MEDIA2_INFO_COMPLETE_CALL_SEEK = 103,
+
+ // 7xx
+ // The video is too complex for the decoder: it can't decode frames fast
+ // enough. Possibly only the audio plays fine at this stage.
+ MEDIA2_INFO_VIDEO_TRACK_LAGGING = 700,
+ // MediaPlayer2 is temporarily pausing playback internally in order to
+ // buffer more data.
+ MEDIA2_INFO_BUFFERING_START = 701,
+ // MediaPlayer2 is resuming playback after filling buffers.
+ MEDIA2_INFO_BUFFERING_END = 702,
+ // Bandwidth in recent past
+ MEDIA2_INFO_NETWORK_BANDWIDTH = 703,
+
+ // 8xx
+ // Bad interleaving means that a media has been improperly interleaved or not
+ // interleaved at all, e.g has all the video samples first then all the audio
+ // ones. Video is playing but a lot of disk seek may be happening.
+ MEDIA2_INFO_BAD_INTERLEAVING = 800,
+ // The media is not seekable (e.g live stream).
+ MEDIA2_INFO_NOT_SEEKABLE = 801,
+ // New media metadata is available.
+ MEDIA2_INFO_METADATA_UPDATE = 802,
+ // Audio can not be played.
+ MEDIA2_INFO_PLAY_AUDIO_ERROR = 804,
+ // Video can not be played.
+ MEDIA2_INFO_PLAY_VIDEO_ERROR = 805,
+
+ // 9xx
+ MEDIA2_INFO_TIMED_TEXT_ERROR = 900,
+};
+
+// Do not change these values without updating their counterparts in MediaPlayer2.java
+enum mediaplayer2_states {
+ MEDIAPLAYER2_STATE_IDLE = 1,
+ MEDIAPLAYER2_STATE_PREPARED = 2,
+ MEDIAPLAYER2_STATE_PLAYING = 3,
+ MEDIAPLAYER2_STATE_PAUSED = 4,
+ MEDIAPLAYER2_STATE_ERROR = 5,
+};
+
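+// Internal states are bit flags (unlike mediaplayer2_states above) so that one
+// bitwise test can match several states at once, e.g.
+// (mCurrentState & (MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED)) != 0.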
+enum media_player2_internal_states {
+ MEDIA_PLAYER2_STATE_ERROR = 0,
+ MEDIA_PLAYER2_IDLE = 1 << 0,
+ MEDIA_PLAYER2_INITIALIZED = 1 << 1,
+ MEDIA_PLAYER2_PREPARING = 1 << 2,
+ MEDIA_PLAYER2_PREPARED = 1 << 3,
+ MEDIA_PLAYER2_STARTED = 1 << 4,
+ MEDIA_PLAYER2_PAUSED = 1 << 5,
+ MEDIA_PLAYER2_STOPPED = 1 << 6,
+ MEDIA_PLAYER2_PLAYBACK_COMPLETE = 1 << 7
+};
+
+// Keep KEY_PARAMETER_* in sync with MediaPlayer2.java.
+// The same enum space is used for both set and get, in case there are future keys that
+// can be both set and get. But as of now, all parameters are either set only or get only.
+enum media2_parameter_keys {
+ // Streaming/buffering parameters
+ MEDIA2_KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS = 1100, // set only
+
+ // Return a Parcel containing a single int, which is the channel count of the
+ // audio track, or zero for error (e.g. no audio track) or unknown.
+ MEDIA2_KEY_PARAMETER_AUDIO_CHANNEL_COUNT = 1200, // get only
+
+ // Playback rate expressed in permille (1000 is normal speed), saved as int32_t, with negative
+ // values used for rewinding or reverse playback.
+ MEDIA2_KEY_PARAMETER_PLAYBACK_RATE_PERMILLE = 1300, // set only
+
+ // Set a Parcel containing the value of a parcelled Java AudioAttribute instance
+ MEDIA2_KEY_PARAMETER_AUDIO_ATTRIBUTES = 1400 // set only
+};
+
+// Keep INVOKE_ID_* in sync with MediaPlayer2.java.
+enum media_player2_invoke_ids {
+ MEDIA_PLAYER2_INVOKE_ID_GET_TRACK_INFO = 1,
+ MEDIA_PLAYER2_INVOKE_ID_ADD_EXTERNAL_SOURCE = 2,
+ MEDIA_PLAYER2_INVOKE_ID_ADD_EXTERNAL_SOURCE_FD = 3,
+ MEDIA_PLAYER2_INVOKE_ID_SELECT_TRACK = 4,
+ MEDIA_PLAYER2_INVOKE_ID_UNSELECT_TRACK = 5,
+ MEDIA_PLAYER2_INVOKE_ID_SET_VIDEO_SCALING_MODE = 6,
+ MEDIA_PLAYER2_INVOKE_ID_GET_SELECTED_TRACK = 7
+};
+
+}; // namespace android
+
+#endif // ANDROID_MEDIAPLAYER2_TYPES_H
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
new file mode 100644
index 0000000..d586192
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIAPLAYER2_H
+#define ANDROID_MEDIAPLAYER2_H
+
+#include <media/AVSyncSettings.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/BufferingSettings.h>
+#include <media/Metadata.h>
+#include <media/mediaplayer_common.h>
+#include <mediaplayer2/MediaPlayer2Interface.h>
+#include <mediaplayer2/MediaPlayer2Types.h>
+
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <system/audio-base.h>
+
+namespace android {
+
+struct ANativeWindowWrapper;
+struct DataSourceDesc;
+class MediaPlayer2AudioOutput;
+
+// ref-counted object for callbacks
+class MediaPlayer2Listener: virtual public RefBase
+{
+public:
+ virtual void notify(int64_t srcId, int msg, int ext1, int ext2, const Parcel *obj) = 0;
+};
+
+class MediaPlayer2 : public MediaPlayer2InterfaceListener
+{
+public:
+ ~MediaPlayer2();
+
+ static sp<MediaPlayer2> Create();
+ static status_t DumpAll(int fd, const Vector<String16>& args);
+
+ void disconnect();
+
+ status_t getSrcId(int64_t *srcId);
+ status_t setDataSource(const sp<DataSourceDesc> &dsd);
+ status_t prepareNextDataSource(const sp<DataSourceDesc> &dsd);
+ status_t playNextDataSource(int64_t srcId);
+ status_t setVideoSurfaceTexture(const sp<ANativeWindowWrapper>& nww);
+ status_t setListener(const sp<MediaPlayer2Listener>& listener);
+ status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */);
+ status_t setBufferingSettings(const BufferingSettings& buffering);
+ status_t prepareAsync();
+ status_t start();
+ status_t stop();
+ status_t pause();
+ bool isPlaying();
+ mediaplayer2_states getMediaPlayer2State();
+ status_t setPlaybackSettings(const AudioPlaybackRate& rate);
+ status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
+ status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint);
+ status_t getSyncSettings(
+ AVSyncSettings* sync /* nonnull */,
+ float* videoFps /* nonnull */);
+ status_t getVideoWidth(int *w);
+ status_t getVideoHeight(int *h);
+ status_t seekTo(
+ int64_t msec,
+ MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC);
+ status_t notifyAt(int64_t mediaTimeUs);
+ status_t getCurrentPosition(int64_t *msec);
+ status_t getDuration(int64_t *msec);
+ status_t reset();
+ status_t setAudioStreamType(audio_stream_type_t type);
+ status_t getAudioStreamType(audio_stream_type_t *type);
+ status_t setLooping(int loop);
+ bool isLooping();
+ status_t setVolume(float leftVolume, float rightVolume);
+ void notify(int64_t srcId, int msg, int ext1, int ext2,
+ const Parcel *obj = NULL);
+ status_t invoke(const Parcel& request, Parcel *reply);
+ status_t setMetadataFilter(const Parcel& filter);
+ status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
+ status_t setAudioSessionId(audio_session_t sessionId);
+ audio_session_t getAudioSessionId();
+ status_t setAuxEffectSendLevel(float level);
+ status_t attachAuxEffect(int effectId);
+ status_t setParameter(int key, const Parcel& request);
+ status_t getParameter(int key, Parcel* reply);
+
+ // Modular DRM
+ status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId);
+ status_t releaseDrm();
+ // AudioRouting
+ status_t setOutputDevice(audio_port_handle_t deviceId);
+ audio_port_handle_t getRoutedDeviceId();
+ status_t enableAudioDeviceCallback(bool enabled);
+
+ status_t dump(int fd, const Vector<String16>& args);
+
+private:
+ MediaPlayer2();
+ bool init();
+
+ // @param type Of the metadata to be tested.
+ // @return true if the metadata should be dropped according to
+ // the filters.
+ bool shouldDropMetadata(media::Metadata::Type type) const;
+
+ // Add a new element to the set of metadata updated. Noop if
+ // the element exists already.
+ // @param type Of the metadata to be recorded.
+ void addNewMetadataUpdate(media::Metadata::Type type);
+
+ // Disconnect from the currently connected ANativeWindow.
+ void disconnectNativeWindow_l();
+
+ status_t setAudioAttributes_l(const Parcel &request);
+
+ void clear_l();
+ status_t seekTo_l(int64_t msec, MediaPlayer2SeekMode mode);
+ status_t prepareAsync_l();
+ status_t getDuration_l(int64_t *msec);
+ status_t reset_l();
+ status_t checkStateForKeySet_l(int key);
+
+ pid_t mPid;
+ uid_t mUid;
+ sp<MediaPlayer2Interface> mPlayer;
+ sp<MediaPlayer2AudioOutput> mAudioOutput;
+ int64_t mSrcId;
+ thread_id_t mLockThreadId;
+ mutable Mutex mLock;
+ Mutex mNotifyLock;
+ sp<MediaPlayer2Listener> mListener;
+ media_player2_internal_states mCurrentState;
+ int64_t mCurrentPosition;
+ MediaPlayer2SeekMode mCurrentSeekMode;
+ int64_t mSeekPosition;
+ MediaPlayer2SeekMode mSeekMode;
+ audio_stream_type_t mStreamType;
+ Parcel* mAudioAttributesParcel;
+ bool mLoop;
+ float mLeftVolume;
+ float mRightVolume;
+ int mVideoWidth;
+ int mVideoHeight;
+ audio_session_t mAudioSessionId;
+ audio_attributes_t * mAudioAttributes;
+ float mSendLevel;
+
+ sp<ANativeWindowWrapper> mConnectedWindow;
+
+ // Metadata filters.
+ media::Metadata::Filter mMetadataAllow; // protected by mLock
+ media::Metadata::Filter mMetadataDrop; // protected by mLock
+
+ // Metadata updated. For each MEDIA2_INFO_METADATA_UPDATE
+ // notification we try to update mMetadataUpdated, which is a
+ // set (no duplicates).
+ // getMetadata clears this set.
+ media::Metadata::Filter mMetadataUpdated; // protected by mLock
+};
+
+}; // namespace android
+
+#endif // ANDROID_MEDIAPLAYER2_H
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
new file mode 100644
index 0000000..e5567dc
--- /dev/null
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -0,0 +1,1552 @@
+/*
+**
+** Copyright 2017, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaPlayer2Native"
+
+#include <binder/IServiceManager.h>
+#include <binder/IPCThreadState.h>
+
+#include <media/AudioSystem.h>
+#include <media/DataSourceDesc.h>
+#include <media/MediaAnalyticsItem.h>
+#include <media/MemoryLeakTrackUtil.h>
+#include <media/Metadata.h>
+#include <media/NdkWrapper.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooperRoster.h>
+#include <mediaplayer2/MediaPlayer2AudioOutput.h>
+#include <mediaplayer2/mediaplayer2.h>
+
+#include <utils/Log.h>
+#include <utils/SortedVector.h>
+#include <utils/String8.h>
+
+#include <system/audio.h>
+#include <system/window.h>
+
+#include <nuplayer2/NuPlayer2Driver.h>
+
+#include <dirent.h>
+#include <sys/stat.h>
+
+namespace android {
+
+extern ALooperRoster gLooperRoster;
+
+namespace {
+
+const int kDumpLockRetries = 50;
+const int kDumpLockSleepUs = 20000;
+
+// Max number of entries in the filter.
+const int kMaxFilterSize = 64; // I pulled that out of thin air.
+
+// FIXME: Move all the metadata related function in the Metadata.cpp
+
+// Unmarshall a filter from a Parcel.
+// Filter format in a parcel:
+//
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | number of entries (n) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | metadata type 1 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | metadata type 2 |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// ....
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | metadata type n |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// @param p Parcel that should start with a filter.
+// @param[out] filter On exit contains the list of metadata type to be
+// filtered.
+// @param[out] status On exit contains the status code to be returned.
+// @return true if the parcel starts with a valid filter.
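+//
+// Illustrative only: a two-entry filter matching the layout above could be
+// assembled by a caller roughly as follows (assuming kTitle and kDuration are
+// the metadata types of interest):
+//
+//     Parcel filter;
+//     filter.writeInt32(2);                          // number of entries (n)
+//     filter.writeInt32(media::Metadata::kTitle);    // metadata type 1
+//     filter.writeInt32(media::Metadata::kDuration); // metadata type 2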
+bool unmarshallFilter(const Parcel& p,
+ media::Metadata::Filter *filter,
+ status_t *status) {
+ int32_t val;
+ if (p.readInt32(&val) != OK) {
+ ALOGE("Failed to read filter's length");
+ *status = NOT_ENOUGH_DATA;
+ return false;
+ }
+
+ if (val > kMaxFilterSize || val < 0) {
+ ALOGE("Invalid filter len %d", val);
+ *status = BAD_VALUE;
+ return false;
+ }
+
+ const size_t num = val;
+
+ filter->clear();
+ filter->setCapacity(num);
+
+ size_t size = num * sizeof(media::Metadata::Type);
+
+
+ if (p.dataAvail() < size) {
+ ALOGE("Filter too short expected %zu but got %zu", size, p.dataAvail());
+ *status = NOT_ENOUGH_DATA;
+ return false;
+ }
+
+ const media::Metadata::Type *data =
+ static_cast<const media::Metadata::Type*>(p.readInplace(size));
+
+ if (NULL == data) {
+ ALOGE("Filter had no data");
+ *status = BAD_VALUE;
+ return false;
+ }
+
+ // TODO: The stl impl of vector would be more efficient here
+ // because it degenerates into a memcpy on pod types. Try to
+ // replace later or use stl::set.
+ for (size_t i = 0; i < num; ++i) {
+ filter->add(*data);
+ ++data;
+ }
+ *status = OK;
+ return true;
+}
+
+// @param filter Of metadata type.
+// @param val To be searched.
+// @return true if a match was found.
+bool findMetadata(const media::Metadata::Filter& filter, const int32_t val) {
+ // Deal with empty and ANY right away
+ if (filter.isEmpty()) {
+ return false;
+ }
+ if (filter[0] == media::Metadata::kAny) {
+ return true;
+ }
+
+ return filter.indexOf(val) >= 0;
+}
+
+// marshalling tag indicating flattened utf16 tags
+// keep in sync with frameworks/base/media/java/android/media/AudioAttributes.java
+const int32_t kAudioAttributesMarshallTagFlattenTags = 1;
+
+// Audio attributes format in a parcel:
+//
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | usage |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | content_type |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | source |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | flags |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | kAudioAttributesMarshallTagFlattenTags | // ignore tags if not found
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | flattened tags in UTF16 |
+// | ... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// @param parcel Parcel that contains the audio attributes.
+// @param[out] attributes On exit points to an initialized audio_attributes_t structure.
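+//
+// Illustrative only: a minimal parcel matching this layout, carrying no
+// flattened tags, could be produced with something like
+//
+//     Parcel attrs;
+//     attrs.writeInt32(AUDIO_USAGE_MEDIA);        // usage
+//     attrs.writeInt32(AUDIO_CONTENT_TYPE_MUSIC); // content_type
+//     attrs.writeInt32(AUDIO_SOURCE_DEFAULT);     // source
+//     attrs.writeInt32(AUDIO_FLAG_NONE);          // flags
+//     attrs.writeInt32(0);                        // no flatten-tags marker -> tags ignored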
+void unmarshallAudioAttributes(const Parcel& parcel, audio_attributes_t *attributes) {
+ attributes->usage = (audio_usage_t) parcel.readInt32();
+ attributes->content_type = (audio_content_type_t) parcel.readInt32();
+ attributes->source = (audio_source_t) parcel.readInt32();
+ attributes->flags = (audio_flags_mask_t) parcel.readInt32();
+ const bool hasFlattenedTag = (parcel.readInt32() == kAudioAttributesMarshallTagFlattenTags);
+ if (hasFlattenedTag) {
+ // the tags are UTF16, convert to UTF8
+ String16 tags = parcel.readString16();
+ ssize_t realTagSize = utf16_to_utf8_length(tags.string(), tags.size());
+ if (realTagSize <= 0) {
+ strcpy(attributes->tags, "");
+ } else {
+ // copy the flattened string into the attributes as the destination for the conversion:
+ // copying array size -1, array for tags was calloc'd, no need to NULL-terminate it
+ size_t tagSize = realTagSize > AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1 ?
+ AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1 : realTagSize;
+ utf16_to_utf8(tags.string(), tagSize, attributes->tags,
+ sizeof(attributes->tags) / sizeof(attributes->tags[0]));
+ }
+ } else {
+ ALOGE("unmarshallAudioAttributes() received unflattened tags, ignoring tag values");
+ strcpy(attributes->tags, "");
+ }
+}
+
+class AudioDeviceUpdatedNotifier: public AudioSystem::AudioDeviceCallback {
+public:
+ AudioDeviceUpdatedNotifier(const sp<MediaPlayer2Interface>& listener)
+ : mListener(listener) { }
+
+ ~AudioDeviceUpdatedNotifier() { }
+
+ virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
+ audio_port_handle_t deviceId) override {
+ sp<MediaPlayer2Interface> listener = mListener.promote();
+ if (listener != NULL) {
+ listener->sendEvent(0, MEDIA2_AUDIO_ROUTING_CHANGED, audioIo, deviceId);
+ } else {
+ ALOGW("listener for process %d death is gone", MEDIA2_AUDIO_ROUTING_CHANGED);
+ }
+ }
+
+private:
+ wp<MediaPlayer2Interface> mListener;
+};
+
+class proxyListener : public MediaPlayer2InterfaceListener {
+public:
+ proxyListener(const wp<MediaPlayer2> &player)
+ : mPlayer(player) { }
+
+ ~proxyListener() { };
+
+ virtual void notify(int64_t srcId, int msg, int ext1, int ext2, const Parcel *obj) override {
+ sp<MediaPlayer2> player = mPlayer.promote();
+ if (player != NULL) {
+ player->notify(srcId, msg, ext1, ext2, obj);
+ }
+ }
+
+private:
+ wp<MediaPlayer2> mPlayer;
+};
+
+Mutex sRecordLock;
+SortedVector<wp<MediaPlayer2> > *sPlayers;
+
+void ensureInit_l() {
+ if (sPlayers == NULL) {
+ sPlayers = new SortedVector<wp<MediaPlayer2> >();
+ }
+}
+
+void addPlayer(const wp<MediaPlayer2>& player) {
+ Mutex::Autolock lock(sRecordLock);
+ ensureInit_l();
+ sPlayers->add(player);
+}
+
+void removePlayer(const wp<MediaPlayer2>& player) {
+ Mutex::Autolock lock(sRecordLock);
+ ensureInit_l();
+ sPlayers->remove(player);
+}
+
+/**
+ * Besides -m and --unreachable (handled below), the only arguments this
+ * understands right now are -c, -von and -voff, which are parsed by
+ * ALooperRoster::dump().
+ */
+status_t dumpPlayers(int fd, const Vector<String16>& args) {
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+ SortedVector< sp<MediaPlayer2> > players; //to serialise the mutex unlock & client destruction.
+
+ if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+ snprintf(buffer, SIZE, "Permission Denial: can't dump MediaPlayer2\n");
+ result.append(buffer);
+ } else {
+ {
+ Mutex::Autolock lock(sRecordLock);
+ ensureInit_l();
+ for (int i = 0, n = sPlayers->size(); i < n; ++i) {
+ sp<MediaPlayer2> p = (*sPlayers)[i].promote();
+ if (p != 0) {
+ p->dump(fd, args);
+ }
+ players.add(p);
+ }
+ }
+
+ result.append(" Files opened and/or mapped:\n");
+ snprintf(buffer, SIZE, "/proc/%d/maps", getpid());
+ FILE *f = fopen(buffer, "r");
+ if (f) {
+ while (!feof(f)) {
+ fgets(buffer, SIZE, f);
+ if (strstr(buffer, " /storage/") ||
+ strstr(buffer, " /system/sounds/") ||
+ strstr(buffer, " /data/") ||
+ strstr(buffer, " /system/media/")) {
+ result.append(" ");
+ result.append(buffer);
+ }
+ }
+ fclose(f);
+ } else {
+ result.append("couldn't open ");
+ result.append(buffer);
+ result.append("\n");
+ }
+
+ snprintf(buffer, SIZE, "/proc/%d/fd", getpid());
+ DIR *d = opendir(buffer);
+ if (d) {
+ struct dirent *ent;
+ while((ent = readdir(d)) != NULL) {
+ if (strcmp(ent->d_name,".") && strcmp(ent->d_name,"..")) {
+ snprintf(buffer, SIZE, "/proc/%d/fd/%s", getpid(), ent->d_name);
+ struct stat s;
+ if (lstat(buffer, &s) == 0) {
+ if ((s.st_mode & S_IFMT) == S_IFLNK) {
+ char linkto[256];
+ int len = readlink(buffer, linkto, sizeof(linkto));
+ if(len > 0) {
+ if(len > 255) {
+ linkto[252] = '.';
+ linkto[253] = '.';
+ linkto[254] = '.';
+ linkto[255] = 0;
+ } else {
+ linkto[len] = 0;
+ }
+ if (strstr(linkto, "/storage/") == linkto ||
+ strstr(linkto, "/system/sounds/") == linkto ||
+ strstr(linkto, "/data/") == linkto ||
+ strstr(linkto, "/system/media/") == linkto) {
+ result.append(" ");
+ result.append(buffer);
+ result.append(" -> ");
+ result.append(linkto);
+ result.append("\n");
+ }
+ }
+ } else {
+ result.append(" unexpected type for ");
+ result.append(buffer);
+ result.append("\n");
+ }
+ }
+ }
+ }
+ closedir(d);
+ } else {
+ result.append("couldn't open ");
+ result.append(buffer);
+ result.append("\n");
+ }
+
+ gLooperRoster.dump(fd, args);
+
+ bool dumpMem = false;
+ bool unreachableMemory = false;
+ for (size_t i = 0; i < args.size(); i++) {
+ if (args[i] == String16("-m")) {
+ dumpMem = true;
+ } else if (args[i] == String16("--unreachable")) {
+ unreachableMemory = true;
+ }
+ }
+ if (dumpMem) {
+ result.append("\nDumping memory:\n");
+ std::string s = dumpMemoryAddresses(100 /* limit */);
+ result.append(s.c_str(), s.size());
+ }
+ if (unreachableMemory) {
+ result.append("\nDumping unreachable memory:\n");
+ // TODO - should limit be an argument parameter?
+ // TODO: enable GetUnreachableMemoryString if it's part of stable API
+ //std::string s = GetUnreachableMemoryString(true /* contents */, 10000 /* limit */);
+ //result.append(s.c_str(), s.size());
+ }
+ }
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+}
+
+} // anonymous namespace
+
+//static
+sp<MediaPlayer2> MediaPlayer2::Create() {
+ sp<MediaPlayer2> player = new MediaPlayer2();
+
+ if (!player->init()) {
+ return NULL;
+ }
+
+ ALOGV("Create new player(%p)", player.get());
+
+ addPlayer(player);
+ return player;
+}
+
+// static
+status_t MediaPlayer2::DumpAll(int fd, const Vector<String16>& args) {
+ return dumpPlayers(fd, args);
+}
+
+MediaPlayer2::MediaPlayer2() {
+ ALOGV("constructor");
+ mSrcId = 0;
+ mLockThreadId = 0;
+ mListener = NULL;
+ mStreamType = AUDIO_STREAM_MUSIC;
+ mAudioAttributesParcel = NULL;
+ mCurrentPosition = -1;
+ mCurrentSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
+ mSeekPosition = -1;
+ mSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
+ mCurrentState = MEDIA_PLAYER2_IDLE;
+ mLoop = false;
+ mLeftVolume = mRightVolume = 1.0;
+ mVideoWidth = mVideoHeight = 0;
+ mAudioSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ AudioSystem::acquireAudioSessionId(mAudioSessionId, -1);
+ mSendLevel = 0;
+
+ // TODO: get pid and uid from JAVA
+ mPid = IPCThreadState::self()->getCallingPid();
+ mUid = IPCThreadState::self()->getCallingUid();
+
+ mAudioAttributes = NULL;
+}
+
+MediaPlayer2::~MediaPlayer2() {
+ ALOGV("destructor");
+ if (mAudioAttributesParcel != NULL) {
+ delete mAudioAttributesParcel;
+ mAudioAttributesParcel = NULL;
+ }
+ AudioSystem::releaseAudioSessionId(mAudioSessionId, -1);
+ disconnect();
+ removePlayer(this);
+ if (mAudioAttributes != NULL) {
+ free(mAudioAttributes);
+ }
+}
+
+bool MediaPlayer2::init() {
+ // TODO: after merge with NuPlayer2Driver, MediaPlayer2 will have its own
+ // looper for notification.
+ return true;
+}
+
+void MediaPlayer2::disconnect() {
+ ALOGV("disconnect");
+ sp<MediaPlayer2Interface> p;
+ {
+ Mutex::Autolock _l(mLock);
+ p = mPlayer;
+ mPlayer.clear();
+ }
+
+ if (p != 0) {
+ p->setListener(NULL);
+ p->reset();
+ }
+
+ {
+ Mutex::Autolock _l(mLock);
+ disconnectNativeWindow_l();
+ }
+}
+
+void MediaPlayer2::clear_l() {
+ mCurrentPosition = -1;
+ mCurrentSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
+ mSeekPosition = -1;
+ mSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
+ mVideoWidth = mVideoHeight = 0;
+}
+
+status_t MediaPlayer2::setListener(const sp<MediaPlayer2Listener>& listener) {
+ ALOGV("setListener");
+ Mutex::Autolock _l(mLock);
+ mListener = listener;
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2::getSrcId(int64_t *srcId) {
+ if (srcId == NULL) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+ *srcId = mSrcId;
+ return OK;
+}
+
+status_t MediaPlayer2::setDataSource(const sp<DataSourceDesc> &dsd) {
+ if (dsd == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("setDataSource type(%d), srcId(%lld)", dsd->mType, (long long)dsd->mId);
+
+ sp<MediaPlayer2Interface> oldPlayer;
+
+ Mutex::Autolock _l(mLock);
+ {
+ if (!((mCurrentState & MEDIA_PLAYER2_IDLE)
+ || mCurrentState == MEDIA_PLAYER2_STATE_ERROR)) {
+ ALOGE("setDataSource called in wrong state %d", mCurrentState);
+ return INVALID_OPERATION;
+ }
+
+ sp<MediaPlayer2Interface> player = new NuPlayer2Driver(mPid, mUid);
+ status_t err = player->initCheck();
+ if (err != NO_ERROR) {
+ ALOGE("Failed to create player object, initCheck failed(%d)", err);
+ return err;
+ }
+
+ clear_l();
+
+ player->setListener(new proxyListener(this));
+ mAudioOutput = new MediaPlayer2AudioOutput(mAudioSessionId, mUid,
+ mPid, mAudioAttributes, new AudioDeviceUpdatedNotifier(player));
+ player->setAudioSink(mAudioOutput);
+
+ err = player->setDataSource(dsd);
+ if (err != OK) {
+ ALOGE("setDataSource error: %d", err);
+ return err;
+ }
+
+ oldPlayer = mPlayer; // assign the outer handle; declaring a new local here would shadow it and skip the reset below
+ mPlayer = player;
+ mSrcId = dsd->mId;
+ mCurrentState = MEDIA_PLAYER2_INITIALIZED;
+ }
+
+ if (oldPlayer != NULL) {
+ oldPlayer->setListener(NULL);
+ oldPlayer->reset();
+ }
+
+ return OK;
+}
+
+status_t MediaPlayer2::prepareNextDataSource(const sp<DataSourceDesc> &dsd) {
+ if (dsd == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("prepareNextDataSource type(%d), srcId(%lld)", dsd->mType, (long long)dsd->mId);
+
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGE("prepareNextDataSource failed: state %X, mPlayer(%p)", mCurrentState, mPlayer.get());
+ return INVALID_OPERATION;
+ }
+ return mPlayer->prepareNextDataSource(dsd);
+}
+
+status_t MediaPlayer2::playNextDataSource(int64_t srcId) {
+ ALOGV("playNextDataSource srcId(%lld)", (long long)srcId);
+
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGE("playNextDataSource failed: state %X, mPlayer(%p)", mCurrentState, mPlayer.get());
+ return INVALID_OPERATION;
+ }
+ mSrcId = srcId;
+ return mPlayer->playNextDataSource(srcId);
+}
+
+status_t MediaPlayer2::invoke(const Parcel& request, Parcel *reply) {
+ Mutex::Autolock _l(mLock);
+ const bool hasBeenInitialized =
+ (mCurrentState != MEDIA_PLAYER2_STATE_ERROR) &&
+ ((mCurrentState & MEDIA_PLAYER2_IDLE) != MEDIA_PLAYER2_IDLE);
+ if ((mPlayer == NULL) || !hasBeenInitialized) {
+ ALOGE("invoke failed: wrong state %X, mPlayer(%p)", mCurrentState, mPlayer.get());
+ return INVALID_OPERATION;
+ }
+ ALOGV("invoke %zu", request.dataSize());
+ return mPlayer->invoke(request, reply);
+}
+
+// This call doesn't need to access the native player.
+status_t MediaPlayer2::setMetadataFilter(const Parcel& filter) {
+ ALOGD("setMetadataFilter");
+
+ status_t status;
+ media::Metadata::Filter allow, drop;
+
+ if (unmarshallFilter(filter, &allow, &status) &&
+ unmarshallFilter(filter, &drop, &status)) {
+ Mutex::Autolock lock(mLock);
+
+ mMetadataAllow = allow;
+ mMetadataDrop = drop;
+ }
+ return status;
+}
+
+status_t MediaPlayer2::getMetadata(bool update_only, bool /* apply_filter */, Parcel *reply) {
+ ALOGD("getMetadata");
+ sp<MediaPlayer2Interface> player;
+ media::Metadata::Filter ids;
+ Mutex::Autolock lock(mLock);
+ {
+ if (mPlayer == NULL) {
+ return NO_INIT;
+ }
+
+ player = mPlayer;
+ // Placeholder for the return code, updated by the caller.
+ reply->writeInt32(-1);
+
+ // We don't block notifications while we fetch the data. We clear
+ // mMetadataUpdated first so we don't lose notifications happening
+ // during the rest of this call.
+ if (update_only) {
+ ids = mMetadataUpdated;
+ }
+ mMetadataUpdated.clear();
+ }
+
+ media::Metadata metadata(reply);
+
+ metadata.appendHeader();
+ status_t status = player->getMetadata(ids, reply);
+
+ if (status != OK) {
+ metadata.resetParcel();
+ ALOGE("getMetadata failed %d", status);
+ return status;
+ }
+
+ // FIXME: Implement filtering on the result. Not critical since
+ // filtering takes place on the update notifications already. This
+ // would be when all the metadata are fetched and a filter is set.
+
+ // Everything is fine, update the metadata length.
+ metadata.updateLength();
+ return OK;
+}
+
+void MediaPlayer2::disconnectNativeWindow_l() {
+ if (mConnectedWindow != NULL && mConnectedWindow->getANativeWindow() != NULL) {
+ status_t err = native_window_api_disconnect(
+ mConnectedWindow->getANativeWindow(), NATIVE_WINDOW_API_MEDIA);
+
+ if (err != OK) {
+ ALOGW("nativeWindowDisconnect returned an error: %s (%d)",
+ strerror(-err), err);
+ }
+ }
+ mConnectedWindow.clear();
+}
+
+status_t MediaPlayer2::setVideoSurfaceTexture(const sp<ANativeWindowWrapper>& nww) {
+ ANativeWindow *anw = (nww == NULL ? NULL : nww->getANativeWindow());
+ ALOGV("setVideoSurfaceTexture(%p)", anw);
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return NO_INIT;
+ }
+
+ if (anw != NULL) {
+ if (mConnectedWindow != NULL
+ && mConnectedWindow->getANativeWindow() == anw) {
+ return OK;
+ }
+ status_t err = native_window_api_connect(anw, NATIVE_WINDOW_API_MEDIA);
+
+ if (err != OK) {
+ ALOGE("setVideoSurfaceTexture failed: %d", err);
+ // Note that we must do the reset before disconnecting from the ANW.
+ // Otherwise queue/dequeue calls could be made on the disconnected
+ // ANW, which may result in errors.
+ mPlayer->reset();
+ disconnectNativeWindow_l();
+ return err;
+ }
+ }
+
+ // Note that we must set the player's new GraphicBufferProducer before
+ // disconnecting the old one. Otherwise queue/dequeue calls could be made
+ // on the disconnected ANW, which may result in errors.
+ status_t err = mPlayer->setVideoSurfaceTexture(nww);
+
+ disconnectNativeWindow_l();
+
+ if (err == OK) {
+ mConnectedWindow = nww;
+ mLock.unlock();
+ } else if (anw != NULL) {
+ mLock.unlock();
+ status_t err = native_window_api_disconnect(anw, NATIVE_WINDOW_API_MEDIA);
+
+ if (err != OK) {
+ ALOGW("nativeWindowDisconnect returned an error: %s (%d)",
+ strerror(-err), err);
+ }
+ }
+
+ return err;
+}
+
+status_t MediaPlayer2::getBufferingSettings(BufferingSettings* buffering /* nonnull */) {
+ ALOGV("getBufferingSettings");
+
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return NO_INIT;
+ }
+
+ status_t ret = mPlayer->getBufferingSettings(buffering);
+ if (ret == NO_ERROR) {
+ ALOGV("getBufferingSettings{%s}", buffering->toString().string());
+ } else {
+ ALOGE("getBufferingSettings returned %d", ret);
+ }
+ return ret;
+}
+
+status_t MediaPlayer2::setBufferingSettings(const BufferingSettings& buffering) {
+ ALOGV("setBufferingSettings{%s}", buffering.toString().string());
+
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return NO_INIT;
+ }
+ return mPlayer->setBufferingSettings(buffering);
+}
+
+status_t MediaPlayer2::setAudioAttributes_l(const Parcel &parcel) {
+ if (mAudioAttributes != NULL) {
+ free(mAudioAttributes);
+ }
+ mAudioAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
+ if (mAudioAttributes == NULL) {
+ return NO_MEMORY;
+ }
+ unmarshallAudioAttributes(parcel, mAudioAttributes);
+
+ ALOGV("setAudioAttributes_l() usage=%d content=%d flags=0x%x tags=%s",
+ mAudioAttributes->usage, mAudioAttributes->content_type, mAudioAttributes->flags,
+ mAudioAttributes->tags);
+
+ if (mAudioOutput != 0) {
+ mAudioOutput->setAudioAttributes(mAudioAttributes);
+ }
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2::prepareAsync() {
+ ALOGV("prepareAsync");
+ Mutex::Autolock _l(mLock);
+ if ((mPlayer != 0) && (mCurrentState & (MEDIA_PLAYER2_INITIALIZED | MEDIA_PLAYER2_STOPPED))) {
+ if (mAudioAttributesParcel != NULL) {
+ status_t err = setAudioAttributes_l(*mAudioAttributesParcel);
+ if (err != OK) {
+ return err;
+ }
+ } else if (mAudioOutput != 0) {
+ mAudioOutput->setAudioStreamType(mStreamType);
+ }
+ mCurrentState = MEDIA_PLAYER2_PREPARING;
+ return mPlayer->prepareAsync();
+ }
+ ALOGE("prepareAsync called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
+ return INVALID_OPERATION;
+}
+
+status_t MediaPlayer2::start() {
+ ALOGV("start");
+
+ status_t ret = NO_ERROR;
+ Mutex::Autolock _l(mLock);
+
+ mLockThreadId = getThreadId();
+
+ if (mCurrentState & MEDIA_PLAYER2_STARTED) {
+ ret = NO_ERROR;
+ } else if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER2_PREPARED |
+ MEDIA_PLAYER2_PLAYBACK_COMPLETE | MEDIA_PLAYER2_PAUSED ) ) ) {
+ mPlayer->setLooping(mLoop);
+
+ if (mAudioOutput != 0) {
+ mAudioOutput->setVolume(mLeftVolume, mRightVolume);
+ mAudioOutput->setAuxEffectSendLevel(mSendLevel);
+ }
+ mCurrentState = MEDIA_PLAYER2_STARTED;
+ ret = mPlayer->start();
+ if (ret != NO_ERROR) {
+ mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
+ } else {
+ if (mCurrentState == MEDIA_PLAYER2_PLAYBACK_COMPLETE) {
+ ALOGV("playback completed immediately following start()");
+ }
+ }
+ } else {
+ ALOGE("start called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
+ ret = INVALID_OPERATION;
+ }
+
+ mLockThreadId = 0;
+
+ return ret;
+}
+
+status_t MediaPlayer2::stop() {
+ ALOGV("stop");
+ Mutex::Autolock _l(mLock);
+ if (mCurrentState & MEDIA_PLAYER2_STOPPED) return NO_ERROR;
+ if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER2_STARTED | MEDIA_PLAYER2_PREPARED |
+ MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE ) ) ) {
+ status_t ret = mPlayer->stop();
+ if (ret != NO_ERROR) {
+ mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
+ } else {
+ mCurrentState = MEDIA_PLAYER2_STOPPED;
+ }
+ return ret;
+ }
+ ALOGE("stop called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
+ return INVALID_OPERATION;
+}
+
+status_t MediaPlayer2::pause() {
+ ALOGV("pause");
+ Mutex::Autolock _l(mLock);
+ if (mCurrentState & (MEDIA_PLAYER2_PAUSED|MEDIA_PLAYER2_PLAYBACK_COMPLETE))
+ return NO_ERROR;
+ if ((mPlayer != 0) && (mCurrentState & MEDIA_PLAYER2_STARTED)) {
+ status_t ret = mPlayer->pause();
+ if (ret != NO_ERROR) {
+ mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
+ } else {
+ mCurrentState = MEDIA_PLAYER2_PAUSED;
+ }
+ return ret;
+ }
+ ALOGE("pause called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
+ return INVALID_OPERATION;
+}
+
+bool MediaPlayer2::isPlaying() {
+ Mutex::Autolock _l(mLock);
+ if (mPlayer != 0) {
+ bool temp = mPlayer->isPlaying();
+ ALOGV("isPlaying: %d", temp);
+ if ((mCurrentState & MEDIA_PLAYER2_STARTED) && ! temp) {
+ ALOGE("internal/external state mismatch corrected");
+ mCurrentState = MEDIA_PLAYER2_PAUSED;
+ } else if ((mCurrentState & MEDIA_PLAYER2_PAUSED) && temp) {
+ ALOGE("internal/external state mismatch corrected");
+ mCurrentState = MEDIA_PLAYER2_STARTED;
+ }
+ return temp;
+ }
+ ALOGV("isPlaying: no active player");
+ return false;
+}
+
+mediaplayer2_states MediaPlayer2::getMediaPlayer2State() {
+ Mutex::Autolock _l(mLock);
+ if (mCurrentState & MEDIA_PLAYER2_STATE_ERROR) {
+ return MEDIAPLAYER2_STATE_ERROR;
+ }
+ if (mPlayer == 0
+ || (mCurrentState &
+ (MEDIA_PLAYER2_IDLE | MEDIA_PLAYER2_INITIALIZED | MEDIA_PLAYER2_PREPARING))) {
+ return MEDIAPLAYER2_STATE_IDLE;
+ }
+ if (mCurrentState & MEDIA_PLAYER2_STARTED) {
+ return MEDIAPLAYER2_STATE_PLAYING;
+ }
+ if (mCurrentState
+ & (MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_STOPPED | MEDIA_PLAYER2_PLAYBACK_COMPLETE)) {
+ return MEDIAPLAYER2_STATE_PAUSED;
+ }
+ // now only mCurrentState & MEDIA_PLAYER2_PREPARED is true
+ return MEDIAPLAYER2_STATE_PREPARED;
+}
+
+status_t MediaPlayer2::setPlaybackSettings(const AudioPlaybackRate& rate) {
+ ALOGV("setPlaybackSettings: %f %f %d %d",
+ rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
+ // Negative speed and pitch do not make sense. Further validation will
+ // be done by the respective mediaplayers.
+ if (rate.mSpeed <= 0.f || rate.mPitch < 0.f) {
+ return BAD_VALUE;
+ }
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0 || (mCurrentState & MEDIA_PLAYER2_STOPPED)) {
+ return INVALID_OPERATION;
+ }
+
+ status_t err = mPlayer->setPlaybackSettings(rate);
+ return err;
+}
+
+status_t MediaPlayer2::getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) {
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return INVALID_OPERATION;
+ }
+ status_t ret = mPlayer->getPlaybackSettings(rate);
+ if (ret == NO_ERROR) {
+ ALOGV("getPlaybackSettings(%f, %f, %d, %d)",
+ rate->mSpeed, rate->mPitch, rate->mFallbackMode, rate->mStretchMode);
+ } else {
+ ALOGV("getPlaybackSettings returned %d", ret);
+ }
+ return ret;
+}
+
+status_t MediaPlayer2::setSyncSettings(const AVSyncSettings& sync, float videoFpsHint) {
+ ALOGV("setSyncSettings: %u %u %f %f",
+ sync.mSource, sync.mAudioAdjustMode, sync.mTolerance, videoFpsHint);
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) return INVALID_OPERATION;
+ return mPlayer->setSyncSettings(sync, videoFpsHint);
+}
+
+status_t MediaPlayer2::getSyncSettings(
+ AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */) {
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return INVALID_OPERATION;
+ }
+ status_t ret = mPlayer->getSyncSettings(sync, videoFps);
+ if (ret == NO_ERROR) {
+ ALOGV("getSyncSettings(%u, %u, %f, %f)",
+ sync->mSource, sync->mAudioAdjustMode, sync->mTolerance, *videoFps);
+ } else {
+ ALOGV("getSyncSettings returned %d", ret);
+ }
+ return ret;
+}
+
+status_t MediaPlayer2::getVideoWidth(int *w) {
+ ALOGV("getVideoWidth");
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return INVALID_OPERATION;
+ }
+ *w = mVideoWidth;
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2::getVideoHeight(int *h) {
+ ALOGV("getVideoHeight");
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return INVALID_OPERATION;
+ }
+ *h = mVideoHeight;
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2::getCurrentPosition(int64_t *msec) {
+ ALOGV("getCurrentPosition");
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return INVALID_OPERATION;
+ }
+ if (mCurrentPosition >= 0) {
+ ALOGV("Using cached seek position: %lld", (long long)mCurrentPosition);
+ *msec = mCurrentPosition;
+ return NO_ERROR;
+ }
+ status_t ret = mPlayer->getCurrentPosition(msec);
+ if (ret == NO_ERROR) {
+ ALOGV("getCurrentPosition = %lld", (long long)*msec);
+ } else {
+ ALOGE("getCurrentPosition returned %d", ret);
+ }
+ return ret;
+}
+
+status_t MediaPlayer2::getDuration(int64_t *msec) {
+ Mutex::Autolock _l(mLock);
+ ALOGV("getDuration_l");
+ bool isValidState = (mCurrentState & (MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED |
+ MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_STOPPED | MEDIA_PLAYER2_PLAYBACK_COMPLETE));
+ if (mPlayer == 0 || !isValidState) {
+ ALOGE("Attempt to call getDuration in wrong state: mPlayer=%p, mCurrentState=%u",
+ mPlayer.get(), mCurrentState);
+ return INVALID_OPERATION;
+ }
+ int64_t durationMs;
+ status_t ret = mPlayer->getDuration(&durationMs);
+
+ if (ret == NO_ERROR) {
+ ALOGV("getDuration = %lld", (long long)durationMs);
+ } else {
+ ALOGE("getDuration returned %d", ret);
+ // Do not enter error state just because no duration was available.
+ durationMs = -1;
+ }
+
+ if (msec) {
+ *msec = durationMs;
+ }
+ return OK;
+}
+
+status_t MediaPlayer2::seekTo_l(int64_t msec, MediaPlayer2SeekMode mode) {
+ ALOGV("seekTo (%lld, %d)", (long long)msec, mode);
+ if ((mPlayer == 0) || !(mCurrentState & (MEDIA_PLAYER2_STARTED | MEDIA_PLAYER2_PREPARED |
+ MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE))) {
+ ALOGE("Attempt to perform seekTo in wrong state: mPlayer=%p, mCurrentState=%u",
+ mPlayer.get(), mCurrentState);
+ return INVALID_OPERATION;
+ }
+ if (msec < 0) {
+ ALOGW("Attempt to seek to invalid position: %lld", (long long)msec);
+ msec = 0;
+ }
+
+ int64_t durationMs;
+ status_t err = mPlayer->getDuration(&durationMs);
+
+ if (err != OK) {
+ ALOGW("Stream has no duration and is therefore not seekable.");
+ return err;
+ }
+
+ if (msec > durationMs) {
+ ALOGW("Attempt to seek to past end of file: request = %lld, durationMs = %lld",
+ (long long)msec, (long long)durationMs);
+
+ msec = durationMs;
+ }
+
+ // Cache the requested seek position and mode. If no seek is in flight,
+ // dispatch it now; otherwise it is issued when the current seek completes.
+ mCurrentPosition = msec;
+ mCurrentSeekMode = mode;
+ if (mSeekPosition < 0) {
+ mSeekPosition = msec;
+ mSeekMode = mode;
+ return mPlayer->seekTo(msec, mode);
+ }
+ ALOGV("Seek in progress - queue up seekTo[%lld, %d]", (long long)msec, mode);
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2::seekTo(int64_t msec, MediaPlayer2SeekMode mode) {
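+ // Record the calling thread id so that a notify() delivered synchronously on
+ // this same thread does not try to re-acquire mLock (see notify()).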
+ mLockThreadId = getThreadId();
+ Mutex::Autolock _l(mLock);
+ status_t result = seekTo_l(msec, mode);
+ mLockThreadId = 0;
+
+ return result;
+}
+
+status_t MediaPlayer2::notifyAt(int64_t mediaTimeUs) {
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return INVALID_OPERATION;
+ }
+
+ return mPlayer->notifyAt(mediaTimeUs);
+}
+
+status_t MediaPlayer2::reset_l() {
+ mLoop = false;
+ if (mCurrentState == MEDIA_PLAYER2_IDLE) {
+ return NO_ERROR;
+ }
+ if (mPlayer != 0) {
+ status_t ret = mPlayer->reset();
+ if (ret != NO_ERROR) {
+ ALOGE("reset() failed with return code (%d)", ret);
+ mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
+ } else {
+ mPlayer->setListener(NULL);
+ mCurrentState = MEDIA_PLAYER2_IDLE;
+ }
+ // setDataSource has to be called again to create a
+ // new mediaplayer.
+ mPlayer = 0;
+ return ret;
+ }
+ clear_l();
+ return NO_ERROR;
+}
+
+status_t MediaPlayer2::reset() {
+ ALOGV("reset");
+ mLockThreadId = getThreadId();
+ Mutex::Autolock _l(mLock);
+ status_t result = reset_l();
+ mLockThreadId = 0;
+
+ return result;
+}
+
+status_t MediaPlayer2::setAudioStreamType(audio_stream_type_t type) {
+ ALOGV("MediaPlayer2::setAudioStreamType");
+ Mutex::Autolock _l(mLock);
+ if (mStreamType == type) return NO_ERROR;
+ if (mCurrentState & ( MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED |
+ MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE ) ) {
+ // Can't change the stream type after prepare
+ ALOGE("setAudioStream called in state %d", mCurrentState);
+ return INVALID_OPERATION;
+ }
+ // cache
+ mStreamType = type;
+ return OK;
+}
+
+status_t MediaPlayer2::getAudioStreamType(audio_stream_type_t *type) {
+ ALOGV("getAudioStreamType");
+ Mutex::Autolock _l(mLock);
+ *type = mStreamType;
+ return OK;
+}
+
+status_t MediaPlayer2::setLooping(int loop) {
+ ALOGV("MediaPlayer2::setLooping");
+ Mutex::Autolock _l(mLock);
+ mLoop = (loop != 0);
+ if (mPlayer != 0) {
+ return mPlayer->setLooping(loop);
+ }
+ return OK;
+}
+
+bool MediaPlayer2::isLooping() {
+ ALOGV("isLooping");
+ Mutex::Autolock _l(mLock);
+ if (mPlayer != 0) {
+ return mLoop;
+ }
+ ALOGV("isLooping: no active player");
+ return false;
+}
+
+status_t MediaPlayer2::setVolume(float leftVolume, float rightVolume) {
+ ALOGV("MediaPlayer2::setVolume(%f, %f)", leftVolume, rightVolume);
+ Mutex::Autolock _l(mLock);
+ mLeftVolume = leftVolume;
+ mRightVolume = rightVolume;
+ if (mAudioOutput != 0) {
+ mAudioOutput->setVolume(leftVolume, rightVolume);
+ }
+ return OK;
+}
+
+status_t MediaPlayer2::setAudioSessionId(audio_session_t sessionId) {
+ ALOGV("MediaPlayer2::setAudioSessionId(%d)", sessionId);
+ Mutex::Autolock _l(mLock);
+ if (!(mCurrentState & MEDIA_PLAYER2_IDLE)) {
+ ALOGE("setAudioSessionId called in state %d", mCurrentState);
+ return INVALID_OPERATION;
+ }
+ if (sessionId < 0) {
+ return BAD_VALUE;
+ }
+ if (sessionId != mAudioSessionId) {
+ AudioSystem::acquireAudioSessionId(sessionId, -1);
+ AudioSystem::releaseAudioSessionId(mAudioSessionId, -1);
+ mAudioSessionId = sessionId;
+ }
+ return NO_ERROR;
+}
+
+audio_session_t MediaPlayer2::getAudioSessionId() {
+ Mutex::Autolock _l(mLock);
+ return mAudioSessionId;
+}
+
+status_t MediaPlayer2::setAuxEffectSendLevel(float level) {
+ ALOGV("MediaPlayer2::setAuxEffectSendLevel(%f)", level);
+ Mutex::Autolock _l(mLock);
+ mSendLevel = level;
+ if (mAudioOutput != 0) {
+ return mAudioOutput->setAuxEffectSendLevel(level);
+ }
+ return OK;
+}
+
+status_t MediaPlayer2::attachAuxEffect(int effectId) {
+ ALOGV("MediaPlayer2::attachAuxEffect(%d)", effectId);
+ Mutex::Autolock _l(mLock);
+ if (mAudioOutput == 0 ||
+ (mCurrentState & MEDIA_PLAYER2_IDLE) ||
+ (mCurrentState == MEDIA_PLAYER2_STATE_ERROR )) {
+ ALOGE("attachAuxEffect called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
+ return INVALID_OPERATION;
+ }
+
+ return mAudioOutput->attachAuxEffect(effectId);
+}
+
+// always call with lock held
+status_t MediaPlayer2::checkStateForKeySet_l(int key) {
+ switch (key) {
+ case MEDIA2_KEY_PARAMETER_AUDIO_ATTRIBUTES:
+ if (mCurrentState & ( MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED |
+ MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE) ) {
+ // Can't change the audio attributes after prepare
+ ALOGE("trying to set audio attributes called in state %d", mCurrentState);
+ return INVALID_OPERATION;
+ }
+ break;
+ default:
+ // parameter doesn't require player state check
+ break;
+ }
+ return OK;
+}
+
+status_t MediaPlayer2::setParameter(int key, const Parcel& request) {
+ ALOGV("MediaPlayer2::setParameter(%d)", key);
+ status_t status = INVALID_OPERATION;
+ Mutex::Autolock _l(mLock);
+ if (checkStateForKeySet_l(key) != OK) {
+ return status;
+ }
+ switch (key) {
+ case MEDIA2_KEY_PARAMETER_AUDIO_ATTRIBUTES:
+ // save the marshalled audio attributes
+ if (mAudioAttributesParcel != NULL) {
+ delete mAudioAttributesParcel;
+ }
+ mAudioAttributesParcel = new Parcel();
+ mAudioAttributesParcel->appendFrom(&request, 0, request.dataSize());
+ status = setAudioAttributes_l(request);
+ if (status != OK) {
+ return status;
+ }
+ break;
+ default:
+ ALOGV_IF(mPlayer == NULL, "setParameter: no active player");
+ break;
+ }
+
+ if (mPlayer != NULL) {
+ status = mPlayer->setParameter(key, request);
+ }
+ return status;
+}
+
+status_t MediaPlayer2::getParameter(int key, Parcel *reply) {
+ ALOGV("MediaPlayer2::getParameter(%d)", key);
+ Mutex::Autolock _l(mLock);
+ if (key == MEDIA2_KEY_PARAMETER_AUDIO_ATTRIBUTES) {
+ if (reply == NULL) {
+ return BAD_VALUE;
+ }
+ if (mAudioAttributesParcel != NULL) {
+ reply->appendFrom(mAudioAttributesParcel, 0, mAudioAttributesParcel->dataSize());
+ }
+ return OK;
+ }
+
+ if (mPlayer == NULL) {
+ ALOGV("getParameter: no active player");
+ return INVALID_OPERATION;
+ }
+
+ status_t status = mPlayer->getParameter(key, reply);
+ if (status != OK) {
+ ALOGD("getParameter returns %d", status);
+ }
+ return status;
+}
+
+bool MediaPlayer2::shouldDropMetadata(media::Metadata::Type code) const {
+ Mutex::Autolock lock(mLock);
+
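+ // The drop list takes precedence; an empty allow list means every type is allowed.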
+ if (findMetadata(mMetadataDrop, code)) {
+ return true;
+ }
+
+ if (mMetadataAllow.isEmpty() || findMetadata(mMetadataAllow, code)) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+
+void MediaPlayer2::addNewMetadataUpdate(media::Metadata::Type metadata_type) {
+ Mutex::Autolock lock(mLock);
+ if (mMetadataUpdated.indexOf(metadata_type) < 0) {
+ mMetadataUpdated.add(metadata_type);
+ }
+}
+
+void MediaPlayer2::notify(int64_t srcId, int msg, int ext1, int ext2, const Parcel *obj) {
+ ALOGV("message received srcId=%lld, msg=%d, ext1=%d, ext2=%d",
+ (long long)srcId, msg, ext1, ext2);
+
+ if (MEDIA2_INFO == msg && MEDIA2_INFO_METADATA_UPDATE == ext1) {
+ const media::Metadata::Type metadata_type = ext2;
+
+ if (shouldDropMetadata(metadata_type)) {
+ return;
+ }
+
+ // Update the list of metadata that have changed. getMetadata
+ // also accesses mMetadataUpdated and clears it.
+ addNewMetadataUpdate(metadata_type);
+ }
+
+ bool send = true;
+ bool locked = false;
+
+ // TODO: In the future, we might be on the same thread if the app is
+ // running in the same process as the media server. In that case,
+ // this will deadlock.
+ //
+ // The threadId hack below works around this for the case of prepare,
+ // seekTo, start, and reset within the same process.
+ // FIXME: Remember, this is a hack, it's not even a hack that is applied
+ // consistently for all use-cases, this needs to be revisited.
+ if (mLockThreadId != getThreadId()) {
+ mLock.lock();
+ locked = true;
+ }
+
+ // Allows calls from JNI in idle state to notify errors
+ if (!(msg == MEDIA2_ERROR && mCurrentState == MEDIA_PLAYER2_IDLE) && mPlayer == 0) {
+ ALOGV("notify(%lld, %d, %d, %d) callback on disconnected mediaplayer",
+ (long long)srcId, msg, ext1, ext2);
+ if (locked) mLock.unlock(); // release the lock when done.
+ return;
+ }
+
+ switch (msg) {
+ case MEDIA2_NOP: // interface test message
+ break;
+ case MEDIA2_PREPARED:
+ ALOGV("MediaPlayer2::notify() prepared");
+ mCurrentState = MEDIA_PLAYER2_PREPARED;
+ break;
+ case MEDIA2_DRM_INFO:
+ ALOGV("MediaPlayer2::notify() MEDIA2_DRM_INFO(%lld, %d, %d, %d, %p)",
+ (long long)srcId, msg, ext1, ext2, obj);
+ break;
+ case MEDIA2_PLAYBACK_COMPLETE:
+ ALOGV("playback complete");
+ if (mCurrentState == MEDIA_PLAYER2_IDLE) {
+ ALOGE("playback complete in idle state");
+ }
+ if (!mLoop) {
+ mCurrentState = MEDIA_PLAYER2_PLAYBACK_COMPLETE;
+ }
+ break;
+ case MEDIA2_ERROR:
+ // Always log errors.
+ // ext1: Media framework error code.
+ // ext2: Implementation dependent error code.
+ ALOGE("error (%d, %d)", ext1, ext2);
+ mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
+ break;
+ case MEDIA2_INFO:
+ // ext1: Media framework error code.
+ // ext2: Implementation dependent error code.
+ if (ext1 != MEDIA2_INFO_VIDEO_TRACK_LAGGING) {
+ ALOGW("info/warning (%d, %d)", ext1, ext2);
+ }
+ break;
+ case MEDIA2_SEEK_COMPLETE:
+ ALOGV("Received seek complete");
+ if (mSeekPosition != mCurrentPosition || (mSeekMode != mCurrentSeekMode)) {
+ ALOGV("Executing queued seekTo(%lld, %d)",
+ (long long)mCurrentPosition, mCurrentSeekMode);
+ mSeekPosition = -1;
+ mSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
+ seekTo_l(mCurrentPosition, mCurrentSeekMode);
+ } else {
+ ALOGV("All seeks complete - return to regularly scheduled program");
+ mCurrentPosition = mSeekPosition = -1;
+ mCurrentSeekMode = mSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
+ }
+ break;
+ case MEDIA2_BUFFERING_UPDATE:
+ ALOGV("buffering %d", ext1);
+ break;
+ case MEDIA2_SET_VIDEO_SIZE:
+ ALOGV("New video size %d x %d", ext1, ext2);
+ mVideoWidth = ext1;
+ mVideoHeight = ext2;
+ break;
+ case MEDIA2_NOTIFY_TIME:
+ ALOGV("Received notify time message");
+ break;
+ case MEDIA2_TIMED_TEXT:
+ ALOGV("Received timed text message");
+ break;
+ case MEDIA2_SUBTITLE_DATA:
+ ALOGV("Received subtitle data message");
+ break;
+ case MEDIA2_META_DATA:
+ ALOGV("Received timed metadata message");
+ break;
+ default:
+ ALOGV("unrecognized message: (%d, %d, %d)", msg, ext1, ext2);
+ break;
+ }
+
+ sp<MediaPlayer2Listener> listener = mListener;
+ if (locked) mLock.unlock();
+
+ // this prevents re-entrant calls into client code
+ if ((listener != 0) && send) {
+ Mutex::Autolock _l(mNotifyLock);
+ ALOGV("callback application");
+ listener->notify(srcId, msg, ext1, ext2, obj);
+ ALOGV("back from callback");
+ }
+}
+
+// Modular DRM
+status_t MediaPlayer2::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId) {
+ // TODO change to ALOGV
+ ALOGD("prepareDrm: uuid: %p drmSessionId: %p(%zu)", uuid,
+ drmSessionId.array(), drmSessionId.size());
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ return NO_INIT;
+ }
+
+ // Only allow it in the player's preparing/prepared state.
+ // We get here only if MEDIA_DRM_INFO has already arrived (e.g., prepare is half-way through or
+ // completed) so the state change to "prepared" might not have happened yet (e.g., buffering).
+ // Still, we can allow prepareDrm for the use case of being called in OnDrmInfoListener.
+ if (!(mCurrentState & (MEDIA_PLAYER2_PREPARING | MEDIA_PLAYER2_PREPARED))) {
+ ALOGE("prepareDrm is called in the wrong state (%d).", mCurrentState);
+ return INVALID_OPERATION;
+ }
+
+ if (drmSessionId.isEmpty()) {
+ ALOGE("prepareDrm: Unexpected. Can't proceed with crypto. Empty drmSessionId.");
+ return INVALID_OPERATION;
+ }
+
+ // Passing down to mediaserver mainly for creating the crypto
+ status_t status = mPlayer->prepareDrm(uuid, drmSessionId);
+ ALOGE_IF(status != OK, "prepareDrm: Failed at mediaserver with ret: %d", status);
+
+ // TODO change to ALOGV
+ ALOGD("prepareDrm: mediaserver::prepareDrm ret=%d", status);
+
+ return status;
+}
+
+status_t MediaPlayer2::releaseDrm() {
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ return NO_INIT;
+ }
+
+ // Not allowing releaseDrm in an active/resumable state
+ if (mCurrentState & (MEDIA_PLAYER2_STARTED |
+ MEDIA_PLAYER2_PAUSED |
+ MEDIA_PLAYER2_PLAYBACK_COMPLETE |
+ MEDIA_PLAYER2_STATE_ERROR)) {
+ ALOGE("releaseDrm Unexpected state %d. Can only be called in stopped/idle.", mCurrentState);
+ return INVALID_OPERATION;
+ }
+
+ status_t status = mPlayer->releaseDrm();
+ // TODO change to ALOGV
+ ALOGD("releaseDrm: mediaserver::releaseDrm ret: %d", status);
+ if (status != OK) {
+ ALOGE("releaseDrm: Failed at mediaserver with ret: %d", status);
+ // Overriding to OK so the client can proceed with its own cleanup.
+ // The client can't do more cleanup; mediaserver releases its crypto at end of session anyway.
+ status = OK;
+ }
+
+ return status;
+}
+
+status_t MediaPlayer2::setOutputDevice(audio_port_handle_t deviceId) {
+ Mutex::Autolock _l(mLock);
+ if (mAudioOutput == NULL) {
+ ALOGV("setOutputDevice: audio sink not init");
+ return NO_INIT;
+ }
+ return mAudioOutput->setOutputDevice(deviceId);
+}
+
+audio_port_handle_t MediaPlayer2::getRoutedDeviceId() {
+ Mutex::Autolock _l(mLock);
+ if (mAudioOutput == NULL) {
+ ALOGV("getRoutedDeviceId: audio sink not init");
+ return AUDIO_PORT_HANDLE_NONE;
+ }
+ audio_port_handle_t deviceId;
+ status_t status = mAudioOutput->getRoutedDeviceId(&deviceId);
+ if (status != NO_ERROR) {
+ return AUDIO_PORT_HANDLE_NONE;
+ }
+ return deviceId;
+}
+
+status_t MediaPlayer2::enableAudioDeviceCallback(bool enabled) {
+ Mutex::Autolock _l(mLock);
+ if (mAudioOutput == NULL) {
+ ALOGV("addAudioDeviceCallback: player not init");
+ return NO_INIT;
+ }
+ return mAudioOutput->enableAudioDeviceCallback(enabled);
+}
+
+status_t MediaPlayer2::dump(int fd, const Vector<String16>& args) {
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+ result.append(" MediaPlayer2\n");
+ snprintf(buffer, 255, " pid(%d), looping(%s)\n", mPid, mLoop?"true": "false");
+ result.append(buffer);
+
+ sp<MediaPlayer2Interface> player;
+ sp<MediaPlayer2AudioOutput> audioOutput;
+ bool locked = false;
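+ // Best effort: retry the lock a few times; if it stays contended, dump
+ // without the player and audio output details.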
+ for (int i = 0; i < kDumpLockRetries; ++i) {
+ if (mLock.tryLock() == NO_ERROR) {
+ locked = true;
+ break;
+ }
+ usleep(kDumpLockSleepUs);
+ }
+
+ if (locked) {
+ player = mPlayer;
+ audioOutput = mAudioOutput;
+ mLock.unlock();
+ } else {
+ result.append(" lock is taken, no dump from player and audio output\n");
+ }
+ write(fd, result.string(), result.size());
+
+ if (player != NULL) {
+ player->dump(fd, args);
+ }
+ if (audioOutput != 0) {
+ audioOutput->dump(fd, args);
+ }
+ write(fd, "\n", 1);
+ return NO_ERROR;
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/Android.bp b/media/libmediaplayer2/nuplayer2/Android.bp
new file mode 100644
index 0000000..1634f35
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/Android.bp
@@ -0,0 +1,68 @@
+cc_library_static {
+
+ srcs: [
+ "JWakeLock.cpp",
+ "GenericSource2.cpp",
+ "HTTPLiveSource2.cpp",
+ "NuPlayer2.cpp",
+ "NuPlayer2CCDecoder.cpp",
+ "NuPlayer2Decoder.cpp",
+ "NuPlayer2DecoderBase.cpp",
+ "NuPlayer2DecoderPassThrough.cpp",
+ "NuPlayer2Driver.cpp",
+ "NuPlayer2Drm.cpp",
+ "NuPlayer2Renderer.cpp",
+ "RTSPSource2.cpp",
+ ],
+
+ header_libs: [
+ "libmediaplayer2_headers",
+ "media_plugin_headers",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright",
+ "frameworks/av/media/libstagefright/httplive",
+ "frameworks/av/media/libstagefright/include",
+ "frameworks/av/media/libstagefright/mpeg2ts",
+ "frameworks/av/media/libstagefright/rtsp",
+ "frameworks/av/media/libstagefright/timedtext",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ product_variables: {
+ debuggable: {
+ cflags: [
+ "-DENABLE_STAGEFRIGHT_EXPERIMENTS",
+ ],
+ }
+ },
+
+ shared_libs: [
+ "libbinder",
+ "libui",
+ "libgui",
+ "libmedia",
+ "libmediadrm",
+ "libmediandk",
+ "libpowermanager",
+ ],
+
+ static_libs: [
+ "libmedia_helper",
+ ],
+
+ name: "libstagefright_nuplayer2",
+
+ sanitize: {
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
new file mode 100644
index 0000000..196b103
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
@@ -0,0 +1,1615 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "GenericSource2"
+
+#include "GenericSource2.h"
+#include "NuPlayer2Drm.h"
+
+#include "AnotherPacketSource.h"
+#include <binder/IServiceManager.h>
+#include <cutils/properties.h>
+#include <media/DataSource.h>
+#include <media/MediaBufferHolder.h>
+#include <media/IMediaExtractorService.h>
+#include <media/IMediaSource.h>
+#include <media/MediaHTTPService.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaSource.h>
+#include <media/NdkWrapper.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/InterfaceUtils.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaClock.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaExtractorFactory.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/NdkUtils.h>
+#include <media/stagefright/Utils.h>
+#include "../../libstagefright/include/NuCachedSource2.h"
+#include "../../libstagefright/include/HTTPBase.h"
+
+namespace android {
+
+static const int kInitialMarkMs = 5000; // 5secs
+
+//static const int kPausePlaybackMarkMs = 2000; // 2secs
+static const int kResumePlaybackMarkMs = 15000; // 15secs
+
+NuPlayer2::GenericSource2::GenericSource2(
+ const sp<AMessage> ¬ify,
+ uid_t uid,
+ const sp<MediaClock> &mediaClock)
+ : Source(notify),
+ mAudioTimeUs(0),
+ mAudioLastDequeueTimeUs(0),
+ mVideoTimeUs(0),
+ mVideoLastDequeueTimeUs(0),
+ mPrevBufferPercentage(-1),
+ mPollBufferingGeneration(0),
+ mSentPauseOnBuffering(false),
+ mAudioDataGeneration(0),
+ mVideoDataGeneration(0),
+ mFetchSubtitleDataGeneration(0),
+ mFetchTimedTextDataGeneration(0),
+ mDurationUs(-1ll),
+ mAudioIsVorbis(false),
+ mIsSecure(false),
+ mIsStreaming(false),
+ mUID(uid),
+ mMediaClock(mediaClock),
+ mFd(-1),
+ mBitrate(-1ll),
+ mPendingReadBufferTypes(0) {
+ ALOGV("GenericSource2");
+ CHECK(mediaClock != NULL);
+
+ mBufferingSettings.mInitialMarkMs = kInitialMarkMs;
+ mBufferingSettings.mResumePlaybackMarkMs = kResumePlaybackMarkMs;
+ resetDataSource();
+}
+
+void NuPlayer2::GenericSource2::resetDataSource() {
+ ALOGV("resetDataSource");
+
+ mHTTPService.clear();
+ mHttpSource.clear();
+ mDisconnected = false;
+ mUri.clear();
+ mUriHeaders.clear();
+ if (mFd >= 0) {
+ close(mFd);
+ mFd = -1;
+ }
+ mOffset = 0;
+ mLength = 0;
+ mStarted = false;
+ mPreparing = false;
+
+ mIsDrmProtected = false;
+ mIsDrmReleased = false;
+ mIsSecure = false;
+ mMimes.clear();
+}
+
+status_t NuPlayer2::GenericSource2::setDataSource(
+ const sp<MediaHTTPService> &httpService,
+ const char *url,
+ const KeyedVector<String8, String8> *headers) {
+ Mutex::Autolock _l(mLock);
+ ALOGV("setDataSource url: %s", url);
+
+ resetDataSource();
+
+ mHTTPService = httpService;
+ mUri = url;
+
+ if (headers) {
+ mUriHeaders = *headers;
+ }
+
+ // delay data source creation to prepareAsync() to avoid blocking
+ // the calling thread in setDataSource for any significant time.
+ return OK;
+}
+
+status_t NuPlayer2::GenericSource2::setDataSource(
+ int fd, int64_t offset, int64_t length) {
+ Mutex::Autolock _l(mLock);
+ ALOGV("setDataSource %d/%lld/%lld", fd, (long long)offset, (long long)length);
+
+ resetDataSource();
+
+ mFd = dup(fd);
+ mOffset = offset;
+ mLength = length;
+
+ // delay data source creation to prepareAsync() to avoid blocking
+ // the calling thread in setDataSource for any significant time.
+ return OK;
+}
+
+status_t NuPlayer2::GenericSource2::setDataSource(const sp<DataSource>& source) {
+ Mutex::Autolock _l(mLock);
+ ALOGV("setDataSource (source: %p)", source.get());
+
+ resetDataSource();
+ mDataSource = source;
+ return OK;
+}
+
+sp<MetaData> NuPlayer2::GenericSource2::getFileFormatMeta() const {
+ Mutex::Autolock _l(mLock);
+ return mFileMeta;
+}
+
+status_t NuPlayer2::GenericSource2::initFromDataSource() {
+ mExtractor = new AMediaExtractorWrapper(AMediaExtractor_new());
+ CHECK(mDataSource != NULL || mFd != -1);
+ sp<DataSource> dataSource = mDataSource;
+ const int fd = mFd;
+ const int64_t offset = mOffset;
+ const int64_t length = mLength;
+
+ mLock.unlock();
+ // This might take a long time if the data source is not reliable.
+ status_t err;
+ if (dataSource != nullptr) {
+ mDataSourceWrapper = new AMediaDataSourceWrapper(dataSource);
+ err = mExtractor->setDataSource(mDataSourceWrapper->getAMediaDataSource());
+ } else {
+ err = mExtractor->setDataSource(fd, offset, length);
+ }
+
+ if (err != OK) {
+ ALOGE("initFromDataSource, failed to create data source!");
+ mLock.lock();
+ return UNKNOWN_ERROR;
+ }
+
+ size_t numtracks = mExtractor->getTrackCount();
+ if (numtracks == 0) {
+ ALOGE("initFromDataSource, source has no track!");
+ mLock.lock();
+ return UNKNOWN_ERROR;
+ }
+
+ mLock.lock();
+ mFd = -1;
+ mDataSource = dataSource;
+ mFileMeta = convertMediaFormatWrapperToMetaData(mExtractor->getFormat());
+ if (mFileMeta != NULL) {
+ int64_t duration;
+ if (mFileMeta->findInt64(kKeyDuration, &duration)) {
+ mDurationUs = duration;
+ }
+ }
+
+ int32_t totalBitrate = 0;
+
+ mMimes.clear();
+
+ for (size_t i = 0; i < numtracks; ++i) {
+
+ sp<AMediaFormatWrapper> trackFormat = mExtractor->getTrackFormat(i);
+ if (trackFormat == NULL) {
+ ALOGE("no metadata for track %zu", i);
+ return UNKNOWN_ERROR;
+ }
+
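+ // Each track gets its own extractor over the same source so that tracks can
+ // be read and advanced independently.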
+ sp<AMediaExtractorWrapper> trackExtractor = new AMediaExtractorWrapper(AMediaExtractor_new());
+ if (mDataSourceWrapper != nullptr) {
+ err = trackExtractor->setDataSource(mDataSourceWrapper->getAMediaDataSource());
+ } else {
+ err = trackExtractor->setDataSource(fd, offset, length);
+ }
+
+ const char *mime;
+ sp<MetaData> meta = convertMediaFormatWrapperToMetaData(trackFormat);
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ ALOGV("initFromDataSource track[%zu]: %s", i, mime);
+
+ // Do the string compare immediately with "mime",
+ // we can't assume "mime" would stay valid after another
+ // extractor operation, some extractors might modify meta
+ // during getTrack() and make it invalid.
+ if (!strncasecmp(mime, "audio/", 6)) {
+ if (mAudioTrack.mExtractor == NULL) {
+ mAudioTrack.mIndex = i;
+ mAudioTrack.mExtractor = trackExtractor;
+ mAudioTrack.mExtractor->selectTrack(i);
+ mAudioTrack.mPackets = new AnotherPacketSource(meta);
+
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
+ mAudioIsVorbis = true;
+ } else {
+ mAudioIsVorbis = false;
+ }
+
+ mMimes.add(String8(mime));
+ }
+ } else if (!strncasecmp(mime, "video/", 6)) {
+ if (mVideoTrack.mExtractor == NULL) {
+ mVideoTrack.mIndex = i;
+ mVideoTrack.mExtractor = trackExtractor;
+ mVideoTrack.mExtractor->selectTrack(i);
+ mVideoTrack.mPackets = new AnotherPacketSource(meta);
+
+ // The video mime always goes at the front of mMimes.
+ mMimes.insertAt(String8(mime), 0);
+ }
+ }
+
+ mExtractors.push(trackExtractor);
+ int64_t durationUs;
+ if (meta->findInt64(kKeyDuration, &durationUs)) {
+ if (durationUs > mDurationUs) {
+ mDurationUs = durationUs;
+ }
+ }
+
+ int32_t bitrate;
+ if (totalBitrate >= 0 && meta->findInt32(kKeyBitRate, &bitrate)) {
+ totalBitrate += bitrate;
+ } else {
+ totalBitrate = -1;
+ }
+ }
+
+ ALOGV("initFromDataSource mExtractors.size(): %zu mIsSecure: %d mime[0]: %s", mExtractors.size(),
+ mIsSecure, (mMimes.isEmpty() ? "NONE" : mMimes[0].string()));
+
+ if (mExtractors.size() == 0) {
+ ALOGE("b/23705695");
+ return UNKNOWN_ERROR;
+ }
+
+ // Modular DRM: The return value doesn't affect source initialization.
+ (void)checkDrmInfo();
+
+ mBitrate = totalBitrate;
+
+ return OK;
+}
+
+status_t NuPlayer2::GenericSource2::getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) {
+ {
+ Mutex::Autolock _l(mLock);
+ *buffering = mBufferingSettings;
+ }
+
+ ALOGV("getBufferingSettings{%s}", buffering->toString().string());
+ return OK;
+}
+
+status_t NuPlayer2::GenericSource2::setBufferingSettings(const BufferingSettings& buffering) {
+ ALOGV("setBufferingSettings{%s}", buffering.toString().string());
+
+ Mutex::Autolock _l(mLock);
+ mBufferingSettings = buffering;
+ return OK;
+}
+
+int64_t NuPlayer2::GenericSource2::getLastReadPosition() {
+ if (mAudioTrack.mExtractor != NULL) {
+ return mAudioTimeUs;
+ } else if (mVideoTrack.mExtractor != NULL) {
+ return mVideoTimeUs;
+ } else {
+ return 0;
+ }
+}
+
+bool NuPlayer2::GenericSource2::isStreaming() const {
+ Mutex::Autolock _l(mLock);
+ return mIsStreaming;
+}
+
+NuPlayer2::GenericSource2::~GenericSource2() {
+ ALOGV("~GenericSource2");
+ if (mLooper != NULL) {
+ mLooper->unregisterHandler(id());
+ mLooper->stop();
+ }
+ if (mDataSource != NULL) {
+ mDataSource->close();
+ }
+ resetDataSource();
+}
+
+void NuPlayer2::GenericSource2::prepareAsync() {
+ Mutex::Autolock _l(mLock);
+ ALOGV("prepareAsync: (looper: %d)", (mLooper != NULL));
+
+ if (mLooper == NULL) {
+ mLooper = new ALooper;
+ mLooper->setName("generic");
+ mLooper->start(false, /* runOnCallingThread */
+ true, /* canCallJava */
+ PRIORITY_DEFAULT);
+
+ mLooper->registerHandler(this);
+ }
+
+ sp<AMessage> msg = new AMessage(kWhatPrepareAsync, this);
+ msg->post();
+}
+
+void NuPlayer2::GenericSource2::onPrepareAsync() {
+ ALOGV("onPrepareAsync: mDataSource: %d", (mDataSource != NULL));
+
+ // delayed data source creation
+ if (mDataSource == NULL) {
+ // Set to false first; if the extractor comes back as secure,
+ // it is set to true then.
+ mIsSecure = false;
+
+ if (!mUri.empty()) {
+ const char* uri = mUri.c_str();
+ String8 contentType;
+
+ if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
+ mHttpSource = DataSourceFactory::CreateMediaHTTP(mHTTPService);
+ if (mHttpSource == NULL) {
+ ALOGE("Failed to create http source!");
+ notifyPreparedAndCleanup(UNKNOWN_ERROR);
+ return;
+ }
+ }
+
+ mLock.unlock();
+ // This might take a long time if the connection has issues.
+ sp<DataSource> dataSource = DataSourceFactory::CreateFromURI(
+ mHTTPService, uri, &mUriHeaders, &contentType,
+ static_cast<HTTPBase *>(mHttpSource.get()));
+ mLock.lock();
+ if (!mDisconnected) {
+ mDataSource = dataSource;
+ }
+ }
+
+ if (mFd == -1 && mDataSource == NULL) {
+ ALOGE("Failed to create data source!");
+ notifyPreparedAndCleanup(UNKNOWN_ERROR);
+ return;
+ }
+ }
+
+ if (mDataSource != nullptr && mDataSource->flags() & DataSource::kIsCachingDataSource) {
+ mCachedSource = static_cast<NuCachedSource2 *>(mDataSource.get());
+ }
+
+ // For cached streaming cases, we need to wait for enough
+ // buffering before reporting prepared.
+ mIsStreaming = (mCachedSource != NULL);
+
+ // init extractor from data source
+ status_t err = initFromDataSource();
+
+ if (err != OK) {
+ ALOGE("Failed to init from data source!");
+ notifyPreparedAndCleanup(err);
+ return;
+ }
+
+ if (mVideoTrack.mExtractor != NULL) {
+ sp<MetaData> meta = getFormatMeta_l(false /* audio */);
+ sp<AMessage> msg = new AMessage;
+ err = convertMetaDataToMessage(meta, &msg);
+ if (err != OK) {
+ notifyPreparedAndCleanup(err);
+ return;
+ }
+ notifyVideoSizeChanged(msg);
+ }
+
+ notifyFlagsChanged(
+ // FLAG_SECURE will be known if/when prepareDrm is called by the app
+ // FLAG_PROTECTED will be known if/when prepareDrm is called by the app
+ FLAG_CAN_PAUSE |
+ FLAG_CAN_SEEK_BACKWARD |
+ FLAG_CAN_SEEK_FORWARD |
+ FLAG_CAN_SEEK);
+
+ finishPrepareAsync();
+
+ ALOGV("onPrepareAsync: Done");
+}
+
+void NuPlayer2::GenericSource2::finishPrepareAsync() {
+ ALOGV("finishPrepareAsync");
+
+ if (mIsStreaming) {
+ mCachedSource->resumeFetchingIfNecessary();
+ mPreparing = true;
+ schedulePollBuffering();
+ } else {
+ notifyPrepared();
+ }
+
+ if (mAudioTrack.mExtractor != NULL) {
+ postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
+ }
+
+ if (mVideoTrack.mExtractor != NULL) {
+ postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
+ }
+}
+
+void NuPlayer2::GenericSource2::notifyPreparedAndCleanup(status_t err) {
+ if (err != OK) {
+ mDataSource.clear();
+ mCachedSource.clear();
+ mHttpSource.clear();
+
+ mBitrate = -1;
+ mPrevBufferPercentage = -1;
+ ++mPollBufferingGeneration;
+ }
+ notifyPrepared(err);
+}
+
+void NuPlayer2::GenericSource2::start() {
+ Mutex::Autolock _l(mLock);
+ ALOGI("start");
+
+ if (mAudioTrack.mExtractor != NULL) {
+ postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
+ }
+
+ if (mVideoTrack.mExtractor != NULL) {
+ postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
+ }
+
+ mStarted = true;
+}
+
+void NuPlayer2::GenericSource2::stop() {
+ Mutex::Autolock _l(mLock);
+ mStarted = false;
+}
+
+void NuPlayer2::GenericSource2::pause() {
+ Mutex::Autolock _l(mLock);
+ mStarted = false;
+}
+
+void NuPlayer2::GenericSource2::resume() {
+ Mutex::Autolock _l(mLock);
+ mStarted = true;
+}
+
+void NuPlayer2::GenericSource2::disconnect() {
+ sp<DataSource> dataSource, httpSource;
+ {
+ Mutex::Autolock _l(mLock);
+ dataSource = mDataSource;
+ httpSource = mHttpSource;
+ mDisconnected = true;
+ }
+
+ if (dataSource != NULL) {
+ // disconnect data source
+ if (dataSource->flags() & DataSource::kIsCachingDataSource) {
+ static_cast<NuCachedSource2 *>(dataSource.get())->disconnect();
+ }
+ } else if (httpSource != NULL) {
+ static_cast<HTTPBase *>(httpSource.get())->disconnect();
+ }
+
+ mDataSourceWrapper = NULL;
+}
+
+status_t NuPlayer2::GenericSource2::feedMoreTSData() {
+ return OK;
+}
+
+void NuPlayer2::GenericSource2::sendCacheStats() {
+ int32_t kbps = 0;
+ status_t err = UNKNOWN_ERROR;
+
+ if (mCachedSource != NULL) {
+ err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
+ }
+
+ if (err == OK) {
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatCacheStats);
+ notify->setInt32("bandwidth", kbps);
+ notify->post();
+ }
+}
+
+void NuPlayer2::GenericSource2::onMessageReceived(const sp<AMessage> &msg) {
+ Mutex::Autolock _l(mLock);
+ switch (msg->what()) {
+ case kWhatPrepareAsync:
+ {
+ onPrepareAsync();
+ break;
+ }
+ case kWhatFetchSubtitleData:
+ {
+ fetchTextData(kWhatSendSubtitleData, MEDIA_TRACK_TYPE_SUBTITLE,
+ mFetchSubtitleDataGeneration, mSubtitleTrack.mPackets, msg);
+ break;
+ }
+
+ case kWhatFetchTimedTextData:
+ {
+ fetchTextData(kWhatSendTimedTextData, MEDIA_TRACK_TYPE_TIMEDTEXT,
+ mFetchTimedTextDataGeneration, mTimedTextTrack.mPackets, msg);
+ break;
+ }
+
+ case kWhatSendSubtitleData:
+ {
+ sendTextData(kWhatSubtitleData, MEDIA_TRACK_TYPE_SUBTITLE,
+ mFetchSubtitleDataGeneration, mSubtitleTrack.mPackets, msg);
+ break;
+ }
+
+ case kWhatSendGlobalTimedTextData:
+ {
+ sendGlobalTextData(kWhatTimedTextData, mFetchTimedTextDataGeneration, msg);
+ break;
+ }
+ case kWhatSendTimedTextData:
+ {
+ sendTextData(kWhatTimedTextData, MEDIA_TRACK_TYPE_TIMEDTEXT,
+ mFetchTimedTextDataGeneration, mTimedTextTrack.mPackets, msg);
+ break;
+ }
+
+ case kWhatChangeAVSource:
+ {
+ int32_t trackIndex;
+ CHECK(msg->findInt32("trackIndex", &trackIndex));
+ const sp<AMediaExtractorWrapper> extractor = mExtractors.itemAt(trackIndex);
+
+ Track* track;
+ AString mime;
+ media_track_type trackType, counterpartType;
+ sp<AMediaFormatWrapper> format = extractor->getTrackFormat(trackIndex);
+ format->getString(AMEDIAFORMAT_KEY_MIME, &mime);
+ if (!strncasecmp(mime.c_str(), "audio/", 6)) {
+ track = &mAudioTrack;
+ trackType = MEDIA_TRACK_TYPE_AUDIO;
+ counterpartType = MEDIA_TRACK_TYPE_VIDEO;
+ } else {
+ CHECK(!strncasecmp(mime.c_str(), "video/", 6));
+ track = &mVideoTrack;
+ trackType = MEDIA_TRACK_TYPE_VIDEO;
+ counterpartType = MEDIA_TRACK_TYPE_AUDIO;
+ }
+
+ track->mExtractor = extractor;
+ track->mExtractor->selectSingleTrack(trackIndex);
+ track->mIndex = trackIndex;
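+ // Bump both generations so that any readBuffer() pass still working against
+ // the old extractor discards its results (see the check in readBuffer()).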
+ ++mAudioDataGeneration;
+ ++mVideoDataGeneration;
+
+ int64_t timeUs, actualTimeUs;
+ const bool formatChange = true;
+ if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+ timeUs = mAudioLastDequeueTimeUs;
+ } else {
+ timeUs = mVideoLastDequeueTimeUs;
+ }
+ readBuffer(trackType, timeUs, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ &actualTimeUs, formatChange);
+ readBuffer(counterpartType, -1, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ NULL, !formatChange);
+ ALOGV("timeUs %lld actualTimeUs %lld", (long long)timeUs, (long long)actualTimeUs);
+
+ break;
+ }
+
+ case kWhatSeek:
+ {
+ onSeek(msg);
+ break;
+ }
+
+ case kWhatReadBuffer:
+ {
+ onReadBuffer(msg);
+ break;
+ }
+
+ case kWhatPollBuffering:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation == mPollBufferingGeneration) {
+ onPollBuffering();
+ }
+ break;
+ }
+
+ default:
+ Source::onMessageReceived(msg);
+ break;
+ }
+}
+
+void NuPlayer2::GenericSource2::fetchTextData(
+ uint32_t sendWhat,
+ media_track_type type,
+ int32_t curGen,
+ const sp<AnotherPacketSource>& packets,
+ const sp<AMessage>& msg) {
+ int32_t msgGeneration;
+ CHECK(msg->findInt32("generation", &msgGeneration));
+ if (msgGeneration != curGen) {
+ // stale
+ return;
+ }
+
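+ // Nothing to fetch if a text buffer is already queued for this track.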
+ int32_t avail;
+ if (packets->hasBufferAvailable(&avail)) {
+ return;
+ }
+
+ int64_t timeUs;
+ CHECK(msg->findInt64("timeUs", &timeUs));
+
+ int64_t subTimeUs = 0;
+ readBuffer(type, timeUs, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */, &subTimeUs);
+
+ status_t eosResult;
+ if (!packets->hasBufferAvailable(&eosResult)) {
+ return;
+ }
+
+ if (msg->what() == kWhatFetchSubtitleData) {
+ subTimeUs -= 1000000ll; // send subtitle data one second earlier
+ }
+ sp<AMessage> msg2 = new AMessage(sendWhat, this);
+ msg2->setInt32("generation", msgGeneration);
+ mMediaClock->addTimer(msg2, subTimeUs);
+}
+
+void NuPlayer2::GenericSource2::sendTextData(
+ uint32_t what,
+ media_track_type type,
+ int32_t curGen,
+ const sp<AnotherPacketSource>& packets,
+ const sp<AMessage>& msg) {
+ int32_t msgGeneration;
+ CHECK(msg->findInt32("generation", &msgGeneration));
+ if (msgGeneration != curGen) {
+ // stale
+ return;
+ }
+
+ int64_t subTimeUs;
+ if (packets->nextBufferTime(&subTimeUs) != OK) {
+ return;
+ }
+
+ int64_t nextSubTimeUs;
+ readBuffer(type, -1, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */, &nextSubTimeUs);
+
+ sp<ABuffer> buffer;
+ status_t dequeueStatus = packets->dequeueAccessUnit(&buffer);
+ if (dequeueStatus == OK) {
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", what);
+ notify->setBuffer("buffer", buffer);
+ notify->post();
+
+ if (msg->what() == kWhatSendSubtitleData) {
+ nextSubTimeUs -= 1000000ll; // send subtitle data one second earlier
+ }
+ mMediaClock->addTimer(msg, nextSubTimeUs);
+ }
+}
+
+void NuPlayer2::GenericSource2::sendGlobalTextData(
+ uint32_t what,
+ int32_t curGen,
+ sp<AMessage> msg) {
+ int32_t msgGeneration;
+ CHECK(msg->findInt32("generation", &msgGeneration));
+ if (msgGeneration != curGen) {
+ // stale
+ return;
+ }
+
+ void *data = NULL;
+ size_t size = 0;
+ if (mTimedTextTrack.mExtractor->getTrackFormat(mTimedTextTrack.mIndex)->getBuffer(
+ "text", &data, &size)) {
+ mGlobalTimedText = new ABuffer(size);
+ if (mGlobalTimedText->data()) {
+ memcpy(mGlobalTimedText->data(), data, size);
+ sp<AMessage> globalMeta = mGlobalTimedText->meta();
+ globalMeta->setInt64("timeUs", 0);
+ globalMeta->setString("mime", MEDIA_MIMETYPE_TEXT_3GPP);
+ globalMeta->setInt32("global", 1);
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", what);
+ notify->setBuffer("buffer", mGlobalTimedText);
+ notify->post();
+ }
+ }
+}
+
+sp<AMessage> NuPlayer2::GenericSource2::getFormat(bool audio) {
+ Mutex::Autolock _l(mLock);
+ return getFormat_l(audio);
+}
+
+sp<MetaData> NuPlayer2::GenericSource2::getFormatMeta(bool audio) {
+ Mutex::Autolock _l(mLock);
+ return getFormatMeta_l(audio);
+}
+
+sp<AMessage> NuPlayer2::GenericSource2::getFormat_l(bool audio) {
+ sp<AMediaExtractorWrapper> extractor = audio ? mAudioTrack.mExtractor : mVideoTrack.mExtractor;
+ size_t trackIndex = audio ? mAudioTrack.mIndex : mVideoTrack.mIndex;
+
+ if (extractor == NULL) {
+ return NULL;
+ }
+
+ return extractor->getTrackFormat(trackIndex)->toAMessage();
+}
+
+sp<MetaData> NuPlayer2::GenericSource2::getFormatMeta_l(bool audio) {
+ sp<AMediaExtractorWrapper> extractor = audio ? mAudioTrack.mExtractor : mVideoTrack.mExtractor;
+ size_t trackIndex = audio ? mAudioTrack.mIndex : mVideoTrack.mIndex;
+
+ if (extractor == NULL) {
+ return NULL;
+ }
+
+ return convertMediaFormatWrapperToMetaData(extractor->getTrackFormat(trackIndex));
+}
+
+status_t NuPlayer2::GenericSource2::dequeueAccessUnit(
+ bool audio, sp<ABuffer> *accessUnit) {
+ Mutex::Autolock _l(mLock);
+ // If we have gone through a stop/releaseDrm sequence, we no longer send down any buffer b/c
+ // the codec's crypto object has gone away (b/37960096).
+ // Note: This will be unnecessary when stop() changes behavior and releases codec (b/35248283).
+ if (!mStarted && mIsDrmReleased) {
+ return -EWOULDBLOCK;
+ }
+
+ Track *track = audio ? &mAudioTrack : &mVideoTrack;
+
+ if (track->mExtractor == NULL) {
+ return -EWOULDBLOCK;
+ }
+
+ status_t finalResult;
+ if (!track->mPackets->hasBufferAvailable(&finalResult)) {
+ if (finalResult == OK) {
+ postReadBuffer(
+ audio ? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
+ return -EWOULDBLOCK;
+ }
+ return finalResult;
+ }
+
+ status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
+
+ // start pulling in more buffers if cache is running low
+ // so that decoder has less chance of being starved
+ if (!mIsStreaming) {
+ if (track->mPackets->getAvailableBufferCount(&finalResult) < 2) {
+ postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
+ }
+ } else {
+ int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);
+ // TODO: maxRebufferingMarkMs could be larger than
+ // mBufferingSettings.mResumePlaybackMarkMs
+ int64_t restartBufferingMarkUs =
+ mBufferingSettings.mResumePlaybackMarkMs * 1000ll / 2;
+ if (finalResult == OK) {
+ if (durationUs < restartBufferingMarkUs) {
+ postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
+ }
+ if (track->mPackets->getAvailableBufferCount(&finalResult) < 2
+ && !mSentPauseOnBuffering && !mPreparing) {
+ mCachedSource->resumeFetchingIfNecessary();
+ sendCacheStats();
+ mSentPauseOnBuffering = true;
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatPauseOnBufferingStart);
+ notify->post();
+ }
+ }
+ }
+
+ if (result != OK) {
+ if (mSubtitleTrack.mExtractor != NULL) {
+ mSubtitleTrack.mPackets->clear();
+ mFetchSubtitleDataGeneration++;
+ }
+ if (mTimedTextTrack.mExtractor != NULL) {
+ mTimedTextTrack.mPackets->clear();
+ mFetchTimedTextDataGeneration++;
+ }
+ return result;
+ }
+
+ int64_t timeUs;
+ status_t eosResult; // ignored
+ CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
+ if (audio) {
+ mAudioLastDequeueTimeUs = timeUs;
+ } else {
+ mVideoLastDequeueTimeUs = timeUs;
+ }
+
+ if (mSubtitleTrack.mExtractor != NULL
+ && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
+ sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, this);
+ msg->setInt64("timeUs", timeUs);
+ msg->setInt32("generation", mFetchSubtitleDataGeneration);
+ msg->post();
+ }
+
+ if (mTimedTextTrack.mExtractor != NULL
+ && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
+ sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, this);
+ msg->setInt64("timeUs", timeUs);
+ msg->setInt32("generation", mFetchTimedTextDataGeneration);
+ msg->post();
+ }
+
+ return result;
+}
+
+status_t NuPlayer2::GenericSource2::getDuration(int64_t *durationUs) {
+ Mutex::Autolock _l(mLock);
+ *durationUs = mDurationUs;
+ return OK;
+}
+
+size_t NuPlayer2::GenericSource2::getTrackCount() const {
+ Mutex::Autolock _l(mLock);
+ return mExtractors.size();
+}
+
+sp<AMessage> NuPlayer2::GenericSource2::getTrackInfo(size_t trackIndex) const {
+ Mutex::Autolock _l(mLock);
+ size_t trackCount = mExtractors.size();
+ if (trackIndex >= trackCount) {
+ return NULL;
+ }
+
+ sp<AMessage> format = mExtractors.itemAt(trackIndex)->getTrackFormat(trackIndex)->toAMessage();
+ if (format == NULL) {
+ ALOGE("no metadata for track %zu", trackIndex);
+ return NULL;
+ }
+
+ AString mime;
+ CHECK(format->findString(AMEDIAFORMAT_KEY_MIME, &mime));
+
+ int32_t trackType;
+ if (!strncasecmp(mime.c_str(), "video/", 6)) {
+ trackType = MEDIA_TRACK_TYPE_VIDEO;
+ } else if (!strncasecmp(mime.c_str(), "audio/", 6)) {
+ trackType = MEDIA_TRACK_TYPE_AUDIO;
+ } else if (!strcasecmp(mime.c_str(), MEDIA_MIMETYPE_TEXT_3GPP)) {
+ trackType = MEDIA_TRACK_TYPE_TIMEDTEXT;
+ } else {
+ trackType = MEDIA_TRACK_TYPE_UNKNOWN;
+ }
+ format->setInt32("type", trackType);
+
+ AString lang;
+ if (!format->findString("language", &lang)) {
+ format->setString("language", "und");
+ }
+
+ if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
+ int32_t isAutoselect = 1, isDefault = 0, isForced = 0;
+ format->findInt32(AMEDIAFORMAT_KEY_IS_AUTOSELECT, &isAutoselect);
+ format->findInt32(AMEDIAFORMAT_KEY_IS_DEFAULT, &isDefault);
+ format->findInt32(AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE, &isForced);
+
+ format->setInt32("auto", !!isAutoselect);
+ format->setInt32("default", !!isDefault);
+ format->setInt32("forced", !!isForced);
+ }
+
+ return format;
+}
+
+ssize_t NuPlayer2::GenericSource2::getSelectedTrack(media_track_type type) const {
+ Mutex::Autolock _l(mLock);
+ const Track *track = NULL;
+ switch (type) {
+ case MEDIA_TRACK_TYPE_VIDEO:
+ track = &mVideoTrack;
+ break;
+ case MEDIA_TRACK_TYPE_AUDIO:
+ track = &mAudioTrack;
+ break;
+ case MEDIA_TRACK_TYPE_TIMEDTEXT:
+ track = &mTimedTextTrack;
+ break;
+ case MEDIA_TRACK_TYPE_SUBTITLE:
+ track = &mSubtitleTrack;
+ break;
+ default:
+ break;
+ }
+
+ if (track != NULL && track->mExtractor != NULL) {
+ return track->mIndex;
+ }
+
+ return -1;
+}
+
+status_t NuPlayer2::GenericSource2::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
+ Mutex::Autolock _l(mLock);
+ ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
+
+ if (trackIndex >= mExtractors.size()) {
+ return BAD_INDEX;
+ }
+
+ if (!select) {
+ Track* track = NULL;
+ if (mSubtitleTrack.mExtractor != NULL && trackIndex == mSubtitleTrack.mIndex) {
+ track = &mSubtitleTrack;
+ mFetchSubtitleDataGeneration++;
+ } else if (mTimedTextTrack.mExtractor != NULL && trackIndex == mTimedTextTrack.mIndex) {
+ track = &mTimedTextTrack;
+ mFetchTimedTextDataGeneration++;
+ }
+ if (track == NULL) {
+ return INVALID_OPERATION;
+ }
+ track->mExtractor = NULL;
+ track->mPackets->clear();
+ return OK;
+ }
+
+ const sp<AMediaExtractorWrapper> extractor = mExtractors.itemAt(trackIndex);
+ sp<MetaData> meta = convertMediaFormatWrapperToMetaData(extractor->getTrackFormat(trackIndex));
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+ if (!strncasecmp(mime, "text/", 5)) {
+ bool isSubtitle = strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP);
+ Track *track = isSubtitle ? &mSubtitleTrack : &mTimedTextTrack;
+ if (track->mExtractor != NULL && track->mIndex == trackIndex) {
+ return OK;
+ }
+ track->mIndex = trackIndex;
+ track->mExtractor = mExtractors.itemAt(trackIndex);
+ track->mExtractor->selectSingleTrack(trackIndex);
+ if (track->mPackets == NULL) {
+ track->mPackets = new AnotherPacketSource(meta);
+ } else {
+ track->mPackets->clear();
+ track->mPackets->setFormat(meta);
+ }
+
+ if (isSubtitle) {
+ mFetchSubtitleDataGeneration++;
+ } else {
+ mFetchTimedTextDataGeneration++;
+ }
+
+ status_t eosResult; // ignored
+ if (mSubtitleTrack.mExtractor != NULL
+ && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
+ sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, this);
+ msg->setInt64("timeUs", timeUs);
+ msg->setInt32("generation", mFetchSubtitleDataGeneration);
+ msg->post();
+ }
+
+ sp<AMessage> msg2 = new AMessage(kWhatSendGlobalTimedTextData, this);
+ msg2->setInt32("generation", mFetchTimedTextDataGeneration);
+ msg2->post();
+
+ if (mTimedTextTrack.mExtractor != NULL
+ && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
+ sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, this);
+ msg->setInt64("timeUs", timeUs);
+ msg->setInt32("generation", mFetchTimedTextDataGeneration);
+ msg->post();
+ }
+
+ return OK;
+ } else if (!strncasecmp(mime, "audio/", 6) || !strncasecmp(mime, "video/", 6)) {
+ bool audio = !strncasecmp(mime, "audio/", 6);
+ Track *track = audio ? &mAudioTrack : &mVideoTrack;
+ if (track->mExtractor != NULL && track->mIndex == trackIndex) {
+ return OK;
+ }
+
+ sp<AMessage> msg = new AMessage(kWhatChangeAVSource, this);
+ msg->setInt32("trackIndex", trackIndex);
+ msg->post();
+ return OK;
+ }
+
+ return INVALID_OPERATION;
+}
+
+status_t NuPlayer2::GenericSource2::seekTo(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
+ ALOGV("seekTo: %lld, %d", (long long)seekTimeUs, mode);
+ sp<AMessage> msg = new AMessage(kWhatSeek, this);
+ msg->setInt64("seekTimeUs", seekTimeUs);
+ msg->setInt32("mode", mode);
+
+ // Need to call readBuffer on |mLooper| to ensure the calls to
+ // IMediaSource::read* are serialized. Note that IMediaSource::read*
+ // is called without |mLock| acquired and MediaSource is not thread safe.
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+
+ return err;
+}
+
+void NuPlayer2::GenericSource2::onSeek(const sp<AMessage>& msg) {
+ int64_t seekTimeUs;
+ int32_t mode;
+ CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+ CHECK(msg->findInt32("mode", &mode));
+
+ sp<AMessage> response = new AMessage;
+ status_t err = doSeek(seekTimeUs, (MediaPlayer2SeekMode)mode);
+ response->setInt32("err", err);
+
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+status_t NuPlayer2::GenericSource2::doSeek(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
+ if (mVideoTrack.mExtractor != NULL) {
+ ++mVideoDataGeneration;
+
+ int64_t actualTimeUs;
+ readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs);
+
+ if (mode != MediaPlayer2SeekMode::SEEK_CLOSEST) {
+ seekTimeUs = actualTimeUs;
+ }
+ mVideoLastDequeueTimeUs = actualTimeUs;
+ }
+
+ if (mAudioTrack.mExtractor != NULL) {
+ ++mAudioDataGeneration;
+ readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs, MediaPlayer2SeekMode::SEEK_CLOSEST);
+ mAudioLastDequeueTimeUs = seekTimeUs;
+ }
+
+ if (mSubtitleTrack.mExtractor != NULL) {
+ mSubtitleTrack.mPackets->clear();
+ mFetchSubtitleDataGeneration++;
+ }
+
+ if (mTimedTextTrack.mExtractor != NULL) {
+ mTimedTextTrack.mPackets->clear();
+ mFetchTimedTextDataGeneration++;
+ }
+
+ ++mPollBufferingGeneration;
+ schedulePollBuffering();
+ return OK;
+}
+
+sp<ABuffer> NuPlayer2::GenericSource2::mediaBufferToABuffer(
+ MediaBufferBase* mb,
+ media_track_type trackType) {
+ bool audio = trackType == MEDIA_TRACK_TYPE_AUDIO;
+ size_t outLength = mb->range_length();
+
+ if (audio && mAudioIsVorbis) {
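+ // Reserve room for an extra int32_t (the page's valid-sample count),
+ // which is appended after the payload below.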
+ outLength += sizeof(int32_t);
+ }
+
+ sp<ABuffer> ab;
+
+ if (mIsDrmProtected) {
+ // Modular DRM
+ // Enabled for both video/audio so 1) media buffer is reused without extra copying
+ // 2) meta data can be retrieved in onInputBufferFetched for calling queueSecureInputBuffer.
+
+ // data is already provided in the buffer
+ ab = new ABuffer(NULL, mb->range_length());
+ ab->meta()->setObject("mediaBufferHolder", new MediaBufferHolder(mb));
+
+ // Modular DRM: Required b/c of the add_ref below.
+ // If ref>0, there must be an observer, or it'll crash at release().
+ // TODO: MediaBuffer might need to be revised to ease such need.
+ mb->setObserver(this);
+ // setMediaBufferBase() interestingly doesn't increment the ref count on its own.
+ // Extra increment, since we want to keep mb alive and attached to ab beyond this
+ // function call; this counters the effect of mb->release() towards the end.
+ mb->add_ref();
+
+ } else {
+ ab = new ABuffer(outLength);
+ memcpy(ab->data(),
+ (const uint8_t *)mb->data() + mb->range_offset(),
+ mb->range_length());
+ }
+
+ if (audio && mAudioIsVorbis) {
+ int32_t numPageSamples;
+ if (!mb->meta_data().findInt32(kKeyValidSamples, &numPageSamples)) {
+ numPageSamples = -1;
+ }
+
+ uint8_t* abEnd = ab->data() + mb->range_length();
+ memcpy(abEnd, &numPageSamples, sizeof(numPageSamples));
+ }
+
+ sp<AMessage> meta = ab->meta();
+
+ int64_t timeUs;
+ CHECK(mb->meta_data().findInt64(kKeyTime, &timeUs));
+ meta->setInt64("timeUs", timeUs);
+
+ if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+ int32_t layerId;
+ if (mb->meta_data().findInt32(kKeyTemporalLayerId, &layerId)) {
+ meta->setInt32("temporal-layer-id", layerId);
+ }
+ }
+
+ if (trackType == MEDIA_TRACK_TYPE_TIMEDTEXT) {
+ AString mime;
+ sp<AMediaExtractorWrapper> extractor = mTimedTextTrack.mExtractor;
+ size_t trackIndex = mTimedTextTrack.mIndex;
+ CHECK(extractor != NULL
+ && extractor->getTrackFormat(trackIndex)->getString(AMEDIAFORMAT_KEY_MIME, &mime));
+ meta->setString("mime", mime.c_str());
+ }
+
+ int64_t durationUs;
+ if (mb->meta_data().findInt64(kKeyDuration, &durationUs)) {
+ meta->setInt64("durationUs", durationUs);
+ }
+
+ if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
+ meta->setInt32(AMEDIAFORMAT_KEY_TRACK_INDEX, mSubtitleTrack.mIndex);
+ }
+
+ uint32_t dataType; // unused
+ const void *seiData;
+ size_t seiLength;
+ if (mb->meta_data().findData(kKeySEI, &dataType, &seiData, &seiLength)) {
+ sp<ABuffer> sei = ABuffer::CreateAsCopy(seiData, seiLength);
+ meta->setBuffer("sei", sei);
+ }
+
+ const void *mpegUserDataPointer;
+ size_t mpegUserDataLength;
+ if (mb->meta_data().findData(
+ kKeyMpegUserData, &dataType, &mpegUserDataPointer, &mpegUserDataLength)) {
+ sp<ABuffer> mpegUserData = ABuffer::CreateAsCopy(mpegUserDataPointer, mpegUserDataLength);
+ meta->setBuffer(AMEDIAFORMAT_KEY_MPEG_USER_DATA, mpegUserData);
+ }
+
+ mb->release();
+ mb = NULL;
+
+ return ab;
+}
+
+int32_t NuPlayer2::GenericSource2::getDataGeneration(media_track_type type) const {
+ int32_t generation = -1;
+ switch (type) {
+ case MEDIA_TRACK_TYPE_VIDEO:
+ generation = mVideoDataGeneration;
+ break;
+ case MEDIA_TRACK_TYPE_AUDIO:
+ generation = mAudioDataGeneration;
+ break;
+ case MEDIA_TRACK_TYPE_TIMEDTEXT:
+ generation = mFetchTimedTextDataGeneration;
+ break;
+ case MEDIA_TRACK_TYPE_SUBTITLE:
+ generation = mFetchSubtitleDataGeneration;
+ break;
+ default:
+ break;
+ }
+
+ return generation;
+}
+
+void NuPlayer2::GenericSource2::postReadBuffer(media_track_type trackType) {
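+ // Coalesce requests: post at most one kWhatReadBuffer message per track type
+ // until onReadBuffer() clears the pending bit.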
+ if ((mPendingReadBufferTypes & (1 << trackType)) == 0) {
+ mPendingReadBufferTypes |= (1 << trackType);
+ sp<AMessage> msg = new AMessage(kWhatReadBuffer, this);
+ msg->setInt32("trackType", trackType);
+ msg->post();
+ }
+}
+
+void NuPlayer2::GenericSource2::onReadBuffer(const sp<AMessage>& msg) {
+ int32_t tmpType;
+ CHECK(msg->findInt32("trackType", &tmpType));
+ media_track_type trackType = (media_track_type)tmpType;
+ mPendingReadBufferTypes &= ~(1 << trackType);
+ readBuffer(trackType);
+}
+
+void NuPlayer2::GenericSource2::readBuffer(
+ media_track_type trackType, int64_t seekTimeUs, MediaPlayer2SeekMode mode,
+ int64_t *actualTimeUs, bool formatChange) {
+ Track *track;
+ size_t maxBuffers = 1;
+ switch (trackType) {
+ case MEDIA_TRACK_TYPE_VIDEO:
+ track = &mVideoTrack;
+ maxBuffers = 8; // too large a number may slow down seeks
+ break;
+ case MEDIA_TRACK_TYPE_AUDIO:
+ track = &mAudioTrack;
+ maxBuffers = 64;
+ break;
+ case MEDIA_TRACK_TYPE_SUBTITLE:
+ track = &mSubtitleTrack;
+ break;
+ case MEDIA_TRACK_TYPE_TIMEDTEXT:
+ track = &mTimedTextTrack;
+ break;
+ default:
+ TRESPASS();
+ }
+
+ if (track->mExtractor == NULL) {
+ return;
+ }
+
+ if (actualTimeUs) {
+ *actualTimeUs = seekTimeUs;
+ }
+
+ bool seeking = false;
+ sp<AMediaExtractorWrapper> extractor = track->mExtractor;
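+ // A non-negative seekTimeUs means this read services a seek; the timestamp of
+ // the first sample read below is reported back through *actualTimeUs.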
+ if (seekTimeUs >= 0) {
+ extractor->seekTo(seekTimeUs, mode);
+ seeking = true;
+ }
+
+ int32_t generation = getDataGeneration(trackType);
+ for (size_t numBuffers = 0; numBuffers < maxBuffers; ) {
+ Vector<sp<ABuffer> > aBuffers;
+
+ mLock.unlock();
+
+ sp<AMediaFormatWrapper> format;
+ ssize_t sampleSize = -1;
+ status_t err = extractor->getSampleFormat(format);
+ if (err == OK) {
+ sampleSize = extractor->getSampleSize();
+ }
+
+ if (err != OK || sampleSize < 0) {
+ mLock.lock();
+ track->mPackets->signalEOS(err != OK ? err : ERROR_END_OF_STREAM);
+ break;
+ }
+
+ sp<ABuffer> abuf = new ABuffer(sampleSize);
+ sampleSize = extractor->readSampleData(abuf);
+ mLock.lock();
+
+ // The track may have been changed while the lock was dropped above; if so, discard this read.
+ if (generation != getDataGeneration(trackType)) {
+ break;
+ }
+
+ int64_t timeUs = extractor->getSampleTime();
+ if (timeUs < 0) {
+ track->mPackets->signalEOS(ERROR_MALFORMED);
+ break;
+ }
+
+ sp<AMessage> meta = abuf->meta();
+ format->writeToAMessage(meta);
+ meta->setInt64("timeUs", timeUs);
+ if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+ mAudioTimeUs = timeUs;
+ } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+ mVideoTimeUs = timeUs;
+ }
+
+ queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
+
+ if (numBuffers == 0 && actualTimeUs != nullptr) {
+ *actualTimeUs = timeUs;
+ }
+ if (seeking) {
+ if (meta != nullptr && mode == MediaPlayer2SeekMode::SEEK_CLOSEST
+ && seekTimeUs > timeUs) {
+ sp<AMessage> extra = new AMessage;
+ extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
+ meta->setMessage("extra", extra);
+ }
+ }
+
+ track->mPackets->queueAccessUnit(abuf);
+ formatChange = false;
+ seeking = false;
+ ++numBuffers;
+ extractor->advance();
+ }
+
+ if (mIsStreaming
+ && (trackType == MEDIA_TRACK_TYPE_VIDEO || trackType == MEDIA_TRACK_TYPE_AUDIO)) {
+ status_t finalResult;
+ int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);
+
+ // TODO: maxRebufferingMarkMs could be larger than
+ // mBufferingSettings.mResumePlaybackMarkMs
+ int64_t markUs = (mPreparing ? mBufferingSettings.mInitialMarkMs
+ : mBufferingSettings.mResumePlaybackMarkMs) * 1000ll;
+ if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
+ if (mPreparing || mSentPauseOnBuffering) {
+ Track *counterTrack =
+ (trackType == MEDIA_TRACK_TYPE_VIDEO ? &mAudioTrack : &mVideoTrack);
+ if (counterTrack->mExtractor != NULL) {
+ durationUs = counterTrack->mPackets->getBufferedDurationUs(&finalResult);
+ }
+ if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
+ if (mPreparing) {
+ notifyPrepared();
+ mPreparing = false;
+ } else {
+ sendCacheStats();
+ mSentPauseOnBuffering = false;
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatResumeOnBufferingEnd);
+ notify->post();
+ }
+ }
+ }
+ return;
+ }
+
+ postReadBuffer(trackType);
+ }
+}
+
+void NuPlayer2::GenericSource2::queueDiscontinuityIfNeeded(
+ bool seeking, bool formatChange, media_track_type trackType, Track *track) {
+ // formatChange && seeking: track whose source is changed during selection
+ // formatChange && !seeking: track whose source is not changed during selection
+ // !formatChange: normal seek
+ if ((seeking || formatChange)
+ && (trackType == MEDIA_TRACK_TYPE_AUDIO
+ || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
+ ATSParser::DiscontinuityType type = (formatChange && seeking)
+ ? ATSParser::DISCONTINUITY_FORMATCHANGE
+ : ATSParser::DISCONTINUITY_NONE;
+ track->mPackets->queueDiscontinuity(type, NULL /* extra */, true /* discard */);
+ }
+}
+
+void NuPlayer2::GenericSource2::notifyBufferingUpdate(int32_t percentage) {
+ // Buffering percent could go backward as it's estimated from remaining
+ // data and last access time. This could cause the buffering position
+ // drawn on media control to jitter slightly. Remember previously reported
+ // percentage and don't allow it to go backward.
+ if (percentage < mPrevBufferPercentage) {
+ percentage = mPrevBufferPercentage;
+ } else if (percentage > 100) {
+ percentage = 100;
+ }
+
+ mPrevBufferPercentage = percentage;
+
+ ALOGV("notifyBufferingUpdate: buffering %d%%", percentage);
+
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatBufferingUpdate);
+ notify->setInt32("percentage", percentage);
+ notify->post();
+}
+
+void NuPlayer2::GenericSource2::schedulePollBuffering() {
+ sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
+ msg->setInt32("generation", mPollBufferingGeneration);
+    // Poll the buffering status once per second.
+ msg->post(1000000ll);
+}
+
+void NuPlayer2::GenericSource2::onPollBuffering() {
+ status_t finalStatus = UNKNOWN_ERROR;
+ int64_t cachedDurationUs = -1ll;
+ ssize_t cachedDataRemaining = -1;
+
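+    // Estimate the cached duration from the bytes remaining in the cache and the
+    // stream bitrate (derived from file size and duration when available).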
+ if (mCachedSource != NULL) {
+ cachedDataRemaining = mCachedSource->approxDataRemaining(&finalStatus);
+
+ if (finalStatus == OK) {
+ off64_t size;
+ int64_t bitrate = 0ll;
+ if (mDurationUs > 0 && mCachedSource->getSize(&size) == OK) {
+ // |bitrate| uses bits/second unit, while size is number of bytes.
+ bitrate = size * 8000000ll / mDurationUs;
+ } else if (mBitrate > 0) {
+ bitrate = mBitrate;
+ }
+ if (bitrate > 0) {
+ cachedDurationUs = cachedDataRemaining * 8000000ll / bitrate;
+ }
+ }
+ }
+
+ if (finalStatus != OK) {
+ ALOGV("onPollBuffering: EOS (finalStatus = %d)", finalStatus);
+
+ if (finalStatus == ERROR_END_OF_STREAM) {
+ notifyBufferingUpdate(100);
+ }
+
+ return;
+ }
+
+ if (cachedDurationUs >= 0ll) {
+ if (mDurationUs > 0ll) {
+ int64_t cachedPosUs = getLastReadPosition() + cachedDurationUs;
+ int percentage = 100.0 * cachedPosUs / mDurationUs;
+ if (percentage > 100) {
+ percentage = 100;
+ }
+
+ notifyBufferingUpdate(percentage);
+ }
+
+ ALOGV("onPollBuffering: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
+ }
+
+ schedulePollBuffering();
+}
+
+// Modular DRM
+status_t NuPlayer2::GenericSource2::prepareDrm(
+ const uint8_t uuid[16],
+ const Vector<uint8_t> &drmSessionId,
+ sp<AMediaCryptoWrapper> *outCrypto) {
+ Mutex::Autolock _l(mLock);
+ ALOGV("prepareDrm");
+
+ mIsDrmProtected = false;
+ mIsDrmReleased = false;
+ mIsSecure = false;
+
+ status_t status = OK;
+ sp<AMediaCryptoWrapper> crypto =
+ new AMediaCryptoWrapper(uuid, drmSessionId.array(), drmSessionId.size());
+ if (crypto == NULL) {
+ ALOGE("prepareDrm: failed to create crypto.");
+ return UNKNOWN_ERROR;
+ }
+ ALOGV("prepareDrm: crypto created for uuid: %s",
+ DrmUUID::toHexString(uuid).string());
+
+ *outCrypto = crypto;
+    // as long as there is an active crypto
+ mIsDrmProtected = true;
+
+ if (mMimes.size() == 0) {
+ status = UNKNOWN_ERROR;
+ ALOGE("prepareDrm: Unexpected. Must have at least one track. status: %d", status);
+ return status;
+ }
+
+ // first mime in this list is either the video track, or the first audio track
+ const char *mime = mMimes[0].string();
+ mIsSecure = crypto->requiresSecureDecoderComponent(mime);
+ ALOGV("prepareDrm: requiresSecureDecoderComponent mime: %s isSecure: %d",
+ mime, mIsSecure);
+
+    // Check the member flags while on the looper and send out the notification.
+ // The legacy mDecryptHandle!=NULL check (for FLAG_PROTECTED) is equivalent to mIsDrmProtected.
+ notifyFlagsChanged(
+ (mIsSecure ? FLAG_SECURE : 0) |
+ // Setting "protected screen" only for L1: b/38390836
+ (mIsSecure ? FLAG_PROTECTED : 0) |
+ FLAG_CAN_PAUSE |
+ FLAG_CAN_SEEK_BACKWARD |
+ FLAG_CAN_SEEK_FORWARD |
+ FLAG_CAN_SEEK);
+
+ if (status == OK) {
+ ALOGV("prepareDrm: mCrypto: %p", outCrypto->get());
+ ALOGD("prepareDrm ret: %d ", status);
+ } else {
+ ALOGE("prepareDrm err: %d", status);
+ }
+ return status;
+}
+
+status_t NuPlayer2::GenericSource2::releaseDrm() {
+ Mutex::Autolock _l(mLock);
+ ALOGV("releaseDrm");
+
+ if (mIsDrmProtected) {
+ mIsDrmProtected = false;
+ // to prevent returning any more buffer after stop/releaseDrm (b/37960096)
+ mIsDrmReleased = true;
+ ALOGV("releaseDrm: mIsDrmProtected is reset.");
+ } else {
+ ALOGE("releaseDrm: mIsDrmProtected is already false.");
+ }
+
+ return OK;
+}
+
+status_t NuPlayer2::GenericSource2::checkDrmInfo()
+{
+ // clearing the flag at prepare in case the player is reused after stop/releaseDrm with the
+ // same source without being reset (called by prepareAsync/initFromDataSource)
+ mIsDrmReleased = false;
+
+ if (mExtractor == NULL) {
+ ALOGV("checkDrmInfo: No extractor");
+        return OK; // let the caller respond accordingly
+ }
+
+ PsshInfo *psshInfo = mExtractor->getPsshInfo();
+ if (psshInfo == NULL) {
+ ALOGV("checkDrmInfo: No PSSH");
+ return OK; // source without DRM info
+ }
+
+ sp<ABuffer> drmInfoBuffer = NuPlayer2Drm::retrieveDrmInfo(psshInfo);
+ ALOGV("checkDrmInfo: MEDIA_DRM_INFO PSSH drm info size: %d", (int)drmInfoBuffer->size());
+
+ if (drmInfoBuffer->size() == 0) {
+ ALOGE("checkDrmInfo: Unexpected parcel size: 0");
+ return UNKNOWN_ERROR;
+ }
+
+ notifyDrmInfo(drmInfoBuffer);
+
+ return OK;
+}
+
+void NuPlayer2::GenericSource2::signalBufferReturned(MediaBufferBase *buffer)
+{
+ //ALOGV("signalBufferReturned %p refCount: %d", buffer, buffer->localRefcount());
+
+ buffer->setObserver(NULL);
+    buffer->release(); // this leads to delete since there is no observer
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/GenericSource2.h b/media/libmediaplayer2/nuplayer2/GenericSource2.h
new file mode 100644
index 0000000..9bc5182
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/GenericSource2.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GENERIC_SOURCE2_H_
+
+#define GENERIC_SOURCE2_H_
+
+#include "NuPlayer2.h"
+#include "NuPlayer2Source.h"
+
+#include "ATSParser.h"
+
+#include <media/stagefright/MediaBuffer.h>
+#include <mediaplayer2/mediaplayer2.h>
+#include <media/NdkMediaDataSource.h>
+#include <media/NdkMediaExtractor.h>
+#include <media/NdkWrapper.h>
+
+namespace android {
+
+class DecryptHandle;
+struct AnotherPacketSource;
+struct ARTSPController;
+class DataSource;
+class IDataSource;
+class IMediaSource;
+struct MediaHTTPService;
+struct MediaSource;
+class MediaBuffer;
+struct MediaClock;
+struct NuCachedSource2;
+
+struct NuPlayer2::GenericSource2 : public NuPlayer2::Source,
+ public MediaBufferObserver // Modular DRM
+{
+ GenericSource2(const sp<AMessage> ¬ify, uid_t uid,
+ const sp<MediaClock> &mediaClock);
+
+ status_t setDataSource(
+ const sp<MediaHTTPService> &httpService,
+ const char *url,
+ const KeyedVector<String8, String8> *headers);
+
+ status_t setDataSource(int fd, int64_t offset, int64_t length);
+
+ status_t setDataSource(const sp<DataSource>& dataSource);
+
+ virtual status_t getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
+ virtual void prepareAsync();
+
+ virtual void start();
+ virtual void stop();
+ virtual void pause();
+ virtual void resume();
+
+ virtual void disconnect();
+
+ virtual status_t feedMoreTSData();
+
+ virtual sp<MetaData> getFileFormatMeta() const;
+
+ virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+
+ virtual status_t getDuration(int64_t *durationUs);
+ virtual size_t getTrackCount() const;
+ virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
+ virtual ssize_t getSelectedTrack(media_track_type type) const;
+ virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) override;
+
+ virtual bool isStreaming() const;
+
+ // Modular DRM
+ virtual void signalBufferReturned(MediaBufferBase *buffer);
+
+ virtual status_t prepareDrm(
+ const uint8_t uuid[16],
+ const Vector<uint8_t> &drmSessionId,
+ sp<AMediaCryptoWrapper> *outCrypto);
+
+ virtual status_t releaseDrm();
+
+
+protected:
+ virtual ~GenericSource2();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+ virtual sp<AMessage> getFormat(bool audio);
+ virtual sp<MetaData> getFormatMeta(bool audio);
+
+private:
+ enum {
+ kWhatPrepareAsync,
+ kWhatFetchSubtitleData,
+ kWhatFetchTimedTextData,
+ kWhatSendSubtitleData,
+ kWhatSendGlobalTimedTextData,
+ kWhatSendTimedTextData,
+ kWhatChangeAVSource,
+ kWhatPollBuffering,
+ kWhatSeek,
+ kWhatReadBuffer,
+ kWhatStart,
+ kWhatResume,
+ kWhatSecureDecodersInstantiated,
+ };
+
+ struct Track {
+ size_t mIndex;
+ sp<AMediaExtractorWrapper> mExtractor;
+ sp<AnotherPacketSource> mPackets;
+ };
+
+ int64_t mAudioTimeUs;
+ int64_t mAudioLastDequeueTimeUs;
+ int64_t mVideoTimeUs;
+ int64_t mVideoLastDequeueTimeUs;
+
+ BufferingSettings mBufferingSettings;
+ int32_t mPrevBufferPercentage;
+ int32_t mPollBufferingGeneration;
+ bool mSentPauseOnBuffering;
+
+ int32_t mAudioDataGeneration;
+ int32_t mVideoDataGeneration;
+ int32_t mFetchSubtitleDataGeneration;
+ int32_t mFetchTimedTextDataGeneration;
+ int64_t mDurationUs;
+ bool mAudioIsVorbis;
+ // Secure codec is required.
+ bool mIsSecure;
+ bool mIsStreaming;
+ uid_t mUID;
+ const sp<MediaClock> mMediaClock;
+ sp<MediaHTTPService> mHTTPService;
+ AString mUri;
+ KeyedVector<String8, String8> mUriHeaders;
+ int mFd;
+ int64_t mOffset;
+ int64_t mLength;
+
+ bool mDisconnected;
+ sp<DataSource> mDataSource;
+ sp<NuCachedSource2> mCachedSource;
+ sp<DataSource> mHttpSource;
+ sp<MetaData> mFileMeta;
+ sp<AMediaDataSourceWrapper> mDataSourceWrapper;
+ sp<AMediaExtractorWrapper> mExtractor;
+ Vector<sp<AMediaExtractorWrapper> > mExtractors;
+ bool mStarted;
+ bool mPreparing;
+ int64_t mBitrate;
+ uint32_t mPendingReadBufferTypes;
+ sp<ABuffer> mGlobalTimedText;
+
+ Track mVideoTrack;
+ Track mAudioTrack;
+ Track mSubtitleTrack;
+ Track mTimedTextTrack;
+
+ mutable Mutex mLock;
+
+ sp<ALooper> mLooper;
+
+ void resetDataSource();
+
+ status_t initFromDataSource();
+ int64_t getLastReadPosition();
+
+ void notifyPreparedAndCleanup(status_t err);
+ void onSecureDecodersInstantiated(status_t err);
+ void finishPrepareAsync();
+ status_t startSources();
+
+ void onSeek(const sp<AMessage>& msg);
+ status_t doSeek(int64_t seekTimeUs, MediaPlayer2SeekMode mode);
+
+ void onPrepareAsync();
+
+ void fetchTextData(
+ uint32_t what, media_track_type type,
+ int32_t curGen, const sp<AnotherPacketSource>& packets, const sp<AMessage>& msg);
+
+ void sendGlobalTextData(
+ uint32_t what,
+ int32_t curGen, sp<AMessage> msg);
+
+ void sendTextData(
+ uint32_t what, media_track_type type,
+ int32_t curGen, const sp<AnotherPacketSource>& packets, const sp<AMessage>& msg);
+
+ sp<ABuffer> mediaBufferToABuffer(
+ MediaBufferBase *mbuf,
+ media_track_type trackType);
+
+ void postReadBuffer(media_track_type trackType);
+ void onReadBuffer(const sp<AMessage>& msg);
+    // When |mode| is MediaPlayer2SeekMode::SEEK_CLOSEST, the buffers read include
+    // an item indicating that rendering should be skipped for all buffers with
+    // timestamps earlier than |seekTimeUs|.
+    // For other modes, that item is omitted in order to facilitate fast seeks.
+ void readBuffer(
+ media_track_type trackType,
+ int64_t seekTimeUs = -1ll,
+ MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC,
+ int64_t *actualTimeUs = NULL, bool formatChange = false);
+
+ void queueDiscontinuityIfNeeded(
+ bool seeking, bool formatChange, media_track_type trackType, Track *track);
+
+ void schedulePollBuffering();
+ void onPollBuffering();
+ void notifyBufferingUpdate(int32_t percentage);
+
+ void sendCacheStats();
+
+ sp<AMessage> getFormat_l(bool audio);
+ sp<MetaData> getFormatMeta_l(bool audio);
+ int32_t getDataGeneration(media_track_type type) const;
+
+ // Modular DRM
+ // The source is DRM protected and is prepared for DRM.
+ bool mIsDrmProtected;
+ // releaseDrm has been processed.
+ bool mIsDrmReleased;
+ Vector<String8> mMimes;
+
+ status_t checkDrmInfo();
+
+ DISALLOW_EVIL_CONSTRUCTORS(GenericSource2);
+};
+
+} // namespace android
+
+#endif // GENERIC_SOURCE2_H_
diff --git a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
new file mode 100644
index 0000000..a61cacd
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
@@ -0,0 +1,449 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "HTTPLiveSource2"
+#include <utils/Log.h>
+
+#include "HTTPLiveSource2.h"
+
+#include "AnotherPacketSource.h"
+#include "LiveDataSource.h"
+
+#include <media/MediaHTTPService.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/Utils.h>
+
+// default buffer prepare/ready/underflow marks
+static const int kReadyMarkMs = 5000; // 5 seconds
+static const int kPrepareMarkMs = 1500; // 1.5 seconds
+
+namespace android {
+
+NuPlayer2::HTTPLiveSource2::HTTPLiveSource2(
+ const sp<AMessage> ¬ify,
+ const sp<MediaHTTPService> &httpService,
+ const char *url,
+ const KeyedVector<String8, String8> *headers)
+ : Source(notify),
+ mHTTPService(httpService),
+ mURL(url),
+ mFlags(0),
+ mFinalResult(OK),
+ mOffset(0),
+ mFetchSubtitleDataGeneration(0),
+ mFetchMetaDataGeneration(0),
+ mHasMetadata(false),
+ mMetadataSelected(false) {
+ mBufferingSettings.mInitialMarkMs = kPrepareMarkMs;
+ mBufferingSettings.mResumePlaybackMarkMs = kReadyMarkMs;
+ if (headers) {
+ mExtraHeaders = *headers;
+
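+        // The pseudo-header "x-hide-urls-from-log" turns on incognito mode (URLs
+        // are not logged) and is stripped before the headers are used.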
+ ssize_t index =
+ mExtraHeaders.indexOfKey(String8("x-hide-urls-from-log"));
+
+ if (index >= 0) {
+ mFlags |= kFlagIncognito;
+
+ mExtraHeaders.removeItemsAt(index);
+ }
+ }
+}
+
+NuPlayer2::HTTPLiveSource2::~HTTPLiveSource2() {
+ if (mLiveSession != NULL) {
+ mLiveSession->disconnect();
+
+ mLiveLooper->unregisterHandler(mLiveSession->id());
+ mLiveLooper->unregisterHandler(id());
+ mLiveLooper->stop();
+
+ mLiveSession.clear();
+ mLiveLooper.clear();
+ }
+}
+
+status_t NuPlayer2::HTTPLiveSource2::getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) {
+ *buffering = mBufferingSettings;
+
+ return OK;
+}
+
+status_t NuPlayer2::HTTPLiveSource2::setBufferingSettings(const BufferingSettings& buffering) {
+ mBufferingSettings = buffering;
+
+ if (mLiveSession != NULL) {
+ mLiveSession->setBufferingSettings(mBufferingSettings);
+ }
+
+ return OK;
+}
+
+void NuPlayer2::HTTPLiveSource2::prepareAsync() {
+ if (mLiveLooper == NULL) {
+ mLiveLooper = new ALooper;
+ mLiveLooper->setName("http live");
+ mLiveLooper->start(false, /* runOnCallingThread */
+ true /* canCallJava */);
+
+ mLiveLooper->registerHandler(this);
+ }
+
+ sp<AMessage> notify = new AMessage(kWhatSessionNotify, this);
+
+ mLiveSession = new LiveSession(
+ notify,
+ (mFlags & kFlagIncognito) ? LiveSession::kFlagIncognito : 0,
+ mHTTPService);
+
+ mLiveLooper->registerHandler(mLiveSession);
+
+ mLiveSession->setBufferingSettings(mBufferingSettings);
+ mLiveSession->connectAsync(
+ mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
+}
+
+void NuPlayer2::HTTPLiveSource2::start() {
+}
+
+sp<MetaData> NuPlayer2::HTTPLiveSource2::getFormatMeta(bool audio) {
+ sp<MetaData> meta;
+ if (mLiveSession != NULL) {
+ mLiveSession->getStreamFormatMeta(
+ audio ? LiveSession::STREAMTYPE_AUDIO
+ : LiveSession::STREAMTYPE_VIDEO,
+ &meta);
+ }
+
+ return meta;
+}
+
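+// Returns the stream format as an AMessage. If the live session has not determined
+// the format yet (-EWOULDBLOCK), a message carrying only the error is returned so
+// the caller can retry later.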
+sp<AMessage> NuPlayer2::HTTPLiveSource2::getFormat(bool audio) {
+ sp<MetaData> meta;
+ status_t err = -EWOULDBLOCK;
+ if (mLiveSession != NULL) {
+ err = mLiveSession->getStreamFormatMeta(
+ audio ? LiveSession::STREAMTYPE_AUDIO
+ : LiveSession::STREAMTYPE_VIDEO,
+ &meta);
+ }
+
+ sp<AMessage> format;
+ if (err == -EWOULDBLOCK) {
+ format = new AMessage();
+ format->setInt32("err", err);
+ return format;
+ }
+
+ if (err != OK || convertMetaDataToMessage(meta, &format) != OK) {
+ return NULL;
+ }
+ return format;
+}
+
+status_t NuPlayer2::HTTPLiveSource2::feedMoreTSData() {
+ return OK;
+}
+
+status_t NuPlayer2::HTTPLiveSource2::dequeueAccessUnit(
+ bool audio, sp<ABuffer> *accessUnit) {
+ return mLiveSession->dequeueAccessUnit(
+ audio ? LiveSession::STREAMTYPE_AUDIO
+ : LiveSession::STREAMTYPE_VIDEO,
+ accessUnit);
+}
+
+status_t NuPlayer2::HTTPLiveSource2::getDuration(int64_t *durationUs) {
+ return mLiveSession->getDuration(durationUs);
+}
+
+size_t NuPlayer2::HTTPLiveSource2::getTrackCount() const {
+ return mLiveSession->getTrackCount();
+}
+
+sp<AMessage> NuPlayer2::HTTPLiveSource2::getTrackInfo(size_t trackIndex) const {
+ return mLiveSession->getTrackInfo(trackIndex);
+}
+
+ssize_t NuPlayer2::HTTPLiveSource2::getSelectedTrack(media_track_type type) const {
+ if (mLiveSession == NULL) {
+ return -1;
+ } else if (type == MEDIA_TRACK_TYPE_METADATA) {
+        // MEDIA_TRACK_TYPE_METADATA is always the last track
+ // mMetadataSelected can only be true when mHasMetadata is true
+ return mMetadataSelected ? (mLiveSession->getTrackCount() - 1) : -1;
+ } else {
+ return mLiveSession->getSelectedTrack(type);
+ }
+}
+
+status_t NuPlayer2::HTTPLiveSource2::selectTrack(size_t trackIndex, bool select, int64_t /*timeUs*/) {
+ if (mLiveSession == NULL) {
+ return INVALID_OPERATION;
+ }
+
+ status_t err = INVALID_OPERATION;
+ bool postFetchMsg = false, isSub = false;
+ if (!mHasMetadata || trackIndex != mLiveSession->getTrackCount() - 1) {
+ err = mLiveSession->selectTrack(trackIndex, select);
+ postFetchMsg = select;
+ isSub = true;
+ } else {
+ // metadata track; i.e. (mHasMetadata && trackIndex == mLiveSession->getTrackCount() - 1)
+ if (mMetadataSelected && !select) {
+ err = OK;
+ } else if (!mMetadataSelected && select) {
+ postFetchMsg = true;
+ err = OK;
+ } else {
+ err = BAD_VALUE; // behave as LiveSession::selectTrack
+ }
+
+ mMetadataSelected = select;
+ }
+
+ if (err == OK) {
+ int32_t &generation = isSub ? mFetchSubtitleDataGeneration : mFetchMetaDataGeneration;
+ generation++;
+ if (postFetchMsg) {
+ int32_t what = isSub ? kWhatFetchSubtitleData : kWhatFetchMetaData;
+ sp<AMessage> msg = new AMessage(what, this);
+ msg->setInt32("generation", generation);
+ msg->post();
+ }
+ }
+
+    // LiveSession::selectTrack returns BAD_VALUE when selecting the currently
+    // selected track or unselecting a non-selected track. In that case it's a
+    // no-op, so we return OK.
+ return (err == OK || err == BAD_VALUE) ? (status_t)OK : err;
+}
+
+status_t NuPlayer2::HTTPLiveSource2::seekTo(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
+ if (mLiveSession->isSeekable()) {
+ return mLiveSession->seekTo(seekTimeUs, mode);
+ } else {
+ return INVALID_OPERATION;
+ }
+}
+
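+// Drains buffers of |fetchType| from the live session and forwards them as
+// |pushWhat| notifications. Subtitle buffers are re-polled at their presentation
+// time; metadata buffers are pushed right away, dropping any that are more than
+// a second stale.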
+void NuPlayer2::HTTPLiveSource2::pollForRawData(
+ const sp<AMessage> &msg, int32_t currentGeneration,
+ LiveSession::StreamType fetchType, int32_t pushWhat) {
+
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+
+ if (generation != currentGeneration) {
+ return;
+ }
+
+ sp<ABuffer> buffer;
+ while (mLiveSession->dequeueAccessUnit(fetchType, &buffer) == OK) {
+
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", pushWhat);
+ notify->setBuffer("buffer", buffer);
+
+ int64_t timeUs, baseUs, delayUs;
+ CHECK(buffer->meta()->findInt64("baseUs", &baseUs));
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+ delayUs = baseUs + timeUs - ALooper::GetNowUs();
+
+ if (fetchType == LiveSession::STREAMTYPE_SUBTITLES) {
+ notify->post();
+ msg->post(delayUs > 0ll ? delayUs : 0ll);
+ return;
+ } else if (fetchType == LiveSession::STREAMTYPE_METADATA) {
+            if (delayUs < -1000000ll) { // drop metadata more than 1 second late
+ continue;
+ }
+ notify->post();
+ // push all currently available metadata buffers in each invocation of pollForRawData
+ // continue;
+ } else {
+ TRESPASS();
+ }
+ }
+
+ // try again in 1 second
+ msg->post(1000000ll);
+}
+
+void NuPlayer2::HTTPLiveSource2::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatSessionNotify:
+ {
+ onSessionNotify(msg);
+ break;
+ }
+
+ case kWhatFetchSubtitleData:
+ {
+ pollForRawData(
+ msg, mFetchSubtitleDataGeneration,
+ /* fetch */ LiveSession::STREAMTYPE_SUBTITLES,
+ /* push */ kWhatSubtitleData);
+
+ break;
+ }
+
+ case kWhatFetchMetaData:
+ {
+ if (!mMetadataSelected) {
+ break;
+ }
+
+ pollForRawData(
+ msg, mFetchMetaDataGeneration,
+ /* fetch */ LiveSession::STREAMTYPE_METADATA,
+ /* push */ kWhatTimedMetaData);
+
+ break;
+ }
+
+ default:
+ Source::onMessageReceived(msg);
+ break;
+ }
+}
+
+void NuPlayer2::HTTPLiveSource2::onSessionNotify(const sp<AMessage> &msg) {
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
+
+ switch (what) {
+ case LiveSession::kWhatPrepared:
+ {
+ // notify the current size here if we have it, otherwise report an initial size of (0,0)
+ sp<AMessage> format = getFormat(false /* audio */);
+ int32_t width;
+ int32_t height;
+ if (format != NULL &&
+ format->findInt32("width", &width) && format->findInt32("height", &height)) {
+ notifyVideoSizeChanged(format);
+ } else {
+ notifyVideoSizeChanged();
+ }
+
+ uint32_t flags = 0;
+ if (mLiveSession->isSeekable()) {
+ flags |= FLAG_CAN_PAUSE;
+ flags |= FLAG_CAN_SEEK;
+ flags |= FLAG_CAN_SEEK_BACKWARD;
+ flags |= FLAG_CAN_SEEK_FORWARD;
+ }
+
+ if (mLiveSession->hasDynamicDuration()) {
+ flags |= FLAG_DYNAMIC_DURATION;
+ }
+
+ notifyFlagsChanged(flags);
+
+ notifyPrepared();
+ break;
+ }
+
+ case LiveSession::kWhatPreparationFailed:
+ {
+ status_t err;
+ CHECK(msg->findInt32("err", &err));
+
+ notifyPrepared(err);
+ break;
+ }
+
+ case LiveSession::kWhatStreamsChanged:
+ {
+ uint32_t changedMask;
+ CHECK(msg->findInt32(
+ "changedMask", (int32_t *)&changedMask));
+
+ bool audio = changedMask & LiveSession::STREAMTYPE_AUDIO;
+ bool video = changedMask & LiveSession::STREAMTYPE_VIDEO;
+
+ sp<AMessage> reply;
+ CHECK(msg->findMessage("reply", &reply));
+
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatQueueDecoderShutdown);
+ notify->setInt32("audio", audio);
+ notify->setInt32("video", video);
+ notify->setMessage("reply", reply);
+ notify->post();
+ break;
+ }
+
+ case LiveSession::kWhatBufferingStart:
+ {
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatPauseOnBufferingStart);
+ notify->post();
+ break;
+ }
+
+ case LiveSession::kWhatBufferingEnd:
+ {
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatResumeOnBufferingEnd);
+ notify->post();
+ break;
+ }
+
+
+ case LiveSession::kWhatBufferingUpdate:
+ {
+ sp<AMessage> notify = dupNotify();
+ int32_t percentage;
+ CHECK(msg->findInt32("percentage", &percentage));
+ notify->setInt32("what", kWhatBufferingUpdate);
+ notify->setInt32("percentage", percentage);
+ notify->post();
+ break;
+ }
+
+ case LiveSession::kWhatMetadataDetected:
+ {
+ if (!mHasMetadata) {
+ mHasMetadata = true;
+
+ sp<AMessage> notify = dupNotify();
+ // notification without buffer triggers MEDIA2_INFO_METADATA_UPDATE
+ notify->setInt32("what", kWhatTimedMetaData);
+ notify->post();
+ }
+ break;
+ }
+
+ case LiveSession::kWhatError:
+ {
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+} // namespace android
+
diff --git a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.h b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.h
new file mode 100644
index 0000000..97d3653
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HTTP_LIVE_SOURCE2_H_
+
+#define HTTP_LIVE_SOURCE2_H_
+
+#include "NuPlayer2.h"
+#include "NuPlayer2Source.h"
+
+#include "LiveSession.h"
+
+namespace android {
+
+struct LiveSession;
+
+struct NuPlayer2::HTTPLiveSource2 : public NuPlayer2::Source {
+ HTTPLiveSource2(
+ const sp<AMessage> ¬ify,
+ const sp<MediaHTTPService> &httpService,
+ const char *url,
+ const KeyedVector<String8, String8> *headers);
+
+ virtual status_t getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
+ virtual void prepareAsync();
+ virtual void start();
+
+ virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+ virtual sp<MetaData> getFormatMeta(bool audio);
+ virtual sp<AMessage> getFormat(bool audio);
+
+ virtual status_t feedMoreTSData();
+ virtual status_t getDuration(int64_t *durationUs);
+ virtual size_t getTrackCount() const;
+ virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
+ virtual ssize_t getSelectedTrack(media_track_type /* type */) const;
+ virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) override;
+
+protected:
+ virtual ~HTTPLiveSource2();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+ enum Flags {
+ // Don't log any URLs.
+ kFlagIncognito = 1,
+ };
+
+ enum {
+ kWhatSessionNotify,
+ kWhatFetchSubtitleData,
+ kWhatFetchMetaData,
+ };
+
+ sp<MediaHTTPService> mHTTPService;
+ AString mURL;
+ KeyedVector<String8, String8> mExtraHeaders;
+ uint32_t mFlags;
+ status_t mFinalResult;
+ off64_t mOffset;
+ sp<ALooper> mLiveLooper;
+ sp<LiveSession> mLiveSession;
+ int32_t mFetchSubtitleDataGeneration;
+ int32_t mFetchMetaDataGeneration;
+ bool mHasMetadata;
+ bool mMetadataSelected;
+ BufferingSettings mBufferingSettings;
+
+ void onSessionNotify(const sp<AMessage> &msg);
+ void pollForRawData(
+ const sp<AMessage> &msg, int32_t currentGeneration,
+ LiveSession::StreamType fetchType, int32_t pushWhat);
+
+ DISALLOW_EVIL_CONSTRUCTORS(HTTPLiveSource2);
+};
+
+} // namespace android
+
+#endif // HTTP_LIVE_SOURCE2_H_
diff --git a/media/libmediaplayer2/nuplayer2/JWakeLock.cpp b/media/libmediaplayer2/nuplayer2/JWakeLock.cpp
new file mode 100644
index 0000000..c9a1071
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/JWakeLock.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "JWakeLock"
+#include <utils/Log.h>
+
+#include "JWakeLock.h"
+
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <powermanager/PowerManager.h>
+
+
+namespace android {
+
+// TODO: use the Java PowerManager instead of binder.
+JWakeLock::JWakeLock() :
+ mPowerManager(NULL),
+ mWakeLockToken(NULL),
+ mWakeLockCount(0),
+ mDeathRecipient(new PMDeathRecipient(this)) {}
+
+JWakeLock::~JWakeLock() {
+ if (mPowerManager != NULL) {
+ sp<IBinder> binder = IInterface::asBinder(mPowerManager);
+ binder->unlinkToDeath(mDeathRecipient);
+ }
+ clearPowerManager();
+}
+
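+// Reference-counted acquire: the first successful call takes a partial wake lock
+// from the power manager service; subsequent calls only bump the count.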
+bool JWakeLock::acquire() {
+ if (mWakeLockCount == 0) {
+ CHECK(mWakeLockToken == NULL);
+ if (mPowerManager == NULL) {
+ // use checkService() to avoid blocking if power service is not up yet
+ sp<IBinder> binder =
+ defaultServiceManager()->checkService(String16("power"));
+ if (binder == NULL) {
+ ALOGW("could not get the power manager service");
+ } else {
+ mPowerManager = interface_cast<IPowerManager>(binder);
+ binder->linkToDeath(mDeathRecipient);
+ }
+ }
+ if (mPowerManager != NULL) {
+ sp<IBinder> binder = new BBinder();
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ status_t status = mPowerManager->acquireWakeLock(
+ POWERMANAGER_PARTIAL_WAKE_LOCK,
+ binder, String16("JWakeLock"), String16("media"));
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ if (status == NO_ERROR) {
+ mWakeLockToken = binder;
+ mWakeLockCount++;
+ return true;
+ }
+ }
+ } else {
+ mWakeLockCount++;
+ return true;
+ }
+ return false;
+}
+
+void JWakeLock::release(bool force) {
+ if (mWakeLockCount == 0) {
+ return;
+ }
+ if (force) {
+ // Force wakelock release below by setting reference count to 1.
+ mWakeLockCount = 1;
+ }
+ if (--mWakeLockCount == 0) {
+ CHECK(mWakeLockToken != NULL);
+ if (mPowerManager != NULL) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0 /* flags */);
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ }
+ mWakeLockToken.clear();
+ }
+}
+
+void JWakeLock::clearPowerManager() {
+ release(true);
+ mPowerManager.clear();
+}
+
+void JWakeLock::PMDeathRecipient::binderDied(const wp<IBinder>& who __unused) {
+ if (mWakeLock != NULL) {
+ mWakeLock->clearPowerManager();
+ }
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/JWakeLock.h b/media/libmediaplayer2/nuplayer2/JWakeLock.h
new file mode 100644
index 0000000..eace87e
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/JWakeLock.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef J_WAKELOCK_H_
+#define J_WAKELOCK_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <powermanager/IPowerManager.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class JWakeLock : public RefBase {
+
+public:
+ JWakeLock();
+
+ // NOTE: acquire and release are not thread safe
+
+ // returns true if wakelock was acquired
+ bool acquire();
+ void release(bool force = false);
+
+ virtual ~JWakeLock();
+
+private:
+ sp<IPowerManager> mPowerManager;
+ sp<IBinder> mWakeLockToken;
+ uint32_t mWakeLockCount;
+
+ class PMDeathRecipient : public IBinder::DeathRecipient {
+ public:
+ explicit PMDeathRecipient(JWakeLock *wakeLock) : mWakeLock(wakeLock) {}
+ virtual ~PMDeathRecipient() {}
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder> &who);
+
+ private:
+ PMDeathRecipient(const PMDeathRecipient&);
+ PMDeathRecipient& operator= (const PMDeathRecipient&);
+
+ JWakeLock *mWakeLock;
+ };
+
+ const sp<PMDeathRecipient> mDeathRecipient;
+
+ void clearPowerManager();
+
+ DISALLOW_EVIL_CONSTRUCTORS(JWakeLock);
+};
+
+} // namespace android
+
+#endif // J_WAKELOCK_H_
diff --git a/media/libstagefright/matroska/MODULE_LICENSE_APACHE2 b/media/libmediaplayer2/nuplayer2/MODULE_LICENSE_APACHE2
similarity index 100%
copy from media/libstagefright/matroska/MODULE_LICENSE_APACHE2
copy to media/libmediaplayer2/nuplayer2/MODULE_LICENSE_APACHE2
diff --git a/media/libstagefright/matroska/NOTICE b/media/libmediaplayer2/nuplayer2/NOTICE
similarity index 100%
rename from media/libstagefright/matroska/NOTICE
rename to media/libmediaplayer2/nuplayer2/NOTICE
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
new file mode 100644
index 0000000..060b698
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -0,0 +1,3114 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayer2"
+
+#include <inttypes.h>
+
+#include <utils/Log.h>
+
+#include "NuPlayer2.h"
+
+#include "HTTPLiveSource2.h"
+#include "NuPlayer2CCDecoder.h"
+#include "NuPlayer2Decoder.h"
+#include "NuPlayer2DecoderBase.h"
+#include "NuPlayer2DecoderPassThrough.h"
+#include "NuPlayer2Driver.h"
+#include "NuPlayer2Renderer.h"
+#include "NuPlayer2Source.h"
+#include "RTSPSource2.h"
+#include "GenericSource2.h"
+#include "TextDescriptions.h"
+
+#include "ATSParser.h"
+
+#include <cutils/properties.h>
+
+#include <media/AudioParameter.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
+#include <media/DataSourceDesc.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/NdkWrapper.h>
+
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaClock.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+
+#include "ESDS.h"
+#include <media/stagefright/Utils.h>
+
+#include <system/window.h>
+
+namespace android {
+
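+// Forward codec metadata (sample rate, channel mask, bit rate, encoder delay and
+// padding) to the audio sink as AudioParameter key/value pairs for offloaded
+// playback.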
+static status_t sendMetaDataToHal(sp<MediaPlayer2Interface::AudioSink>& sink,
+ const sp<MetaData>& meta) {
+ int32_t sampleRate = 0;
+ int32_t bitRate = 0;
+ int32_t channelMask = 0;
+ int32_t delaySamples = 0;
+ int32_t paddingSamples = 0;
+
+ AudioParameter param = AudioParameter();
+
+ if (meta->findInt32(kKeySampleRate, &sampleRate)) {
+ param.addInt(String8(AUDIO_OFFLOAD_CODEC_SAMPLE_RATE), sampleRate);
+ }
+ if (meta->findInt32(kKeyChannelMask, &channelMask)) {
+ param.addInt(String8(AUDIO_OFFLOAD_CODEC_NUM_CHANNEL), channelMask);
+ }
+ if (meta->findInt32(kKeyBitRate, &bitRate)) {
+ param.addInt(String8(AUDIO_OFFLOAD_CODEC_AVG_BIT_RATE), bitRate);
+ }
+ if (meta->findInt32(kKeyEncoderDelay, &delaySamples)) {
+ param.addInt(String8(AUDIO_OFFLOAD_CODEC_DELAY_SAMPLES), delaySamples);
+ }
+ if (meta->findInt32(kKeyEncoderPadding, &paddingSamples)) {
+ param.addInt(String8(AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES), paddingSamples);
+ }
+
+ ALOGV("sendMetaDataToHal: bitRate %d, sampleRate %d, chanMask %d,"
+ "delaySample %d, paddingSample %d", bitRate, sampleRate,
+ channelMask, delaySamples, paddingSamples);
+
+ sink->setParameters(param.toString());
+ return OK;
+}
+
+
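+// Deferred actions are small command objects queued in mDeferredActions and run
+// by processDeferredActions() once prerequisites such as decoder flushes or
+// shutdowns have completed.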
+struct NuPlayer2::Action : public RefBase {
+ Action() {}
+
+ virtual void execute(NuPlayer2 *player) = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(Action);
+};
+
+struct NuPlayer2::SeekAction : public Action {
+ explicit SeekAction(int64_t seekTimeUs, MediaPlayer2SeekMode mode)
+ : mSeekTimeUs(seekTimeUs),
+ mMode(mode) {
+ }
+
+ virtual void execute(NuPlayer2 *player) {
+ player->performSeek(mSeekTimeUs, mMode);
+ }
+
+private:
+ int64_t mSeekTimeUs;
+ MediaPlayer2SeekMode mMode;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SeekAction);
+};
+
+struct NuPlayer2::ResumeDecoderAction : public Action {
+ explicit ResumeDecoderAction(bool needNotify)
+ : mNeedNotify(needNotify) {
+ }
+
+ virtual void execute(NuPlayer2 *player) {
+ player->performResumeDecoders(mNeedNotify);
+ }
+
+private:
+ bool mNeedNotify;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ResumeDecoderAction);
+};
+
+struct NuPlayer2::SetSurfaceAction : public Action {
+ explicit SetSurfaceAction(const sp<ANativeWindowWrapper> &nww)
+ : mNativeWindow(nww) {
+ }
+
+ virtual void execute(NuPlayer2 *player) {
+ player->performSetSurface(mNativeWindow);
+ }
+
+private:
+ sp<ANativeWindowWrapper> mNativeWindow;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SetSurfaceAction);
+};
+
+struct NuPlayer2::FlushDecoderAction : public Action {
+ FlushDecoderAction(FlushCommand audio, FlushCommand video)
+ : mAudio(audio),
+ mVideo(video) {
+ }
+
+ virtual void execute(NuPlayer2 *player) {
+ player->performDecoderFlush(mAudio, mVideo);
+ }
+
+private:
+ FlushCommand mAudio;
+ FlushCommand mVideo;
+
+ DISALLOW_EVIL_CONSTRUCTORS(FlushDecoderAction);
+};
+
+struct NuPlayer2::PostMessageAction : public Action {
+ explicit PostMessageAction(const sp<AMessage> &msg)
+ : mMessage(msg) {
+ }
+
+ virtual void execute(NuPlayer2 *) {
+ mMessage->post();
+ }
+
+private:
+ sp<AMessage> mMessage;
+
+ DISALLOW_EVIL_CONSTRUCTORS(PostMessageAction);
+};
+
+// Use this if there's no state necessary to save in order to execute
+// the action.
+struct NuPlayer2::SimpleAction : public Action {
+ typedef void (NuPlayer2::*ActionFunc)();
+
+ explicit SimpleAction(ActionFunc func)
+ : mFunc(func) {
+ }
+
+ virtual void execute(NuPlayer2 *player) {
+ (player->*mFunc)();
+ }
+
+private:
+ ActionFunc mFunc;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SimpleAction);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+NuPlayer2::NuPlayer2(pid_t pid, uid_t uid, const sp<MediaClock> &mediaClock)
+ : mPID(pid),
+ mUID(uid),
+ mMediaClock(mediaClock),
+ mSourceFlags(0),
+ mOffloadAudio(false),
+ mAudioDecoderGeneration(0),
+ mVideoDecoderGeneration(0),
+ mRendererGeneration(0),
+ mLastStartedPlayingTimeNs(0),
+ mPreviousSeekTimeUs(0),
+ mAudioEOS(false),
+ mVideoEOS(false),
+ mScanSourcesPending(false),
+ mScanSourcesGeneration(0),
+ mPollDurationGeneration(0),
+ mTimedTextGeneration(0),
+ mFlushingAudio(NONE),
+ mFlushingVideo(NONE),
+ mResumePending(false),
+ mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW),
+ mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
+ mVideoFpsHint(-1.f),
+ mStarted(false),
+ mPrepared(false),
+ mResetting(false),
+ mSourceStarted(false),
+ mAudioDecoderError(false),
+ mVideoDecoderError(false),
+ mPaused(false),
+ mPausedByClient(true),
+ mPausedForBuffering(false),
+ mIsDrmProtected(false),
+ mDataSourceType(DATA_SOURCE_TYPE_NONE) {
+ CHECK(mediaClock != NULL);
+ clearFlushComplete();
+}
+
+NuPlayer2::~NuPlayer2() {
+}
+
+void NuPlayer2::setDriver(const wp<NuPlayer2Driver> &driver) {
+ mDriver = driver;
+}
+
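+// Heuristic: treat http(s) and file URLs that end in ".m3u8" (or contain "m3u8"
+// anywhere in the URL) as HTTP live streaming content.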
+static bool IsHTTPLiveURL(const char *url) {
+ if (!strncasecmp("http://", url, 7)
+ || !strncasecmp("https://", url, 8)
+ || !strncasecmp("file://", url, 7)) {
+ size_t len = strlen(url);
+ if (len >= 5 && !strcasecmp(".m3u8", &url[len - 5])) {
+ return true;
+ }
+
+ if (strstr(url,"m3u8")) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
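+// Instantiates the Source implementation matching the data source descriptor:
+// HTTPLiveSource2 for HLS URLs, RTSPSource2 for rtsp:// and .sdp URLs, and
+// GenericSource2 for all other URLs, file descriptors and callback sources.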
+status_t NuPlayer2::createNuPlayer2Source(const sp<DataSourceDesc> &dsd,
+ sp<Source> *source,
+ DATA_SOURCE_TYPE *dataSourceType) {
+ status_t err = NO_ERROR;
+ sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);
+ notify->setInt64("srcId", dsd->mId);
+
+ switch (dsd->mType) {
+ case DataSourceDesc::TYPE_URL:
+ {
+ const char *url = dsd->mUrl.c_str();
+ size_t len = strlen(url);
+
+ const sp<MediaHTTPService> &httpService = dsd->mHttpService;
+ KeyedVector<String8, String8> *headers = &(dsd->mHeaders);
+
+ if (IsHTTPLiveURL(url)) {
+ *source = new HTTPLiveSource2(notify, httpService, url, headers);
+ ALOGV("createNuPlayer2Source HTTPLiveSource2 %s", url);
+ *dataSourceType = DATA_SOURCE_TYPE_HTTP_LIVE;
+ } else if (!strncasecmp(url, "rtsp://", 7)) {
+ *source = new RTSPSource2(
+ notify, httpService, url, headers, mUID);
+ ALOGV("createNuPlayer2Source RTSPSource2 %s", url);
+ *dataSourceType = DATA_SOURCE_TYPE_RTSP;
+ } else if ((!strncasecmp(url, "http://", 7)
+ || !strncasecmp(url, "https://", 8))
+ && ((len >= 4 && !strcasecmp(".sdp", &url[len - 4]))
+ || strstr(url, ".sdp?"))) {
+ *source = new RTSPSource2(
+ notify, httpService, url, headers, mUID, true);
+ ALOGV("createNuPlayer2Source RTSPSource2 http/https/.sdp %s", url);
+ *dataSourceType = DATA_SOURCE_TYPE_RTSP;
+ } else {
+ ALOGV("createNuPlayer2Source GenericSource2 %s", url);
+
+ sp<GenericSource2> genericSource =
+ new GenericSource2(notify, mUID, mMediaClock);
+
+ err = genericSource->setDataSource(httpService, url, headers);
+
+ if (err == OK) {
+ *source = genericSource;
+ } else {
+ *source = NULL;
+ ALOGE("Failed to create NuPlayer2Source!");
+ }
+
+ // regardless of success/failure
+ *dataSourceType = DATA_SOURCE_TYPE_GENERIC_URL;
+ }
+ break;
+ }
+
+ case DataSourceDesc::TYPE_FD:
+ {
+ sp<GenericSource2> genericSource =
+ new GenericSource2(notify, mUID, mMediaClock);
+
+ ALOGV("createNuPlayer2Source fd %d/%lld/%lld source: %p",
+ dsd->mFD, (long long)dsd->mFDOffset, (long long)dsd->mFDLength,
+ genericSource.get());
+
+ err = genericSource->setDataSource(dsd->mFD, dsd->mFDOffset, dsd->mFDLength);
+
+ if (err != OK) {
+ ALOGE("Failed to create NuPlayer2Source!");
+ *source = NULL;
+ } else {
+ *source = genericSource;
+ }
+
+ *dataSourceType = DATA_SOURCE_TYPE_GENERIC_FD;
+ break;
+ }
+
+ case DataSourceDesc::TYPE_CALLBACK:
+ {
+ sp<GenericSource2> genericSource =
+ new GenericSource2(notify, mUID, mMediaClock);
+ err = genericSource->setDataSource(dsd->mCallbackSource);
+
+ if (err != OK) {
+ ALOGE("Failed to create NuPlayer2Source!");
+ *source = NULL;
+ } else {
+ *source = genericSource;
+ }
+
+ *dataSourceType = DATA_SOURCE_TYPE_MEDIA;
+ break;
+ }
+
+ default:
+ err = BAD_TYPE;
+ *source = NULL;
+ *dataSourceType = DATA_SOURCE_TYPE_NONE;
+ ALOGE("invalid data source type!");
+ break;
+ }
+
+ return err;
+}
+
+void NuPlayer2::setDataSourceAsync(const sp<DataSourceDesc> &dsd) {
+ DATA_SOURCE_TYPE dataSourceType;
+ sp<Source> source;
+ createNuPlayer2Source(dsd, &source, &dataSourceType);
+
+    // TODO: currently NuPlayer2Driver makes a blocking call to setDataSourceAsync
+    // and expects notifySetDataSourceCompleted regardless of success or failure.
+    // This will be changed once setDataSource becomes asynchronous at the Java level.
+    // When it succeeds, the app will get an onInfo notification. Otherwise, onError
+    // will be called.
+ /*
+ if (err != OK) {
+ notifyListener(dsd->mId, MEDIA2_ERROR, MEDIA2_ERROR_FAILED_TO_SET_DATA_SOURCE, err);
+ return;
+ }
+
+ // Now, source != NULL.
+ */
+
+ mDataSourceType = dataSourceType;
+
+ sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
+ msg->setObject("source", source);
+ msg->setInt64("srcId", dsd->mId);
+ msg->post();
+}
+
+void NuPlayer2::prepareNextDataSourceAsync(const sp<DataSourceDesc> &dsd) {
+ DATA_SOURCE_TYPE dataSourceType;
+ sp<Source> source;
+ createNuPlayer2Source(dsd, &source, &dataSourceType);
+
+ /*
+ if (err != OK) {
+ notifyListener(dsd->mId, MEDIA2_ERROR, MEDIA2_ERROR_FAILED_TO_SET_DATA_SOURCE, err);
+ return;
+ }
+
+ // Now, source != NULL.
+ */
+
+ mNextDataSourceType = dataSourceType;
+
+ sp<AMessage> msg = new AMessage(kWhatPrepareNextDataSource, this);
+ msg->setObject("source", source);
+ msg->setInt64("srcId", dsd->mId);
+ msg->post();
+}
+
+void NuPlayer2::playNextDataSource(int64_t srcId) {
+ disconnectSource();
+
+ sp<AMessage> msg = new AMessage(kWhatPlayNextDataSource, this);
+ msg->setInt64("srcId", srcId);
+ msg->post();
+}
+
+status_t NuPlayer2::getBufferingSettings(
+ BufferingSettings *buffering /* nonnull */) {
+ sp<AMessage> msg = new AMessage(kWhatGetBufferingSettings, this);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ if (err == OK) {
+ readFromAMessage(response, buffering);
+ }
+ }
+ return err;
+}
+
+status_t NuPlayer2::setBufferingSettings(const BufferingSettings& buffering) {
+ sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+ writeToAMessage(msg, buffering);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+ return err;
+}
+
+void NuPlayer2::prepareAsync() {
+ ALOGV("prepareAsync");
+
+ (new AMessage(kWhatPrepare, this))->post();
+}
+
+void NuPlayer2::setVideoSurfaceTextureAsync(const sp<ANativeWindowWrapper> &nww) {
+ sp<AMessage> msg = new AMessage(kWhatSetVideoSurface, this);
+
+ if (nww == NULL || nww->getANativeWindow() == NULL) {
+ msg->setObject("surface", NULL);
+ } else {
+ msg->setObject("surface", nww);
+ }
+
+ msg->post();
+}
+
+void NuPlayer2::setAudioSink(const sp<MediaPlayer2Interface::AudioSink> &sink) {
+ sp<AMessage> msg = new AMessage(kWhatSetAudioSink, this);
+ msg->setObject("sink", sink);
+ msg->post();
+}
+
+void NuPlayer2::start() {
+ (new AMessage(kWhatStart, this))->post();
+}
+
+status_t NuPlayer2::setPlaybackSettings(const AudioPlaybackRate &rate) {
+ // do some cursory validation of the settings here. audio modes are
+ // only validated when set on the audiosink.
+ if ((rate.mSpeed != 0.f && rate.mSpeed < AUDIO_TIMESTRETCH_SPEED_MIN)
+ || rate.mSpeed > AUDIO_TIMESTRETCH_SPEED_MAX
+ || rate.mPitch < AUDIO_TIMESTRETCH_SPEED_MIN
+ || rate.mPitch > AUDIO_TIMESTRETCH_SPEED_MAX) {
+ return BAD_VALUE;
+ }
+ sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
+ writeToAMessage(msg, rate);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+ return err;
+}
+
+status_t NuPlayer2::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+ sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ if (err == OK) {
+ readFromAMessage(response, rate);
+ }
+ }
+ return err;
+}
+
+status_t NuPlayer2::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
+ sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
+ writeToAMessage(msg, sync, videoFpsHint);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+ return err;
+}
+
+status_t NuPlayer2::getSyncSettings(
+ AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
+ sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ if (err == OK) {
+ readFromAMessage(response, sync, videoFps);
+ }
+ }
+ return err;
+}
+
+void NuPlayer2::pause() {
+ (new AMessage(kWhatPause, this))->post();
+}
+
+void NuPlayer2::resetAsync() {
+ disconnectSource();
+ (new AMessage(kWhatReset, this))->post();
+}
+
+void NuPlayer2::disconnectSource() {
+ sp<Source> source;
+ {
+ Mutex::Autolock autoLock(mSourceLock);
+ source = mSource;
+ }
+
+ if (source != NULL) {
+        // During a reset the data source might already be unresponsive, so we need to
+        // disconnect explicitly so that reads exit promptly.
+        // We can't queue the disconnect request on the looper, as it might be
+        // queued behind a stuck read and never get processed.
+        // Disconnecting outside the looper allows the pending reads to exit
+        // (either successfully or with an error).
+ source->disconnect();
+ }
+
+}
+
+status_t NuPlayer2::notifyAt(int64_t mediaTimeUs) {
+ sp<AMessage> notify = new AMessage(kWhatNotifyTime, this);
+ notify->setInt64("timerUs", mediaTimeUs);
+ mMediaClock->addTimer(notify, mediaTimeUs);
+ return OK;
+}
+
+void NuPlayer2::seekToAsync(int64_t seekTimeUs, MediaPlayer2SeekMode mode, bool needNotify) {
+ sp<AMessage> msg = new AMessage(kWhatSeek, this);
+ msg->setInt64("seekTimeUs", seekTimeUs);
+ msg->setInt32("mode", mode);
+ msg->setInt32("needNotify", needNotify);
+ msg->post();
+}
+
+
+void NuPlayer2::writeTrackInfo(
+ Parcel* reply, const sp<AMessage>& format) const {
+ if (format == NULL) {
+ ALOGE("NULL format");
+ return;
+ }
+ int32_t trackType;
+ if (!format->findInt32("type", &trackType)) {
+ ALOGE("no track type");
+ return;
+ }
+
+ AString mime;
+ if (!format->findString("mime", &mime)) {
+ // Java MediaPlayer only uses mimetype for subtitle and timedtext tracks.
+        // If we can't find the mimetype here, the Java end does not need it either.
+        // We still write a placeholder mime to keep the
+ // (de)serialization logic simple.
+ if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+ mime = "audio/";
+ } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+ mime = "video/";
+ } else {
+ ALOGE("unknown track type: %d", trackType);
+ return;
+ }
+ }
+
+ AString lang;
+ if (!format->findString("language", &lang)) {
+ ALOGE("no language");
+ return;
+ }
+
+ reply->writeInt32(2); // write something non-zero
+ reply->writeInt32(trackType);
+ reply->writeString16(String16(mime.c_str()));
+ reply->writeString16(String16(lang.c_str()));
+
+ if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
+ int32_t isAuto, isDefault, isForced;
+ CHECK(format->findInt32("auto", &isAuto));
+ CHECK(format->findInt32("default", &isDefault));
+ CHECK(format->findInt32("forced", &isForced));
+
+ reply->writeInt32(isAuto);
+ reply->writeInt32(isDefault);
+ reply->writeInt32(isForced);
+ }
+}
+
+void NuPlayer2::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatSetDataSource:
+ {
+ ALOGV("kWhatSetDataSource");
+
+ CHECK(mSource == NULL);
+
+ status_t err = OK;
+ sp<RefBase> obj;
+ CHECK(msg->findObject("source", &obj));
+ if (obj != NULL) {
+ Mutex::Autolock autoLock(mSourceLock);
+ CHECK(msg->findInt64("srcId", &mSrcId));
+ mSource = static_cast<Source *>(obj.get());
+ } else {
+ err = UNKNOWN_ERROR;
+ ALOGE("kWhatSetDataSource, source should not be NULL");
+ }
+
+ CHECK(mDriver != NULL);
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+ driver->notifySetDataSourceCompleted(mSrcId, err);
+ }
+ break;
+ }
+
+ case kWhatPrepareNextDataSource:
+ {
+ ALOGV("kWhatPrepareNextDataSource");
+
+ status_t err = OK;
+ sp<RefBase> obj;
+ CHECK(msg->findObject("source", &obj));
+ if (obj != NULL) {
+ Mutex::Autolock autoLock(mSourceLock);
+ CHECK(msg->findInt64("srcId", &mNextSrcId));
+ mNextSource = static_cast<Source *>(obj.get());
+ mNextSource->prepareAsync();
+ } else {
+ err = UNKNOWN_ERROR;
+ }
+
+ break;
+ }
+
+ case kWhatPlayNextDataSource:
+ {
+ ALOGV("kWhatPlayNextDataSource");
+ int64_t srcId;
+ CHECK(msg->findInt64("srcId", &srcId));
+ if (srcId != mNextSrcId) {
+ notifyListener(srcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, 0);
+ return;
+ }
+
+ mResetting = true;
+ stopPlaybackTimer("kWhatPlayNextDataSource");
+ stopRebufferingTimer(true);
+
+ mDeferredActions.push_back(
+ new FlushDecoderAction(
+ FLUSH_CMD_SHUTDOWN /* audio */,
+ FLUSH_CMD_SHUTDOWN /* video */));
+
+ mDeferredActions.push_back(
+ new SimpleAction(&NuPlayer2::performPlayNextDataSource));
+
+ processDeferredActions();
+ break;
+ }
+
+ case kWhatGetBufferingSettings:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ ALOGV("kWhatGetBufferingSettings");
+ BufferingSettings buffering;
+ status_t err = OK;
+ if (mSource != NULL) {
+ err = mSource->getBufferingSettings(&buffering);
+ } else {
+ err = INVALID_OPERATION;
+ }
+ sp<AMessage> response = new AMessage;
+ if (err == OK) {
+ writeToAMessage(response, buffering);
+ }
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatSetBufferingSettings:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ ALOGV("kWhatSetBufferingSettings");
+ BufferingSettings buffering;
+ readFromAMessage(msg, &buffering);
+ status_t err = OK;
+ if (mSource != NULL) {
+ err = mSource->setBufferingSettings(buffering);
+ } else {
+ err = INVALID_OPERATION;
+ }
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatPrepare:
+ {
+ ALOGV("onMessageReceived kWhatPrepare");
+
+ mSource->prepareAsync();
+ break;
+ }
+
+ case kWhatGetTrackInfo:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ Parcel* reply;
+ CHECK(msg->findPointer("reply", (void**)&reply));
+
+ size_t inbandTracks = 0;
+ if (mSource != NULL) {
+ inbandTracks = mSource->getTrackCount();
+ }
+
+ size_t ccTracks = 0;
+ if (mCCDecoder != NULL) {
+ ccTracks = mCCDecoder->getTrackCount();
+ }
+
+ // total track count
+ reply->writeInt32(inbandTracks + ccTracks);
+
+ // write inband tracks
+ for (size_t i = 0; i < inbandTracks; ++i) {
+ writeTrackInfo(reply, mSource->getTrackInfo(i));
+ }
+
+ // write CC track
+ for (size_t i = 0; i < ccTracks; ++i) {
+ writeTrackInfo(reply, mCCDecoder->getTrackInfo(i));
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatGetSelectedTrack:
+ {
+ status_t err = INVALID_OPERATION;
+ if (mSource != NULL) {
+ err = OK;
+
+ int32_t type32;
+ CHECK(msg->findInt32("type", (int32_t*)&type32));
+ media_track_type type = (media_track_type)type32;
+ ssize_t selectedTrack = mSource->getSelectedTrack(type);
+
+ Parcel* reply;
+ CHECK(msg->findPointer("reply", (void**)&reply));
+ reply->writeInt32(selectedTrack);
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatSelectTrack:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ size_t trackIndex;
+ int32_t select;
+ int64_t timeUs;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+ CHECK(msg->findInt32("select", &select));
+ CHECK(msg->findInt64("timeUs", &timeUs));
+
+ status_t err = INVALID_OPERATION;
+
+ size_t inbandTracks = 0;
+ if (mSource != NULL) {
+ inbandTracks = mSource->getTrackCount();
+ }
+ size_t ccTracks = 0;
+ if (mCCDecoder != NULL) {
+ ccTracks = mCCDecoder->getTrackCount();
+ }
+
+ if (trackIndex < inbandTracks) {
+ err = mSource->selectTrack(trackIndex, select, timeUs);
+
+ if (!select && err == OK) {
+ int32_t type;
+ sp<AMessage> info = mSource->getTrackInfo(trackIndex);
+ if (info != NULL
+ && info->findInt32("type", &type)
+ && type == MEDIA_TRACK_TYPE_TIMEDTEXT) {
+ ++mTimedTextGeneration;
+ }
+ }
+ } else {
+ trackIndex -= inbandTracks;
+
+ if (trackIndex < ccTracks) {
+ err = mCCDecoder->selectTrack(trackIndex, select);
+ }
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatPollDuration:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+
+ if (generation != mPollDurationGeneration) {
+ // stale
+ break;
+ }
+
+ int64_t durationUs;
+ if (mDriver != NULL && mSource->getDuration(&durationUs) == OK) {
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+ driver->notifyDuration(mSrcId, durationUs);
+ }
+ }
+
+ msg->post(1000000ll); // poll again in a second.
+ break;
+ }
+
+ case kWhatSetVideoSurface:
+ {
+
+ sp<RefBase> obj;
+ CHECK(msg->findObject("surface", &obj));
+ sp<ANativeWindowWrapper> nww = static_cast<ANativeWindowWrapper *>(obj.get());
+
+ ALOGD("onSetVideoSurface(%p, %s video decoder)",
+ (nww == NULL ? NULL : nww->getANativeWindow()),
+ (mSource != NULL && mStarted && mSource->getFormat(false /* audio */) != NULL
+ && mVideoDecoder != NULL) ? "have" : "no");
+
+ // Need to check mStarted before calling mSource->getFormat because NuPlayer2 might
+ // be in preparing state and it could take long time.
+ // When mStarted is true, mSource must have been set.
+ if (mSource == NULL || !mStarted || mSource->getFormat(false /* audio */) == NULL
+ // NOTE: mVideoDecoder's mNativeWindow is always non-null
+ || (mVideoDecoder != NULL && mVideoDecoder->setVideoSurface(nww) == OK)) {
+ performSetSurface(nww);
+ break;
+ }
+
+ mDeferredActions.push_back(
+ new FlushDecoderAction(
+ (obj != NULL ? FLUSH_CMD_FLUSH : FLUSH_CMD_NONE) /* audio */,
+ FLUSH_CMD_SHUTDOWN /* video */));
+
+ mDeferredActions.push_back(new SetSurfaceAction(nww));
+
+ if (obj != NULL) {
+ if (mStarted) {
+ // Issue a seek to refresh the video screen only if started otherwise
+ // the extractor may not yet be started and will assert.
+ // If the video decoder is not set (perhaps audio only in this case)
+ // do not perform a seek as it is not needed.
+ int64_t currentPositionUs = 0;
+ if (getCurrentPosition(&currentPositionUs) == OK) {
+ mDeferredActions.push_back(
+ new SeekAction(currentPositionUs,
+ MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */));
+ }
+ }
+
+ // If there is a new surface texture, instantiate decoders
+ // again if possible.
+ mDeferredActions.push_back(
+ new SimpleAction(&NuPlayer2::performScanSources));
+
+ // After a flush without shutdown, decoder is paused.
+ // Don't resume it until source seek is done, otherwise it could
+ // start pulling stale data too soon.
+ mDeferredActions.push_back(
+ new ResumeDecoderAction(false /* needNotify */));
+ }
+
+ processDeferredActions();
+ break;
+ }
+
+ case kWhatSetAudioSink:
+ {
+ ALOGV("kWhatSetAudioSink");
+
+ sp<RefBase> obj;
+ CHECK(msg->findObject("sink", &obj));
+
+ mAudioSink = static_cast<MediaPlayer2Interface::AudioSink *>(obj.get());
+ break;
+ }
+
+ case kWhatStart:
+ {
+ ALOGV("kWhatStart");
+ if (mStarted) {
+ // do not resume yet if the source is still buffering
+ if (!mPausedForBuffering) {
+ onResume();
+ }
+ } else {
+ onStart();
+ }
+ mPausedByClient = false;
+ notifyListener(mSrcId, MEDIA2_STARTED, 0, 0);
+ break;
+ }
+
+ case kWhatConfigPlayback:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ AudioPlaybackRate rate /* sanitized */;
+ readFromAMessage(msg, &rate);
+ status_t err = OK;
+ if (mRenderer != NULL) {
+ // AudioSink allows only 1.f and 0.f for offload mode.
+ // For other speed, switch to non-offload mode.
+ if (mOffloadAudio && ((rate.mSpeed != 0.f && rate.mSpeed != 1.f)
+ || rate.mPitch != 1.f)) {
+ int64_t currentPositionUs;
+ if (getCurrentPosition(&currentPositionUs) != OK) {
+ currentPositionUs = mPreviousSeekTimeUs;
+ }
+
+ // Set mPlaybackSettings so that the new audio decoder can
+ // be created correctly.
+ mPlaybackSettings = rate;
+ if (!mPaused) {
+ mRenderer->pause();
+ }
+ restartAudio(
+ currentPositionUs, true /* forceNonOffload */,
+ true /* needsToCreateAudioDecoder */);
+ if (!mPaused) {
+ mRenderer->resume();
+ }
+ }
+
+ err = mRenderer->setPlaybackSettings(rate);
+ }
+ if (err == OK) {
+ if (rate.mSpeed == 0.f) {
+ onPause();
+ notifyListener(mSrcId, MEDIA2_PAUSED, 0, 0);
+ mPausedByClient = true;
+ // save all other settings (using non-paused speed)
+ // so we can restore them on start
+ AudioPlaybackRate newRate = rate;
+ newRate.mSpeed = mPlaybackSettings.mSpeed;
+ mPlaybackSettings = newRate;
+ } else { /* rate.mSpeed != 0.f */
+ mPlaybackSettings = rate;
+ if (mStarted) {
+ // do not resume yet if the source is still buffering
+ if (!mPausedForBuffering) {
+ onResume();
+ }
+ } else if (mPrepared) {
+ onStart();
+ }
+
+ mPausedByClient = false;
+ }
+ }
+
+ if (mVideoDecoder != NULL) {
+ sp<AMessage> params = new AMessage();
+ params->setFloat("playback-speed", mPlaybackSettings.mSpeed);
+ mVideoDecoder->setParameters(params);
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatGetPlaybackSettings:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ AudioPlaybackRate rate = mPlaybackSettings;
+ status_t err = OK;
+ if (mRenderer != NULL) {
+ err = mRenderer->getPlaybackSettings(&rate);
+ }
+ if (err == OK) {
+ // get playback settings used by renderer, as it may be
+ // slightly off due to audiosink not taking small changes.
+ mPlaybackSettings = rate;
+ if (mPaused) {
+ rate.mSpeed = 0.f;
+ }
+ }
+ sp<AMessage> response = new AMessage;
+ if (err == OK) {
+ writeToAMessage(response, rate);
+ }
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatConfigSync:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ ALOGV("kWhatConfigSync");
+ AVSyncSettings sync;
+ float videoFpsHint;
+ readFromAMessage(msg, &sync, &videoFpsHint);
+ status_t err = OK;
+ if (mRenderer != NULL) {
+ err = mRenderer->setSyncSettings(sync, videoFpsHint);
+ }
+ if (err == OK) {
+ mSyncSettings = sync;
+ mVideoFpsHint = videoFpsHint;
+ }
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatGetSyncSettings:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ AVSyncSettings sync = mSyncSettings;
+ float videoFps = mVideoFpsHint;
+ status_t err = OK;
+ if (mRenderer != NULL) {
+ err = mRenderer->getSyncSettings(&sync, &videoFps);
+ if (err == OK) {
+ mSyncSettings = sync;
+ mVideoFpsHint = videoFps;
+ }
+ }
+ sp<AMessage> response = new AMessage;
+ if (err == OK) {
+ writeToAMessage(response, sync, videoFps);
+ }
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatScanSources:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation != mScanSourcesGeneration) {
+ // Drop obsolete msg.
+ break;
+ }
+
+ mScanSourcesPending = false;
+
+ ALOGV("scanning sources haveAudio=%d, haveVideo=%d",
+ mAudioDecoder != NULL, mVideoDecoder != NULL);
+
+ bool hadAnySourcesBefore =
+ (mAudioDecoder != NULL) || (mVideoDecoder != NULL);
+ bool rescan = false;
+
+ // initialize video before audio because successful initialization of
+ // video may change deep buffer mode of audio.
+ if (mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
+ if (instantiateDecoder(false, &mVideoDecoder) == -EWOULDBLOCK) {
+ rescan = true;
+ }
+ }
+
+ // Don't try to re-open audio sink if there's an existing decoder.
+ if (mAudioSink != NULL && mAudioDecoder == NULL) {
+ if (instantiateDecoder(true, &mAudioDecoder) == -EWOULDBLOCK) {
+ rescan = true;
+ }
+ }
+
+ if (!hadAnySourcesBefore
+ && (mAudioDecoder != NULL || mVideoDecoder != NULL)) {
+ // This is the first time we've found anything playable.
+
+ if (mSourceFlags & Source::FLAG_DYNAMIC_DURATION) {
+ schedulePollDuration();
+ }
+ }
+
+ status_t err;
+ if ((err = mSource->feedMoreTSData()) != OK) {
+ if (mAudioDecoder == NULL && mVideoDecoder == NULL) {
+ // We're not currently decoding anything (no audio or
+ // video tracks found) and we just ran out of input data.
+
+ if (err == ERROR_END_OF_STREAM) {
+ notifyListener(mSrcId, MEDIA2_PLAYBACK_COMPLETE, 0, 0);
+ } else {
+ notifyListener(mSrcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
+ }
+ }
+ break;
+ }
+
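+ // A decoder could not be created yet (instantiateDecoder returned -EWOULDBLOCK); retry the scan in 100 ms.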
+ if (rescan) {
+ msg->post(100000ll);
+ mScanSourcesPending = true;
+ }
+ break;
+ }
+
+ case kWhatVideoNotify:
+ case kWhatAudioNotify:
+ {
+ bool audio = msg->what() == kWhatAudioNotify;
+
+ int32_t currentDecoderGeneration =
+ (audio ? mAudioDecoderGeneration : mVideoDecoderGeneration);
+ int32_t requesterGeneration = currentDecoderGeneration - 1;
+ CHECK(msg->findInt32("generation", &requesterGeneration));
+
+ if (requesterGeneration != currentDecoderGeneration) {
+ ALOGV("got message from old %s decoder, generation(%d:%d)",
+ audio ? "audio" : "video", requesterGeneration,
+ currentDecoderGeneration);
+ sp<AMessage> reply;
+ if (!(msg->findMessage("reply", &reply))) {
+ return;
+ }
+
+ reply->setInt32("err", INFO_DISCONTINUITY);
+ reply->post();
+ return;
+ }
+
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
+
+ if (what == DecoderBase::kWhatInputDiscontinuity) {
+ int32_t formatChange;
+ CHECK(msg->findInt32("formatChange", &formatChange));
+
+ ALOGV("%s discontinuity: formatChange %d",
+ audio ? "audio" : "video", formatChange);
+
+ if (formatChange) {
+ mDeferredActions.push_back(
+ new FlushDecoderAction(
+ audio ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE,
+ audio ? FLUSH_CMD_NONE : FLUSH_CMD_SHUTDOWN));
+ }
+
+ mDeferredActions.push_back(
+ new SimpleAction(
+ &NuPlayer2::performScanSources));
+
+ processDeferredActions();
+ } else if (what == DecoderBase::kWhatEOS) {
+ int32_t err;
+ CHECK(msg->findInt32("err", &err));
+
+ if (err == ERROR_END_OF_STREAM) {
+ ALOGV("got %s decoder EOS", audio ? "audio" : "video");
+ } else {
+ ALOGV("got %s decoder EOS w/ error %d",
+ audio ? "audio" : "video",
+ err);
+ }
+
+ mRenderer->queueEOS(audio, err);
+ } else if (what == DecoderBase::kWhatFlushCompleted) {
+ ALOGV("decoder %s flush completed", audio ? "audio" : "video");
+
+ handleFlushComplete(audio, true /* isDecoder */);
+ finishFlushIfPossible();
+ } else if (what == DecoderBase::kWhatVideoSizeChanged) {
+ sp<AMessage> format;
+ CHECK(msg->findMessage("format", &format));
+
+ sp<AMessage> inputFormat =
+ mSource->getFormat(false /* audio */);
+
+ setVideoScalingMode(mVideoScalingMode);
+ updateVideoSize(mSrcId, inputFormat, format);
+ } else if (what == DecoderBase::kWhatShutdownCompleted) {
+ ALOGV("%s shutdown completed", audio ? "audio" : "video");
+ if (audio) {
+ mAudioDecoder.clear();
+ mAudioDecoderError = false;
+ ++mAudioDecoderGeneration;
+
+ CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
+ mFlushingAudio = SHUT_DOWN;
+ } else {
+ mVideoDecoder.clear();
+ mVideoDecoderError = false;
+ ++mVideoDecoderGeneration;
+
+ CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER);
+ mFlushingVideo = SHUT_DOWN;
+ }
+
+ finishFlushIfPossible();
+ } else if (what == DecoderBase::kWhatResumeCompleted) {
+ finishResume();
+ } else if (what == DecoderBase::kWhatError) {
+ status_t err;
+ if (!msg->findInt32("err", &err) || err == OK) {
+ err = UNKNOWN_ERROR;
+ }
+
+ // Decoder errors can be due to Source (e.g. from streaming),
+ // or from decoding corrupted bitstreams, or from other decoder
+ // MediaCodec operations (e.g. from an ongoing reset or seek).
+ // They may also be due to openAudioSink failure at
+ // decoder start or after a format change.
+ //
+ // We try to gracefully shut down the affected decoder if possible,
+ // rather than trying to force the shutdown with something
+ // similar to performReset(). This method can lead to a hang
+ // if MediaCodec functions block after an error, but they should
+ // typically return INVALID_OPERATION instead of blocking.
+
+ FlushStatus *flushing = audio ? &mFlushingAudio : &mFlushingVideo;
+ ALOGE("received error(%#x) from %s decoder, flushing(%d), now shutting down",
+ err, audio ? "audio" : "video", *flushing);
+
+ switch (*flushing) {
+ case NONE:
+ mDeferredActions.push_back(
+ new FlushDecoderAction(
+ audio ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE,
+ audio ? FLUSH_CMD_NONE : FLUSH_CMD_SHUTDOWN));
+ processDeferredActions();
+ break;
+ case FLUSHING_DECODER:
+ *flushing = FLUSHING_DECODER_SHUTDOWN; // initiate shutdown after flush.
+ break; // Wait for flush to complete.
+ case FLUSHING_DECODER_SHUTDOWN:
+ break; // Wait for flush to complete.
+ case SHUTTING_DOWN_DECODER:
+ break; // Wait for shutdown to complete.
+ case FLUSHED:
+ getDecoder(audio)->initiateShutdown(); // In the middle of a seek.
+ *flushing = SHUTTING_DOWN_DECODER; // Shut down.
+ break;
+ case SHUT_DOWN:
+ finishFlushIfPossible(); // Should not occur.
+ break; // Finish anyways.
+ }
+ if (mSource != nullptr) {
+ if (audio) {
+ if (mVideoDecoderError || mSource->getFormat(false /* audio */) == NULL
+ || mNativeWindow == NULL || mNativeWindow->getANativeWindow() == NULL
+ || mVideoDecoder == NULL) {
+ // When both audio and video have error, or this stream has only audio
+ // which has error, notify client of error.
+ notifyListener(mSrcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
+ } else {
+ // Only audio track has error. Video track could be still good to play.
+ notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_PLAY_AUDIO_ERROR, err);
+ }
+ mAudioDecoderError = true;
+ } else {
+ if (mAudioDecoderError || mSource->getFormat(true /* audio */) == NULL
+ || mAudioSink == NULL || mAudioDecoder == NULL) {
+ // When both audio and video have error, or this stream has only video
+ // which has error, notify client of error.
+ notifyListener(mSrcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
+ } else {
+ // Only video track has error. Audio track could be still good to play.
+ notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_PLAY_VIDEO_ERROR, err);
+ }
+ mVideoDecoderError = true;
+ }
+ }
+ } else {
+ ALOGV("Unhandled decoder notification %d '%c%c%c%c'.",
+ what,
+ what >> 24,
+ (what >> 16) & 0xff,
+ (what >> 8) & 0xff,
+ what & 0xff);
+ }
+
+ break;
+ }
+
+ case kWhatRendererNotify:
+ {
+ int32_t requesterGeneration = mRendererGeneration - 1;
+ CHECK(msg->findInt32("generation", &requesterGeneration));
+ if (requesterGeneration != mRendererGeneration) {
+ ALOGV("got message from old renderer, generation(%d:%d)",
+ requesterGeneration, mRendererGeneration);
+ return;
+ }
+
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
+
+ if (what == Renderer::kWhatEOS) {
+ int32_t audio;
+ CHECK(msg->findInt32("audio", &audio));
+
+ int32_t finalResult;
+ CHECK(msg->findInt32("finalResult", &finalResult));
+
+ if (audio) {
+ mAudioEOS = true;
+ } else {
+ mVideoEOS = true;
+ }
+
+ if (finalResult == ERROR_END_OF_STREAM) {
+ ALOGV("reached %s EOS", audio ? "audio" : "video");
+ } else {
+ ALOGE("%s track encountered an error (%d)",
+ audio ? "audio" : "video", finalResult);
+
+ notifyListener(
+ mSrcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, finalResult);
+ }
+
+ if ((mAudioEOS || mAudioDecoder == NULL)
+ && (mVideoEOS || mVideoDecoder == NULL)) {
+ notifyListener(mSrcId, MEDIA2_PLAYBACK_COMPLETE, 0, 0);
+ }
+ } else if (what == Renderer::kWhatFlushComplete) {
+ int32_t audio;
+ CHECK(msg->findInt32("audio", &audio));
+
+ if (audio) {
+ mAudioEOS = false;
+ } else {
+ mVideoEOS = false;
+ }
+
+ ALOGV("renderer %s flush completed.", audio ? "audio" : "video");
+ if (audio && (mFlushingAudio == NONE || mFlushingAudio == FLUSHED
+ || mFlushingAudio == SHUT_DOWN)) {
+ // Flush has been handled by tear down.
+ break;
+ }
+ handleFlushComplete(audio, false /* isDecoder */);
+ finishFlushIfPossible();
+ } else if (what == Renderer::kWhatVideoRenderingStart) {
+ notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_VIDEO_RENDERING_START, 0);
+ } else if (what == Renderer::kWhatMediaRenderingStart) {
+ ALOGV("media rendering started");
+ notifyListener(mSrcId, MEDIA2_STARTED, 0, 0);
+ } else if (what == Renderer::kWhatAudioTearDown) {
+ int32_t reason;
+ CHECK(msg->findInt32("reason", &reason));
+ ALOGV("Tear down audio with reason %d.", reason);
+ if (reason == Renderer::kDueToTimeout && !(mPaused && mOffloadAudio)) {
+ // TimeoutWhenPaused is only for offload mode.
+ ALOGW("Receive a stale message for teardown.");
+ break;
+ }
+ int64_t positionUs;
+ if (!msg->findInt64("positionUs", &positionUs)) {
+ positionUs = mPreviousSeekTimeUs;
+ }
+
+ restartAudio(
+ positionUs, reason == Renderer::kForceNonOffload /* forceNonOffload */,
+ reason != Renderer::kDueToTimeout /* needsToCreateAudioDecoder */);
+ }
+ break;
+ }
+
+ case kWhatMoreDataQueued:
+ {
+ break;
+ }
+
+ case kWhatReset:
+ {
+ ALOGV("kWhatReset");
+
+ mResetting = true;
+ stopPlaybackTimer("kWhatReset");
+ stopRebufferingTimer(true);
+
+ mDeferredActions.push_back(
+ new FlushDecoderAction(
+ FLUSH_CMD_SHUTDOWN /* audio */,
+ FLUSH_CMD_SHUTDOWN /* video */));
+
+ mDeferredActions.push_back(
+ new SimpleAction(&NuPlayer2::performReset));
+
+ processDeferredActions();
+ break;
+ }
+
+ case kWhatNotifyTime:
+ {
+ ALOGV("kWhatNotifyTime");
+ int64_t timerUs;
+ CHECK(msg->findInt64("timerUs", &timerUs));
+
+ notifyListener(mSrcId, MEDIA2_NOTIFY_TIME, timerUs, 0);
+ break;
+ }
+
+ case kWhatSeek:
+ {
+ int64_t seekTimeUs;
+ int32_t mode;
+ int32_t needNotify;
+ CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+ CHECK(msg->findInt32("mode", &mode));
+ CHECK(msg->findInt32("needNotify", &needNotify));
+
+ ALOGV("kWhatSeek seekTimeUs=%lld us, mode=%d, needNotify=%d",
+ (long long)seekTimeUs, mode, needNotify);
+
+ if (!mStarted) {
+ if (!mSourceStarted) {
+ mSourceStarted = true;
+ mSource->start();
+ }
+ if (seekTimeUs > 0) {
+ performSeek(seekTimeUs, (MediaPlayer2SeekMode)mode);
+ }
+
+ if (needNotify) {
+ notifyDriverSeekComplete(mSrcId);
+ }
+ break;
+ }
+
+ // seeks can take a while, so we essentially paused
+ notifyListener(mSrcId, MEDIA2_PAUSED, 0, 0);
+
+ mDeferredActions.push_back(
+ new FlushDecoderAction(FLUSH_CMD_FLUSH /* audio */,
+ FLUSH_CMD_FLUSH /* video */));
+
+ mDeferredActions.push_back(
+ new SeekAction(seekTimeUs, (MediaPlayer2SeekMode)mode));
+
+ // After a flush without shutdown, decoder is paused.
+ // Don't resume it until source seek is done, otherwise it could
+ // start pulling stale data too soon.
+ mDeferredActions.push_back(
+ new ResumeDecoderAction(needNotify));
+
+ processDeferredActions();
+ break;
+ }
+
+ case kWhatPause:
+ {
+ onPause();
+ notifyListener(mSrcId, MEDIA2_PAUSED, 0, 0);
+ mPausedByClient = true;
+ break;
+ }
+
+ case kWhatSourceNotify:
+ {
+ onSourceNotify(msg);
+ break;
+ }
+
+ case kWhatClosedCaptionNotify:
+ {
+ onClosedCaptionNotify(msg);
+ break;
+ }
+
+ case kWhatPrepareDrm:
+ {
+ status_t status = onPrepareDrm(msg);
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("status", status);
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatReleaseDrm:
+ {
+ status_t status = onReleaseDrm();
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("status", status);
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+ break;
+ }
+
+ default:
+ TRESPASS();
+ break;
+ }
+}
+
+void NuPlayer2::onResume() {
+ if (!mPaused || mResetting) {
+ ALOGD_IF(mResetting, "resetting, onResume discarded");
+ return;
+ }
+ mPaused = false;
+ if (mSource != NULL) {
+ mSource->resume();
+ } else {
+ ALOGW("resume called when source is gone or not set");
+ }
+ // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
+ // needed.
+ if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
+ instantiateDecoder(true /* audio */, &mAudioDecoder);
+ }
+ if (mRenderer != NULL) {
+ mRenderer->resume();
+ } else {
+ ALOGW("resume called when renderer is gone or not set");
+ }
+
+ startPlaybackTimer("onresume");
+}
+
+void NuPlayer2::onStart() {
+ ALOGV("onStart: mCrypto: %p", mCrypto.get());
+
+ if (!mSourceStarted) {
+ mSourceStarted = true;
+ mSource->start();
+ }
+
+ mOffloadAudio = false;
+ mAudioEOS = false;
+ mVideoEOS = false;
+ mStarted = true;
+ mPaused = false;
+
+ uint32_t flags = 0;
+
+ if (mSource->isRealTime()) {
+ flags |= Renderer::FLAG_REAL_TIME;
+ }
+
+ bool hasAudio = (mSource->getFormat(true /* audio */) != NULL);
+ bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
+ if (!hasAudio && !hasVideo) {
+ ALOGE("no metadata for either audio or video source");
+ mSource->stop();
+ mSourceStarted = false;
+ notifyListener(mSrcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, ERROR_MALFORMED);
+ return;
+ }
+ ALOGV_IF(!hasAudio, "no metadata for audio source"); // video only stream
+
+ sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+
+ audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
+ if (mAudioSink != NULL) {
+ streamType = mAudioSink->getAudioStreamType();
+ }
+
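+ // Offload is only considered at normal speed and pitch; other playback rates use the non-offload path.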
+ mOffloadAudio =
+ canOffloadStream(audioMeta, hasVideo, mSource->isStreaming(), streamType)
+ && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
+
+ // Modular DRM: Disabling audio offload if the source is protected
+ if (mOffloadAudio && mIsDrmProtected) {
+ mOffloadAudio = false;
+ ALOGV("onStart: Disabling mOffloadAudio now that the source is protected.");
+ }
+
+ if (mOffloadAudio) {
+ flags |= Renderer::FLAG_OFFLOAD_AUDIO;
+ }
+
+ sp<AMessage> notify = new AMessage(kWhatRendererNotify, this);
+ ++mRendererGeneration;
+ notify->setInt32("generation", mRendererGeneration);
+ mRenderer = new Renderer(mAudioSink, mMediaClock, notify, flags);
+ mRendererLooper = new ALooper;
+ mRendererLooper->setName("NuPlayerRenderer");
+ mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+ mRendererLooper->registerHandler(mRenderer);
+
+ status_t err = mRenderer->setPlaybackSettings(mPlaybackSettings);
+ if (err != OK) {
+ mSource->stop();
+ mSourceStarted = false;
+ notifyListener(mSrcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
+ return;
+ }
+
+ float rate = getFrameRate();
+ if (rate > 0) {
+ mRenderer->setVideoFrameRate(rate);
+ }
+
+ if (mVideoDecoder != NULL) {
+ mVideoDecoder->setRenderer(mRenderer);
+ }
+ if (mAudioDecoder != NULL) {
+ mAudioDecoder->setRenderer(mRenderer);
+ }
+
+ startPlaybackTimer("onstart");
+
+ postScanSources();
+}
+
+void NuPlayer2::startPlaybackTimer(const char *where) {
+ Mutex::Autolock autoLock(mPlayingTimeLock);
+ if (mLastStartedPlayingTimeNs == 0) {
+ mLastStartedPlayingTimeNs = systemTime();
+ ALOGV("startPlaybackTimer() time %20" PRId64 " (%s)", mLastStartedPlayingTimeNs, where);
+ }
+}
+
+void NuPlayer2::stopPlaybackTimer(const char *where) {
+ Mutex::Autolock autoLock(mPlayingTimeLock);
+
+ ALOGV("stopPlaybackTimer() time %20" PRId64 " (%s)", mLastStartedPlayingTimeNs, where);
+
+ if (mLastStartedPlayingTimeNs != 0) {
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+ int64_t now = systemTime();
+ int64_t played = now - mLastStartedPlayingTimeNs;
+ ALOGV("stopPlaybackTimer() log %20" PRId64 "", played);
+
+ if (played > 0) {
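+ // played is in nanoseconds (from systemTime()); convert to microseconds, rounding to the nearest.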
+ driver->notifyMorePlayingTimeUs(mSrcId, (played+500)/1000);
+ }
+ }
+ mLastStartedPlayingTimeNs = 0;
+ }
+}
+
+void NuPlayer2::startRebufferingTimer() {
+ Mutex::Autolock autoLock(mPlayingTimeLock);
+ if (mLastStartedRebufferingTimeNs == 0) {
+ mLastStartedRebufferingTimeNs = systemTime();
+ ALOGV("startRebufferingTimer() time %20" PRId64 "", mLastStartedRebufferingTimeNs);
+ }
+}
+
+void NuPlayer2::stopRebufferingTimer(bool exitingPlayback) {
+ Mutex::Autolock autoLock(mPlayingTimeLock);
+
+ ALOGV("stopRebufferTimer() time %20" PRId64 " (exiting %d)", mLastStartedRebufferingTimeNs, exitingPlayback);
+
+ if (mLastStartedRebufferingTimeNs != 0) {
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+ int64_t now = systemTime();
+ int64_t rebuffered = now - mLastStartedRebufferingTimeNs;
+ ALOGV("stopRebufferingTimer() log %20" PRId64 "", rebuffered);
+
+ if (rebuffered > 0) {
+ driver->notifyMoreRebufferingTimeUs(mSrcId, (rebuffered+500)/1000);
+ if (exitingPlayback) {
+ driver->notifyRebufferingWhenExit(mSrcId, true);
+ }
+ }
+ }
+ mLastStartedRebufferingTimeNs = 0;
+ }
+}
+
+void NuPlayer2::onPause() {
+
+ stopPlaybackTimer("onPause");
+
+ if (mPaused) {
+ return;
+ }
+ mPaused = true;
+ if (mSource != NULL) {
+ mSource->pause();
+ } else {
+ ALOGW("pause called when source is gone or not set");
+ }
+ if (mRenderer != NULL) {
+ mRenderer->pause();
+ } else {
+ ALOGW("pause called when renderer is gone or not set");
+ }
+
+}
+
+bool NuPlayer2::audioDecoderStillNeeded() {
+ // Audio decoder is no longer needed if it's in shut/shutting down status.
+ return ((mFlushingAudio != SHUT_DOWN) && (mFlushingAudio != SHUTTING_DOWN_DECODER));
+}
+
+void NuPlayer2::handleFlushComplete(bool audio, bool isDecoder) {
+ // We wait for both the decoder flush and the renderer flush to complete
+ // before entering either the FLUSHED or the SHUTTING_DOWN_DECODER state.
+
+ mFlushComplete[audio][isDecoder] = true;
+ if (!mFlushComplete[audio][!isDecoder]) {
+ return;
+ }
+
+ FlushStatus *state = audio ? &mFlushingAudio : &mFlushingVideo;
+ switch (*state) {
+ case FLUSHING_DECODER:
+ {
+ *state = FLUSHED;
+ break;
+ }
+
+ case FLUSHING_DECODER_SHUTDOWN:
+ {
+ *state = SHUTTING_DOWN_DECODER;
+
+ ALOGV("initiating %s decoder shutdown", audio ? "audio" : "video");
+ getDecoder(audio)->initiateShutdown();
+ break;
+ }
+
+ default:
+ // Decoder flush completion can only occur in a flushing state.
+ LOG_ALWAYS_FATAL_IF(isDecoder, "decoder flush in invalid state %d", *state);
+ break;
+ }
+}
+
+void NuPlayer2::finishFlushIfPossible() {
+ if (mFlushingAudio != NONE && mFlushingAudio != FLUSHED
+ && mFlushingAudio != SHUT_DOWN) {
+ return;
+ }
+
+ if (mFlushingVideo != NONE && mFlushingVideo != FLUSHED
+ && mFlushingVideo != SHUT_DOWN) {
+ return;
+ }
+
+ ALOGV("both audio and video are flushed now.");
+
+ mFlushingAudio = NONE;
+ mFlushingVideo = NONE;
+
+ clearFlushComplete();
+
+ processDeferredActions();
+}
+
+void NuPlayer2::postScanSources() {
+ if (mScanSourcesPending) {
+ return;
+ }
+
+ sp<AMessage> msg = new AMessage(kWhatScanSources, this);
+ msg->setInt32("generation", mScanSourcesGeneration);
+ msg->post();
+
+ mScanSourcesPending = true;
+}
+
+void NuPlayer2::tryOpenAudioSinkForOffload(
+ const sp<AMessage> &format, const sp<MetaData> &audioMeta, bool hasVideo) {
+ // Note: This is called early in NuPlayer2 to determine whether offloading
+ // is possible; otherwise the decoders call the renderer openAudioSink directly.
+
+ status_t err = mRenderer->openAudioSink(
+ format, true /* offloadOnly */, hasVideo,
+ AUDIO_OUTPUT_FLAG_NONE, &mOffloadAudio, mSource->isStreaming());
+ if (err != OK) {
+ // On any failure, turn off mOffloadAudio.
+ mOffloadAudio = false;
+ } else if (mOffloadAudio) {
+ sendMetaDataToHal(mAudioSink, audioMeta);
+ }
+}
+
+void NuPlayer2::closeAudioSink() {
+ mRenderer->closeAudioSink();
+}
+
+void NuPlayer2::restartAudio(
+ int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
+ if (mAudioDecoder != NULL) {
+ mAudioDecoder->pause();
+ mAudioDecoder.clear();
+ mAudioDecoderError = false;
+ ++mAudioDecoderGeneration;
+ }
+ if (mFlushingAudio == FLUSHING_DECODER) {
+ mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
+ mFlushingAudio = FLUSHED;
+ finishFlushIfPossible();
+ } else if (mFlushingAudio == FLUSHING_DECODER_SHUTDOWN
+ || mFlushingAudio == SHUTTING_DOWN_DECODER) {
+ mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
+ mFlushingAudio = SHUT_DOWN;
+ finishFlushIfPossible();
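+ // The audio decoder is already shutting down, so do not recreate it below.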
+ needsToCreateAudioDecoder = false;
+ }
+ if (mRenderer == NULL) {
+ return;
+ }
+ closeAudioSink();
+ mRenderer->flush(true /* audio */, false /* notifyComplete */);
+ if (mVideoDecoder != NULL) {
+ mRenderer->flush(false /* audio */, false /* notifyComplete */);
+ }
+
+ performSeek(currentPositionUs, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */);
+
+ if (forceNonOffload) {
+ mRenderer->signalDisableOffloadAudio();
+ mOffloadAudio = false;
+ }
+ if (needsToCreateAudioDecoder) {
+ instantiateDecoder(true /* audio */, &mAudioDecoder, !forceNonOffload);
+ }
+}
+
+void NuPlayer2::determineAudioModeChange(const sp<AMessage> &audioFormat) {
+ if (mSource == NULL || mAudioSink == NULL) {
+ return;
+ }
+
+ if (mRenderer == NULL) {
+ ALOGW("No renderer can be used to determine audio mode. Use non-offload for safety.");
+ mOffloadAudio = false;
+ return;
+ }
+
+ sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+ sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
+ audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
+ const bool hasVideo = (videoFormat != NULL);
+ bool canOffload = canOffloadStream(
+ audioMeta, hasVideo, mSource->isStreaming(), streamType)
+ && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
+
+ // Modular DRM: Disabling audio offload if the source is protected
+ if (canOffload && mIsDrmProtected) {
+ canOffload = false;
+ ALOGV("determineAudioModeChange: Disabling mOffloadAudio b/c the source is protected.");
+ }
+
+ if (canOffload) {
+ if (!mOffloadAudio) {
+ mRenderer->signalEnableOffloadAudio();
+ }
+ // open audio sink early under offload mode.
+ tryOpenAudioSinkForOffload(audioFormat, audioMeta, hasVideo);
+ } else {
+ if (mOffloadAudio) {
+ mRenderer->signalDisableOffloadAudio();
+ mOffloadAudio = false;
+ }
+ }
+}
+
+status_t NuPlayer2::instantiateDecoder(
+ bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange) {
+ // The audio decoder could be cleared by tear down. If still in shut down
+ // process, no need to create a new audio decoder.
+ if (*decoder != NULL || (audio && mFlushingAudio == SHUT_DOWN)) {
+ return OK;
+ }
+
+ sp<AMessage> format = mSource->getFormat(audio);
+
+ if (format == NULL) {
+ return UNKNOWN_ERROR;
+ } else {
+ status_t err;
+ if (format->findInt32("err", &err) && err) {
+ return err;
+ }
+ }
+
+ format->setInt32("priority", 0 /* realtime */);
+
+ if (!audio) {
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+
+ sp<AMessage> ccNotify = new AMessage(kWhatClosedCaptionNotify, this);
+ if (mCCDecoder == NULL) {
+ mCCDecoder = new CCDecoder(ccNotify);
+ }
+
+ if (mSourceFlags & Source::FLAG_SECURE) {
+ format->setInt32("secure", true);
+ }
+
+ if (mSourceFlags & Source::FLAG_PROTECTED) {
+ format->setInt32("protected", true);
+ }
+
+ float rate = getFrameRate();
+ if (rate > 0) {
+ format->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
+ }
+ }
+
+ if (audio) {
+ sp<AMessage> notify = new AMessage(kWhatAudioNotify, this);
+ ++mAudioDecoderGeneration;
+ notify->setInt32("generation", mAudioDecoderGeneration);
+
+ if (checkAudioModeChange) {
+ determineAudioModeChange(format);
+ }
+ if (mOffloadAudio) {
+ mSource->setOffloadAudio(true /* offload */);
+
+ const bool hasVideo = (mSource->getFormat(false /*audio */) != NULL);
+ format->setInt32("has-video", hasVideo);
+ *decoder = new DecoderPassThrough(notify, mSource, mRenderer);
+ ALOGV("instantiateDecoder audio DecoderPassThrough hasVideo: %d", hasVideo);
+ } else {
+ mSource->setOffloadAudio(false /* offload */);
+
+ *decoder = new Decoder(notify, mSource, mPID, mUID, mRenderer);
+ ALOGV("instantiateDecoder audio Decoder");
+ }
+ mAudioDecoderError = false;
+ } else {
+ sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
+ ++mVideoDecoderGeneration;
+ notify->setInt32("generation", mVideoDecoderGeneration);
+
+ *decoder = new Decoder(
+ notify, mSource, mPID, mUID, mRenderer, mNativeWindow, mCCDecoder);
+ mVideoDecoderError = false;
+
+ // enable FRC if high-quality AV sync is requested, even if not
+ // directly queuing to display, as this will even improve textureview
+ // playback.
+ {
+ if (property_get_bool("persist.sys.media.avsync", false)) {
+ format->setInt32("auto-frc", 1);
+ }
+ }
+ }
+ (*decoder)->init();
+
+ // Modular DRM
+ if (mIsDrmProtected) {
+ format->setObject("crypto", mCrypto);
+ ALOGV("instantiateDecoder: mCrypto: %p isSecure: %d", mCrypto.get(),
+ (mSourceFlags & Source::FLAG_SECURE) != 0);
+ }
+
+ (*decoder)->configure(format);
+
+ if (!audio) {
+ sp<AMessage> params = new AMessage();
+ float rate = getFrameRate();
+ if (rate > 0) {
+ params->setFloat("frame-rate-total", rate);
+ }
+
+ sp<MetaData> fileMeta = getFileMeta();
+ if (fileMeta != NULL) {
+ int32_t videoTemporalLayerCount;
+ if (fileMeta->findInt32(kKeyTemporalLayerCount, &videoTemporalLayerCount)
+ && videoTemporalLayerCount > 0) {
+ params->setInt32("temporal-layer-count", videoTemporalLayerCount);
+ }
+ }
+
+ if (params->countEntries() > 0) {
+ (*decoder)->setParameters(params);
+ }
+ }
+ return OK;
+}
+
+void NuPlayer2::updateVideoSize(
+ int64_t srcId,
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat) {
+ if (inputFormat == NULL) {
+ ALOGW("Unknown video size, reporting 0x0!");
+ notifyListener(srcId, MEDIA2_SET_VIDEO_SIZE, 0, 0);
+ return;
+ }
+ int32_t err = OK;
+ inputFormat->findInt32("err", &err);
+ if (err == -EWOULDBLOCK) {
+ ALOGW("Video meta is not available yet!");
+ return;
+ }
+ if (err != OK) {
+ ALOGW("Something is wrong with video meta!");
+ return;
+ }
+
+ int32_t displayWidth, displayHeight;
+ if (outputFormat != NULL) {
+ int32_t width, height;
+ CHECK(outputFormat->findInt32("width", &width));
+ CHECK(outputFormat->findInt32("height", &height));
+
+ int32_t cropLeft, cropTop, cropRight, cropBottom;
+ CHECK(outputFormat->findRect(
+ "crop",
+ &cropLeft, &cropTop, &cropRight, &cropBottom));
+
+ displayWidth = cropRight - cropLeft + 1;
+ displayHeight = cropBottom - cropTop + 1;
+
+ ALOGV("Video output format changed to %d x %d "
+ "(crop: %d x %d @ (%d, %d))",
+ width, height,
+ displayWidth,
+ displayHeight,
+ cropLeft, cropTop);
+ } else {
+ CHECK(inputFormat->findInt32("width", &displayWidth));
+ CHECK(inputFormat->findInt32("height", &displayHeight));
+
+ ALOGV("Video input format %d x %d", displayWidth, displayHeight);
+ }
+
+ // Take into account sample aspect ratio if necessary:
+ int32_t sarWidth, sarHeight;
+ if (inputFormat->findInt32("sar-width", &sarWidth)
+ && inputFormat->findInt32("sar-height", &sarHeight)
+ && sarWidth > 0 && sarHeight > 0) {
+ ALOGV("Sample aspect ratio %d : %d", sarWidth, sarHeight);
+
+ displayWidth = (displayWidth * sarWidth) / sarHeight;
+
+ ALOGV("display dimensions %d x %d", displayWidth, displayHeight);
+ } else {
+ int32_t width, height;
+ if (inputFormat->findInt32("display-width", &width)
+ && inputFormat->findInt32("display-height", &height)
+ && width > 0 && height > 0
+ && displayWidth > 0 && displayHeight > 0) {
+ if (displayHeight * (int64_t)width / height > (int64_t)displayWidth) {
+ displayHeight = (int32_t)(displayWidth * (int64_t)height / width);
+ } else {
+ displayWidth = (int32_t)(displayHeight * (int64_t)width / height);
+ }
+ ALOGV("Video display width and height are overridden to %d x %d",
+ displayWidth, displayHeight);
+ }
+ }
+
+ int32_t rotationDegrees;
+ if (!inputFormat->findInt32("rotation-degrees", &rotationDegrees)) {
+ rotationDegrees = 0;
+ }
+
+ if (rotationDegrees == 90 || rotationDegrees == 270) {
+ int32_t tmp = displayWidth;
+ displayWidth = displayHeight;
+ displayHeight = tmp;
+ }
+
+ notifyListener(
+ srcId,
+ MEDIA2_SET_VIDEO_SIZE,
+ displayWidth,
+ displayHeight);
+}
+
+void NuPlayer2::notifyListener(int64_t srcId, int msg, int ext1, int ext2, const Parcel *in) {
+ if (mDriver == NULL) {
+ return;
+ }
+
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+
+ if (driver == NULL) {
+ return;
+ }
+
+ driver->notifyListener(srcId, msg, ext1, ext2, in);
+}
+
+void NuPlayer2::flushDecoder(bool audio, bool needShutdown) {
+ ALOGV("[%s] flushDecoder needShutdown=%d",
+ audio ? "audio" : "video", needShutdown);
+
+ const sp<DecoderBase> &decoder = getDecoder(audio);
+ if (decoder == NULL) {
+ ALOGI("flushDecoder %s without decoder present",
+ audio ? "audio" : "video");
+ return;
+ }
+
+ // Make sure we don't continue to scan sources until we finish flushing.
+ ++mScanSourcesGeneration;
+ if (mScanSourcesPending) {
+ if (!needShutdown) {
+ mDeferredActions.push_back(
+ new SimpleAction(&NuPlayer2::performScanSources));
+ }
+ mScanSourcesPending = false;
+ }
+
+ decoder->signalFlush();
+
+ FlushStatus newStatus =
+ needShutdown ? FLUSHING_DECODER_SHUTDOWN : FLUSHING_DECODER;
+
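+ // With no renderer there is no renderer-side flush to wait for, so mark it complete up front.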
+ mFlushComplete[audio][false /* isDecoder */] = (mRenderer == NULL);
+ mFlushComplete[audio][true /* isDecoder */] = false;
+ if (audio) {
+ ALOGE_IF(mFlushingAudio != NONE,
+ "audio flushDecoder() is called in state %d", mFlushingAudio);
+ mFlushingAudio = newStatus;
+ } else {
+ ALOGE_IF(mFlushingVideo != NONE,
+ "video flushDecoder() is called in state %d", mFlushingVideo);
+ mFlushingVideo = newStatus;
+ }
+}
+
+void NuPlayer2::queueDecoderShutdown(
+ bool audio, bool video, const sp<AMessage> &reply) {
+ ALOGI("queueDecoderShutdown audio=%d, video=%d", audio, video);
+
+ mDeferredActions.push_back(
+ new FlushDecoderAction(
+ audio ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE,
+ video ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE));
+
+ mDeferredActions.push_back(
+ new SimpleAction(&NuPlayer2::performScanSources));
+
+ mDeferredActions.push_back(new PostMessageAction(reply));
+
+ processDeferredActions();
+}
+
+status_t NuPlayer2::setVideoScalingMode(int32_t mode) {
+ mVideoScalingMode = mode;
+ if (mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
+ status_t ret = native_window_set_scaling_mode(
+ mNativeWindow->getANativeWindow(), mVideoScalingMode);
+ if (ret != OK) {
+ ALOGE("Failed to set scaling mode (%d): %s",
+ -ret, strerror(-ret));
+ return ret;
+ }
+ }
+ return OK;
+}
+
+status_t NuPlayer2::getTrackInfo(Parcel* reply) const {
+ sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, this);
+ msg->setPointer("reply", reply);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ return err;
+}
+
+status_t NuPlayer2::getSelectedTrack(int32_t type, Parcel* reply) const {
+ sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, this);
+ msg->setPointer("reply", reply);
+ msg->setInt32("type", type);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+ return err;
+}
+
+status_t NuPlayer2::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
+ sp<AMessage> msg = new AMessage(kWhatSelectTrack, this);
+ msg->setSize("trackIndex", trackIndex);
+ msg->setInt32("select", select);
+ msg->setInt64("timeUs", timeUs);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+
+ if (err != OK) {
+ return err;
+ }
+
+ if (!response->findInt32("err", &err)) {
+ err = OK;
+ }
+
+ return err;
+}
+
+status_t NuPlayer2::getCurrentPosition(int64_t *mediaUs) {
+ sp<Renderer> renderer = mRenderer;
+ if (renderer == NULL) {
+ return NO_INIT;
+ }
+
+ return renderer->getCurrentPosition(mediaUs);
+}
+
+void NuPlayer2::getStats(Vector<sp<AMessage> > *mTrackStats) {
+ CHECK(mTrackStats != NULL);
+
+ mTrackStats->clear();
+ if (mVideoDecoder != NULL) {
+ mTrackStats->push_back(mVideoDecoder->getStats());
+ }
+ if (mAudioDecoder != NULL) {
+ mTrackStats->push_back(mAudioDecoder->getStats());
+ }
+}
+
+sp<MetaData> NuPlayer2::getFileMeta() {
+ return mSource->getFileFormatMeta();
+}
+
+float NuPlayer2::getFrameRate() {
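+ // Returns 0 when there is no video format and -1 when the frame rate cannot be determined.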
+ sp<MetaData> meta = mSource->getFormatMeta(false /* audio */);
+ if (meta == NULL) {
+ return 0;
+ }
+ int32_t rate;
+ if (!meta->findInt32(kKeyFrameRate, &rate)) {
+ // fall back to try file meta
+ sp<MetaData> fileMeta = getFileMeta();
+ if (fileMeta == NULL) {
+ ALOGW("source has video meta but not file meta");
+ return -1;
+ }
+ int32_t fileMetaRate;
+ if (!fileMeta->findInt32(kKeyFrameRate, &fileMetaRate)) {
+ return -1;
+ }
+ return fileMetaRate;
+ }
+ return rate;
+}
+
+void NuPlayer2::schedulePollDuration() {
+ sp<AMessage> msg = new AMessage(kWhatPollDuration, this);
+ msg->setInt32("generation", mPollDurationGeneration);
+ msg->post();
+}
+
+void NuPlayer2::cancelPollDuration() {
+ ++mPollDurationGeneration;
+}
+
+void NuPlayer2::processDeferredActions() {
+ while (!mDeferredActions.empty()) {
+ // We won't execute any deferred actions until we're no longer in
+ // an intermediate state, i.e. one or more decoders are currently
+ // flushing or shutting down.
+
+ if (mFlushingAudio != NONE || mFlushingVideo != NONE) {
+ // We're currently flushing, postpone the reset until that's
+ // completed.
+
+ ALOGV("postponing action mFlushingAudio=%d, mFlushingVideo=%d",
+ mFlushingAudio, mFlushingVideo);
+
+ break;
+ }
+
+ sp<Action> action = *mDeferredActions.begin();
+ mDeferredActions.erase(mDeferredActions.begin());
+
+ action->execute(this);
+ }
+}
+
+void NuPlayer2::performSeek(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
+ ALOGV("performSeek seekTimeUs=%lld us (%.2f secs), mode=%d",
+ (long long)seekTimeUs, seekTimeUs / 1E6, mode);
+
+ if (mSource == NULL) {
+ // This happens when reset occurs right before the loop mode
+ // asynchronously seeks to the start of the stream.
+ LOG_ALWAYS_FATAL_IF(mAudioDecoder != NULL || mVideoDecoder != NULL,
+ "mSource is NULL and decoders not NULL audio(%p) video(%p)",
+ mAudioDecoder.get(), mVideoDecoder.get());
+ return;
+ }
+ mPreviousSeekTimeUs = seekTimeUs;
+ mSource->seekTo(seekTimeUs, mode);
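+ // Invalidate timed-text messages that were scheduled against the old position.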
+ ++mTimedTextGeneration;
+
+ // everything's flushed, continue playback.
+}
+
+void NuPlayer2::performDecoderFlush(FlushCommand audio, FlushCommand video) {
+ ALOGV("performDecoderFlush audio=%d, video=%d", audio, video);
+
+ if ((audio == FLUSH_CMD_NONE || mAudioDecoder == NULL)
+ && (video == FLUSH_CMD_NONE || mVideoDecoder == NULL)) {
+ return;
+ }
+
+ if (audio != FLUSH_CMD_NONE && mAudioDecoder != NULL) {
+ flushDecoder(true /* audio */, (audio == FLUSH_CMD_SHUTDOWN));
+ }
+
+ if (video != FLUSH_CMD_NONE && mVideoDecoder != NULL) {
+ flushDecoder(false /* audio */, (video == FLUSH_CMD_SHUTDOWN));
+ }
+}
+
+void NuPlayer2::performReset() {
+ ALOGV("performReset");
+
+ CHECK(mAudioDecoder == NULL);
+ CHECK(mVideoDecoder == NULL);
+
+ stopPlaybackTimer("performReset");
+ stopRebufferingTimer(true);
+
+ cancelPollDuration();
+
+ ++mScanSourcesGeneration;
+ mScanSourcesPending = false;
+
+ if (mRendererLooper != NULL) {
+ if (mRenderer != NULL) {
+ mRendererLooper->unregisterHandler(mRenderer->id());
+ }
+ mRendererLooper->stop();
+ mRendererLooper.clear();
+ }
+ mRenderer.clear();
+ ++mRendererGeneration;
+
+ if (mSource != NULL) {
+ mSource->stop();
+
+ Mutex::Autolock autoLock(mSourceLock);
+ mSource.clear();
+ }
+
+ if (mDriver != NULL) {
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+ driver->notifyResetComplete(mSrcId);
+ }
+ }
+
+ mStarted = false;
+ mPrepared = false;
+ mResetting = false;
+ mSourceStarted = false;
+
+ // Modular DRM
+ if (mCrypto != NULL) {
+ // decoders will be flushed before this so their mCrypto would go away on their own
+ // TODO change to ALOGV
+ ALOGD("performReset mCrypto: %p", mCrypto.get());
+ mCrypto.clear();
+ }
+ mIsDrmProtected = false;
+}
+
+void NuPlayer2::performPlayNextDataSource() {
+ ALOGV("performPlayNextDataSource");
+
+ CHECK(mAudioDecoder == NULL);
+ CHECK(mVideoDecoder == NULL);
+
+ stopPlaybackTimer("performPlayNextDataSource");
+ stopRebufferingTimer(true);
+
+ cancelPollDuration();
+
+ ++mScanSourcesGeneration;
+ mScanSourcesPending = false;
+
+ ++mRendererGeneration;
+
+ if (mSource != NULL) {
+ mSource->stop();
+ }
+
+ long previousSrcId;
+ {
+ Mutex::Autolock autoLock(mSourceLock);
+ mSource = mNextSource;
+ mNextSource = NULL;
+ previousSrcId = mSrcId;
+ mSrcId = mNextSrcId;
+ ++mNextSrcId; // to distinguish the two sources.
+ }
+
+ if (mDriver != NULL) {
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+ notifyListener(previousSrcId, MEDIA2_INFO, MEDIA2_INFO_PLAYBACK_COMPLETE, 0);
+ notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_STARTED_AS_NEXT, 0);
+ }
+ }
+
+ mStarted = false;
+ mPrepared = true; // TODO: what if it's not prepared
+ mResetting = false;
+ mSourceStarted = false;
+
+ // Modular DRM
+ if (mCrypto != NULL) {
+ // decoders will be flushed before this so their mCrypto would go away on their own
+ // TODO change to ALOGV
+ ALOGD("performReset mCrypto: %p", mCrypto.get());
+ mCrypto.clear();
+ }
+ mIsDrmProtected = false;
+
+ if (mRenderer != NULL) {
+ mRenderer->resume();
+ }
+
+ onStart();
+ mPausedByClient = false;
+ notifyListener(mSrcId, MEDIA2_STARTED, 0, 0);
+}
+
+void NuPlayer2::performScanSources() {
+ ALOGV("performScanSources");
+
+ if (!mStarted) {
+ return;
+ }
+
+ if (mAudioDecoder == NULL || mVideoDecoder == NULL) {
+ postScanSources();
+ }
+}
+
+void NuPlayer2::performSetSurface(const sp<ANativeWindowWrapper> &nww) {
+ ALOGV("performSetSurface");
+
+ mNativeWindow = nww;
+
+ // XXX - ignore error from setVideoScalingMode for now
+ setVideoScalingMode(mVideoScalingMode);
+
+ if (mDriver != NULL) {
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+ driver->notifySetSurfaceComplete(mSrcId);
+ }
+ }
+}
+
+void NuPlayer2::performResumeDecoders(bool needNotify) {
+ if (needNotify) {
+ mResumePending = true;
+ if (mVideoDecoder == NULL) {
+ // if audio-only, we can notify seek complete now,
+ // as the resume operation will be relatively fast.
+ finishResume();
+ }
+ }
+
+ if (mVideoDecoder != NULL) {
+ // When there is continuous seek, MediaPlayer will cache the seek
+ // position, and send down new seek request when previous seek is
+ // complete. Let's wait for at least one video output frame before
+ // notifying seek complete, so that the video thumbnail gets updated
+ // when seekbar is dragged.
+ mVideoDecoder->signalResume(needNotify);
+ }
+
+ if (mAudioDecoder != NULL) {
+ mAudioDecoder->signalResume(false /* needNotify */);
+ }
+}
+
+void NuPlayer2::finishResume() {
+ if (mResumePending) {
+ mResumePending = false;
+ notifyDriverSeekComplete(mSrcId);
+ }
+}
+
+void NuPlayer2::notifyDriverSeekComplete(int64_t srcId) {
+ if (mDriver != NULL) {
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+ driver->notifySeekComplete(srcId);
+ }
+ }
+}
+
+void NuPlayer2::onSourceNotify(const sp<AMessage> &msg) {
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
+
+ int64_t srcId;
+ CHECK(msg->findInt64("srcId", &srcId));
+ switch (what) {
+ case Source::kWhatPrepared:
+ {
+ ALOGV("NuPlayer2::onSourceNotify Source::kWhatPrepared source: %p", mSource.get());
+ if (mSource == NULL) {
+ // This is a stale notification from a source that was
+ // asynchronously preparing when the client called reset().
+ // We handled the reset, the source is gone.
+ break;
+ }
+
+ int32_t err;
+ CHECK(msg->findInt32("err", &err));
+
+ if (err != OK) {
+ // shut down potential secure codecs in case client never calls reset
+ mDeferredActions.push_back(
+ new FlushDecoderAction(FLUSH_CMD_SHUTDOWN /* audio */,
+ FLUSH_CMD_SHUTDOWN /* video */));
+ processDeferredActions();
+ } else {
+ mPrepared = true;
+ }
+
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+ // notify duration first, so that it's definitely set when
+ // the app received the "prepare complete" callback.
+ int64_t durationUs;
+ if (mSource->getDuration(&durationUs) == OK) {
+ driver->notifyDuration(srcId, durationUs);
+ }
+ driver->notifyPrepareCompleted(srcId, err);
+ }
+
+ break;
+ }
+
+ // Modular DRM
+ case Source::kWhatDrmInfo:
+ {
+ Parcel parcel;
+ sp<ABuffer> drmInfo;
+ CHECK(msg->findBuffer("drmInfo", &drmInfo));
+ parcel.setData(drmInfo->data(), drmInfo->size());
+
+ ALOGV("onSourceNotify() kWhatDrmInfo MEDIA2_DRM_INFO drmInfo: %p parcel size: %zu",
+ drmInfo.get(), parcel.dataSize());
+
+ notifyListener(srcId, MEDIA2_DRM_INFO, 0 /* ext1 */, 0 /* ext2 */, &parcel);
+
+ break;
+ }
+
+ case Source::kWhatFlagsChanged:
+ {
+ uint32_t flags;
+ CHECK(msg->findInt32("flags", (int32_t *)&flags));
+
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver != NULL) {
+
+ ALOGV("onSourceNotify() kWhatFlagsChanged FLAG_CAN_PAUSE: %d "
+ "FLAG_CAN_SEEK_BACKWARD: %d \n\t\t\t\t FLAG_CAN_SEEK_FORWARD: %d "
+ "FLAG_CAN_SEEK: %d FLAG_DYNAMIC_DURATION: %d \n"
+ "\t\t\t\t FLAG_SECURE: %d FLAG_PROTECTED: %d",
+ (flags & Source::FLAG_CAN_PAUSE) != 0,
+ (flags & Source::FLAG_CAN_SEEK_BACKWARD) != 0,
+ (flags & Source::FLAG_CAN_SEEK_FORWARD) != 0,
+ (flags & Source::FLAG_CAN_SEEK) != 0,
+ (flags & Source::FLAG_DYNAMIC_DURATION) != 0,
+ (flags & Source::FLAG_SECURE) != 0,
+ (flags & Source::FLAG_PROTECTED) != 0);
+
+ if ((flags & NuPlayer2::Source::FLAG_CAN_SEEK) == 0) {
+ driver->notifyListener(
+ srcId, MEDIA2_INFO, MEDIA2_INFO_NOT_SEEKABLE, 0);
+ }
+ driver->notifyFlagsChanged(srcId, flags);
+ }
+
+ if ((mSourceFlags & Source::FLAG_DYNAMIC_DURATION)
+ && (!(flags & Source::FLAG_DYNAMIC_DURATION))) {
+ cancelPollDuration();
+ } else if (!(mSourceFlags & Source::FLAG_DYNAMIC_DURATION)
+ && (flags & Source::FLAG_DYNAMIC_DURATION)
+ && (mAudioDecoder != NULL || mVideoDecoder != NULL)) {
+ schedulePollDuration();
+ }
+
+ mSourceFlags = flags;
+ break;
+ }
+
+ case Source::kWhatVideoSizeChanged:
+ {
+ sp<AMessage> format;
+ CHECK(msg->findMessage("format", &format));
+
+ updateVideoSize(srcId, format);
+ break;
+ }
+
+ case Source::kWhatBufferingUpdate:
+ {
+ int32_t percentage;
+ CHECK(msg->findInt32("percentage", &percentage));
+
+ notifyListener(srcId, MEDIA2_BUFFERING_UPDATE, percentage, 0);
+ break;
+ }
+
+ case Source::kWhatPauseOnBufferingStart:
+ {
+ // ignore if not playing
+ if (mStarted) {
+ ALOGI("buffer low, pausing...");
+
+ startRebufferingTimer();
+ mPausedForBuffering = true;
+ onPause();
+ }
+ notifyListener(srcId, MEDIA2_INFO, MEDIA2_INFO_BUFFERING_START, 0);
+ break;
+ }
+
+ case Source::kWhatResumeOnBufferingEnd:
+ {
+ // ignore if not playing
+ if (mStarted) {
+ ALOGI("buffer ready, resuming...");
+
+ stopRebufferingTimer(false);
+ mPausedForBuffering = false;
+
+ // do not resume yet if client didn't unpause
+ if (!mPausedByClient) {
+ onResume();
+ }
+ }
+ notifyListener(srcId, MEDIA2_INFO, MEDIA2_INFO_BUFFERING_END, 0);
+ break;
+ }
+
+ case Source::kWhatCacheStats:
+ {
+ int32_t kbps;
+ CHECK(msg->findInt32("bandwidth", &kbps));
+
+ notifyListener(srcId, MEDIA2_INFO, MEDIA2_INFO_NETWORK_BANDWIDTH, kbps);
+ break;
+ }
+
+ case Source::kWhatSubtitleData:
+ {
+ sp<ABuffer> buffer;
+ CHECK(msg->findBuffer("buffer", &buffer));
+
+ sendSubtitleData(buffer, 0 /* baseIndex */);
+ break;
+ }
+
+ case Source::kWhatTimedMetaData:
+ {
+ sp<ABuffer> buffer;
+ if (!msg->findBuffer("buffer", &buffer)) {
+ notifyListener(srcId, MEDIA2_INFO, MEDIA2_INFO_METADATA_UPDATE, 0);
+ } else {
+ sendTimedMetaData(buffer);
+ }
+ break;
+ }
+
+ case Source::kWhatTimedTextData:
+ {
+ int32_t generation;
+ if (msg->findInt32("generation", &generation)
+ && generation != mTimedTextGeneration) {
+ break;
+ }
+
+ sp<ABuffer> buffer;
+ CHECK(msg->findBuffer("buffer", &buffer));
+
+ sp<NuPlayer2Driver> driver = mDriver.promote();
+ if (driver == NULL) {
+ break;
+ }
+
+ int64_t posMs;
+ int64_t timeUs, posUs;
+ driver->getCurrentPosition(&posMs);
+ posUs = posMs * 1000ll;
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+ if (posUs < timeUs) {
+ if (!msg->findInt32("generation", &generation)) {
+ msg->setInt32("generation", mTimedTextGeneration);
+ }
+ msg->post(timeUs - posUs);
+ } else {
+ sendTimedTextData(buffer);
+ }
+ break;
+ }
+
+ case Source::kWhatQueueDecoderShutdown:
+ {
+ int32_t audio, video;
+ CHECK(msg->findInt32("audio", &audio));
+ CHECK(msg->findInt32("video", &video));
+
+ sp<AMessage> reply;
+ CHECK(msg->findMessage("reply", &reply));
+
+ queueDecoderShutdown(audio, video, reply);
+ break;
+ }
+
+ case Source::kWhatDrmNoLicense:
+ {
+ notifyListener(srcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+void NuPlayer2::onClosedCaptionNotify(const sp<AMessage> &msg) {
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
+
+ switch (what) {
+ case NuPlayer2::CCDecoder::kWhatClosedCaptionData:
+ {
+ sp<ABuffer> buffer;
+ CHECK(msg->findBuffer("buffer", &buffer));
+
+ size_t inbandTracks = 0;
+ if (mSource != NULL) {
+ inbandTracks = mSource->getTrackCount();
+ }
+
+ sendSubtitleData(buffer, inbandTracks);
+ break;
+ }
+
+ case NuPlayer2::CCDecoder::kWhatTrackAdded:
+ {
+ notifyListener(mSrcId, MEDIA2_INFO, MEDIA2_INFO_METADATA_UPDATE, 0);
+
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+
+}
+
+void NuPlayer2::sendSubtitleData(const sp<ABuffer> &buffer, int32_t baseIndex) {
+ int32_t trackIndex;
+ int64_t timeUs, durationUs;
+ CHECK(buffer->meta()->findInt32(AMEDIAFORMAT_KEY_TRACK_INDEX, &trackIndex));
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+ CHECK(buffer->meta()->findInt64("durationUs", &durationUs));
+
+ Parcel in;
+ in.writeInt32(trackIndex + baseIndex);
+ in.writeInt64(timeUs);
+ in.writeInt64(durationUs);
+ in.writeInt32(buffer->size());
+ in.writeInt32(buffer->size());
+ in.write(buffer->data(), buffer->size());
+
+ notifyListener(mSrcId, MEDIA2_SUBTITLE_DATA, 0, 0, &in);
+}
+
+void NuPlayer2::sendTimedMetaData(const sp<ABuffer> &buffer) {
+ int64_t timeUs;
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+ Parcel in;
+ in.writeInt64(timeUs);
+ in.writeInt32(buffer->size());
+ in.writeInt32(buffer->size());
+ in.write(buffer->data(), buffer->size());
+
+ notifyListener(mSrcId, MEDIA2_META_DATA, 0, 0, &in);
+}
+
+void NuPlayer2::sendTimedTextData(const sp<ABuffer> &buffer) {
+ const void *data;
+ size_t size = 0;
+ int64_t timeUs;
+ int32_t flag = TextDescriptions::IN_BAND_TEXT_3GPP;
+
+ AString mime;
+ CHECK(buffer->meta()->findString("mime", &mime));
+ CHECK(strcasecmp(mime.c_str(), MEDIA_MIMETYPE_TEXT_3GPP) == 0);
+
+ data = buffer->data();
+ size = buffer->size();
+
+ Parcel parcel;
+ if (size > 0) {
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+ int32_t global = 0;
+ if (buffer->meta()->findInt32("global", &global) && global) {
+ flag |= TextDescriptions::GLOBAL_DESCRIPTIONS;
+ } else {
+ flag |= TextDescriptions::LOCAL_DESCRIPTIONS;
+ }
+ TextDescriptions::getParcelOfDescriptions(
+ (const uint8_t *)data, size, flag, timeUs / 1000, &parcel);
+ }
+
+ if (parcel.dataSize() > 0) {
+ notifyListener(mSrcId, MEDIA2_TIMED_TEXT, 0, 0, &parcel);
+ } else { // send an empty timed text
+ notifyListener(mSrcId, MEDIA2_TIMED_TEXT, 0, 0);
+ }
+}
+
+const char *NuPlayer2::getDataSourceType() {
+ switch (mDataSourceType) {
+ case DATA_SOURCE_TYPE_HTTP_LIVE:
+ return "HTTPLive";
+
+ case DATA_SOURCE_TYPE_RTSP:
+ return "RTSP";
+
+ case DATA_SOURCE_TYPE_GENERIC_URL:
+ return "GenURL";
+
+ case DATA_SOURCE_TYPE_GENERIC_FD:
+ return "GenFD";
+
+ case DATA_SOURCE_TYPE_MEDIA:
+ return "Media";
+
+ case DATA_SOURCE_TYPE_NONE:
+ default:
+ return "None";
+ }
+ }
+
+// Modular DRM begin
+status_t NuPlayer2::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
+{
+ ALOGV("prepareDrm ");
+
+ // Passing to the looper anyway; called in a pre-config prepared state so no race on mCrypto
+ sp<AMessage> msg = new AMessage(kWhatPrepareDrm, this);
+ // synchronous call so just passing the address but with local copies of "const" args
+ uint8_t UUID[16];
+ memcpy(UUID, uuid, sizeof(UUID));
+ Vector<uint8_t> sessionId = drmSessionId;
+ msg->setPointer("uuid", (void*)UUID);
+ msg->setPointer("drmSessionId", (void*)&sessionId);
+
+ sp<AMessage> response;
+ status_t status = msg->postAndAwaitResponse(&response);
+
+ if (status == OK && response != NULL) {
+ CHECK(response->findInt32("status", &status));
+ ALOGV("prepareDrm ret: %d ", status);
+ } else {
+ ALOGE("prepareDrm err: %d", status);
+ }
+
+ return status;
+}
+
+status_t NuPlayer2::releaseDrm()
+{
+ ALOGV("releaseDrm ");
+
+ sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
+
+ sp<AMessage> response;
+ status_t status = msg->postAndAwaitResponse(&response);
+
+ if (status == OK && response != NULL) {
+ CHECK(response->findInt32("status", &status));
+ ALOGV("releaseDrm ret: %d ", status);
+ } else {
+ ALOGE("releaseDrm err: %d", status);
+ }
+
+ return status;
+}
+
+status_t NuPlayer2::onPrepareDrm(const sp<AMessage> &msg)
+{
+ // TODO change to ALOGV
+ ALOGD("onPrepareDrm ");
+
+ status_t status = INVALID_OPERATION;
+ if (mSource == NULL) {
+ ALOGE("onPrepareDrm: No source. onPrepareDrm failed with %d.", status);
+ return status;
+ }
+
+ uint8_t *uuid;
+ Vector<uint8_t> *drmSessionId;
+ CHECK(msg->findPointer("uuid", (void**)&uuid));
+ CHECK(msg->findPointer("drmSessionId", (void**)&drmSessionId));
+
+ status = OK;
+ sp<AMediaCryptoWrapper> crypto = NULL;
+
+ status = mSource->prepareDrm(uuid, *drmSessionId, &crypto);
+ if (crypto == NULL) {
+ ALOGE("onPrepareDrm: mSource->prepareDrm failed. status: %d", status);
+ return status;
+ }
+ ALOGV("onPrepareDrm: mSource->prepareDrm succeeded");
+
+ if (mCrypto != NULL) {
+ ALOGE("onPrepareDrm: Unexpected. Already having mCrypto: %p", mCrypto.get());
+ mCrypto.clear();
+ }
+
+ mCrypto = crypto;
+ mIsDrmProtected = true;
+ // TODO change to ALOGV
+ ALOGD("onPrepareDrm: mCrypto: %p", mCrypto.get());
+
+ return status;
+}
+
+status_t NuPlayer2::onReleaseDrm()
+{
+ // TODO change to ALOGV
+ ALOGD("onReleaseDrm ");
+
+ if (!mIsDrmProtected) {
+ ALOGW("onReleaseDrm: Unexpected. mIsDrmProtected is already false.");
+ }
+
+ mIsDrmProtected = false;
+
+ status_t status;
+ if (mCrypto != NULL) {
+ // notifying the source first before removing crypto from codec
+ if (mSource != NULL) {
+ mSource->releaseDrm();
+ }
+
+ status = OK;
+ // first making sure the codecs have released their crypto reference
+ const sp<DecoderBase> &videoDecoder = getDecoder(false/*audio*/);
+ if (videoDecoder != NULL) {
+ status = videoDecoder->releaseCrypto();
+ ALOGV("onReleaseDrm: video decoder ret: %d", status);
+ }
+
+ const sp<DecoderBase> &audioDecoder = getDecoder(true/*audio*/);
+ if (audioDecoder != NULL) {
+ status_t status_audio = audioDecoder->releaseCrypto();
+ if (status == OK) { // otherwise, returning the first error
+ status = status_audio;
+ }
+ ALOGV("onReleaseDrm: audio decoder ret: %d", status_audio);
+ }
+
+ // TODO change to ALOGV
+ ALOGD("onReleaseDrm: mCrypto: %p", mCrypto.get());
+ mCrypto.clear();
+ } else { // mCrypto == NULL
+ ALOGE("onReleaseDrm: Unexpected. There is no crypto.");
+ status = INVALID_OPERATION;
+ }
+
+ return status;
+}
+// Modular DRM end
+////////////////////////////////////////////////////////////////////////////////
+
+sp<AMessage> NuPlayer2::Source::getFormat(bool audio) {
+ sp<MetaData> meta = getFormatMeta(audio);
+
+ if (meta == NULL) {
+ return NULL;
+ }
+
+ sp<AMessage> msg = new AMessage;
+
+ if (convertMetaDataToMessage(meta, &msg) == OK) {
+ return msg;
+ }
+ return NULL;
+}
+
+void NuPlayer2::Source::notifyFlagsChanged(uint32_t flags) {
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatFlagsChanged);
+ notify->setInt32("flags", flags);
+ notify->post();
+}
+
+void NuPlayer2::Source::notifyVideoSizeChanged(const sp<AMessage> &format) {
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatVideoSizeChanged);
+ notify->setMessage("format", format);
+ notify->post();
+}
+
+void NuPlayer2::Source::notifyPrepared(status_t err) {
+ ALOGV("Source::notifyPrepared %d", err);
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatPrepared);
+ notify->setInt32("err", err);
+ notify->post();
+}
+
+void NuPlayer2::Source::notifyDrmInfo(const sp<ABuffer> &drmInfoBuffer)
+{
+ ALOGV("Source::notifyDrmInfo");
+
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatDrmInfo);
+ notify->setBuffer("drmInfo", drmInfoBuffer);
+
+ notify->post();
+}
+
+void NuPlayer2::Source::onMessageReceived(const sp<AMessage> & /* msg */) {
+ TRESPASS();
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.h b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
new file mode 100644
index 0000000..96f85f9
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NU_PLAYER2_H_
+
+#define NU_PLAYER2_H_
+
+#include <media/AudioResamplerPublic.h>
+#include <media/stagefright/foundation/AHandler.h>
+
+#include <mediaplayer2/MediaPlayer2Interface.h>
+
+namespace android {
+
+struct ABuffer;
+struct AMediaCryptoWrapper;
+struct AMessage;
+struct ANativeWindowWrapper;
+struct AudioPlaybackRate;
+struct AVSyncSettings;
+struct DataSourceDesc;
+struct MediaClock;
+struct MediaHTTPService;
+class MetaData;
+struct NuPlayer2Driver;
+
+struct NuPlayer2 : public AHandler {
+ explicit NuPlayer2(pid_t pid, uid_t uid, const sp<MediaClock> &mediaClock);
+
+ void setDriver(const wp<NuPlayer2Driver> &driver);
+
+ void setDataSourceAsync(const sp<DataSourceDesc> &dsd);
+ void prepareNextDataSourceAsync(const sp<DataSourceDesc> &dsd);
+ void playNextDataSource(int64_t srcId);
+
+ status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */);
+ status_t setBufferingSettings(const BufferingSettings& buffering);
+
+ void prepareAsync();
+
+ void setVideoSurfaceTextureAsync(const sp<ANativeWindowWrapper> &nww);
+
+ void setAudioSink(const sp<MediaPlayer2Interface::AudioSink> &sink);
+ status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+ status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+ status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
+ status_t getSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
+ void start();
+
+ void pause();
+
+ // Will notify the driver through "notifyResetComplete" once finished.
+ void resetAsync();
+
+ // Request a notification when specified media time is reached.
+ status_t notifyAt(int64_t mediaTimeUs);
+
+ // Will notify the driver through "notifySeekComplete" once finished
+ // and needNotify is true.
+ void seekToAsync(
+ int64_t seekTimeUs,
+ MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC,
+ bool needNotify = false);
+
+ status_t setVideoScalingMode(int32_t mode);
+ status_t getTrackInfo(Parcel* reply) const;
+ status_t getSelectedTrack(int32_t type, Parcel* reply) const;
+ status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
+ status_t getCurrentPosition(int64_t *mediaUs);
+ void getStats(Vector<sp<AMessage> > *mTrackStats);
+
+ sp<MetaData> getFileMeta();
+ float getFrameRate();
+
+ // Modular DRM
+ status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
+ status_t releaseDrm();
+
+ const char *getDataSourceType();
+
+protected:
+ virtual ~NuPlayer2();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+public:
+ struct StreamListener;
+ struct Source;
+
+private:
+ struct Decoder;
+ struct DecoderBase;
+ struct DecoderPassThrough;
+ struct CCDecoder;
+ struct GenericSource2;
+ struct HTTPLiveSource2;
+ struct Renderer;
+ struct RTSPSource2;
+ struct Action;
+ struct SeekAction;
+ struct SetSurfaceAction;
+ struct ResumeDecoderAction;
+ struct FlushDecoderAction;
+ struct PostMessageAction;
+ struct SimpleAction;
+
+ enum {
+ kWhatSetDataSource = '=DaS',
+ kWhatPrepare = 'prep',
+ kWhatPrepareNextDataSource = 'pNDS',
+ kWhatPlayNextDataSource = 'plNS',
+ kWhatSetVideoSurface = '=VSu',
+ kWhatSetAudioSink = '=AuS',
+ kWhatMoreDataQueued = 'more',
+ kWhatConfigPlayback = 'cfPB',
+ kWhatConfigSync = 'cfSy',
+ kWhatGetPlaybackSettings = 'gPbS',
+ kWhatGetSyncSettings = 'gSyS',
+ kWhatStart = 'strt',
+ kWhatScanSources = 'scan',
+ kWhatVideoNotify = 'vidN',
+ kWhatAudioNotify = 'audN',
+ kWhatClosedCaptionNotify = 'capN',
+ kWhatRendererNotify = 'renN',
+ kWhatReset = 'rset',
+ kWhatNotifyTime = 'nfyT',
+ kWhatSeek = 'seek',
+ kWhatPause = 'paus',
+ kWhatResume = 'rsme',
+ kWhatPollDuration = 'polD',
+ kWhatSourceNotify = 'srcN',
+ kWhatGetTrackInfo = 'gTrI',
+ kWhatGetSelectedTrack = 'gSel',
+ kWhatSelectTrack = 'selT',
+ kWhatGetBufferingSettings = 'gBus',
+ kWhatSetBufferingSettings = 'sBuS',
+ kWhatPrepareDrm = 'pDrm',
+ kWhatReleaseDrm = 'rDrm',
+ };
+
+ wp<NuPlayer2Driver> mDriver;
+ pid_t mPID;
+ uid_t mUID;
+ const sp<MediaClock> mMediaClock;
+ Mutex mSourceLock; // guard |mSource|.
+ sp<Source> mSource;
+ int64_t mSrcId;
+ uint32_t mSourceFlags;
+ sp<Source> mNextSource;
+ int64_t mNextSrcId;
+ uint32_t mNextSourceFlags;
+ sp<ANativeWindowWrapper> mNativeWindow;
+ sp<MediaPlayer2Interface::AudioSink> mAudioSink;
+ sp<DecoderBase> mVideoDecoder;
+ bool mOffloadAudio;
+ sp<DecoderBase> mAudioDecoder;
+ sp<CCDecoder> mCCDecoder;
+ sp<Renderer> mRenderer;
+ sp<ALooper> mRendererLooper;
+ int32_t mAudioDecoderGeneration;
+ int32_t mVideoDecoderGeneration;
+ int32_t mRendererGeneration;
+
+ Mutex mPlayingTimeLock;
+ int64_t mLastStartedPlayingTimeNs;
+ void stopPlaybackTimer(const char *where);
+ void startPlaybackTimer(const char *where);
+
+ int64_t mLastStartedRebufferingTimeNs;
+ void startRebufferingTimer();
+ void stopRebufferingTimer(bool exitingPlayback);
+
+ int64_t mPreviousSeekTimeUs;
+
+ List<sp<Action> > mDeferredActions;
+
+ bool mAudioEOS;
+ bool mVideoEOS;
+
+ bool mScanSourcesPending;
+ int32_t mScanSourcesGeneration;
+
+ int32_t mPollDurationGeneration;
+ int32_t mTimedTextGeneration;
+
+ enum FlushStatus {
+ NONE,
+ FLUSHING_DECODER,
+ FLUSHING_DECODER_SHUTDOWN,
+ SHUTTING_DOWN_DECODER,
+ FLUSHED,
+ SHUT_DOWN,
+ };
+
+ enum FlushCommand {
+ FLUSH_CMD_NONE,
+ FLUSH_CMD_FLUSH,
+ FLUSH_CMD_SHUTDOWN,
+ };
+
+ // Status of flush responses from the decoder and renderer.
+ bool mFlushComplete[2][2];
+
+ FlushStatus mFlushingAudio;
+ FlushStatus mFlushingVideo;
+
+ // True while a decoder resume (after seek/flush) is pending; cleared in finishResume().
+ bool mResumePending;
+
+ int32_t mVideoScalingMode;
+
+ AudioPlaybackRate mPlaybackSettings;
+ AVSyncSettings mSyncSettings;
+ float mVideoFpsHint;
+ bool mStarted;
+ bool mPrepared;
+ bool mResetting;
+ bool mSourceStarted;
+ bool mAudioDecoderError;
+ bool mVideoDecoderError;
+
+ // Actual pause state, either as requested by client or due to buffering.
+ bool mPaused;
+
+ // Pause state as requested by client. Note that if mPausedByClient is
+ // true, mPaused is always true; if mPausedByClient is false, mPaused could
+ // still become true, when we pause internally due to buffering.
+ bool mPausedByClient;
+
+ // Pause state as requested by source (internally) due to buffering
+ bool mPausedForBuffering;
+
+ // Modular DRM
+ sp<AMediaCryptoWrapper> mCrypto;
+ bool mIsDrmProtected;
+
+ typedef enum {
+ DATA_SOURCE_TYPE_NONE,
+ DATA_SOURCE_TYPE_HTTP_LIVE,
+ DATA_SOURCE_TYPE_RTSP,
+ DATA_SOURCE_TYPE_GENERIC_URL,
+ DATA_SOURCE_TYPE_GENERIC_FD,
+ DATA_SOURCE_TYPE_MEDIA,
+ } DATA_SOURCE_TYPE;
+
+ std::atomic<DATA_SOURCE_TYPE> mDataSourceType;
+ std::atomic<DATA_SOURCE_TYPE> mNextDataSourceType;
+
+ inline const sp<DecoderBase> &getDecoder(bool audio) {
+ return audio ? mAudioDecoder : mVideoDecoder;
+ }
+
+ inline void clearFlushComplete() {
+ mFlushComplete[0][0] = false;
+ mFlushComplete[0][1] = false;
+ mFlushComplete[1][0] = false;
+ mFlushComplete[1][1] = false;
+ }
+
+ void disconnectSource();
+
+ status_t createNuPlayer2Source(const sp<DataSourceDesc> &dsd,
+ sp<Source> *source,
+ DATA_SOURCE_TYPE *dataSourceType);
+
+ void tryOpenAudioSinkForOffload(
+ const sp<AMessage> &format, const sp<MetaData> &audioMeta, bool hasVideo);
+ void closeAudioSink();
+ void restartAudio(
+ int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder);
+ void determineAudioModeChange(const sp<AMessage> &audioFormat);
+
+ status_t instantiateDecoder(
+ bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange = true);
+
+ void updateVideoSize(
+ int64_t srcId,
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat = NULL);
+
+ void notifyListener(int64_t srcId, int msg, int ext1, int ext2, const Parcel *in = NULL);
+
+ void handleFlushComplete(bool audio, bool isDecoder);
+ void finishFlushIfPossible();
+
+ void onStart();
+ void onResume();
+ void onPause();
+
+ bool audioDecoderStillNeeded();
+
+ void flushDecoder(bool audio, bool needShutdown);
+
+ void finishResume();
+ void notifyDriverSeekComplete(int64_t srcId);
+
+ void postScanSources();
+
+ void schedulePollDuration();
+ void cancelPollDuration();
+
+ void processDeferredActions();
+
+ void performSeek(int64_t seekTimeUs, MediaPlayer2SeekMode mode);
+ void performDecoderFlush(FlushCommand audio, FlushCommand video);
+ void performReset();
+ void performPlayNextDataSource();
+ void performScanSources();
+ void performSetSurface(const sp<ANativeWindowWrapper> &nw);
+ void performResumeDecoders(bool needNotify);
+
+ void onSourceNotify(const sp<AMessage> &msg);
+ void onClosedCaptionNotify(const sp<AMessage> &msg);
+
+ void queueDecoderShutdown(
+ bool audio, bool video, const sp<AMessage> &reply);
+
+ void sendSubtitleData(const sp<ABuffer> &buffer, int32_t baseIndex);
+ void sendTimedMetaData(const sp<ABuffer> &buffer);
+ void sendTimedTextData(const sp<ABuffer> &buffer);
+
+ void writeTrackInfo(Parcel* reply, const sp<AMessage>& format) const;
+
+ status_t onPrepareDrm(const sp<AMessage> &msg);
+ status_t onReleaseDrm();
+
+ DISALLOW_EVIL_CONSTRUCTORS(NuPlayer2);
+};
+
+} // namespace android
+
+#endif // NU_PLAYER2_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
new file mode 100644
index 0000000..e48e388
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
@@ -0,0 +1,580 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayer2CCDecoder"
+#include <utils/Log.h>
+#include <inttypes.h>
+
+#include "NuPlayer2CCDecoder.h"
+
+#include <media/NdkMediaFormat.h>
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+// In CEA-708B, the maximum bandwidth of CC is set to 9600 bps.
+static const size_t kMaxBandwidthSizeBytes = 9600 / 8;
+
+struct CCData {
+ CCData(uint8_t type, uint8_t data1, uint8_t data2)
+ : mType(type), mData1(data1), mData2(data2) {
+ }
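+ // In CEA-608, control codes whose first byte lies in [0x10, 0x1f] identify the
+ // channel: a first byte of 0x18..0x1f selects the second channel of the field,
+ // and cc_type 1 (field 2) adds two, yielding 0..3 for CC1..CC4.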
+ bool getChannel(size_t *channel) const {
+ if (mData1 >= 0x10 && mData1 <= 0x1f) {
+ *channel = (mData1 >= 0x18 ? 1 : 0) + (mType ? 2 : 0);
+ return true;
+ }
+ return false;
+ }
+
+ uint8_t mType;
+ uint8_t mData1;
+ uint8_t mData2;
+};
+
+static bool isNullPad(CCData *cc) {
+ return cc->mData1 < 0x10 && cc->mData2 < 0x10;
+}
+
+static void dumpBytePair(const sp<ABuffer> &ccBuf) __attribute__ ((unused));
+static void dumpBytePair(const sp<ABuffer> &ccBuf) {
+ size_t offset = 0;
+ AString out;
+
+ while (offset < ccBuf->size()) {
+ char tmp[128];
+
+ CCData *cc = (CCData *) (ccBuf->data() + offset);
+
+ if (isNullPad(cc)) {
+ // 1 null pad or XDS metadata, ignore
+ offset += sizeof(CCData);
+ continue;
+ }
+
+ if (cc->mData1 >= 0x20 && cc->mData1 <= 0x7f) {
+ // 2 basic chars
+ snprintf(tmp, sizeof(tmp), "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
+ } else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
+ && cc->mData2 >= 0x30 && cc->mData2 <= 0x3f) {
+ // 1 special char
+ snprintf(tmp, sizeof(tmp), "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ } else if ((cc->mData1 == 0x12 || cc->mData1 == 0x1A)
+ && cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
+ // 1 Spanish/French char
+ snprintf(tmp, sizeof(tmp), "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ } else if ((cc->mData1 == 0x13 || cc->mData1 == 0x1B)
+ && cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
+ // 1 Portuguese/German/Danish char
+ snprintf(tmp, sizeof(tmp), "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ } else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
+ && cc->mData2 >= 0x20 && cc->mData2 <= 0x2f){
+ // Mid-Row Codes (Table 69)
+ snprintf(tmp, sizeof(tmp), "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ } else if (((cc->mData1 == 0x14 || cc->mData1 == 0x1c)
+ && cc->mData2 >= 0x20 && cc->mData2 <= 0x2f)
+ ||
+ ((cc->mData1 == 0x17 || cc->mData1 == 0x1f)
+ && cc->mData2 >= 0x21 && cc->mData2 <= 0x23)){
+ // Misc Control Codes (Table 70)
+ snprintf(tmp, sizeof(tmp), "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ } else if ((cc->mData1 & 0x70) == 0x10
+ && (cc->mData2 & 0x40) == 0x40
+ && ((cc->mData1 & 0x07) || !(cc->mData2 & 0x20)) ) {
+ // Preamble Address Codes (Table 71)
+ snprintf(tmp, sizeof(tmp), "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ } else {
+ snprintf(tmp, sizeof(tmp), "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ }
+
+ if (out.size() > 0) {
+ out.append(", ");
+ }
+
+ out.append(tmp);
+
+ offset += sizeof(CCData);
+ }
+
+ ALOGI("%s", out.c_str());
+}
+
+NuPlayer2::CCDecoder::CCDecoder(const sp<AMessage> ¬ify)
+ : mNotify(notify),
+ mSelectedTrack(-1),
+ mDTVCCPacket(new ABuffer(kMaxBandwidthSizeBytes)) {
+ mDTVCCPacket->setRange(0, 0);
+
+ // In CEA-608, packets with cc_type 0 carry the CC1 and CC2 channels, and packets
+ // with cc_type 1 carry CC3 and CC4. The array below tracks the channel currently
+ // being transmitted for each cc_type value.
+ mLine21Channels[0] = 0; // CC1
+ mLine21Channels[1] = 2; // CC3
+}
+
+size_t NuPlayer2::CCDecoder::getTrackCount() const {
+ return mTracks.size();
+}
+
+sp<AMessage> NuPlayer2::CCDecoder::getTrackInfo(size_t index) const {
+ if (!isTrackValid(index)) {
+ return NULL;
+ }
+
+ sp<AMessage> format = new AMessage();
+
+ CCTrack track = mTracks[index];
+
+ format->setInt32("type", MEDIA_TRACK_TYPE_SUBTITLE);
+ format->setString("language", "und");
+
+ switch (track.mTrackType) {
+ case kTrackTypeCEA608:
+ format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_608);
+ break;
+ case kTrackTypeCEA708:
+ format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_708);
+ break;
+ default:
+ ALOGE("Unknown track type: %d", track.mTrackType);
+ return NULL;
+ }
+
+ // For CEA-608 CC1, field 0 channel 0
+ bool isDefaultAuto = track.mTrackType == kTrackTypeCEA608
+ && track.mTrackChannel == 0;
+ // For CEA-708, Primary Caption Service.
+ bool isDefaultOnly = track.mTrackType == kTrackTypeCEA708
+ && track.mTrackChannel == 1;
+ format->setInt32("auto", isDefaultAuto);
+ format->setInt32("default", isDefaultAuto || isDefaultOnly);
+ format->setInt32("forced", 0);
+
+ return format;
+}
+
+status_t NuPlayer2::CCDecoder::selectTrack(size_t index, bool select) {
+ if (!isTrackValid(index)) {
+ return BAD_VALUE;
+ }
+
+ if (select) {
+ if (mSelectedTrack == (ssize_t)index) {
+ ALOGE("track %zu already selected", index);
+ return BAD_VALUE;
+ }
+ ALOGV("selected track %zu", index);
+ mSelectedTrack = index;
+ } else {
+ if (mSelectedTrack != (ssize_t)index) {
+ ALOGE("track %zu is not selected", index);
+ return BAD_VALUE;
+ }
+ ALOGV("unselected track %zu", index);
+ mSelectedTrack = -1;
+ }
+
+ // Clear the previous track payloads
+ mCCMap.clear();
+
+ return OK;
+}
+
+bool NuPlayer2::CCDecoder::isSelected() const {
+ return mSelectedTrack >= 0 && mSelectedTrack < (int32_t)getTrackCount();
+}
+
+bool NuPlayer2::CCDecoder::isTrackValid(size_t index) const {
+ return index < getTrackCount();
+}
+
+// returns true if a new CC track is found
+bool NuPlayer2::CCDecoder::extractFromSEI(const sp<ABuffer> &accessUnit) {
+ sp<ABuffer> sei;
+ if (!accessUnit->meta()->findBuffer("sei", &sei) || sei == NULL) {
+ return false;
+ }
+
+ int64_t timeUs;
+ CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+ bool trackAdded = false;
+
+ const NALPosition *nal = (NALPosition *)sei->data();
+
+ for (size_t i = 0; i < sei->size() / sizeof(NALPosition); ++i, ++nal) {
+ trackAdded |= parseSEINalUnit(
+ timeUs, accessUnit->data() + nal->nalOffset, nal->nalSize);
+ }
+
+ return trackAdded;
+}
+
+// returns true if a new CC track is found
+bool NuPlayer2::CCDecoder::parseSEINalUnit(int64_t timeUs, const uint8_t *data, size_t size) {
+ unsigned nalType = data[0] & 0x1f;
+
+ // the buffer should only have SEI in it
+ if (nalType != 6) {
+ return false;
+ }
+
+ bool trackAdded = false;
+ NALBitReader br(data + 1, size - 1);
+
+ // sei_message()
+ while (br.atLeastNumBitsLeft(16)) { // at least 16-bit for sei_message()
+ uint32_t payload_type = 0;
+ size_t payload_size = 0;
+ uint8_t last_byte;
+
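+ // payload_type and payload_size are coded as a run of 0xFF bytes (each adding
+ // 255) followed by a final non-0xFF byte, per the H.264 sei_message() syntax.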
+ do {
+ last_byte = br.getBits(8);
+ payload_type += last_byte;
+ } while (last_byte == 0xFF);
+
+ do {
+ last_byte = br.getBits(8);
+ payload_size += last_byte;
+ } while (last_byte == 0xFF);
+
+ if (payload_size > SIZE_MAX / 8
+ || !br.atLeastNumBitsLeft(payload_size * 8)) {
+ ALOGV("Malformed SEI payload");
+ break;
+ }
+
+ // sei_payload()
+ if (payload_type == 4) {
+ bool isCC = false;
+ if (payload_size > 1 + 2 + 4 + 1) {
+ // user_data_registered_itu_t_t35()
+
+ // ATSC A/72: 6.4.2
+ uint8_t itu_t_t35_country_code = br.getBits(8);
+ uint16_t itu_t_t35_provider_code = br.getBits(16);
+ uint32_t user_identifier = br.getBits(32);
+ uint8_t user_data_type_code = br.getBits(8);
+
+ payload_size -= 1 + 2 + 4 + 1;
+
+ isCC = itu_t_t35_country_code == 0xB5
+ && itu_t_t35_provider_code == 0x0031
+ && user_identifier == 'GA94'
+ && user_data_type_code == 0x3;
+ }
+
+ if (isCC && payload_size > 2) {
+ trackAdded |= parseMPEGCCData(timeUs, br.data(), br.numBitsLeft() / 8);
+ } else {
+ ALOGV("Malformed SEI payload type 4");
+ }
+ } else {
+ ALOGV("Unsupported SEI payload type %d", payload_type);
+ }
+
+ // skipping remaining bits of this payload
+ br.skipBits(payload_size * 8);
+ }
+
+ return trackAdded;
+}
+
+// returns true if a new CC track is found
+bool NuPlayer2::CCDecoder::extractFromMPEGUserData(const sp<ABuffer> &accessUnit) {
+ sp<ABuffer> mpegUserData;
+ if (!accessUnit->meta()->findBuffer(AMEDIAFORMAT_KEY_MPEG_USER_DATA, &mpegUserData)
+ || mpegUserData == NULL) {
+ return false;
+ }
+
+ int64_t timeUs;
+ CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+ bool trackAdded = false;
+
+ const size_t *userData = (size_t *)mpegUserData->data();
+
+ for (size_t i = 0; i < mpegUserData->size() / sizeof(size_t); ++i) {
+ trackAdded |= parseMPEGUserDataUnit(
+ timeUs, accessUnit->data() + userData[i], accessUnit->size() - userData[i]);
+ }
+
+ return trackAdded;
+}
+
+// returns true if a new CC track is found
+bool NuPlayer2::CCDecoder::parseMPEGUserDataUnit(int64_t timeUs, const uint8_t *data, size_t size) {
+ ABitReader br(data + 4, 5);
+
+ uint32_t user_identifier = br.getBits(32);
+ uint8_t user_data_type = br.getBits(8);
+
+ if (user_identifier == 'GA94' && user_data_type == 0x3) {
+ return parseMPEGCCData(timeUs, data + 9, size - 9);
+ }
+
+ return false;
+}
+
+// returns true if a new CC track is found
+bool NuPlayer2::CCDecoder::parseMPEGCCData(int64_t timeUs, const uint8_t *data, size_t size) {
+ bool trackAdded = false;
+
+ // MPEG_cc_data()
+ // ATSC A/53 Part 4: 6.2.3.1
+ ABitReader br(data, size);
+
+ if (br.numBitsLeft() <= 16) {
+ return false;
+ }
+
+ br.skipBits(1);
+ bool process_cc_data_flag = br.getBits(1);
+ br.skipBits(1);
+ size_t cc_count = br.getBits(5);
+ br.skipBits(8);
+
+ if (!process_cc_data_flag || 3 * 8 * cc_count >= br.numBitsLeft()) {
+ return false;
+ }
+
+ sp<ABuffer> line21CCBuf = NULL;
+
+ for (size_t i = 0; i < cc_count; ++i) {
+ br.skipBits(5);
+ bool cc_valid = br.getBits(1);
+ uint8_t cc_type = br.getBits(2);
+
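+ // cc_type 0 and 1 carry CEA-608 byte pairs for line 21 field 1 and field 2;
+ // cc_type 3 starts a new DTVCC (CEA-708) packet and cc_type 2 carries its
+ // continuation bytes.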
+ if (cc_valid) {
+ if (cc_type == 3) {
+ if (mDTVCCPacket->size() > 0) {
+ trackAdded |= parseDTVCCPacket(
+ timeUs, mDTVCCPacket->data(), mDTVCCPacket->size());
+ mDTVCCPacket->setRange(0, 0);
+ }
+ memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
+ mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+ br.skipBits(16);
+ } else if (mDTVCCPacket->size() > 0 && cc_type == 2) {
+ memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
+ mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
+ br.skipBits(16);
+ } else if (cc_type == 0 || cc_type == 1) {
+ uint8_t cc_data_1 = br.getBits(8) & 0x7f;
+ uint8_t cc_data_2 = br.getBits(8) & 0x7f;
+
+ CCData cc(cc_type, cc_data_1, cc_data_2);
+
+ if (isNullPad(&cc)) {
+ continue;
+ }
+
+ size_t channel;
+ if (cc.getChannel(&channel)) {
+ mLine21Channels[cc_type] = channel;
+
+ // create a new track if it does not exist.
+ getTrackIndex(kTrackTypeCEA608, channel, &trackAdded);
+ }
+
+ if (isSelected() && mTracks[mSelectedTrack].mTrackType == kTrackTypeCEA608
+ && mTracks[mSelectedTrack].mTrackChannel == mLine21Channels[cc_type]) {
+ if (line21CCBuf == NULL) {
+ line21CCBuf = new ABuffer((cc_count - i) * sizeof(CCData));
+ line21CCBuf->setRange(0, 0);
+ }
+ memcpy(line21CCBuf->data() + line21CCBuf->size(), &cc, sizeof(cc));
+ line21CCBuf->setRange(0, line21CCBuf->size() + sizeof(CCData));
+ }
+ } else {
+ br.skipBits(16);
+ }
+ } else {
+ if ((cc_type == 3 || cc_type == 2) && mDTVCCPacket->size() > 0) {
+ trackAdded |= parseDTVCCPacket(timeUs, mDTVCCPacket->data(), mDTVCCPacket->size());
+ mDTVCCPacket->setRange(0, 0);
+ }
+ br.skipBits(16);
+ }
+ }
+
+ if (isSelected() && mTracks[mSelectedTrack].mTrackType == kTrackTypeCEA608
+ && line21CCBuf != NULL && line21CCBuf->size() > 0) {
+ mCCMap.add(timeUs, line21CCBuf);
+ }
+
+ return trackAdded;
+}
+
+// returns true if a new CC track is found
+bool NuPlayer2::CCDecoder::parseDTVCCPacket(int64_t timeUs, const uint8_t *data, size_t size) {
+ // CEA-708B 5 DTVCC Packet Layer.
+ ABitReader br(data, size);
+ br.skipBits(2);
+
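+ // The two bits skipped above are the packet header's sequence_number. Per
+ // CEA-708B, a packet_size_code of 0 denotes the maximum of 64, and the total
+ // packet length in bytes, header byte included, is twice the code, which is
+ // what the size check below verifies.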
+ size_t packet_size = br.getBits(6);
+ if (packet_size == 0) packet_size = 64;
+ packet_size *= 2;
+
+ if (size != packet_size) {
+ return false;
+ }
+
+ bool trackAdded = false;
+
+ while (br.numBitsLeft() >= 16) {
+ // CEA-708B Figure 5 and 6.
+ uint8_t service_number = br.getBits(3);
+ size_t block_size = br.getBits(5);
+
+ if (service_number == 64) {
+ br.skipBits(2);
+ service_number = br.getBits(6);
+
+ if (service_number < 64) {
+ return trackAdded;
+ }
+ }
+
+ if (br.numBitsLeft() < block_size * 8) {
+ return trackAdded;
+ }
+
+ if (block_size > 0) {
+ size_t trackIndex = getTrackIndex(kTrackTypeCEA708, service_number, &trackAdded);
+ if (mSelectedTrack == (ssize_t)trackIndex) {
+ sp<ABuffer> ccPacket = new ABuffer(block_size);
+ memcpy(ccPacket->data(), br.data(), block_size);
+ mCCMap.add(timeUs, ccPacket);
+ }
+ }
+ br.skipBits(block_size * 8);
+ }
+
+ return trackAdded;
+}
+
+// return the track index for a given type and channel.
+// if the track does not exist, creates a new one.
+size_t NuPlayer2::CCDecoder::getTrackIndex(
+ int32_t trackType, size_t channel, bool *trackAdded) {
+ CCTrack track(trackType, channel);
+ ssize_t index = mTrackIndices.indexOfKey(track);
+
+ if (index < 0) {
+ // A new track is added.
+ index = mTracks.size();
+ mTrackIndices.add(track, index);
+ mTracks.add(track);
+ *trackAdded = true;
+ return index;
+ }
+
+ return mTrackIndices.valueAt(index);
+}
+
+void NuPlayer2::CCDecoder::decode(const sp<ABuffer> &accessUnit) {
+ if (extractFromMPEGUserData(accessUnit) || extractFromSEI(accessUnit)) {
+ sp<AMessage> msg = mNotify->dup();
+ msg->setInt32("what", kWhatTrackAdded);
+ msg->post();
+ }
+ // TODO: extract CC from other sources
+}
+
+void NuPlayer2::CCDecoder::display(int64_t timeUs) {
+ if (!isSelected()) {
+ return;
+ }
+
+ ssize_t index = mCCMap.indexOfKey(timeUs);
+ if (index < 0) {
+ ALOGV("cc for timestamp %" PRId64 " not found", timeUs);
+ return;
+ }
+
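+ // Concatenate every queued CC payload up to and including this timestamp so
+ // that pairs queued earlier but never displayed are emitted together.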
+ sp<ABuffer> ccBuf;
+
+ if (index == 0) {
+ ccBuf = mCCMap.valueAt(index);
+ } else {
+ size_t size = 0;
+
+ for (ssize_t i = 0; i <= index; ++i) {
+ size += mCCMap.valueAt(i)->size();
+ }
+
+ ccBuf = new ABuffer(size);
+ ccBuf->setRange(0, 0);
+
+ for (ssize_t i = 0; i <= index; ++i) {
+ sp<ABuffer> buf = mCCMap.valueAt(i);
+ memcpy(ccBuf->data() + ccBuf->size(), buf->data(), buf->size());
+ ccBuf->setRange(0, ccBuf->size() + buf->size());
+ }
+ }
+
+ if (ccBuf->size() > 0) {
+#if 0
+ dumpBytePair(ccBuf);
+#endif
+
+ ccBuf->meta()->setInt32(AMEDIAFORMAT_KEY_TRACK_INDEX, mSelectedTrack);
+ ccBuf->meta()->setInt64("timeUs", timeUs);
+ ccBuf->meta()->setInt64("durationUs", 0ll);
+
+ sp<AMessage> msg = mNotify->dup();
+ msg->setInt32("what", kWhatClosedCaptionData);
+ msg->setBuffer("buffer", ccBuf);
+ msg->post();
+ }
+
+ // remove all entries before timeUs
+ mCCMap.removeItemsAt(0, index + 1);
+}
+
+void NuPlayer2::CCDecoder::flush() {
+ mCCMap.clear();
+ mDTVCCPacket->setRange(0, 0);
+}
+
+int32_t NuPlayer2::CCDecoder::CCTrack::compare(const NuPlayer2::CCDecoder::CCTrack& rhs) const {
+ int32_t cmp = mTrackType - rhs.mTrackType;
+ if (cmp != 0) return cmp;
+ return mTrackChannel - rhs.mTrackChannel;
+}
+
+bool NuPlayer2::CCDecoder::CCTrack::operator<(const NuPlayer2::CCDecoder::CCTrack& rhs) const {
+ return compare(rhs) < 0;
+}
+
+bool NuPlayer2::CCDecoder::CCTrack::operator==(const NuPlayer2::CCDecoder::CCTrack& rhs) const {
+ return compare(rhs) == 0;
+}
+
+bool NuPlayer2::CCDecoder::CCTrack::operator!=(const NuPlayer2::CCDecoder::CCTrack& rhs) const {
+ return compare(rhs) != 0;
+}
+
+} // namespace android
+
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h
new file mode 100644
index 0000000..57d5ea2
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NUPLAYER2_CCDECODER_H_
+
+#define NUPLAYER2_CCDECODER_H_
+
+#include "NuPlayer2.h"
+
+namespace android {
+
+struct NuPlayer2::CCDecoder : public RefBase {
+ enum {
+ kWhatClosedCaptionData,
+ kWhatTrackAdded,
+ };
+
+ enum {
+ kTrackTypeCEA608,
+ kTrackTypeCEA708,
+ };
+
+ explicit CCDecoder(const sp<AMessage> ¬ify);
+
+ size_t getTrackCount() const;
+ sp<AMessage> getTrackInfo(size_t index) const;
+ status_t selectTrack(size_t index, bool select);
+ bool isSelected() const;
+ void decode(const sp<ABuffer> &accessUnit);
+ void display(int64_t timeUs);
+ void flush();
+
+private:
+ // CC track identifier.
+ struct CCTrack {
+ CCTrack() : mTrackType(0), mTrackChannel(0) { }
+
+ CCTrack(const int32_t trackType, const size_t trackChannel)
+ : mTrackType(trackType), mTrackChannel(trackChannel) { }
+
+ int32_t mTrackType;
+ size_t mTrackChannel;
+
+ // CCTracks are ordered so they can serve as keys in the track-to-index map,
+ // which is used to look up the index of the matching track when CC data arrives.
+ int32_t compare(const NuPlayer2::CCDecoder::CCTrack& rhs) const;
+ inline bool operator<(const NuPlayer2::CCDecoder::CCTrack& rhs) const;
+ inline bool operator==(const NuPlayer2::CCDecoder::CCTrack& rhs) const;
+ inline bool operator!=(const NuPlayer2::CCDecoder::CCTrack& rhs) const;
+ };
+
+ sp<AMessage> mNotify;
+ KeyedVector<int64_t, sp<ABuffer> > mCCMap;
+ ssize_t mSelectedTrack;
+ KeyedVector<CCTrack, size_t> mTrackIndices;
+ Vector<CCTrack> mTracks;
+
+ // CEA-608 closed caption
+ size_t mLine21Channels[2]; // The current channels of NTSC_CC_FIELD_{1, 2}
+
+ // CEA-708 closed caption
+ sp<ABuffer> mDTVCCPacket;
+
+ bool isTrackValid(size_t index) const;
+ size_t getTrackIndex(int32_t trackType, size_t channel, bool *trackAdded);
+
+ // Extract from H.264 SEIs
+ bool extractFromSEI(const sp<ABuffer> &accessUnit);
+ bool parseSEINalUnit(int64_t timeUs, const uint8_t *data, size_t size);
+
+ // Extract from MPEG user data
+ bool extractFromMPEGUserData(const sp<ABuffer> &accessUnit);
+ bool parseMPEGUserDataUnit(int64_t timeUs, const uint8_t *data, size_t size);
+
+ // Extract CC tracks from MPEG_cc_data
+ bool parseMPEGCCData(int64_t timeUs, const uint8_t *data, size_t size);
+ bool parseDTVCCPacket(int64_t timeUs, const uint8_t *data, size_t size);
+
+ DISALLOW_EVIL_CONSTRUCTORS(CCDecoder);
+};
+
+} // namespace android
+
+#endif // NUPLAYER2_CCDECODER_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
new file mode 100644
index 0000000..645138a
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -0,0 +1,1299 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayer2Decoder"
+#include <utils/Log.h>
+#include <inttypes.h>
+
+#include <algorithm>
+
+#include "NuPlayer2CCDecoder.h"
+#include "NuPlayer2Decoder.h"
+#include "NuPlayer2Drm.h"
+#include "NuPlayer2Renderer.h"
+#include "NuPlayer2Source.h"
+
+#include <cutils/properties.h>
+#include <media/MediaBufferHolder.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/NdkMediaCodec.h>
+#include <media/NdkWrapper.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/SurfaceUtils.h>
+
+#include <system/window.h>
+#include "ATSParser.h"
+
+namespace android {
+
+static float kDisplayRefreshingRate = 60.f; // TODO: get this from the display
+
+// The default total video frame rate of a stream when that info is not available from
+// the source.
+static float kDefaultVideoFrameRateTotal = 30.f;
+
+static inline bool getAudioDeepBufferSetting() {
+ return property_get_bool("media.stagefright.audio.deep", false /* default_value */);
+}
+
+NuPlayer2::Decoder::Decoder(
+ const sp<AMessage> ¬ify,
+ const sp<Source> &source,
+ pid_t pid,
+ uid_t uid,
+ const sp<Renderer> &renderer,
+ const sp<ANativeWindowWrapper> &nww,
+ const sp<CCDecoder> &ccDecoder)
+ : DecoderBase(notify),
+ mNativeWindow(nww),
+ mSource(source),
+ mRenderer(renderer),
+ mCCDecoder(ccDecoder),
+ mPid(pid),
+ mUid(uid),
+ mSkipRenderingUntilMediaTimeUs(-1ll),
+ mNumFramesTotal(0ll),
+ mNumInputFramesDropped(0ll),
+ mNumOutputFramesDropped(0ll),
+ mVideoWidth(0),
+ mVideoHeight(0),
+ mIsAudio(true),
+ mIsVideoAVC(false),
+ mIsSecure(false),
+ mIsEncrypted(false),
+ mIsEncryptedObservedEarlier(false),
+ mFormatChangePending(false),
+ mTimeChangePending(false),
+ mFrameRateTotal(kDefaultVideoFrameRateTotal),
+ mPlaybackSpeed(1.0f),
+ mNumVideoTemporalLayerTotal(1), // decode all layers
+ mNumVideoTemporalLayerAllowed(1),
+ mCurrentMaxVideoTemporalLayerId(0),
+ mResumePending(false),
+ mComponentName("decoder") {
+ mVideoTemporalLayerAggregateFps[0] = mFrameRateTotal;
+}
+
+NuPlayer2::Decoder::~Decoder() {
+ // Need to stop looper first since mCodec could be accessed on the mDecoderLooper.
+ stopLooper();
+ if (mCodec != NULL) {
+ mCodec->release();
+ }
+ releaseAndResetMediaBuffers();
+}
+
+sp<AMessage> NuPlayer2::Decoder::getStats() const {
+ mStats->setInt64("frames-total", mNumFramesTotal);
+ mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
+ mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
+ return mStats;
+}
+
+status_t NuPlayer2::Decoder::setVideoSurface(const sp<ANativeWindowWrapper> &nww) {
+ if (nww == NULL || nww->getANativeWindow() == NULL
+ || ADebug::isExperimentEnabled("legacy-setsurface")) {
+ return BAD_VALUE;
+ }
+
+ sp<AMessage> msg = new AMessage(kWhatSetVideoSurface, this);
+
+ msg->setObject("surface", nww);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+ return err;
+}
+
+void NuPlayer2::Decoder::onMessageReceived(const sp<AMessage> &msg) {
+ ALOGV("[%s] onMessage: %s", mComponentName.c_str(), msg->debugString().c_str());
+
+ switch (msg->what()) {
+ case kWhatCodecNotify:
+ {
+ int32_t cbID;
+ CHECK(msg->findInt32("callbackID", &cbID));
+
+ ALOGV("[%s] kWhatCodecNotify: cbID = %d, paused = %d",
+ mIsAudio ? "audio" : "video", cbID, mPaused);
+
+ if (mPaused) {
+ break;
+ }
+
+ switch (cbID) {
+ case AMediaCodecWrapper::CB_INPUT_AVAILABLE:
+ {
+ int32_t index;
+ CHECK(msg->findInt32("index", &index));
+
+ handleAnInputBuffer(index);
+ break;
+ }
+
+ case AMediaCodecWrapper::CB_OUTPUT_AVAILABLE:
+ {
+ int32_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ int32_t flags;
+
+ CHECK(msg->findInt32("index", &index));
+ CHECK(msg->findSize("offset", &offset));
+ CHECK(msg->findSize("size", &size));
+ CHECK(msg->findInt64("timeUs", &timeUs));
+ CHECK(msg->findInt32("flags", &flags));
+
+ handleAnOutputBuffer(index, offset, size, timeUs, flags);
+ break;
+ }
+
+ case AMediaCodecWrapper::CB_OUTPUT_FORMAT_CHANGED:
+ {
+ sp<AMessage> format;
+ CHECK(msg->findMessage("format", &format));
+
+ handleOutputFormatChange(format);
+ break;
+ }
+
+ case AMediaCodecWrapper::CB_ERROR:
+ {
+ status_t err;
+ CHECK(msg->findInt32("err", &err));
+ ALOGE("Decoder (%s) reported error : 0x%x",
+ mIsAudio ? "audio" : "video", err);
+
+ handleError(err);
+ break;
+ }
+
+ default:
+ {
+ TRESPASS();
+ break;
+ }
+ }
+
+ break;
+ }
+
+ case kWhatRenderBuffer:
+ {
+ if (!isStaleReply(msg)) {
+ onRenderBuffer(msg);
+ }
+ break;
+ }
+
+ case kWhatAudioOutputFormatChanged:
+ {
+ if (!isStaleReply(msg)) {
+ status_t err;
+ if (msg->findInt32("err", &err) && err != OK) {
+ ALOGE("Renderer reported 0x%x when changing audio output format", err);
+ handleError(err);
+ }
+ }
+ break;
+ }
+
+ case kWhatSetVideoSurface:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ sp<RefBase> obj;
+ CHECK(msg->findObject("surface", &obj));
+ sp<ANativeWindowWrapper> nww =
+ static_cast<ANativeWindowWrapper *>(obj.get()); // non-null
+ if (nww == NULL || nww->getANativeWindow() == NULL) {
+ break;
+ }
+ int32_t err = INVALID_OPERATION;
+ // NOTE: in practice mNativeWindow is always non-null,
+ // but checking here for completeness
+ if (mCodec != NULL
+ && mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
+ // TODO: once AwesomePlayer is removed, remove this automatic connecting
+ // to the surface by MediaPlayerService.
+ //
+ // at this point MediaPlayer2Manager::client has already connected to the
+ // surface, which MediaCodec does not expect
+ err = native_window_api_disconnect(nww->getANativeWindow(),
+ NATIVE_WINDOW_API_MEDIA);
+ if (err == OK) {
+ err = mCodec->setOutputSurface(nww);
+ ALOGI_IF(err, "codec setOutputSurface returned: %d", err);
+ if (err == OK) {
+ // reconnect to the old surface as MPS::Client will expect to
+ // be able to disconnect from it.
+ (void)native_window_api_connect(mNativeWindow->getANativeWindow(),
+ NATIVE_WINDOW_API_MEDIA);
+
+ mNativeWindow = nww;
+ }
+ }
+ if (err != OK) {
+ // reconnect to the new surface on error as MPS::Client will expect to
+ // be able to disconnect from it.
+ (void)native_window_api_connect(nww->getANativeWindow(),
+ NATIVE_WINDOW_API_MEDIA);
+ }
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatDrmReleaseCrypto:
+ {
+ ALOGV("kWhatDrmReleaseCrypto");
+ onReleaseCrypto(msg);
+ break;
+ }
+
+ default:
+ DecoderBase::onMessageReceived(msg);
+ break;
+ }
+}
+
+void NuPlayer2::Decoder::onConfigure(const sp<AMessage> &format) {
+ ALOGV("[%s] onConfigure (format=%s)", mComponentName.c_str(), format->debugString().c_str());
+ CHECK(mCodec == NULL);
+
+ mFormatChangePending = false;
+ mTimeChangePending = false;
+
+ ++mBufferGeneration;
+
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+
+ mIsAudio = !strncasecmp("audio/", mime.c_str(), 6);
+ mIsVideoAVC = !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime.c_str());
+
+ mComponentName = mime;
+ mComponentName.append(" decoder");
+ ALOGV("[%s] onConfigure (nww=%p)", mComponentName.c_str(),
+ (mNativeWindow == NULL ? NULL : mNativeWindow->getANativeWindow()));
+
+ mCodec = AMediaCodecWrapper::CreateDecoderByType(mime);
+ int32_t secure = 0;
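+ // Secure decoders are published under the base decoder's name with a ".secure"
+ // suffix, so query the name of the codec just created, release it, and
+ // re-create the codec by the suffixed name.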
+ if (format->findInt32("secure", &secure) && secure != 0) {
+ if (mCodec != NULL) {
+ if (mCodec->getName(&mComponentName) == OK) {
+ mComponentName.append(".secure");
+ mCodec->release();
+ ALOGI("[%s] creating", mComponentName.c_str());
+ mCodec = AMediaCodecWrapper::CreateCodecByName(mComponentName);
+ } else {
+ mCodec = NULL;
+ }
+ }
+ }
+ if (mCodec == NULL) {
+ ALOGE("Failed to create %s%s decoder",
+ (secure ? "secure " : ""), mime.c_str());
+ handleError(NO_INIT);
+ return;
+ }
+ mIsSecure = secure;
+
+ mCodec->getName(&mComponentName);
+
+ status_t err;
+ if (mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
+ // disconnect from surface as MediaCodec will reconnect
+ err = native_window_api_disconnect(mNativeWindow->getANativeWindow(),
+ NATIVE_WINDOW_API_MEDIA);
+ // We treat this as a warning, as this is a preparatory step.
+ // Codec will try to connect to the surface, which is where
+ // any error signaling will occur.
+ ALOGW_IF(err != OK, "failed to disconnect from surface: %d", err);
+ }
+
+ // Modular DRM
+ sp<RefBase> objCrypto;
+ format->findObject("crypto", &objCrypto);
+ sp<AMediaCryptoWrapper> crypto = static_cast<AMediaCryptoWrapper *>(objCrypto.get());
+ // non-encrypted source won't have a crypto
+ mIsEncrypted = (crypto != NULL);
+ // configure is called once; still using OR in case the behavior changes.
+ mIsEncryptedObservedEarlier = mIsEncryptedObservedEarlier || mIsEncrypted;
+ ALOGV("onConfigure mCrypto: %p, mIsSecure: %d", crypto.get(), mIsSecure);
+
+ err = mCodec->configure(
+ AMediaFormatWrapper::Create(format),
+ mNativeWindow,
+ crypto,
+ 0 /* flags */);
+
+ if (err != OK) {
+ ALOGE("Failed to configure [%s] decoder (err=%d)", mComponentName.c_str(), err);
+ mCodec->release();
+ mCodec.clear();
+ handleError(err);
+ return;
+ }
+ rememberCodecSpecificData(format);
+
+ // the following should work in configured state
+ sp<AMediaFormatWrapper> outputFormat = mCodec->getOutputFormat();
+ if (outputFormat == NULL) {
+ handleError(INVALID_OPERATION);
+ return;
+ }
+ mInputFormat = mCodec->getInputFormat();
+ if (mInputFormat == NULL) {
+ handleError(INVALID_OPERATION);
+ return;
+ }
+
+ mStats->setString("mime", mime.c_str());
+ mStats->setString("component-name", mComponentName.c_str());
+
+ if (!mIsAudio) {
+ int32_t width, height;
+ if (outputFormat->getInt32("width", &width)
+ && outputFormat->getInt32("height", &height)) {
+ mStats->setInt32("width", width);
+ mStats->setInt32("height", height);
+ }
+ }
+
+ sp<AMessage> reply = new AMessage(kWhatCodecNotify, this);
+ mCodec->setCallback(reply);
+
+ err = mCodec->start();
+ if (err != OK) {
+ ALOGE("Failed to start [%s] decoder (err=%d)", mComponentName.c_str(), err);
+ mCodec->release();
+ mCodec.clear();
+ handleError(err);
+ return;
+ }
+
+ releaseAndResetMediaBuffers();
+
+ mPaused = false;
+ mResumePending = false;
+}
+
+void NuPlayer2::Decoder::onSetParameters(const sp<AMessage> ¶ms) {
+ bool needAdjustLayers = false;
+ float frameRateTotal;
+ if (params->findFloat("frame-rate-total", &frameRateTotal)
+ && mFrameRateTotal != frameRateTotal) {
+ needAdjustLayers = true;
+ mFrameRateTotal = frameRateTotal;
+ }
+
+ int32_t numVideoTemporalLayerTotal;
+ if (params->findInt32("temporal-layer-count", &numVideoTemporalLayerTotal)
+ && numVideoTemporalLayerTotal >= 0
+ && numVideoTemporalLayerTotal <= kMaxNumVideoTemporalLayers
+ && mNumVideoTemporalLayerTotal != numVideoTemporalLayerTotal) {
+ needAdjustLayers = true;
+ mNumVideoTemporalLayerTotal = std::max(numVideoTemporalLayerTotal, 1);
+ }
+
+ if (needAdjustLayers && mNumVideoTemporalLayerTotal > 1) {
+ // TODO: For now, layer fps is calculated for some specific architectures.
+ // But it really should be extracted from the stream.
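+ // Assumed dyadic layering: decoding only layer 0 yields the total frame rate
+ // divided by 2^(N-1), and each additional layer doubles the aggregate, so
+ // entry i holds the effective frame rate when layers 0..i are decoded.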
+ mVideoTemporalLayerAggregateFps[0] =
+ mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - 1));
+ for (int32_t i = 1; i < mNumVideoTemporalLayerTotal; ++i) {
+ mVideoTemporalLayerAggregateFps[i] =
+ mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - i))
+ + mVideoTemporalLayerAggregateFps[i - 1];
+ }
+ }
+
+ float playbackSpeed;
+ if (params->findFloat("playback-speed", &playbackSpeed)
+ && mPlaybackSpeed != playbackSpeed) {
+ needAdjustLayers = true;
+ mPlaybackSpeed = playbackSpeed;
+ }
+
+ if (needAdjustLayers) {
+ float decodeFrameRate = mFrameRateTotal;
+ // enable temporal layering optimization only if we know the layering depth
+ if (mNumVideoTemporalLayerTotal > 1) {
+ int32_t layerId;
+ for (layerId = 0; layerId < mNumVideoTemporalLayerTotal - 1; ++layerId) {
+ if (mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed
+ >= kDisplayRefreshingRate * 0.9) {
+ break;
+ }
+ }
+ mNumVideoTemporalLayerAllowed = layerId + 1;
+ decodeFrameRate = mVideoTemporalLayerAggregateFps[layerId];
+ }
+ ALOGV("onSetParameters: allowed layers=%d, decodeFps=%g",
+ mNumVideoTemporalLayerAllowed, decodeFrameRate);
+
+ if (mCodec == NULL) {
+ ALOGW("onSetParameters called before codec is created.");
+ return;
+ }
+
+ sp<AMediaFormatWrapper> codecParams = new AMediaFormatWrapper();
+ codecParams->setFloat("operating-rate", decodeFrameRate * mPlaybackSpeed);
+ mCodec->setParameters(codecParams);
+ }
+}
+
+void NuPlayer2::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
+ mRenderer = renderer;
+}
+
+void NuPlayer2::Decoder::onResume(bool notifyComplete) {
+ mPaused = false;
+
+ if (notifyComplete) {
+ mResumePending = true;
+ }
+
+ if (mCodec == NULL) {
+ ALOGE("[%s] onResume without a valid codec", mComponentName.c_str());
+ handleError(NO_INIT);
+ return;
+ }
+ mCodec->start();
+}
+
+void NuPlayer2::Decoder::doFlush(bool notifyComplete) {
+ if (mCCDecoder != NULL) {
+ mCCDecoder->flush();
+ }
+
+ if (mRenderer != NULL) {
+ mRenderer->flush(mIsAudio, notifyComplete);
+ mRenderer->signalTimeDiscontinuity();
+ }
+
+ status_t err = OK;
+ if (mCodec != NULL) {
+ err = mCodec->flush();
+ mCSDsToSubmit = mCSDsForCurrentFormat; // copy operator
+ ++mBufferGeneration;
+ }
+
+ if (err != OK) {
+ ALOGE("failed to flush [%s] (err=%d)", mComponentName.c_str(), err);
+ handleError(err);
+ // finish with posting kWhatFlushCompleted.
+ // we attempt to release the buffers even if flush fails.
+ }
+ releaseAndResetMediaBuffers();
+ mPaused = true;
+}
+
+
+void NuPlayer2::Decoder::onFlush() {
+ doFlush(true);
+
+ if (isDiscontinuityPending()) {
+ // This could happen if the client starts seeking/shutdown
+ // after we queued an EOS for discontinuities.
+ // We can consider discontinuity handled.
+ finishHandleDiscontinuity(false /* flushOnTimeChange */);
+ }
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatFlushCompleted);
+ notify->post();
+}
+
+void NuPlayer2::Decoder::onShutdown(bool notifyComplete) {
+ status_t err = OK;
+
+ // if there is a pending resume request, notify complete now
+ notifyResumeCompleteIfNecessary();
+
+ if (mCodec != NULL) {
+ err = mCodec->release();
+ mCodec = NULL;
+ ++mBufferGeneration;
+
+ if (mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
+ // reconnect to surface as MediaCodec disconnected from it
+ status_t error = native_window_api_connect(mNativeWindow->getANativeWindow(),
+ NATIVE_WINDOW_API_MEDIA);
+ ALOGW_IF(error != NO_ERROR,
+ "[%s] failed to connect to native window, error=%d",
+ mComponentName.c_str(), error);
+ }
+ mComponentName = "decoder";
+ }
+
+ releaseAndResetMediaBuffers();
+
+ if (err != OK) {
+ ALOGE("failed to release [%s] (err=%d)", mComponentName.c_str(), err);
+ handleError(err);
+ // finish with posting kWhatShutdownCompleted.
+ }
+
+ if (notifyComplete) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatShutdownCompleted);
+ notify->post();
+ mPaused = true;
+ }
+}
+
+/*
+ * returns true if we should request more data
+ */
+bool NuPlayer2::Decoder::doRequestBuffers() {
+ if (isDiscontinuityPending()) {
+ return false;
+ }
+ status_t err = OK;
+ while (err == OK && !mDequeuedInputBuffers.empty()) {
+ size_t bufferIx = *mDequeuedInputBuffers.begin();
+ sp<AMessage> msg = new AMessage();
+ msg->setSize("buffer-ix", bufferIx);
+ err = fetchInputData(msg);
+ if (err != OK && err != ERROR_END_OF_STREAM) {
+ // if EOS, need to queue EOS buffer
+ break;
+ }
+ mDequeuedInputBuffers.erase(mDequeuedInputBuffers.begin());
+
+ if (!mPendingInputMessages.empty()
+ || !onInputBufferFetched(msg)) {
+ mPendingInputMessages.push_back(msg);
+ }
+ }
+
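+ // Ask for another round only when the source simply had no data ready
+ // (-EWOULDBLOCK) and it can still feed more TS data.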
+ return err == -EWOULDBLOCK
+ && mSource->feedMoreTSData() == OK;
+}
+
+void NuPlayer2::Decoder::handleError(int32_t err)
+{
+ // We cannot immediately release the codec due to buffers still outstanding
+ // in the renderer. We signal to the player the error so it can shutdown/release the
+ // decoder after flushing and increment the generation to discard unnecessary messages.
+
+ ++mBufferGeneration;
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatError);
+ notify->setInt32("err", err);
+ notify->post();
+}
+
+status_t NuPlayer2::Decoder::releaseCrypto()
+{
+ ALOGV("releaseCrypto");
+
+ sp<AMessage> msg = new AMessage(kWhatDrmReleaseCrypto, this);
+
+ sp<AMessage> response;
+ status_t status = msg->postAndAwaitResponse(&response);
+ if (status == OK && response != NULL) {
+ CHECK(response->findInt32("status", &status));
+ ALOGV("releaseCrypto ret: %d ", status);
+ } else {
+ ALOGE("releaseCrypto err: %d", status);
+ }
+
+ return status;
+}
+
+void NuPlayer2::Decoder::onReleaseCrypto(const sp<AMessage>& msg)
+{
+ status_t status = INVALID_OPERATION;
+ if (mCodec != NULL) {
+ status = mCodec->releaseCrypto();
+ } else {
+ // returning OK if the codec has been already released
+ status = OK;
+ ALOGE("onReleaseCrypto No mCodec. err: %d", status);
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("status", status);
+ // Clearing the state as it's tied to crypto. mIsEncryptedObservedEarlier is sticky though
+ // and lasts for the lifetime of this codec. See its use in fetchInputData.
+ mIsEncrypted = false;
+
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
+bool NuPlayer2::Decoder::handleAnInputBuffer(size_t index) {
+ if (isDiscontinuityPending()) {
+ return false;
+ }
+
+ if (mCodec == NULL) {
+ ALOGE("[%s] handleAnInputBuffer without a valid codec", mComponentName.c_str());
+ handleError(NO_INIT);
+ return false;
+ }
+
+ size_t bufferSize = 0;
+ uint8_t *bufferBase = mCodec->getInputBuffer(index, &bufferSize);
+
+ if (bufferBase == NULL) {
+ ALOGE("[%s] handleAnInputBuffer, failed to get input buffer", mComponentName.c_str());
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+
+ sp<MediaCodecBuffer> buffer =
+ new MediaCodecBuffer(NULL /* format */, new ABuffer(bufferBase, bufferSize));
+
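+ // Grow the per-buffer bookkeeping vectors lazily; the codec may hand out a
+ // buffer index that has not been seen before.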
+ if (index >= mInputBuffers.size()) {
+ for (size_t i = mInputBuffers.size(); i <= index; ++i) {
+ mInputBuffers.add();
+ mMediaBuffers.add();
+ mInputBufferIsDequeued.add();
+ mMediaBuffers.editItemAt(i) = NULL;
+ mInputBufferIsDequeued.editItemAt(i) = false;
+ }
+ }
+ mInputBuffers.editItemAt(index) = buffer;
+
+ //CHECK_LT(bufferIx, mInputBuffers.size());
+
+ if (mMediaBuffers[index] != NULL) {
+ mMediaBuffers[index]->release();
+ mMediaBuffers.editItemAt(index) = NULL;
+ }
+ mInputBufferIsDequeued.editItemAt(index) = true;
+
+ if (!mCSDsToSubmit.isEmpty()) {
+ sp<AMessage> msg = new AMessage();
+ msg->setSize("buffer-ix", index);
+
+ sp<ABuffer> buffer = mCSDsToSubmit.itemAt(0);
+ ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
+ msg->setBuffer("buffer", buffer);
+ mCSDsToSubmit.removeAt(0);
+ if (!onInputBufferFetched(msg)) {
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+ return true;
+ }
+
+ while (!mPendingInputMessages.empty()) {
+ sp<AMessage> msg = *mPendingInputMessages.begin();
+ if (!onInputBufferFetched(msg)) {
+ break;
+ }
+ mPendingInputMessages.erase(mPendingInputMessages.begin());
+ }
+
+ if (!mInputBufferIsDequeued.editItemAt(index)) {
+ return true;
+ }
+
+ mDequeuedInputBuffers.push_back(index);
+
+ onRequestInputBuffers();
+ return true;
+}
+
+bool NuPlayer2::Decoder::handleAnOutputBuffer(
+ size_t index,
+ size_t offset,
+ size_t size,
+ int64_t timeUs,
+ int32_t flags) {
+ if (mCodec == NULL) {
+ ALOGE("[%s] handleAnOutputBuffer without a valid codec", mComponentName.c_str());
+ handleError(NO_INIT);
+ return false;
+ }
+
+// CHECK_LT(bufferIx, mOutputBuffers.size());
+
+ size_t bufferSize = 0;
+ uint8_t *bufferBase = mCodec->getOutputBuffer(index, &bufferSize);
+
+ if (bufferBase == NULL) {
+ ALOGE("[%s] handleAnOutputBuffer, failed to get output buffer", mComponentName.c_str());
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+
+ sp<MediaCodecBuffer> buffer =
+ new MediaCodecBuffer(NULL /* format */, new ABuffer(bufferBase, bufferSize));
+
+ if (index >= mOutputBuffers.size()) {
+ for (size_t i = mOutputBuffers.size(); i <= index; ++i) {
+ mOutputBuffers.add();
+ }
+ }
+
+ mOutputBuffers.editItemAt(index) = buffer;
+
+ buffer->setRange(offset, size);
+ buffer->meta()->clear();
+ buffer->meta()->setInt64("timeUs", timeUs);
+
+ bool eos = flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM;
+ // we do not expect CODECCONFIG or SYNCFRAME for decoder
+
+ sp<AMessage> reply = new AMessage(kWhatRenderBuffer, this);
+ reply->setSize("buffer-ix", index);
+ reply->setInt32("generation", mBufferGeneration);
+
+ if (eos) {
+ ALOGI("[%s] saw output EOS", mIsAudio ? "audio" : "video");
+
+ buffer->meta()->setInt32("eos", true);
+ reply->setInt32("eos", true);
+ }
+
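+ // Only video output frames count toward the frames-total stat; !mIsAudio is 1
+ // for the video decoder and 0 for the audio decoder.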
+ mNumFramesTotal += !mIsAudio;
+
+ if (mSkipRenderingUntilMediaTimeUs >= 0) {
+ if (timeUs < mSkipRenderingUntilMediaTimeUs) {
+ ALOGV("[%s] dropping buffer at time %lld as requested.",
+ mComponentName.c_str(), (long long)timeUs);
+
+ reply->post();
+ if (eos) {
+ notifyResumeCompleteIfNecessary();
+ if (mRenderer != NULL && !isDiscontinuityPending()) {
+ mRenderer->queueEOS(mIsAudio, ERROR_END_OF_STREAM);
+ }
+ }
+ return true;
+ }
+
+ mSkipRenderingUntilMediaTimeUs = -1;
+ }
+
+ // wait until 1st frame comes out to signal resume complete
+ notifyResumeCompleteIfNecessary();
+
+ if (mRenderer != NULL) {
+ // send the buffer to renderer.
+ mRenderer->queueBuffer(mIsAudio, buffer, reply);
+ if (eos && !isDiscontinuityPending()) {
+ mRenderer->queueEOS(mIsAudio, ERROR_END_OF_STREAM);
+ }
+ }
+
+ return true;
+}
+
+void NuPlayer2::Decoder::handleOutputFormatChange(const sp<AMessage> &format) {
+ if (!mIsAudio) {
+ int32_t width, height;
+ if (format->findInt32("width", &width)
+ && format->findInt32("height", &height)) {
+ mStats->setInt32("width", width);
+ mStats->setInt32("height", height);
+ }
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatVideoSizeChanged);
+ notify->setMessage("format", format);
+ notify->post();
+ } else if (mRenderer != NULL) {
+ uint32_t flags;
+ int64_t durationUs;
+ bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
+ if (getAudioDeepBufferSetting() // override regardless of source duration
+ || (mSource->getDuration(&durationUs) == OK
+ && durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US)) {
+ flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ } else {
+ flags = AUDIO_OUTPUT_FLAG_NONE;
+ }
+
+ sp<AMessage> reply = new AMessage(kWhatAudioOutputFormatChanged, this);
+ reply->setInt32("generation", mBufferGeneration);
+ mRenderer->changeAudioFormat(
+ format, false /* offloadOnly */, hasVideo,
+ flags, mSource->isStreaming(), reply);
+ }
+}
+
+void NuPlayer2::Decoder::releaseAndResetMediaBuffers() {
+ for (size_t i = 0; i < mMediaBuffers.size(); i++) {
+ if (mMediaBuffers[i] != NULL) {
+ mMediaBuffers[i]->release();
+ mMediaBuffers.editItemAt(i) = NULL;
+ }
+ }
+ mMediaBuffers.resize(mInputBuffers.size());
+ for (size_t i = 0; i < mMediaBuffers.size(); i++) {
+ mMediaBuffers.editItemAt(i) = NULL;
+ }
+ mInputBufferIsDequeued.clear();
+ mInputBufferIsDequeued.resize(mInputBuffers.size());
+ for (size_t i = 0; i < mInputBufferIsDequeued.size(); i++) {
+ mInputBufferIsDequeued.editItemAt(i) = false;
+ }
+
+ mPendingInputMessages.clear();
+ mDequeuedInputBuffers.clear();
+ mSkipRenderingUntilMediaTimeUs = -1;
+}
+
+bool NuPlayer2::Decoder::isStaleReply(const sp<AMessage> &msg) {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+ return generation != mBufferGeneration;
+}
+
+status_t NuPlayer2::Decoder::fetchInputData(sp<AMessage> &reply) {
+ sp<ABuffer> accessUnit;
+ bool dropAccessUnit = true;
+ do {
+ status_t err = mSource->dequeueAccessUnit(mIsAudio, &accessUnit);
+
+ if (err == -EWOULDBLOCK) {
+ return err;
+ } else if (err != OK) {
+ if (err == INFO_DISCONTINUITY) {
+ int32_t type;
+ CHECK(accessUnit->meta()->findInt32("discontinuity", &type));
+
+ bool formatChange =
+ (mIsAudio &&
+ (type & ATSParser::DISCONTINUITY_AUDIO_FORMAT))
+ || (!mIsAudio &&
+ (type & ATSParser::DISCONTINUITY_VIDEO_FORMAT));
+
+ bool timeChange = (type & ATSParser::DISCONTINUITY_TIME) != 0;
+
+ ALOGI("%s discontinuity (format=%d, time=%d)",
+ mIsAudio ? "audio" : "video", formatChange, timeChange);
+
+ bool seamlessFormatChange = false;
+ sp<AMessage> newFormat = mSource->getFormat(mIsAudio);
+ if (formatChange) {
+ seamlessFormatChange =
+ supportsSeamlessFormatChange(newFormat);
+ // treat seamless format change separately
+ formatChange = !seamlessFormatChange;
+ }
+
+ // For format or time change, return EOS to queue EOS input,
+ // then wait for EOS on output.
+ if (formatChange /* not seamless */) {
+ mFormatChangePending = true;
+ err = ERROR_END_OF_STREAM;
+ } else if (timeChange) {
+ rememberCodecSpecificData(newFormat);
+ mTimeChangePending = true;
+ err = ERROR_END_OF_STREAM;
+ } else if (seamlessFormatChange) {
+ // reuse existing decoder and don't flush
+ rememberCodecSpecificData(newFormat);
+ continue;
+ } else {
+ // This stream is unaffected by the discontinuity
+ return -EWOULDBLOCK;
+ }
+ }
+
+ // reply should only be returned without a buffer set
+ // when there is an error (including EOS)
+ CHECK(err != OK);
+
+ reply->setInt32("err", err);
+ return ERROR_END_OF_STREAM;
+ }
+
+ dropAccessUnit = false;
+ if (!mIsAudio && !mIsEncrypted) {
+ // Extra safeguard in case higher-level behavior changes; not strictly required now.
+ // This prevents the buffer from being processed (and sent to the codec) when this is
+ // a later round of playback without prepareDrm, or when a race between stop (which is
+ // not blocking) and releaseDrm would let buffers be processed after Crypto has been
+ // released (GenericSource currently prevents that race, though).
+ // In particular, do this check before the IsAVCReferenceFrame call so we never parse
+ // encrypted data.
+ if (mIsEncryptedObservedEarlier) {
+ ALOGE("fetchInputData: mismatched mIsEncrypted/mIsEncryptedObservedEarlier (0/1)");
+
+ return INVALID_OPERATION;
+ }
+
+ int32_t layerId = 0;
+ bool haveLayerId = accessUnit->meta()->findInt32("temporal-layer-id", &layerId);
+ if (mRenderer->getVideoLateByUs() > 100000ll
+ && mIsVideoAVC
+ && !IsAVCReferenceFrame(accessUnit)) {
+ dropAccessUnit = true;
+ } else if (haveLayerId && mNumVideoTemporalLayerTotal > 1) {
+ // Add only one layer each time.
+ if (layerId > mCurrentMaxVideoTemporalLayerId + 1
+ || layerId >= mNumVideoTemporalLayerAllowed) {
+ dropAccessUnit = true;
+ ALOGV("dropping layer(%d), speed=%g, allowed layer count=%d, max layerId=%d",
+ layerId, mPlaybackSpeed, mNumVideoTemporalLayerAllowed,
+ mCurrentMaxVideoTemporalLayerId);
+ } else if (layerId > mCurrentMaxVideoTemporalLayerId) {
+ mCurrentMaxVideoTemporalLayerId = layerId;
+ } else if (layerId == 0 && mNumVideoTemporalLayerTotal > 1
+ && IsIDR(accessUnit->data(), accessUnit->size())) {
+ mCurrentMaxVideoTemporalLayerId = mNumVideoTemporalLayerTotal - 1;
+ }
+ }
+ if (dropAccessUnit) {
+ if (layerId <= mCurrentMaxVideoTemporalLayerId && layerId > 0) {
+ mCurrentMaxVideoTemporalLayerId = layerId - 1;
+ }
+ ++mNumInputFramesDropped;
+ }
+ }
+ } while (dropAccessUnit);
+
+ // ALOGV("returned a valid buffer of %s data", mIsAudio ? "mIsAudio" : "video");
+#if 0
+ int64_t mediaTimeUs;
+ CHECK(accessUnit->meta()->findInt64("timeUs", &mediaTimeUs));
+ ALOGV("[%s] feeding input buffer at media time %.3f",
+ mIsAudio ? "audio" : "video",
+ mediaTimeUs / 1E6);
+#endif
+
+ if (mCCDecoder != NULL) {
+ mCCDecoder->decode(accessUnit);
+ }
+
+ reply->setBuffer("buffer", accessUnit);
+
+ return OK;
+}
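For readers skimming fetchInputData: an INFO_DISCONTINUITY access unit carries a bitmask describing what changed, and the decoder maps it to one of four outcomes (non-seamless format change, time change, seamless reuse, or ignore). A hedged, self-contained sketch of that decision table follows; the bit values and names are illustrative, not the actual ATSParser constants:

#include <cstdint>
#include <cstdio>

// Illustrative bit values; the real ones live in ATSParser.h.
enum : uint32_t {
    kDiscontinuityAudioFormat = 1u << 0,
    kDiscontinuityVideoFormat = 1u << 1,
    kDiscontinuityTime        = 1u << 2,
};

enum class Action { kShutdownAndRecreate, kFlushAndResume, kReuseSeamlessly, kIgnore };

// Mirrors the decision order in fetchInputData: a non-seamless format change
// wins over a time change, and a seamless change just refreshes the CSDs.
Action classifyDiscontinuity(uint32_t type, bool isAudio, bool seamlessSupported) {
    bool formatChange = isAudio ? (type & kDiscontinuityAudioFormat) != 0
                                : (type & kDiscontinuityVideoFormat) != 0;
    bool timeChange = (type & kDiscontinuityTime) != 0;

    if (formatChange && !seamlessSupported) return Action::kShutdownAndRecreate;
    if (timeChange)                         return Action::kFlushAndResume;
    if (formatChange)                       return Action::kReuseSeamlessly;
    return Action::kIgnore;  // stream unaffected by this discontinuity
}

int main() {
    Action a = classifyDiscontinuity(kDiscontinuityVideoFormat | kDiscontinuityTime,
                                     false /* video */, false /* no adaptive playback */);
    std::printf("%d\n", static_cast<int>(a));  // prints 0: kShutdownAndRecreate
    return 0;
}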
+
+bool NuPlayer2::Decoder::onInputBufferFetched(const sp<AMessage> &msg) {
+ if (mCodec == NULL) {
+ ALOGE("[%s] onInputBufferFetched without a valid codec", mComponentName.c_str());
+ handleError(NO_INIT);
+ return false;
+ }
+
+ size_t bufferIx;
+ CHECK(msg->findSize("buffer-ix", &bufferIx));
+ CHECK_LT(bufferIx, mInputBuffers.size());
+ sp<MediaCodecBuffer> codecBuffer = mInputBuffers[bufferIx];
+
+ sp<ABuffer> buffer;
+ bool hasBuffer = msg->findBuffer("buffer", &buffer);
+ bool needsCopy = true;
+
+ if (buffer == NULL /* includes !hasBuffer */) {
+ int32_t streamErr = ERROR_END_OF_STREAM;
+ CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
+
+ CHECK(streamErr != OK);
+
+ // attempt to queue EOS
+ status_t err = mCodec->queueInputBuffer(
+ bufferIx,
+ 0,
+ 0,
+ 0,
+ AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
+ if (err == OK) {
+ mInputBufferIsDequeued.editItemAt(bufferIx) = false;
+ } else if (streamErr == ERROR_END_OF_STREAM) {
+ streamErr = err;
+ // err will not be ERROR_END_OF_STREAM
+ }
+
+ if (streamErr != ERROR_END_OF_STREAM) {
+ ALOGE("Stream error for [%s] (err=%d), EOS %s queued",
+ mComponentName.c_str(),
+ streamErr,
+ err == OK ? "successfully" : "unsuccessfully");
+ handleError(streamErr);
+ }
+ } else {
+ sp<AMessage> extra;
+ if (buffer->meta()->findMessage("extra", &extra) && extra != NULL) {
+ int64_t resumeAtMediaTimeUs;
+ if (extra->findInt64(
+ "resume-at-mediaTimeUs", &resumeAtMediaTimeUs)) {
+ ALOGI("[%s] suppressing rendering until %lld us",
+ mComponentName.c_str(), (long long)resumeAtMediaTimeUs);
+ mSkipRenderingUntilMediaTimeUs = resumeAtMediaTimeUs;
+ }
+ }
+
+ int64_t timeUs = 0;
+ uint32_t flags = 0;
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+ int32_t eos, csd;
+ // we do not expect SYNCFRAME for decoder
+ if (buffer->meta()->findInt32("eos", &eos) && eos) {
+ flags |= AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM;
+ } else if (buffer->meta()->findInt32("csd", &csd) && csd) {
+ flags |= AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG;
+ }
+
+ // Modular DRM
+ MediaBufferBase *mediaBuf = NULL;
+ sp<AMediaCodecCryptoInfoWrapper> cryptInfo;
+
+ // copy into codec buffer
+ if (needsCopy) {
+ if (buffer->size() > codecBuffer->capacity()) {
+ handleError(ERROR_BUFFER_TOO_SMALL);
+ mDequeuedInputBuffers.push_back(bufferIx);
+ return false;
+ }
+
+ if (buffer->data() != NULL) {
+ codecBuffer->setRange(0, buffer->size());
+ memcpy(codecBuffer->data(), buffer->data(), buffer->size());
+ } else { // No buffer->data()
+ //Modular DRM
+ sp<RefBase> holder;
+ if (buffer->meta()->findObject("mediaBufferHolder", &holder)) {
+ mediaBuf = (holder != nullptr) ?
+ static_cast<MediaBufferHolder*>(holder.get())->mediaBuffer() : nullptr;
+ }
+ if (mediaBuf != NULL) {
+ codecBuffer->setRange(0, mediaBuf->size());
+ memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
+
+ MetaDataBase &meta_data = mediaBuf->meta_data();
+ cryptInfo = AMediaCodecCryptoInfoWrapper::Create(meta_data);
+ } else { // No mediaBuf
+ ALOGE("onInputBufferFetched: buffer->data()/mediaBuf are NULL for %p",
+ buffer.get());
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+ } // buffer->data()
+ } // needsCopy
+
+ status_t err;
+ if (cryptInfo != NULL) {
+ err = mCodec->queueSecureInputBuffer(
+ bufferIx,
+ codecBuffer->offset(),
+ cryptInfo,
+ timeUs,
+ flags);
+ // synchronous call so done with cryptInfo here
+ } else {
+ err = mCodec->queueInputBuffer(
+ bufferIx,
+ codecBuffer->offset(),
+ codecBuffer->size(),
+ timeUs,
+ flags);
+ } // no cryptInfo
+
+ if (err != OK) {
+ ALOGE("onInputBufferFetched: queue%sInputBuffer failed for [%s] (err=%d)",
+ (cryptInfo != NULL ? "Secure" : ""),
+ mComponentName.c_str(), err);
+ handleError(err);
+ } else {
+ mInputBufferIsDequeued.editItemAt(bufferIx) = false;
+ }
+
+ } // buffer != NULL
+ return true;
+}
+
+void NuPlayer2::Decoder::onRenderBuffer(const sp<AMessage> &msg) {
+ status_t err;
+ int32_t render;
+ size_t bufferIx;
+ int32_t eos;
+ CHECK(msg->findSize("buffer-ix", &bufferIx));
+
+ if (!mIsAudio) {
+ int64_t timeUs;
+ sp<MediaCodecBuffer> buffer = mOutputBuffers[bufferIx];
+ buffer->meta()->findInt64("timeUs", &timeUs);
+
+ if (mCCDecoder != NULL && mCCDecoder->isSelected()) {
+ mCCDecoder->display(timeUs);
+ }
+ }
+
+ if (mCodec == NULL) {
+ err = NO_INIT;
+ } else if (msg->findInt32("render", &render) && render) {
+ int64_t timestampNs;
+ CHECK(msg->findInt64("timestampNs", ×tampNs));
+ err = mCodec->releaseOutputBufferAtTime(bufferIx, timestampNs);
+ } else {
+ mNumOutputFramesDropped += !mIsAudio;
+ err = mCodec->releaseOutputBuffer(bufferIx, false /* render */);
+ }
+ if (err != OK) {
+ ALOGE("failed to release output buffer for [%s] (err=%d)",
+ mComponentName.c_str(), err);
+ handleError(err);
+ }
+ if (msg->findInt32("eos", &eos) && eos
+ && isDiscontinuityPending()) {
+ finishHandleDiscontinuity(true /* flushOnTimeChange */);
+ }
+}
+
+bool NuPlayer2::Decoder::isDiscontinuityPending() const {
+ return mFormatChangePending || mTimeChangePending;
+}
+
+void NuPlayer2::Decoder::finishHandleDiscontinuity(bool flushOnTimeChange) {
+ ALOGV("finishHandleDiscontinuity: format %d, time %d, flush %d",
+ mFormatChangePending, mTimeChangePending, flushOnTimeChange);
+
+ // If we have format change, pause and wait to be killed;
+ // If we have time change only, flush and restart fetching.
+
+ if (mFormatChangePending) {
+ mPaused = true;
+ } else if (mTimeChangePending) {
+ if (flushOnTimeChange) {
+ doFlush(false /* notifyComplete */);
+ signalResume(false /* notifyComplete */);
+ }
+ }
+
+ // Notify NuPlayer2 to either shutdown decoder, or rescan sources
+ sp<AMessage> msg = mNotify->dup();
+ msg->setInt32("what", kWhatInputDiscontinuity);
+ msg->setInt32("formatChange", mFormatChangePending);
+ msg->post();
+
+ mFormatChangePending = false;
+ mTimeChangePending = false;
+}
+
+bool NuPlayer2::Decoder::supportsSeamlessAudioFormatChange(
+ const sp<AMessage> &targetFormat) const {
+ if (targetFormat == NULL) {
+ return true;
+ }
+
+ AString mime;
+ if (!targetFormat->findString("mime", &mime)) {
+ return false;
+ }
+
+ if (!strcasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_AAC)) {
+ // field-by-field comparison
+ const char * keys[] = { "channel-count", "sample-rate", "is-adts" };
+ for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
+ int32_t oldVal, newVal;
+ if (!mInputFormat->getInt32(keys[i], &oldVal) ||
+ !targetFormat->findInt32(keys[i], &newVal) ||
+ oldVal != newVal) {
+ return false;
+ }
+ }
+
+ sp<ABuffer> newBuf;
+ uint8_t *oldBufData = NULL;
+ size_t oldBufSize = 0;
+ if (mInputFormat->getBuffer("csd-0", (void**)&oldBufData, &oldBufSize) &&
+ targetFormat->findBuffer("csd-0", &newBuf)) {
+ if (oldBufSize != newBuf->size()) {
+ return false;
+ }
+ return !memcmp(oldBufData, newBuf->data(), oldBufSize);
+ }
+ }
+ return false;
+}
+
+bool NuPlayer2::Decoder::supportsSeamlessFormatChange(const sp<AMessage> &targetFormat) const {
+ if (mInputFormat == NULL) {
+ return false;
+ }
+
+ if (targetFormat == NULL) {
+ return true;
+ }
+
+ AString oldMime, newMime;
+ if (!mInputFormat->getString("mime", &oldMime)
+ || !targetFormat->findString("mime", &newMime)
+ || !(oldMime == newMime)) {
+ return false;
+ }
+
+ bool audio = !strncasecmp(oldMime.c_str(), "audio/", strlen("audio/"));
+ bool seamless;
+ if (audio) {
+ seamless = supportsSeamlessAudioFormatChange(targetFormat);
+ } else {
+ int32_t isAdaptive;
+ seamless = (mCodec != NULL &&
+ mInputFormat->getInt32("adaptive-playback", &isAdaptive) &&
+ isAdaptive);
+ }
+
+ ALOGV("%s seamless support for %s", seamless ? "yes" : "no", oldMime.c_str());
+ return seamless;
+}
+
+void NuPlayer2::Decoder::rememberCodecSpecificData(const sp<AMessage> &format) {
+ if (format == NULL) {
+ return;
+ }
+ mCSDsForCurrentFormat.clear();
+ for (int32_t i = 0; ; ++i) {
+ AString tag = "csd-";
+ tag.append(i);
+ sp<ABuffer> buffer;
+ if (!format->findBuffer(tag.c_str(), &buffer)) {
+ break;
+ }
+ mCSDsForCurrentFormat.push(buffer);
+ }
+}
+
+void NuPlayer2::Decoder::notifyResumeCompleteIfNecessary() {
+ if (mResumePending) {
+ mResumePending = false;
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatResumeCompleted);
+ notify->post();
+ }
+}
+
+} // namespace android
+
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.h
new file mode 100644
index 0000000..fdfb10e
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NUPLAYER2_DECODER_H_
+#define NUPLAYER2_DECODER_H_
+
+#include "NuPlayer2.h"
+
+#include "NuPlayer2DecoderBase.h"
+
+namespace android {
+
+class MediaCodecBuffer;
+
+struct AMediaCodecWrapper;
+struct AMediaFormatWrapper;
+
+struct NuPlayer2::Decoder : public DecoderBase {
+ Decoder(const sp<AMessage> &notify,
+ const sp<Source> &source,
+ pid_t pid,
+ uid_t uid,
+ const sp<Renderer> &renderer = NULL,
+ const sp<ANativeWindowWrapper> &nww = NULL,
+ const sp<CCDecoder> &ccDecoder = NULL);
+
+ virtual sp<AMessage> getStats() const;
+
+ // sets the output surface of video decoders.
+ virtual status_t setVideoSurface(const sp<ANativeWindowWrapper> &nww);
+
+ virtual status_t releaseCrypto();
+
+protected:
+ virtual ~Decoder();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+ virtual void onConfigure(const sp<AMessage> &format);
+ virtual void onSetParameters(const sp<AMessage> &params);
+ virtual void onSetRenderer(const sp<Renderer> &renderer);
+ virtual void onResume(bool notifyComplete);
+ virtual void onFlush();
+ virtual void onShutdown(bool notifyComplete);
+ virtual bool doRequestBuffers();
+
+private:
+ enum {
+ kWhatCodecNotify = 'cdcN',
+ kWhatRenderBuffer = 'rndr',
+ kWhatSetVideoSurface = 'sSur',
+ kWhatAudioOutputFormatChanged = 'aofc',
+ kWhatDrmReleaseCrypto = 'rDrm',
+ };
+
+ enum {
+ kMaxNumVideoTemporalLayers = 32,
+ };
+
+ sp<ANativeWindowWrapper> mNativeWindow;
+
+ sp<Source> mSource;
+ sp<Renderer> mRenderer;
+ sp<CCDecoder> mCCDecoder;
+
+ sp<AMediaFormatWrapper> mInputFormat;
+ sp<AMediaCodecWrapper> mCodec;
+
+ List<sp<AMessage> > mPendingInputMessages;
+
+ Vector<sp<MediaCodecBuffer> > mInputBuffers;
+ Vector<sp<MediaCodecBuffer> > mOutputBuffers;
+ Vector<sp<ABuffer> > mCSDsForCurrentFormat;
+ Vector<sp<ABuffer> > mCSDsToSubmit;
+ Vector<bool> mInputBufferIsDequeued;
+ Vector<MediaBuffer *> mMediaBuffers;
+ Vector<size_t> mDequeuedInputBuffers;
+
+ const pid_t mPid;
+ const uid_t mUid;
+ int64_t mSkipRenderingUntilMediaTimeUs;
+ int64_t mNumFramesTotal;
+ int64_t mNumInputFramesDropped;
+ int64_t mNumOutputFramesDropped;
+ int32_t mVideoWidth;
+ int32_t mVideoHeight;
+ bool mIsAudio;
+ bool mIsVideoAVC;
+ bool mIsSecure;
+ bool mIsEncrypted;
+ bool mIsEncryptedObservedEarlier;
+ bool mFormatChangePending;
+ bool mTimeChangePending;
+ float mFrameRateTotal;
+ float mPlaybackSpeed;
+ int32_t mNumVideoTemporalLayerTotal;
+ int32_t mNumVideoTemporalLayerAllowed;
+ int32_t mCurrentMaxVideoTemporalLayerId;
+ float mVideoTemporalLayerAggregateFps[kMaxNumVideoTemporalLayers];
+
+ bool mResumePending;
+ AString mComponentName;
+
+ void handleError(int32_t err);
+ bool handleAnInputBuffer(size_t index);
+ bool handleAnOutputBuffer(
+ size_t index,
+ size_t offset,
+ size_t size,
+ int64_t timeUs,
+ int32_t flags);
+ void handleOutputFormatChange(const sp<AMessage> &format);
+
+ void releaseAndResetMediaBuffers();
+ bool isStaleReply(const sp<AMessage> &msg);
+
+ void doFlush(bool notifyComplete);
+ status_t fetchInputData(sp<AMessage> &reply);
+ bool onInputBufferFetched(const sp<AMessage> &msg);
+ void onRenderBuffer(const sp<AMessage> &msg);
+
+ bool supportsSeamlessFormatChange(const sp<AMessage> &to) const;
+ bool supportsSeamlessAudioFormatChange(const sp<AMessage> &targetFormat) const;
+ void rememberCodecSpecificData(const sp<AMessage> &format);
+ bool isDiscontinuityPending() const;
+ void finishHandleDiscontinuity(bool flushOnTimeChange);
+
+ void notifyResumeCompleteIfNecessary();
+
+ void onReleaseCrypto(const sp<AMessage>& msg);
+
+ DISALLOW_EVIL_CONSTRUCTORS(Decoder);
+};
+
+} // namespace android
+
+#endif // NUPLAYER2_DECODER_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
new file mode 100644
index 0000000..9c1988f
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayer2DecoderBase"
+#include <utils/Log.h>
+#include <inttypes.h>
+
+#include "NuPlayer2DecoderBase.h"
+
+#include "NuPlayer2Renderer.h"
+
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
+NuPlayer2::DecoderBase::DecoderBase(const sp<AMessage> &notify)
+ : mNotify(notify),
+ mBufferGeneration(0),
+ mPaused(false),
+ mStats(new AMessage),
+ mRequestInputBuffersPending(false) {
+ // Every decoder has its own looper because MediaCodec operations
+ // are blocking, but NuPlayer2 needs asynchronous operations.
+ mDecoderLooper = new ALooper;
+ mDecoderLooper->setName("NPDecoder");
+ mDecoderLooper->start(false, /* runOnCallingThread */
+ true, /* canCallJava */
+ ANDROID_PRIORITY_AUDIO);
+}
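The constructor comment above captures the key design point: AMediaCodec calls block, so each decoder owns a looper thread and callers only ever post messages to it. As a rough, framework-free sketch of the same pattern (plain C++11 worker thread draining a queue of closures; this is an assumption-level analogy, not the actual ALooper/AHandler API):

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>

// A tiny single-threaded "looper": callers post work and return immediately;
// the blocking work runs on the worker thread, the way MediaCodec calls run
// on the decoder's own ALooper.
class MiniLooper {
public:
    MiniLooper() { mThread = std::thread([this] { run(); }); }
    ~MiniLooper() {
        { std::lock_guard<std::mutex> l(mLock); mDone = true; }
        mCond.notify_one();
        mThread.join();
    }
    void post(std::function<void()> task) {
        { std::lock_guard<std::mutex> l(mLock); mQueue.push(std::move(task)); }
        mCond.notify_one();
    }
private:
    void run() {
        for (;;) {
            std::function<void()> task;
            {
                std::unique_lock<std::mutex> l(mLock);
                mCond.wait(l, [this] { return mDone || !mQueue.empty(); });
                if (mQueue.empty()) return;   // done and fully drained
                task = std::move(mQueue.front());
                mQueue.pop();
            }
            task();                           // blocking work happens here
        }
    }
    std::mutex mLock;
    std::condition_variable mCond;
    std::queue<std::function<void()>> mQueue;
    bool mDone = false;
    std::thread mThread;
};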
+
+NuPlayer2::DecoderBase::~DecoderBase() {
+ stopLooper();
+}
+
+static
+status_t PostAndAwaitResponse(
+ const sp<AMessage> &msg, sp<AMessage> *response) {
+ status_t err = msg->postAndAwaitResponse(response);
+
+ if (err != OK) {
+ return err;
+ }
+
+ if (!(*response)->findInt32("err", &err)) {
+ err = OK;
+ }
+
+ return err;
+}
+
+void NuPlayer2::DecoderBase::configure(const sp<AMessage> &format) {
+ sp<AMessage> msg = new AMessage(kWhatConfigure, this);
+ msg->setMessage("format", format);
+ msg->post();
+}
+
+void NuPlayer2::DecoderBase::init() {
+ mDecoderLooper->registerHandler(this);
+}
+
+void NuPlayer2::DecoderBase::stopLooper() {
+ mDecoderLooper->unregisterHandler(id());
+ mDecoderLooper->stop();
+}
+
+void NuPlayer2::DecoderBase::setParameters(const sp<AMessage> &params) {
+ sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
+ msg->setMessage("params", params);
+ msg->post();
+}
+
+void NuPlayer2::DecoderBase::setRenderer(const sp<Renderer> &renderer) {
+ sp<AMessage> msg = new AMessage(kWhatSetRenderer, this);
+ msg->setObject("renderer", renderer);
+ msg->post();
+}
+
+void NuPlayer2::DecoderBase::pause() {
+ sp<AMessage> msg = new AMessage(kWhatPause, this);
+
+ sp<AMessage> response;
+ PostAndAwaitResponse(msg, &response);
+}
+
+void NuPlayer2::DecoderBase::signalFlush() {
+ (new AMessage(kWhatFlush, this))->post();
+}
+
+void NuPlayer2::DecoderBase::signalResume(bool notifyComplete) {
+ sp<AMessage> msg = new AMessage(kWhatResume, this);
+ msg->setInt32("notifyComplete", notifyComplete);
+ msg->post();
+}
+
+void NuPlayer2::DecoderBase::initiateShutdown() {
+ (new AMessage(kWhatShutdown, this))->post();
+}
+
+void NuPlayer2::DecoderBase::onRequestInputBuffers() {
+ if (mRequestInputBuffersPending) {
+ return;
+ }
+
+ // doRequestBuffers() returns true if we should request more data
+ if (doRequestBuffers()) {
+ mRequestInputBuffersPending = true;
+
+ sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, this);
+ msg->post(10 * 1000ll);
+ }
+}
+
+void NuPlayer2::DecoderBase::onMessageReceived(const sp<AMessage> &msg) {
+
+ switch (msg->what()) {
+ case kWhatConfigure:
+ {
+ sp<AMessage> format;
+ CHECK(msg->findMessage("format", &format));
+ onConfigure(format);
+ break;
+ }
+
+ case kWhatSetParameters:
+ {
+ sp<AMessage> params;
+ CHECK(msg->findMessage("params", ¶ms));
+ onSetParameters(params);
+ break;
+ }
+
+ case kWhatSetRenderer:
+ {
+ sp<RefBase> obj;
+ CHECK(msg->findObject("renderer", &obj));
+ onSetRenderer(static_cast<Renderer *>(obj.get()));
+ break;
+ }
+
+ case kWhatPause:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ mPaused = true;
+
+ (new AMessage)->postReply(replyID);
+ break;
+ }
+
+ case kWhatRequestInputBuffers:
+ {
+ mRequestInputBuffersPending = false;
+ onRequestInputBuffers();
+ break;
+ }
+
+ case kWhatFlush:
+ {
+ onFlush();
+ break;
+ }
+
+ case kWhatResume:
+ {
+ int32_t notifyComplete;
+ CHECK(msg->findInt32("notifyComplete", ¬ifyComplete));
+
+ onResume(notifyComplete);
+ break;
+ }
+
+ case kWhatShutdown:
+ {
+ onShutdown(true);
+ break;
+ }
+
+ default:
+ TRESPASS();
+ break;
+ }
+}
+
+void NuPlayer2::DecoderBase::handleError(int32_t err)
+{
+ // We cannot immediately release the codec due to buffers still outstanding
+ // in the renderer. We signal to the player the error so it can shutdown/release the
+ // decoder after flushing and increment the generation to discard unnecessary messages.
+
+ ++mBufferGeneration;
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatError);
+ notify->setInt32("err", err);
+ notify->post();
+}
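handleError's comment is worth unpacking: the codec cannot be torn down immediately, so the generation counter is bumped and every queued reply is stamped with the generation it was created under; anything carrying an old generation is simply dropped (see isStaleReply in the Decoder above). A minimal sketch of that invalidation pattern, with illustrative names:

#include <cassert>
#include <cstdint>

struct Reply {
    int32_t generation;   // stamped when the reply message was created
    // payload omitted
};

struct DecoderLike {
    int32_t bufferGeneration = 0;

    Reply makeReply() const { return Reply{bufferGeneration}; }

    // Same idea as DecoderBase::handleError(): release nothing yet,
    // just make every in-flight reply stale.
    void invalidateInFlightReplies() { ++bufferGeneration; }

    bool isStale(const Reply &r) const { return r.generation != bufferGeneration; }
};

int main() {
    DecoderLike dec;
    Reply r = dec.makeReply();
    assert(!dec.isStale(r));
    dec.invalidateInFlightReplies();   // e.g. an error or a flush happened
    assert(dec.isStale(r));            // the old reply is now ignored
    return 0;
}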
+
+} // namespace android
+
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.h b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.h
new file mode 100644
index 0000000..1e57f0d
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NUPLAYER2_DECODER_BASE_H_
+
+#define NUPLAYER2_DECODER_BASE_H_
+
+#include "NuPlayer2.h"
+
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct ABuffer;
+struct ANativeWindowWrapper;
+struct MediaCodec;
+class MediaBuffer;
+class MediaCodecBuffer;
+
+struct NuPlayer2::DecoderBase : public AHandler {
+ explicit DecoderBase(const sp<AMessage> &notify);
+
+ void configure(const sp<AMessage> &format);
+ void init();
+ void setParameters(const sp<AMessage> &params);
+
+ // Synchronous call to ensure decoder will not request or send out data.
+ void pause();
+
+ void setRenderer(const sp<Renderer> &renderer);
+ virtual status_t setVideoSurface(const sp<ANativeWindowWrapper> &) { return INVALID_OPERATION; }
+
+ void signalFlush();
+ void signalResume(bool notifyComplete);
+ void initiateShutdown();
+
+ virtual sp<AMessage> getStats() const {
+ return mStats;
+ }
+
+ virtual status_t releaseCrypto() {
+ return INVALID_OPERATION;
+ }
+
+ enum {
+ kWhatInputDiscontinuity = 'inDi',
+ kWhatVideoSizeChanged = 'viSC',
+ kWhatFlushCompleted = 'flsC',
+ kWhatShutdownCompleted = 'shDC',
+ kWhatResumeCompleted = 'resC',
+ kWhatEOS = 'eos ',
+ kWhatError = 'err ',
+ };
+
+protected:
+
+ virtual ~DecoderBase();
+
+ void stopLooper();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+ virtual void onConfigure(const sp<AMessage> &format) = 0;
+ virtual void onSetParameters(const sp<AMessage> &params) = 0;
+ virtual void onSetRenderer(const sp<Renderer> &renderer) = 0;
+ virtual void onResume(bool notifyComplete) = 0;
+ virtual void onFlush() = 0;
+ virtual void onShutdown(bool notifyComplete) = 0;
+
+ void onRequestInputBuffers();
+ virtual bool doRequestBuffers() = 0;
+ virtual void handleError(int32_t err);
+
+ sp<AMessage> mNotify;
+ int32_t mBufferGeneration;
+ bool mPaused;
+ sp<AMessage> mStats;
+
+private:
+ enum {
+ kWhatConfigure = 'conf',
+ kWhatSetParameters = 'setP',
+ kWhatSetRenderer = 'setR',
+ kWhatPause = 'paus',
+ kWhatRequestInputBuffers = 'reqB',
+ kWhatFlush = 'flus',
+ kWhatShutdown = 'shuD',
+ };
+
+ sp<ALooper> mDecoderLooper;
+ bool mRequestInputBuffersPending;
+
+ DISALLOW_EVIL_CONSTRUCTORS(DecoderBase);
+};
+
+} // namespace android
+
+#endif // NUPLAYER2_DECODER_BASE_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
new file mode 100644
index 0000000..0e0c1d8
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
@@ -0,0 +1,434 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayer2DecoderPassThrough"
+#include <utils/Log.h>
+#include <inttypes.h>
+
+#include "NuPlayer2DecoderPassThrough.h"
+
+#include "NuPlayer2Renderer.h"
+#include "NuPlayer2Source.h"
+
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include "ATSParser.h"
+
+namespace android {
+
+// TODO optimize buffer size for power consumption
+// The offload read buffer size is 32 KB but 24 KB uses less power.
+static const size_t kAggregateBufferSizeBytes = 24 * 1024;
+static const size_t kMaxCachedBytes = 200000;
+
+NuPlayer2::DecoderPassThrough::DecoderPassThrough(
+ const sp<AMessage> &notify,
+ const sp<Source> &source,
+ const sp<Renderer> &renderer)
+ : DecoderBase(notify),
+ mSource(source),
+ mRenderer(renderer),
+ mSkipRenderingUntilMediaTimeUs(-1ll),
+ mReachedEOS(true),
+ mPendingAudioErr(OK),
+ mPendingBuffersToDrain(0),
+ mCachedBytes(0),
+ mComponentName("pass through decoder") {
+ ALOGW_IF(renderer == NULL, "expect a non-NULL renderer");
+}
+
+NuPlayer2::DecoderPassThrough::~DecoderPassThrough() {
+}
+
+void NuPlayer2::DecoderPassThrough::onConfigure(const sp<AMessage> &format) {
+ ALOGV("[%s] onConfigure", mComponentName.c_str());
+ mCachedBytes = 0;
+ mPendingBuffersToDrain = 0;
+ mReachedEOS = false;
+ ++mBufferGeneration;
+
+ onRequestInputBuffers();
+
+ int32_t hasVideo = 0;
+ format->findInt32("has-video", &hasVideo);
+
+ // The audio sink is already opened before the PassThrough decoder is created.
+ // Opening again might be relevant if decoder is instantiated after shutdown and
+ // format is different.
+ status_t err = mRenderer->openAudioSink(
+ format, true /* offloadOnly */, hasVideo,
+ AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */, mSource->isStreaming());
+ if (err != OK) {
+ handleError(err);
+ }
+}
+
+void NuPlayer2::DecoderPassThrough::onSetParameters(const sp<AMessage> &/*params*/) {
+ ALOGW("onSetParameters() called unexpectedly");
+}
+
+void NuPlayer2::DecoderPassThrough::onSetRenderer(
+ const sp<Renderer> &renderer) {
+ // renderer can't be changed during offloading
+ ALOGW_IF(renderer != mRenderer,
+ "ignoring request to change renderer");
+}
+
+bool NuPlayer2::DecoderPassThrough::isStaleReply(const sp<AMessage> &msg) {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+ return generation != mBufferGeneration;
+}
+
+bool NuPlayer2::DecoderPassThrough::isDoneFetching() const {
+ ALOGV("[%s] mCachedBytes = %zu, mReachedEOS = %d mPaused = %d",
+ mComponentName.c_str(), mCachedBytes, mReachedEOS, mPaused);
+
+ return mCachedBytes >= kMaxCachedBytes || mReachedEOS || mPaused;
+}
+
+/*
+ * returns true if we should request more data
+ */
+bool NuPlayer2::DecoderPassThrough::doRequestBuffers() {
+ status_t err = OK;
+ while (!isDoneFetching()) {
+ sp<AMessage> msg = new AMessage();
+
+ err = fetchInputData(msg);
+ if (err != OK) {
+ break;
+ }
+
+ onInputBufferFetched(msg);
+ }
+
+ return err == -EWOULDBLOCK
+ && mSource->feedMoreTSData() == OK;
+}
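The passthrough decoder throttles itself on bytes rather than buffer counts: doRequestBuffers keeps pulling access units until roughly kMaxCachedBytes are queued to the renderer, and fetching resumes once onBufferConsumed drops the count again. A rough sketch of that byte-budget throttle (the threshold is copied from above; everything else is illustrative):

#include <cstddef>

static const size_t kMaxCachedBytes = 200000;  // same budget as above

struct ByteThrottle {
    size_t cachedBytes = 0;
    bool reachedEOS = false;
    bool paused = false;

    // Mirrors isDoneFetching(): stop pulling once enough data is in flight.
    bool doneFetching() const {
        return cachedBytes >= kMaxCachedBytes || reachedEOS || paused;
    }

    // Called when a fetched buffer is handed to the renderer.
    void onQueued(size_t size) { cachedBytes += size; }

    // Called from the "buffer consumed" reply; fetching may resume afterwards.
    void onConsumed(size_t size) { cachedBytes -= size; }
};

int main() {
    ByteThrottle t;
    while (!t.doneFetching()) {
        t.onQueued(24 * 1024);        // e.g. one aggregated 24 KB buffer
    }
    // 9 buffers * 24 KB = 221184 bytes >= 200000, so fetching pauses here.
    t.onConsumed(24 * 1024);          // renderer drained one buffer
    return t.doneFetching() ? 1 : 0;  // below budget again -> keep fetching
}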
+
+status_t NuPlayer2::DecoderPassThrough::dequeueAccessUnit(sp<ABuffer> *accessUnit) {
+ status_t err;
+
+ // Did we save an accessUnit earlier because of a discontinuity?
+ if (mPendingAudioAccessUnit != NULL) {
+ *accessUnit = mPendingAudioAccessUnit;
+ mPendingAudioAccessUnit.clear();
+ err = mPendingAudioErr;
+ ALOGV("feedDecoderInputData() use mPendingAudioAccessUnit");
+ } else {
+ err = mSource->dequeueAccessUnit(true /* audio */, accessUnit);
+ }
+
+ if (err == INFO_DISCONTINUITY || err == ERROR_END_OF_STREAM) {
+ if (mAggregateBuffer != NULL) {
+ // We already have some data so save this for later.
+ mPendingAudioErr = err;
+ mPendingAudioAccessUnit = *accessUnit;
+ (*accessUnit).clear();
+ ALOGD("return aggregated buffer and save err(=%d) for later", err);
+ err = OK;
+ }
+ }
+
+ return err;
+}
+
+sp<ABuffer> NuPlayer2::DecoderPassThrough::aggregateBuffer(
+ const sp<ABuffer> &accessUnit) {
+ sp<ABuffer> aggregate;
+
+ if (accessUnit == NULL) {
+ // accessUnit is saved to mPendingAudioAccessUnit
+ // return current mAggregateBuffer
+ aggregate = mAggregateBuffer;
+ mAggregateBuffer.clear();
+ return aggregate;
+ }
+
+ size_t smallSize = accessUnit->size();
+ if ((mAggregateBuffer == NULL)
+ // Don't bother if only room for a few small buffers.
+ && (smallSize < (kAggregateBufferSizeBytes / 3))) {
+ // Create a larger buffer for combining smaller buffers from the extractor.
+ mAggregateBuffer = new ABuffer(kAggregateBufferSizeBytes);
+ mAggregateBuffer->setRange(0, 0); // start empty
+ }
+
+ if (mAggregateBuffer != NULL) {
+ int64_t timeUs;
+ int64_t dummy;
+ bool smallTimestampValid = accessUnit->meta()->findInt64("timeUs", &timeUs);
+ bool bigTimestampValid = mAggregateBuffer->meta()->findInt64("timeUs", &dummy);
+ // Will the smaller buffer fit?
+ size_t bigSize = mAggregateBuffer->size();
+ size_t roomLeft = mAggregateBuffer->capacity() - bigSize;
+ // Should we save this small buffer for the next big buffer?
+ // If the first small buffer did not have a timestamp then save
+ // any buffer that does have a timestamp until the next big buffer.
+ if ((smallSize > roomLeft)
+ || (!bigTimestampValid && (bigSize > 0) && smallTimestampValid)) {
+ mPendingAudioErr = OK;
+ mPendingAudioAccessUnit = accessUnit;
+ aggregate = mAggregateBuffer;
+ mAggregateBuffer.clear();
+ } else {
+ // Grab time from first small buffer if available.
+ if ((bigSize == 0) && smallTimestampValid) {
+ mAggregateBuffer->meta()->setInt64("timeUs", timeUs);
+ }
+ // Append small buffer to the bigger buffer.
+ memcpy(mAggregateBuffer->base() + bigSize, accessUnit->data(), smallSize);
+ bigSize += smallSize;
+ mAggregateBuffer->setRange(0, bigSize);
+
+ ALOGV("feedDecoderInputData() smallSize = %zu, bigSize = %zu, capacity = %zu",
+ smallSize, bigSize, mAggregateBuffer->capacity());
+ }
+ } else {
+ // decided not to aggregate
+ aggregate = accessUnit;
+ }
+
+ return aggregate;
+}
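The aggregation path above is mostly bookkeeping; the core decision is just "does the small buffer still fit in the 24 KB aggregate". A simplified sketch of that arithmetic over raw byte vectors (the capacity matches the constant above; timestamps and the pending access unit handled by the real code are omitted):

#include <cstddef>
#include <vector>

static const size_t kAggregateCapacity = 24 * 1024;  // matches kAggregateBufferSizeBytes

// Appends 'small' into 'big' when it fits; returns false when the caller
// should ship 'big' downstream and retry this access unit afterwards.
bool tryAggregate(std::vector<unsigned char> &big,
                  const std::vector<unsigned char> &small) {
    size_t roomLeft = kAggregateCapacity - big.size();
    if (small.size() > roomLeft) {
        return false;               // flush the aggregate first
    }
    big.insert(big.end(), small.begin(), small.end());
    return true;
}

int main() {
    std::vector<unsigned char> aggregate;
    std::vector<unsigned char> smallAu(6 * 1024, 0);  // a 6 KB access unit
    int appended = 0;
    while (tryAggregate(aggregate, smallAu)) ++appended;
    // 24 KB / 6 KB -> four access units fit before a flush is needed.
    return appended == 4 ? 0 : 1;
}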
+
+status_t NuPlayer2::DecoderPassThrough::fetchInputData(sp<AMessage> &reply) {
+ sp<ABuffer> accessUnit;
+
+ do {
+ status_t err = dequeueAccessUnit(&accessUnit);
+
+ if (err == -EWOULDBLOCK) {
+ // Flush out the aggregate buffer to try to avoid underrun.
+ accessUnit = aggregateBuffer(NULL /* accessUnit */);
+ if (accessUnit != NULL) {
+ break;
+ }
+ return err;
+ } else if (err != OK) {
+ if (err == INFO_DISCONTINUITY) {
+ int32_t type;
+ CHECK(accessUnit->meta()->findInt32("discontinuity", &type));
+
+ bool formatChange =
+ (type & ATSParser::DISCONTINUITY_AUDIO_FORMAT) != 0;
+
+ bool timeChange =
+ (type & ATSParser::DISCONTINUITY_TIME) != 0;
+
+ ALOGI("audio discontinuity (formatChange=%d, time=%d)",
+ formatChange, timeChange);
+
+ if (formatChange || timeChange) {
+ sp<AMessage> msg = mNotify->dup();
+ msg->setInt32("what", kWhatInputDiscontinuity);
+ // will perform seamless format change,
+ // only notify NuPlayer2 to scan sources
+ msg->setInt32("formatChange", false);
+ msg->post();
+ }
+
+ if (timeChange) {
+ doFlush(false /* notifyComplete */);
+ err = OK;
+ } else if (formatChange) {
+ // do seamless format change
+ err = OK;
+ } else {
+ // This stream is unaffected by the discontinuity
+ return -EWOULDBLOCK;
+ }
+ }
+
+ reply->setInt32("err", err);
+ return OK;
+ }
+
+ accessUnit = aggregateBuffer(accessUnit);
+ } while (accessUnit == NULL);
+
+#if 0
+ int64_t mediaTimeUs;
+ CHECK(accessUnit->meta()->findInt64("timeUs", &mediaTimeUs));
+ ALOGV("feeding audio input buffer at media time %.2f secs",
+ mediaTimeUs / 1E6);
+#endif
+
+ reply->setBuffer("buffer", accessUnit);
+
+ return OK;
+}
+
+void NuPlayer2::DecoderPassThrough::onInputBufferFetched(
+ const sp<AMessage> &msg) {
+ if (mReachedEOS) {
+ return;
+ }
+
+ sp<ABuffer> buffer;
+ bool hasBuffer = msg->findBuffer("buffer", &buffer);
+ if (buffer == NULL) {
+ int32_t streamErr = ERROR_END_OF_STREAM;
+ CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
+ if (streamErr == OK) {
+ return;
+ }
+
+ if (streamErr != ERROR_END_OF_STREAM) {
+ handleError(streamErr);
+ }
+ mReachedEOS = true;
+ if (mRenderer != NULL) {
+ mRenderer->queueEOS(true /* audio */, ERROR_END_OF_STREAM);
+ }
+ return;
+ }
+
+ sp<AMessage> extra;
+ if (buffer->meta()->findMessage("extra", &extra) && extra != NULL) {
+ int64_t resumeAtMediaTimeUs;
+ if (extra->findInt64(
+ "resume-at-mediatimeUs", &resumeAtMediaTimeUs)) {
+ ALOGI("[%s] suppressing rendering until %lld us",
+ mComponentName.c_str(), (long long)resumeAtMediaTimeUs);
+ mSkipRenderingUntilMediaTimeUs = resumeAtMediaTimeUs;
+ }
+ }
+
+ int32_t bufferSize = buffer->size();
+ mCachedBytes += bufferSize;
+
+ int64_t timeUs = 0;
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+ if (mSkipRenderingUntilMediaTimeUs >= 0) {
+ if (timeUs < mSkipRenderingUntilMediaTimeUs) {
+ ALOGV("[%s] dropping buffer at time %lld as requested.",
+ mComponentName.c_str(), (long long)timeUs);
+
+ onBufferConsumed(bufferSize);
+ return;
+ }
+
+ mSkipRenderingUntilMediaTimeUs = -1;
+ }
+
+ if (mRenderer == NULL) {
+ onBufferConsumed(bufferSize);
+ return;
+ }
+
+ sp<AMessage> reply = new AMessage(kWhatBufferConsumed, this);
+ reply->setInt32("generation", mBufferGeneration);
+ reply->setInt32("size", bufferSize);
+
+ sp<MediaCodecBuffer> mcBuffer = new MediaCodecBuffer(nullptr, buffer);
+ mcBuffer->meta()->setInt64("timeUs", timeUs);
+
+ mRenderer->queueBuffer(true /* audio */, mcBuffer, reply);
+
+ ++mPendingBuffersToDrain;
+ ALOGV("onInputBufferFilled: #ToDrain = %zu, cachedBytes = %zu",
+ mPendingBuffersToDrain, mCachedBytes);
+}
+
+void NuPlayer2::DecoderPassThrough::onBufferConsumed(int32_t size) {
+ --mPendingBuffersToDrain;
+ mCachedBytes -= size;
+ ALOGV("onBufferConsumed: #ToDrain = %zu, cachedBytes = %zu",
+ mPendingBuffersToDrain, mCachedBytes);
+ onRequestInputBuffers();
+}
+
+void NuPlayer2::DecoderPassThrough::onResume(bool notifyComplete) {
+ mPaused = false;
+
+ onRequestInputBuffers();
+
+ if (notifyComplete) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatResumeCompleted);
+ notify->post();
+ }
+}
+
+void NuPlayer2::DecoderPassThrough::doFlush(bool notifyComplete) {
+ ++mBufferGeneration;
+ mSkipRenderingUntilMediaTimeUs = -1;
+ mPendingAudioAccessUnit.clear();
+ mPendingAudioErr = OK;
+ mAggregateBuffer.clear();
+
+ if (mRenderer != NULL) {
+ mRenderer->flush(true /* audio */, notifyComplete);
+ mRenderer->signalTimeDiscontinuity();
+ }
+
+ mPendingBuffersToDrain = 0;
+ mCachedBytes = 0;
+ mReachedEOS = false;
+}
+
+void NuPlayer2::DecoderPassThrough::onFlush() {
+ doFlush(true /* notifyComplete */);
+
+ mPaused = true;
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatFlushCompleted);
+ notify->post();
+
+}
+
+void NuPlayer2::DecoderPassThrough::onShutdown(bool notifyComplete) {
+ ++mBufferGeneration;
+ mSkipRenderingUntilMediaTimeUs = -1;
+
+ if (notifyComplete) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatShutdownCompleted);
+ notify->post();
+ }
+
+ mReachedEOS = true;
+}
+
+void NuPlayer2::DecoderPassThrough::onMessageReceived(const sp<AMessage> &msg) {
+ ALOGV("[%s] onMessage: %s", mComponentName.c_str(),
+ msg->debugString().c_str());
+
+ switch (msg->what()) {
+ case kWhatBufferConsumed:
+ {
+ if (!isStaleReply(msg)) {
+ int32_t size;
+ CHECK(msg->findInt32("size", &size));
+ onBufferConsumed(size);
+ }
+ break;
+ }
+
+ default:
+ DecoderBase::onMessageReceived(msg);
+ break;
+ }
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.h b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.h
new file mode 100644
index 0000000..838c60a
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NUPLAYER2_DECODER_PASS_THROUGH_H_
+
+#define NUPLAYER2_DECODER_PASS_THROUGH_H_
+
+#include "NuPlayer2.h"
+
+#include "NuPlayer2DecoderBase.h"
+
+namespace android {
+
+struct NuPlayer2::DecoderPassThrough : public DecoderBase {
+ DecoderPassThrough(const sp<AMessage> &notify,
+ const sp<Source> &source,
+ const sp<Renderer> &renderer);
+
+protected:
+
+ virtual ~DecoderPassThrough();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+ virtual void onConfigure(const sp<AMessage> &format);
+ virtual void onSetParameters(const sp<AMessage> &params);
+ virtual void onSetRenderer(const sp<Renderer> &renderer);
+ virtual void onResume(bool notifyComplete);
+ virtual void onFlush();
+ virtual void onShutdown(bool notifyComplete);
+ virtual bool doRequestBuffers();
+
+private:
+ enum {
+ kWhatBufferConsumed = 'bufC',
+ };
+
+ sp<Source> mSource;
+ sp<Renderer> mRenderer;
+ int64_t mSkipRenderingUntilMediaTimeUs;
+
+ bool mReachedEOS;
+
+ // Used by feedDecoderInputData to aggregate small buffers into
+ // one large buffer.
+ sp<ABuffer> mPendingAudioAccessUnit;
+ status_t mPendingAudioErr;
+ sp<ABuffer> mAggregateBuffer;
+
+ // mPendingBuffersToDrain is only for debugging. It can be removed
+ // when the power investigation is done.
+ size_t mPendingBuffersToDrain;
+ size_t mCachedBytes;
+ AString mComponentName;
+
+ bool isStaleReply(const sp<AMessage> &msg);
+ bool isDoneFetching() const;
+
+ status_t dequeueAccessUnit(sp<ABuffer> *accessUnit);
+ sp<ABuffer> aggregateBuffer(const sp<ABuffer> &accessUnit);
+ status_t fetchInputData(sp<AMessage> &reply);
+ void doFlush(bool notifyComplete);
+
+ void onInputBufferFetched(const sp<AMessage> &msg);
+ void onBufferConsumed(int32_t size);
+
+ DISALLOW_EVIL_CONSTRUCTORS(DecoderPassThrough);
+};
+
+} // namespace android
+
+#endif // NUPLAYER2_DECODER_PASS_THROUGH_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
new file mode 100644
index 0000000..03d17a5
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -0,0 +1,1089 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayer2Driver"
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <cutils/properties.h>
+
+#include "NuPlayer2Driver.h"
+
+#include "NuPlayer2.h"
+#include "NuPlayer2Source.h"
+
+#include <media/DataSourceDesc.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/MediaClock.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+static const int kDumpLockRetries = 50;
+static const int kDumpLockSleepUs = 20000;
+
+namespace android {
+
+struct ParcelWrapper : public RefBase {
+ static sp<ParcelWrapper> Create(const Parcel *p) {
+ if (p != NULL) {
+ sp<ParcelWrapper> pw = new ParcelWrapper();
+ if (pw->appendFrom(p) == OK) {
+ return pw;
+ }
+ }
+ return NULL;
+ }
+
+ const Parcel *getParcel() {
+ return mParcel;
+ }
+
+protected:
+ virtual ~ParcelWrapper() {
+ if (mParcel != NULL) {
+ delete mParcel;
+ }
+ }
+
+private:
+ ParcelWrapper()
+ : mParcel(NULL) { }
+
+ status_t appendFrom(const Parcel *p) {
+ if (mParcel == NULL) {
+ mParcel = new Parcel;
+ }
+ return mParcel->appendFrom(p, 0 /* start */, p->dataSize());
+ }
+
+ Parcel *mParcel;
+};
+
+// key for media statistics
+static const char *kKeyPlayer = "nuplayer";
+// attrs for media statistics
+ // NB: these are matched with public Java API constants defined
+ // in frameworks/base/media/java/android/media/MediaPlayer2.java
+ // These must be kept synchronized with the constants there.
+static const char *kPlayerVMime = "android.media.mediaplayer.video.mime";
+static const char *kPlayerVCodec = "android.media.mediaplayer.video.codec";
+static const char *kPlayerWidth = "android.media.mediaplayer.width";
+static const char *kPlayerHeight = "android.media.mediaplayer.height";
+static const char *kPlayerFrames = "android.media.mediaplayer.frames";
+static const char *kPlayerFramesDropped = "android.media.mediaplayer.dropped";
+static const char *kPlayerAMime = "android.media.mediaplayer.audio.mime";
+static const char *kPlayerACodec = "android.media.mediaplayer.audio.codec";
+static const char *kPlayerDuration = "android.media.mediaplayer.durationMs";
+static const char *kPlayerPlaying = "android.media.mediaplayer.playingMs";
+static const char *kPlayerError = "android.media.mediaplayer.err";
+static const char *kPlayerErrorCode = "android.media.mediaplayer.errcode";
+
+// NB: These are not yet exposed as public Java API constants.
+static const char *kPlayerErrorState = "android.media.mediaplayer.errstate";
+static const char *kPlayerDataSourceType = "android.media.mediaplayer.dataSource";
+//
+static const char *kPlayerRebuffering = "android.media.mediaplayer.rebufferingMs";
+static const char *kPlayerRebufferingCount = "android.media.mediaplayer.rebuffers";
+static const char *kPlayerRebufferingAtExit = "android.media.mediaplayer.rebufferExit";
+
+
+NuPlayer2Driver::NuPlayer2Driver(pid_t pid, uid_t uid)
+ : mState(STATE_IDLE),
+ mAsyncResult(UNKNOWN_ERROR),
+ mSrcId(0),
+ mSetSurfaceInProgress(false),
+ mDurationUs(-1),
+ mPositionUs(-1),
+ mSeekInProgress(false),
+ mPlayingTimeUs(0),
+ mRebufferingTimeUs(0),
+ mRebufferingEvents(0),
+ mRebufferingAtExit(false),
+ mLooper(new ALooper),
+ mNuPlayer2Looper(new ALooper),
+ mMediaClock(new MediaClock),
+ mPlayer(new NuPlayer2(pid, uid, mMediaClock)),
+ mPlayerFlags(0),
+ mAnalyticsItem(NULL),
+ mClientUid(uid),
+ mAtEOS(false),
+ mLooping(false),
+ mAutoLoop(false) {
+ ALOGD("NuPlayer2Driver(%p) created, clientPid(%d)", this, pid);
+ mLooper->setName("NuPlayer2Driver Looper");
+ mNuPlayer2Looper->setName("NuPlayer2 Looper");
+
+ mMediaClock->init();
+
+ // set up an analytics record
+ mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
+ mAnalyticsItem->setUid(mClientUid);
+
+ mNuPlayer2Looper->start(
+ false, /* runOnCallingThread */
+ true, /* canCallJava */
+ PRIORITY_AUDIO);
+
+ mNuPlayer2Looper->registerHandler(mPlayer);
+
+ mPlayer->setDriver(this);
+}
+
+NuPlayer2Driver::~NuPlayer2Driver() {
+ ALOGV("~NuPlayer2Driver(%p)", this);
+ mNuPlayer2Looper->stop();
+ mLooper->stop();
+
+ // finalize any pending metrics, usually a no-op.
+ updateMetrics("destructor");
+ logMetrics("destructor");
+
+ if (mAnalyticsItem != NULL) {
+ delete mAnalyticsItem;
+ mAnalyticsItem = NULL;
+ }
+}
+
+status_t NuPlayer2Driver::initCheck() {
+ mLooper->start(
+ false, /* runOnCallingThread */
+ true, /* canCallJava */
+ PRIORITY_AUDIO);
+
+ mLooper->registerHandler(this);
+ return OK;
+}
+
+status_t NuPlayer2Driver::setDataSource(const sp<DataSourceDesc> &dsd) {
+ ALOGV("setDataSource(%p)", this);
+ Mutex::Autolock autoLock(mLock);
+
+ if (mState != STATE_IDLE) {
+ return INVALID_OPERATION;
+ }
+
+ mSrcId = dsd->mId;
+ mState = STATE_SET_DATASOURCE_PENDING;
+
+ mPlayer->setDataSourceAsync(dsd);
+
+ while (mState == STATE_SET_DATASOURCE_PENDING) {
+ mCondition.wait(mLock);
+ }
+
+ return mAsyncResult;
+}
+
+status_t NuPlayer2Driver::prepareNextDataSource(const sp<DataSourceDesc> &dsd) {
+ ALOGV("prepareNextDataSource(%p)", this);
+ Mutex::Autolock autoLock(mLock);
+
+ mPlayer->prepareNextDataSourceAsync(dsd);
+
+ return OK;
+}
+
+status_t NuPlayer2Driver::playNextDataSource(int64_t srcId) {
+ ALOGV("playNextDataSource(%p)", this);
+ Mutex::Autolock autoLock(mLock);
+
+ mSrcId = srcId;
+ mPlayer->playNextDataSource(srcId);
+
+ return OK;
+}
+
+status_t NuPlayer2Driver::setVideoSurfaceTexture(const sp<ANativeWindowWrapper> &nww) {
+ ALOGV("setVideoSurfaceTexture(%p)", this);
+ Mutex::Autolock autoLock(mLock);
+
+ if (mSetSurfaceInProgress) {
+ return INVALID_OPERATION;
+ }
+
+ switch (mState) {
+ case STATE_SET_DATASOURCE_PENDING:
+ case STATE_RESET_IN_PROGRESS:
+ return INVALID_OPERATION;
+
+ default:
+ break;
+ }
+
+ mSetSurfaceInProgress = true;
+
+ mPlayer->setVideoSurfaceTextureAsync(nww);
+
+ while (mSetSurfaceInProgress) {
+ mCondition.wait(mLock);
+ }
+
+ return OK;
+}
+
+status_t NuPlayer2Driver::getBufferingSettings(BufferingSettings* buffering) {
+ ALOGV("getBufferingSettings(%p)", this);
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mState == STATE_IDLE) {
+ return INVALID_OPERATION;
+ }
+ }
+
+ return mPlayer->getBufferingSettings(buffering);
+}
+
+status_t NuPlayer2Driver::setBufferingSettings(const BufferingSettings& buffering) {
+ ALOGV("setBufferingSettings(%p)", this);
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mState == STATE_IDLE) {
+ return INVALID_OPERATION;
+ }
+ }
+
+ return mPlayer->setBufferingSettings(buffering);
+}
+
+status_t NuPlayer2Driver::prepareAsync() {
+ ALOGV("prepareAsync(%p)", this);
+ Mutex::Autolock autoLock(mLock);
+
+ switch (mState) {
+ case STATE_UNPREPARED:
+ mState = STATE_PREPARING;
+ mPlayer->prepareAsync();
+ return OK;
+ case STATE_STOPPED:
+ // this is really just paused. handle as seek to start
+ mAtEOS = false;
+ mState = STATE_STOPPED_AND_PREPARING;
+ mPlayer->seekToAsync(0, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ true /* needNotify */);
+ return OK;
+ default:
+ return INVALID_OPERATION;
+ };
+}
+
+status_t NuPlayer2Driver::start() {
+ ALOGD("start(%p), state is %d, eos is %d", this, mState, mAtEOS);
+ Mutex::Autolock autoLock(mLock);
+ return start_l();
+}
+
+status_t NuPlayer2Driver::start_l() {
+ switch (mState) {
+ case STATE_PAUSED:
+ case STATE_STOPPED_AND_PREPARED:
+ case STATE_PREPARED:
+ {
+ mPlayer->start();
+
+ // fall through
+ }
+
+ case STATE_RUNNING:
+ {
+ if (mAtEOS) {
+ mPlayer->seekToAsync(0);
+ mAtEOS = false;
+ mPositionUs = -1;
+ }
+ break;
+ }
+
+ default:
+ return INVALID_OPERATION;
+ }
+
+ mState = STATE_RUNNING;
+
+ return OK;
+}
+
+status_t NuPlayer2Driver::stop() {
+ ALOGD("stop(%p)", this);
+ Mutex::Autolock autoLock(mLock);
+
+ switch (mState) {
+ case STATE_RUNNING:
+ mPlayer->pause();
+ // fall through
+
+ case STATE_PAUSED:
+ mState = STATE_STOPPED;
+ //notifyListener_l(MEDIA2_STOPPED);
+ break;
+
+ case STATE_PREPARED:
+ case STATE_STOPPED:
+ case STATE_STOPPED_AND_PREPARING:
+ case STATE_STOPPED_AND_PREPARED:
+ mState = STATE_STOPPED;
+ break;
+
+ default:
+ return INVALID_OPERATION;
+ }
+
+ return OK;
+}
+
+status_t NuPlayer2Driver::pause() {
+ ALOGD("pause(%p)", this);
+ // The NuPlayerRenderer may get flushed if pause for long enough, e.g. the pause timeout tear
+ // down for audio offload mode. If that happens, the NuPlayerRenderer will no longer know the
+ // current position. So similar to seekTo, update |mPositionUs| to the pause position by calling
+ // getCurrentPosition here.
+ int64_t unused;
+ getCurrentPosition(&unused);
+
+ Mutex::Autolock autoLock(mLock);
+
+ switch (mState) {
+ case STATE_PAUSED:
+ case STATE_PREPARED:
+ return OK;
+
+ case STATE_RUNNING:
+ mState = STATE_PAUSED;
+ mPlayer->pause();
+ break;
+
+ default:
+ return INVALID_OPERATION;
+ }
+
+ return OK;
+}
+
+bool NuPlayer2Driver::isPlaying() {
+ return mState == STATE_RUNNING && !mAtEOS;
+}
+
+status_t NuPlayer2Driver::setPlaybackSettings(const AudioPlaybackRate &rate) {
+ status_t err = mPlayer->setPlaybackSettings(rate);
+ if (err == OK) {
+ // try to update position
+ int64_t unused;
+ getCurrentPosition(&unused);
+ Mutex::Autolock autoLock(mLock);
+ if (rate.mSpeed == 0.f && mState == STATE_RUNNING) {
+ mState = STATE_PAUSED;
+ } else if (rate.mSpeed != 0.f
+ && (mState == STATE_PAUSED
+ || mState == STATE_STOPPED_AND_PREPARED
+ || mState == STATE_PREPARED)) {
+ err = start_l();
+ }
+ }
+ return err;
+}
+
+status_t NuPlayer2Driver::getPlaybackSettings(AudioPlaybackRate *rate) {
+ return mPlayer->getPlaybackSettings(rate);
+}
+
+status_t NuPlayer2Driver::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
+ return mPlayer->setSyncSettings(sync, videoFpsHint);
+}
+
+status_t NuPlayer2Driver::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
+ return mPlayer->getSyncSettings(sync, videoFps);
+}
+
+status_t NuPlayer2Driver::seekTo(int64_t msec, MediaPlayer2SeekMode mode) {
+ ALOGD("seekTo(%p) (%lld ms, %d) at state %d", this, (long long)msec, mode, mState);
+ Mutex::Autolock autoLock(mLock);
+
+ int64_t seekTimeUs = msec * 1000ll;
+
+ switch (mState) {
+ case STATE_PREPARED:
+ case STATE_STOPPED_AND_PREPARED:
+ case STATE_PAUSED:
+ case STATE_RUNNING:
+ {
+ mAtEOS = false;
+ mSeekInProgress = true;
+ mPlayer->seekToAsync(seekTimeUs, mode, true /* needNotify */);
+ break;
+ }
+
+ default:
+ return INVALID_OPERATION;
+ }
+
+ mPositionUs = seekTimeUs;
+ return OK;
+}
+
+status_t NuPlayer2Driver::getCurrentPosition(int64_t *msec) {
+ int64_t tempUs = 0;
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mSeekInProgress || (mState == STATE_PAUSED && !mAtEOS)) {
+ tempUs = (mPositionUs <= 0) ? 0 : mPositionUs;
+ *msec = divRound(tempUs, (int64_t)(1000));
+ return OK;
+ }
+ }
+
+ status_t ret = mPlayer->getCurrentPosition(&tempUs);
+
+ Mutex::Autolock autoLock(mLock);
+ // We need to check mSeekInProgress here because mPlayer->seekToAsync is an async call, which
+ // means getCurrentPosition can be called before the seek is completed. In other words, the
+ // renderer may return a position value that's different from the seek-to position.
+ if (ret != OK) {
+ tempUs = (mPositionUs <= 0) ? 0 : mPositionUs;
+ } else {
+ mPositionUs = tempUs;
+ }
+ *msec = divRound(tempUs, (int64_t)(1000));
+ return OK;
+}
+
+status_t NuPlayer2Driver::getDuration(int64_t *msec) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mDurationUs < 0) {
+ return UNKNOWN_ERROR;
+ }
+
+ *msec = (mDurationUs + 500ll) / 1000;
+
+ return OK;
+}
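Both position and duration are kept internally in microseconds and only rounded at the API boundary: getDuration() rounds half-up with (us + 500) / 1000, and getCurrentPosition() goes through divRound() from AUtils.h. A hedged sketch of the same rounding; the divRoundNearest helper below is an illustrative stand-in, not the actual AUtils implementation:

#include <cassert>
#include <cstdint>

// Round a non-negative microsecond value to the nearest millisecond,
// the same way getDuration() does above.
int64_t usToMsHalfUp(int64_t us) {
    return (us + 500) / 1000;
}

// Illustrative stand-in for divRound(): round to nearest, ties away from zero.
int64_t divRoundNearest(int64_t num, int64_t den) {
    return (num >= 0) ? (num + den / 2) / den : (num - den / 2) / den;
}

int main() {
    assert(usToMsHalfUp(1499) == 1);
    assert(usToMsHalfUp(1500) == 2);
    assert(divRoundNearest(1500, 1000) == 2);
    assert(divRoundNearest(-1500, 1000) == -2);
    return 0;
}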
+
+void NuPlayer2Driver::updateMetrics(const char *where) {
+ if (where == NULL) {
+ where = "unknown";
+ }
+ ALOGV("updateMetrics(%p) from %s at state %d", this, where, mState);
+
+ // gather the final stats for this record
+ Vector<sp<AMessage>> trackStats;
+ mPlayer->getStats(&trackStats);
+
+ if (trackStats.size() > 0) {
+ for (size_t i = 0; i < trackStats.size(); ++i) {
+ const sp<AMessage> &stats = trackStats.itemAt(i);
+
+ AString mime;
+ stats->findString("mime", &mime);
+
+ AString name;
+ stats->findString("component-name", &name);
+
+ if (mime.startsWith("video/")) {
+ int32_t width, height;
+ mAnalyticsItem->setCString(kPlayerVMime, mime.c_str());
+ if (!name.empty()) {
+ mAnalyticsItem->setCString(kPlayerVCodec, name.c_str());
+ }
+
+ if (stats->findInt32("width", &width)
+ && stats->findInt32("height", &height)) {
+ mAnalyticsItem->setInt32(kPlayerWidth, width);
+ mAnalyticsItem->setInt32(kPlayerHeight, height);
+ }
+
+ int64_t numFramesTotal = 0;
+ int64_t numFramesDropped = 0;
+ stats->findInt64("frames-total", &numFramesTotal);
+ stats->findInt64("frames-dropped-output", &numFramesDropped);
+
+ mAnalyticsItem->setInt64(kPlayerFrames, numFramesTotal);
+ mAnalyticsItem->setInt64(kPlayerFramesDropped, numFramesDropped);
+
+
+ } else if (mime.startsWith("audio/")) {
+ mAnalyticsItem->setCString(kPlayerAMime, mime.c_str());
+ if (!name.empty()) {
+ mAnalyticsItem->setCString(kPlayerACodec, name.c_str());
+ }
+ }
+ }
+ }
+
+ // always provide duration and playing time, even if they have 0/unknown values.
+
+ // getDuration() uses mLock for mutex -- careful where we use it.
+ int64_t duration_ms = -1;
+ getDuration(&duration_ms);
+ mAnalyticsItem->setInt64(kPlayerDuration, duration_ms);
+
+ mAnalyticsItem->setInt64(kPlayerPlaying, (mPlayingTimeUs+500)/1000 );
+
+ if (mRebufferingEvents != 0) {
+ mAnalyticsItem->setInt64(kPlayerRebuffering, (mRebufferingTimeUs+500)/1000 );
+ mAnalyticsItem->setInt32(kPlayerRebufferingCount, mRebufferingEvents);
+ mAnalyticsItem->setInt32(kPlayerRebufferingAtExit, mRebufferingAtExit);
+ }
+
+ mAnalyticsItem->setCString(kPlayerDataSourceType, mPlayer->getDataSourceType());
+}
+
+
+void NuPlayer2Driver::logMetrics(const char *where) {
+ if (where == NULL) {
+ where = "unknown";
+ }
+ ALOGV("logMetrics(%p) from %s at state %d", this, where, mState);
+
+ if (mAnalyticsItem == NULL || mAnalyticsItem->isEnabled() == false) {
+ return;
+ }
+
+ // Log only non-empty records. updateMetrics() always runs before we get here
+ // and always injects 3 fields (duration, playing time, and data source type)
+ // into the record, so the canonical "empty" record has 3 elements in it.
+ if (mAnalyticsItem->count() > 3) {
+
+ mAnalyticsItem->selfrecord();
+
+ // re-init in case we prepare() and start() again.
+ delete mAnalyticsItem;
+ mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
+ if (mAnalyticsItem) {
+ mAnalyticsItem->setUid(mClientUid);
+ }
+ } else {
+ ALOGV("did not have anything to record");
+ }
+}
+
+status_t NuPlayer2Driver::reset() {
+ ALOGD("reset(%p) at state %d", this, mState);
+
+ updateMetrics("reset");
+ logMetrics("reset");
+
+ Mutex::Autolock autoLock(mLock);
+
+ switch (mState) {
+ case STATE_IDLE:
+ return OK;
+
+ case STATE_SET_DATASOURCE_PENDING:
+ case STATE_RESET_IN_PROGRESS:
+ return INVALID_OPERATION;
+
+ case STATE_PREPARING:
+ {
+ notifyListener_l(mSrcId, MEDIA2_PREPARED);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (mState != STATE_STOPPED) {
+ // notifyListener_l(MEDIA2_STOPPED);
+ }
+
+ mState = STATE_RESET_IN_PROGRESS;
+ mPlayer->resetAsync();
+
+ while (mState == STATE_RESET_IN_PROGRESS) {
+ mCondition.wait(mLock);
+ }
+
+ mDurationUs = -1;
+ mPositionUs = -1;
+ mLooping = false;
+ mPlayingTimeUs = 0;
+ mRebufferingTimeUs = 0;
+ mRebufferingEvents = 0;
+ mRebufferingAtExit = false;
+
+ return OK;
+}
+
+status_t NuPlayer2Driver::notifyAt(int64_t mediaTimeUs) {
+ ALOGV("notifyAt(%p), time:%lld", this, (long long)mediaTimeUs);
+ return mPlayer->notifyAt(mediaTimeUs);
+}
+
+status_t NuPlayer2Driver::setLooping(int loop) {
+ mLooping = loop != 0;
+ return OK;
+}
+
+status_t NuPlayer2Driver::invoke(const Parcel &request, Parcel *reply) {
+ if (reply == NULL) {
+ ALOGE("reply is a NULL pointer");
+ return BAD_VALUE;
+ }
+
+ int32_t methodId;
+ status_t ret = request.readInt32(&methodId);
+ if (ret != OK) {
+ ALOGE("Failed to retrieve the requested method to invoke, err(%d)", ret);
+ return ret;
+ }
+
+ switch (methodId) {
+ case MEDIA_PLAYER2_INVOKE_ID_SET_VIDEO_SCALING_MODE:
+ {
+ int mode = request.readInt32();
+ return mPlayer->setVideoScalingMode(mode);
+ }
+
+ case MEDIA_PLAYER2_INVOKE_ID_GET_TRACK_INFO:
+ {
+ return mPlayer->getTrackInfo(reply);
+ }
+
+ case MEDIA_PLAYER2_INVOKE_ID_SELECT_TRACK:
+ {
+ int trackIndex = request.readInt32();
+ int64_t msec = 0;
+ // getCurrentPosition should always return OK
+ getCurrentPosition(&msec);
+ return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000ll);
+ }
+
+ case MEDIA_PLAYER2_INVOKE_ID_UNSELECT_TRACK:
+ {
+ int trackIndex = request.readInt32();
+ return mPlayer->selectTrack(trackIndex, false /* select */, 0xdeadbeef /* not used */);
+ }
+
+ case MEDIA_PLAYER2_INVOKE_ID_GET_SELECTED_TRACK:
+ {
+ int32_t type = request.readInt32();
+ return mPlayer->getSelectedTrack(type, reply);
+ }
+
+ default:
+ {
+ return INVALID_OPERATION;
+ }
+ }
+}
+
+void NuPlayer2Driver::setAudioSink(const sp<AudioSink> &audioSink) {
+ mPlayer->setAudioSink(audioSink);
+ mAudioSink = audioSink;
+}
+
+status_t NuPlayer2Driver::setParameter(
+ int /* key */, const Parcel & /* request */) {
+ return INVALID_OPERATION;
+}
+
+status_t NuPlayer2Driver::getParameter(int key, Parcel *reply) {
+
+ if (key == FOURCC('m','t','r','X')) {
+ // mtrX -- a play on 'metrics' (not matrix)
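+ // (FOURCC packs the four characters into a single int32 key; the exact byte order
+ // depends on how the FOURCC macro is defined in the foundation headers.)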
+ // gather current info all together, parcel it, and send it back
+ updateMetrics("api");
+ mAnalyticsItem->writeToParcel(reply);
+ return OK;
+ }
+
+ return INVALID_OPERATION;
+}
+
+status_t NuPlayer2Driver::getMetadata(
+ const media::Metadata::Filter& /* ids */, Parcel *records) {
+ Mutex::Autolock autoLock(mLock);
+
+ using media::Metadata;
+
+ Metadata meta(records);
+
+ meta.appendBool(
+ Metadata::kPauseAvailable,
+ mPlayerFlags & NuPlayer2::Source::FLAG_CAN_PAUSE);
+
+ meta.appendBool(
+ Metadata::kSeekBackwardAvailable,
+ mPlayerFlags & NuPlayer2::Source::FLAG_CAN_SEEK_BACKWARD);
+
+ meta.appendBool(
+ Metadata::kSeekForwardAvailable,
+ mPlayerFlags & NuPlayer2::Source::FLAG_CAN_SEEK_FORWARD);
+
+ meta.appendBool(
+ Metadata::kSeekAvailable,
+ mPlayerFlags & NuPlayer2::Source::FLAG_CAN_SEEK);
+
+ return OK;
+}
+
+void NuPlayer2Driver::notifyResetComplete(int64_t /* srcId */) {
+ ALOGD("notifyResetComplete(%p)", this);
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK_EQ(mState, STATE_RESET_IN_PROGRESS);
+ mState = STATE_IDLE;
+ mCondition.broadcast();
+}
+
+void NuPlayer2Driver::notifySetSurfaceComplete(int64_t /* srcId */) {
+ ALOGV("notifySetSurfaceComplete(%p)", this);
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK(mSetSurfaceInProgress);
+ mSetSurfaceInProgress = false;
+
+ mCondition.broadcast();
+}
+
+void NuPlayer2Driver::notifyDuration(int64_t /* srcId */, int64_t durationUs) {
+ Mutex::Autolock autoLock(mLock);
+ mDurationUs = durationUs;
+}
+
+void NuPlayer2Driver::notifyMorePlayingTimeUs(int64_t /* srcId */, int64_t playingUs) {
+ Mutex::Autolock autoLock(mLock);
+ mPlayingTimeUs += playingUs;
+}
+
+void NuPlayer2Driver::notifyMoreRebufferingTimeUs(int64_t /* srcId */, int64_t rebufferingUs) {
+ Mutex::Autolock autoLock(mLock);
+ mRebufferingTimeUs += rebufferingUs;
+ mRebufferingEvents++;
+}
+
+void NuPlayer2Driver::notifyRebufferingWhenExit(int64_t /* srcId */, bool status) {
+ Mutex::Autolock autoLock(mLock);
+ mRebufferingAtExit = status;
+}
+
+void NuPlayer2Driver::notifySeekComplete(int64_t srcId) {
+ ALOGV("notifySeekComplete(%p)", this);
+ Mutex::Autolock autoLock(mLock);
+ mSeekInProgress = false;
+ notifySeekComplete_l(srcId);
+}
+
+void NuPlayer2Driver::notifySeekComplete_l(int64_t srcId) {
+ bool wasSeeking = true;
+ if (mState == STATE_STOPPED_AND_PREPARING) {
+ wasSeeking = false;
+ mState = STATE_STOPPED_AND_PREPARED;
+ mCondition.broadcast();
+ } else if (mState == STATE_STOPPED) {
+ // no need to notify listener
+ return;
+ }
+ notifyListener_l(srcId, wasSeeking ? MEDIA2_SEEK_COMPLETE : MEDIA2_PREPARED);
+}
+
+status_t NuPlayer2Driver::dump(
+ int fd, const Vector<String16> & /* args */) const {
+
+ Vector<sp<AMessage> > trackStats;
+ mPlayer->getStats(&trackStats);
+
+ AString logString(" NuPlayer2\n");
+ char buf[256] = {0};
+
+ bool locked = false;
+ for (int i = 0; i < kDumpLockRetries; ++i) {
+ if (mLock.tryLock() == NO_ERROR) {
+ locked = true;
+ break;
+ }
+ usleep(kDumpLockSleepUs);
+ }
+
+ if (locked) {
+ snprintf(buf, sizeof(buf), " state(%d), atEOS(%d), looping(%d), autoLoop(%d)\n",
+ mState, mAtEOS, mLooping, mAutoLoop);
+ mLock.unlock();
+ } else {
+ snprintf(buf, sizeof(buf), " NPD(%p) lock is taken\n", this);
+ }
+ logString.append(buf);
+
+ for (size_t i = 0; i < trackStats.size(); ++i) {
+ const sp<AMessage> &stats = trackStats.itemAt(i);
+
+ AString mime;
+ if (stats->findString("mime", &mime)) {
+ snprintf(buf, sizeof(buf), " mime(%s)\n", mime.c_str());
+ logString.append(buf);
+ }
+
+ AString name;
+ if (stats->findString("component-name", &name)) {
+ snprintf(buf, sizeof(buf), " decoder(%s)\n", name.c_str());
+ logString.append(buf);
+ }
+
+ if (mime.startsWith("video/")) {
+ int32_t width, height;
+ if (stats->findInt32("width", &width)
+ && stats->findInt32("height", &height)) {
+ snprintf(buf, sizeof(buf), " resolution(%d x %d)\n", width, height);
+ logString.append(buf);
+ }
+
+ int64_t numFramesTotal = 0;
+ int64_t numFramesDropped = 0;
+
+ stats->findInt64("frames-total", &numFramesTotal);
+ stats->findInt64("frames-dropped-output", &numFramesDropped);
+ snprintf(buf, sizeof(buf), " numFramesTotal(%lld), numFramesDropped(%lld), "
+ "percentageDropped(%.2f%%)\n",
+ (long long)numFramesTotal,
+ (long long)numFramesDropped,
+ numFramesTotal == 0
+ ? 0.0 : (double)(numFramesDropped * 100) / numFramesTotal);
+ logString.append(buf);
+ }
+ }
+
+ ALOGI("%s", logString.c_str());
+
+ if (fd >= 0) {
+ FILE *out = fdopen(dup(fd), "w");
+ if (out != NULL) { // dup() or fdopen() may fail; don't write through a NULL stream
+ fprintf(out, "%s", logString.c_str());
+ fclose(out);
+ out = NULL;
+ }
+ }
+
+ return OK;
+}
+
+void NuPlayer2Driver::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatNotifyListener: {
+ int64_t srcId;
+ int32_t msgId;
+ int32_t ext1 = 0;
+ int32_t ext2 = 0;
+ CHECK(msg->findInt64("srcId", &srcId));
+ CHECK(msg->findInt32("messageId", &msgId));
+ msg->findInt32("ext1", &ext1);
+ msg->findInt32("ext2", &ext2);
+ sp<ParcelWrapper> in;
+ sp<RefBase> obj;
+ if (msg->findObject("parcel", &obj) && obj != NULL) {
+ in = static_cast<ParcelWrapper *>(obj.get());
+ }
+ sendEvent(srcId, msgId, ext1, ext2, (in == NULL ? NULL : in->getParcel()));
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+void NuPlayer2Driver::notifyListener(
+ int64_t srcId, int msg, int ext1, int ext2, const Parcel *in) {
+ Mutex::Autolock autoLock(mLock);
+ notifyListener_l(srcId, msg, ext1, ext2, in);
+}
+
+void NuPlayer2Driver::notifyListener_l(
+ int64_t srcId, int msg, int ext1, int ext2, const Parcel *in) {
+ ALOGD("notifyListener_l(%p), (%lld, %d, %d, %d, %d), loop setting(%d, %d)",
+ this, (long long)srcId, msg, ext1, ext2,
+ (in == NULL ? -1 : (int)in->dataSize()), mAutoLoop, mLooping);
+ if (srcId == mSrcId) {
+ switch (msg) {
+ case MEDIA2_PLAYBACK_COMPLETE:
+ {
+ if (mState != STATE_RESET_IN_PROGRESS) {
+ if (mAutoLoop) {
+ audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
+ if (mAudioSink != NULL) {
+ streamType = mAudioSink->getAudioStreamType();
+ }
+ if (streamType == AUDIO_STREAM_NOTIFICATION) {
+ ALOGW("disabling auto-loop for notification");
+ mAutoLoop = false;
+ }
+ }
+ if (mLooping || mAutoLoop) {
+ mPlayer->seekToAsync(0);
+ if (mAudioSink != NULL) {
+ // The renderer has stopped the sink at the end in order to play out
+ // the last little bit of audio. In looping mode, we need to restart it.
+ mAudioSink->start();
+ }
+ // don't send completion event when looping
+ return;
+ }
+ if (property_get_bool("persist.debug.sf.stats", false)) {
+ Vector<String16> args;
+ dump(-1, args);
+ }
+ mPlayer->pause();
+ mState = STATE_PAUSED;
+ }
+ // fall through
+ }
+
+ case MEDIA2_ERROR:
+ {
+ // when we have an error, add it to the analytics for this playback.
+ // ext1 is our primary 'error type' value. Only add ext2 when non-zero.
+ // (The check against msg is needed because of the fall-through from the previous case.)
+ if (msg == MEDIA2_ERROR) {
+ mAnalyticsItem->setInt32(kPlayerError, ext1);
+ if (ext2 != 0) {
+ mAnalyticsItem->setInt32(kPlayerErrorCode, ext2);
+ }
+ mAnalyticsItem->setCString(kPlayerErrorState, stateString(mState).c_str());
+ }
+ mAtEOS = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ sp<AMessage> notify = new AMessage(kWhatNotifyListener, this);
+ notify->setInt64("srcId", srcId);
+ notify->setInt32("messageId", msg);
+ notify->setInt32("ext1", ext1);
+ notify->setInt32("ext2", ext2);
+ notify->setObject("parcel", ParcelWrapper::Create(in));
+ notify->post();
+}
+
+void NuPlayer2Driver::notifySetDataSourceCompleted(int64_t /* srcId */, status_t err) {
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK_EQ(mState, STATE_SET_DATASOURCE_PENDING);
+
+ mAsyncResult = err;
+ mState = (err == OK) ? STATE_UNPREPARED : STATE_IDLE;
+ mCondition.broadcast();
+}
+
+void NuPlayer2Driver::notifyPrepareCompleted(int64_t srcId, status_t err) {
+ ALOGV("notifyPrepareCompleted %d", err);
+
+ Mutex::Autolock autoLock(mLock);
+
+ if (srcId != mSrcId) {
+ if (err == OK) {
+ notifyListener_l(srcId, MEDIA2_PREPARED);
+ } else {
+ notifyListener_l(srcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
+ }
+ return;
+ }
+
+ if (mState != STATE_PREPARING) {
+ // We were preparing asynchronously when the client called reset();
+ // we sent a premature "prepared" notification and then initiated
+ // the reset. This notification is stale.
+ CHECK(mState == STATE_RESET_IN_PROGRESS || mState == STATE_IDLE);
+ return;
+ }
+
+ CHECK_EQ(mState, STATE_PREPARING);
+
+ mAsyncResult = err;
+
+ if (err == OK) {
+ // update state before notifying client, so that if client calls back into NuPlayer2Driver
+ // in response, NuPlayer2Driver has the right state
+ mState = STATE_PREPARED;
+ notifyListener_l(srcId, MEDIA2_PREPARED);
+ } else {
+ mState = STATE_UNPREPARED;
+ notifyListener_l(srcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
+ }
+
+ sp<MetaData> meta = mPlayer->getFileMeta();
+ int32_t loop;
+ if (meta != NULL
+ && meta->findInt32(kKeyAutoLoop, &loop) && loop != 0) {
+ mAutoLoop = true;
+ }
+
+ mCondition.broadcast();
+}
+
+void NuPlayer2Driver::notifyFlagsChanged(int64_t /* srcId */, uint32_t flags) {
+ Mutex::Autolock autoLock(mLock);
+
+ mPlayerFlags = flags;
+}
+
+// Modular DRM
+status_t NuPlayer2Driver::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
+{
+ ALOGV("prepareDrm(%p) state: %d", this, mState);
+
+ // leaving the state verification for mediaplayer.cpp
+ status_t ret = mPlayer->prepareDrm(uuid, drmSessionId);
+
+ ALOGV("prepareDrm ret: %d", ret);
+
+ return ret;
+}
+
+status_t NuPlayer2Driver::releaseDrm()
+{
+ ALOGV("releaseDrm(%p) state: %d", this, mState);
+
+ // leaving the state verification for mediaplayer.cpp
+ status_t ret = mPlayer->releaseDrm();
+
+ ALOGV("releaseDrm ret: %d", ret);
+
+ return ret;
+}
+
+std::string NuPlayer2Driver::stateString(State state) {
+ const char *rval = NULL;
+ char rawbuffer[16]; // allows "%d"
+
+ switch (state) {
+ case STATE_IDLE: rval = "IDLE"; break;
+ case STATE_SET_DATASOURCE_PENDING: rval = "SET_DATASOURCE_PENDING"; break;
+ case STATE_UNPREPARED: rval = "UNPREPARED"; break;
+ case STATE_PREPARING: rval = "PREPARING"; break;
+ case STATE_PREPARED: rval = "PREPARED"; break;
+ case STATE_RUNNING: rval = "RUNNING"; break;
+ case STATE_PAUSED: rval = "PAUSED"; break;
+ case STATE_RESET_IN_PROGRESS: rval = "RESET_IN_PROGRESS"; break;
+ case STATE_STOPPED: rval = "STOPPED"; break;
+ case STATE_STOPPED_AND_PREPARING: rval = "STOPPED_AND_PREPARING"; break;
+ case STATE_STOPPED_AND_PREPARED: rval = "STOPPED_AND_PREPARED"; break;
+ default:
+ // unnamed state: format the numeric value (rawbuffer is a stack local that is
+ // copied into the returned std::string)
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", state);
+ rval = rawbuffer;
+ break;
+ }
+
+ return rval;
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
new file mode 100644
index 0000000..4da2566
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <mediaplayer2/MediaPlayer2Interface.h>
+
+#include <media/MediaAnalyticsItem.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct ALooper;
+struct MediaClock;
+struct NuPlayer2;
+
+struct NuPlayer2Driver : public MediaPlayer2Interface {
+ explicit NuPlayer2Driver(pid_t pid, uid_t uid);
+
+ virtual status_t initCheck() override;
+
+ virtual status_t setDataSource(const sp<DataSourceDesc> &dsd) override;
+ virtual status_t prepareNextDataSource(const sp<DataSourceDesc> &dsd) override;
+ virtual status_t playNextDataSource(int64_t srcId) override;
+
+ virtual status_t setVideoSurfaceTexture(const sp<ANativeWindowWrapper> &nww) override;
+
+ virtual status_t getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
+ virtual status_t prepareAsync();
+ virtual status_t start();
+ virtual status_t stop();
+ virtual status_t pause();
+ virtual bool isPlaying();
+ virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+ virtual status_t getPlaybackSettings(AudioPlaybackRate *rate);
+ virtual status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
+ virtual status_t getSyncSettings(AVSyncSettings *sync, float *videoFps);
+ virtual status_t seekTo(
+ int64_t msec, MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC);
+ virtual status_t getCurrentPosition(int64_t *msec);
+ virtual status_t getDuration(int64_t *msec);
+ virtual status_t reset();
+ virtual status_t notifyAt(int64_t mediaTimeUs) override;
+ virtual status_t setLooping(int loop);
+ virtual status_t invoke(const Parcel &request, Parcel *reply);
+ virtual void setAudioSink(const sp<AudioSink> &audioSink);
+ virtual status_t setParameter(int key, const Parcel &request);
+ virtual status_t getParameter(int key, Parcel *reply);
+
+ virtual status_t getMetadata(
+ const media::Metadata::Filter& ids, Parcel *records);
+
+ virtual status_t dump(int fd, const Vector<String16> &args) const;
+
+ virtual void onMessageReceived(const sp<AMessage> &msg) override;
+
+ void notifySetDataSourceCompleted(int64_t srcId, status_t err);
+ void notifyPrepareCompleted(int64_t srcId, status_t err);
+ void notifyResetComplete(int64_t srcId);
+ void notifySetSurfaceComplete(int64_t srcId);
+ void notifyDuration(int64_t srcId, int64_t durationUs);
+ void notifyMorePlayingTimeUs(int64_t srcId, int64_t timeUs);
+ void notifyMoreRebufferingTimeUs(int64_t srcId, int64_t timeUs);
+ void notifyRebufferingWhenExit(int64_t srcId, bool status);
+ void notifySeekComplete(int64_t srcId);
+ void notifySeekComplete_l(int64_t srcId);
+ void notifyListener(int64_t srcId, int msg, int ext1 = 0, int ext2 = 0,
+ const Parcel *in = NULL);
+ void notifyFlagsChanged(int64_t srcId, uint32_t flags);
+
+ // Modular DRM
+ virtual status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
+ virtual status_t releaseDrm();
+
+protected:
+ virtual ~NuPlayer2Driver();
+
+private:
+ enum State {
+ STATE_IDLE,
+ STATE_SET_DATASOURCE_PENDING,
+ STATE_UNPREPARED,
+ STATE_PREPARING,
+ STATE_PREPARED,
+ STATE_RUNNING,
+ STATE_PAUSED,
+ STATE_RESET_IN_PROGRESS,
+ STATE_STOPPED, // equivalent to PAUSED
+ STATE_STOPPED_AND_PREPARING, // equivalent to PAUSED, but seeking
+ STATE_STOPPED_AND_PREPARED, // equivalent to PAUSED, but seek complete
+ };
+
+ std::string stateString(State state);
+
+ enum {
+ kWhatNotifyListener,
+ };
+
+ mutable Mutex mLock;
+ Condition mCondition;
+
+ State mState;
+
+ status_t mAsyncResult;
+
+ // The following are protected through "mLock"
+ // >>>
+ int64_t mSrcId;
+ bool mSetSurfaceInProgress;
+ int64_t mDurationUs;
+ int64_t mPositionUs;
+ bool mSeekInProgress;
+ int64_t mPlayingTimeUs;
+ int64_t mRebufferingTimeUs;
+ int32_t mRebufferingEvents;
+ bool mRebufferingAtExit;
+ // <<<
+
+ sp<ALooper> mLooper;
+ sp<ALooper> mNuPlayer2Looper;
+ const sp<MediaClock> mMediaClock;
+ const sp<NuPlayer2> mPlayer;
+ sp<AudioSink> mAudioSink;
+ uint32_t mPlayerFlags;
+
+ MediaAnalyticsItem *mAnalyticsItem;
+ uid_t mClientUid;
+
+ bool mAtEOS;
+ bool mLooping;
+ bool mAutoLoop;
+
+ void updateMetrics(const char *where);
+ void logMetrics(const char *where);
+
+ status_t start_l();
+ void notifyListener_l(int64_t srcId, int msg, int ext1 = 0, int ext2 = 0,
+ const Parcel *in = NULL);
+
+ DISALLOW_EVIL_CONSTRUCTORS(NuPlayer2Driver);
+};
+
+} // namespace android
+
+
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.cpp
new file mode 100644
index 0000000..4853ae1
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayer2Drm"
+
+#include "NuPlayer2Drm.h"
+
+#include <media/NdkWrapper.h>
+#include <utils/Log.h>
+#include <sstream>
+
+namespace android {
+
+Vector<DrmUUID> NuPlayer2Drm::parsePSSH(const void *pssh, size_t psshsize)
+{
+ Vector<DrmUUID> drmSchemes, empty;
+ const int DATALEN_SIZE = 4;
+
+ // the format of the buffer is 1 or more of:
+ // {
+ // 16 byte uuid
+ // 4 byte data length N
+ // N bytes of data
+ // }
+ // Determine the number of entries in the source data.
+ // Since we got the data from stagefright, we trust it is valid and properly formatted.
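+ // For illustration, a single-entry blob with psshsize == 44 would lay out as:
+ //   bytes [0..15]  scheme uuid
+ //   bytes [16..19] data length N == 24 (read in native byte order below)
+ //   bytes [20..43] the N bytes of scheme-specific data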
+
+ const uint8_t *data = (const uint8_t*)pssh;
+ size_t len = psshsize;
+ size_t numentries = 0;
+ while (len > 0) {
+ if (len < DrmUUID::UUID_SIZE) {
+ ALOGE("ParsePSSH: invalid PSSH data");
+ return empty;
+ }
+
+ const uint8_t *uuidPtr = data;
+
+ // skip uuid
+ data += DrmUUID::UUID_SIZE;
+ len -= DrmUUID::UUID_SIZE;
+
+ // get data length
+ if (len < DATALEN_SIZE) {
+ ALOGE("ParsePSSH: invalid PSSH data");
+ return empty;
+ }
+
+ uint32_t datalen = *((uint32_t*)data);
+ data += DATALEN_SIZE;
+ len -= DATALEN_SIZE;
+
+ if (len < datalen) {
+ ALOGE("ParsePSSH: invalid PSSH data");
+ return empty;
+ }
+
+ // remember where this entry's payload starts, then skip past it
+ const uint8_t *entryData = data;
+ data += datalen;
+ len -= datalen;
+
+ DrmUUID _uuid(uuidPtr);
+ drmSchemes.add(_uuid);
+
+ ALOGV("ParsePSSH[%zu]: %s: %s", numentries,
+ _uuid.toHexString().string(),
+ DrmUUID::arrayToHex(entryData, datalen).string()
+ );
+
+ numentries++;
+ }
+
+ return drmSchemes;
+}
+
+Vector<DrmUUID> NuPlayer2Drm::getSupportedDrmSchemes(const void *pssh, size_t psshsize)
+{
+ Vector<DrmUUID> psshDRMs = parsePSSH(pssh, psshsize);
+
+ Vector<DrmUUID> supportedDRMs;
+ for (size_t i = 0; i < psshDRMs.size(); i++) {
+ DrmUUID uuid = psshDRMs[i];
+ if (AMediaDrmWrapper::isCryptoSchemeSupported(uuid.ptr(), NULL)) {
+ supportedDRMs.add(uuid);
+ }
+ }
+
+ ALOGV("getSupportedDrmSchemes: psshDRMs: %zu supportedDRMs: %zu",
+ psshDRMs.size(), supportedDRMs.size());
+
+ return supportedDRMs;
+}
+
+sp<ABuffer> NuPlayer2Drm::retrieveDrmInfo(const void *pssh, uint32_t psshsize)
+{
+ std::ostringstream buf;
+
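+ // The resulting buffer layout, matching the writes below:
+ //   [u32 psshsize][psshsize bytes of PSSH][u32 scheme count]
+ //   then, per supported scheme: [u32 scheme count][16-byte uuid]
+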
+ // 1) PSSH bytes
+ buf.write(reinterpret_cast<const char *>(&psshsize), sizeof(psshsize));
+ buf.write(reinterpret_cast<const char *>(pssh), psshsize);
+
+ ALOGV("retrieveDrmInfo: MEDIA2_DRM_INFO PSSH: size: %u %s", psshsize,
+ DrmUUID::arrayToHex((uint8_t*)pssh, psshsize).string());
+
+ // 2) supportedDRMs
+ Vector<DrmUUID> supportedDRMs = getSupportedDrmSchemes(pssh, psshsize);
+ uint32_t n = supportedDRMs.size();
+ buf.write(reinterpret_cast<char *>(&n), sizeof(n));
+ for (size_t i = 0; i < n; i++) {
+ DrmUUID uuid = supportedDRMs[i];
+ buf.write(reinterpret_cast<const char *>(&n), sizeof(n));
+ buf.write(reinterpret_cast<const char *>(uuid.ptr()), DrmUUID::UUID_SIZE);
+
+ ALOGV("retrieveDrmInfo: MEDIA2_DRM_INFO supportedScheme[%zu] %s", i,
+ uuid.toHexString().string());
+ }
+
+ sp<ABuffer> drmInfoBuffer = ABuffer::CreateAsCopy(buf.str().c_str(), buf.tellp());
+ return drmInfoBuffer;
+}
+
+sp<ABuffer> NuPlayer2Drm::retrieveDrmInfo(PsshInfo *psshInfo)
+{
+
+ std::ostringstream pssh, drmInfo;
+
+ // 0) Generate PSSH bytes
+ for (size_t i = 0; i < psshInfo->numentries; i++) {
+ PsshEntry *entry = &psshInfo->entries[i];
+ uint32_t datalen = entry->datalen;
+ pssh.write(reinterpret_cast<const char *>(&entry->uuid), sizeof(entry->uuid));
+ pssh.write(reinterpret_cast<const char *>(&datalen), sizeof(datalen));
+ pssh.write(reinterpret_cast<const char *>(entry->data), datalen);
+ }
+
+ uint32_t psshSize = pssh.tellp();
+ // keep the stream contents in a named string; taking c_str() of the temporary
+ // returned by pssh.str() would leave the pointer dangling by the next statement
+ const std::string psshStr = pssh.str();
+ const uint8_t* psshPtr = reinterpret_cast<const uint8_t*>(psshStr.c_str());
+ ALOGV("retrieveDrmInfo: MEDIA_DRM_INFO PSSH: size: %u %s", psshSize,
+ DrmUUID::arrayToHex(psshPtr, psshSize).string());
+
+ // 1) Write PSSH bytes
+ drmInfo.write(reinterpret_cast<const char *>(&psshSize), sizeof(psshSize));
+ drmInfo.write(reinterpret_cast<const char *>(pssh.str().c_str()), psshSize);
+
+ // 2) Write supportedDRMs
+ uint32_t numentries = psshInfo->numentries;
+ drmInfo.write(reinterpret_cast<const char *>(&numentries), sizeof(numentries));
+ for (size_t i = 0; i < numentries; i++) {
+ PsshEntry *entry = &psshInfo->entries[i];
+ drmInfo.write(reinterpret_cast<const char *>(&entry->uuid), sizeof(entry->uuid));
+ ALOGV("retrieveDrmInfo: MEDIA_DRM_INFO supportedScheme[%zu] %s", i,
+ DrmUUID::arrayToHex((const uint8_t*)&entry->uuid, sizeof(AMediaUUID)).string());
+ }
+
+ sp<ABuffer> drmInfoBuf = ABuffer::CreateAsCopy(drmInfo.str().c_str(), drmInfo.tellp());
+ drmInfoBuf->setRange(0, drmInfo.tellp());
+ return drmInfoBuf;
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.h
new file mode 100644
index 0000000..99d2415
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NUPLAYER2_DRM_H_
+#define NUPLAYER2_DRM_H_
+
+#include <media/NdkMediaExtractor.h>
+#include <media/stagefright/foundation/ABuffer.h>
+
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+ struct DrmUUID {
+ static const int UUID_SIZE = 16;
+
+ DrmUUID() {
+ memset(this->uuid, 0, sizeof(uuid));
+ }
+
+ // to allow defining Vector/KeyedVector of UUID type
+ DrmUUID(const DrmUUID &a) {
+ memcpy(this->uuid, a.uuid, sizeof(uuid));
+ }
+
+ // to allow defining Vector/KeyedVector of UUID type
+ DrmUUID(const uint8_t uuid_in[UUID_SIZE]) {
+ memcpy(this->uuid, uuid_in, sizeof(uuid));
+ }
+
+ const uint8_t *ptr() const {
+ return uuid;
+ }
+
+ String8 toHexString() const {
+ return arrayToHex(uuid, UUID_SIZE);
+ }
+
+ static String8 toHexString(const uint8_t uuid_in[UUID_SIZE]) {
+ return arrayToHex(uuid_in, UUID_SIZE);
+ }
+
+ static String8 arrayToHex(const uint8_t *array, int bytes) {
+ String8 result;
+ for (int i = 0; i < bytes; i++) {
+ result.appendFormat("%02x", array[i]);
+ }
+
+ return result;
+ }
+
+ protected:
+ uint8_t uuid[UUID_SIZE];
+ };
+
+
+ struct NuPlayer2Drm {
+
+ // static helpers - internal
+
+ protected:
+ static Vector<DrmUUID> parsePSSH(const void *pssh, size_t psshsize);
+ static Vector<DrmUUID> getSupportedDrmSchemes(const void *pssh, size_t psshsize);
+
+ // static helpers - public
+
+ public:
+ static sp<ABuffer> retrieveDrmInfo(const void *pssh, uint32_t psshsize);
+ static sp<ABuffer> retrieveDrmInfo(PsshInfo *);
+
+ }; // NuPlayer2Drm
+
+} // android
+
+#endif //NUPLAYER2_DRM_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
new file mode 100644
index 0000000..a0bd900
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
@@ -0,0 +1,2074 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayer2Renderer"
+#include <utils/Log.h>
+
+#include "JWakeLock.h"
+#include "NuPlayer2Renderer.h"
+#include <algorithm>
+#include <cutils/properties.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaClock.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/VideoFrameScheduler.h>
+#include <media/MediaCodecBuffer.h>
+
+#include <inttypes.h>
+
+namespace android {
+
+/*
+ * Example of common configuration settings in shell script form
+
+ #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
+ adb shell setprop audio.offload.disable 1
+
+ #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
+ adb shell setprop audio.offload.video 1
+
+ #Use audio callbacks for PCM data
+ adb shell setprop media.stagefright.audio.cbk 1
+
+ #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
+ adb shell setprop media.stagefright.audio.deep 1
+
+ #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
+ adb shell setprop media.stagefright.audio.sink 1000
+
+ * These configurations take effect for the next track played (not the current track).
+ */
+
+static inline bool getUseAudioCallbackSetting() {
+ return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
+}
+
+static inline int32_t getAudioSinkPcmMsSetting() {
+ return property_get_int32(
+ "media.stagefright.audio.sink", 500 /* default_value */);
+}
+
+// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
+// is closed to allow the audio DSP to power down.
+static const int64_t kOffloadPauseMaxUs = 10000000ll;
+
+// Maximum allowed delay from AudioSink, 1.5 seconds.
+static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
+
+static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
+
+// static
+const NuPlayer2::Renderer::PcmInfo NuPlayer2::Renderer::AUDIO_PCMINFO_INITIALIZER = {
+ AUDIO_CHANNEL_NONE,
+ AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_FORMAT_INVALID,
+ 0, // mNumChannels
+ 0 // mSampleRate
+};
+
+// static
+const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000ll;
+
+NuPlayer2::Renderer::Renderer(
+ const sp<MediaPlayer2Interface::AudioSink> &sink,
+ const sp<MediaClock> &mediaClock,
+ const sp<AMessage> ¬ify,
+ uint32_t flags)
+ : mAudioSink(sink),
+ mUseVirtualAudioSink(false),
+ mNotify(notify),
+ mFlags(flags),
+ mNumFramesWritten(0),
+ mDrainAudioQueuePending(false),
+ mDrainVideoQueuePending(false),
+ mAudioQueueGeneration(0),
+ mVideoQueueGeneration(0),
+ mAudioDrainGeneration(0),
+ mVideoDrainGeneration(0),
+ mAudioEOSGeneration(0),
+ mMediaClock(mediaClock),
+ mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
+ mAudioFirstAnchorTimeMediaUs(-1),
+ mAnchorTimeMediaUs(-1),
+ mAnchorNumFramesWritten(-1),
+ mVideoLateByUs(0ll),
+ mNextVideoTimeMediaUs(-1),
+ mHasAudio(false),
+ mHasVideo(false),
+ mNotifyCompleteAudio(false),
+ mNotifyCompleteVideo(false),
+ mSyncQueues(false),
+ mPaused(false),
+ mPauseDrainAudioAllowedUs(0),
+ mVideoSampleReceived(false),
+ mVideoRenderingStarted(false),
+ mVideoRenderingStartGeneration(0),
+ mAudioRenderingStartGeneration(0),
+ mRenderingDataDelivered(false),
+ mNextAudioClockUpdateTimeUs(-1),
+ mLastAudioMediaTimeUs(-1),
+ mAudioOffloadPauseTimeoutGeneration(0),
+ mAudioTornDown(false),
+ mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
+ mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
+ mTotalBuffersQueued(0),
+ mLastAudioBufferDrained(0),
+ mUseAudioCallback(false),
+ mWakeLock(new JWakeLock()) {
+ CHECK(mediaClock != NULL);
+ mPlaybackRate = mPlaybackSettings.mSpeed;
+ mMediaClock->setPlaybackRate(mPlaybackRate);
+}
+
+NuPlayer2::Renderer::~Renderer() {
+ if (offloadingAudio()) {
+ mAudioSink->stop();
+ mAudioSink->flush();
+ mAudioSink->close();
+ }
+
+ // Try to avoid a race condition in case the callback is still active.
+ Mutex::Autolock autoLock(mLock);
+ if (mUseAudioCallback) {
+ flushQueue(&mAudioQueue);
+ flushQueue(&mVideoQueue);
+ }
+ mWakeLock.clear();
+ mVideoScheduler.clear();
+ mNotify.clear();
+ mAudioSink.clear();
+}
+
+void NuPlayer2::Renderer::queueBuffer(
+ bool audio,
+ const sp<MediaCodecBuffer> &buffer,
+ const sp<AMessage> ¬ifyConsumed) {
+ sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
+ msg->setInt32("queueGeneration", getQueueGeneration(audio));
+ msg->setInt32("audio", static_cast<int32_t>(audio));
+ msg->setObject("buffer", buffer);
+ msg->setMessage("notifyConsumed", notifyConsumed);
+ msg->post();
+}
+
+void NuPlayer2::Renderer::queueEOS(bool audio, status_t finalResult) {
+ CHECK_NE(finalResult, (status_t)OK);
+
+ sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
+ msg->setInt32("queueGeneration", getQueueGeneration(audio));
+ msg->setInt32("audio", static_cast<int32_t>(audio));
+ msg->setInt32("finalResult", finalResult);
+ msg->post();
+}
+
+status_t NuPlayer2::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
+ sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
+ writeToAMessage(msg, rate);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+ return err;
+}
+
+status_t NuPlayer2::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
+ if (rate.mSpeed == 0.f) {
+ onPause();
+ // don't call the audio sink's setPlaybackRate when pausing, as the pitch does
+ // not have to correspond to any non-zero speed (e.g. the old speed). Keep the
+ // settings nonetheless, using the old speed, in case the audio sink changes.
+ AudioPlaybackRate newRate = rate;
+ newRate.mSpeed = mPlaybackSettings.mSpeed;
+ mPlaybackSettings = newRate;
+ return OK;
+ }
+
+ if (mAudioSink != NULL && mAudioSink->ready()) {
+ status_t err = mAudioSink->setPlaybackRate(rate);
+ if (err != OK) {
+ return err;
+ }
+ }
+ mPlaybackSettings = rate;
+ mPlaybackRate = rate.mSpeed;
+ mMediaClock->setPlaybackRate(mPlaybackRate);
+ return OK;
+}
+
+status_t NuPlayer2::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+ sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ if (err == OK) {
+ readFromAMessage(response, rate);
+ }
+ }
+ return err;
+}
+
+status_t NuPlayer2::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+ if (mAudioSink != NULL && mAudioSink->ready()) {
+ status_t err = mAudioSink->getPlaybackRate(rate);
+ if (err == OK) {
+ if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
+ ALOGW("correcting mismatch in internal/external playback rate");
+ }
+ // get the playback settings actually used by the audio sink, as they may be
+ // slightly off because the audio sink does not apply very small changes.
+ mPlaybackSettings = *rate;
+ if (mPaused) {
+ rate->mSpeed = 0.f;
+ }
+ }
+ return err;
+ }
+ *rate = mPlaybackSettings;
+ return OK;
+}
+
+status_t NuPlayer2::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
+ sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
+ writeToAMessage(msg, sync, videoFpsHint);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+ return err;
+}
+
+status_t NuPlayer2::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
+ if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
+ return BAD_VALUE;
+ }
+ // TODO: support sync sources
+ return INVALID_OPERATION;
+}
+
+status_t NuPlayer2::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
+ sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ if (err == OK) {
+ readFromAMessage(response, sync, videoFps);
+ }
+ }
+ return err;
+}
+
+status_t NuPlayer2::Renderer::onGetSyncSettings(
+ AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
+ *sync = mSyncSettings;
+ *videoFps = -1.f;
+ return OK;
+}
+
+void NuPlayer2::Renderer::flush(bool audio, bool notifyComplete) {
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (audio) {
+ mNotifyCompleteAudio |= notifyComplete;
+ clearAudioFirstAnchorTime_l();
+ ++mAudioQueueGeneration;
+ ++mAudioDrainGeneration;
+ } else {
+ mNotifyCompleteVideo |= notifyComplete;
+ ++mVideoQueueGeneration;
+ ++mVideoDrainGeneration;
+ }
+
+ mMediaClock->clearAnchor();
+ mVideoLateByUs = 0;
+ mNextVideoTimeMediaUs = -1;
+ mSyncQueues = false;
+ }
+
+ sp<AMessage> msg = new AMessage(kWhatFlush, this);
+ msg->setInt32("audio", static_cast<int32_t>(audio));
+ msg->post();
+}
+
+void NuPlayer2::Renderer::signalTimeDiscontinuity() {
+}
+
+void NuPlayer2::Renderer::signalDisableOffloadAudio() {
+ (new AMessage(kWhatDisableOffloadAudio, this))->post();
+}
+
+void NuPlayer2::Renderer::signalEnableOffloadAudio() {
+ (new AMessage(kWhatEnableOffloadAudio, this))->post();
+}
+
+void NuPlayer2::Renderer::pause() {
+ (new AMessage(kWhatPause, this))->post();
+}
+
+void NuPlayer2::Renderer::resume() {
+ (new AMessage(kWhatResume, this))->post();
+}
+
+void NuPlayer2::Renderer::setVideoFrameRate(float fps) {
+ sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
+ msg->setFloat("frame-rate", fps);
+ msg->post();
+}
+
+ // May be called on any thread, without mLock held.
+status_t NuPlayer2::Renderer::getCurrentPosition(int64_t *mediaUs) {
+ status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
+ if (result == OK) {
+ return result;
+ }
+
+ // MediaClock has not started yet. Try to start it if possible.
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mAudioFirstAnchorTimeMediaUs == -1) {
+ return result;
+ }
+
+ AudioTimestamp ts;
+ status_t res = mAudioSink->getTimestamp(ts);
+ if (res != OK) {
+ return result;
+ }
+
+ // AudioSink has rendered some frames.
+ int64_t nowUs = ALooper::GetNowUs();
+ int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
+ + mAudioFirstAnchorTimeMediaUs;
+ mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
+ }
+
+ return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
+}
+
+void NuPlayer2::Renderer::clearAudioFirstAnchorTime_l() {
+ mAudioFirstAnchorTimeMediaUs = -1;
+ mMediaClock->setStartingTimeMedia(-1);
+}
+
+void NuPlayer2::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
+ if (mAudioFirstAnchorTimeMediaUs == -1) {
+ mAudioFirstAnchorTimeMediaUs = mediaUs;
+ mMediaClock->setStartingTimeMedia(mediaUs);
+ }
+}
+
+// Called on renderer looper.
+void NuPlayer2::Renderer::clearAnchorTime() {
+ mMediaClock->clearAnchor();
+ mAnchorTimeMediaUs = -1;
+ mAnchorNumFramesWritten = -1;
+}
+
+void NuPlayer2::Renderer::setVideoLateByUs(int64_t lateUs) {
+ Mutex::Autolock autoLock(mLock);
+ mVideoLateByUs = lateUs;
+}
+
+int64_t NuPlayer2::Renderer::getVideoLateByUs() {
+ Mutex::Autolock autoLock(mLock);
+ return mVideoLateByUs;
+}
+
+status_t NuPlayer2::Renderer::openAudioSink(
+ const sp<AMessage> &format,
+ bool offloadOnly,
+ bool hasVideo,
+ uint32_t flags,
+ bool *isOffloaded,
+ bool isStreaming) {
+ sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
+ msg->setMessage("format", format);
+ msg->setInt32("offload-only", offloadOnly);
+ msg->setInt32("has-video", hasVideo);
+ msg->setInt32("flags", flags);
+ msg->setInt32("isStreaming", isStreaming);
+
+ sp<AMessage> response;
+ status_t postStatus = msg->postAndAwaitResponse(&response);
+
+ int32_t err;
+ if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
+ err = INVALID_OPERATION;
+ } else if (err == OK && isOffloaded != NULL) {
+ int32_t offload;
+ CHECK(response->findInt32("offload", &offload));
+ *isOffloaded = (offload != 0);
+ }
+ return err;
+}
+
+void NuPlayer2::Renderer::closeAudioSink() {
+ sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
+
+ sp<AMessage> response;
+ msg->postAndAwaitResponse(&response);
+}
+
+void NuPlayer2::Renderer::changeAudioFormat(
+ const sp<AMessage> &format,
+ bool offloadOnly,
+ bool hasVideo,
+ uint32_t flags,
+ bool isStreaming,
+ const sp<AMessage> ¬ify) {
+ sp<AMessage> meta = new AMessage;
+ meta->setMessage("format", format);
+ meta->setInt32("offload-only", offloadOnly);
+ meta->setInt32("has-video", hasVideo);
+ meta->setInt32("flags", flags);
+ meta->setInt32("isStreaming", isStreaming);
+
+ sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
+ msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
+ msg->setMessage("notify", notify);
+ msg->setMessage("meta", meta);
+ msg->post();
+}
+
+void NuPlayer2::Renderer::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatOpenAudioSink:
+ {
+ sp<AMessage> format;
+ CHECK(msg->findMessage("format", &format));
+
+ int32_t offloadOnly;
+ CHECK(msg->findInt32("offload-only", &offloadOnly));
+
+ int32_t hasVideo;
+ CHECK(msg->findInt32("has-video", &hasVideo));
+
+ uint32_t flags;
+ CHECK(msg->findInt32("flags", (int32_t *)&flags));
+
+ uint32_t isStreaming;
+ CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
+
+ status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->setInt32("offload", offloadingAudio());
+
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+
+ break;
+ }
+
+ case kWhatCloseAudioSink:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ onCloseAudioSink();
+
+ sp<AMessage> response = new AMessage;
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatStopAudioSink:
+ {
+ mAudioSink->stop();
+ break;
+ }
+
+ case kWhatChangeAudioFormat:
+ {
+ int32_t queueGeneration;
+ CHECK(msg->findInt32("queueGeneration", &queueGeneration));
+
+ sp<AMessage> notify;
+ CHECK(msg->findMessage("notify", ¬ify));
+
+ if (offloadingAudio()) {
+ ALOGW("changeAudioFormat should NOT be called in offload mode");
+ notify->setInt32("err", INVALID_OPERATION);
+ notify->post();
+ break;
+ }
+
+ sp<AMessage> meta;
+ CHECK(msg->findMessage("meta", &meta));
+
+ if (queueGeneration != getQueueGeneration(true /* audio */)
+ || mAudioQueue.empty()) {
+ onChangeAudioFormat(meta, notify);
+ break;
+ }
+
+ QueueEntry entry;
+ entry.mNotifyConsumed = notify;
+ entry.mMeta = meta;
+
+ Mutex::Autolock autoLock(mLock);
+ mAudioQueue.push_back(entry);
+ postDrainAudioQueue_l();
+
+ break;
+ }
+
+ case kWhatDrainAudioQueue:
+ {
+ mDrainAudioQueuePending = false;
+
+ int32_t generation;
+ CHECK(msg->findInt32("drainGeneration", &generation));
+ if (generation != getDrainGeneration(true /* audio */)) {
+ break;
+ }
+
+ if (onDrainAudioQueue()) {
+ uint32_t numFramesPlayed;
+ CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
+ (status_t)OK);
+
+ // Handle AudioTrack race when start is immediately called after flush.
+ uint32_t numFramesPendingPlayout =
+ (mNumFramesWritten > numFramesPlayed ?
+ mNumFramesWritten - numFramesPlayed : 0);
+
+ // This is how long the audio sink will have data to
+ // play back.
+ int64_t delayUs =
+ mAudioSink->msecsPerFrame()
+ * numFramesPendingPlayout * 1000ll;
+ if (mPlaybackRate > 1.0f) {
+ delayUs /= mPlaybackRate;
+ }
+
+ // Let's give it more data after about half that time
+ // has elapsed.
+ delayUs /= 2;
+ // check the buffer size to estimate maximum delay permitted.
+ const int64_t maxDrainDelayUs = std::max(
+ mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
+ ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
+ (long long)delayUs, (long long)maxDrainDelayUs);
+ Mutex::Autolock autoLock(mLock);
+ postDrainAudioQueue_l(delayUs);
+ }
+ break;
+ }
+
+ case kWhatDrainVideoQueue:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("drainGeneration", &generation));
+ if (generation != getDrainGeneration(false /* audio */)) {
+ break;
+ }
+
+ mDrainVideoQueuePending = false;
+
+ onDrainVideoQueue();
+
+ postDrainVideoQueue();
+ break;
+ }
+
+ case kWhatPostDrainVideoQueue:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("drainGeneration", &generation));
+ if (generation != getDrainGeneration(false /* audio */)) {
+ break;
+ }
+
+ mDrainVideoQueuePending = false;
+ postDrainVideoQueue();
+ break;
+ }
+
+ case kWhatQueueBuffer:
+ {
+ onQueueBuffer(msg);
+ break;
+ }
+
+ case kWhatQueueEOS:
+ {
+ onQueueEOS(msg);
+ break;
+ }
+
+ case kWhatEOS:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("audioEOSGeneration", &generation));
+ if (generation != mAudioEOSGeneration) {
+ break;
+ }
+ status_t finalResult;
+ CHECK(msg->findInt32("finalResult", &finalResult));
+ notifyEOS(true /* audio */, finalResult);
+ break;
+ }
+
+ case kWhatConfigPlayback:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ AudioPlaybackRate rate;
+ readFromAMessage(msg, &rate);
+ status_t err = onConfigPlayback(rate);
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatGetPlaybackSettings:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+ status_t err = onGetPlaybackSettings(&rate);
+ sp<AMessage> response = new AMessage;
+ if (err == OK) {
+ writeToAMessage(response, rate);
+ }
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatConfigSync:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ AVSyncSettings sync;
+ float videoFpsHint;
+ readFromAMessage(msg, &sync, &videoFpsHint);
+ status_t err = onConfigSync(sync, videoFpsHint);
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatGetSyncSettings:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ ALOGV("kWhatGetSyncSettings");
+ AVSyncSettings sync;
+ float videoFps = -1.f;
+ status_t err = onGetSyncSettings(&sync, &videoFps);
+ sp<AMessage> response = new AMessage;
+ if (err == OK) {
+ writeToAMessage(response, sync, videoFps);
+ }
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatFlush:
+ {
+ onFlush(msg);
+ break;
+ }
+
+ case kWhatDisableOffloadAudio:
+ {
+ onDisableOffloadAudio();
+ break;
+ }
+
+ case kWhatEnableOffloadAudio:
+ {
+ onEnableOffloadAudio();
+ break;
+ }
+
+ case kWhatPause:
+ {
+ onPause();
+ break;
+ }
+
+ case kWhatResume:
+ {
+ onResume();
+ break;
+ }
+
+ case kWhatSetVideoFrameRate:
+ {
+ float fps;
+ CHECK(msg->findFloat("frame-rate", &fps));
+ onSetVideoFrameRate(fps);
+ break;
+ }
+
+ case kWhatAudioTearDown:
+ {
+ int32_t reason;
+ CHECK(msg->findInt32("reason", &reason));
+
+ onAudioTearDown((AudioTearDownReason)reason);
+ break;
+ }
+
+ case kWhatAudioOffloadPauseTimeout:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("drainGeneration", &generation));
+ if (generation != mAudioOffloadPauseTimeoutGeneration) {
+ break;
+ }
+ ALOGV("Audio Offload tear down due to pause timeout.");
+ onAudioTearDown(kDueToTimeout);
+ mWakeLock->release();
+ break;
+ }
+
+ default:
+ TRESPASS();
+ break;
+ }
+}
+
+void NuPlayer2::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
+ if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
+ return;
+ }
+
+ if (mAudioQueue.empty()) {
+ return;
+ }
+
+ // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
+ if (mPaused) {
+ const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
+ if (diffUs > delayUs) {
+ delayUs = diffUs;
+ }
+ }
+
+ mDrainAudioQueuePending = true;
+ sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
+ msg->setInt32("drainGeneration", mAudioDrainGeneration);
+ msg->post(delayUs);
+}
+
+void NuPlayer2::Renderer::prepareForMediaRenderingStart_l() {
+ mAudioRenderingStartGeneration = mAudioDrainGeneration;
+ mVideoRenderingStartGeneration = mVideoDrainGeneration;
+ mRenderingDataDelivered = false;
+}
+
+void NuPlayer2::Renderer::notifyIfMediaRenderingStarted_l() {
+ if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
+ mAudioRenderingStartGeneration == mAudioDrainGeneration) {
+ mRenderingDataDelivered = true;
+ if (mPaused) {
+ return;
+ }
+ mVideoRenderingStartGeneration = -1;
+ mAudioRenderingStartGeneration = -1;
+
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatMediaRenderingStart);
+ notify->post();
+ }
+}
+
+// static
+size_t NuPlayer2::Renderer::AudioSinkCallback(
+ MediaPlayer2Interface::AudioSink * /* audioSink */,
+ void *buffer,
+ size_t size,
+ void *cookie,
+ MediaPlayer2Interface::AudioSink::cb_event_t event) {
+ NuPlayer2::Renderer *me = (NuPlayer2::Renderer *)cookie;
+
+ switch (event) {
+ case MediaPlayer2Interface::AudioSink::CB_EVENT_FILL_BUFFER:
+ {
+ return me->fillAudioBuffer(buffer, size);
+ break;
+ }
+
+ case MediaPlayer2Interface::AudioSink::CB_EVENT_STREAM_END:
+ {
+ ALOGV("AudioSink::CB_EVENT_STREAM_END");
+ me->notifyEOSCallback();
+ break;
+ }
+
+ case MediaPlayer2Interface::AudioSink::CB_EVENT_TEAR_DOWN:
+ {
+ ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
+ me->notifyAudioTearDown(kDueToError);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+void NuPlayer2::Renderer::notifyEOSCallback() {
+ Mutex::Autolock autoLock(mLock);
+
+ if (!mUseAudioCallback) {
+ return;
+ }
+
+ notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
+}
+
+size_t NuPlayer2::Renderer::fillAudioBuffer(void *buffer, size_t size) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (!mUseAudioCallback) {
+ return 0;
+ }
+
+ bool hasEOS = false;
+
+ size_t sizeCopied = 0;
+ bool firstEntry = true;
+ QueueEntry *entry; // will be valid after while loop if hasEOS is set.
+ while (sizeCopied < size && !mAudioQueue.empty()) {
+ entry = &*mAudioQueue.begin();
+
+ if (entry->mBuffer == NULL) { // EOS
+ hasEOS = true;
+ mAudioQueue.erase(mAudioQueue.begin());
+ break;
+ }
+
+ if (firstEntry && entry->mOffset == 0) {
+ firstEntry = false;
+ int64_t mediaTimeUs;
+ CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
+ ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
+ setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
+ }
+
+ size_t copy = entry->mBuffer->size() - entry->mOffset;
+ size_t sizeRemaining = size - sizeCopied;
+ if (copy > sizeRemaining) {
+ copy = sizeRemaining;
+ }
+
+ memcpy((char *)buffer + sizeCopied,
+ entry->mBuffer->data() + entry->mOffset,
+ copy);
+
+ entry->mOffset += copy;
+ if (entry->mOffset == entry->mBuffer->size()) {
+ entry->mNotifyConsumed->post();
+ mAudioQueue.erase(mAudioQueue.begin());
+ entry = NULL;
+ }
+ sizeCopied += copy;
+
+ notifyIfMediaRenderingStarted_l();
+ }
+
+ if (mAudioFirstAnchorTimeMediaUs >= 0) {
+ int64_t nowUs = ALooper::GetNowUs();
+ int64_t nowMediaUs =
+ mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
+ // we don't know how much data we are queueing for offloaded tracks.
+ mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
+ }
+
+ // for non-offloaded audio, we need to compute the frames written because
+ // there is no EVENT_STREAM_END notification. The frames written give
+ // an estimate of the pending play-out duration.
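+ // For example, with 16-bit stereo PCM the frame size is 4 bytes, so copying
+ // 4096 bytes advances mNumFramesWritten by 1024 frames.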
+ if (!offloadingAudio()) {
+ mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
+ }
+
+ if (hasEOS) {
+ (new AMessage(kWhatStopAudioSink, this))->post();
+ // As there is currently no EVENT_STREAM_END callback notification for
+ // non-offloaded audio tracks, we need to post the EOS ourselves.
+ if (!offloadingAudio()) {
+ int64_t postEOSDelayUs = 0;
+ if (mAudioSink->needsTrailingPadding()) {
+ postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
+ }
+ ALOGV("fillAudioBuffer: notifyEOS_l "
+ "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld",
+ mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
+ notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
+ }
+ }
+ return sizeCopied;
+}
+
+void NuPlayer2::Renderer::drainAudioQueueUntilLastEOS() {
+ List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
+ bool foundEOS = false;
+ while (it != mAudioQueue.end()) {
+ int32_t eos;
+ QueueEntry *entry = &*it++;
+ if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
+ || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
+ itEOS = it;
+ foundEOS = true;
+ }
+ }
+
+ if (foundEOS) {
+ // post all replies before EOS and drop the samples
+ for (it = mAudioQueue.begin(); it != itEOS; it++) {
+ if (it->mBuffer == nullptr) {
+ if (it->mNotifyConsumed == nullptr) {
+ // delay doesn't matter as we don't even have an AudioTrack
+ notifyEOS(true /* audio */, it->mFinalResult);
+ } else {
+ // TAG for re-opening audio sink.
+ onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
+ }
+ } else {
+ it->mNotifyConsumed->post();
+ }
+ }
+ mAudioQueue.erase(mAudioQueue.begin(), itEOS);
+ }
+}
+
+bool NuPlayer2::Renderer::onDrainAudioQueue() {
+ // do not drain audio during teardown as queued buffers may be invalid.
+ if (mAudioTornDown) {
+ return false;
+ }
+ // TODO: This call to getPosition checks if AudioTrack has been created
+ // in AudioSink before draining audio. If AudioTrack doesn't exist, then
+ // CHECKs on getPosition will fail.
+ // We still need to figure out why AudioTrack is not created when
+ // this function is called. One possible reason is leftover audio. Another thing
+ // to check is whether the decoder has received INFO_FORMAT_CHANGED as its first
+ // buffer, since AudioSink is opened there; also look at possible interactions
+ // with a flush issued immediately after start. Investigate the error message
+ // "vorbis_dsp_synthesis returned -135", along with RTSP.
+ uint32_t numFramesPlayed;
+ if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
+ // When getPosition fails, renderer will not reschedule the draining
+ // unless new samples are queued.
+ // If we have pending EOS (or "eos" marker for discontinuities), we need
+ // to post these now as NuPlayer2Decoder might be waiting for it.
+ drainAudioQueueUntilLastEOS();
+
+ ALOGW("onDrainAudioQueue(): audio sink is not ready");
+ return false;
+ }
+
+#if 0
+ ssize_t numFramesAvailableToWrite =
+ mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
+
+ if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
+ ALOGI("audio sink underrun");
+ } else {
+ ALOGV("audio queue has %d frames left to play",
+ mAudioSink->frameCount() - numFramesAvailableToWrite);
+ }
+#endif
+
+ uint32_t prevFramesWritten = mNumFramesWritten;
+ while (!mAudioQueue.empty()) {
+ QueueEntry *entry = &*mAudioQueue.begin();
+
+ if (entry->mBuffer == NULL) {
+ if (entry->mNotifyConsumed != nullptr) {
+ // TAG for re-open audio sink.
+ onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
+ mAudioQueue.erase(mAudioQueue.begin());
+ continue;
+ }
+
+ // EOS
+ if (mPaused) {
+ // Do not notify EOS when paused.
+ // This is needed to avoid switch to next clip while in pause.
+ ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
+ return false;
+ }
+
+ int64_t postEOSDelayUs = 0;
+ if (mAudioSink->needsTrailingPadding()) {
+ postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
+ }
+ notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
+ mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
+
+ mAudioQueue.erase(mAudioQueue.begin());
+ entry = NULL;
+ if (mAudioSink->needsTrailingPadding()) {
+ // If we're not in gapless playback (i.e. through setNextPlayer), we
+ // need to stop the track here, because that will play out the last
+ // little bit at the end of the file. Otherwise short files won't play.
+ mAudioSink->stop();
+ mNumFramesWritten = 0;
+ }
+ return false;
+ }
+
+ mLastAudioBufferDrained = entry->mBufferOrdinal;
+
+ // ignore 0-sized buffer which could be EOS marker with no data
+ if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
+ int64_t mediaTimeUs;
+ CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
+ ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
+ mediaTimeUs / 1E6);
+ onNewAudioMediaTime(mediaTimeUs);
+ }
+
+ size_t copy = entry->mBuffer->size() - entry->mOffset;
+
+ ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
+ copy, false /* blocking */);
+ if (written < 0) {
+ // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
+ if (written == WOULD_BLOCK) {
+ ALOGV("AudioSink write would block when writing %zu bytes", copy);
+ } else {
+ ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
+ // This can only happen when AudioSink was opened with doNotReconnect flag set to
+ // true, in which case the NuPlayer2 will handle the reconnect.
+ notifyAudioTearDown(kDueToError);
+ }
+ break;
+ }
+
+ entry->mOffset += written;
+ size_t remainder = entry->mBuffer->size() - entry->mOffset;
+ if ((ssize_t)remainder < mAudioSink->frameSize()) {
+ if (remainder > 0) {
+ ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
+ remainder);
+ entry->mOffset += remainder;
+ copy -= remainder;
+ }
+
+ entry->mNotifyConsumed->post();
+ mAudioQueue.erase(mAudioQueue.begin());
+
+ entry = NULL;
+ }
+
+ size_t copiedFrames = written / mAudioSink->frameSize();
+ mNumFramesWritten += copiedFrames;
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ int64_t maxTimeMedia;
+ maxTimeMedia =
+ mAnchorTimeMediaUs +
+ (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
+ * 1000LL * mAudioSink->msecsPerFrame());
+ mMediaClock->updateMaxTimeMedia(maxTimeMedia);
+
+ notifyIfMediaRenderingStarted_l();
+ }
+
+ if (written != (ssize_t)copy) {
+ // A short count was received from AudioSink::write()
+ //
+ // AudioSink write is called in non-blocking mode.
+ // It may return with a short count when:
+ //
+ // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
+ // discarded.
+ // 2) The data to be copied exceeds the available buffer in AudioSink.
+ // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
+ // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
+
+ // (Case 1)
+ // The size must be a multiple of the frame size. If it is not, we need to fail,
+ // as we should not carry over fractional frames between calls.
+ CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
+
+ // (Case 2, 3, 4)
+ // Return early to the caller.
+ // Beware of calling immediately again as this may busy-loop if you are not careful.
+ ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
+ break;
+ }
+ }
+
+ // calculate whether we need to reschedule another write.
+ bool reschedule = !mAudioQueue.empty()
+ && (!mPaused
+ || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
+ //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u",
+ // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
+ return reschedule;
+}
+
+int64_t NuPlayer2::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
+ int32_t sampleRate = offloadingAudio() ?
+ mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
+ if (sampleRate == 0) {
+ ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
+ return 0;
+ }
+ // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
+ return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
+}
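+
+// Illustrative note on the TODO above (hedged, not part of this change): numFrames is
+// uint32_t, so the (int32_t) cast wraps negative once more than INT32_MAX frames have
+// been written; at 48 kHz that is roughly 2147483647 / 48000 ~= 44739 s, i.e. about
+// 12.4 hours. Keeping the arithmetic in 64 bits would avoid it, e.g.:
+//
+//     return (int64_t)numFrames * 1000000LL / sampleRate;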
+
+// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
+int64_t NuPlayer2::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
+ int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
+ if (mUseVirtualAudioSink) {
+ int64_t nowUs = ALooper::GetNowUs();
+ int64_t mediaUs;
+ if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
+ return 0ll;
+ } else {
+ return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
+ }
+ }
+
+ const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
+ int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
+ if (pendingUs < 0) {
+ // This shouldn't happen unless the timestamp is stale.
+ ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
+ "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
+ __func__, (long long)pendingUs,
+ (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
+ pendingUs = 0;
+ }
+ return pendingUs;
+}
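+
+// Worked example of the computation above, with hypothetical numbers: if 480000 frames
+// have been written at 48 kHz, writtenAudioDurationUs is 10,000,000 us; if the sink
+// reports getPlayedOutDurationUs(nowUs) == 9,200,000 us, then pendingUs is 800,000 us,
+// i.e. 0.8 s of audio is still queued in the sink. This is the amount used to delay the
+// EOS notification and to derive the audio anchor.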
+
+int64_t NuPlayer2::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
+ int64_t realUs;
+ if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
+ // If we failed to get the current position, e.g. because the audio clock is
+ // not ready, just play out video immediately without delay.
+ return nowUs;
+ }
+ return realUs;
+}
+
+void NuPlayer2::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
+ Mutex::Autolock autoLock(mLock);
+ // TRICKY: vorbis decoder generates multiple frames with the same
+ // timestamp, so only update on the first frame with a given timestamp
+ if (mediaTimeUs == mAnchorTimeMediaUs) {
+ return;
+ }
+ setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
+
+ // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
+ if (mNextAudioClockUpdateTimeUs == -1) {
+ AudioTimestamp ts;
+ if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
+ mNextAudioClockUpdateTimeUs = 0; // start our clock updates
+ }
+ }
+ int64_t nowUs = ALooper::GetNowUs();
+ if (mNextAudioClockUpdateTimeUs >= 0) {
+ if (nowUs >= mNextAudioClockUpdateTimeUs) {
+ int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
+ mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
+ mUseVirtualAudioSink = false;
+ mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
+ }
+ } else {
+ int64_t unused;
+ if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
+ && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
+ > kMaxAllowedAudioSinkDelayUs)) {
+ // Enough data has been sent to AudioSink, but AudioSink has not rendered
+ // any data yet. Something is wrong with AudioSink, e.g., the device is not
+ // connected to audio out.
+ // Switch to system clock. This essentially creates a virtual AudioSink with
+ // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
+ // This virtual AudioSink renders audio data starting from the very first sample
+ // and it's paced by system clock.
+ ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
+ mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
+ mUseVirtualAudioSink = true;
+ }
+ }
+ mAnchorNumFramesWritten = mNumFramesWritten;
+ mAnchorTimeMediaUs = mediaTimeUs;
+}
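+
+// Illustrative restatement of the anchor update above: the media time that is audible
+// "now" is the media time of the buffer just written minus the audio still pending in
+// the sink,
+//
+//     nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
+//     mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
+//
+// which lets MediaClock extrapolate positions between updates, issued no more often
+// than once per kMinimumAudioClockUpdatePeriodUs.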
+
+// Called without mLock acquired.
+void NuPlayer2::Renderer::postDrainVideoQueue() {
+ if (mDrainVideoQueuePending
+ || getSyncQueues()
+ || (mPaused && mVideoSampleReceived)) {
+ return;
+ }
+
+ if (mVideoQueue.empty()) {
+ return;
+ }
+
+ QueueEntry &entry = *mVideoQueue.begin();
+
+ sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
+ msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
+
+ if (entry.mBuffer == NULL) {
+ // EOS doesn't carry a timestamp.
+ msg->post();
+ mDrainVideoQueuePending = true;
+ return;
+ }
+
+ int64_t nowUs = ALooper::GetNowUs();
+ if (mFlags & FLAG_REAL_TIME) {
+ int64_t realTimeUs;
+ CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));
+
+ realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
+
+ int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
+
+ int64_t delayUs = realTimeUs - nowUs;
+
+ ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
+ // post 2 display refreshes before rendering is due
+ msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
+
+ mDrainVideoQueuePending = true;
+ return;
+ }
+
+ int64_t mediaTimeUs;
+ CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mAnchorTimeMediaUs < 0) {
+ mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
+ mAnchorTimeMediaUs = mediaTimeUs;
+ }
+ }
+ mNextVideoTimeMediaUs = mediaTimeUs + 100000;
+ if (!mHasAudio) {
+ // smooth out videos >= 10fps
+ mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ }
+
+ if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
+ msg->post();
+ } else {
+ int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
+
+ // post 2 display refreshes before rendering is due
+ mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
+ }
+
+ mDrainVideoQueuePending = true;
+}
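+
+// Illustrative timing, assuming a 60 Hz display (vsync period ~16.7 ms): twoVsyncsUs is
+// ~33.3 ms, so the drain message fires about two refresh cycles before the frame's
+// target presentation time, giving onDrainVideoQueue() time to hand the buffer to the
+// consumer before it is due on screen.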
+
+void NuPlayer2::Renderer::onDrainVideoQueue() {
+ if (mVideoQueue.empty()) {
+ return;
+ }
+
+ QueueEntry *entry = &*mVideoQueue.begin();
+
+ if (entry->mBuffer == NULL) {
+ // EOS
+
+ notifyEOS(false /* audio */, entry->mFinalResult);
+
+ mVideoQueue.erase(mVideoQueue.begin());
+ entry = NULL;
+
+ setVideoLateByUs(0);
+ return;
+ }
+
+ int64_t nowUs = ALooper::GetNowUs();
+ int64_t realTimeUs;
+ int64_t mediaTimeUs = -1;
+ if (mFlags & FLAG_REAL_TIME) {
+ CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
+ } else {
+ CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
+
+ realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
+ }
+ realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
+
+ bool tooLate = false;
+
+ if (!mPaused) {
+ setVideoLateByUs(nowUs - realTimeUs);
+ tooLate = (mVideoLateByUs > 40000);
+
+ if (tooLate) {
+ ALOGV("video late by %lld us (%.2f secs)",
+ (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
+ } else {
+ int64_t mediaUs = 0;
+ mMediaClock->getMediaTime(realTimeUs, &mediaUs);
+ ALOGV("rendering video at media time %.2f secs",
+ (mFlags & FLAG_REAL_TIME ? realTimeUs :
+ mediaUs) / 1E6);
+
+ if (!(mFlags & FLAG_REAL_TIME)
+ && mLastAudioMediaTimeUs != -1
+ && mediaTimeUs > mLastAudioMediaTimeUs) {
+ // If audio ends before video, video continues to drive media clock.
+ // Also smooth out videos >= 10fps.
+ mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
+ }
+ }
+ } else {
+ setVideoLateByUs(0);
+ if (!mVideoSampleReceived && !mHasAudio) {
+ // This will ensure that the first frame after a flush won't be used as anchor
+ // when renderer is in paused state, because resume can happen any time after seek.
+ clearAnchorTime();
+ }
+ }
+
+ // Always render the first video frame while keeping stats on A/V sync.
+ if (!mVideoSampleReceived) {
+ realTimeUs = nowUs;
+ tooLate = false;
+ }
+
+ entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
+ entry->mNotifyConsumed->setInt32("render", !tooLate);
+ entry->mNotifyConsumed->post();
+ mVideoQueue.erase(mVideoQueue.begin());
+ entry = NULL;
+
+ mVideoSampleReceived = true;
+
+ if (!mPaused) {
+ if (!mVideoRenderingStarted) {
+ mVideoRenderingStarted = true;
+ notifyVideoRenderingStart();
+ }
+ Mutex::Autolock autoLock(mLock);
+ notifyIfMediaRenderingStarted_l();
+ }
+}
+
+void NuPlayer2::Renderer::notifyVideoRenderingStart() {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatVideoRenderingStart);
+ notify->post();
+}
+
+void NuPlayer2::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
+ Mutex::Autolock autoLock(mLock);
+ notifyEOS_l(audio, finalResult, delayUs);
+}
+
+void NuPlayer2::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
+ if (audio && delayUs > 0) {
+ sp<AMessage> msg = new AMessage(kWhatEOS, this);
+ msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
+ msg->setInt32("finalResult", finalResult);
+ msg->post(delayUs);
+ return;
+ }
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatEOS);
+ notify->setInt32("audio", static_cast<int32_t>(audio));
+ notify->setInt32("finalResult", finalResult);
+ notify->post(delayUs);
+
+ if (audio) {
+ // Video might outlive audio. Clear anchor to enable video only case.
+ mAnchorTimeMediaUs = -1;
+ mHasAudio = false;
+ if (mNextVideoTimeMediaUs >= 0) {
+ int64_t mediaUs = 0;
+ mMediaClock->getMediaTime(ALooper::GetNowUs(), &mediaUs);
+ if (mNextVideoTimeMediaUs > mediaUs) {
+ mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ }
+ }
+ }
+}
+
+void NuPlayer2::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
+ sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
+ msg->setInt32("reason", reason);
+ msg->post();
+}
+
+void NuPlayer2::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
+ int32_t audio;
+ CHECK(msg->findInt32("audio", &audio));
+
+ if (dropBufferIfStale(audio, msg)) {
+ return;
+ }
+
+ if (audio) {
+ mHasAudio = true;
+ } else {
+ mHasVideo = true;
+ }
+
+ if (mHasVideo) {
+ if (mVideoScheduler == NULL) {
+ mVideoScheduler = new VideoFrameScheduler();
+ mVideoScheduler->init();
+ }
+ }
+
+ sp<RefBase> obj;
+ CHECK(msg->findObject("buffer", &obj));
+ sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
+
+ sp<AMessage> notifyConsumed;
+ CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
+
+ QueueEntry entry;
+ entry.mBuffer = buffer;
+ entry.mNotifyConsumed = notifyConsumed;
+ entry.mOffset = 0;
+ entry.mFinalResult = OK;
+ entry.mBufferOrdinal = ++mTotalBuffersQueued;
+
+ if (audio) {
+ Mutex::Autolock autoLock(mLock);
+ mAudioQueue.push_back(entry);
+ postDrainAudioQueue_l();
+ } else {
+ mVideoQueue.push_back(entry);
+ postDrainVideoQueue();
+ }
+
+ Mutex::Autolock autoLock(mLock);
+ if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
+ return;
+ }
+
+ sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
+ sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
+
+ if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
+ // EOS signalled on either queue.
+ syncQueuesDone_l();
+ return;
+ }
+
+ int64_t firstAudioTimeUs;
+ int64_t firstVideoTimeUs;
+ CHECK(firstAudioBuffer->meta()
+ ->findInt64("timeUs", &firstAudioTimeUs));
+ CHECK(firstVideoBuffer->meta()
+ ->findInt64("timeUs", &firstVideoTimeUs));
+
+ int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
+
+ ALOGV("queueDiff = %.2f secs", diff / 1E6);
+
+ if (diff > 100000ll) {
+ // Audio data starts more than 0.1 secs before video.
+ // Drop some audio.
+
+ (*mAudioQueue.begin()).mNotifyConsumed->post();
+ mAudioQueue.erase(mAudioQueue.begin());
+ return;
+ }
+
+ syncQueuesDone_l();
+}
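+
+// Worked example of the queue syncing above, with hypothetical timestamps: if the first
+// audio buffer has timeUs = 0 and the first video buffer has timeUs = 350000, diff is
+// 0.35 s > 0.1 s, so the leading audio buffer is consumed without being written to the
+// sink and dropped; this repeats until the leading audio is within 0.1 s of the first
+// video frame, at which point syncQueuesDone_l() releases both queues for draining.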
+
+void NuPlayer2::Renderer::syncQueuesDone_l() {
+ if (!mSyncQueues) {
+ return;
+ }
+
+ mSyncQueues = false;
+
+ if (!mAudioQueue.empty()) {
+ postDrainAudioQueue_l();
+ }
+
+ if (!mVideoQueue.empty()) {
+ mLock.unlock();
+ postDrainVideoQueue();
+ mLock.lock();
+ }
+}
+
+void NuPlayer2::Renderer::onQueueEOS(const sp<AMessage> &msg) {
+ int32_t audio;
+ CHECK(msg->findInt32("audio", &audio));
+
+ if (dropBufferIfStale(audio, msg)) {
+ return;
+ }
+
+ int32_t finalResult;
+ CHECK(msg->findInt32("finalResult", &finalResult));
+
+ QueueEntry entry;
+ entry.mOffset = 0;
+ entry.mFinalResult = finalResult;
+
+ if (audio) {
+ Mutex::Autolock autoLock(mLock);
+ if (mAudioQueue.empty() && mSyncQueues) {
+ syncQueuesDone_l();
+ }
+ mAudioQueue.push_back(entry);
+ postDrainAudioQueue_l();
+ } else {
+ if (mVideoQueue.empty() && getSyncQueues()) {
+ Mutex::Autolock autoLock(mLock);
+ syncQueuesDone_l();
+ }
+ mVideoQueue.push_back(entry);
+ postDrainVideoQueue();
+ }
+}
+
+void NuPlayer2::Renderer::onFlush(const sp<AMessage> &msg) {
+ int32_t audio, notifyComplete;
+ CHECK(msg->findInt32("audio", &audio));
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (audio) {
+ notifyComplete = mNotifyCompleteAudio;
+ mNotifyCompleteAudio = false;
+ mLastAudioMediaTimeUs = -1;
+ } else {
+ notifyComplete = mNotifyCompleteVideo;
+ mNotifyCompleteVideo = false;
+ }
+
+ // If we're currently syncing the queues, i.e. dropping audio while
+ // aligning the first audio/video buffer times and only one of the
+ // two queues has data, we may starve that queue by not requesting
+ // more buffers from the decoder. If the other source then encounters
+ // a discontinuity that leads to flushing, we'll never find the
+ // corresponding discontinuity on the other queue.
+ // Therefore we'll stop syncing the queues if at least one of them
+ // is flushed.
+ syncQueuesDone_l();
+ }
+ clearAnchorTime();
+
+ ALOGV("flushing %s", audio ? "audio" : "video");
+ if (audio) {
+ {
+ Mutex::Autolock autoLock(mLock);
+ flushQueue(&mAudioQueue);
+
+ ++mAudioDrainGeneration;
+ ++mAudioEOSGeneration;
+ prepareForMediaRenderingStart_l();
+
+ // the frame count will be reset after flush.
+ clearAudioFirstAnchorTime_l();
+ }
+
+ mDrainAudioQueuePending = false;
+
+ if (offloadingAudio()) {
+ mAudioSink->pause();
+ mAudioSink->flush();
+ if (!mPaused) {
+ mAudioSink->start();
+ }
+ } else {
+ mAudioSink->pause();
+ mAudioSink->flush();
+ // Call stop() to signal to the AudioSink to completely fill the
+ // internal buffer before resuming playback.
+ // FIXME: this is ignored after flush().
+ mAudioSink->stop();
+ if (mPaused) {
+ // Race condition: if renderer is paused and audio sink is stopped,
+ // we need to make sure that the audio track buffer fully drains
+ // before delivering data.
+ // FIXME: remove this if we can detect if stop() is complete.
+ const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
+ mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
+ } else {
+ mAudioSink->start();
+ }
+ mNumFramesWritten = 0;
+ }
+ mNextAudioClockUpdateTimeUs = -1;
+ } else {
+ flushQueue(&mVideoQueue);
+
+ mDrainVideoQueuePending = false;
+
+ if (mVideoScheduler != NULL) {
+ mVideoScheduler->restart();
+ }
+
+ Mutex::Autolock autoLock(mLock);
+ ++mVideoDrainGeneration;
+ prepareForMediaRenderingStart_l();
+ }
+
+ mVideoSampleReceived = false;
+
+ if (notifyComplete) {
+ notifyFlushComplete(audio);
+ }
+}
+
+void NuPlayer2::Renderer::flushQueue(List<QueueEntry> *queue) {
+ while (!queue->empty()) {
+ QueueEntry *entry = &*queue->begin();
+
+ if (entry->mBuffer != NULL) {
+ entry->mNotifyConsumed->post();
+ } else if (entry->mNotifyConsumed != nullptr) {
+ // Does the audio sink need to be opened now?
+ onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
+ }
+
+ queue->erase(queue->begin());
+ entry = NULL;
+ }
+}
+
+void NuPlayer2::Renderer::notifyFlushComplete(bool audio) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatFlushComplete);
+ notify->setInt32("audio", static_cast<int32_t>(audio));
+ notify->post();
+}
+
+bool NuPlayer2::Renderer::dropBufferIfStale(
+ bool audio, const sp<AMessage> &msg) {
+ int32_t queueGeneration;
+ CHECK(msg->findInt32("queueGeneration", &queueGeneration));
+
+ if (queueGeneration == getQueueGeneration(audio)) {
+ return false;
+ }
+
+ sp<AMessage> notifyConsumed;
+ if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
+ notifyConsumed->post();
+ }
+
+ return true;
+}
+
+void NuPlayer2::Renderer::onAudioSinkChanged() {
+ if (offloadingAudio()) {
+ return;
+ }
+ CHECK(!mDrainAudioQueuePending);
+ mNumFramesWritten = 0;
+ mAnchorNumFramesWritten = -1;
+ uint32_t written;
+ if (mAudioSink->getFramesWritten(&written) == OK) {
+ mNumFramesWritten = written;
+ }
+}
+
+void NuPlayer2::Renderer::onDisableOffloadAudio() {
+ Mutex::Autolock autoLock(mLock);
+ mFlags &= ~FLAG_OFFLOAD_AUDIO;
+ ++mAudioDrainGeneration;
+ if (mAudioRenderingStartGeneration != -1) {
+ prepareForMediaRenderingStart_l();
+ }
+}
+
+void NuPlayer2::Renderer::onEnableOffloadAudio() {
+ Mutex::Autolock autoLock(mLock);
+ mFlags |= FLAG_OFFLOAD_AUDIO;
+ ++mAudioDrainGeneration;
+ if (mAudioRenderingStartGeneration != -1) {
+ prepareForMediaRenderingStart_l();
+ }
+}
+
+void NuPlayer2::Renderer::onPause() {
+ if (mPaused) {
+ return;
+ }
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ // We do not increment the audio drain generation, so that audio buffers keep filling during pause.
+ ++mVideoDrainGeneration;
+ prepareForMediaRenderingStart_l();
+ mPaused = true;
+ mMediaClock->setPlaybackRate(0.0);
+ }
+
+ mDrainAudioQueuePending = false;
+ mDrainVideoQueuePending = false;
+
+ // Note: audio data may not have been decoded, and the AudioSink may not be opened.
+ mAudioSink->pause();
+ startAudioOffloadPauseTimeout();
+
+ ALOGV("now paused audio queue has %zu entries, video has %zu entries",
+ mAudioQueue.size(), mVideoQueue.size());
+}
+
+void NuPlayer2::Renderer::onResume() {
+ if (!mPaused) {
+ return;
+ }
+
+ // Note: audio data may not have been decoded, and the AudioSink may not be opened.
+ cancelAudioOffloadPauseTimeout();
+ if (mAudioSink->ready()) {
+ status_t err = mAudioSink->start();
+ if (err != OK) {
+ ALOGE("cannot start AudioSink err %d", err);
+ notifyAudioTearDown(kDueToError);
+ }
+ }
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ mPaused = false;
+ // rendering started message may have been delayed if we were paused.
+ if (mRenderingDataDelivered) {
+ notifyIfMediaRenderingStarted_l();
+ }
+ // Configure the audio sink, as we did not do it when pausing.
+ if (mAudioSink != NULL && mAudioSink->ready()) {
+ mAudioSink->setPlaybackRate(mPlaybackSettings);
+ }
+
+ mMediaClock->setPlaybackRate(mPlaybackRate);
+
+ if (!mAudioQueue.empty()) {
+ postDrainAudioQueue_l();
+ }
+ }
+
+ if (!mVideoQueue.empty()) {
+ postDrainVideoQueue();
+ }
+}
+
+void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
+ if (mVideoScheduler == NULL) {
+ mVideoScheduler = new VideoFrameScheduler();
+ }
+ mVideoScheduler->init(fps);
+}
+
+int32_t NuPlayer2::Renderer::getQueueGeneration(bool audio) {
+ Mutex::Autolock autoLock(mLock);
+ return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
+}
+
+int32_t NuPlayer2::Renderer::getDrainGeneration(bool audio) {
+ Mutex::Autolock autoLock(mLock);
+ return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
+}
+
+bool NuPlayer2::Renderer::getSyncQueues() {
+ Mutex::Autolock autoLock(mLock);
+ return mSyncQueues;
+}
+
+void NuPlayer2::Renderer::onAudioTearDown(AudioTearDownReason reason) {
+ if (mAudioTornDown) {
+ return;
+ }
+ mAudioTornDown = true;
+
+ int64_t currentPositionUs;
+ sp<AMessage> notify = mNotify->dup();
+ if (getCurrentPosition(&currentPositionUs) == OK) {
+ notify->setInt64("positionUs", currentPositionUs);
+ }
+
+ mAudioSink->stop();
+ mAudioSink->flush();
+
+ notify->setInt32("what", kWhatAudioTearDown);
+ notify->setInt32("reason", reason);
+ notify->post();
+}
+
+void NuPlayer2::Renderer::startAudioOffloadPauseTimeout() {
+ if (offloadingAudio()) {
+ mWakeLock->acquire();
+ sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
+ msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
+ msg->post(kOffloadPauseMaxUs);
+ }
+}
+
+void NuPlayer2::Renderer::cancelAudioOffloadPauseTimeout() {
+ // We may have called startAudioOffloadPauseTimeout() without
+ // the AudioSink open and with offloadingAudio enabled.
+ //
+ // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
+ // we always release the wakelock and increment the pause timeout generation.
+ //
+ // Note: The acquired wakelock prevents the device from suspending
+ // immediately after offload pause (in case a resume happens shortly thereafter).
+ mWakeLock->release(true);
+ ++mAudioOffloadPauseTimeoutGeneration;
+}
+
+status_t NuPlayer2::Renderer::onOpenAudioSink(
+ const sp<AMessage> &format,
+ bool offloadOnly,
+ bool hasVideo,
+ uint32_t flags,
+ bool isStreaming) {
+ ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
+ offloadOnly, offloadingAudio());
+ bool audioSinkChanged = false;
+
+ int32_t numChannels;
+ CHECK(format->findInt32("channel-count", &numChannels));
+
+ int32_t channelMask;
+ if (!format->findInt32("channel-mask", &channelMask)) {
+ // signal to the AudioSink to derive the mask from count.
+ channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
+ }
+
+ int32_t sampleRate;
+ CHECK(format->findInt32("sample-rate", &sampleRate));
+
+ if (offloadingAudio()) {
+ audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+ status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
+
+ if (err != OK) {
+ ALOGE("Couldn't map mime \"%s\" to a valid "
+ "audio_format", mime.c_str());
+ onDisableOffloadAudio();
+ } else {
+ ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
+ mime.c_str(), audioFormat);
+
+ int avgBitRate = -1;
+ format->findInt32("bitrate", &avgBitRate);
+
+ int32_t aacProfile = -1;
+ if (audioFormat == AUDIO_FORMAT_AAC
+ && format->findInt32("aac-profile", &aacProfile)) {
+ // Redefine AAC format as per aac profile
+ mapAACProfileToAudioFormat(
+ audioFormat,
+ aacProfile);
+ }
+
+ audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+ offloadInfo.duration_us = -1;
+ format->findInt64(
+ "durationUs", &offloadInfo.duration_us);
+ offloadInfo.sample_rate = sampleRate;
+ offloadInfo.channel_mask = channelMask;
+ offloadInfo.format = audioFormat;
+ offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
+ offloadInfo.bit_rate = avgBitRate;
+ offloadInfo.has_video = hasVideo;
+ offloadInfo.is_streaming = isStreaming;
+
+ if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
+ ALOGV("openAudioSink: no change in offload mode");
+ // no change from previous configuration, everything ok.
+ return OK;
+ }
+ mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
+
+ ALOGV("openAudioSink: try to open AudioSink in offload mode");
+ uint32_t offloadFlags = flags;
+ offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+ offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ audioSinkChanged = true;
+ mAudioSink->close();
+
+ err = mAudioSink->open(
+ sampleRate,
+ numChannels,
+ (audio_channel_mask_t)channelMask,
+ audioFormat,
+ 0 /* bufferCount - unused */,
+ &NuPlayer2::Renderer::AudioSinkCallback,
+ this,
+ (audio_output_flags_t)offloadFlags,
+ &offloadInfo);
+
+ if (err == OK) {
+ err = mAudioSink->setPlaybackRate(mPlaybackSettings);
+ }
+
+ if (err == OK) {
+ // If the playback is offloaded to h/w, we pass
+ // the HAL some metadata information.
+ // We don't want to do this for PCM because it
+ // will be going through the AudioFlinger mixer
+ // before reaching the hardware.
+ // TODO
+ mCurrentOffloadInfo = offloadInfo;
+ if (!mPaused) { // for preview mode, don't start if paused
+ err = mAudioSink->start();
+ }
+ ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
+ }
+ if (err != OK) {
+ // Clean up, fall back to non offload mode.
+ mAudioSink->close();
+ onDisableOffloadAudio();
+ mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
+ ALOGV("openAudioSink: offload failed");
+ if (offloadOnly) {
+ notifyAudioTearDown(kForceNonOffload);
+ }
+ } else {
+ mUseAudioCallback = true; // offload mode transfers data through callback
+ ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
+ }
+ }
+ }
+ if (!offloadOnly && !offloadingAudio()) {
+ ALOGV("openAudioSink: open AudioSink in NON-offload mode");
+ uint32_t pcmFlags = flags;
+ pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+
+ const PcmInfo info = {
+ (audio_channel_mask_t)channelMask,
+ (audio_output_flags_t)pcmFlags,
+ AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
+ numChannels,
+ sampleRate
+ };
+ if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
+ ALOGV("openAudioSink: no change in pcm mode");
+ // no change from previous configuration, everything ok.
+ return OK;
+ }
+
+ audioSinkChanged = true;
+ mAudioSink->close();
+ mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
+ // Note: It is possible to set up the callback, but not use it to send audio data.
+ // This requires a fix in AudioSink to explicitly specify the transfer mode.
+ mUseAudioCallback = getUseAudioCallbackSetting();
+ if (mUseAudioCallback) {
+ ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
+ }
+
+ // Compute the desired buffer size.
+ // For callback mode, the amount of time before wakeup is about half the buffer size.
+ const uint32_t frameCount =
+ (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
+
+ // Setting doNotReconnect means the AudioSink will signal back and let NuPlayer2 re-construct the
+ // AudioSink. We don't want this when there's video because it will cause a video seek to
+ // the previous I frame. But we do want this when there's only audio because it will give
+ // NuPlayer2 a chance to switch from non-offload mode to offload mode.
+ // So we only set doNotReconnect when there's no video.
+ const bool doNotReconnect = !hasVideo;
+
+ // We should always be able to set our playback settings if the sink is closed.
+ LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
+ "onOpenAudioSink: can't set playback rate on closed sink");
+ status_t err = mAudioSink->open(
+ sampleRate,
+ numChannels,
+ (audio_channel_mask_t)channelMask,
+ AUDIO_FORMAT_PCM_16_BIT,
+ 0 /* bufferCount - unused */,
+ mUseAudioCallback ? &NuPlayer2::Renderer::AudioSinkCallback : NULL,
+ mUseAudioCallback ? this : NULL,
+ (audio_output_flags_t)pcmFlags,
+ NULL,
+ doNotReconnect,
+ frameCount);
+ if (err != OK) {
+ ALOGW("openAudioSink: non offloaded open failed status: %d", err);
+ mAudioSink->close();
+ mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
+ return err;
+ }
+ mCurrentPcmInfo = info;
+ if (!mPaused) { // for preview mode, don't start if paused
+ mAudioSink->start();
+ }
+ }
+ if (audioSinkChanged) {
+ onAudioSinkChanged();
+ }
+ mAudioTornDown = false;
+ return OK;
+}
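+
+// Example of the PCM buffer sizing above, with hypothetical values: at a sample rate of
+// 48000 and an audio-sink PCM setting of 500 ms, frameCount = 48000 * 500 / 1000 =
+// 24000 frames. In callback mode the wakeup interval is then roughly half of that
+// buffer, i.e. about 250 ms between refills.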
+
+void NuPlayer2::Renderer::onCloseAudioSink() {
+ mAudioSink->close();
+ mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
+ mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
+}
+
+void NuPlayer2::Renderer::onChangeAudioFormat(
+ const sp<AMessage> &meta, const sp<AMessage> &notify) {
+ sp<AMessage> format;
+ CHECK(meta->findMessage("format", &format));
+
+ int32_t offloadOnly;
+ CHECK(meta->findInt32("offload-only", &offloadOnly));
+
+ int32_t hasVideo;
+ CHECK(meta->findInt32("has-video", &hasVideo));
+
+ uint32_t flags;
+ CHECK(meta->findInt32("flags", (int32_t *)&flags));
+
+ uint32_t isStreaming;
+ CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
+
+ status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
+
+ if (err != OK) {
+ notify->setInt32("err", err);
+ }
+ notify->post();
+}
+
+} // namespace android
+
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
new file mode 100644
index 0000000..62cf0d8
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NUPLAYER2_RENDERER_H_
+
+#define NUPLAYER2_RENDERER_H_
+
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
+
+#include "NuPlayer2.h"
+
+namespace android {
+
+class JWakeLock;
+struct MediaClock;
+class MediaCodecBuffer;
+struct VideoFrameScheduler;
+
+struct NuPlayer2::Renderer : public AHandler {
+ enum Flags {
+ FLAG_REAL_TIME = 1,
+ FLAG_OFFLOAD_AUDIO = 2,
+ };
+ Renderer(const sp<MediaPlayer2Interface::AudioSink> &sink,
+ const sp<MediaClock> &mediaClock,
+ const sp<AMessage> &notify,
+ uint32_t flags = 0);
+
+ static size_t AudioSinkCallback(
+ MediaPlayer2Interface::AudioSink *audioSink,
+ void *data, size_t size, void *me,
+ MediaPlayer2Interface::AudioSink::cb_event_t event);
+
+ void queueBuffer(
+ bool audio,
+ const sp<MediaCodecBuffer> &buffer,
+ const sp<AMessage> &notifyConsumed);
+
+ void queueEOS(bool audio, status_t finalResult);
+
+ status_t setPlaybackSettings(const AudioPlaybackRate &rate /* sanitized */);
+ status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+ status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
+ status_t getSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
+ void flush(bool audio, bool notifyComplete);
+
+ void signalTimeDiscontinuity();
+
+ void signalDisableOffloadAudio();
+ void signalEnableOffloadAudio();
+
+ void pause();
+ void resume();
+
+ void setVideoFrameRate(float fps);
+
+ status_t getCurrentPosition(int64_t *mediaUs);
+ int64_t getVideoLateByUs();
+
+ status_t openAudioSink(
+ const sp<AMessage> &format,
+ bool offloadOnly,
+ bool hasVideo,
+ uint32_t flags,
+ bool *isOffloaded,
+ bool isStreaming);
+ void closeAudioSink();
+
+ // Re-open the audio sink after all pending audio buffers have played out.
+ void changeAudioFormat(
+ const sp<AMessage> &format,
+ bool offloadOnly,
+ bool hasVideo,
+ uint32_t flags,
+ bool isStreaming,
+ const sp<AMessage> &notify);
+
+ enum {
+ kWhatEOS = 'eos ',
+ kWhatFlushComplete = 'fluC',
+ kWhatPosition = 'posi',
+ kWhatVideoRenderingStart = 'vdrd',
+ kWhatMediaRenderingStart = 'mdrd',
+ kWhatAudioTearDown = 'adTD',
+ kWhatAudioOffloadPauseTimeout = 'aOPT',
+ };
+
+ enum AudioTearDownReason {
+ kDueToError = 0, // Could restart with either offload or non-offload.
+ kDueToTimeout,
+ kForceNonOffload, // Restart only with non-offload.
+ };
+
+protected:
+ virtual ~Renderer();
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+ enum {
+ kWhatDrainAudioQueue = 'draA',
+ kWhatDrainVideoQueue = 'draV',
+ kWhatPostDrainVideoQueue = 'pDVQ',
+ kWhatQueueBuffer = 'queB',
+ kWhatQueueEOS = 'qEOS',
+ kWhatConfigPlayback = 'cfPB',
+ kWhatConfigSync = 'cfSy',
+ kWhatGetPlaybackSettings = 'gPbS',
+ kWhatGetSyncSettings = 'gSyS',
+ kWhatFlush = 'flus',
+ kWhatPause = 'paus',
+ kWhatResume = 'resm',
+ kWhatOpenAudioSink = 'opnA',
+ kWhatCloseAudioSink = 'clsA',
+ kWhatChangeAudioFormat = 'chgA',
+ kWhatStopAudioSink = 'stpA',
+ kWhatDisableOffloadAudio = 'noOA',
+ kWhatEnableOffloadAudio = 'enOA',
+ kWhatSetVideoFrameRate = 'sVFR',
+ };
+
+ // if mBuffer != nullptr, it's a buffer containing real data.
+ // else if mNotifyConsumed == nullptr, it's EOS.
+ // else it's a tag for re-opening audio sink in different format.
+ struct QueueEntry {
+ sp<MediaCodecBuffer> mBuffer;
+ sp<AMessage> mMeta;
+ sp<AMessage> mNotifyConsumed;
+ size_t mOffset;
+ status_t mFinalResult;
+ int32_t mBufferOrdinal;
+ };
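+
+ // Minimal sketch (not part of this header) of how the three entry kinds described
+ // above could be told apart:
+ //
+ //     enum class EntryKind { DATA, END_OF_STREAM, FORMAT_CHANGE };
+ //     static EntryKind kindOf(const QueueEntry &e) {
+ //         if (e.mBuffer != nullptr) return EntryKind::DATA;
+ //         if (e.mNotifyConsumed == nullptr) return EntryKind::END_OF_STREAM;
+ //         return EntryKind::FORMAT_CHANGE;
+ //     }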
+
+ static const int64_t kMinPositionUpdateDelayUs;
+
+ sp<MediaPlayer2Interface::AudioSink> mAudioSink;
+ bool mUseVirtualAudioSink;
+ sp<AMessage> mNotify;
+ Mutex mLock;
+ uint32_t mFlags;
+ List<QueueEntry> mAudioQueue;
+ List<QueueEntry> mVideoQueue;
+ uint32_t mNumFramesWritten;
+ sp<VideoFrameScheduler> mVideoScheduler;
+
+ bool mDrainAudioQueuePending;
+ bool mDrainVideoQueuePending;
+ int32_t mAudioQueueGeneration;
+ int32_t mVideoQueueGeneration;
+ int32_t mAudioDrainGeneration;
+ int32_t mVideoDrainGeneration;
+ int32_t mAudioEOSGeneration;
+
+ const sp<MediaClock> mMediaClock;
+ float mPlaybackRate; // audio track rate
+
+ AudioPlaybackRate mPlaybackSettings;
+ AVSyncSettings mSyncSettings;
+ float mVideoFpsHint;
+
+ int64_t mAudioFirstAnchorTimeMediaUs;
+ int64_t mAnchorTimeMediaUs;
+ int64_t mAnchorNumFramesWritten;
+ int64_t mVideoLateByUs;
+ int64_t mNextVideoTimeMediaUs;
+ bool mHasAudio;
+ bool mHasVideo;
+
+ bool mNotifyCompleteAudio;
+ bool mNotifyCompleteVideo;
+
+ bool mSyncQueues;
+
+ // modified only on the renderer's thread.
+ bool mPaused;
+ int64_t mPauseDrainAudioAllowedUs; // time when we can drain/deliver audio in pause mode.
+
+ bool mVideoSampleReceived;
+ bool mVideoRenderingStarted;
+ int32_t mVideoRenderingStartGeneration;
+ int32_t mAudioRenderingStartGeneration;
+ bool mRenderingDataDelivered;
+
+ int64_t mNextAudioClockUpdateTimeUs;
+ // the media timestamp of the last audio sample right before EOS.
+ int64_t mLastAudioMediaTimeUs;
+
+ int32_t mAudioOffloadPauseTimeoutGeneration;
+ bool mAudioTornDown;
+ audio_offload_info_t mCurrentOffloadInfo;
+
+ struct PcmInfo {
+ audio_channel_mask_t mChannelMask;
+ audio_output_flags_t mFlags;
+ audio_format_t mFormat;
+ int32_t mNumChannels;
+ int32_t mSampleRate;
+ };
+ PcmInfo mCurrentPcmInfo;
+ static const PcmInfo AUDIO_PCMINFO_INITIALIZER;
+
+ int32_t mTotalBuffersQueued;
+ int32_t mLastAudioBufferDrained;
+ bool mUseAudioCallback;
+
+ sp<JWakeLock> mWakeLock;
+
+ status_t getCurrentPositionOnLooper(int64_t *mediaUs);
+ status_t getCurrentPositionOnLooper(
+ int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo = false);
+ bool getCurrentPositionIfPaused_l(int64_t *mediaUs);
+ status_t getCurrentPositionFromAnchor(
+ int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo = false);
+
+ void notifyEOSCallback();
+ size_t fillAudioBuffer(void *buffer, size_t size);
+
+ bool onDrainAudioQueue();
+ void drainAudioQueueUntilLastEOS();
+ int64_t getPendingAudioPlayoutDurationUs(int64_t nowUs);
+ void postDrainAudioQueue_l(int64_t delayUs = 0);
+
+ void clearAnchorTime();
+ void clearAudioFirstAnchorTime_l();
+ void setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs);
+ void setVideoLateByUs(int64_t lateUs);
+
+ void onNewAudioMediaTime(int64_t mediaTimeUs);
+ int64_t getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs);
+
+ void onDrainVideoQueue();
+ void postDrainVideoQueue();
+
+ void prepareForMediaRenderingStart_l();
+ void notifyIfMediaRenderingStarted_l();
+
+ void onQueueBuffer(const sp<AMessage> &msg);
+ void onQueueEOS(const sp<AMessage> &msg);
+ void onFlush(const sp<AMessage> &msg);
+ void onAudioSinkChanged();
+ void onDisableOffloadAudio();
+ void onEnableOffloadAudio();
+ status_t onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */);
+ status_t onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+ status_t onConfigSync(const AVSyncSettings &sync, float videoFpsHint);
+ status_t onGetSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
+ void onPause();
+ void onResume();
+ void onSetVideoFrameRate(float fps);
+ int32_t getQueueGeneration(bool audio);
+ int32_t getDrainGeneration(bool audio);
+ bool getSyncQueues();
+ void onAudioTearDown(AudioTearDownReason reason);
+ status_t onOpenAudioSink(
+ const sp<AMessage> &format,
+ bool offloadOnly,
+ bool hasVideo,
+ uint32_t flags,
+ bool isStreaming);
+ void onCloseAudioSink();
+ void onChangeAudioFormat(const sp<AMessage> &meta, const sp<AMessage> &notify);
+
+ void notifyEOS(bool audio, status_t finalResult, int64_t delayUs = 0);
+ void notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs = 0);
+ void notifyFlushComplete(bool audio);
+ void notifyPosition();
+ void notifyVideoLateBy(int64_t lateByUs);
+ void notifyVideoRenderingStart();
+ void notifyAudioTearDown(AudioTearDownReason reason);
+
+ void flushQueue(List<QueueEntry> *queue);
+ bool dropBufferIfStale(bool audio, const sp<AMessage> &msg);
+ void syncQueuesDone_l();
+
+ bool offloadingAudio() const { return (mFlags & FLAG_OFFLOAD_AUDIO) != 0; }
+
+ void startAudioOffloadPauseTimeout();
+ void cancelAudioOffloadPauseTimeout();
+
+ int64_t getDurationUsIfPlayedAtSampleRate(uint32_t numFrames);
+
+ DISALLOW_EVIL_CONSTRUCTORS(Renderer);
+};
+
+} // namespace android
+
+#endif // NUPLAYER2_RENDERER_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Source.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Source.h
new file mode 100644
index 0000000..662235f
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Source.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NUPLAYER2_SOURCE_H_
+
+#define NUPLAYER2_SOURCE_H_
+
+#include "NuPlayer2.h"
+
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MetaData.h>
+#include <mediaplayer2/mediaplayer2.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+struct ABuffer;
+struct AMediaCryptoWrapper;
+class MediaBuffer;
+
+struct NuPlayer2::Source : public AHandler {
+ enum Flags {
+ FLAG_CAN_PAUSE = 1,
+ FLAG_CAN_SEEK_BACKWARD = 2, // the "10 sec back button"
+ FLAG_CAN_SEEK_FORWARD = 4, // the "10 sec forward button"
+ FLAG_CAN_SEEK = 8, // the "seek bar"
+ FLAG_DYNAMIC_DURATION = 16,
+ FLAG_SECURE = 32, // Secure codec is required.
+ FLAG_PROTECTED = 64, // The screen needs to be protected (screenshot is disabled).
+ };
+
+ enum {
+ kWhatPrepared,
+ kWhatFlagsChanged,
+ kWhatVideoSizeChanged,
+ kWhatBufferingUpdate,
+ kWhatPauseOnBufferingStart,
+ kWhatResumeOnBufferingEnd,
+ kWhatCacheStats,
+ kWhatSubtitleData,
+ kWhatTimedTextData,
+ kWhatTimedMetaData,
+ kWhatQueueDecoderShutdown,
+ kWhatDrmNoLicense,
+ // Modular DRM
+ kWhatDrmInfo,
+ };
+
+ // The provided notify message is used to notify the player about various
+ // events.
+ explicit Source(const sp<AMessage> &notify)
+ : mNotify(notify) {
+ }
+
+ virtual status_t getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) = 0;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
+
+ virtual void prepareAsync() = 0;
+
+ virtual void start() = 0;
+ virtual void stop() {}
+ virtual void pause() {}
+ virtual void resume() {}
+
+ // Explicitly disconnect the underlying data source
+ virtual void disconnect() {}
+
+ // Returns OK iff more data was available,
+ // an error or ERROR_END_OF_STREAM if not.
+ virtual status_t feedMoreTSData() = 0;
+
+ // Returns a non-NULL format when the specified track exists.
+ // When the format has "err" set to -EWOULDBLOCK, the source needs more time to get valid metadata.
+ // Returns NULL if the specified track doesn't exist or is invalid.
+ virtual sp<AMessage> getFormat(bool audio);
+
+ virtual sp<MetaData> getFormatMeta(bool /* audio */) { return NULL; }
+ virtual sp<MetaData> getFileFormatMeta() const { return NULL; }
+
+ virtual status_t dequeueAccessUnit(
+ bool audio, sp<ABuffer> *accessUnit) = 0;
+
+ virtual status_t getDuration(int64_t * /* durationUs */) {
+ return INVALID_OPERATION;
+ }
+
+ virtual size_t getTrackCount() const {
+ return 0;
+ }
+
+ virtual sp<AMessage> getTrackInfo(size_t /* trackIndex */) const {
+ return NULL;
+ }
+
+ virtual ssize_t getSelectedTrack(media_track_type /* type */) const {
+ return INVALID_OPERATION;
+ }
+
+ virtual status_t selectTrack(size_t /* trackIndex */, bool /* select */, int64_t /* timeUs*/) {
+ return INVALID_OPERATION;
+ }
+
+ virtual status_t seekTo(
+ int64_t /* seekTimeUs */,
+ MediaPlayer2SeekMode /* mode */ = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) {
+ return INVALID_OPERATION;
+ }
+
+ virtual bool isRealTime() const {
+ return false;
+ }
+
+ virtual bool isStreaming() const {
+ return true;
+ }
+
+ virtual void setOffloadAudio(bool /* offload */) {}
+
+ // Modular DRM
+ virtual status_t prepareDrm(
+ const uint8_t /* uuid */[16], const Vector<uint8_t> & /* drmSessionId */,
+ sp<AMediaCryptoWrapper> * /* crypto */) {
+ return INVALID_OPERATION;
+ }
+
+ virtual status_t releaseDrm() {
+ return INVALID_OPERATION;
+ }
+
+protected:
+ virtual ~Source() {}
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+ sp<AMessage> dupNotify() const { return mNotify->dup(); }
+
+ void notifyFlagsChanged(uint32_t flags);
+ void notifyVideoSizeChanged(const sp<AMessage> &format = NULL);
+ void notifyPrepared(status_t err = OK);
+ // Modular DRM
+ void notifyDrmInfo(const sp<ABuffer> &buffer);
+
+private:
+ sp<AMessage> mNotify;
+
+ DISALLOW_EVIL_CONSTRUCTORS(Source);
+};
+
+} // namespace android
+
+#endif // NUPLAYER2_SOURCE_H_
+
diff --git a/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp b/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
new file mode 100644
index 0000000..1dfe383
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
@@ -0,0 +1,902 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RTSPSource2"
+#include <utils/Log.h>
+
+#include "RTSPSource2.h"
+
+#include "AnotherPacketSource.h"
+#include "MyHandler.h"
+#include "SDPLoader.h"
+
+#include <media/MediaHTTPService.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
+
+// Default Buffer Underflow/Prepare/StartServer/Overflow Marks
+static const int kUnderflowMarkMs = 1000; // 1 second
+static const int kPrepareMarkMs = 3000; // 3 seconds
+//static const int kStartServerMarkMs = 5000;
+static const int kOverflowMarkMs = 10000; // 10 seconds
+
+NuPlayer2::RTSPSource2::RTSPSource2(
+ const sp<AMessage> &notify,
+ const sp<MediaHTTPService> &httpService,
+ const char *url,
+ const KeyedVector<String8, String8> *headers,
+ uid_t uid,
+ bool isSDP)
+ : Source(notify),
+ mHTTPService(httpService),
+ mURL(url),
+ mUID(uid),
+ mFlags(0),
+ mIsSDP(isSDP),
+ mState(DISCONNECTED),
+ mFinalResult(OK),
+ mDisconnectReplyID(0),
+ mBuffering(false),
+ mInPreparationPhase(true),
+ mEOSPending(false),
+ mSeekGeneration(0),
+ mEOSTimeoutAudio(0),
+ mEOSTimeoutVideo(0) {
+ mBufferingSettings.mInitialMarkMs = kPrepareMarkMs;
+ mBufferingSettings.mResumePlaybackMarkMs = kOverflowMarkMs;
+ if (headers) {
+ mExtraHeaders = *headers;
+
+ ssize_t index =
+ mExtraHeaders.indexOfKey(String8("x-hide-urls-from-log"));
+
+ if (index >= 0) {
+ mFlags |= kFlagIncognito;
+
+ mExtraHeaders.removeItemsAt(index);
+ }
+ }
+}
+
+NuPlayer2::RTSPSource2::~RTSPSource2() {
+ if (mLooper != NULL) {
+ mLooper->unregisterHandler(id());
+ mLooper->stop();
+ }
+}
+
+status_t NuPlayer2::RTSPSource2::getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) {
+ Mutex::Autolock _l(mBufferingSettingsLock);
+ *buffering = mBufferingSettings;
+ return OK;
+}
+
+status_t NuPlayer2::RTSPSource2::setBufferingSettings(const BufferingSettings& buffering) {
+ Mutex::Autolock _l(mBufferingSettingsLock);
+ mBufferingSettings = buffering;
+ return OK;
+}
+
+void NuPlayer2::RTSPSource2::prepareAsync() {
+ if (mIsSDP && mHTTPService == NULL) {
+ notifyPrepared(BAD_VALUE);
+ return;
+ }
+
+ if (mLooper == NULL) {
+ mLooper = new ALooper;
+ mLooper->setName("rtsp");
+ mLooper->start();
+
+ mLooper->registerHandler(this);
+ }
+
+ CHECK(mHandler == NULL);
+ CHECK(mSDPLoader == NULL);
+
+ sp<AMessage> notify = new AMessage(kWhatNotify, this);
+
+ CHECK_EQ(mState, (int)DISCONNECTED);
+ mState = CONNECTING;
+
+ if (mIsSDP) {
+ mSDPLoader = new SDPLoader(notify,
+ (mFlags & kFlagIncognito) ? SDPLoader::kFlagIncognito : 0,
+ mHTTPService);
+
+ mSDPLoader->load(
+ mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
+ } else {
+ mHandler = new MyHandler(mURL.c_str(), notify, true /* uidValid */, mUID);
+ mLooper->registerHandler(mHandler);
+
+ mHandler->connect();
+ }
+
+ startBufferingIfNecessary();
+}
+
+void NuPlayer2::RTSPSource2::start() {
+}
+
+void NuPlayer2::RTSPSource2::stop() {
+ if (mLooper == NULL) {
+ return;
+ }
+ sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
+
+ sp<AMessage> dummy;
+ msg->postAndAwaitResponse(&dummy);
+}
+
+status_t NuPlayer2::RTSPSource2::feedMoreTSData() {
+ Mutex::Autolock _l(mBufferingLock);
+ return mFinalResult;
+}
+
+sp<MetaData> NuPlayer2::RTSPSource2::getFormatMeta(bool audio) {
+ sp<AnotherPacketSource> source = getSource(audio);
+
+ if (source == NULL) {
+ return NULL;
+ }
+
+ return source->getFormat();
+}
+
+bool NuPlayer2::RTSPSource2::haveSufficientDataOnAllTracks() {
+ // We're going to buffer at least 2 secs worth of data on all tracks before
+ // starting playback (both at startup and after a seek).
+
+ static const int64_t kMinDurationUs = 2000000ll;
+
+ int64_t mediaDurationUs = 0;
+ getDuration(&mediaDurationUs);
+ if ((mAudioTrack != NULL && mAudioTrack->isFinished(mediaDurationUs))
+ || (mVideoTrack != NULL && mVideoTrack->isFinished(mediaDurationUs))) {
+ return true;
+ }
+
+ status_t err;
+ int64_t durationUs;
+ if (mAudioTrack != NULL
+ && (durationUs = mAudioTrack->getBufferedDurationUs(&err))
+ < kMinDurationUs
+ && err == OK) {
+ ALOGV("audio track doesn't have enough data yet. (%.2f secs buffered)",
+ durationUs / 1E6);
+ return false;
+ }
+
+ if (mVideoTrack != NULL
+ && (durationUs = mVideoTrack->getBufferedDurationUs(&err))
+ < kMinDurationUs
+ && err == OK) {
+ ALOGV("video track doesn't have enough data yet. (%.2f secs buffered)",
+ durationUs / 1E6);
+ return false;
+ }
+
+ return true;
+}
+
+status_t NuPlayer2::RTSPSource2::dequeueAccessUnit(
+ bool audio, sp<ABuffer> *accessUnit) {
+ if (!stopBufferingIfNecessary()) {
+ return -EWOULDBLOCK;
+ }
+
+ sp<AnotherPacketSource> source = getSource(audio);
+
+ if (source == NULL) {
+ return -EWOULDBLOCK;
+ }
+
+ status_t finalResult;
+ if (!source->hasBufferAvailable(&finalResult)) {
+ if (finalResult == OK) {
+
+ // If the other source already signaled EOS, this source should also return EOS.
+ if (sourceReachedEOS(!audio)) {
+ return ERROR_END_OF_STREAM;
+ }
+
+ // If this source has detected near end, give it some time to retrieve more
+ // data before returning EOS
+ int64_t mediaDurationUs = 0;
+ getDuration(&mediaDurationUs);
+ if (source->isFinished(mediaDurationUs)) {
+ int64_t eosTimeout = audio ? mEOSTimeoutAudio : mEOSTimeoutVideo;
+ if (eosTimeout == 0) {
+ setEOSTimeout(audio, ALooper::GetNowUs());
+ } else if ((ALooper::GetNowUs() - eosTimeout) > kNearEOSTimeoutUs) {
+ setEOSTimeout(audio, 0);
+ return ERROR_END_OF_STREAM;
+ }
+ return -EWOULDBLOCK;
+ }
+
+ if (!sourceNearEOS(!audio)) {
+ // We should not enter buffering mode
+ // if any of the sources has already detected EOS.
+ startBufferingIfNecessary();
+ }
+
+ return -EWOULDBLOCK;
+ }
+ return finalResult;
+ }
+
+ setEOSTimeout(audio, 0);
+
+ return source->dequeueAccessUnit(accessUnit);
+}
+
+sp<AnotherPacketSource> NuPlayer2::RTSPSource2::getSource(bool audio) {
+ if (mTSParser != NULL) {
+ sp<MediaSource> source = mTSParser->getSource(
+ audio ? ATSParser::AUDIO : ATSParser::VIDEO);
+
+ return static_cast<AnotherPacketSource *>(source.get());
+ }
+
+ return audio ? mAudioTrack : mVideoTrack;
+}
+
+void NuPlayer2::RTSPSource2::setEOSTimeout(bool audio, int64_t timeout) {
+ if (audio) {
+ mEOSTimeoutAudio = timeout;
+ } else {
+ mEOSTimeoutVideo = timeout;
+ }
+}
+
+status_t NuPlayer2::RTSPSource2::getDuration(int64_t *durationUs) {
+ *durationUs = -1ll;
+
+ int64_t audioDurationUs;
+ if (mAudioTrack != NULL
+ && mAudioTrack->getFormat()->findInt64(
+ kKeyDuration, &audioDurationUs)
+ && audioDurationUs > *durationUs) {
+ *durationUs = audioDurationUs;
+ }
+
+ int64_t videoDurationUs;
+ if (mVideoTrack != NULL
+ && mVideoTrack->getFormat()->findInt64(
+ kKeyDuration, &videoDurationUs)
+ && videoDurationUs > *durationUs) {
+ *durationUs = videoDurationUs;
+ }
+
+ return OK;
+}
+
+status_t NuPlayer2::RTSPSource2::seekTo(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
+ sp<AMessage> msg = new AMessage(kWhatPerformSeek, this);
+ msg->setInt32("generation", ++mSeekGeneration);
+ msg->setInt64("timeUs", seekTimeUs);
+ msg->setInt32("mode", mode);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+
+ return err;
+}
+
+void NuPlayer2::RTSPSource2::performSeek(int64_t seekTimeUs) {
+ if (mState != CONNECTED) {
+ finishSeek(INVALID_OPERATION);
+ return;
+ }
+
+ mState = SEEKING;
+ mHandler->seek(seekTimeUs);
+ mEOSPending = false;
+}
+
+void NuPlayer2::RTSPSource2::schedulePollBuffering() {
+ sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
+ msg->post(1000000ll); // 1 second intervals
+}
+
+void NuPlayer2::RTSPSource2::checkBuffering(
+ bool *prepared, bool *underflow, bool *overflow, bool *startServer, bool *finished) {
+ size_t numTracks = mTracks.size();
+ size_t preparedCount, underflowCount, overflowCount, startCount, finishedCount;
+ preparedCount = underflowCount = overflowCount = startCount = finishedCount = 0;
+
+ size_t count = numTracks;
+ for (size_t i = 0; i < count; ++i) {
+ status_t finalResult;
+ TrackInfo *info = &mTracks.editItemAt(i);
+ sp<AnotherPacketSource> src = info->mSource;
+ if (src == NULL) {
+ --numTracks;
+ continue;
+ }
+ int64_t bufferedDurationUs = src->getBufferedDurationUs(&finalResult);
+
+ int64_t initialMarkUs;
+ int64_t maxRebufferingMarkUs;
+ {
+ Mutex::Autolock _l(mBufferingSettingsLock);
+ initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000ll;
+ // TODO: maxRebufferingMarkUs could be larger than
+ // mBufferingSettings.mResumePlaybackMarkMs * 1000ll.
+ maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000ll;
+ }
+ // isFinished() with a duration of 0 checks only for an EOS result.
+ if (bufferedDurationUs > initialMarkUs
+ || src->isFinished(/* duration */ 0)) {
+ ++preparedCount;
+ }
+
+ if (src->isFinished(/* duration */ 0)) {
+ ++overflowCount;
+ ++finishedCount;
+ } else {
+ // TODO: redefine kUnderflowMarkMs to a fair value,
+ if (bufferedDurationUs < kUnderflowMarkMs * 1000) {
+ ++underflowCount;
+ }
+ if (bufferedDurationUs > maxRebufferingMarkUs) {
+ ++overflowCount;
+ }
+ int64_t startServerMarkUs =
+ (kUnderflowMarkMs * 1000ll + maxRebufferingMarkUs) / 2;
+ if (bufferedDurationUs < startServerMarkUs) {
+ ++startCount;
+ }
+ }
+ }
+
+ *prepared = (preparedCount == numTracks);
+ *underflow = (underflowCount > 0);
+ *overflow = (overflowCount == numTracks);
+ *startServer = (startCount > 0);
+ *finished = (finishedCount > 0);
+}
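+
+// With the default marks defined near the top of this file (underflow 1 s, prepare 3 s,
+// overflow/resume 10 s), the derived start-server mark above works out to
+// (1,000,000 + 10,000,000) / 2 = 5,500,000 us: the handler is asked to resume streaming
+// once any track drops below ~5.5 s of buffered data, and to pause once every track
+// (or a finished track) exceeds the 10 s overflow mark.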
+
+void NuPlayer2::RTSPSource2::onPollBuffering() {
+ bool prepared, underflow, overflow, startServer, finished;
+ checkBuffering(&prepared, &underflow, &overflow, &startServer, &finished);
+
+ if (prepared && mInPreparationPhase) {
+ mInPreparationPhase = false;
+ notifyPrepared();
+ }
+
+ if (!mInPreparationPhase && underflow) {
+ startBufferingIfNecessary();
+ }
+
+ if (haveSufficientDataOnAllTracks()) {
+ stopBufferingIfNecessary();
+ }
+
+ if (overflow && mHandler != NULL) {
+ mHandler->pause();
+ }
+
+ if (startServer && mHandler != NULL) {
+ mHandler->resume();
+ }
+
+ if (finished && mHandler != NULL) {
+ mHandler->cancelAccessUnitTimeoutCheck();
+ }
+
+ schedulePollBuffering();
+}
+
+void NuPlayer2::RTSPSource2::signalSourceEOS(status_t result) {
+ const bool audio = true;
+ const bool video = false;
+
+ sp<AnotherPacketSource> source = getSource(audio);
+ if (source != NULL) {
+ source->signalEOS(result);
+ }
+
+ source = getSource(video);
+ if (source != NULL) {
+ source->signalEOS(result);
+ }
+}
+
+bool NuPlayer2::RTSPSource2::sourceReachedEOS(bool audio) {
+ sp<AnotherPacketSource> source = getSource(audio);
+ status_t finalResult;
+ return (source != NULL &&
+ !source->hasBufferAvailable(&finalResult) &&
+ finalResult == ERROR_END_OF_STREAM);
+}
+
+bool NuPlayer2::RTSPSource2::sourceNearEOS(bool audio) {
+ sp<AnotherPacketSource> source = getSource(audio);
+ int64_t mediaDurationUs = 0;
+ getDuration(&mediaDurationUs);
+ return (source != NULL && source->isFinished(mediaDurationUs));
+}
+
+void NuPlayer2::RTSPSource2::onSignalEOS(const sp<AMessage> &msg) {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+
+ if (generation != mSeekGeneration) {
+ return;
+ }
+
+ if (mEOSPending) {
+ signalSourceEOS(ERROR_END_OF_STREAM);
+ mEOSPending = false;
+ }
+}
+
+void NuPlayer2::RTSPSource2::postSourceEOSIfNecessary() {
+ const bool audio = true;
+ const bool video = false;
+ // If a source has detected near end, give it some time to retrieve more
+ // data before signaling EOS
+ if (sourceNearEOS(audio) || sourceNearEOS(video)) {
+ if (!mEOSPending) {
+ sp<AMessage> msg = new AMessage(kWhatSignalEOS, this);
+ msg->setInt32("generation", mSeekGeneration);
+ msg->post(kNearEOSTimeoutUs);
+ mEOSPending = true;
+ }
+ }
+}
+
+void NuPlayer2::RTSPSource2::onMessageReceived(const sp<AMessage> &msg) {
+ if (msg->what() == kWhatDisconnect) {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ mDisconnectReplyID = replyID;
+ finishDisconnectIfPossible();
+ return;
+ } else if (msg->what() == kWhatPerformSeek) {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+ CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
+
+ if (generation != mSeekGeneration) {
+ // obsolete.
+ finishSeek(OK);
+ return;
+ }
+
+ int64_t seekTimeUs;
+ int32_t mode;
+ CHECK(msg->findInt64("timeUs", &seekTimeUs));
+ CHECK(msg->findInt32("mode", &mode));
+
+ // TODO: add "mode" to performSeek.
+ performSeek(seekTimeUs/*, (MediaPlayer2SeekMode)mode */);
+ return;
+ } else if (msg->what() == kWhatPollBuffering) {
+ onPollBuffering();
+ return;
+ } else if (msg->what() == kWhatSignalEOS) {
+ onSignalEOS(msg);
+ return;
+ }
+
+ CHECK_EQ(msg->what(), kWhatNotify);
+
+ int32_t what;
+ CHECK(msg->findInt32("what", &what));
+
+ switch (what) {
+ case MyHandler::kWhatConnected:
+ {
+ onConnected();
+
+ notifyVideoSizeChanged();
+
+ uint32_t flags = 0;
+
+ if (mHandler->isSeekable()) {
+ flags = FLAG_CAN_PAUSE
+ | FLAG_CAN_SEEK
+ | FLAG_CAN_SEEK_BACKWARD
+ | FLAG_CAN_SEEK_FORWARD;
+ }
+
+ notifyFlagsChanged(flags);
+ schedulePollBuffering();
+ break;
+ }
+
+ case MyHandler::kWhatDisconnected:
+ {
+ onDisconnected(msg);
+ break;
+ }
+
+ case MyHandler::kWhatSeekDone:
+ {
+ mState = CONNECTED;
+ // Unblock seekTo here in case we attempted to seek in a live stream
+ finishSeek(OK);
+ break;
+ }
+
+ case MyHandler::kWhatSeekPaused:
+ {
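+ // A pause issued before seeking has completed. Flush any queued access
+ // units on both tracks with a discard discontinuity so stale data isn't
+ // rendered, then continue the seek (or report the error).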
+ sp<AnotherPacketSource> source = getSource(true /* audio */);
+ if (source != NULL) {
+ source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
+ /* extra */ NULL,
+ /* discard */ true);
+ }
+ source = getSource(false /* video */);
+ if (source != NULL) {
+ source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
+ /* extra */ NULL,
+ /* discard */ true);
+ }
+
+ status_t err = OK;
+ msg->findInt32("err", &err);
+
+ if (err == OK) {
+ int64_t timeUs;
+ CHECK(msg->findInt64("time", &timeUs));
+ mHandler->continueSeekAfterPause(timeUs);
+ } else {
+ finishSeek(err);
+ }
+ break;
+ }
+
+ case MyHandler::kWhatAccessUnit:
+ {
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+
+ if (mTSParser == NULL) {
+ CHECK_LT(trackIndex, mTracks.size());
+ } else {
+ CHECK_EQ(trackIndex, 0u);
+ }
+
+ sp<ABuffer> accessUnit;
+ CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+ int32_t damaged;
+ if (accessUnit->meta()->findInt32("damaged", &damaged)
+ && damaged) {
+ ALOGI("dropping damaged access unit.");
+ break;
+ }
+
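+ // For an MPEG2-TS session the access unit is a run of 188-byte TS
+ // packets; feed each packet to the parser and treat a trailing partial
+ // packet (or any parser error) as a malformed stream.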
+ if (mTSParser != NULL) {
+ size_t offset = 0;
+ status_t err = OK;
+ while (offset + 188 <= accessUnit->size()) {
+ err = mTSParser->feedTSPacket(
+ accessUnit->data() + offset, 188);
+ if (err != OK) {
+ break;
+ }
+
+ offset += 188;
+ }
+
+ if (offset < accessUnit->size()) {
+ err = ERROR_MALFORMED;
+ }
+
+ if (err != OK) {
+ signalSourceEOS(err);
+ }
+
+ postSourceEOSIfNecessary();
+ break;
+ }
+
+ TrackInfo *info = &mTracks.editItemAt(trackIndex);
+
+ sp<AnotherPacketSource> source = info->mSource;
+ if (source != NULL) {
+ uint32_t rtpTime;
+ CHECK(accessUnit->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+
+ if (!info->mNPTMappingValid) {
+ // This is a live stream; we didn't receive any normal
+ // play time (NPT) mapping, so don't map to NPT.
+ source->queueAccessUnit(accessUnit);
+ break;
+ }
+
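+ // Map the 32-bit RTP timestamp to normal play time (NPT) in
+ // microseconds: scale the offset from the anchored RTP time by the
+ // track's clock rate and add the anchored NPT.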
+ int64_t nptUs =
+ ((double)rtpTime - (double)info->mRTPTime)
+ / info->mTimeScale
+ * 1000000ll
+ + info->mNormalPlaytimeUs;
+
+ accessUnit->meta()->setInt64("timeUs", nptUs);
+
+ source->queueAccessUnit(accessUnit);
+ }
+ postSourceEOSIfNecessary();
+ break;
+ }
+
+ case MyHandler::kWhatEOS:
+ {
+ int32_t finalResult;
+ CHECK(msg->findInt32("finalResult", &finalResult));
+ CHECK_NE(finalResult, (status_t)OK);
+
+ if (mTSParser != NULL) {
+ signalSourceEOS(finalResult);
+ }
+
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+ CHECK_LT(trackIndex, mTracks.size());
+
+ TrackInfo *info = &mTracks.editItemAt(trackIndex);
+ sp<AnotherPacketSource> source = info->mSource;
+ if (source != NULL) {
+ source->signalEOS(finalResult);
+ }
+
+ break;
+ }
+
+ case MyHandler::kWhatSeekDiscontinuity:
+ {
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+ CHECK_LT(trackIndex, mTracks.size());
+
+ TrackInfo *info = &mTracks.editItemAt(trackIndex);
+ sp<AnotherPacketSource> source = info->mSource;
+ if (source != NULL) {
+ source->queueDiscontinuity(
+ ATSParser::DISCONTINUITY_TIME,
+ NULL,
+ true /* discard */);
+ }
+
+ break;
+ }
+
+ case MyHandler::kWhatNormalPlayTimeMapping:
+ {
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+ CHECK_LT(trackIndex, mTracks.size());
+
+ uint32_t rtpTime;
+ CHECK(msg->findInt32("rtpTime", (int32_t *)&rtpTime));
+
+ int64_t nptUs;
+ CHECK(msg->findInt64("nptUs", &nptUs));
+
+ TrackInfo *info = &mTracks.editItemAt(trackIndex);
+ info->mRTPTime = rtpTime;
+ info->mNormalPlaytimeUs = nptUs;
+ info->mNPTMappingValid = true;
+ break;
+ }
+
+ case SDPLoader::kWhatSDPLoaded:
+ {
+ onSDPLoaded(msg);
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+void NuPlayer2::RTSPSource2::onConnected() {
+ CHECK(mAudioTrack == NULL);
+ CHECK(mVideoTrack == NULL);
+
+ size_t numTracks = mHandler->countTracks();
+ for (size_t i = 0; i < numTracks; ++i) {
+ int32_t timeScale;
+ sp<MetaData> format = mHandler->getTrackFormat(i, &timeScale);
+
+ const char *mime;
+ CHECK(format->findCString(kKeyMIMEType, &mime));
+
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
+ // Very special case for MPEG2 Transport Streams.
+ CHECK_EQ(numTracks, 1u);
+
+ mTSParser = new ATSParser;
+ return;
+ }
+
+ bool isAudio = !strncasecmp(mime, "audio/", 6);
+ bool isVideo = !strncasecmp(mime, "video/", 6);
+
+ TrackInfo info;
+ info.mTimeScale = timeScale;
+ info.mRTPTime = 0;
+ info.mNormalPlaytimeUs = 0ll;
+ info.mNPTMappingValid = false;
+
+ if ((isAudio && mAudioTrack == NULL)
+ || (isVideo && mVideoTrack == NULL)) {
+ sp<AnotherPacketSource> source = new AnotherPacketSource(format);
+
+ if (isAudio) {
+ mAudioTrack = source;
+ } else {
+ mVideoTrack = source;
+ }
+
+ info.mSource = source;
+ }
+
+ mTracks.push(info);
+ }
+
+ mState = CONNECTED;
+}
+
+void NuPlayer2::RTSPSource2::onSDPLoaded(const sp<AMessage> &msg) {
+ status_t err;
+ CHECK(msg->findInt32("result", &err));
+
+ mSDPLoader.clear();
+
+ if (mDisconnectReplyID != 0) {
+ err = UNKNOWN_ERROR;
+ }
+
+ if (err == OK) {
+ sp<ASessionDescription> desc;
+ sp<RefBase> obj;
+ CHECK(msg->findObject("description", &obj));
+ desc = static_cast<ASessionDescription *>(obj.get());
+
+ AString rtspUri;
+ if (!desc->findAttribute(0, "a=control", &rtspUri)) {
+ ALOGE("Unable to find url in SDP");
+ err = UNKNOWN_ERROR;
+ } else {
+ sp<AMessage> notify = new AMessage(kWhatNotify, this);
+
+ mHandler = new MyHandler(rtspUri.c_str(), notify, true /* uidValid */, mUID);
+ mLooper->registerHandler(mHandler);
+
+ mHandler->loadSDP(desc);
+ }
+ }
+
+ if (err != OK) {
+ if (mState == CONNECTING) {
+ // We're still in the preparation phase, signal that it
+ // failed.
+ notifyPrepared(err);
+ }
+
+ mState = DISCONNECTED;
+ setError(err);
+
+ if (mDisconnectReplyID != 0) {
+ finishDisconnectIfPossible();
+ }
+ }
+}
+
+void NuPlayer2::RTSPSource2::onDisconnected(const sp<AMessage> &msg) {
+ if (mState == DISCONNECTED) {
+ return;
+ }
+
+ status_t err;
+ CHECK(msg->findInt32("result", &err));
+ CHECK_NE(err, (status_t)OK);
+
+ mLooper->unregisterHandler(mHandler->id());
+ mHandler.clear();
+
+ if (mState == CONNECTING) {
+ // We're still in the preparation phase, signal that it
+ // failed.
+ notifyPrepared(err);
+ }
+
+ mState = DISCONNECTED;
+ setError(err);
+
+ if (mDisconnectReplyID != 0) {
+ finishDisconnectIfPossible();
+ }
+}
+
+void NuPlayer2::RTSPSource2::finishDisconnectIfPossible() {
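+ // Still connected (or connecting): ask the RTSP handler, or the SDP
+ // loader if the session never got that far, to tear down first. The
+ // pending disconnect reply is posted once the state reaches DISCONNECTED.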
+ if (mState != DISCONNECTED) {
+ if (mHandler != NULL) {
+ mHandler->disconnect();
+ } else if (mSDPLoader != NULL) {
+ mSDPLoader->cancel();
+ }
+ return;
+ }
+
+ (new AMessage)->postReply(mDisconnectReplyID);
+ mDisconnectReplyID = 0;
+}
+
+void NuPlayer2::RTSPSource2::setError(status_t err) {
+ Mutex::Autolock _l(mBufferingLock);
+ mFinalResult = err;
+}
+
+void NuPlayer2::RTSPSource2::startBufferingIfNecessary() {
+ Mutex::Autolock _l(mBufferingLock);
+
+ if (!mBuffering) {
+ mBuffering = true;
+
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatPauseOnBufferingStart);
+ notify->post();
+ }
+}
+
+bool NuPlayer2::RTSPSource2::stopBufferingIfNecessary() {
+ Mutex::Autolock _l(mBufferingLock);
+
+ if (mBuffering) {
+ if (!haveSufficientDataOnAllTracks()) {
+ return false;
+ }
+
+ mBuffering = false;
+
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatResumeOnBufferingEnd);
+ notify->post();
+ }
+
+ return true;
+}
+
+void NuPlayer2::RTSPSource2::finishSeek(status_t err) {
+ if (mSeekReplyID == NULL) {
+ return;
+ }
+ sp<AMessage> seekReply = new AMessage;
+ seekReply->setInt32("err", err);
+ seekReply->postReply(mSeekReplyID);
+ mSeekReplyID = NULL;
+}
+
+} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/RTSPSource2.h b/media/libmediaplayer2/nuplayer2/RTSPSource2.h
new file mode 100644
index 0000000..712c3e5
--- /dev/null
+++ b/media/libmediaplayer2/nuplayer2/RTSPSource2.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RTSP_SOURCE2_H_
+
+#define RTSP_SOURCE2_H_
+
+#include "NuPlayer2Source.h"
+
+#include "ATSParser.h"
+
+namespace android {
+
+struct ALooper;
+struct AReplyToken;
+struct AnotherPacketSource;
+struct MyHandler;
+struct SDPLoader;
+
+struct NuPlayer2::RTSPSource2 : public NuPlayer2::Source {
+ RTSPSource2(
+ const sp<AMessage> ¬ify,
+ const sp<MediaHTTPService> &httpService,
+ const char *url,
+ const KeyedVector<String8, String8> *headers,
+ uid_t uid = 0,
+ bool isSDP = false);
+
+ virtual status_t getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
+ virtual void prepareAsync();
+ virtual void start();
+ virtual void stop();
+
+ virtual status_t feedMoreTSData();
+
+ virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+
+ virtual status_t getDuration(int64_t *durationUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) override;
+
+ void onMessageReceived(const sp<AMessage> &msg);
+
+protected:
+ virtual ~RTSPSource2();
+
+ virtual sp<MetaData> getFormatMeta(bool audio);
+
+private:
+ enum {
+ kWhatNotify = 'noti',
+ kWhatDisconnect = 'disc',
+ kWhatPerformSeek = 'seek',
+ kWhatPollBuffering = 'poll',
+ kWhatSignalEOS = 'eos ',
+ };
+
+ enum State {
+ DISCONNECTED,
+ CONNECTING,
+ CONNECTED,
+ SEEKING,
+ };
+
+ enum Flags {
+ // Don't log any URLs.
+ kFlagIncognito = 1,
+ };
+
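+ // Per-track state: the packet source plus the RTP-to-NPT mapping
+ // (clock rate, anchor RTP time, anchor NPT, and whether a valid
+ // mapping has been received yet).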
+ struct TrackInfo {
+ sp<AnotherPacketSource> mSource;
+
+ int32_t mTimeScale;
+ uint32_t mRTPTime;
+ int64_t mNormalPlaytimeUs;
+ bool mNPTMappingValid;
+ };
+
+ sp<MediaHTTPService> mHTTPService;
+ AString mURL;
+ KeyedVector<String8, String8> mExtraHeaders;
+ uid_t mUID;
+ uint32_t mFlags;
+ bool mIsSDP;
+ State mState;
+ status_t mFinalResult;
+ sp<AReplyToken> mDisconnectReplyID;
+ Mutex mBufferingLock;
+ bool mBuffering;
+ bool mInPreparationPhase;
+ bool mEOSPending;
+
+ Mutex mBufferingSettingsLock;
+ BufferingSettings mBufferingSettings;
+
+ sp<ALooper> mLooper;
+ sp<MyHandler> mHandler;
+ sp<SDPLoader> mSDPLoader;
+
+ Vector<TrackInfo> mTracks;
+ sp<AnotherPacketSource> mAudioTrack;
+ sp<AnotherPacketSource> mVideoTrack;
+
+ sp<ATSParser> mTSParser;
+
+ int32_t mSeekGeneration;
+
+ int64_t mEOSTimeoutAudio;
+ int64_t mEOSTimeoutVideo;
+
+ sp<AReplyToken> mSeekReplyID;
+
+ sp<AnotherPacketSource> getSource(bool audio);
+
+ void onConnected();
+ void onSDPLoaded(const sp<AMessage> &msg);
+ void onDisconnected(const sp<AMessage> &msg);
+ void finishDisconnectIfPossible();
+
+ void performSeek(int64_t seekTimeUs);
+ void schedulePollBuffering();
+ void checkBuffering(
+ bool *prepared,
+ bool *underflow,
+ bool *overflow,
+ bool *startServer,
+ bool *finished);
+ void onPollBuffering();
+
+ bool haveSufficientDataOnAllTracks();
+
+ void setEOSTimeout(bool audio, int64_t timeout);
+ void setError(status_t err);
+ void startBufferingIfNecessary();
+ bool stopBufferingIfNecessary();
+ void finishSeek(status_t err);
+
+ void postSourceEOSIfNecessary();
+ void signalSourceEOS(status_t result);
+ void onSignalEOS(const sp<AMessage> &msg);
+
+ bool sourceNearEOS(bool audio);
+ bool sourceReachedEOS(bool audio);
+
+ DISALLOW_EVIL_CONSTRUCTORS(RTSPSource2);
+};
+
+} // namespace android
+
+#endif // RTSP_SOURCE2_H_
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
new file mode 100644
index 0000000..a37973b
--- /dev/null
+++ b/media/libmediaplayerservice/Android.bp
@@ -0,0 +1,78 @@
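+// Builds libmediaplayerservice as a 32-bit shared library with CFI
+// sanitization enabled.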
+cc_library_shared {
+
+ srcs: [
+ "ActivityManager.cpp",
+ "MediaPlayerFactory.cpp",
+ "MediaPlayerService.cpp",
+ "MediaRecorderClient.cpp",
+ "MetadataRetrieverClient.cpp",
+ "StagefrightRecorder.cpp",
+ "TestPlayerStub.cpp",
+ ],
+
+ shared_libs: [
+ "android.hardware.media.omx@1.0",
+ "libaudioclient",
+ "libbinder",
+ "libcamera_client",
+ "libcrypto",
+ "libcutils",
+ "libdl",
+ "libgui",
+ "libhidlbase",
+ "libhidlmemory",
+ "liblog",
+ "libmedia",
+ "libmedia_omx",
+ "libmediaextractor",
+ "libmediadrm",
+ "libmediametrics",
+ "libmediautils",
+ "libmemunreachable",
+ "libpowermanager",
+ "libstagefright",
+ "libstagefright_foundation",
+ "libstagefright_httplive",
+ "libutils",
+ ],
+
+ header_libs: [
+ "media_plugin_headers",
+ ],
+
+ static_libs: [
+ "libstagefright_nuplayer",
+ "libstagefright_rtsp",
+ "libstagefright_timedtext",
+ ],
+
+ export_shared_lib_headers: ["libmedia"],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/rtsp",
+ "frameworks/av/media/libstagefright/webm",
+ ],
+
+ local_include_dirs: ["include"],
+
+ cflags: [
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ name: "libmediaplayerservice",
+
+ compile_multilib: "32",
+
+ sanitize: {
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
+
+subdirs = ["*"]
+
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
deleted file mode 100644
index 1fc74a9..0000000
--- a/media/libmediaplayerservice/Android.mk
+++ /dev/null
@@ -1,73 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-#
-# libmediaplayerservice
-#
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- ActivityManager.cpp \
- HDCP.cpp \
- MediaPlayerFactory.cpp \
- MediaPlayerService.cpp \
- MediaRecorderClient.cpp \
- MetadataRetrieverClient.cpp \
- RemoteDisplay.cpp \
- StagefrightRecorder.cpp \
- TestPlayerStub.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- libbinder \
- libcrypto \
- libcutils \
- libdrmframework \
- liblog \
- libdl \
- libgui \
- libaudioclient \
- libmedia \
- libmediametrics \
- libmediadrm \
- libmediautils \
- libmemunreachable \
- libstagefright \
- libstagefright_foundation \
- libstagefright_httplive \
- libstagefright_omx \
- libstagefright_wfd \
- libutils \
- libnativewindow \
- libhidlbase \
- android.hardware.media.omx@1.0 \
-
-LOCAL_STATIC_LIBRARIES := \
- libstagefright_nuplayer \
- libstagefright_rtsp \
- libstagefright_timedtext \
-
-LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libmedia
-
-LOCAL_C_INCLUDES := \
- frameworks/av/media/libstagefright/include \
- frameworks/av/media/libstagefright/rtsp \
- frameworks/av/media/libstagefright/wifi-display \
- frameworks/av/media/libstagefright/webm \
- $(LOCAL_PATH)/include/media \
- frameworks/av/include/camera \
- frameworks/native/include/media/openmax \
- frameworks/native/include/media/hardware \
- external/tremolo/Tremolo \
-
-LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
-
-LOCAL_MODULE:= libmediaplayerservice
-
-LOCAL_32_BIT_ONLY := true
-
-LOCAL_SANITIZE := cfi
-LOCAL_SANITIZE_DIAG := cfi
-
-include $(BUILD_SHARED_LIBRARY)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libmediaplayerservice/HDCP.cpp b/media/libmediaplayerservice/HDCP.cpp
deleted file mode 100644
index afe3936..0000000
--- a/media/libmediaplayerservice/HDCP.cpp
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "HDCP"
-#include <utils/Log.h>
-
-#include "HDCP.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-
-#include <dlfcn.h>
-
-namespace android {
-
-HDCP::HDCP(bool createEncryptionModule)
- : mIsEncryptionModule(createEncryptionModule),
- mLibHandle(NULL),
- mHDCPModule(NULL) {
- mLibHandle = dlopen("libstagefright_hdcp.so", RTLD_NOW);
-
- if (mLibHandle == NULL) {
- ALOGE("Unable to locate libstagefright_hdcp.so");
- return;
- }
-
- typedef HDCPModule *(*CreateHDCPModuleFunc)(
- void *, HDCPModule::ObserverFunc);
-
- CreateHDCPModuleFunc createHDCPModule =
- mIsEncryptionModule
- ? (CreateHDCPModuleFunc)dlsym(mLibHandle, "createHDCPModule")
- : (CreateHDCPModuleFunc)dlsym(
- mLibHandle, "createHDCPModuleForDecryption");
-
- if (createHDCPModule == NULL) {
- ALOGE("Unable to find symbol 'createHDCPModule'.");
- } else if ((mHDCPModule = createHDCPModule(
- this, &HDCP::ObserveWrapper)) == NULL) {
- ALOGE("createHDCPModule failed.");
- }
-}
-
-HDCP::~HDCP() {
- Mutex::Autolock autoLock(mLock);
-
- if (mHDCPModule != NULL) {
- delete mHDCPModule;
- mHDCPModule = NULL;
- }
-
- if (mLibHandle != NULL) {
- dlclose(mLibHandle);
- mLibHandle = NULL;
- }
-}
-
-status_t HDCP::setObserver(const sp<IHDCPObserver> &observer) {
- Mutex::Autolock autoLock(mLock);
-
- if (mHDCPModule == NULL) {
- return NO_INIT;
- }
-
- mObserver = observer;
-
- return OK;
-}
-
-status_t HDCP::initAsync(const char *host, unsigned port) {
- Mutex::Autolock autoLock(mLock);
-
- if (mHDCPModule == NULL) {
- return NO_INIT;
- }
-
- return mHDCPModule->initAsync(host, port);
-}
-
-status_t HDCP::shutdownAsync() {
- Mutex::Autolock autoLock(mLock);
-
- if (mHDCPModule == NULL) {
- return NO_INIT;
- }
-
- return mHDCPModule->shutdownAsync();
-}
-
-uint32_t HDCP::getCaps() {
- Mutex::Autolock autoLock(mLock);
-
- if (mHDCPModule == NULL) {
- return NO_INIT;
- }
-
- return mHDCPModule->getCaps();
-}
-
-status_t HDCP::encrypt(
- const void *inData, size_t size, uint32_t streamCTR,
- uint64_t *outInputCTR, void *outData) {
- Mutex::Autolock autoLock(mLock);
-
- CHECK(mIsEncryptionModule);
-
- if (mHDCPModule == NULL) {
- *outInputCTR = 0;
-
- return NO_INIT;
- }
-
- return mHDCPModule->encrypt(inData, size, streamCTR, outInputCTR, outData);
-}
-
-status_t HDCP::encryptNative(
- const sp<GraphicBuffer> &graphicBuffer,
- size_t offset, size_t size, uint32_t streamCTR,
- uint64_t *outInputCTR, void *outData) {
- Mutex::Autolock autoLock(mLock);
-
- CHECK(mIsEncryptionModule);
-
- if (mHDCPModule == NULL) {
- *outInputCTR = 0;
-
- return NO_INIT;
- }
-
- return mHDCPModule->encryptNative(graphicBuffer->handle,
- offset, size, streamCTR, outInputCTR, outData);
-}
-
-status_t HDCP::decrypt(
- const void *inData, size_t size,
- uint32_t streamCTR, uint64_t outInputCTR, void *outData) {
- Mutex::Autolock autoLock(mLock);
-
- CHECK(!mIsEncryptionModule);
-
- if (mHDCPModule == NULL) {
- return NO_INIT;
- }
-
- return mHDCPModule->decrypt(inData, size, streamCTR, outInputCTR, outData);
-}
-
-// static
-void HDCP::ObserveWrapper(void *me, int msg, int ext1, int ext2) {
- static_cast<HDCP *>(me)->observe(msg, ext1, ext2);
-}
-
-void HDCP::observe(int msg, int ext1, int ext2) {
- Mutex::Autolock autoLock(mLock);
-
- if (mObserver != NULL) {
- mObserver->notify(msg, ext1, ext2, NULL /* obj */);
- }
-}
-
-} // namespace android
-
diff --git a/media/libmediaplayerservice/HDCP.h b/media/libmediaplayerservice/HDCP.h
deleted file mode 100644
index 83c61b5..0000000
--- a/media/libmediaplayerservice/HDCP.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef HDCP_H_
-
-#define HDCP_H_
-
-#include <media/IHDCP.h>
-#include <utils/Mutex.h>
-
-namespace android {
-
-struct HDCP : public BnHDCP {
- explicit HDCP(bool createEncryptionModule);
- virtual ~HDCP();
-
- virtual status_t setObserver(const sp<IHDCPObserver> &observer);
- virtual status_t initAsync(const char *host, unsigned port);
- virtual status_t shutdownAsync();
- virtual uint32_t getCaps();
-
- virtual status_t encrypt(
- const void *inData, size_t size, uint32_t streamCTR,
- uint64_t *outInputCTR, void *outData);
-
- virtual status_t encryptNative(
- const sp<GraphicBuffer> &graphicBuffer,
- size_t offset, size_t size, uint32_t streamCTR,
- uint64_t *outInputCTR, void *outData);
-
- virtual status_t decrypt(
- const void *inData, size_t size,
- uint32_t streamCTR, uint64_t outInputCTR, void *outData);
-
-private:
- Mutex mLock;
-
- bool mIsEncryptionModule;
-
- void *mLibHandle;
- HDCPModule *mHDCPModule;
- sp<IHDCPObserver> mObserver;
-
- static void ObserveWrapper(void *me, int msg, int ext1, int ext2);
- void observe(int msg, int ext1, int ext2);
-
- DISALLOW_EVIL_CONSTRUCTORS(HDCP);
-};
-
-} // namespace android
-
-#endif // HDCP_H_
-
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index 6da1ec1..1376ccc 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -20,8 +20,8 @@
#include <utils/Log.h>
#include <cutils/properties.h>
+#include <media/DataSource.h>
#include <media/IMediaPlayer.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/FileSource.h>
#include <media/stagefright/foundation/ADebug.h>
#include <utils/Errors.h>
@@ -126,8 +126,7 @@
sp<MediaPlayerBase> MediaPlayerFactory::createPlayer(
player_type playerType,
- void* cookie,
- notify_callback_f notifyFunc,
+ const sp<MediaPlayerBase::Listener> &listener,
pid_t pid) {
sp<MediaPlayerBase> p;
IFactory* factory;
@@ -152,7 +151,7 @@
init_result = p->initCheck();
if (init_result == NO_ERROR) {
- p->setNotifyCallback(cookie, notifyFunc);
+ p->setNotifyCallback(listener);
} else {
ALOGE("Failed to create player object of type %d, initCheck failed"
" (res = %d)", playerType, init_result);
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.h b/media/libmediaplayerservice/MediaPlayerFactory.h
index e22a56f..e88700c 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.h
+++ b/media/libmediaplayerservice/MediaPlayerFactory.h
@@ -65,8 +65,7 @@
const sp<DataSource> &source);
static sp<MediaPlayerBase> createPlayer(player_type playerType,
- void* cookie,
- notify_callback_f notifyFunc,
+ const sp<MediaPlayerBase::Listener> &listener,
pid_t pid);
static void registerBuiltinFactories();
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 496db0d..9bcfc83 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -55,6 +55,7 @@
#include <media/Metadata.h>
#include <media/AudioTrack.h>
#include <media/MemoryLeakTrackUtil.h>
+#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
@@ -77,11 +78,7 @@
#include "TestPlayerStub.h"
#include "nuplayer/NuPlayerDriver.h"
-#include <media/stagefright/omx/OMX.h>
-
-#include "HDCP.h"
#include "HTTPBase.h"
-#include "RemoteDisplay.h"
static const int kDumpLockRetries = 50;
static const int kDumpLockSleepUs = 20000;
@@ -93,6 +90,7 @@
using android::BAD_VALUE;
using android::NOT_ENOUGH_DATA;
using android::Parcel;
+using android::media::VolumeShaper;
// Max number of entries in the filter.
const int kMaxFilterSize = 64; // I pulled that out of thin air.
@@ -337,29 +335,13 @@
return MediaCodecList::getLocalInstance();
}
-sp<IOMX> MediaPlayerService::getOMX() {
- ALOGI("MediaPlayerService::getOMX");
- Mutex::Autolock autoLock(mLock);
-
- if (mOMX.get() == NULL) {
- mOMX = new OMX;
- }
-
- return mOMX;
-}
-
-sp<IHDCP> MediaPlayerService::makeHDCP(bool createEncryptionModule) {
- return new HDCP(createEncryptionModule);
-}
-
sp<IRemoteDisplay> MediaPlayerService::listenForRemoteDisplay(
- const String16 &opPackageName,
- const sp<IRemoteDisplayClient>& client, const String8& iface) {
- if (!checkPermission("android.permission.CONTROL_WIFI_DISPLAY")) {
- return NULL;
- }
+ const String16 &/*opPackageName*/,
+ const sp<IRemoteDisplayClient>& /*client*/,
+ const String8& /*iface*/) {
+ ALOGE("listenForRemoteDisplay is no longer supported!");
- return new RemoteDisplay(opPackageName, client, iface.string());
+ return NULL;
}
status_t MediaPlayerService::AudioOutput::dump(int fd, const Vector<String16>& args) const
@@ -590,27 +572,26 @@
mUid = uid;
mRetransmitEndpointValid = false;
mAudioAttributes = NULL;
+ mListener = new Listener(this);
#if CALLBACK_ANTAGONIZER
ALOGD("create Antagonizer");
- mAntagonizer = new Antagonizer(notify, this);
+ mAntagonizer = new Antagonizer(mListener);
#endif
}
MediaPlayerService::Client::~Client()
{
ALOGV("Client(%d) destructor pid = %d", mConnId, mPid);
- {
- Mutex::Autolock l(mLock);
- mAudioOutput.clear();
- }
+ mAudioOutput.clear();
wp<Client> client(this);
disconnect();
mService->removeClient(client);
if (mAudioAttributes != NULL) {
free(mAudioAttributes);
}
- clearDeathNotifiers();
+ clearDeathNotifiers_l();
+ mAudioDeviceUpdatedListener.clear();
}
void MediaPlayerService::Client::disconnect()
@@ -630,7 +611,7 @@
// and reset the player. We assume the player will serialize
// access to itself if necessary.
if (p != 0) {
- p->setNotifyCallback(0, 0);
+ p->setNotifyCallback(0);
#if CALLBACK_ANTAGONIZER
ALOGD("kill Antagonizer");
mAntagonizer->kill();
@@ -638,7 +619,10 @@
p->reset();
}
- disconnectNativeWindow();
+ {
+ Mutex::Autolock l(mLock);
+ disconnectNativeWindow_l();
+ }
IPCThreadState::self()->flushCommands();
}
@@ -652,7 +636,7 @@
p.clear();
}
if (p == NULL) {
- p = MediaPlayerFactory::createPlayer(playerType, this, notify, mPid);
+ p = MediaPlayerFactory::createPlayer(playerType, mListener, mPid);
}
if (p != NULL) {
@@ -715,7 +699,18 @@
}
}
-void MediaPlayerService::Client::clearDeathNotifiers() {
+void MediaPlayerService::Client::AudioDeviceUpdatedNotifier::onAudioDeviceUpdate(
+ audio_io_handle_t audioIo,
+ audio_port_handle_t deviceId) {
+ sp<MediaPlayerBase> listener = mListener.promote();
+ if (listener != NULL) {
+ listener->sendEvent(MEDIA_AUDIO_ROUTING_CHANGED, audioIo, deviceId);
+ } else {
+ ALOGW("listener for process %d death is gone", MEDIA_AUDIO_ROUTING_CHANGED);
+ }
+}
+
+void MediaPlayerService::Client::clearDeathNotifiers_l() {
if (mExtractorDeathListener != nullptr) {
mExtractorDeathListener->unlinkToDeath();
mExtractorDeathListener = nullptr;
@@ -730,7 +725,6 @@
player_type playerType)
{
ALOGV("player type = %d", playerType);
- clearDeathNotifiers();
// create the right type of player
sp<MediaPlayerBase> p = createPlayer(playerType);
@@ -744,62 +738,58 @@
ALOGE("extractor service not available");
return NULL;
}
- mExtractorDeathListener = new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
- binder->linkToDeath(mExtractorDeathListener);
+ sp<ServiceDeathNotifier> extractorDeathListener =
+ new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
+ binder->linkToDeath(extractorDeathListener);
- if (property_get_bool("persist.media.treble_omx", true)) {
- // Treble IOmx
- sp<IOmx> omx = IOmx::getService();
- if (omx == nullptr) {
- ALOGE("Treble IOmx not available");
- return NULL;
- }
- mCodecDeathListener = new ServiceDeathNotifier(omx, p, MEDIACODEC_PROCESS_DEATH);
- omx->linkToDeath(mCodecDeathListener, 0);
- } else {
- // Legacy IOMX
- binder = sm->getService(String16("media.codec"));
- if (binder == NULL) {
- ALOGE("codec service not available");
- return NULL;
- }
- mCodecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
- binder->linkToDeath(mCodecDeathListener);
+ sp<IOmx> omx = IOmx::getService();
+ if (omx == nullptr) {
+ ALOGE("IOmx service is not available");
+ return NULL;
}
+ sp<ServiceDeathNotifier> codecDeathListener =
+ new ServiceDeathNotifier(omx, p, MEDIACODEC_PROCESS_DEATH);
+ omx->linkToDeath(codecDeathListener, 0);
+
+ Mutex::Autolock lock(mLock);
+
+ clearDeathNotifiers_l();
+ mExtractorDeathListener = extractorDeathListener;
+ mCodecDeathListener = codecDeathListener;
+ mAudioDeviceUpdatedListener = new AudioDeviceUpdatedNotifier(p);
if (!p->hardwareOutput()) {
- Mutex::Autolock l(mLock);
mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(),
- mPid, mAudioAttributes);
+ mPid, mAudioAttributes, mAudioDeviceUpdatedListener);
static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
}
return p;
}
-void MediaPlayerService::Client::setDataSource_post(
+status_t MediaPlayerService::Client::setDataSource_post(
const sp<MediaPlayerBase>& p,
status_t status)
{
ALOGV(" setDataSource");
- mStatus = status;
- if (mStatus != OK) {
- ALOGE(" error: %d", mStatus);
- return;
+ if (status != OK) {
+ ALOGE(" error: %d", status);
+ return status;
}
// Set the re-transmission endpoint if one was chosen.
if (mRetransmitEndpointValid) {
- mStatus = p->setRetransmitEndpoint(&mRetransmitEndpoint);
- if (mStatus != NO_ERROR) {
- ALOGE("setRetransmitEndpoint error: %d", mStatus);
+ status = p->setRetransmitEndpoint(&mRetransmitEndpoint);
+ if (status != NO_ERROR) {
+ ALOGE("setRetransmitEndpoint error: %d", status);
}
}
- if (mStatus == OK) {
- Mutex::Autolock l(mLock);
+ if (status == OK) {
+ Mutex::Autolock lock(mLock);
mPlayer = p;
}
+ return status;
}
status_t MediaPlayerService::Client::setDataSource(
@@ -830,9 +820,9 @@
ALOGE("Couldn't open fd for %s", url);
return UNKNOWN_ERROR;
}
- setDataSource(fd, 0, 0x7fffffffffLL); // this sets mStatus
+ status_t status = setDataSource(fd, 0, 0x7fffffffffLL); // this sets mStatus
close(fd);
- return mStatus;
+ return mStatus = status;
} else {
player_type playerType = MediaPlayerFactory::getPlayerType(this, url);
sp<MediaPlayerBase> p = setDataSource_pre(playerType);
@@ -840,8 +830,9 @@
return NO_INIT;
}
- setDataSource_post(p, p->setDataSource(httpService, url, headers));
- return mStatus;
+ return mStatus =
+ setDataSource_post(
+ p, p->setDataSource(httpService, url, headers));
}
}
@@ -881,8 +872,7 @@
}
// now set data source
- setDataSource_post(p, p->setDataSource(fd, offset, length));
- return mStatus;
+ return mStatus = setDataSource_post(p, p->setDataSource(fd, offset, length));
}
status_t MediaPlayerService::Client::setDataSource(
@@ -895,24 +885,22 @@
}
// now set data source
- setDataSource_post(p, p->setDataSource(source));
- return mStatus;
+ return mStatus = setDataSource_post(p, p->setDataSource(source));
}
status_t MediaPlayerService::Client::setDataSource(
const sp<IDataSource> &source) {
- sp<DataSource> dataSource = DataSource::CreateFromIDataSource(source);
+ sp<DataSource> dataSource = CreateDataSourceFromIDataSource(source);
player_type playerType = MediaPlayerFactory::getPlayerType(this, dataSource);
sp<MediaPlayerBase> p = setDataSource_pre(playerType);
if (p == NULL) {
return NO_INIT;
}
// now set data source
- setDataSource_post(p, p->setDataSource(dataSource));
- return mStatus;
+ return mStatus = setDataSource_post(p, p->setDataSource(dataSource));
}
-void MediaPlayerService::Client::disconnectNativeWindow() {
+void MediaPlayerService::Client::disconnectNativeWindow_l() {
if (mConnectedWindow != NULL) {
status_t err = nativeWindowDisconnect(
mConnectedWindow.get(), "disconnectNativeWindow");
@@ -949,7 +937,8 @@
// ANW, which may result in errors.
reset();
- disconnectNativeWindow();
+ Mutex::Autolock lock(mLock);
+ disconnectNativeWindow_l();
return err;
}
@@ -960,14 +949,22 @@
// on the disconnected ANW, which may result in errors.
status_t err = p->setVideoSurfaceTexture(bufferProducer);
- disconnectNativeWindow();
-
- mConnectedWindow = anw;
+ mLock.lock();
+ disconnectNativeWindow_l();
if (err == OK) {
+ mConnectedWindow = anw;
mConnectedWindowBinder = binder;
+ mLock.unlock();
} else {
- disconnectNativeWindow();
+ mLock.unlock();
+ status_t err = nativeWindowDisconnect(
+ anw.get(), "disconnectNativeWindow");
+
+ if (err != OK) {
+ ALOGW("nativeWindowDisconnect returned an error: %s (%d)",
+ strerror(-err), err);
+ }
}
return err;
@@ -1050,18 +1047,18 @@
return p->setBufferingSettings(buffering);
}
-status_t MediaPlayerService::Client::getDefaultBufferingSettings(
+status_t MediaPlayerService::Client::getBufferingSettings(
BufferingSettings* buffering /* nonnull */)
{
sp<MediaPlayerBase> p = getPlayer();
// TODO: create mPlayer on demand.
if (p == 0) return UNKNOWN_ERROR;
- status_t ret = p->getDefaultBufferingSettings(buffering);
+ status_t ret = p->getBufferingSettings(buffering);
if (ret == NO_ERROR) {
- ALOGV("[%d] getDefaultBufferingSettings{%s}",
+ ALOGV("[%d] getBufferingSettings{%s}",
mConnId, buffering->toString().string());
} else {
- ALOGV("[%d] getDefaultBufferingSettings returned %d", mConnId, ret);
+ ALOGE("[%d] getBufferingSettings returned %d", mConnId, ret);
}
return ret;
}
@@ -1268,6 +1265,14 @@
return p->reset();
}
+status_t MediaPlayerService::Client::notifyAt(int64_t mediaTimeUs)
+{
+ ALOGV("[%d] notifyAt(%lld)", mConnId, (long long)mediaTimeUs);
+ sp<MediaPlayerBase> p = getPlayer();
+ if (p == 0) return UNKNOWN_ERROR;
+ return p->notifyAt(mediaTimeUs);
+}
+
status_t MediaPlayerService::Client::setAudioStreamType(audio_stream_type_t type)
{
ALOGV("[%d] setAudioStreamType(%d)", mConnId, type);
@@ -1385,9 +1390,11 @@
if (p != 0) return INVALID_OPERATION;
if (NULL != endpoint) {
+ Mutex::Autolock lock(mLock);
mRetransmitEndpoint = *endpoint;
mRetransmitEndpointValid = true;
} else {
+ Mutex::Autolock lock(mLock);
mRetransmitEndpointValid = false;
}
@@ -1405,6 +1412,7 @@
if (p != NULL)
return p->getRetransmitEndpoint(endpoint);
+ Mutex::Autolock lock(mLock);
if (!mRetransmitEndpointValid)
return NO_INIT;
@@ -1414,24 +1422,19 @@
}
void MediaPlayerService::Client::notify(
- void* cookie, int msg, int ext1, int ext2, const Parcel *obj)
+ int msg, int ext1, int ext2, const Parcel *obj)
{
- Client* client = static_cast<Client*>(cookie);
- if (client == NULL) {
- return;
- }
-
sp<IMediaPlayerClient> c;
sp<Client> nextClient;
status_t errStartNext = NO_ERROR;
{
- Mutex::Autolock l(client->mLock);
- c = client->mClient;
- if (msg == MEDIA_PLAYBACK_COMPLETE && client->mNextClient != NULL) {
- nextClient = client->mNextClient;
+ Mutex::Autolock l(mLock);
+ c = mClient;
+ if (msg == MEDIA_PLAYBACK_COMPLETE && mNextClient != NULL) {
+ nextClient = mNextClient;
- if (client->mAudioOutput != NULL)
- client->mAudioOutput->switchToNextOutput();
+ if (mAudioOutput != NULL)
+ mAudioOutput->switchToNextOutput();
errStartNext = nextClient->start();
}
@@ -1457,17 +1460,17 @@
MEDIA_INFO_METADATA_UPDATE == ext1) {
const media::Metadata::Type metadata_type = ext2;
- if(client->shouldDropMetadata(metadata_type)) {
+ if(shouldDropMetadata(metadata_type)) {
return;
}
// Update the list of metadata that have changed. getMetadata
// also access mMetadataUpdated and clears it.
- client->addNewMetadataUpdate(metadata_type);
+ addNewMetadataUpdate(metadata_type);
}
if (c != NULL) {
- ALOGV("[%d] notify (%p, %d, %d, %d)", client->mConnId, cookie, msg, ext1, ext2);
+ ALOGV("[%d] notify (%d, %d, %d)", mConnId, msg, ext1, ext2);
c->notify(msg, ext1, ext2, obj);
}
}
@@ -1522,11 +1525,47 @@
return ret;
}
+status_t MediaPlayerService::Client::setOutputDevice(audio_port_handle_t deviceId)
+{
+ ALOGV("[%d] setOutputDevice", mConnId);
+ {
+ Mutex::Autolock l(mLock);
+ if (mAudioOutput.get() != nullptr) {
+ return mAudioOutput->setOutputDevice(deviceId);
+ }
+ }
+ return NO_INIT;
+}
+
+status_t MediaPlayerService::Client::getRoutedDeviceId(audio_port_handle_t* deviceId)
+{
+ ALOGV("[%d] getRoutedDeviceId", mConnId);
+ {
+ Mutex::Autolock l(mLock);
+ if (mAudioOutput.get() != nullptr) {
+ return mAudioOutput->getRoutedDeviceId(deviceId);
+ }
+ }
+ return NO_INIT;
+}
+
+status_t MediaPlayerService::Client::enableAudioDeviceCallback(bool enabled)
+{
+ ALOGV("[%d] enableAudioDeviceCallback, %d", mConnId, enabled);
+ {
+ Mutex::Autolock l(mLock);
+ if (mAudioOutput.get() != nullptr) {
+ return mAudioOutput->enableAudioDeviceCallback(enabled);
+ }
+ }
+ return NO_INIT;
+}
+
#if CALLBACK_ANTAGONIZER
const int Antagonizer::interval = 10000; // 10 msecs
-Antagonizer::Antagonizer(notify_callback_f cb, void* client) :
- mExit(false), mActive(false), mClient(client), mCb(cb)
+Antagonizer::Antagonizer(const sp<MediaPlayerBase::Listener> &listener) :
+ mExit(false), mActive(false), mListener(listener)
{
createThread(callbackThread, this);
}
@@ -1546,7 +1585,7 @@
while (!p->mExit) {
if (p->mActive) {
ALOGV("send event");
- p->mCb(p->mClient, 0, 0, 0);
+ p->mListener->notify(0, 0, 0, 0);
}
usleep(interval);
}
@@ -1560,7 +1599,7 @@
#undef LOG_TAG
#define LOG_TAG "AudioSink"
MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
- const audio_attributes_t* attr)
+ const audio_attributes_t* attr, const sp<AudioSystem::AudioDeviceCallback>& deviceCallback)
: mCallback(NULL),
mCallbackCookie(NULL),
mCallbackData(NULL),
@@ -1577,7 +1616,11 @@
mSendLevel(0.0),
mAuxEffectId(0),
mFlags(AUDIO_OUTPUT_FLAG_NONE),
- mVolumeHandler(new VolumeHandler())
+ mVolumeHandler(new media::VolumeHandler()),
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mDeviceCallbackEnabled(false),
+ mDeviceCallback(deviceCallback)
{
ALOGV("AudioOutput(%d)", sessionId);
if (attr != NULL) {
@@ -1963,7 +2006,9 @@
mUid,
mPid,
mAttributes,
- doNotReconnect);
+ doNotReconnect,
+ 1.0f, // default value for maxRequiredSpeed
+ mSelectedDeviceId);
} else {
// TODO: Due to buffer memory concerns, we use a max target playback speed
// based on mPlaybackRate at the time of open (instead of kMaxRequiredSpeed),
@@ -1990,7 +2035,8 @@
mPid,
mAttributes,
doNotReconnect,
- targetSpeed);
+ targetSpeed,
+ mSelectedDeviceId);
}
if ((t == 0) || (t->initCheck() != NO_ERROR)) {
@@ -2084,6 +2130,10 @@
res = mTrack->attachAuxEffect(mAuxEffectId);
}
}
+ mTrack->setOutputDevice(mSelectedDeviceId);
+ if (mDeviceCallbackEnabled) {
+ mTrack->addAudioDeviceCallback(mDeviceCallback.promote());
+ }
ALOGV("updateTrack() DONE status %d", res);
return res;
}
@@ -2299,6 +2349,45 @@
return NO_ERROR;
}
+status_t MediaPlayerService::AudioOutput::setOutputDevice(audio_port_handle_t deviceId)
+{
+ ALOGV("setOutputDevice(%d)", deviceId);
+ Mutex::Autolock lock(mLock);
+ mSelectedDeviceId = deviceId;
+ if (mTrack != 0) {
+ return mTrack->setOutputDevice(deviceId);
+ }
+ return NO_ERROR;
+}
+
+status_t MediaPlayerService::AudioOutput::getRoutedDeviceId(audio_port_handle_t* deviceId)
+{
+ ALOGV("getRoutedDeviceId");
+ Mutex::Autolock lock(mLock);
+ if (mTrack != 0) {
+ mRoutedDeviceId = mTrack->getRoutedDeviceId();
+ }
+ *deviceId = mRoutedDeviceId;
+ return NO_ERROR;
+}
+
+status_t MediaPlayerService::AudioOutput::enableAudioDeviceCallback(bool enabled)
+{
+ ALOGV("enableAudioDeviceCallback, %d", enabled);
+ Mutex::Autolock lock(mLock);
+ mDeviceCallbackEnabled = enabled;
+ if (mTrack != 0) {
+ status_t status;
+ if (enabled) {
+ status = mTrack->addAudioDeviceCallback(mDeviceCallback.promote());
+ } else {
+ status = mTrack->removeAudioDeviceCallback(mDeviceCallback.promote());
+ }
+ return status;
+ }
+ return NO_ERROR;
+}
+
VolumeShaper::Status MediaPlayerService::AudioOutput::applyVolumeShaper(
const sp<VolumeShaper::Configuration>& configuration,
const sp<VolumeShaper::Operation>& operation)
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 06b9cad..bfb7cc2 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -51,7 +51,7 @@
#if CALLBACK_ANTAGONIZER
class Antagonizer {
public:
- Antagonizer(notify_callback_f cb, void* client);
+ Antagonizer(const sp<MediaPlayerBase::Listener> &listener);
void start() { mActive = true; }
void stop() { mActive = false; }
void kill();
@@ -59,12 +59,11 @@
static const int interval;
Antagonizer();
static int callbackThread(void* cookie);
- Mutex mLock;
- Condition mCondition;
- bool mExit;
- bool mActive;
- void* mClient;
- notify_callback_f mCb;
+ Mutex mLock;
+ Condition mCondition;
+ bool mExit;
+ bool mActive;
+ sp<MediaPlayerBase::Listener> mListener;
};
#endif
@@ -78,8 +77,12 @@
class CallbackData;
public:
- AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
- const audio_attributes_t * attr);
+ AudioOutput(
+ audio_session_t sessionId,
+ uid_t uid,
+ int pid,
+ const audio_attributes_t * attr,
+ const sp<AudioSystem::AudioDeviceCallback>& deviceCallback);
virtual ~AudioOutput();
virtual bool ready() const { return mTrack != 0; }
@@ -132,10 +135,15 @@
virtual status_t setParameters(const String8& keyValuePairs);
virtual String8 getParameters(const String8& keys);
- virtual VolumeShaper::Status applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation) override;
- virtual sp<VolumeShaper::State> getVolumeShaperState(int id) override;
+ virtual media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation) override;
+ virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id) override;
+
+ // AudioRouting
+ virtual status_t setOutputDevice(audio_port_handle_t deviceId);
+ virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
+ virtual status_t enableAudioDeviceCallback(bool enabled);
private:
static void setMinBufferCount();
@@ -165,7 +173,11 @@
float mSendLevel;
int mAuxEffectId;
audio_output_flags_t mFlags;
- sp<VolumeHandler> mVolumeHandler;
+ sp<media::VolumeHandler> mVolumeHandler;
+ audio_port_handle_t mSelectedDeviceId;
+ audio_port_handle_t mRoutedDeviceId;
+ bool mDeviceCallbackEnabled;
+ wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
mutable Mutex mLock;
// static variables below not protected by mutex
@@ -215,7 +227,6 @@
}; // AudioOutput
-
public:
static void instantiate();
@@ -228,8 +239,6 @@
audio_session_t audioSessionId);
virtual sp<IMediaCodecList> getCodecList() const;
- virtual sp<IOMX> getOMX();
- virtual sp<IHDCP> makeHDCP(bool createEncryptionModule);
virtual sp<IRemoteDisplay> listenForRemoteDisplay(const String16 &opPackageName,
const sp<IRemoteDisplayClient>& client, const String8& iface);
@@ -309,7 +318,7 @@
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer);
virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
- virtual status_t getDefaultBufferingSettings(
+ virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) override;
virtual status_t prepareAsync();
virtual status_t start();
@@ -327,6 +336,7 @@
virtual status_t getCurrentPosition(int* msec);
virtual status_t getDuration(int* msec);
virtual status_t reset();
+ virtual status_t notifyAt(int64_t mediaTimeUs);
virtual status_t setAudioStreamType(audio_stream_type_t type);
virtual status_t setLooping(int loop);
virtual status_t setVolume(float leftVolume, float rightVolume);
@@ -343,10 +353,10 @@
virtual status_t getRetransmitEndpoint(struct sockaddr_in* endpoint);
virtual status_t setNextPlayer(const sp<IMediaPlayer>& player);
- virtual VolumeShaper::Status applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation) override;
- virtual sp<VolumeShaper::State> getVolumeShaperState(int id) override;
+ virtual media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation) override;
+ virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id) override;
sp<MediaPlayerBase> createPlayer(player_type playerType);
@@ -362,11 +372,10 @@
sp<MediaPlayerBase> setDataSource_pre(player_type playerType);
- void setDataSource_post(const sp<MediaPlayerBase>& p,
+ status_t setDataSource_post(const sp<MediaPlayerBase>& p,
status_t status);
- static void notify(void* cookie, int msg,
- int ext1, int ext2, const Parcel *obj);
+ void notify(int msg, int ext1, int ext2, const Parcel *obj);
pid_t pid() const { return mPid; }
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -375,6 +384,10 @@
// Modular DRM
virtual status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId);
virtual status_t releaseDrm();
+ // AudioRouting
+ virtual status_t setOutputDevice(audio_port_handle_t deviceId);
+ virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
+ virtual status_t enableAudioDeviceCallback(bool enabled);
private:
class ServiceDeathNotifier:
@@ -404,7 +417,22 @@
wp<MediaPlayerBase> mListener;
};
- void clearDeathNotifiers();
+ class AudioDeviceUpdatedNotifier: public AudioSystem::AudioDeviceCallback
+ {
+ public:
+ AudioDeviceUpdatedNotifier(const sp<MediaPlayerBase>& listener) {
+ mListener = listener;
+ }
+ ~AudioDeviceUpdatedNotifier() {}
+
+ virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
+ audio_port_handle_t deviceId);
+
+ private:
+ wp<MediaPlayerBase> mListener;
+ };
+
+ void clearDeathNotifiers_l();
friend class MediaPlayerService;
Client( const sp<MediaPlayerService>& service,
@@ -433,27 +461,42 @@
void addNewMetadataUpdate(media::Metadata::Type type);
// Disconnect from the currently connected ANativeWindow.
- void disconnectNativeWindow();
+ void disconnectNativeWindow_l();
status_t setAudioAttributes_l(const Parcel &request);
- mutable Mutex mLock;
- sp<MediaPlayerBase> mPlayer;
- sp<MediaPlayerService> mService;
- sp<IMediaPlayerClient> mClient;
- sp<AudioOutput> mAudioOutput;
- pid_t mPid;
- status_t mStatus;
- bool mLoop;
- int32_t mConnId;
- audio_session_t mAudioSessionId;
- audio_attributes_t * mAudioAttributes;
- uid_t mUid;
- sp<ANativeWindow> mConnectedWindow;
- sp<IBinder> mConnectedWindowBinder;
- struct sockaddr_in mRetransmitEndpoint;
- bool mRetransmitEndpointValid;
- sp<Client> mNextClient;
+ class Listener : public MediaPlayerBase::Listener {
+ public:
+ Listener(const wp<Client> &client) : mClient(client) {}
+ virtual ~Listener() {}
+ virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) {
+ sp<Client> client = mClient.promote();
+ if (client != NULL) {
+ client->notify(msg, ext1, ext2, obj);
+ }
+ }
+ private:
+ wp<Client> mClient;
+ };
+
+ mutable Mutex mLock;
+ sp<MediaPlayerBase> mPlayer;
+ sp<MediaPlayerService> mService;
+ sp<IMediaPlayerClient> mClient;
+ sp<AudioOutput> mAudioOutput;
+ pid_t mPid;
+ status_t mStatus;
+ bool mLoop;
+ int32_t mConnId;
+ audio_session_t mAudioSessionId;
+ audio_attributes_t * mAudioAttributes;
+ uid_t mUid;
+ sp<ANativeWindow> mConnectedWindow;
+ sp<IBinder> mConnectedWindowBinder;
+ struct sockaddr_in mRetransmitEndpoint;
+ bool mRetransmitEndpointValid;
+ sp<Client> mNextClient;
+ sp<MediaPlayerBase::Listener> mListener;
// Metadata filters.
media::Metadata::Filter mMetadataAllow; // protected by mLock
@@ -467,8 +510,9 @@
sp<ServiceDeathNotifier> mExtractorDeathListener;
sp<ServiceDeathNotifier> mCodecDeathListener;
+ sp<AudioDeviceUpdatedNotifier> mAudioDeviceUpdatedListener;
#if CALLBACK_ANTAGONIZER
- Antagonizer* mAntagonizer;
+ Antagonizer* mAntagonizer;
#endif
}; // Client
@@ -481,7 +525,6 @@
SortedVector< wp<Client> > mClients;
SortedVector< wp<MediaRecorderClient> > mMediaRecorderClients;
int32_t mNextConnId;
- sp<IOMX> mOMX;
};
// ----------------------------------------------------------------------------
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 6400481..4206647 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -339,7 +339,7 @@
wp<MediaRecorderClient> client(this);
mMediaPlayerService->removeMediaRecorderClient(client);
}
- clearDeathNotifiers();
+ clearDeathNotifiers_l();
return NO_ERROR;
}
@@ -411,7 +411,26 @@
}
}
-void MediaRecorderClient::clearDeathNotifiers() {
+MediaRecorderClient::AudioDeviceUpdatedNotifier::AudioDeviceUpdatedNotifier(
+ const sp<IMediaRecorderClient>& listener) {
+ mListener = listener;
+}
+
+MediaRecorderClient::AudioDeviceUpdatedNotifier::~AudioDeviceUpdatedNotifier() {
+}
+
+void MediaRecorderClient::AudioDeviceUpdatedNotifier::onAudioDeviceUpdate(
+ audio_io_handle_t audioIo,
+ audio_port_handle_t deviceId) {
+ sp<IMediaRecorderClient> listener = mListener.promote();
+ if (listener != NULL) {
+ listener->notify(MEDIA_RECORDER_AUDIO_ROUTING_CHANGED, audioIo, deviceId);
+ } else {
+ ALOGW("listener for process %d death is gone", MEDIA_RECORDER_AUDIO_ROUTING_CHANGED);
+ }
+}
+
+void MediaRecorderClient::clearDeathNotifiers_l() {
if (mCameraDeathListener != nullptr) {
mCameraDeathListener->unlinkToDeath();
mCameraDeathListener = nullptr;
@@ -425,8 +444,8 @@
status_t MediaRecorderClient::setListener(const sp<IMediaRecorderClient>& listener)
{
ALOGV("setListener");
- clearDeathNotifiers();
Mutex::Autolock lock(mLock);
+ clearDeathNotifiers_l();
if (mRecorder == NULL) {
ALOGE("recorder is not initialized");
return NO_INIT;
@@ -450,27 +469,17 @@
}
sCameraChecked = true;
- if (property_get_bool("persist.media.treble_omx", true)) {
- // Treble IOmx
- sp<IOmx> omx = IOmx::getService();
- if (omx == nullptr) {
- ALOGE("Treble IOmx not available");
- return NO_INIT;
- }
- mCodecDeathListener = new ServiceDeathNotifier(omx, listener,
- MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
- omx->linkToDeath(mCodecDeathListener, 0);
- } else {
- // Legacy IOMX
- binder = sm->getService(String16("media.codec"));
- if (binder == NULL) {
- ALOGE("Unable to connect to media codec service");
- return NO_INIT;
- }
- mCodecDeathListener = new ServiceDeathNotifier(binder, listener,
- MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
- binder->linkToDeath(mCodecDeathListener);
+ sp<IOmx> omx = IOmx::getService();
+ if (omx == nullptr) {
+ ALOGE("IOmx service is not available");
+ return NO_INIT;
}
+ mCodecDeathListener = new ServiceDeathNotifier(omx, listener,
+ MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
+ omx->linkToDeath(mCodecDeathListener, 0);
+
+ mAudioDeviceUpdatedNotifier = new AudioDeviceUpdatedNotifier(listener);
+ mRecorder->setAudioDeviceCallback(mAudioDeviceUpdatedNotifier);
return OK;
}
@@ -492,4 +501,40 @@
return OK;
}
+status_t MediaRecorderClient::setInputDevice(audio_port_handle_t deviceId) {
+ ALOGV("setInputDevice(%d)", deviceId);
+ Mutex::Autolock lock(mLock);
+ if (mRecorder != NULL) {
+ return mRecorder->setInputDevice(deviceId);
+ }
+ return NO_INIT;
+}
+
+status_t MediaRecorderClient::getRoutedDeviceId(audio_port_handle_t* deviceId) {
+ ALOGV("getRoutedDeviceId");
+ Mutex::Autolock lock(mLock);
+ if (mRecorder != NULL) {
+ return mRecorder->getRoutedDeviceId(deviceId);
+ }
+ return NO_INIT;
+}
+
+status_t MediaRecorderClient::enableAudioDeviceCallback(bool enabled) {
+ ALOGV("enableDeviceCallback: %d", enabled);
+ Mutex::Autolock lock(mLock);
+ if (mRecorder != NULL) {
+ return mRecorder->enableAudioDeviceCallback(enabled);
+ }
+ return NO_INIT;
+}
+
+status_t MediaRecorderClient::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones) {
+ ALOGV("getActiveMicrophones");
+ Mutex::Autolock lock(mLock);
+ if (mRecorder != NULL) {
+ return mRecorder->getActiveMicrophones(activeMicrophones);
+ }
+ return NO_INIT;
+}
}; // namespace android
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 7868a91..d2e681f 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -18,6 +18,7 @@
#ifndef ANDROID_MEDIARECORDERCLIENT_H
#define ANDROID_MEDIARECORDERCLIENT_H
+#include <media/AudioSystem.h>
#include <media/IMediaRecorder.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
@@ -58,7 +59,19 @@
wp<IMediaRecorderClient> mListener;
};
- void clearDeathNotifiers();
+ class AudioDeviceUpdatedNotifier: public AudioSystem::AudioDeviceCallback
+ {
+ public:
+ AudioDeviceUpdatedNotifier(const sp<IMediaRecorderClient>& listener);
+ virtual ~AudioDeviceUpdatedNotifier();
+ virtual void onAudioDeviceUpdate(
+ audio_io_handle_t audioIo,
+ audio_port_handle_t deviceId);
+ private:
+ wp<IMediaRecorderClient> mListener;
+ };
+
+ void clearDeathNotifiers_l();
public:
virtual status_t setCamera(const sp<hardware::ICamera>& camera,
@@ -91,6 +104,11 @@
virtual status_t dump(int fd, const Vector<String16>& args);
virtual status_t setInputSurface(const sp<PersistentSurface>& surface);
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource();
+ virtual status_t setInputDevice(audio_port_handle_t deviceId);
+ virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
+ virtual status_t enableAudioDeviceCallback(bool enabled);
+ virtual status_t getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones);
private:
friend class MediaPlayerService; // for accessing private constructor
@@ -103,6 +121,7 @@
sp<ServiceDeathNotifier> mCameraDeathListener;
sp<ServiceDeathNotifier> mCodecDeathListener;
+ sp<AudioDeviceUpdatedNotifier> mAudioDeviceUpdatedNotifier;
pid_t mPid;
Mutex mLock;
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 5a468f3..40b17bf 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -31,10 +31,11 @@
#include <binder/MemoryHeapBase.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
+#include <media/DataSource.h>
#include <media/IMediaHTTPService.h>
#include <media/MediaMetadataRetrieverInterface.h>
#include <media/MediaPlayerInterface.h>
-#include <media/stagefright/DataSource.h>
+#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/Utils.h>
#include <private/media/VideoFrame.h>
#include "MetadataRetrieverClient.h"
@@ -47,7 +48,6 @@
{
ALOGV("MetadataRetrieverClient constructor pid(%d)", pid);
mPid = pid;
- mThumbnail = NULL;
mAlbumArt = NULL;
mRetriever = NULL;
}
@@ -76,7 +76,6 @@
ALOGV("disconnect from pid %d", mPid);
Mutex::Autolock lock(mLock);
mRetriever.clear();
- mThumbnail.clear();
mAlbumArt.clear();
IPCThreadState::self()->flushCommands();
}
@@ -180,7 +179,7 @@
ALOGV("setDataSource(IDataSource)");
Mutex::Autolock lock(mLock);
- sp<DataSource> dataSource = DataSource::CreateFromIDataSource(source);
+ sp<DataSource> dataSource = CreateDataSourceFromIDataSource(source);
player_type playerType =
MediaPlayerFactory::getPlayerType(NULL /* client */, dataSource);
ALOGV("player type = %d", playerType);
@@ -200,34 +199,74 @@
(long long)timeUs, option, colorFormat, metaOnly);
Mutex::Autolock lock(mLock);
Mutex::Autolock glock(sLock);
- mThumbnail.clear();
if (mRetriever == NULL) {
ALOGE("retriever is not initialized");
return NULL;
}
- VideoFrame *frame = mRetriever->getFrameAtTime(
- timeUs, option, colorFormat, metaOnly);
+ sp<IMemory> frame = mRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
if (frame == NULL) {
ALOGE("failed to capture a video frame");
return NULL;
}
- size_t size = frame->getFlattenedSize();
- sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
- if (heap == NULL) {
- ALOGE("failed to create MemoryDealer");
- delete frame;
+ return frame;
+}
+
+sp<IMemory> MetadataRetrieverClient::getImageAtIndex(
+ int index, int colorFormat, bool metaOnly, bool thumbnail) {
+ ALOGV("getImageAtIndex: index(%d) colorFormat(%d), metaOnly(%d) thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
+ Mutex::Autolock lock(mLock);
+ Mutex::Autolock glock(sLock);
+ if (mRetriever == NULL) {
+ ALOGE("retriever is not initialized");
return NULL;
}
- mThumbnail = new MemoryBase(heap, 0, size);
- if (mThumbnail == NULL) {
- ALOGE("not enough memory for VideoFrame size=%zu", size);
- delete frame;
+ sp<IMemory> frame = mRetriever->getImageAtIndex(index, colorFormat, metaOnly, thumbnail);
+ if (frame == NULL) {
+ ALOGE("failed to extract image");
return NULL;
}
- VideoFrame *frameCopy = static_cast<VideoFrame *>(mThumbnail->pointer());
- frameCopy->copyFlattened(*frame);
- delete frame; // Fix memory leakage
- return mThumbnail;
+ return frame;
+}
+
+sp<IMemory> MetadataRetrieverClient::getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) {
+ ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d), rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+ Mutex::Autolock lock(mLock);
+ Mutex::Autolock glock(sLock);
+ if (mRetriever == NULL) {
+ ALOGE("retriever is not initialized");
+ return NULL;
+ }
+ sp<IMemory> frame = mRetriever->getImageRectAtIndex(
+ index, colorFormat, left, top, right, bottom);
+ if (frame == NULL) {
+ ALOGE("failed to extract image");
+ return NULL;
+ }
+ return frame;
+}
+
+status_t MetadataRetrieverClient::getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
+ frameIndex, numFrames, colorFormat, metaOnly);
+ Mutex::Autolock lock(mLock);
+ Mutex::Autolock glock(sLock);
+ if (mRetriever == NULL) {
+ ALOGE("retriever is not initialized");
+ return INVALID_OPERATION;
+ }
+
+ status_t err = mRetriever->getFrameAtIndex(
+ frames, frameIndex, numFrames, colorFormat, metaOnly);
+ if (err != OK) {
+ frames->clear();
+ return err;
+ }
+ return OK;
}
sp<IMemory> MetadataRetrieverClient::extractAlbumArt()
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index c78cd4b..272d093 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -52,6 +52,13 @@
virtual status_t setDataSource(const sp<IDataSource>& source, const char *mime);
virtual sp<IMemory> getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly);
+ virtual sp<IMemory> getImageAtIndex(
+ int index, int colorFormat, bool metaOnly, bool thumbnail);
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom);
+ virtual status_t getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly);
virtual sp<IMemory> extractAlbumArt();
virtual const char* extractMetadata(int keyCode);
@@ -68,9 +75,8 @@
sp<MediaMetadataRetrieverBase> mRetriever;
pid_t mPid;
- // Keep the shared memory copy of album art and capture frame (for thumbnail)
+ // Keep the shared memory copy of album art
sp<IMemory> mAlbumArt;
- sp<IMemory> mThumbnail;
};
}; // namespace android
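To illustrate the widened retriever surface, a hedged sketch of pulling one still image and a short run of video frames through the methods declared above; the retriever handle, indices, and color format are placeholders rather than values defined by this change.

    // Sketch: fetch one thumbnail-sized image and up to four consecutive frames.
    void dumpFrames(const sp<MetadataRetrieverClient>& retriever) {
        sp<IMemory> image = retriever->getImageAtIndex(
                0 /* index */, 0 /* colorFormat, placeholder */,
                false /* metaOnly */, true /* thumbnail */);
        if (image == NULL) {
            ALOGW("no image extracted");
        }

        std::vector<sp<IMemory> > frames;
        status_t err = retriever->getFrameAtIndex(
                &frames, 0 /* frameIndex */, 4 /* numFrames */,
                0 /* colorFormat, placeholder */, false /* metaOnly */);
        if (err != OK) {
            ALOGW("getFrameAtIndex failed: %d", err);
        }
    }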
diff --git a/media/libmediaplayerservice/RemoteDisplay.cpp b/media/libmediaplayerservice/RemoteDisplay.cpp
deleted file mode 100644
index 0eb4b5d..0000000
--- a/media/libmediaplayerservice/RemoteDisplay.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "RemoteDisplay.h"
-
-#include "source/WifiDisplaySource.h"
-
-#include <media/IRemoteDisplayClient.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/ANetworkSession.h>
-
-namespace android {
-
-RemoteDisplay::RemoteDisplay(
- const String16 &opPackageName,
- const sp<IRemoteDisplayClient> &client,
- const char *iface)
- : mLooper(new ALooper),
- mNetSession(new ANetworkSession) {
- mLooper->setName("wfd_looper");
-
- mSource = new WifiDisplaySource(opPackageName, mNetSession, client);
- mLooper->registerHandler(mSource);
-
- mNetSession->start();
- mLooper->start();
-
- mSource->start(iface);
-}
-
-RemoteDisplay::~RemoteDisplay() {
-}
-
-status_t RemoteDisplay::pause() {
- return mSource->pause();
-}
-
-status_t RemoteDisplay::resume() {
- return mSource->resume();
-}
-
-status_t RemoteDisplay::dispose() {
- mSource->stop();
- mSource.clear();
-
- mLooper->stop();
- mNetSession->stop();
-
- return OK;
-}
-
-} // namespace android
diff --git a/media/libmediaplayerservice/RemoteDisplay.h b/media/libmediaplayerservice/RemoteDisplay.h
deleted file mode 100644
index d4573e9..0000000
--- a/media/libmediaplayerservice/RemoteDisplay.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef REMOTE_DISPLAY_H_
-
-#define REMOTE_DISPLAY_H_
-
-#include <media/IMediaPlayerService.h>
-#include <media/IRemoteDisplay.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-struct ALooper;
-struct ANetworkSession;
-class IRemoteDisplayClient;
-struct WifiDisplaySource;
-
-struct RemoteDisplay : public BnRemoteDisplay {
- RemoteDisplay(
- const String16 &opPackageName,
- const sp<IRemoteDisplayClient> &client,
- const char *iface);
-
- virtual status_t pause();
- virtual status_t resume();
- virtual status_t dispose();
-
-protected:
- virtual ~RemoteDisplay();
-
-private:
- sp<ALooper> mNetLooper;
- sp<ALooper> mLooper;
- sp<ANetworkSession> mNetSession;
- sp<WifiDisplaySource> mSource;
-
- DISALLOW_EVIL_CONSTRUCTORS(RemoteDisplay);
-};
-
-} // namespace android
-
-#endif // REMOTE_DISPLAY_H_
-
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 89354d6..30c0b1c 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -69,22 +69,33 @@
// key for media statistics
static const char *kKeyRecorder = "recorder";
// attrs for media statistics
-static const char *kRecorderHeight = "android.media.mediarecorder.height";
-static const char *kRecorderWidth = "android.media.mediarecorder.width";
-static const char *kRecorderFrameRate = "android.media.mediarecorder.frame-rate";
-static const char *kRecorderVideoBitrate = "android.media.mediarecorder.video-bitrate";
-static const char *kRecorderAudioSampleRate = "android.media.mediarecorder.audio-samplerate";
-static const char *kRecorderAudioChannels = "android.media.mediarecorder.audio-channels";
+// NB: these are matched with public Java API constants defined
+// in frameworks/base/media/java/android/media/MediaRecorder.java
+// These must be kept synchronized with the constants there.
static const char *kRecorderAudioBitrate = "android.media.mediarecorder.audio-bitrate";
-static const char *kRecorderVideoIframeInterval = "android.media.mediarecorder.video-iframe-interval";
-static const char *kRecorderMovieTimescale = "android.media.mediarecorder.movie-timescale";
+static const char *kRecorderAudioChannels = "android.media.mediarecorder.audio-channels";
+static const char *kRecorderAudioSampleRate = "android.media.mediarecorder.audio-samplerate";
static const char *kRecorderAudioTimescale = "android.media.mediarecorder.audio-timescale";
-static const char *kRecorderVideoTimescale = "android.media.mediarecorder.video-timescale";
-static const char *kRecorderVideoProfile = "android.media.mediarecorder.video-encoder-profile";
-static const char *kRecorderVideoLevel = "android.media.mediarecorder.video-encoder-level";
-static const char *kRecorderCaptureFpsEnable = "android.media.mediarecorder.capture-fpsenable";
static const char *kRecorderCaptureFps = "android.media.mediarecorder.capture-fps";
+static const char *kRecorderCaptureFpsEnable = "android.media.mediarecorder.capture-fpsenable";
+static const char *kRecorderFrameRate = "android.media.mediarecorder.frame-rate";
+static const char *kRecorderHeight = "android.media.mediarecorder.height";
+static const char *kRecorderMovieTimescale = "android.media.mediarecorder.movie-timescale";
static const char *kRecorderRotation = "android.media.mediarecorder.rotation";
+static const char *kRecorderVideoBitrate = "android.media.mediarecorder.video-bitrate";
+static const char *kRecorderVideoIframeInterval = "android.media.mediarecorder.video-iframe-interval";
+static const char *kRecorderVideoLevel = "android.media.mediarecorder.video-encoder-level";
+static const char *kRecorderVideoProfile = "android.media.mediarecorder.video-encoder-profile";
+static const char *kRecorderVideoTimescale = "android.media.mediarecorder.video-timescale";
+static const char *kRecorderWidth = "android.media.mediarecorder.width";
+
+// new fields, not yet frozen in the public Java API definitions
+static const char *kRecorderAudioMime = "android.media.mediarecorder.audio.mime";
+static const char *kRecorderVideoMime = "android.media.mediarecorder.video.mime";
+static const char *kRecorderDurationMs = "android.media.mediarecorder.durationMs";
+static const char *kRecorderPaused = "android.media.mediarecorder.pausedMs";
+static const char *kRecorderNumPauses = "android.media.mediarecorder.NPauses";
+
// To collect the encoder usage for the battery app
static void addBatteryData(uint32_t params) {
@@ -101,9 +112,11 @@
: MediaRecorderBase(opPackageName),
mWriter(NULL),
mOutputFd(-1),
- mAudioSource(AUDIO_SOURCE_CNT),
+ mAudioSource((audio_source_t)AUDIO_SOURCE_CNT), // initialize with invalid value
mVideoSource(VIDEO_SOURCE_LIST_END),
- mStarted(false) {
+ mStarted(false),
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mDeviceCallbackEnabled(false) {
ALOGV("Constructor");
@@ -120,22 +133,18 @@
}
// log the current record, provided it has some information worth recording
- if (mAnalyticsDirty && mAnalyticsItem != NULL) {
- updateMetrics();
- if (mAnalyticsItem->count() > 0) {
- mAnalyticsItem->setFinalized(true);
- mAnalyticsItem->selfrecord();
- }
- delete mAnalyticsItem;
- mAnalyticsItem = NULL;
- }
+ // NB: this also reclaims & clears mAnalyticsItem.
+ flushAndResetMetrics(false);
}
void StagefrightRecorder::updateMetrics() {
ALOGV("updateMetrics");
- // we'll populate the values from the raw fields.
- // (NOT going to populate as we go through the various set* ops)
+ // we run as part of the media player service; what we really want to
+ // know is the app which requested the recording.
+ mAnalyticsItem->setUid(mClientUid);
+
+ // populate the values from the raw fields.
// TBD mOutputFormat = OUTPUT_FORMAT_THREE_GPP;
// TBD mAudioEncoder = AUDIO_ENCODER_AMR_NB;
@@ -163,7 +172,6 @@
// TBD mTrackEveryTimeDurationUs = 0;
mAnalyticsItem->setInt32(kRecorderCaptureFpsEnable, mCaptureFpsEnable);
mAnalyticsItem->setDouble(kRecorderCaptureFps, mCaptureFps);
- // TBD mCaptureFps = -1.0;
// TBD mCameraSourceTimeLapse = NULL;
// TBD mMetaDataStoredInVideoBuffers = kMetadataBufferTypeInvalid;
// TBD mEncoderProfiles = MediaProfiles::getInstance();
@@ -172,26 +180,29 @@
// PII mLongitudex10000 = -3600000;
// TBD mTotalBitRate = 0;
- // TBD: some duration information (capture, paused)
- //
-
+ // duration information (recorded, paused, # of pauses)
+ mAnalyticsItem->setInt64(kRecorderDurationMs, (mDurationRecordedUs+500)/1000 );
+ if (mNPauses != 0) {
+ mAnalyticsItem->setInt64(kRecorderPaused, (mDurationPausedUs+500)/1000 );
+ mAnalyticsItem->setInt32(kRecorderNumPauses, mNPauses);
+ }
}
-void StagefrightRecorder::resetMetrics() {
- ALOGV("resetMetrics");
- // flush anything we have, restart the record
+void StagefrightRecorder::flushAndResetMetrics(bool reinitialize) {
+ ALOGV("flushAndResetMetrics");
+ // flush anything we have, maybe setup a new record
if (mAnalyticsDirty && mAnalyticsItem != NULL) {
updateMetrics();
if (mAnalyticsItem->count() > 0) {
- mAnalyticsItem->setFinalized(true);
mAnalyticsItem->selfrecord();
}
delete mAnalyticsItem;
mAnalyticsItem = NULL;
}
- mAnalyticsItem = new MediaAnalyticsItem(kKeyRecorder);
- (void) mAnalyticsItem->generateSessionID();
mAnalyticsDirty = false;
+ if (reinitialize) {
+ mAnalyticsItem = new MediaAnalyticsItem(kKeyRecorder);
+ }
}
status_t StagefrightRecorder::init() {
@@ -204,7 +215,7 @@
return OK;
}
-// The client side of mediaserver asks it to creat a SurfaceMediaSource
+// The client side of mediaserver asks it to create a SurfaceMediaSource
// and return an interface reference. The client side will use that
// while encoding GL Frames
sp<IGraphicBufferProducer> StagefrightRecorder::querySurfaceMediaSource() const {
@@ -1027,6 +1038,8 @@
mAnalyticsDirty = true;
mStarted = true;
+ mStartedRecordingUs = systemTime() / 1000;
+
uint32_t params = IMediaPlayerService::kBatteryDataCodecStarted;
if (mAudioSource != AUDIO_SOURCE_CNT) {
params |= IMediaPlayerService::kBatteryDataTrackAudio;
@@ -1069,7 +1082,8 @@
mAudioChannels,
mSampleRate,
mClientUid,
- mClientPid);
+ mClientPid,
+ mSelectedDeviceId);
status_t err = audioSource->initCheck();
@@ -1105,6 +1119,14 @@
return NULL;
}
+ // log audio mime type for media metrics
+ if (mAnalyticsItem != NULL) {
+ AString audiomime;
+ if (format->findString("mime", &audiomime)) {
+ mAnalyticsItem->setCString(kRecorderAudioMime, audiomime.c_str());
+ }
+ }
+
int32_t maxInputSize;
CHECK(audioSource->getFormat()->findInt32(
kKeyMaxInputSize, &maxInputSize));
@@ -1120,6 +1142,10 @@
sp<MediaCodecSource> audioEncoder =
MediaCodecSource::Create(mLooper, format, audioSource);
+ sp<AudioSystem::AudioDeviceCallback> callback = mAudioDeviceCallback.promote();
+ if (mDeviceCallbackEnabled && callback != 0) {
+ audioSource->addAudioDeviceCallback(callback);
+ }
mAudioSourceNode = audioSource;
if (audioEncoder == NULL) {
@@ -1647,6 +1673,14 @@
break;
}
+ // log video mime type for media metrics
+ if (mAnalyticsItem != NULL) {
+ AString videomime;
+ if (format->findString("mime", &videomime)) {
+ mAnalyticsItem->setCString(kRecorderVideoMime, videomime.c_str());
+ }
+ }
+
if (cameraSource != NULL) {
sp<MetaData> meta = cameraSource->getFormat();
@@ -1909,6 +1943,13 @@
sp<MetaData> meta = new MetaData;
meta->setInt64(kKeyTime, mPauseStartTimeUs);
+ if (mStartedRecordingUs != 0) {
+ // should always be true
+ int64_t recordingUs = mPauseStartTimeUs - mStartedRecordingUs;
+ mDurationRecordedUs += recordingUs;
+ mStartedRecordingUs = 0;
+ }
+
if (mAudioEncoderSource != NULL) {
mAudioEncoderSource->pause();
}
@@ -1967,6 +2008,16 @@
source->setInputBufferTimeOffset((int64_t)timeOffset);
source->start(meta.get());
}
+
+
+ // sum info on pause duration
+ // (ignore the 30msec of overlap adjustment factored into mTotalPausedDurationUs)
+ int64_t pausedUs = resumeStartTimeUs - mPauseStartTimeUs;
+ mDurationPausedUs += pausedUs;
+ mNPauses++;
+ // and a timestamp marking that we're back to recording....
+ mStartedRecordingUs = resumeStartTimeUs;
+
mPauseStartTimeUs = 0;
return OK;
@@ -1995,10 +2046,28 @@
mWriter.clear();
}
- resetMetrics();
+ // account for the last 'segment' -- whether paused or recording
+ if (mPauseStartTimeUs != 0) {
+ // we were paused
+ int64_t additive = stopTimeUs - mPauseStartTimeUs;
+ mDurationPausedUs += additive;
+ mNPauses++;
+ } else if (mStartedRecordingUs != 0) {
+ // we were recording
+ int64_t additive = stopTimeUs - mStartedRecordingUs;
+ mDurationRecordedUs += additive;
+ } else {
+ ALOGW("stop while neither recording nor paused");
+ }
+ flushAndResetMetrics(true);
+
+ mDurationRecordedUs = 0;
+ mDurationPausedUs = 0;
+ mNPauses = 0;
mTotalPausedDurationUs = 0;
mPauseStartTimeUs = 0;
+ mStartedRecordingUs = 0;
mGraphicBufferProducer.clear();
mPersistentSurface.clear();
@@ -2039,7 +2108,7 @@
stop();
// No audio or video source by default
- mAudioSource = AUDIO_SOURCE_CNT;
+ mAudioSource = (audio_source_t)AUDIO_SOURCE_CNT; // reset to invalid value
mVideoSource = VIDEO_SOURCE_LIST_END;
// Default parameters
@@ -2077,6 +2146,12 @@
mLongitudex10000 = -3600000;
mTotalBitRate = 0;
+ // tracking how long we recorded.
+ mDurationRecordedUs = 0;
+ mStartedRecordingUs = 0;
+ mDurationPausedUs = 0;
+ mNPauses = 0;
+
mOutputFd = -1;
return OK;
@@ -2116,6 +2191,55 @@
return OK;
}
+status_t StagefrightRecorder::setInputDevice(audio_port_handle_t deviceId) {
+ ALOGV("setInputDevice");
+
+ if (mSelectedDeviceId != deviceId) {
+ mSelectedDeviceId = deviceId;
+ if (mAudioSourceNode != 0) {
+ return mAudioSourceNode->setInputDevice(deviceId);
+ }
+ }
+ return NO_ERROR;
+}
+
+status_t StagefrightRecorder::getRoutedDeviceId(audio_port_handle_t* deviceId) {
+ ALOGV("getRoutedDeviceId");
+
+ if (mAudioSourceNode != 0) {
+ status_t status = mAudioSourceNode->getRoutedDeviceId(deviceId);
+ return status;
+ }
+ return NO_INIT;
+}
+
+void StagefrightRecorder::setAudioDeviceCallback(
+ const sp<AudioSystem::AudioDeviceCallback>& callback) {
+ mAudioDeviceCallback = callback;
+}
+
+status_t StagefrightRecorder::enableAudioDeviceCallback(bool enabled) {
+ mDeviceCallbackEnabled = enabled;
+ sp<AudioSystem::AudioDeviceCallback> callback = mAudioDeviceCallback.promote();
+ if (mAudioSourceNode != 0 && callback != 0) {
+ if (enabled) {
+ return mAudioSourceNode->addAudioDeviceCallback(callback);
+ } else {
+ return mAudioSourceNode->removeAudioDeviceCallback(callback);
+ }
+ }
+ return NO_ERROR;
+}
+
+status_t StagefrightRecorder::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones) {
+ if (mAudioSourceNode != 0) {
+ return mAudioSourceNode->getActiveMicrophones(activeMicrophones);
+ }
+ return NO_INIT;
+}
+
+
status_t StagefrightRecorder::dump(
int fd, const Vector<String16>& args) const {
ALOGV("dump");
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 9a6c4da..faa2e59 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -25,7 +25,7 @@
#include <system/audio.h>
-#include <MetadataBufferType.h>
+#include <media/hardware/MetadataBufferType.h>
namespace android {
@@ -72,6 +72,12 @@
virtual status_t dump(int fd, const Vector<String16> &args) const;
// Querying a SurfaceMediaSource
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const;
+ virtual status_t setInputDevice(audio_port_handle_t deviceId);
+ virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
+ virtual void setAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+ virtual status_t enableAudioDeviceCallback(bool enabled);
+ virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+
private:
mutable Mutex mLock;
@@ -89,7 +95,7 @@
MediaAnalyticsItem *mAnalyticsItem;
bool mAnalyticsDirty;
- void resetMetrics();
+ void flushAndResetMetrics(bool reinitialize);
void updateMetrics();
audio_source_t mAudioSource;
@@ -121,6 +127,11 @@
int32_t mStartTimeOffsetMs;
int32_t mTotalBitRate;
+ int64_t mDurationRecordedUs;
+ int64_t mStartedRecordingUs;
+ int64_t mDurationPausedUs;
+ int32_t mNPauses;
+
bool mCaptureFpsEnable;
double mCaptureFps;
int64_t mTimeBetweenCaptureUs;
@@ -144,6 +155,10 @@
sp<IGraphicBufferProducer> mGraphicBufferProducer;
sp<ALooper> mLooper;
+ audio_port_handle_t mSelectedDeviceId;
+ bool mDeviceCallbackEnabled;
+ wp<AudioSystem::AudioDeviceCallback> mAudioDeviceCallback;
+
static const int kMaxHighSpeedFps = 1000;
status_t prepareInternal();
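The recorder keeps mAudioDeviceCallback as a weak pointer and promotes it before each use so that it never extends the lifetime of the callback's owner; a minimal sketch of that idiom, independent of the recorder:

    // Sketch of the wp<> -> sp<> promotion used for mAudioDeviceCallback above.
    wp<AudioSystem::AudioDeviceCallback> weakCallback;  // set via setAudioDeviceCallback()

    void notifyIfStillAlive(audio_io_handle_t io, audio_port_handle_t device) {
        // promote() yields a null sp<> once the owner has been destroyed.
        sp<AudioSystem::AudioDeviceCallback> callback = weakCallback.promote();
        if (callback != 0) {
            callback->onAudioDeviceUpdate(io, device);
        }
    }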
diff --git a/media/libmediaplayerservice/include/MediaPlayerInterface.h b/media/libmediaplayerservice/include/MediaPlayerInterface.h
index e8d59a7..3119950 100644
--- a/media/libmediaplayerservice/include/MediaPlayerInterface.h
+++ b/media/libmediaplayerservice/include/MediaPlayerInterface.h
@@ -66,14 +66,17 @@
// duration below which we do not allow deep audio buffering
#define AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US 5000000
-// callback mechanism for passing messages to MediaPlayer object
-typedef void (*notify_callback_f)(void* cookie,
- int msg, int ext1, int ext2, const Parcel *obj);
-
// abstract base class - use MediaPlayerInterface
class MediaPlayerBase : public RefBase
{
public:
+ // callback mechanism for passing messages to MediaPlayer object
+ class Listener : public RefBase {
+ public:
+ virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) = 0;
+ virtual ~Listener() {}
+ };
+
// AudioSink: abstraction layer for audio output
class AudioSink : public RefBase {
public:
@@ -146,13 +149,18 @@
virtual status_t setParameters(const String8& /* keyValuePairs */) { return NO_ERROR; }
virtual String8 getParameters(const String8& /* keys */) { return String8::empty(); }
- virtual VolumeShaper::Status applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation);
- virtual sp<VolumeShaper::State> getVolumeShaperState(int id);
+ virtual media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation);
+ virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id);
+
+ // AudioRouting
+ virtual status_t setOutputDevice(audio_port_handle_t deviceId);
+ virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
+ virtual status_t enableAudioDeviceCallback(bool enabled);
};
- MediaPlayerBase() : mCookie(0), mNotify(0) {}
+ MediaPlayerBase() {}
virtual ~MediaPlayerBase() {}
virtual status_t initCheck() = 0;
virtual bool hardwareOutput() = 0;
@@ -180,7 +188,7 @@
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
- virtual status_t getDefaultBufferingSettings(
+ virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) {
*buffering = BufferingSettings();
return OK;
@@ -225,6 +233,9 @@
virtual status_t getCurrentPosition(int *msec) = 0;
virtual status_t getDuration(int *msec) = 0;
virtual status_t reset() = 0;
+ virtual status_t notifyAt(int64_t /* mediaTimeUs */) {
+ return INVALID_OPERATION;
+ }
virtual status_t setLooping(int loop) = 0;
virtual player_type playerType() = 0;
virtual status_t setParameter(int key, const Parcel &request) = 0;
@@ -263,22 +274,22 @@
};
void setNotifyCallback(
- void* cookie, notify_callback_f notifyFunc) {
+ const sp<Listener> &listener) {
Mutex::Autolock autoLock(mNotifyLock);
- mCookie = cookie; mNotify = notifyFunc;
+ mListener = listener;
}
void sendEvent(int msg, int ext1=0, int ext2=0,
const Parcel *obj=NULL) {
- notify_callback_f notifyCB;
- void* cookie;
+ sp<Listener> listener;
{
Mutex::Autolock autoLock(mNotifyLock);
- notifyCB = mNotify;
- cookie = mCookie;
+ listener = mListener;
}
- if (notifyCB) notifyCB(cookie, msg, ext1, ext2, obj);
+ if (listener != NULL) {
+ listener->notify(msg, ext1, ext2, obj);
+ }
}
virtual status_t dump(int /* fd */, const Vector<String16>& /* args */) const {
@@ -297,8 +308,7 @@
friend class MediaPlayerService;
Mutex mNotifyLock;
- void* mCookie;
- notify_callback_f mNotify;
+ sp<Listener> mListener;
};
// Implement this class for media players that use the AudioFlinger software mixer
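With the (cookie, notify_callback_f) pair replaced by the Listener class above, a hedged sketch of the smallest possible listener and its registration:

    // Sketch: minimal implementation of the new MediaPlayerBase::Listener.
    struct LoggingListener : public MediaPlayerBase::Listener {
        virtual void notify(int msg, int ext1, int ext2, const Parcel* /* obj */) {
            ALOGV("player event msg=%d ext1=%d ext2=%d", msg, ext1, ext2);
        }
    };

    // Registration replaces the old function-pointer/cookie pair:
    //   player->setNotifyCallback(new LoggingListener());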
diff --git a/media/libmediaplayerservice/nuplayer/AWakeLock.cpp b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
new file mode 100644
index 0000000..684ba2e
--- /dev/null
+++ b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AWakeLock"
+#include <utils/Log.h>
+
+#include "AWakeLock.h"
+
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <powermanager/PowerManager.h>
+
+
+namespace android {
+
+AWakeLock::AWakeLock() :
+ mPowerManager(NULL),
+ mWakeLockToken(NULL),
+ mWakeLockCount(0),
+ mDeathRecipient(new PMDeathRecipient(this)) {}
+
+AWakeLock::~AWakeLock() {
+ if (mPowerManager != NULL) {
+ sp<IBinder> binder = IInterface::asBinder(mPowerManager);
+ binder->unlinkToDeath(mDeathRecipient);
+ }
+ clearPowerManager();
+}
+
+bool AWakeLock::acquire() {
+ if (mWakeLockCount == 0) {
+ CHECK(mWakeLockToken == NULL);
+ if (mPowerManager == NULL) {
+ // use checkService() to avoid blocking if power service is not up yet
+ sp<IBinder> binder =
+ defaultServiceManager()->checkService(String16("power"));
+ if (binder == NULL) {
+ ALOGW("could not get the power manager service");
+ } else {
+ mPowerManager = interface_cast<IPowerManager>(binder);
+ binder->linkToDeath(mDeathRecipient);
+ }
+ }
+ if (mPowerManager != NULL) {
+ sp<IBinder> binder = new BBinder();
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ status_t status = mPowerManager->acquireWakeLock(
+ POWERMANAGER_PARTIAL_WAKE_LOCK,
+ binder, String16("AWakeLock"), String16("media"));
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ if (status == NO_ERROR) {
+ mWakeLockToken = binder;
+ mWakeLockCount++;
+ return true;
+ }
+ }
+ } else {
+ mWakeLockCount++;
+ return true;
+ }
+ return false;
+}
+
+void AWakeLock::release(bool force) {
+ if (mWakeLockCount == 0) {
+ return;
+ }
+ if (force) {
+ // Force wakelock release below by setting reference count to 1.
+ mWakeLockCount = 1;
+ }
+ if (--mWakeLockCount == 0) {
+ CHECK(mWakeLockToken != NULL);
+ if (mPowerManager != NULL) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0 /* flags */);
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ }
+ mWakeLockToken.clear();
+ }
+}
+
+void AWakeLock::clearPowerManager() {
+ release(true);
+ mPowerManager.clear();
+}
+
+void AWakeLock::PMDeathRecipient::binderDied(const wp<IBinder>& who __unused) {
+ if (mWakeLock != NULL) {
+ mWakeLock->clearPowerManager();
+ }
+}
+
+} // namespace android
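AWakeLock is reference counted: acquire() returns false when the power service cannot be reached, and release(true) force-drops the lock regardless of the count (as binderDied() does via clearPowerManager()). A hedged usage sketch:

    // Sketch: hold a partial wakelock across a unit of work that must not be suspended.
    sp<AWakeLock> wakeLock = new AWakeLock();

    void doWorkHoldingWakeLock() {
        bool held = wakeLock->acquire();    // false if the power service was unavailable
        // ... time-sensitive work ...
        if (held) {
            wakeLock->release(false /* force */);  // balanced release
        }
    }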
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AWakeLock.h b/media/libmediaplayerservice/nuplayer/AWakeLock.h
similarity index 100%
rename from media/libstagefright/foundation/include/media/stagefright/foundation/AWakeLock.h
rename to media/libmediaplayerservice/nuplayer/AWakeLock.h
diff --git a/media/libmediaplayerservice/nuplayer/Android.bp b/media/libmediaplayerservice/nuplayer/Android.bp
new file mode 100644
index 0000000..a4da564
--- /dev/null
+++ b/media/libmediaplayerservice/nuplayer/Android.bp
@@ -0,0 +1,64 @@
+cc_library_static {
+
+ srcs: [
+ "AWakeLock.cpp",
+ "GenericSource.cpp",
+ "HTTPLiveSource.cpp",
+ "NuPlayer.cpp",
+ "NuPlayerCCDecoder.cpp",
+ "NuPlayerDecoder.cpp",
+ "NuPlayerDecoderBase.cpp",
+ "NuPlayerDecoderPassThrough.cpp",
+ "NuPlayerDriver.cpp",
+ "NuPlayerDrm.cpp",
+ "NuPlayerRenderer.cpp",
+ "NuPlayerStreamListener.cpp",
+ "RTSPSource.cpp",
+ "StreamingSource.cpp",
+ ],
+
+ header_libs: [
+ "media_plugin_headers",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright",
+ "frameworks/av/media/libstagefright/httplive",
+ "frameworks/av/media/libstagefright/include",
+ "frameworks/av/media/libstagefright/mpeg2ts",
+ "frameworks/av/media/libstagefright/rtsp",
+ "frameworks/av/media/libstagefright/timedtext",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ product_variables: {
+ debuggable: {
+ cflags: [
+ "-DENABLE_STAGEFRIGHT_EXPERIMENTS",
+ ],
+ }
+ },
+
+ shared_libs: [
+ "libbinder",
+ "libui",
+ "libgui",
+ "libmedia",
+ "libmediadrm",
+ "libpowermanager",
+ ],
+
+ name: "libstagefright_nuplayer",
+
+ sanitize: {
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+
+}
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
deleted file mode 100644
index c582631..0000000
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ /dev/null
@@ -1,50 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- GenericSource.cpp \
- HTTPLiveSource.cpp \
- NuPlayer.cpp \
- NuPlayerCCDecoder.cpp \
- NuPlayerDecoder.cpp \
- NuPlayerDecoderBase.cpp \
- NuPlayerDecoderPassThrough.cpp \
- NuPlayerDriver.cpp \
- NuPlayerDrm.cpp \
- NuPlayerRenderer.cpp \
- NuPlayerStreamListener.cpp \
- RTSPSource.cpp \
- StreamingSource.cpp \
-
-LOCAL_C_INCLUDES := \
- frameworks/av/media/libstagefright \
- frameworks/av/media/libstagefright/httplive \
- frameworks/av/media/libstagefright/include \
- frameworks/av/media/libstagefright/mpeg2ts \
- frameworks/av/media/libstagefright/rtsp \
- frameworks/av/media/libstagefright/timedtext \
- frameworks/av/media/libmediaplayerservice \
- frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Werror -Wall
-
-# enable experiments only in userdebug and eng builds
-ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
-LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
-endif
-
-LOCAL_SHARED_LIBRARIES := \
- libbinder \
- libui \
- libgui \
- libmedia \
- libmediadrm \
-
-LOCAL_MODULE:= libstagefright_nuplayer
-
-LOCAL_MODULE_TAGS := eng
-
-LOCAL_SANITIZE := cfi
-LOCAL_SANITIZE_DIAG := cfi
-
-include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index aa21fff..23d66bb 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -23,17 +23,22 @@
#include "AnotherPacketSource.h"
#include <binder/IServiceManager.h>
#include <cutils/properties.h>
+#include <media/DataSource.h>
+#include <media/MediaBufferHolder.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaSource.h>
#include <media/IMediaExtractorService.h>
#include <media/IMediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
+#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/FileSource.h>
+#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include "../../libstagefright/include/NuCachedSource2.h"
@@ -41,22 +46,26 @@
namespace android {
-static const int kLowWaterMarkMs = 2000; // 2secs
-static const int kHighWaterMarkMs = 5000; // 5secs
-static const int kHighWaterMarkRebufferMs = 15000; // 15secs
+static const int kInitialMarkMs = 5000; // 5secs
-static const int kLowWaterMarkKB = 40;
-static const int kHighWaterMarkKB = 200;
+//static const int kPausePlaybackMarkMs = 2000; // 2secs
+static const int kResumePlaybackMarkMs = 15000; // 15secs
NuPlayer::GenericSource::GenericSource(
const sp<AMessage> ¬ify,
bool uidValid,
- uid_t uid)
+ uid_t uid,
+ const sp<MediaClock> &mediaClock)
: Source(notify),
mAudioTimeUs(0),
mAudioLastDequeueTimeUs(0),
mVideoTimeUs(0),
mVideoLastDequeueTimeUs(0),
+ mPrevBufferPercentage(-1),
+ mPollBufferingGeneration(0),
+ mSentPauseOnBuffering(false),
+ mAudioDataGeneration(0),
+ mVideoDataGeneration(0),
mFetchSubtitleDataGeneration(0),
mFetchTimedTextDataGeneration(0),
mDurationUs(-1ll),
@@ -65,12 +74,15 @@
mIsStreaming(false),
mUIDValid(uidValid),
mUID(uid),
+ mMediaClock(mediaClock),
mFd(-1),
mBitrate(-1ll),
mPendingReadBufferTypes(0) {
ALOGV("GenericSource");
+ CHECK(mediaClock != NULL);
- mBufferingMonitor = new BufferingMonitor(notify);
+ mBufferingSettings.mInitialMarkMs = kInitialMarkMs;
+ mBufferingSettings.mResumePlaybackMarkMs = kResumePlaybackMarkMs;
resetDataSource();
}
@@ -79,6 +91,7 @@
mHTTPService.clear();
mHttpSource.clear();
+ mDisconnected = false;
mUri.clear();
mUriHeaders.clear();
if (mFd >= 0) {
@@ -88,14 +101,7 @@
mOffset = 0;
mLength = 0;
mStarted = false;
- mStopRead = true;
-
- if (mBufferingMonitorLooper != NULL) {
- mBufferingMonitorLooper->unregisterHandler(mBufferingMonitor->id());
- mBufferingMonitorLooper->stop();
- mBufferingMonitorLooper = NULL;
- }
- mBufferingMonitor->stop();
+ mPreparing = false;
mIsDrmProtected = false;
mIsDrmReleased = false;
@@ -107,6 +113,7 @@
const sp<IMediaHTTPService> &httpService,
const char *url,
const KeyedVector<String8, String8> *headers) {
+ Mutex::Autolock _l(mLock);
ALOGV("setDataSource url: %s", url);
resetDataSource();
@@ -125,6 +132,7 @@
status_t NuPlayer::GenericSource::setDataSource(
int fd, int64_t offset, int64_t length) {
+ Mutex::Autolock _l(mLock);
ALOGV("setDataSource %d/%lld/%lld", fd, (long long)offset, (long long)length);
resetDataSource();
@@ -139,6 +147,7 @@
}
status_t NuPlayer::GenericSource::setDataSource(const sp<DataSource>& source) {
+ Mutex::Autolock _l(mLock);
ALOGV("setDataSource (source: %p)", source.get());
resetDataSource();
@@ -147,21 +156,34 @@
}
sp<MetaData> NuPlayer::GenericSource::getFileFormatMeta() const {
+ Mutex::Autolock _l(mLock);
return mFileMeta;
}
status_t NuPlayer::GenericSource::initFromDataSource() {
sp<IMediaExtractor> extractor;
CHECK(mDataSource != NULL);
+ sp<DataSource> dataSource = mDataSource;
- extractor = MediaExtractor::Create(mDataSource, NULL);
+ mLock.unlock();
+ // This might take long time if data source is not reliable.
+ extractor = MediaExtractorFactory::Create(dataSource, NULL);
if (extractor == NULL) {
ALOGE("initFromDataSource, cannot create extractor!");
return UNKNOWN_ERROR;
}
- mFileMeta = extractor->getMetaData();
+ sp<MetaData> fileMeta = extractor->getMetaData();
+
+ size_t numtracks = extractor->countTracks();
+ if (numtracks == 0) {
+ ALOGE("initFromDataSource, source has no track!");
+ return UNKNOWN_ERROR;
+ }
+
+ mLock.lock();
+ mFileMeta = fileMeta;
if (mFileMeta != NULL) {
int64_t duration;
if (mFileMeta->findInt64(kKeyDuration, &duration)) {
@@ -171,12 +193,6 @@
int32_t totalBitrate = 0;
- size_t numtracks = extractor->countTracks();
- if (numtracks == 0) {
- ALOGE("initFromDataSource, source has no track!");
- return UNKNOWN_ERROR;
- }
-
mMimes.clear();
for (size_t i = 0; i < numtracks; ++i) {
@@ -259,14 +275,23 @@
return OK;
}
-status_t NuPlayer::GenericSource::getDefaultBufferingSettings(
+status_t NuPlayer::GenericSource::getBufferingSettings(
BufferingSettings* buffering /* nonnull */) {
- mBufferingMonitor->getDefaultBufferingSettings(buffering);
+ {
+ Mutex::Autolock _l(mLock);
+ *buffering = mBufferingSettings;
+ }
+
+ ALOGV("getBufferingSettings{%s}", buffering->toString().string());
return OK;
}
status_t NuPlayer::GenericSource::setBufferingSettings(const BufferingSettings& buffering) {
- return mBufferingMonitor->setBufferingSettings(buffering);
+ ALOGV("setBufferingSettings{%s}", buffering.toString().string());
+
+ Mutex::Autolock _l(mLock);
+ mBufferingSettings = buffering;
+ return OK;
}
status_t NuPlayer::GenericSource::startSources() {
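The BufferingMonitor watermarks above are replaced by two marks carried in mBufferingSettings; a hedged sketch of adjusting them through the accessors just shown (the field names mInitialMarkMs and mResumePlaybackMarkMs come from this change, the surrounding code is illustrative):

    // Sketch: tighten the buffering marks on a GenericSource instance.
    void configureBuffering(const sp<NuPlayer::GenericSource>& source) {
        BufferingSettings settings;
        source->getBufferingSettings(&settings);   // starts from the constructor defaults
        settings.mInitialMarkMs = 2000;            // begin playback after 2s is buffered
        settings.mResumePlaybackMarkMs = 10000;    // resume after 10s is re-buffered
        source->setBufferingSettings(settings);
    }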
@@ -300,22 +325,11 @@
}
}
-status_t NuPlayer::GenericSource::setBuffers(
- bool audio, Vector<MediaBuffer *> &buffers) {
- if (mIsSecure && !audio && mVideoTrack.mSource != NULL) {
- return mVideoTrack.mSource->setBuffers(buffers);
- }
- return INVALID_OPERATION;
-}
-
bool NuPlayer::GenericSource::isStreaming() const {
+ Mutex::Autolock _l(mLock);
return mIsStreaming;
}
-void NuPlayer::GenericSource::setOffloadAudio(bool offload) {
- mBufferingMonitor->setOffloadAudio(offload);
-}
-
NuPlayer::GenericSource::~GenericSource() {
ALOGV("~GenericSource");
if (mLooper != NULL) {
@@ -326,6 +340,7 @@
}
void NuPlayer::GenericSource::prepareAsync() {
+ Mutex::Autolock _l(mLock);
ALOGV("prepareAsync: (looper: %d)", (mLooper != NULL));
if (mLooper == NULL) {
@@ -354,7 +369,7 @@
String8 contentType;
if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
- mHttpSource = DataSource::CreateMediaHTTP(mHTTPService);
+ mHttpSource = DataSourceFactory::CreateMediaHTTP(mHTTPService);
if (mHttpSource == NULL) {
ALOGE("Failed to create http source!");
notifyPreparedAndCleanup(UNKNOWN_ERROR);
@@ -362,9 +377,15 @@
}
}
- mDataSource = DataSource::CreateFromURI(
+ mLock.unlock();
+ // This might take long time if connection has some issue.
+ sp<DataSource> dataSource = DataSourceFactory::CreateFromURI(
mHTTPService, uri, &mUriHeaders, &contentType,
static_cast<HTTPBase *>(mHttpSource.get()));
+ mLock.lock();
+ if (!mDisconnected) {
+ mDataSource = dataSource;
+ }
} else {
if (property_get_bool("media.stagefright.extractremote", true) &&
!FileSource::requiresDrm(mFd, mOffset, mLength, nullptr /* mime */)) {
@@ -379,7 +400,7 @@
ALOGV("IDataSource(FileSource): %p %d %lld %lld",
source.get(), mFd, (long long)mOffset, (long long)mLength);
if (source.get() != nullptr) {
- mDataSource = DataSource::CreateFromIDataSource(source);
+ mDataSource = CreateDataSourceFromIDataSource(source);
if (mDataSource != nullptr) {
// Close the local file descriptor as it is not needed anymore.
close(mFd);
@@ -397,7 +418,7 @@
mDataSource = new FileSource(mFd, mOffset, mLength);
}
// TODO: close should always be done on mFd, see the lines following
- // DataSource::CreateFromIDataSource above,
+ // CreateDataSourceFromIDataSource above,
// and the FileSource constructor should dup the mFd argument as needed.
mFd = -1;
}
@@ -427,7 +448,7 @@
}
if (mVideoTrack.mSource != NULL) {
- sp<MetaData> meta = doGetFormatMeta(false /* audio */);
+ sp<MetaData> meta = getFormatMeta_l(false /* audio */);
sp<AMessage> msg = new AMessage;
err = convertMetaDataToMessage(meta, &msg);
if(err != OK) {
@@ -461,47 +482,39 @@
}
if (mIsStreaming) {
- if (mBufferingMonitorLooper == NULL) {
- mBufferingMonitor->prepare(mCachedSource, mDurationUs, mBitrate,
- mIsStreaming);
-
- mBufferingMonitorLooper = new ALooper;
- mBufferingMonitorLooper->setName("GSBMonitor");
- mBufferingMonitorLooper->start();
- mBufferingMonitorLooper->registerHandler(mBufferingMonitor);
- }
-
- mBufferingMonitor->ensureCacheIsFetching();
- mBufferingMonitor->restartPollBuffering();
+ mCachedSource->resumeFetchingIfNecessary();
+ mPreparing = true;
+ schedulePollBuffering();
} else {
notifyPrepared();
}
+
+ if (mAudioTrack.mSource != NULL) {
+ postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
+ }
+
+ if (mVideoTrack.mSource != NULL) {
+ postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
+ }
}
void NuPlayer::GenericSource::notifyPreparedAndCleanup(status_t err) {
if (err != OK) {
- {
- sp<DataSource> dataSource = mDataSource;
- sp<NuCachedSource2> cachedSource = mCachedSource;
- sp<DataSource> httpSource = mHttpSource;
- {
- Mutex::Autolock _l(mDisconnectLock);
- mDataSource.clear();
- mCachedSource.clear();
- mHttpSource.clear();
- }
- }
- mBitrate = -1;
+ mDataSource.clear();
+ mCachedSource.clear();
+ mHttpSource.clear();
- mBufferingMonitor->cancelPollBuffering();
+ mBitrate = -1;
+ mPrevBufferPercentage = -1;
+ ++mPollBufferingGeneration;
}
notifyPrepared(err);
}
void NuPlayer::GenericSource::start() {
+ Mutex::Autolock _l(mLock);
ALOGI("start");
- mStopRead = false;
if (mAudioTrack.mSource != NULL) {
postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
}
@@ -511,30 +524,30 @@
}
mStarted = true;
-
- (new AMessage(kWhatStart, this))->post();
}
void NuPlayer::GenericSource::stop() {
+ Mutex::Autolock _l(mLock);
mStarted = false;
}
void NuPlayer::GenericSource::pause() {
+ Mutex::Autolock _l(mLock);
mStarted = false;
}
void NuPlayer::GenericSource::resume() {
+ Mutex::Autolock _l(mLock);
mStarted = true;
-
- (new AMessage(kWhatResume, this))->post();
}
void NuPlayer::GenericSource::disconnect() {
sp<DataSource> dataSource, httpSource;
{
- Mutex::Autolock _l(mDisconnectLock);
+ Mutex::Autolock _l(mLock);
dataSource = mDataSource;
httpSource = mHttpSource;
+ mDisconnected = true;
}
if (dataSource != NULL) {
@@ -551,7 +564,24 @@
return OK;
}
+void NuPlayer::GenericSource::sendCacheStats() {
+ int32_t kbps = 0;
+ status_t err = UNKNOWN_ERROR;
+
+ if (mCachedSource != NULL) {
+ err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
+ }
+
+ if (err == OK) {
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatCacheStats);
+ notify->setInt32("bandwidth", kbps);
+ notify->post();
+ }
+}
+
void NuPlayer::GenericSource::onMessageReceived(const sp<AMessage> &msg) {
+ Mutex::Autolock _l(mLock);
switch (msg->what()) {
case kWhatPrepareAsync:
{
@@ -620,6 +650,8 @@
track->mSource = source;
track->mSource->start();
track->mIndex = trackIndex;
+ ++mAudioDataGeneration;
+ ++mVideoDataGeneration;
int64_t timeUs, actualTimeUs;
const bool formatChange = true;
@@ -637,37 +669,6 @@
break;
}
- case kWhatStart:
- case kWhatResume:
- {
- mBufferingMonitor->restartPollBuffering();
- break;
- }
-
- case kWhatGetFormat:
- {
- onGetFormatMeta(msg);
- break;
- }
-
- case kWhatGetSelectedTrack:
- {
- onGetSelectedTrack(msg);
- break;
- }
-
- case kWhatGetTrackInfo:
- {
- onGetTrackInfo(msg);
- break;
- }
-
- case kWhatSelectTrack:
- {
- onSelectTrack(msg);
- break;
- }
-
case kWhatSeek:
{
onSeek(msg);
@@ -680,25 +681,13 @@
break;
}
- case kWhatPrepareDrm:
+ case kWhatPollBuffering:
{
- status_t status = onPrepareDrm(msg);
- sp<AMessage> response = new AMessage;
- response->setInt32("status", status);
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
- break;
- }
-
- case kWhatReleaseDrm:
- {
- status_t status = onReleaseDrm();
- sp<AMessage> response = new AMessage;
- response->setInt32("status", status);
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation == mPollBufferingGeneration) {
+ onPollBuffering();
+ }
break;
}
@@ -729,17 +718,20 @@
int64_t timeUs;
CHECK(msg->findInt64("timeUs", &timeUs));
- int64_t subTimeUs;
+ int64_t subTimeUs = 0;
readBuffer(type, timeUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */, &subTimeUs);
- int64_t delayUs = subTimeUs - timeUs;
+ status_t eosResult;
+ if (!packets->hasBufferAvailable(&eosResult)) {
+ return;
+ }
+
if (msg->what() == kWhatFetchSubtitleData) {
- const int64_t oneSecUs = 1000000ll;
- delayUs -= oneSecUs;
+ subTimeUs -= 1000000ll; // send subtitle data one second earlier
}
sp<AMessage> msg2 = new AMessage(sendWhat, this);
msg2->setInt32("generation", msgGeneration);
- msg2->post(delayUs < 0 ? 0 : delayUs);
+ mMediaClock->addTimer(msg2, subTimeUs);
}
void NuPlayer::GenericSource::sendTextData(
@@ -771,8 +763,10 @@
notify->setBuffer("buffer", buffer);
notify->post();
- const int64_t delayUs = nextSubTimeUs - subTimeUs;
- msg->post(delayUs < 0 ? 0 : delayUs);
+ if (msg->what() == kWhatSendSubtitleData) {
+ nextSubTimeUs -= 1000000ll; // send subtitle data one second earlier
+ }
+ mMediaClock->addTimer(msg, nextSubTimeUs);
}
}
@@ -808,34 +802,11 @@
}
sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
- sp<AMessage> msg = new AMessage(kWhatGetFormat, this);
- msg->setInt32("audio", audio);
-
- sp<AMessage> response;
- sp<RefBase> format;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findObject("format", &format));
- return static_cast<MetaData*>(format.get());
- } else {
- return NULL;
- }
+ Mutex::Autolock _l(mLock);
+ return getFormatMeta_l(audio);
}
-void NuPlayer::GenericSource::onGetFormatMeta(const sp<AMessage>& msg) const {
- int32_t audio;
- CHECK(msg->findInt32("audio", &audio));
-
- sp<AMessage> response = new AMessage;
- sp<MetaData> format = doGetFormatMeta(audio);
- response->setObject("format", format);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-sp<MetaData> NuPlayer::GenericSource::doGetFormatMeta(bool audio) const {
+sp<MetaData> NuPlayer::GenericSource::getFormatMeta_l(bool audio) {
sp<IMediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
if (source == NULL) {
@@ -847,10 +818,7 @@
status_t NuPlayer::GenericSource::dequeueAccessUnit(
bool audio, sp<ABuffer> *accessUnit) {
- if (audio && !mStarted) {
- return -EWOULDBLOCK;
- }
-
+ Mutex::Autolock _l(mLock);
// If has gone through stop/releaseDrm sequence, we no longer send down any buffer b/c
// the codec's crypto object has gone away (b/37960096).
// Note: This will be unnecessary when stop() changes behavior and releases codec (b/35248283).
@@ -876,10 +844,32 @@
status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
- // start pulling in more buffers if we only have one (or no) buffer left
+ // start pulling in more buffers if cache is running low
// so that decoder has less chance of being starved
- if (track->mPackets->getAvailableBufferCount(&finalResult) < 2) {
- postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
+ if (!mIsStreaming) {
+ if (track->mPackets->getAvailableBufferCount(&finalResult) < 2) {
+ postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
+ }
+ } else {
+ int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);
+ // TODO: maxRebufferingMarkMs could be larger than
+ // mBufferingSettings.mResumePlaybackMarkMs
+ int64_t restartBufferingMarkUs =
+ mBufferingSettings.mResumePlaybackMarkMs * 1000ll / 2;
+ if (finalResult == OK) {
+ if (durationUs < restartBufferingMarkUs) {
+ postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
+ }
+ if (track->mPackets->getAvailableBufferCount(&finalResult) < 2
+ && !mSentPauseOnBuffering && !mPreparing) {
+ mCachedSource->resumeFetchingIfNecessary();
+ sendCacheStats();
+ mSentPauseOnBuffering = true;
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatPauseOnBufferingStart);
+ notify->post();
+ }
+ }
}
if (result != OK) {
@@ -899,7 +889,6 @@
CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
if (audio) {
mAudioLastDequeueTimeUs = timeUs;
- mBufferingMonitor->updateDequeuedBufferTime(timeUs);
} else {
mVideoLastDequeueTimeUs = timeUs;
}
@@ -924,43 +913,18 @@
}
status_t NuPlayer::GenericSource::getDuration(int64_t *durationUs) {
+ Mutex::Autolock _l(mLock);
*durationUs = mDurationUs;
return OK;
}
size_t NuPlayer::GenericSource::getTrackCount() const {
+ Mutex::Autolock _l(mLock);
return mSources.size();
}
sp<AMessage> NuPlayer::GenericSource::getTrackInfo(size_t trackIndex) const {
- sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, this);
- msg->setSize("trackIndex", trackIndex);
-
- sp<AMessage> response;
- sp<RefBase> format;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findObject("format", &format));
- return static_cast<AMessage*>(format.get());
- } else {
- return NULL;
- }
-}
-
-void NuPlayer::GenericSource::onGetTrackInfo(const sp<AMessage>& msg) const {
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
-
- sp<AMessage> response = new AMessage;
- sp<AMessage> format = doGetTrackInfo(trackIndex);
- response->setObject("format", format);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-sp<AMessage> NuPlayer::GenericSource::doGetTrackInfo(size_t trackIndex) const {
+ Mutex::Autolock _l(mLock);
size_t trackCount = mSources.size();
if (trackIndex >= trackCount) {
return NULL;
@@ -970,7 +934,11 @@
sp<MetaData> meta = mSources.itemAt(trackIndex)->getFormat();
if (meta == NULL) {
ALOGE("no metadata for track %zu", trackIndex);
- return NULL;
+ format->setInt32("type", MEDIA_TRACK_TYPE_UNKNOWN);
+ format->setString("mime", "application/octet-stream");
+ format->setString("language", "und");
+
+ return format;
}
const char *mime;
@@ -1010,35 +978,7 @@
}
ssize_t NuPlayer::GenericSource::getSelectedTrack(media_track_type type) const {
- sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, this);
- msg->setInt32("type", type);
-
- sp<AMessage> response;
- int32_t index;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("index", &index));
- return index;
- } else {
- return -1;
- }
-}
-
-void NuPlayer::GenericSource::onGetSelectedTrack(const sp<AMessage>& msg) const {
- int32_t tmpType;
- CHECK(msg->findInt32("type", &tmpType));
- media_track_type type = (media_track_type)tmpType;
-
- sp<AMessage> response = new AMessage;
- ssize_t index = doGetSelectedTrack(type);
- response->setInt32("index", index);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-ssize_t NuPlayer::GenericSource::doGetSelectedTrack(media_track_type type) const {
+ Mutex::Autolock _l(mLock);
const Track *track = NULL;
switch (type) {
case MEDIA_TRACK_TYPE_VIDEO:
@@ -1065,38 +1005,9 @@
}
status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
+ Mutex::Autolock _l(mLock);
ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
- sp<AMessage> msg = new AMessage(kWhatSelectTrack, this);
- msg->setInt32("trackIndex", trackIndex);
- msg->setInt32("select", select);
- msg->setInt64("timeUs", timeUs);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
-
- return err;
-}
-
-void NuPlayer::GenericSource::onSelectTrack(const sp<AMessage>& msg) {
- int32_t trackIndex, select;
- int64_t timeUs;
- CHECK(msg->findInt32("trackIndex", &trackIndex));
- CHECK(msg->findInt32("select", &select));
- CHECK(msg->findInt64("timeUs", &timeUs));
-
- sp<AMessage> response = new AMessage;
- status_t err = doSelectTrack(trackIndex, select, timeUs);
- response->setInt32("err", err);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-status_t NuPlayer::GenericSource::doSelectTrack(size_t trackIndex, bool select, int64_t timeUs) {
if (trackIndex >= mSources.size()) {
return BAD_INDEX;
}
@@ -1188,10 +1099,14 @@
}
status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
+ ALOGV("seekTo: %lld, %d", (long long)seekTimeUs, mode);
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("seekTimeUs", seekTimeUs);
msg->setInt32("mode", mode);
+ // Need to call readBuffer on |mLooper| to ensure the calls to
+ // IMediaSource::read* are serialized. Note that IMediaSource::read*
+ // is called without |mLock| held, and MediaSource is not thread safe.
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
if (err == OK && response != NULL) {
@@ -1217,17 +1132,9 @@
}
status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
- mBufferingMonitor->updateDequeuedBufferTime(-1ll);
-
- // If the Widevine source is stopped, do not attempt to read any
- // more buffers.
- //
- // TODO: revisit after widevine is removed. May be able to
- // combine mStopRead with mStarted.
- if (mStopRead) {
- return INVALID_OPERATION;
- }
if (mVideoTrack.mSource != NULL) {
+ ++mVideoDataGeneration;
+
int64_t actualTimeUs;
readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs);
@@ -1238,7 +1145,8 @@
}
if (mAudioTrack.mSource != NULL) {
- readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs);
+ ++mAudioDataGeneration;
+ readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs, MediaPlayerSeekMode::SEEK_CLOSEST);
mAudioLastDequeueTimeUs = seekTimeUs;
}
@@ -1252,17 +1160,13 @@
mFetchTimedTextDataGeneration++;
}
- // If currently buffering, post kWhatBufferingEnd first, so that
- // NuPlayer resumes. Otherwise, if cache hits high watermark
- // before new polling happens, no one will resume the playback.
- mBufferingMonitor->stopBufferingIfNecessary();
- mBufferingMonitor->restartPollBuffering();
-
+ ++mPollBufferingGeneration;
+ schedulePollBuffering();
return OK;
}
sp<ABuffer> NuPlayer::GenericSource::mediaBufferToABuffer(
- MediaBuffer* mb,
+ MediaBufferBase* mb,
media_track_type trackType) {
bool audio = trackType == MEDIA_TRACK_TYPE_AUDIO;
size_t outLength = mb->range_length();
@@ -1280,14 +1184,12 @@
// data is already provided in the buffer
ab = new ABuffer(NULL, mb->range_length());
- mb->add_ref();
- ab->setMediaBufferBase(mb);
+ ab->meta()->setObject("mediaBufferHolder", new MediaBufferHolder(mb));
// Modular DRM: Required b/c of the above add_ref.
// If ref>0, there must be an observer, or it'll crash at release().
// TODO: MediaBuffer might need to be revised to ease such need.
mb->setObserver(this);
- // setMediaBufferBase() interestingly doesn't increment the ref count on its own.
// Extra increment (since we want to keep mb alive and attached to ab beyond this function
// call. This is to counter the effect of mb->release() towards the end.
mb->add_ref();
@@ -1301,7 +1203,7 @@
if (audio && mAudioIsVorbis) {
int32_t numPageSamples;
- if (!mb->meta_data()->findInt32(kKeyValidSamples, &numPageSamples)) {
+ if (!mb->meta_data().findInt32(kKeyValidSamples, &numPageSamples)) {
numPageSamples = -1;
}
@@ -1312,12 +1214,12 @@
sp<AMessage> meta = ab->meta();
int64_t timeUs;
- CHECK(mb->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(mb->meta_data().findInt64(kKeyTime, &timeUs));
meta->setInt64("timeUs", timeUs);
if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
int32_t layerId;
- if (mb->meta_data()->findInt32(kKeyTemporalLayerId, &layerId)) {
+ if (mb->meta_data().findInt32(kKeyTemporalLayerId, &layerId)) {
meta->setInt32("temporal-layer-id", layerId);
}
}
@@ -1330,28 +1232,28 @@
}
int64_t durationUs;
- if (mb->meta_data()->findInt64(kKeyDuration, &durationUs)) {
+ if (mb->meta_data().findInt64(kKeyDuration, &durationUs)) {
meta->setInt64("durationUs", durationUs);
}
if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
- meta->setInt32("trackIndex", mSubtitleTrack.mIndex);
+ meta->setInt32("track-index", mSubtitleTrack.mIndex);
}
uint32_t dataType; // unused
const void *seiData;
size_t seiLength;
- if (mb->meta_data()->findData(kKeySEI, &dataType, &seiData, &seiLength)) {
+ if (mb->meta_data().findData(kKeySEI, &dataType, &seiData, &seiLength)) {
sp<ABuffer> sei = ABuffer::CreateAsCopy(seiData, seiLength);
meta->setBuffer("sei", sei);
}
const void *mpegUserDataPointer;
size_t mpegUserDataLength;
- if (mb->meta_data()->findData(
+ if (mb->meta_data().findData(
kKeyMpegUserData, &dataType, &mpegUserDataPointer, &mpegUserDataLength)) {
sp<ABuffer> mpegUserData = ABuffer::CreateAsCopy(mpegUserDataPointer, mpegUserDataLength);
- meta->setBuffer("mpegUserData", mpegUserData);
+ meta->setBuffer("mpeg-user-data", mpegUserData);
}
mb->release();
@@ -1360,9 +1262,29 @@
return ab;
}
-void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
- Mutex::Autolock _l(mReadBufferLock);
+int32_t NuPlayer::GenericSource::getDataGeneration(media_track_type type) const {
+ int32_t generation = -1;
+ switch (type) {
+ case MEDIA_TRACK_TYPE_VIDEO:
+ generation = mVideoDataGeneration;
+ break;
+ case MEDIA_TRACK_TYPE_AUDIO:
+ generation = mAudioDataGeneration;
+ break;
+ case MEDIA_TRACK_TYPE_TIMEDTEXT:
+ generation = mFetchTimedTextDataGeneration;
+ break;
+ case MEDIA_TRACK_TYPE_SUBTITLE:
+ generation = mFetchSubtitleDataGeneration;
+ break;
+ default:
+ break;
+ }
+ return generation;
+}
+
+void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
if ((mPendingReadBufferTypes & (1 << trackType)) == 0) {
mPendingReadBufferTypes |= (1 << trackType);
sp<AMessage> msg = new AMessage(kWhatReadBuffer, this);
@@ -1375,25 +1297,13 @@
int32_t tmpType;
CHECK(msg->findInt32("trackType", &tmpType));
media_track_type trackType = (media_track_type)tmpType;
+ mPendingReadBufferTypes &= ~(1 << trackType);
readBuffer(trackType);
- {
- // only protect the variable change, as readBuffer may
- // take considerable time.
- Mutex::Autolock _l(mReadBufferLock);
- mPendingReadBufferTypes &= ~(1 << trackType);
- }
}
void NuPlayer::GenericSource::readBuffer(
media_track_type trackType, int64_t seekTimeUs, MediaPlayerSeekMode mode,
int64_t *actualTimeUs, bool formatChange) {
- // Do not read data if Widevine source is stopped
- //
- // TODO: revisit after widevine is removed. May be able to
- // combine mStopRead with mStarted.
- if (mStopRead) {
- return;
- }
Track *track;
size_t maxBuffers = 1;
switch (trackType) {
@@ -1437,39 +1347,50 @@
options.setNonBlocking();
}
+ int32_t generation = getDataGeneration(trackType);
for (size_t numBuffers = 0; numBuffers < maxBuffers; ) {
- Vector<MediaBuffer *> mediaBuffers;
+ Vector<MediaBufferBase *> mediaBuffers;
status_t err = NO_ERROR;
+ sp<IMediaSource> source = track->mSource;
+ mLock.unlock();
if (couldReadMultiple) {
- err = track->mSource->readMultiple(
+ err = source->readMultiple(
&mediaBuffers, maxBuffers - numBuffers, &options);
} else {
- MediaBuffer *mbuf = NULL;
- err = track->mSource->read(&mbuf, &options);
+ MediaBufferBase *mbuf = NULL;
+ err = source->read(&mbuf, &options);
if (err == OK && mbuf != NULL) {
mediaBuffers.push_back(mbuf);
}
}
+ mLock.lock();
options.clearNonPersistent();
size_t id = 0;
size_t count = mediaBuffers.size();
+
+ // The track may have changed while the lock was released; if so, drop the stale buffers.
+ if (generation != getDataGeneration(trackType)) {
+ for (; id < count; ++id) {
+ mediaBuffers[id]->release();
+ }
+ break;
+ }
+
for (; id < count; ++id) {
int64_t timeUs;
- MediaBuffer *mbuf = mediaBuffers[id];
- if (!mbuf->meta_data()->findInt64(kKeyTime, &timeUs)) {
- mbuf->meta_data()->dumpToLog();
+ MediaBufferBase *mbuf = mediaBuffers[id];
+ if (!mbuf->meta_data().findInt64(kKeyTime, &timeUs)) {
+ mbuf->meta_data().dumpToLog();
track->mPackets->signalEOS(ERROR_MALFORMED);
break;
}
if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
mAudioTimeUs = timeUs;
- mBufferingMonitor->updateQueuedTime(true /* isAudio */, timeUs);
} else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
mVideoTimeUs = timeUs;
- mBufferingMonitor->updateQueuedTime(false /* isAudio */, timeUs);
}
queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
@@ -1516,6 +1437,41 @@
break;
}
}
+
+ if (mIsStreaming
+ && (trackType == MEDIA_TRACK_TYPE_VIDEO || trackType == MEDIA_TRACK_TYPE_AUDIO)) {
+ status_t finalResult;
+ int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);
+
+ // TODO: maxRebufferingMarkMs could be larger than
+ // mBufferingSettings.mResumePlaybackMarkMs
+ int64_t markUs = (mPreparing ? mBufferingSettings.mInitialMarkMs
+ : mBufferingSettings.mResumePlaybackMarkMs) * 1000ll;
+ if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
+ if (mPreparing || mSentPauseOnBuffering) {
+ Track *counterTrack =
+ (trackType == MEDIA_TRACK_TYPE_VIDEO ? &mAudioTrack : &mVideoTrack);
+ if (counterTrack->mSource != NULL) {
+ durationUs = counterTrack->mPackets->getBufferedDurationUs(&finalResult);
+ }
+ if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
+ if (mPreparing) {
+ notifyPrepared();
+ mPreparing = false;
+ } else {
+ sendCacheStats();
+ mSentPauseOnBuffering = false;
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatResumeOnBufferingEnd);
+ notify->post();
+ }
+ }
+ }
+ return;
+ }
+
+ postReadBuffer(trackType);
+ }
}
void NuPlayer::GenericSource::queueDiscontinuityIfNeeded(
@@ -1533,160 +1489,7 @@
}
}
-NuPlayer::GenericSource::BufferingMonitor::BufferingMonitor(const sp<AMessage> &notify)
- : mNotify(notify),
- mDurationUs(-1ll),
- mBitrate(-1ll),
- mIsStreaming(false),
- mAudioTimeUs(0),
- mVideoTimeUs(0),
- mPollBufferingGeneration(0),
- mPrepareBuffering(false),
- mBuffering(false),
- mPrevBufferPercentage(-1),
- mOffloadAudio(false),
- mFirstDequeuedBufferRealUs(-1ll),
- mFirstDequeuedBufferMediaUs(-1ll),
- mlastDequeuedBufferMediaUs(-1ll) {
- getDefaultBufferingSettings(&mSettings);
-}
-
-NuPlayer::GenericSource::BufferingMonitor::~BufferingMonitor() {
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::getDefaultBufferingSettings(
- BufferingSettings *buffering /* nonnull */) {
- buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
- buffering->mRebufferingMode = BUFFERING_MODE_TIME_THEN_SIZE;
- buffering->mInitialWatermarkMs = kHighWaterMarkMs;
- buffering->mRebufferingWatermarkLowMs = kLowWaterMarkMs;
- buffering->mRebufferingWatermarkHighMs = kHighWaterMarkRebufferMs;
- buffering->mRebufferingWatermarkLowKB = kLowWaterMarkKB;
- buffering->mRebufferingWatermarkHighKB = kHighWaterMarkKB;
-
- ALOGV("BufferingMonitor::getDefaultBufferingSettings{%s}",
- buffering->toString().string());
-}
-
-status_t NuPlayer::GenericSource::BufferingMonitor::setBufferingSettings(
- const BufferingSettings &buffering) {
- ALOGV("BufferingMonitor::setBufferingSettings{%s}",
- buffering.toString().string());
-
- Mutex::Autolock _l(mLock);
- if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
- || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
- && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)
- || (buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
- && buffering.mRebufferingWatermarkLowKB > buffering.mRebufferingWatermarkHighKB)) {
- return BAD_VALUE;
- }
- mSettings = buffering;
- if (mSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
- mSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
- }
- if (!mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
- mSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
- mSettings.mRebufferingWatermarkHighMs = INT32_MAX;
- }
- if (!mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
- mSettings.mRebufferingWatermarkLowKB = BufferingSettings::kNoWatermark;
- mSettings.mRebufferingWatermarkHighKB = INT32_MAX;
- }
- return OK;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::prepare(
- const sp<NuCachedSource2> &cachedSource,
- int64_t durationUs,
- int64_t bitrate,
- bool isStreaming) {
- Mutex::Autolock _l(mLock);
- prepare_l(cachedSource, durationUs, bitrate, isStreaming);
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::stop() {
- Mutex::Autolock _l(mLock);
- prepare_l(NULL /* cachedSource */, -1 /* durationUs */,
- -1 /* bitrate */, false /* isStreaming */);
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::cancelPollBuffering() {
- Mutex::Autolock _l(mLock);
- cancelPollBuffering_l();
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::restartPollBuffering() {
- Mutex::Autolock _l(mLock);
- if (mIsStreaming) {
- cancelPollBuffering_l();
- onPollBuffering_l();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::stopBufferingIfNecessary() {
- Mutex::Autolock _l(mLock);
- stopBufferingIfNecessary_l();
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::ensureCacheIsFetching() {
- Mutex::Autolock _l(mLock);
- ensureCacheIsFetching_l();
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::updateQueuedTime(bool isAudio, int64_t timeUs) {
- Mutex::Autolock _l(mLock);
- if (isAudio) {
- mAudioTimeUs = timeUs;
- } else {
- mVideoTimeUs = timeUs;
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::setOffloadAudio(bool offload) {
- Mutex::Autolock _l(mLock);
- mOffloadAudio = offload;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::updateDequeuedBufferTime(int64_t mediaUs) {
- Mutex::Autolock _l(mLock);
- if (mediaUs < 0) {
- mFirstDequeuedBufferRealUs = -1ll;
- mFirstDequeuedBufferMediaUs = -1ll;
- } else if (mFirstDequeuedBufferRealUs < 0) {
- mFirstDequeuedBufferRealUs = ALooper::GetNowUs();
- mFirstDequeuedBufferMediaUs = mediaUs;
- }
- mlastDequeuedBufferMediaUs = mediaUs;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::prepare_l(
- const sp<NuCachedSource2> &cachedSource,
- int64_t durationUs,
- int64_t bitrate,
- bool isStreaming) {
-
- mCachedSource = cachedSource;
- mDurationUs = durationUs;
- mBitrate = bitrate;
- mIsStreaming = isStreaming;
- mAudioTimeUs = 0;
- mVideoTimeUs = 0;
- mPrepareBuffering = (cachedSource != NULL);
- cancelPollBuffering_l();
- mOffloadAudio = false;
- mFirstDequeuedBufferRealUs = -1ll;
- mFirstDequeuedBufferMediaUs = -1ll;
- mlastDequeuedBufferMediaUs = -1ll;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::cancelPollBuffering_l() {
- mBuffering = false;
- ++mPollBufferingGeneration;
- mPrevBufferPercentage = -1;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::notifyBufferingUpdate_l(int32_t percentage) {
+void NuPlayer::GenericSource::notifyBufferingUpdate(int32_t percentage) {
// Buffering percent could go backward as it's estimated from remaining
// data and last access time. This could cause the buffering position
// drawn on media control to jitter slightly. Remember previously reported
@@ -1699,106 +1502,28 @@
mPrevBufferPercentage = percentage;
- ALOGV("notifyBufferingUpdate_l: buffering %d%%", percentage);
+ ALOGV("notifyBufferingUpdate: buffering %d%%", percentage);
- sp<AMessage> msg = mNotify->dup();
- msg->setInt32("what", kWhatBufferingUpdate);
- msg->setInt32("percentage", percentage);
- msg->post();
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatBufferingUpdate);
+ notify->setInt32("percentage", percentage);
+ notify->post();
}
-void NuPlayer::GenericSource::BufferingMonitor::startBufferingIfNecessary_l() {
- if (mPrepareBuffering) {
- return;
- }
-
- if (!mBuffering) {
- ALOGD("startBufferingIfNecessary_l");
-
- mBuffering = true;
-
- ensureCacheIsFetching_l();
- sendCacheStats_l();
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatPauseOnBufferingStart);
- notify->post();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::stopBufferingIfNecessary_l() {
- if (mPrepareBuffering) {
- ALOGD("stopBufferingIfNecessary_l, mBuffering=%d", mBuffering);
-
- mPrepareBuffering = false;
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatPrepared);
- notify->setInt32("err", OK);
- notify->post();
-
- return;
- }
-
- if (mBuffering) {
- ALOGD("stopBufferingIfNecessary_l");
- mBuffering = false;
-
- sendCacheStats_l();
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatResumeOnBufferingEnd);
- notify->post();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::sendCacheStats_l() {
- int32_t kbps = 0;
- status_t err = UNKNOWN_ERROR;
-
- if (mCachedSource != NULL) {
- err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
- }
-
- if (err == OK) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatCacheStats);
- notify->setInt32("bandwidth", kbps);
- notify->post();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::ensureCacheIsFetching_l() {
- if (mCachedSource != NULL) {
- mCachedSource->resumeFetchingIfNecessary();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::schedulePollBuffering_l() {
+void NuPlayer::GenericSource::schedulePollBuffering() {
sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
msg->setInt32("generation", mPollBufferingGeneration);
// Query the buffering status every second.
msg->post(1000000ll);
}
-int64_t NuPlayer::GenericSource::BufferingMonitor::getLastReadPosition_l() {
- if (mAudioTimeUs > 0) {
- return mAudioTimeUs;
- } else if (mVideoTimeUs > 0) {
- return mVideoTimeUs;
- } else {
- return 0;
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::onPollBuffering_l() {
+void NuPlayer::GenericSource::onPollBuffering() {
status_t finalStatus = UNKNOWN_ERROR;
int64_t cachedDurationUs = -1ll;
ssize_t cachedDataRemaining = -1;
if (mCachedSource != NULL) {
- cachedDataRemaining =
- mCachedSource->approxDataRemaining(&finalStatus);
+ cachedDataRemaining = mCachedSource->approxDataRemaining(&finalStatus);
if (finalStatus == OK) {
off64_t size;
@@ -1816,157 +1541,49 @@
}
if (finalStatus != OK) {
- ALOGV("onPollBuffering_l: EOS (finalStatus = %d)", finalStatus);
+ ALOGV("onPollBuffering: EOS (finalStatus = %d)", finalStatus);
if (finalStatus == ERROR_END_OF_STREAM) {
- notifyBufferingUpdate_l(100);
+ notifyBufferingUpdate(100);
}
- stopBufferingIfNecessary_l();
return;
}
if (cachedDurationUs >= 0ll) {
if (mDurationUs > 0ll) {
- int64_t cachedPosUs = getLastReadPosition_l() + cachedDurationUs;
+ int64_t cachedPosUs = getLastReadPosition() + cachedDurationUs;
int percentage = 100.0 * cachedPosUs / mDurationUs;
if (percentage > 100) {
percentage = 100;
}
- notifyBufferingUpdate_l(percentage);
+ notifyBufferingUpdate(percentage);
}
- ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
-
- if (mPrepareBuffering) {
- if (cachedDurationUs > mSettings.mInitialWatermarkMs * 1000) {
- stopBufferingIfNecessary_l();
- }
- } else if (mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
- if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
- // Take into account the data cached in downstream components to try to avoid
- // unnecessary pause.
- if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
- int64_t downStreamCacheUs =
- mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
- - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
- if (downStreamCacheUs > 0) {
- cachedDurationUs += downStreamCacheUs;
- }
- }
-
- if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
- startBufferingIfNecessary_l();
- }
- } else if (cachedDurationUs > mSettings.mRebufferingWatermarkHighMs * 1000) {
- stopBufferingIfNecessary_l();
- }
- }
- } else if (cachedDataRemaining >= 0
- && mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
- ALOGV("onPollBuffering_l: cachedDataRemaining %zd bytes",
- cachedDataRemaining);
-
- if (cachedDataRemaining < (mSettings.mRebufferingWatermarkLowKB << 10)) {
- startBufferingIfNecessary_l();
- } else if (cachedDataRemaining > (mSettings.mRebufferingWatermarkHighKB << 10)) {
- stopBufferingIfNecessary_l();
- }
+ ALOGV("onPollBuffering: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
}
- schedulePollBuffering_l();
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatPollBuffering:
- {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
- Mutex::Autolock _l(mLock);
- if (generation == mPollBufferingGeneration) {
- onPollBuffering_l();
- }
- break;
- }
- default:
- TRESPASS();
- break;
- }
+ schedulePollBuffering();
}
// Modular DRM
status_t NuPlayer::GenericSource::prepareDrm(
- const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *crypto)
-{
+ const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *outCrypto) {
+ Mutex::Autolock _l(mLock);
ALOGV("prepareDrm");
- sp<AMessage> msg = new AMessage(kWhatPrepareDrm, this);
- // synchronous call so just passing the address but with local copies of "const" args
- uint8_t UUID[16];
- memcpy(UUID, uuid, sizeof(UUID));
- Vector<uint8_t> sessionId = drmSessionId;
- msg->setPointer("uuid", (void*)UUID);
- msg->setPointer("drmSessionId", (void*)&sessionId);
- msg->setPointer("crypto", (void*)crypto);
-
- sp<AMessage> response;
- status_t status = msg->postAndAwaitResponse(&response);
-
- if (status == OK && response != NULL) {
- CHECK(response->findInt32("status", &status));
- ALOGV_IF(status == OK, "prepareDrm: mCrypto: %p (%d)", crypto->get(),
- (*crypto != NULL ? (*crypto)->getStrongCount() : 0));
- ALOGD("prepareDrm ret: %d ", status);
- } else {
- ALOGE("prepareDrm err: %d", status);
- }
-
- return status;
-}
-
-status_t NuPlayer::GenericSource::releaseDrm()
-{
- ALOGV("releaseDrm");
-
- sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
-
- // synchronous call to update the source states before the player proceedes with crypto cleanup
- sp<AMessage> response;
- status_t status = msg->postAndAwaitResponse(&response);
-
- if (status == OK && response != NULL) {
- ALOGD("releaseDrm ret: OK ");
- } else {
- ALOGE("releaseDrm err: %d", status);
- }
-
- return status;
-}
-
-status_t NuPlayer::GenericSource::onPrepareDrm(const sp<AMessage> &msg)
-{
- ALOGV("onPrepareDrm ");
-
mIsDrmProtected = false;
mIsDrmReleased = false;
mIsSecure = false;
- uint8_t *uuid;
- Vector<uint8_t> *drmSessionId;
- sp<ICrypto> *outCrypto;
- CHECK(msg->findPointer("uuid", (void**)&uuid));
- CHECK(msg->findPointer("drmSessionId", (void**)&drmSessionId));
- CHECK(msg->findPointer("crypto", (void**)&outCrypto));
-
status_t status = OK;
- sp<ICrypto> crypto = NuPlayerDrm::createCryptoAndPlugin(uuid, *drmSessionId, status);
+ sp<ICrypto> crypto = NuPlayerDrm::createCryptoAndPlugin(uuid, drmSessionId, status);
if (crypto == NULL) {
- ALOGE("onPrepareDrm: createCrypto failed. status: %d", status);
+ ALOGE("prepareDrm: createCrypto failed. status: %d", status);
return status;
}
- ALOGV("onPrepareDrm: createCryptoAndPlugin succeeded for uuid: %s",
+ ALOGV("prepareDrm: createCryptoAndPlugin succeeded for uuid: %s",
DrmUUID::toHexString(uuid).string());
*outCrypto = crypto;
@@ -1975,14 +1592,14 @@
if (mMimes.size() == 0) {
status = UNKNOWN_ERROR;
- ALOGE("onPrepareDrm: Unexpected. Must have at least one track. status: %d", status);
+ ALOGE("prepareDrm: Unexpected. Must have at least one track. status: %d", status);
return status;
}
// first mime in this list is either the video track, or the first audio track
const char *mime = mMimes[0].string();
mIsSecure = crypto->requiresSecureDecoderComponent(mime);
- ALOGV("onPrepareDrm: requiresSecureDecoderComponent mime: %s isSecure: %d",
+ ALOGV("prepareDrm: requiresSecureDecoderComponent mime: %s isSecure: %d",
mime, mIsSecure);
// Checking the member flags while in the looper to send out the notification.
@@ -1996,18 +1613,27 @@
FLAG_CAN_SEEK_FORWARD |
FLAG_CAN_SEEK);
+ if (status == OK) {
+ ALOGV("prepareDrm: mCrypto: %p (%d)", outCrypto->get(),
+ (*outCrypto != NULL ? (*outCrypto)->getStrongCount() : 0));
+ ALOGD("prepareDrm ret: %d ", status);
+ } else {
+ ALOGE("prepareDrm err: %d", status);
+ }
return status;
}
-status_t NuPlayer::GenericSource::onReleaseDrm()
-{
+status_t NuPlayer::GenericSource::releaseDrm() {
+ Mutex::Autolock _l(mLock);
+ ALOGV("releaseDrm");
+
if (mIsDrmProtected) {
mIsDrmProtected = false;
// to prevent returning any more buffer after stop/releaseDrm (b/37960096)
mIsDrmReleased = true;
- ALOGV("onReleaseDrm: mIsDrmProtected is reset.");
+ ALOGV("releaseDrm: mIsDrmProtected is reset.");
} else {
- ALOGE("onReleaseDrm: mIsDrmProtected is already false.");
+ ALOGE("releaseDrm: mIsDrmProtected is already false.");
}
return OK;
@@ -2051,7 +1677,7 @@
return OK;
}
-void NuPlayer::GenericSource::signalBufferReturned(MediaBuffer *buffer)
+void NuPlayer::GenericSource::signalBufferReturned(MediaBufferBase *buffer)
{
//ALOGV("signalBufferReturned %p refCount: %d", buffer, buffer->localRefcount());
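
The readBuffer() change above drops mLock around the blocking source read and snapshots a per-track data generation first; if a seek or track change bumped the generation while the lock was released, the freshly read buffers are released and the loop bails out. A minimal sketch of that guard, using std:: types and placeholder names (Reader, BufferSketch, blockingRead are illustrative, not AOSP APIs):

#include <cstdint>
#include <mutex>
#include <vector>

struct BufferSketch {
    void release() {}                      // real MediaBufferBase recycles here
};

struct Reader {
    std::mutex mLock;
    int32_t mGeneration = 0;               // bumped by seek / track change

    void onSeek() {
        std::lock_guard<std::mutex> l(mLock);
        ++mGeneration;                      // invalidates reads in flight
    }

    std::vector<BufferSketch*> readGuarded() {
        int32_t generation;
        {
            std::lock_guard<std::mutex> l(mLock);
            generation = mGeneration;       // snapshot before unlocking
        }
        std::vector<BufferSketch*> buffers = blockingRead();   // no lock held

        std::lock_guard<std::mutex> l(mLock);
        if (generation != mGeneration) {    // state changed while unlocked
            for (BufferSketch* b : buffers) b->release();
            buffers.clear();                // drop everything, caller retries
        }
        return buffers;
    }

    std::vector<BufferSketch*> blockingRead() { return {}; }   // IMediaSource stand-in
};
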
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 4064133..065cac1 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -24,6 +24,7 @@
#include "ATSParser.h"
#include <media/mediaplayer.h>
+#include <media/stagefright/MediaBuffer.h>
namespace android {
@@ -34,13 +35,16 @@
class IDataSource;
struct IMediaHTTPService;
struct MediaSource;
+class IMediaSource;
class MediaBuffer;
+struct MediaClock;
struct NuCachedSource2;
struct NuPlayer::GenericSource : public NuPlayer::Source,
public MediaBufferObserver // Modular DRM
{
- GenericSource(const sp<AMessage> &notify, bool uidValid, uid_t uid);
+ GenericSource(const sp<AMessage> &notify, bool uidValid, uid_t uid,
+ const sp<MediaClock> &mediaClock);
status_t setDataSource(
const sp<IMediaHTTPService> &httpService,
@@ -51,7 +55,7 @@
status_t setDataSource(const sp<DataSource>& dataSource);
- virtual status_t getDefaultBufferingSettings(
+ virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) override;
virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
@@ -79,17 +83,13 @@
int64_t seekTimeUs,
MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
- virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
-
virtual bool isStreaming() const;
- virtual void setOffloadAudio(bool offload);
-
// Modular DRM
- virtual void signalBufferReturned(MediaBuffer *buffer);
+ virtual void signalBufferReturned(MediaBufferBase *buffer);
virtual status_t prepareDrm(
- const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *crypto);
+ const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *outCrypto);
virtual status_t releaseDrm();
@@ -111,17 +111,11 @@
kWhatSendTimedTextData,
kWhatChangeAVSource,
kWhatPollBuffering,
- kWhatGetFormat,
- kWhatGetSelectedTrack,
- kWhatSelectTrack,
kWhatSeek,
kWhatReadBuffer,
kWhatStart,
kWhatResume,
kWhatSecureDecodersInstantiated,
- // Modular DRM
- kWhatPrepareDrm,
- kWhatReleaseDrm,
};
struct Track {
@@ -130,84 +124,6 @@
sp<AnotherPacketSource> mPackets;
};
- // Helper to monitor buffering status. The polling happens every second.
- // When necessary, it will send out buffering events to the player.
- struct BufferingMonitor : public AHandler {
- public:
- explicit BufferingMonitor(const sp<AMessage> &notify);
-
- void getDefaultBufferingSettings(BufferingSettings *buffering /* nonnull */);
- status_t setBufferingSettings(const BufferingSettings &buffering);
-
- // Set up state.
- void prepare(const sp<NuCachedSource2> &cachedSource,
- int64_t durationUs,
- int64_t bitrate,
- bool isStreaming);
- // Stop and reset buffering monitor.
- void stop();
- // Cancel the current monitor task.
- void cancelPollBuffering();
- // Restart the monitor task.
- void restartPollBuffering();
- // Stop buffering task and send out corresponding events.
- void stopBufferingIfNecessary();
- // Make sure data source is getting data.
- void ensureCacheIsFetching();
- // Update media time of just extracted buffer from data source.
- void updateQueuedTime(bool isAudio, int64_t timeUs);
-
- // Set the offload mode.
- void setOffloadAudio(bool offload);
- // Update media time of last dequeued buffer which is sent to the decoder.
- void updateDequeuedBufferTime(int64_t mediaUs);
-
- protected:
- virtual ~BufferingMonitor();
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
- private:
- enum {
- kWhatPollBuffering,
- };
-
- sp<AMessage> mNotify;
-
- sp<NuCachedSource2> mCachedSource;
- int64_t mDurationUs;
- int64_t mBitrate;
- bool mIsStreaming;
-
- int64_t mAudioTimeUs;
- int64_t mVideoTimeUs;
- int32_t mPollBufferingGeneration;
- bool mPrepareBuffering;
- bool mBuffering;
- int32_t mPrevBufferPercentage;
-
- mutable Mutex mLock;
-
- BufferingSettings mSettings;
- bool mOffloadAudio;
- int64_t mFirstDequeuedBufferRealUs;
- int64_t mFirstDequeuedBufferMediaUs;
- int64_t mlastDequeuedBufferMediaUs;
-
- void prepare_l(const sp<NuCachedSource2> &cachedSource,
- int64_t durationUs,
- int64_t bitrate,
- bool isStreaming);
- void cancelPollBuffering_l();
- void notifyBufferingUpdate_l(int32_t percentage);
- void startBufferingIfNecessary_l();
- void stopBufferingIfNecessary_l();
- void sendCacheStats_l();
- void ensureCacheIsFetching_l();
- int64_t getLastReadPosition_l();
- void onPollBuffering_l();
- void schedulePollBuffering_l();
- };
-
Vector<sp<IMediaSource> > mSources;
Track mAudioTrack;
int64_t mAudioTimeUs;
@@ -218,6 +134,13 @@
Track mSubtitleTrack;
Track mTimedTextTrack;
+ BufferingSettings mBufferingSettings;
+ int32_t mPrevBufferPercentage;
+ int32_t mPollBufferingGeneration;
+ bool mSentPauseOnBuffering;
+
+ int32_t mAudioDataGeneration;
+ int32_t mVideoDataGeneration;
int32_t mFetchSubtitleDataGeneration;
int32_t mFetchTimedTextDataGeneration;
int64_t mDurationUs;
@@ -227,6 +150,7 @@
bool mIsStreaming;
bool mUIDValid;
uid_t mUID;
+ const sp<MediaClock> mMediaClock;
sp<IMediaHTTPService> mHTTPService;
AString mUri;
KeyedVector<String8, String8> mUriHeaders;
@@ -234,22 +158,20 @@
int64_t mOffset;
int64_t mLength;
+ bool mDisconnected;
sp<DataSource> mDataSource;
sp<NuCachedSource2> mCachedSource;
sp<DataSource> mHttpSource;
sp<MetaData> mFileMeta;
bool mStarted;
- bool mStopRead;
+ bool mPreparing;
int64_t mBitrate;
- sp<BufferingMonitor> mBufferingMonitor;
uint32_t mPendingReadBufferTypes;
sp<ABuffer> mGlobalTimedText;
- mutable Mutex mReadBufferLock;
- mutable Mutex mDisconnectLock;
+ mutable Mutex mLock;
sp<ALooper> mLooper;
- sp<ALooper> mBufferingMonitorLooper;
void resetDataSource();
@@ -261,18 +183,6 @@
void finishPrepareAsync();
status_t startSources();
- void onGetFormatMeta(const sp<AMessage>& msg) const;
- sp<MetaData> doGetFormatMeta(bool audio) const;
-
- void onGetTrackInfo(const sp<AMessage>& msg) const;
- sp<AMessage> doGetTrackInfo(size_t trackIndex) const;
-
- void onGetSelectedTrack(const sp<AMessage>& msg) const;
- ssize_t doGetSelectedTrack(media_track_type type) const;
-
- void onSelectTrack(const sp<AMessage>& msg);
- status_t doSelectTrack(size_t trackIndex, bool select, int64_t timeUs);
-
void onSeek(const sp<AMessage>& msg);
status_t doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode);
@@ -291,7 +201,7 @@
int32_t curGen, const sp<AnotherPacketSource>& packets, const sp<AMessage>& msg);
sp<ABuffer> mediaBufferToABuffer(
- MediaBuffer *mbuf,
+ MediaBufferBase *mbuf,
media_track_type trackType);
void postReadBuffer(media_track_type trackType);
@@ -310,6 +220,15 @@
void queueDiscontinuityIfNeeded(
bool seeking, bool formatChange, media_track_type trackType, Track *track);
+ void schedulePollBuffering();
+ void onPollBuffering();
+ void notifyBufferingUpdate(int32_t percentage);
+
+ void sendCacheStats();
+
+ sp<MetaData> getFormatMeta_l(bool audio);
+ int32_t getDataGeneration(media_track_type type) const;
+
// Modular DRM
// The source is DRM protected and is prepared for DRM.
bool mIsDrmProtected;
@@ -318,8 +237,6 @@
Vector<String8> mMimes;
status_t checkDrmInfo();
- status_t onPrepareDrm(const sp<AMessage> &msg);
- status_t onReleaseDrm();
DISALLOW_EVIL_CONSTRUCTORS(GenericSource);
};
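
postReadBuffer()/onReadBuffer() in the .cpp hunks above coalesce repeated read requests with the mPendingReadBufferTypes bitmask: only the first request per track posts a kWhatReadBuffer message, and the handler clears the bit before doing the read. A hedged sketch of that pattern (the poster callback stands in for AMessage::post):

#include <cstdint>
#include <functional>

struct ReadScheduler {
    uint32_t mPendingTypes = 0;                     // one bit per track type
    std::function<void(int)> postMessage;           // AMessage::post stand-in

    void postReadBuffer(int trackType) {
        if ((mPendingTypes & (1u << trackType)) == 0) {
            mPendingTypes |= (1u << trackType);     // coalesce further requests
            if (postMessage) postMessage(trackType);
        }
    }

    void onReadBuffer(int trackType) {
        mPendingTypes &= ~(1u << trackType);        // allow the next request
        // ...the actual readBuffer(trackType) work happens here...
    }
};
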
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index ad4c223..11f1bfd 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -35,7 +35,6 @@
// default buffer prepare/ready/underflow marks
static const int kReadyMarkMs = 5000; // 5 seconds
static const int kPrepareMarkMs = 1500; // 1.5 seconds
-static const int kUnderflowMarkMs = 1000; // 1 second
namespace android {
@@ -54,7 +53,8 @@
mFetchMetaDataGeneration(0),
mHasMetadata(false),
mMetadataSelected(false) {
- getDefaultBufferingSettings(&mBufferingSettings);
+ mBufferingSettings.mInitialMarkMs = kPrepareMarkMs;
+ mBufferingSettings.mResumePlaybackMarkMs = kReadyMarkMs;
if (headers) {
mExtraHeaders = *headers;
@@ -82,35 +82,16 @@
}
}
-status_t NuPlayer::HTTPLiveSource::getDefaultBufferingSettings(
+status_t NuPlayer::HTTPLiveSource::getBufferingSettings(
BufferingSettings* buffering /* nonnull */) {
- buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
- buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
- buffering->mInitialWatermarkMs = kPrepareMarkMs;
- buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
- buffering->mRebufferingWatermarkHighMs = kReadyMarkMs;
+ *buffering = mBufferingSettings;
return OK;
}
status_t NuPlayer::HTTPLiveSource::setBufferingSettings(const BufferingSettings& buffering) {
- if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
- || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
- || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
- && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)) {
- return BAD_VALUE;
- }
-
mBufferingSettings = buffering;
- if (mBufferingSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
- mBufferingSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
- }
- if (mBufferingSettings.mRebufferingMode == BUFFERING_MODE_NONE) {
- mBufferingSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
- mBufferingSettings.mRebufferingWatermarkHighMs = INT32_MAX;
- }
-
if (mLiveSession != NULL) {
mLiveSession->setBufferingSettings(mBufferingSettings);
}
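
The buffering rework above replaces the old mode/watermark matrix with two time marks, mInitialMarkMs and mResumePlaybackMarkMs, and getBufferingSettings() now just copies the stored struct. A small sketch of a source holding those marks (the struct is a simplified stand-in for BufferingSettings; the defaults mirror kPrepareMarkMs and kReadyMarkMs above):

#include <cstdint>

struct BufferingSettingsSketch {
    int32_t mInitialMarkMs = 1500;         // buffer this long before "prepared"
    int32_t mResumePlaybackMarkMs = 5000;  // buffer this long before resuming
};

struct LiveSourceSketch {
    BufferingSettingsSketch mBufferingSettings;

    int getBufferingSettings(BufferingSettingsSketch* out) const {
        *out = mBufferingSettings;         // no derived or validated fields left
        return 0;                          // OK
    }

    int setBufferingSettings(const BufferingSettingsSketch& in) {
        mBufferingSettings = in;           // accepted as-is, forwarded elsewhere
        return 0;                          // OK
    }
};
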
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 2866a6a..2d6c604 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -34,7 +34,7 @@
const char *url,
const KeyedVector<String8, String8> *headers);
- virtual status_t getDefaultBufferingSettings(
+ virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) override;
virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index df36046..a5f5fc6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -48,7 +48,9 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
@@ -56,7 +58,6 @@
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
-#include "avc_utils.h"
#include "ESDS.h"
#include <media/stagefright/Utils.h>
@@ -172,14 +173,17 @@
////////////////////////////////////////////////////////////////////////////////
-NuPlayer::NuPlayer(pid_t pid)
+NuPlayer::NuPlayer(pid_t pid, const sp<MediaClock> &mediaClock)
: mUIDValid(false),
mPID(pid),
+ mMediaClock(mediaClock),
mSourceFlags(0),
mOffloadAudio(false),
mAudioDecoderGeneration(0),
mVideoDecoderGeneration(0),
mRendererGeneration(0),
+ mLastStartedPlayingTimeNs(0),
+ mLastStartedRebufferingTimeNs(0),
mPreviousSeekTimeUs(0),
mAudioEOS(false),
mVideoEOS(false),
@@ -204,6 +208,7 @@
mPausedForBuffering(false),
mIsDrmProtected(false),
mDataSourceType(DATA_SOURCE_TYPE_NONE) {
+ CHECK(mediaClock != NULL);
clearFlushComplete();
}
@@ -215,8 +220,11 @@
mUID = uid;
}
-void NuPlayer::setDriver(const wp<NuPlayerDriver> &driver) {
+void NuPlayer::init(const wp<NuPlayerDriver> &driver) {
mDriver = driver;
+
+ sp<AMessage> notify = new AMessage(kWhatMediaClockNotify, this);
+ mMediaClock->setNotificationMessage(notify);
}
void NuPlayer::setDataSourceAsync(const sp<IStreamSource> &source) {
@@ -278,7 +286,7 @@
ALOGV("setDataSourceAsync GenericSource %s", url);
sp<GenericSource> genericSource =
- new GenericSource(notify, mUIDValid, mUID);
+ new GenericSource(notify, mUIDValid, mUID, mMediaClock);
status_t err = genericSource->setDataSource(httpService, url, headers);
@@ -301,7 +309,7 @@
sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);
sp<GenericSource> source =
- new GenericSource(notify, mUIDValid, mUID);
+ new GenericSource(notify, mUIDValid, mUID, mMediaClock);
ALOGV("setDataSourceAsync fd %d/%lld/%lld source: %p",
fd, (long long)offset, (long long)length, source.get());
@@ -322,7 +330,7 @@
sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);
- sp<GenericSource> source = new GenericSource(notify, mUIDValid, mUID);
+ sp<GenericSource> source = new GenericSource(notify, mUIDValid, mUID, mMediaClock);
status_t err = source->setDataSource(dataSource);
if (err != OK) {
@@ -335,9 +343,9 @@
mDataSourceType = DATA_SOURCE_TYPE_MEDIA;
}
-status_t NuPlayer::getDefaultBufferingSettings(
+status_t NuPlayer::getBufferingSettings(
BufferingSettings *buffering /* nonnull */) {
- sp<AMessage> msg = new AMessage(kWhatGetDefaultBufferingSettings, this);
+ sp<AMessage> msg = new AMessage(kWhatGetBufferingSettings, this);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
if (err == OK && response != NULL) {
@@ -470,6 +478,13 @@
(new AMessage(kWhatReset, this))->post();
}
+status_t NuPlayer::notifyAt(int64_t mediaTimeUs) {
+ sp<AMessage> notify = new AMessage(kWhatNotifyTime, this);
+ notify->setInt64("timerUs", mediaTimeUs);
+ mMediaClock->addTimer(notify, mediaTimeUs);
+ return OK;
+}
+
void NuPlayer::seekToAsync(int64_t seekTimeUs, MediaPlayerSeekMode mode, bool needNotify) {
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("seekTimeUs", seekTimeUs);
@@ -556,16 +571,16 @@
break;
}
- case kWhatGetDefaultBufferingSettings:
+ case kWhatGetBufferingSettings:
{
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- ALOGV("kWhatGetDefaultBufferingSettings");
+ ALOGV("kWhatGetBufferingSettings");
BufferingSettings buffering;
status_t err = OK;
if (mSource != NULL) {
- err = mSource->getDefaultBufferingSettings(&buffering);
+ err = mSource->getBufferingSettings(&buffering);
} else {
err = INVALID_OPERATION;
}
@@ -1274,7 +1289,8 @@
ALOGV("Tear down audio with reason %d.", reason);
if (reason == Renderer::kDueToTimeout && !(mPaused && mOffloadAudio)) {
// TimeoutWhenPaused is only for offload mode.
- ALOGW("Receive a stale message for teardown.");
+ ALOGW("Received a stale message for teardown, mPaused(%d), mOffloadAudio(%d)",
+ mPaused, mOffloadAudio);
break;
}
int64_t positionUs;
@@ -1299,6 +1315,8 @@
ALOGV("kWhatReset");
mResetting = true;
+ updatePlaybackTimer(true /* stopping */, "kWhatReset");
+ updateRebufferingTimer(true /* stopping */, true /* exiting */);
mDeferredActions.push_back(
new FlushDecoderAction(
@@ -1312,6 +1330,16 @@
break;
}
+ case kWhatNotifyTime:
+ {
+ ALOGV("kWhatNotifyTime");
+ int64_t timerUs;
+ CHECK(msg->findInt64("timerUs", &timerUs));
+
+ notifyListener(MEDIA_NOTIFY_TIME, timerUs, 0);
+ break;
+ }
+
case kWhatSeek:
{
int64_t seekTimeUs;
@@ -1401,6 +1429,47 @@
break;
}
+ case kWhatMediaClockNotify:
+ {
+ ALOGV("kWhatMediaClockNotify");
+ int64_t anchorMediaUs, anchorRealUs;
+ float playbackRate;
+ CHECK(msg->findInt64("anchor-media-us", &anchorMediaUs));
+ CHECK(msg->findInt64("anchor-real-us", &anchorRealUs));
+ CHECK(msg->findFloat("playback-rate", &playbackRate));
+
+ Parcel in;
+ in.writeInt64(anchorMediaUs);
+ in.writeInt64(anchorRealUs);
+ in.writeFloat(playbackRate);
+
+ notifyListener(MEDIA_TIME_DISCONTINUITY, 0, 0, &in);
+ break;
+ }
+
+ case kWhatGetStats:
+ {
+ ALOGV("kWhatGetStats");
+
+ Vector<sp<AMessage>> *trackStats;
+ CHECK(msg->findPointer("trackstats", (void**)&trackStats));
+
+ trackStats->clear();
+ if (mVideoDecoder != NULL) {
+ trackStats->push_back(mVideoDecoder->getStats());
+ }
+ if (mAudioDecoder != NULL) {
+ trackStats->push_back(mAudioDecoder->getStats());
+ }
+
+ // respond for synchronization
+ sp<AMessage> response = new AMessage;
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+ break;
+ }
+
default:
TRESPASS();
break;
@@ -1429,7 +1498,7 @@
ALOGW("resume called when renderer is gone or not set");
}
- mLastStartedPlayingTimeNs = systemTime();
+ startPlaybackTimer("onresume");
}
status_t NuPlayer::onInstantiateSecureDecoders() {
@@ -1523,7 +1592,7 @@
sp<AMessage> notify = new AMessage(kWhatRendererNotify, this);
++mRendererGeneration;
notify->setInt32("generation", mRendererGeneration);
- mRenderer = new Renderer(mAudioSink, notify, flags);
+ mRenderer = new Renderer(mAudioSink, mMediaClock, notify, flags);
mRendererLooper = new ALooper;
mRendererLooper->setName("NuPlayerRenderer");
mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
@@ -1549,12 +1618,91 @@
mAudioDecoder->setRenderer(mRenderer);
}
- mLastStartedPlayingTimeNs = systemTime();
+ startPlaybackTimer("onstart");
postScanSources();
}
+void NuPlayer::startPlaybackTimer(const char *where) {
+ Mutex::Autolock autoLock(mPlayingTimeLock);
+ if (mLastStartedPlayingTimeNs == 0) {
+ mLastStartedPlayingTimeNs = systemTime();
+ ALOGV("startPlaybackTimer() time %20" PRId64 " (%s)", mLastStartedPlayingTimeNs, where);
+ }
+}
+
+void NuPlayer::updatePlaybackTimer(bool stopping, const char *where) {
+ Mutex::Autolock autoLock(mPlayingTimeLock);
+
+ ALOGV("updatePlaybackTimer(%s) time %20" PRId64 " (%s)",
+ stopping ? "stop" : "snap", mLastStartedPlayingTimeNs, where);
+
+ if (mLastStartedPlayingTimeNs != 0) {
+ sp<NuPlayerDriver> driver = mDriver.promote();
+ int64_t now = systemTime();
+ if (driver != NULL) {
+ int64_t played = now - mLastStartedPlayingTimeNs;
+ ALOGV("updatePlaybackTimer() log %20" PRId64 "", played);
+
+ if (played > 0) {
+ driver->notifyMorePlayingTimeUs((played+500)/1000);
+ }
+ }
+ if (stopping) {
+ mLastStartedPlayingTimeNs = 0;
+ } else {
+ mLastStartedPlayingTimeNs = now;
+ }
+ }
+}
+
+void NuPlayer::startRebufferingTimer() {
+ Mutex::Autolock autoLock(mPlayingTimeLock);
+ if (mLastStartedRebufferingTimeNs == 0) {
+ mLastStartedRebufferingTimeNs = systemTime();
+ ALOGV("startRebufferingTimer() time %20" PRId64 "", mLastStartedRebufferingTimeNs);
+ }
+}
+
+void NuPlayer::updateRebufferingTimer(bool stopping, bool exitingPlayback) {
+ Mutex::Autolock autoLock(mPlayingTimeLock);
+
+ ALOGV("updateRebufferingTimer(%s) time %20" PRId64 " (exiting %d)",
+ stopping ? "stop" : "snap", mLastStartedRebufferingTimeNs, exitingPlayback);
+
+ if (mLastStartedRebufferingTimeNs != 0) {
+ sp<NuPlayerDriver> driver = mDriver.promote();
+ int64_t now = systemTime();
+ if (driver != NULL) {
+ int64_t rebuffered = now - mLastStartedRebufferingTimeNs;
+ ALOGV("updateRebufferingTimer() log %20" PRId64 "", rebuffered);
+
+ if (rebuffered > 0) {
+ driver->notifyMoreRebufferingTimeUs((rebuffered+500)/1000);
+ if (exitingPlayback) {
+ driver->notifyRebufferingWhenExit(true);
+ }
+ }
+ }
+ if (stopping) {
+ mLastStartedRebufferingTimeNs = 0;
+ } else {
+ mLastStartedRebufferingTimeNs = now;
+ }
+ }
+}
+
+void NuPlayer::updateInternalTimers() {
+ // Flush the accumulated values; any running timer keeps ticking.
+ ALOGV("updateInternalTimers()");
+ updatePlaybackTimer(false /* stopping */, "updateInternalTimers");
+ updateRebufferingTimer(false /* stopping */, false /* exiting */);
+}
+
void NuPlayer::onPause() {
+
+ updatePlaybackTimer(true /* stopping */, "onPause");
+
if (mPaused) {
return;
}
@@ -1570,13 +1718,6 @@
ALOGW("pause called when renderer is gone or not set");
}
- sp<NuPlayerDriver> driver = mDriver.promote();
- if (driver != NULL) {
- int64_t now = systemTime();
- int64_t played = now - mLastStartedPlayingTimeNs;
-
- driver->notifyMorePlayingTimeUs((played+500)/1000);
- }
}
bool NuPlayer::audioDecoderStillNeeded() {
@@ -1672,6 +1813,8 @@
void NuPlayer::restartAudio(
int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
+ ALOGD("restartAudio timeUs(%lld), dontOffload(%d), createDecoder(%d)",
+ (long long)currentPositionUs, forceNonOffload, needsToCreateAudioDecoder);
if (mAudioDecoder != NULL) {
mAudioDecoder->pause();
mAudioDecoder.clear();
@@ -2090,16 +2233,16 @@
return renderer->getCurrentPosition(mediaUs);
}
-void NuPlayer::getStats(Vector<sp<AMessage> > *mTrackStats) {
- CHECK(mTrackStats != NULL);
+void NuPlayer::getStats(Vector<sp<AMessage> > *trackStats) {
+ CHECK(trackStats != NULL);
- mTrackStats->clear();
- if (mVideoDecoder != NULL) {
- mTrackStats->push_back(mVideoDecoder->getStats());
- }
- if (mAudioDecoder != NULL) {
- mTrackStats->push_back(mAudioDecoder->getStats());
- }
+ ALOGV("NuPlayer::getStats()");
+ sp<AMessage> msg = new AMessage(kWhatGetStats, this);
+ msg->setPointer("trackstats", trackStats);
+
+ sp<AMessage> response;
+ (void) msg->postAndAwaitResponse(&response);
+ // response is for synchronization, ignore contents
}
sp<MetaData> NuPlayer::getFileMeta() {
@@ -2203,6 +2346,9 @@
CHECK(mAudioDecoder == NULL);
CHECK(mVideoDecoder == NULL);
+ updatePlaybackTimer(true /* stopping */, "performReset");
+ updateRebufferingTimer(true /* stopping */, true /* exiting */);
+
cancelPollDuration();
++mScanSourcesGeneration;
@@ -2455,6 +2601,7 @@
if (mStarted) {
ALOGI("buffer low, pausing...");
+ startRebufferingTimer();
mPausedForBuffering = true;
onPause();
}
@@ -2468,6 +2615,7 @@
if (mStarted) {
ALOGI("buffer ready, resuming...");
+ updateRebufferingTimer(true /* stopping */, false /* exiting */);
mPausedForBuffering = false;
// do not resume yet if client didn't unpause
@@ -2601,7 +2749,7 @@
void NuPlayer::sendSubtitleData(const sp<ABuffer> &buffer, int32_t baseIndex) {
int32_t trackIndex;
int64_t timeUs, durationUs;
- CHECK(buffer->meta()->findInt32("trackIndex", &trackIndex));
+ CHECK(buffer->meta()->findInt32("track-index", &trackIndex));
CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
CHECK(buffer->meta()->findInt64("durationUs", &durationUs));
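
startPlaybackTimer()/updatePlaybackTimer() above accumulate wall-clock playing time by stamping a start time and flushing the elapsed delta to the driver whenever the timer is stopped or snapshotted. A compact sketch of that accumulate-on-stop pattern (the callback is a placeholder for NuPlayerDriver::notifyMorePlayingTimeUs):

#include <chrono>
#include <cstdint>
#include <functional>
#include <mutex>

struct PlaybackTimerSketch {
    std::mutex mLock;
    int64_t mStartedNs = 0;                                 // 0 means "not running"
    std::function<void(int64_t)> notifyMorePlayingTimeUs;   // driver stand-in

    static int64_t nowNs() {
        using namespace std::chrono;
        return duration_cast<nanoseconds>(
                steady_clock::now().time_since_epoch()).count();
    }

    void start() {
        std::lock_guard<std::mutex> l(mLock);
        if (mStartedNs == 0) mStartedNs = nowNs();           // ignore repeated starts
    }

    // stopping == true clears the timer; false snapshots and keeps it running.
    void update(bool stopping) {
        std::lock_guard<std::mutex> l(mLock);
        if (mStartedNs == 0) return;
        int64_t now = nowNs();
        int64_t playedNs = now - mStartedNs;
        if (playedNs > 0 && notifyMorePlayingTimeUs) {
            notifyMorePlayingTimeUs((playedNs + 500) / 1000);  // ns -> us, rounded
        }
        mStartedNs = stopping ? 0 : now;
    }
};
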
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index c69835f..e400d16 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -30,15 +30,16 @@
struct AudioPlaybackRate;
struct AVSyncSettings;
class IDataSource;
+struct MediaClock;
class MetaData;
struct NuPlayerDriver;
struct NuPlayer : public AHandler {
- explicit NuPlayer(pid_t pid);
+ explicit NuPlayer(pid_t pid, const sp<MediaClock> &mediaClock);
void setUID(uid_t uid);
- void setDriver(const wp<NuPlayerDriver> &driver);
+ void init(const wp<NuPlayerDriver> &driver);
void setDataSourceAsync(const sp<IStreamSource> &source);
@@ -51,7 +52,7 @@
void setDataSourceAsync(const sp<DataSource> &source);
- status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
+ status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */);
status_t setBufferingSettings(const BufferingSettings& buffering);
void prepareAsync();
@@ -72,6 +73,9 @@
// Will notify the driver through "notifyResetComplete" once finished.
void resetAsync();
+ // Request a notification when specified media time is reached.
+ status_t notifyAt(int64_t mediaTimeUs);
+
// Will notify the driver through "notifySeekComplete" once finished
// and needNotify is true.
void seekToAsync(
@@ -84,7 +88,7 @@
status_t getSelectedTrack(int32_t type, Parcel* reply) const;
status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
status_t getCurrentPosition(int64_t *mediaUs);
- void getStats(Vector<sp<AMessage> > *mTrackStats);
+ void getStats(Vector<sp<AMessage> > *trackStats);
sp<MetaData> getFileMeta();
float getFrameRate();
@@ -95,6 +99,8 @@
const char *getDataSourceType();
+ void updateInternalTimers();
+
protected:
virtual ~NuPlayer();
@@ -139,6 +145,7 @@
kWhatClosedCaptionNotify = 'capN',
kWhatRendererNotify = 'renN',
kWhatReset = 'rset',
+ kWhatNotifyTime = 'nfyT',
kWhatSeek = 'seek',
kWhatPause = 'paus',
kWhatResume = 'rsme',
@@ -147,16 +154,19 @@
kWhatGetTrackInfo = 'gTrI',
kWhatGetSelectedTrack = 'gSel',
kWhatSelectTrack = 'selT',
- kWhatGetDefaultBufferingSettings = 'gDBS',
+ kWhatGetBufferingSettings = 'gBus',
kWhatSetBufferingSettings = 'sBuS',
kWhatPrepareDrm = 'pDrm',
kWhatReleaseDrm = 'rDrm',
+ kWhatMediaClockNotify = 'mckN',
+ kWhatGetStats = 'gSts',
};
wp<NuPlayerDriver> mDriver;
bool mUIDValid;
uid_t mUID;
pid_t mPID;
+ const sp<MediaClock> mMediaClock;
Mutex mSourceLock; // guard |mSource|.
sp<Source> mSource;
uint32_t mSourceFlags;
@@ -172,7 +182,14 @@
int32_t mVideoDecoderGeneration;
int32_t mRendererGeneration;
+ Mutex mPlayingTimeLock;
int64_t mLastStartedPlayingTimeNs;
+ void updatePlaybackTimer(bool stopping, const char *where);
+ void startPlaybackTimer(const char *where);
+
+ int64_t mLastStartedRebufferingTimeNs;
+ void startRebufferingTimer();
+ void updateRebufferingTimer(bool stopping, bool exitingPlayback);
int64_t mPreviousSeekTimeUs;
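
The new notifyAt() path above arms a MediaClock timer that posts kWhatNotifyTime back to the player at the requested media time, which is then surfaced to the listener as MEDIA_NOTIFY_TIME. A rough sketch of that plumbing with a generic clock in place of MediaClock (ClockSketch and its advanceTo() are illustrative only):

#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

struct ClockSketch {
    std::vector<std::pair<int64_t, std::function<void(int64_t)>>> timers;

    void addTimer(std::function<void(int64_t)> cb, int64_t mediaTimeUs) {
        timers.emplace_back(mediaTimeUs, std::move(cb));
    }

    // In the real player the clock fires as playback reaches the media time;
    // here the caller advances it explicitly.
    void advanceTo(int64_t mediaTimeUs) {
        for (auto& t : timers) {
            if (t.second && t.first <= mediaTimeUs) {
                auto cb = std::move(t.second);
                t.second = nullptr;
                cb(t.first);                   // analogue of posting kWhatNotifyTime
            }
        }
    }
};

struct PlayerSketch {
    ClockSketch clock;
    std::function<void(int64_t)> notifyListener;   // MEDIA_NOTIFY_TIME stand-in

    int notifyAt(int64_t mediaTimeUs) {
        clock.addTimer([this](int64_t timerUs) {
            if (notifyListener) notifyListener(timerUs);
        }, mediaTimeUs);
        return 0;                                  // OK
    }
};
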
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 73b07bb..fb12360 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -19,13 +19,13 @@
#include <utils/Log.h>
#include <inttypes.h>
-#include "avc_utils.h"
#include "NuPlayerCCDecoder.h"
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/MediaDefs.h>
namespace android {
@@ -155,7 +155,9 @@
break;
default:
ALOGE("Unknown track type: %d", track.mTrackType);
- return NULL;
+ format->setInt32("type", MEDIA_TRACK_TYPE_UNKNOWN);
+ format->setString("mime", "application/octet-stream");
+ return format;
}
// For CEA-608 CC1, field 0 channel 0
@@ -301,7 +303,7 @@
// returns true if a new CC track is found
bool NuPlayer::CCDecoder::extractFromMPEGUserData(const sp<ABuffer> &accessUnit) {
sp<ABuffer> mpegUserData;
- if (!accessUnit->meta()->findBuffer("mpegUserData", &mpegUserData)
+ if (!accessUnit->meta()->findBuffer("mpeg-user-data", &mpegUserData)
|| mpegUserData == NULL) {
return false;
}
@@ -538,7 +540,7 @@
dumpBytePair(ccBuf);
#endif
- ccBuf->meta()->setInt32("trackIndex", mSelectedTrack);
+ ccBuf->meta()->setInt32("track-index", mSelectedTrack);
ccBuf->meta()->setInt64("timeUs", timeUs);
ccBuf->meta()->setInt64("durationUs", 0ll);
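
The CCDecoder change above stops returning NULL for an unrecognized track type and instead returns a format tagged MEDIA_TRACK_TYPE_UNKNOWN with an application/octet-stream MIME, so callers always get a usable format object. A small sketch of that defensive fallback (FormatSketch stands in for AMessage; the recognized-type branch is illustrative):

#include <map>
#include <memory>
#include <string>

struct FormatSketch {                                  // AMessage stand-in
    std::map<std::string, std::string> strings;
    std::map<std::string, int> ints;
    void setString(const char* k, const char* v) { strings[k] = v; }
    void setInt32(const char* k, int v) { ints[k] = v; }
};

std::shared_ptr<FormatSketch> trackFormat(int trackType) {
    auto format = std::make_shared<FormatSketch>();
    if (trackType == 1) {                              // a recognized type in the real switch
        format->setInt32("type", trackType);
        format->setString("mime", "text/cea-608");
    } else {
        // Unknown type: hand back a placeholder instead of a null format.
        format->setInt32("type", 0 /* MEDIA_TRACK_TYPE_UNKNOWN */);
        format->setString("mime", "application/octet-stream");
    }
    return format;
}
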
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index ac187cc..69cd82e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -29,10 +29,12 @@
#include <cutils/properties.h>
#include <media/ICrypto.h>
+#include <media/MediaBufferHolder.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaDefs.h>
@@ -40,7 +42,6 @@
#include <media/stagefright/SurfaceUtils.h>
#include <gui/Surface.h>
-#include "avc_utils.h"
#include "ATSParser.h"
namespace android {
@@ -744,26 +745,35 @@
sp<AMessage> reply = new AMessage(kWhatRenderBuffer, this);
reply->setSize("buffer-ix", index);
reply->setInt32("generation", mBufferGeneration);
+ reply->setSize("size", size);
if (eos) {
ALOGI("[%s] saw output EOS", mIsAudio ? "audio" : "video");
buffer->meta()->setInt32("eos", true);
reply->setInt32("eos", true);
- } else if (mSkipRenderingUntilMediaTimeUs >= 0) {
+ }
+
+ mNumFramesTotal += !mIsAudio;
+
+ if (mSkipRenderingUntilMediaTimeUs >= 0) {
if (timeUs < mSkipRenderingUntilMediaTimeUs) {
ALOGV("[%s] dropping buffer at time %lld as requested.",
mComponentName.c_str(), (long long)timeUs);
reply->post();
+ if (eos) {
+ notifyResumeCompleteIfNecessary();
+ if (mRenderer != NULL && !isDiscontinuityPending()) {
+ mRenderer->queueEOS(mIsAudio, ERROR_END_OF_STREAM);
+ }
+ }
return true;
}
mSkipRenderingUntilMediaTimeUs = -1;
}
- mNumFramesTotal += !mIsAudio;
-
// wait until 1st frame comes out to signal resume complete
notifyResumeCompleteIfNecessary();
@@ -937,7 +947,8 @@
mCurrentMaxVideoTemporalLayerId);
} else if (layerId > mCurrentMaxVideoTemporalLayerId) {
mCurrentMaxVideoTemporalLayerId = layerId;
- } else if (layerId == 0 && mNumVideoTemporalLayerTotal > 1 && IsIDR(accessUnit)) {
+ } else if (layerId == 0 && mNumVideoTemporalLayerTotal > 1
+ && IsIDR(accessUnit->data(), accessUnit->size())) {
mCurrentMaxVideoTemporalLayerId = mNumVideoTemporalLayerTotal - 1;
}
}
@@ -1036,7 +1047,7 @@
}
// Modular DRM
- MediaBuffer *mediaBuf = NULL;
+ MediaBufferBase *mediaBuf = NULL;
NuPlayerDrm::CryptoInfo *cryptInfo = NULL;
// copy into codec buffer
@@ -1052,16 +1063,17 @@
memcpy(codecBuffer->data(), buffer->data(), buffer->size());
} else { // No buffer->data()
//Modular DRM
- mediaBuf = (MediaBuffer*)buffer->getMediaBufferBase();
+ sp<RefBase> holder;
+ if (buffer->meta()->findObject("mediaBufferHolder", &holder)) {
+ mediaBuf = (holder != nullptr) ?
+ static_cast<MediaBufferHolder*>(holder.get())->mediaBuffer() : nullptr;
+ }
if (mediaBuf != NULL) {
codecBuffer->setRange(0, mediaBuf->size());
memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
- sp<MetaData> meta_data = mediaBuf->meta_data();
+ MetaDataBase &meta_data = mediaBuf->meta_data();
cryptInfo = NuPlayerDrm::getSampleCryptoInfo(meta_data);
-
- // since getMediaBuffer() has incremented the refCount
- mediaBuf->release();
} else { // No mediaBuf
ALOGE("onInputBufferFetched: buffer->data()/mediaBuf are NULL for %p",
buffer.get());
@@ -1116,6 +1128,7 @@
int32_t render;
size_t bufferIx;
int32_t eos;
+ size_t size;
CHECK(msg->findSize("buffer-ix", &bufferIx));
if (!mIsAudio) {
@@ -1135,7 +1148,10 @@
CHECK(msg->findInt64("timestampNs", &timestampNs));
err = mCodec->renderOutputBufferAndRelease(bufferIx, timestampNs);
} else {
- mNumOutputFramesDropped += !mIsAudio;
+ if (!msg->findInt32("eos", &eos) || !eos ||
+ !msg->findSize("size", &size) || size) {
+ mNumOutputFramesDropped += !mIsAudio;
+ }
err = mCodec->releaseOutputBuffer(bufferIx);
}
if (err != OK) {
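
onInputBufferFetched() above now recovers the MediaBufferBase by looking up the "mediaBufferHolder" object on the ABuffer's meta message, replacing the old setMediaBufferBase()/getMediaBufferBase() hookup and the manual release that went with it. A hedged sketch of the holder idea using plain reference counting (BufSketch and HolderSketch are illustrative, not the real classes):

#include <cassert>
#include <memory>

struct BufSketch {                      // only the ref-count behaviour matters
    int refs = 0;
    void add_ref() { ++refs; }
    void release() { --refs; }          // the real buffer recycles at zero
};

// Pins the buffer for exactly as long as the holder is alive, which is what
// attaching a MediaBufferHolder to ab->meta() accomplishes in this patch.
struct HolderSketch {
    explicit HolderSketch(BufSketch* b) : mBuf(b) { if (mBuf) mBuf->add_ref(); }
    ~HolderSketch() { if (mBuf) mBuf->release(); }
    BufSketch* mediaBuffer() const { return mBuf; }
private:
    BufSketch* mBuf;
};

int main() {
    BufSketch buf;
    {
        auto holder = std::make_shared<HolderSketch>(&buf);   // producer attaches
        assert(buf.refs == 1);
        BufSketch* fetched = holder->mediaBuffer();           // consumer looks it up
        (void)fetched;                                        // copy / crypto info here
    }                                                         // holder dropped with the ABuffer
    assert(buf.refs == 0);
    return 0;
}
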
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index dc29761..3e5bdd6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -28,6 +28,8 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
@@ -41,6 +43,9 @@
// key for media statistics
static const char *kKeyPlayer = "nuplayer";
// attrs for media statistics
+ // NB: these are matched with public Java API constants defined
+ // in frameworks/base/media/java/android/media/MediaPlayer.java
+ // These must be kept synchronized with the constants there.
static const char *kPlayerVMime = "android.media.mediaplayer.video.mime";
static const char *kPlayerVCodec = "android.media.mediaplayer.video.codec";
static const char *kPlayerWidth = "android.media.mediaplayer.width";
@@ -53,7 +58,14 @@
static const char *kPlayerPlaying = "android.media.mediaplayer.playingMs";
static const char *kPlayerError = "android.media.mediaplayer.err";
static const char *kPlayerErrorCode = "android.media.mediaplayer.errcode";
+
+ // NB: These are not yet exposed as public Java API constants.
+static const char *kPlayerErrorState = "android.media.mediaplayer.errstate";
static const char *kPlayerDataSourceType = "android.media.mediaplayer.dataSource";
+//
+static const char *kPlayerRebuffering = "android.media.mediaplayer.rebufferingMs";
+static const char *kPlayerRebufferingCount = "android.media.mediaplayer.rebuffers";
+static const char *kPlayerRebufferingAtExit = "android.media.mediaplayer.rebufferExit";
NuPlayerDriver::NuPlayerDriver(pid_t pid)
@@ -65,8 +77,12 @@
mPositionUs(-1),
mSeekInProgress(false),
mPlayingTimeUs(0),
+ mRebufferingTimeUs(0),
+ mRebufferingEvents(0),
+ mRebufferingAtExit(false),
mLooper(new ALooper),
- mPlayer(new NuPlayer(pid)),
+ mMediaClock(new MediaClock),
+ mPlayer(new NuPlayer(pid, mMediaClock)),
mPlayerFlags(0),
mAnalyticsItem(NULL),
mClientUid(-1),
@@ -76,9 +92,10 @@
ALOGD("NuPlayerDriver(%p) created, clientPid(%d)", this, pid);
mLooper->setName("NuPlayerDriver Looper");
+ mMediaClock->init();
+
// set up an analytics record
mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
- mAnalyticsItem->generateSessionID();
mLooper->start(
false, /* runOnCallingThread */
@@ -87,7 +104,7 @@
mLooper->registerHandler(mPlayer);
- mPlayer->setDriver(this);
+ mPlayer->init(this);
}
NuPlayerDriver::~NuPlayerDriver() {
@@ -226,8 +243,8 @@
return OK;
}
-status_t NuPlayerDriver::getDefaultBufferingSettings(BufferingSettings* buffering) {
- ALOGV("getDefaultBufferingSettings(%p)", this);
+status_t NuPlayerDriver::getBufferingSettings(BufferingSettings* buffering) {
+ ALOGV("getBufferingSettings(%p)", this);
{
Mutex::Autolock autoLock(mLock);
if (mState == STATE_IDLE) {
@@ -235,7 +252,7 @@
}
}
- return mPlayer->getDefaultBufferingSettings(buffering);
+ return mPlayer->getBufferingSettings(buffering);
}
status_t NuPlayerDriver::setBufferingSettings(const BufferingSettings& buffering) {
@@ -518,6 +535,7 @@
}
void NuPlayerDriver::updateMetrics(const char *where) {
+
if (where == NULL) {
where = "unknown";
}
@@ -575,8 +593,16 @@
getDuration(&duration_ms);
mAnalyticsItem->setInt64(kPlayerDuration, duration_ms);
+ mPlayer->updateInternalTimers();
+
mAnalyticsItem->setInt64(kPlayerPlaying, (mPlayingTimeUs+500)/1000 );
+ if (mRebufferingEvents != 0) {
+ mAnalyticsItem->setInt64(kPlayerRebuffering, (mRebufferingTimeUs+500)/1000 );
+ mAnalyticsItem->setInt32(kPlayerRebufferingCount, mRebufferingEvents);
+ mAnalyticsItem->setInt32(kPlayerRebufferingAtExit, mRebufferingAtExit);
+ }
+
mAnalyticsItem->setCString(kPlayerDataSourceType, mPlayer->getDataSourceType());
}
@@ -598,18 +624,16 @@
// So the canonical "empty" record has 3 elements in it.
if (mAnalyticsItem->count() > 3) {
- mAnalyticsItem->setFinalized(true);
mAnalyticsItem->selfrecord();
// re-init in case we prepare() and start() again.
delete mAnalyticsItem ;
mAnalyticsItem = new MediaAnalyticsItem("nuplayer");
if (mAnalyticsItem) {
- mAnalyticsItem->generateSessionID();
mAnalyticsItem->setUid(mClientUid);
}
} else {
- ALOGV("did not have anything to record");
+ ALOGV("nothing to record (only %d fields)", mAnalyticsItem->count());
}
}
@@ -645,6 +669,11 @@
notifyListener_l(MEDIA_STOPPED);
}
+ if (property_get_bool("persist.debug.sf.stats", false)) {
+ Vector<String16> args;
+ dump(-1, args);
+ }
+
mState = STATE_RESET_IN_PROGRESS;
mPlayer->resetAsync();
@@ -656,10 +685,18 @@
mPositionUs = -1;
mLooping = false;
mPlayingTimeUs = 0;
+ mRebufferingTimeUs = 0;
+ mRebufferingEvents = 0;
+ mRebufferingAtExit = false;
return OK;
}
+status_t NuPlayerDriver::notifyAt(int64_t mediaTimeUs) {
+ ALOGV("notifyAt(%p), time:%lld", this, (long long)mediaTimeUs);
+ return mPlayer->notifyAt(mediaTimeUs);
+}
+
status_t NuPlayerDriver::setLooping(int loop) {
mLooping = loop != 0;
return OK;
@@ -801,6 +838,17 @@
mPlayingTimeUs += playingUs;
}
+void NuPlayerDriver::notifyMoreRebufferingTimeUs(int64_t rebufferingUs) {
+ Mutex::Autolock autoLock(mLock);
+ mRebufferingTimeUs += rebufferingUs;
+ mRebufferingEvents++;
+}
+
+void NuPlayerDriver::notifyRebufferingWhenExit(bool status) {
+ Mutex::Autolock autoLock(mLock);
+ mRebufferingAtExit = status;
+}
+
void NuPlayerDriver::notifySeekComplete() {
ALOGV("notifySeekComplete(%p)", this);
Mutex::Autolock autoLock(mLock);
@@ -956,6 +1004,7 @@
if (ext2 != 0) {
mAnalyticsItem->setInt32(kPlayerErrorCode, ext2);
}
+ mAnalyticsItem->setCString(kPlayerErrorState, stateString(mState).c_str());
}
mAtEOS = true;
break;
@@ -1052,4 +1101,30 @@
return ret;
}
+std::string NuPlayerDriver::stateString(State state) {
+ const char *rval = NULL;
+ char rawbuffer[16]; // allows "%d"
+
+ switch (state) {
+ case STATE_IDLE: rval = "IDLE"; break;
+ case STATE_SET_DATASOURCE_PENDING: rval = "SET_DATASOURCE_PENDING"; break;
+ case STATE_UNPREPARED: rval = "UNPREPARED"; break;
+ case STATE_PREPARING: rval = "PREPARING"; break;
+ case STATE_PREPARED: rval = "PREPARED"; break;
+ case STATE_RUNNING: rval = "RUNNING"; break;
+ case STATE_PAUSED: rval = "PAUSED"; break;
+ case STATE_RESET_IN_PROGRESS: rval = "RESET_IN_PROGRESS"; break;
+ case STATE_STOPPED: rval = "STOPPED"; break;
+ case STATE_STOPPED_AND_PREPARING: rval = "STOPPED_AND_PREPARING"; break;
+ case STATE_STOPPED_AND_PREPARED: rval = "STOPPED_AND_PREPARED"; break;
+ default:
+ // yes, this buffer is shared and vulnerable to races
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", state);
+ rval = rawbuffer;
+ break;
+ }
+
+ return rval;
+}
+
} // namespace android
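Note: the rebuffering metrics added above accumulate raw microsecond counts and event counts under the driver lock, and only convert to milliseconds with rounding, (us + 500) / 1000, when the analytics record is written. Below is a minimal, self-contained sketch of that accumulation pattern; the RebufferStats class and its members are illustrative stand-ins, not the NuPlayerDriver implementation.

    #include <cstdint>
    #include <cstdio>
    #include <mutex>

    // Hypothetical accumulator mirroring the rebuffering counters above.
    class RebufferStats {
    public:
        void addRebuffer(int64_t durationUs) {
            std::lock_guard<std::mutex> lock(mLock);
            mRebufferingTimeUs += durationUs;
            mRebufferingEvents++;
        }
        void setRebufferingAtExit(bool atExit) {
            std::lock_guard<std::mutex> lock(mLock);
            mRebufferingAtExit = atExit;
        }
        void report() {
            std::lock_guard<std::mutex> lock(mLock);
            if (mRebufferingEvents != 0) {
                // Round microseconds to milliseconds at report time only.
                std::printf("rebuffering: %lld ms over %d events (at exit: %d)\n",
                            (long long)((mRebufferingTimeUs + 500) / 1000),
                            mRebufferingEvents, mRebufferingAtExit ? 1 : 0);
            }
        }
    private:
        std::mutex mLock;
        int64_t mRebufferingTimeUs = 0;
        int32_t mRebufferingEvents = 0;
        bool mRebufferingAtExit = false;
    };

    int main() {
        RebufferStats stats;
        stats.addRebuffer(250000);   // 250 ms stall
        stats.addRebuffer(1200500);  // ~1.2 s stall
        stats.report();              // prints "rebuffering: 1451 ms over 2 events ..."
        return 0;
    }

Keeping the counters in microseconds until report time avoids accumulating rounding error across many short stalls.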
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index d0cf1dd..ad878f8 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -22,6 +22,7 @@
namespace android {
struct ALooper;
+struct MediaClock;
struct NuPlayer;
struct NuPlayerDriver : public MediaPlayerInterface {
@@ -45,7 +46,7 @@
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer> &bufferProducer);
- virtual status_t getDefaultBufferingSettings(
+ virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) override;
virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
@@ -64,6 +65,7 @@
virtual status_t getCurrentPosition(int *msec);
virtual status_t getDuration(int *msec);
virtual status_t reset();
+ virtual status_t notifyAt(int64_t mediaTimeUs) override;
virtual status_t setLooping(int loop);
virtual player_type playerType();
virtual status_t invoke(const Parcel &request, Parcel *reply);
@@ -82,6 +84,8 @@
void notifySetSurfaceComplete();
void notifyDuration(int64_t durationUs);
void notifyMorePlayingTimeUs(int64_t timeUs);
+ void notifyMoreRebufferingTimeUs(int64_t timeUs);
+ void notifyRebufferingWhenExit(bool status);
void notifySeekComplete();
void notifySeekComplete_l();
void notifyListener(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
@@ -109,6 +113,8 @@
STATE_STOPPED_AND_PREPARED, // equivalent to PAUSED, but seek complete
};
+ std::string stateString(State state);
+
mutable Mutex mLock;
Condition mCondition;
@@ -124,9 +130,13 @@
int64_t mPositionUs;
bool mSeekInProgress;
int64_t mPlayingTimeUs;
+ int64_t mRebufferingTimeUs;
+ int32_t mRebufferingEvents;
+ bool mRebufferingAtExit;
// <<<
sp<ALooper> mLooper;
+ const sp<MediaClock> mMediaClock;
const sp<NuPlayer> mPlayer;
sp<AudioSink> mAudioSink;
uint32_t mPlayerFlags;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
index b7c9db7..bde0862 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -20,7 +20,7 @@
#include "NuPlayerDrm.h"
#include <binder/IServiceManager.h>
-#include <media/IMediaDrmService.h>
+#include <mediadrm/IMediaDrmService.h>
#include <utils/Log.h>
@@ -265,18 +265,13 @@
return ret;
}
-NuPlayerDrm::CryptoInfo *NuPlayerDrm::getSampleCryptoInfo(sp<MetaData> meta)
+NuPlayerDrm::CryptoInfo *NuPlayerDrm::getSampleCryptoInfo(MetaDataBase &meta)
{
uint32_t type;
const void *crypteddata;
size_t cryptedsize;
- if (meta == NULL) {
- ALOGE("getSampleCryptoInfo: Unexpected. No meta data for sample.");
- return NULL;
- }
-
- if (!meta->findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
+ if (!meta.findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
return NULL;
}
size_t numSubSamples = cryptedsize / sizeof(size_t);
@@ -288,7 +283,7 @@
const void *cleardata;
size_t clearsize;
- if (meta->findData(kKeyPlainSizes, &type, &cleardata, &clearsize)) {
+ if (meta.findData(kKeyPlainSizes, &type, &cleardata, &clearsize)) {
if (clearsize != cryptedsize) {
// The two must be of the same length.
ALOGE("getSampleCryptoInfo mismatch cryptedsize: %zu != clearsize: %zu",
@@ -299,7 +294,7 @@
const void *key;
size_t keysize;
- if (meta->findData(kKeyCryptoKey, &type, &key, &keysize)) {
+ if (meta.findData(kKeyCryptoKey, &type, &key, &keysize)) {
if (keysize != kBlockSize) {
ALOGE("getSampleCryptoInfo Keys must be %d bytes in length: %zu",
kBlockSize, keysize);
@@ -310,7 +305,7 @@
const void *iv;
size_t ivsize;
- if (meta->findData(kKeyCryptoIV, &type, &iv, &ivsize)) {
+ if (meta.findData(kKeyCryptoIV, &type, &iv, &ivsize)) {
if (ivsize != kBlockSize) {
ALOGE("getSampleCryptoInfo IV must be %d bytes in length: %zu",
kBlockSize, ivsize);
@@ -320,7 +315,7 @@
}
int32_t mode;
- if (!meta->findInt32(kKeyCryptoMode, &mode)) {
+ if (!meta.findInt32(kKeyCryptoMode, &mode)) {
mode = CryptoPlugin::kMode_AES_CTR;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
index 6b8a2d9..50f69ff 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
@@ -109,7 +109,7 @@
size_t *clearbytes,
size_t *encryptedbytes);
- static CryptoInfo *getSampleCryptoInfo(sp<MetaData> meta);
+ static CryptoInfo *getSampleCryptoInfo(MetaDataBase &meta);
}; // NuPlayerDrm
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index a52303f..57a0198 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -18,13 +18,13 @@
#define LOG_TAG "NuPlayerRenderer"
#include <utils/Log.h>
+#include "AWakeLock.h"
#include "NuPlayerRenderer.h"
#include <algorithm>
#include <cutils/properties.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/foundation/AWakeLock.h>
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
@@ -89,6 +89,7 @@
NuPlayer::Renderer::Renderer(
const sp<MediaPlayerBase::AudioSink> &sink,
+ const sp<MediaClock> &mediaClock,
+ const sp<AMessage> &notify,
uint32_t flags)
: mAudioSink(sink),
@@ -103,11 +104,13 @@
mAudioDrainGeneration(0),
mVideoDrainGeneration(0),
mAudioEOSGeneration(0),
+ mMediaClock(mediaClock),
mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
mAudioFirstAnchorTimeMediaUs(-1),
mAnchorTimeMediaUs(-1),
mAnchorNumFramesWritten(-1),
mVideoLateByUs(0ll),
+ mNextVideoTimeMediaUs(-1),
mHasAudio(false),
mHasVideo(false),
mNotifyCompleteAudio(false),
@@ -130,7 +133,7 @@
mLastAudioBufferDrained(0),
mUseAudioCallback(false),
mWakeLock(new AWakeLock()) {
- mMediaClock = new MediaClock;
+ CHECK(mediaClock != NULL);
mPlaybackRate = mPlaybackSettings.mSpeed;
mMediaClock->setPlaybackRate(mPlaybackRate);
}
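Note: the constructor change above switches the renderer from creating its own MediaClock to receiving one injected by its owner, so the driver and renderer observe the same timeline. Below is a hedged, self-contained illustration of that dependency-injection pattern; ClockSource and Renderer are invented names for the sketch, not the AOSP classes.

    #include <cassert>
    #include <chrono>
    #include <cstdio>
    #include <memory>

    // Stand-in for a shared media clock; not the AOSP MediaClock API.
    struct ClockSource {
        std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
        int64_t nowUs() const {
            return std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::steady_clock::now() - start).count();
        }
    };

    struct Renderer {
        explicit Renderer(std::shared_ptr<ClockSource> clock)
            : mClock(std::move(clock)) {
            assert(mClock != nullptr);  // mirrors the CHECK(mediaClock != NULL) above
        }
        const std::shared_ptr<ClockSource> mClock;  // const: never rebound after construction
    };

    int main() {
        auto clock = std::make_shared<ClockSource>();  // created once by the owner
        Renderer renderer(clock);                      // injected, not constructed internally
        std::printf("owner view: %lld us, renderer view: %lld us\n",
                    (long long)clock->nowUs(), (long long)renderer.mClock->nowUs());
        return 0;
    }

Making the member const also documents that the clock's lifetime is managed outside the renderer, which is why the old mMediaClock.clear() in the destructor is removed.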
@@ -149,7 +152,6 @@
flushQueue(&mVideoQueue);
}
mWakeLock.clear();
- mMediaClock.clear();
mVideoScheduler.clear();
mNotify.clear();
mAudioSink.clear();
@@ -300,6 +302,7 @@
mMediaClock->clearAnchor();
mVideoLateByUs = 0;
+ mNextVideoTimeMediaUs = -1;
mSyncQueues = false;
}
@@ -552,8 +555,10 @@
CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
(status_t)OK);
+ // Handle AudioTrack race when start is immediately called after flush.
uint32_t numFramesPendingPlayout =
- mNumFramesWritten - numFramesPlayed;
+ (mNumFramesWritten > numFramesPlayed ?
+ mNumFramesWritten - numFramesPlayed : 0);
// This is how long the audio sink will have data to
// play back.
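Note: the guarded subtraction above addresses an AudioTrack race where, right after flush(), the frames-played counter can briefly exceed the frames-written counter; with unsigned arithmetic a plain subtraction would wrap to a huge value. A minimal sketch of the same clamp, with illustrative values:

    #include <cstdint>
    #include <cstdio>

    // Clamp to zero instead of letting unsigned subtraction wrap around.
    static uint32_t pendingFrames(uint32_t framesWritten, uint32_t framesPlayed) {
        return framesWritten > framesPlayed ? framesWritten - framesPlayed : 0;
    }

    int main() {
        // Normal case: 4096 written, 1024 played -> 3072 pending.
        std::printf("%u\n", pendingFrames(4096, 1024));
        // Race after flush: played counter briefly ahead of written -> clamp to 0,
        // not 4294966272 as a naive unsigned subtraction would give.
        std::printf("%u\n", pendingFrames(0, 1024));
        return 0;
    }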
@@ -1249,82 +1254,49 @@
return;
}
- bool needRepostDrainVideoQueue = false;
- int64_t delayUs;
int64_t nowUs = ALooper::GetNowUs();
- int64_t realTimeUs;
if (mFlags & FLAG_REAL_TIME) {
- int64_t mediaTimeUs;
- CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
- realTimeUs = mediaTimeUs;
- } else {
- int64_t mediaTimeUs;
- CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
+ int64_t realTimeUs;
+ CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));
- {
- Mutex::Autolock autoLock(mLock);
- if (mAnchorTimeMediaUs < 0) {
- mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
- mAnchorTimeMediaUs = mediaTimeUs;
- realTimeUs = nowUs;
- } else if (!mVideoSampleReceived) {
- // Always render the first video frame.
- realTimeUs = nowUs;
- } else if (mAudioFirstAnchorTimeMediaUs < 0
- || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) {
- realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
- } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) {
- needRepostDrainVideoQueue = true;
- realTimeUs = nowUs;
- } else {
- realTimeUs = nowUs;
- }
- }
- if (!mHasAudio) {
- // smooth out videos >= 10fps
- mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
- }
+ realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
- // Heuristics to handle situation when media time changed without a
- // discontinuity. If we have not drained an audio buffer that was
- // received after this buffer, repost in 10 msec. Otherwise repost
- // in 500 msec.
- delayUs = realTimeUs - nowUs;
- int64_t postDelayUs = -1;
- if (delayUs > 500000) {
- postDelayUs = 500000;
- if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
- postDelayUs = 10000;
- }
- } else if (needRepostDrainVideoQueue) {
- // CHECK(mPlaybackRate > 0);
- // CHECK(mAudioFirstAnchorTimeMediaUs >= 0);
- // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0);
- postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs;
- postDelayUs /= mPlaybackRate;
- }
+ int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
- if (postDelayUs >= 0) {
- msg->setWhat(kWhatPostDrainVideoQueue);
- msg->post(postDelayUs);
- mVideoScheduler->restart();
- ALOGI("possible video time jump of %dms (%lld : %lld) or uninitialized media clock,"
- " retrying in %dms",
- (int)(delayUs / 1000), (long long)mediaTimeUs,
- (long long)mAudioFirstAnchorTimeMediaUs, (int)(postDelayUs / 1000));
- mDrainVideoQueuePending = true;
- return;
- }
+ int64_t delayUs = realTimeUs - nowUs;
+
+ ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
+ // post 2 display refreshes before rendering is due
+ msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
+
+ mDrainVideoQueuePending = true;
+ return;
}
- realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
- int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
+ int64_t mediaTimeUs;
+ CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
- delayUs = realTimeUs - nowUs;
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mAnchorTimeMediaUs < 0) {
+ mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
+ mAnchorTimeMediaUs = mediaTimeUs;
+ }
+ }
+ mNextVideoTimeMediaUs = mediaTimeUs + 100000;
+ if (!mHasAudio) {
+ // smooth out videos >= 10fps
+ mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ }
- ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
- // post 2 display refreshes before rendering is due
- msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
+ if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
+ msg->post();
+ } else {
+ int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
+
+ // post 2 display refreshes before rendering is due
+ mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
+ }
mDrainVideoQueuePending = true;
}
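Note: the rewritten hunk above replaces the old fixed-delay repost heuristics with a timer registered against media time, offset two vsync periods early so the drain message fires shortly before the frame is due. Below is a toy, self-contained sketch of scheduling against a media timeline with a negative real-time adjustment; ToyMediaClock is an invented stand-in, not the AOSP MediaClock, which additionally accounts for playback rate and anchors.

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <vector>

    // Toy stand-in: fires a callback once the media position reaches
    // mediaTimeUs + adjustUs (adjustUs is negative to fire early).
    struct ToyMediaClock {
        struct Timer { int64_t dueUs; std::function<void()> fn; bool fired; };
        std::vector<Timer> timers;

        void addTimer(std::function<void()> fn, int64_t mediaTimeUs, int64_t adjustUs) {
            timers.push_back({mediaTimeUs + adjustUs, std::move(fn), false});
        }
        void advanceTo(int64_t mediaUs) {
            for (auto &t : timers) {
                if (!t.fired && mediaUs >= t.dueUs) { t.fired = true; t.fn(); }
            }
        }
    };

    int main() {
        ToyMediaClock clock;
        const int64_t frameMediaTimeUs = 100000;   // frame due at 100 ms of media time
        const int64_t twoVsyncsUs = 2 * 16667;     // ~2 display refreshes at 60 Hz
        clock.addTimer([] { std::printf("drain video queue\n"); },
                       frameMediaTimeUs, -twoVsyncsUs);
        clock.advanceTo(60000);   // too early, nothing fires
        clock.advanceTo(67000);   // past (100000 - 33334), timer fires here
        return 0;
    }

Keying the wakeup off media time rather than a precomputed real-time delay means pauses and rate changes no longer require the repost heuristics that the old code needed.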
@@ -1358,6 +1330,7 @@
realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
}
+ realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
bool tooLate = false;
@@ -1444,6 +1417,14 @@
if (audio) {
// Video might outlive audio. Clear anchor to enable video only case.
mAnchorTimeMediaUs = -1;
+ mHasAudio = false;
+ if (mNextVideoTimeMediaUs >= 0) {
+ int64_t mediaUs = 0;
+ mMediaClock->getMediaTime(ALooper::GetNowUs(), &mediaUs);
+ if (mNextVideoTimeMediaUs > mediaUs) {
+ mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
+ }
+ }
}
}
@@ -1640,14 +1621,7 @@
// internal buffer before resuming playback.
// FIXME: this is ignored after flush().
mAudioSink->stop();
- if (mPaused) {
- // Race condition: if renderer is paused and audio sink is stopped,
- // we need to make sure that the audio track buffer fully drains
- // before delivering data.
- // FIXME: remove this if we can detect if stop() is complete.
- const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
- mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
- } else {
+ if (!mPaused) {
mAudioSink->start();
}
mNumFramesWritten = 0;
@@ -1733,6 +1707,8 @@
++mAudioDrainGeneration;
if (mAudioRenderingStartGeneration != -1) {
prepareForMediaRenderingStart_l();
+ // PauseTimeout is applied to offload mode only. Cancel pending timer.
+ cancelAudioOffloadPauseTimeout();
}
}
@@ -1835,6 +1811,12 @@
if (mAudioTornDown) {
return;
}
+
+ // TimeoutWhenPaused is only for offload mode.
+ if (reason == kDueToTimeout && !offloadingAudio()) {
+ return;
+ }
+
mAudioTornDown = true;
int64_t currentPositionUs;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index f58b79c..a047975 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -36,6 +36,7 @@
FLAG_OFFLOAD_AUDIO = 2,
};
Renderer(const sp<MediaPlayerBase::AudioSink> &sink,
+ const sp<MediaClock> &mediaClock,
+ const sp<AMessage> &notify,
uint32_t flags = 0);
@@ -165,7 +166,7 @@
int32_t mVideoDrainGeneration;
int32_t mAudioEOSGeneration;
- sp<MediaClock> mMediaClock;
+ const sp<MediaClock> mMediaClock;
float mPlaybackRate; // audio track rate
AudioPlaybackRate mPlaybackSettings;
@@ -176,6 +177,7 @@
int64_t mAnchorTimeMediaUs;
int64_t mAnchorNumFramesWritten;
int64_t mVideoLateByUs;
+ int64_t mNextVideoTimeMediaUs;
bool mHasAudio;
bool mHasVideo;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 8ba9c0d..9f5ef78 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -66,7 +66,7 @@
: mNotify(notify) {
}
- virtual status_t getDefaultBufferingSettings(
+ virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) = 0;
virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
@@ -121,10 +121,6 @@
return INVALID_OPERATION;
}
- virtual status_t setBuffers(bool /* audio */, Vector<MediaBuffer *> &/* buffers */) {
- return INVALID_OPERATION;
- }
-
virtual bool isRealTime() const {
return false;
}
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 8b3d0dc..851217b 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -62,7 +62,8 @@
mSeekGeneration(0),
mEOSTimeoutAudio(0),
mEOSTimeoutVideo(0) {
- getDefaultBufferingSettings(&mBufferingSettings);
+ mBufferingSettings.mInitialMarkMs = kPrepareMarkMs;
+ mBufferingSettings.mResumePlaybackMarkMs = kOverflowMarkMs;
if (headers) {
mExtraHeaders = *headers;
@@ -84,32 +85,17 @@
}
}
-status_t NuPlayer::RTSPSource::getDefaultBufferingSettings(
+status_t NuPlayer::RTSPSource::getBufferingSettings(
BufferingSettings* buffering /* nonnull */) {
- buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
- buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
- buffering->mInitialWatermarkMs = kPrepareMarkMs;
- buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
- buffering->mRebufferingWatermarkHighMs = kOverflowMarkMs;
-
+ Mutex::Autolock _l(mBufferingSettingsLock);
+ *buffering = mBufferingSettings;
return OK;
}
status_t NuPlayer::RTSPSource::setBufferingSettings(const BufferingSettings& buffering) {
- if (mLooper == NULL) {
- mBufferingSettings = buffering;
- return OK;
- }
-
- sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
- writeToAMessage(msg, buffering);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
-
- return err;
+ Mutex::Autolock _l(mBufferingSettingsLock);
+ mBufferingSettings = buffering;
+ return OK;
}
void NuPlayer::RTSPSource::prepareAsync() {
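Note: the rewritten getter/setter above drops the message round-trip through the source's looper and instead guards the settings struct with a dedicated mutex, so callers on any thread read or write it directly. A minimal sketch of that pattern follows; the Settings and Source types are illustrative, not the AOSP BufferingSettings API.

    #include <cstdio>
    #include <mutex>

    struct Settings {
        int initialMarkMs;
        int resumePlaybackMarkMs;
    };

    class Source {
    public:
        Settings getBufferingSettings() {
            std::lock_guard<std::mutex> lock(mSettingsLock);
            return mSettings;           // return a copy taken under the lock
        }
        void setBufferingSettings(const Settings &s) {
            std::lock_guard<std::mutex> lock(mSettingsLock);
            mSettings = s;
        }
    private:
        std::mutex mSettingsLock;       // guards only mSettings, nothing else
        Settings mSettings{0, 0};
    };

    int main() {
        Source src;
        Settings wanted{2000, 5000};
        src.setBufferingSettings(wanted);
        Settings s = src.getBufferingSettings();
        std::printf("initial=%d ms resume=%d ms\n", s.initialMarkMs, s.resumePlaybackMarkMs);
        return 0;
    }

Because readers take a copy under the lock, the polling code later in the hunk can snapshot the marks once and then do its comparisons without holding the lock.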
@@ -356,8 +342,17 @@
}
int64_t bufferedDurationUs = src->getBufferedDurationUs(&finalResult);
+ int64_t initialMarkUs;
+ int64_t maxRebufferingMarkUs;
+ {
+ Mutex::Autolock _l(mBufferingSettingsLock);
+ initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000ll;
+ // TODO: maxRebufferingMarkUs could be larger than
+ // mBufferingSettings.mResumePlaybackMarkMs * 1000ll.
+ maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000ll;
+ }
// isFinished when duration is 0 checks for EOS result only
- if (bufferedDurationUs > mBufferingSettings.mInitialWatermarkMs * 1000
+ if (bufferedDurationUs > initialMarkUs
|| src->isFinished(/* duration */ 0)) {
++preparedCount;
}
@@ -366,15 +361,15 @@
++overflowCount;
++finishedCount;
} else {
- if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000) {
+ // TODO: redefine kUnderflowMarkMs to a fair value,
+ if (bufferedDurationUs < kUnderflowMarkMs * 1000) {
++underflowCount;
}
- if (bufferedDurationUs > mBufferingSettings.mRebufferingWatermarkHighMs * 1000) {
+ if (bufferedDurationUs > maxRebufferingMarkUs) {
++overflowCount;
}
int64_t startServerMarkUs =
- (mBufferingSettings.mRebufferingWatermarkLowMs
- + mBufferingSettings.mRebufferingWatermarkHighMs) / 2 * 1000ll;
+ (kUnderflowMarkMs * 1000ll + maxRebufferingMarkUs) / 2;
if (bufferedDurationUs < startServerMarkUs) {
++startCount;
}
@@ -512,36 +507,6 @@
} else if (msg->what() == kWhatSignalEOS) {
onSignalEOS(msg);
return;
- } else if (msg->what() == kWhatSetBufferingSettings) {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- BufferingSettings buffering;
- readFromAMessage(msg, &buffering);
-
- status_t err = OK;
- if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
- || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
- || (buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs
- && buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode))) {
- err = BAD_VALUE;
- } else {
- if (buffering.mInitialBufferingMode == BUFFERING_MODE_NONE) {
- buffering.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
- }
- if (buffering.mRebufferingMode == BUFFERING_MODE_NONE) {
- buffering.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
- buffering.mRebufferingWatermarkHighMs = INT32_MAX;
- }
-
- mBufferingSettings = buffering;
- }
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
-
- return;
}
CHECK_EQ(msg->what(), kWhatNotify);
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 0812991..03fce08 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -40,7 +40,7 @@
uid_t uid = 0,
bool isSDP = false);
- virtual status_t getDefaultBufferingSettings(
+ virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) override;
virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
@@ -71,7 +71,6 @@
kWhatPerformSeek = 'seek',
kWhatPollBuffering = 'poll',
kWhatSignalEOS = 'eos ',
- kWhatSetBufferingSettings = 'sBuS',
};
enum State {
@@ -109,6 +108,8 @@
bool mBuffering;
bool mInPreparationPhase;
bool mEOSPending;
+
+ Mutex mBufferingSettingsLock;
BufferingSettings mBufferingSettings;
sp<ALooper> mLooper;
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index fc0803b..b3da53f 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -24,10 +24,11 @@
#include "AnotherPacketSource.h"
#include "NuPlayerStreamListener.h"
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/foundation/MediaKeys.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
@@ -51,19 +52,14 @@
}
}
-status_t NuPlayer::StreamingSource::getDefaultBufferingSettings(
+status_t NuPlayer::StreamingSource::getBufferingSettings(
BufferingSettings *buffering /* nonnull */) {
*buffering = BufferingSettings();
return OK;
}
status_t NuPlayer::StreamingSource::setBufferingSettings(
- const BufferingSettings &buffering) {
- if (buffering.mInitialBufferingMode != BUFFERING_MODE_NONE
- || buffering.mRebufferingMode != BUFFERING_MODE_NONE) {
- return BAD_VALUE;
- }
-
+ const BufferingSettings & /* buffering */) {
return OK;
}
@@ -119,7 +115,7 @@
int32_t mask;
if (extra != NULL
&& extra->findInt32(
- IStreamListener::kKeyDiscontinuityMask, &mask)) {
+ kIStreamListenerKeyDiscontinuityMask, &mask)) {
if (mask == 0) {
ALOGE("Client specified an illegal discontinuity type.");
setError(ERROR_UNSUPPORTED);
@@ -147,7 +143,7 @@
int64_t mediaTimeUs;
memcpy(&mediaTimeUs, &buffer[2], sizeof(mediaTimeUs));
- extra->setInt64(IStreamListener::kKeyMediaTimeUs, mediaTimeUs);
+ extra->setInt64(kATSParserKeyMediaTimeUs, mediaTimeUs);
}
mTSParser->signalDiscontinuity(
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.h b/media/libmediaplayerservice/nuplayer/StreamingSource.h
index 2e1d2b3..76d1d0b 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.h
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.h
@@ -32,7 +32,7 @@
const sp<AMessage> &notify,
const sp<IStreamSource> &source);
- virtual status_t getDefaultBufferingSettings(
+ virtual status_t getBufferingSettings(
BufferingSettings* buffering /* nonnull */) override;
virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
diff --git a/media/libmediaplayerservice/tests/Android.bp b/media/libmediaplayerservice/tests/Android.bp
new file mode 100644
index 0000000..e86b68a
--- /dev/null
+++ b/media/libmediaplayerservice/tests/Android.bp
@@ -0,0 +1,23 @@
+cc_test {
+
+ name: "DrmSessionManager_test",
+
+ srcs: ["DrmSessionManager_test.cpp"],
+
+ shared_libs: [
+ "liblog",
+ "libmediaplayerservice",
+ "libmediadrm",
+ "libutils",
+ "android.hardware.drm@1.0",
+ "android.hardware.drm@1.1",
+ ],
+
+ compile_multilib: "32",
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+}
diff --git a/media/libmediaplayerservice/tests/Android.mk b/media/libmediaplayerservice/tests/Android.mk
deleted file mode 100644
index 0b9b85f..0000000
--- a/media/libmediaplayerservice/tests/Android.mk
+++ /dev/null
@@ -1,28 +0,0 @@
-# Build the unit tests.
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := DrmSessionManager_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
- DrmSessionManager_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- liblog \
- libmediaplayerservice \
- libmediadrm \
- libutils \
- android.hardware.drm@1.0 \
-
-LOCAL_C_INCLUDES := \
- frameworks/av/include \
- frameworks/av/media/libmediaplayerservice \
-
-LOCAL_CFLAGS += -Werror -Wall
-
-LOCAL_32_BIT_ONLY := true
-
-include $(BUILD_NATIVE_TEST)
-
diff --git a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
index c5212fc..d81ee05 100644
--- a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
+++ b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
@@ -20,11 +20,11 @@
#include <gtest/gtest.h>
-#include <media/Drm.h>
-#include <media/DrmSessionClientInterface.h>
-#include <media/DrmSessionManager.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/ProcessInfoInterface.h>
+#include <mediadrm/DrmHal.h>
+#include <mediadrm/DrmSessionClientInterface.h>
+#include <mediadrm/DrmSessionManager.h>
namespace android {
diff --git a/media/libnbaio/Android.bp b/media/libnbaio/Android.bp
index 4220b77..a4df38d 100644
--- a/media/libnbaio/Android.bp
+++ b/media/libnbaio/Android.bp
@@ -41,11 +41,8 @@
"AudioBufferProviderSource.cpp",
"AudioStreamInSource.cpp",
"AudioStreamOutSink.cpp",
- "NBLog.cpp",
- "PerformanceAnalysis.cpp",
"Pipe.cpp",
"PipeReader.cpp",
- "ReportPerformance.cpp",
"SourceAudioBufferProvider.cpp",
],
diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp
deleted file mode 100644
index 827cba9..0000000
--- a/media/libnbaio/NBLog.cpp
+++ /dev/null
@@ -1,1215 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-* Documentation: Workflow summary for histogram data processing:
-* For more details on FIFO, please see system/media/audio_utils; doxygen
-* TODO: add this documentation to doxygen once it is further developed
-* 1) Writing buffer period timestamp to the circular buffer
-* onWork()
-* Called every period length (e.g., 4ms)
-* Calls LOG_HIST_TS
-* LOG_HIST_TS
-* Hashes file name and line number, and writes single timestamp to buffer
-* calls NBLOG::Writer::logEventHistTS once
-* NBLOG::Writer::logEventHistTS
-* calls NBLOG::Writer::log on hash and current timestamp
-* time is in CLOCK_MONOTONIC converted to ns
-* NBLOG::Writer::log(Event, const void*, size_t)
-* Initializes Entry, a struct containing one log entry
-* Entry contains the event type (mEvent), data length (mLength),
-* and data pointer (mData)
-* TODO: why mLength (max length of buffer data) must be <= kMaxLength = 255?
-* calls NBLOG::Writer::log(Entry *, bool)
-* NBLog::Writer::log(Entry *, bool)
-* Calls copyEntryDataAt to format data as follows in temp array:
-* [type][length][data ... ][length]
-* calls audio_utils_fifo_writer.write on temp
-* audio_utils_fifo_writer.write
-* calls obtain(), memcpy (reference in doxygen)
-* returns number of frames written
-* ssize_t audio_utils_fifo_reader::obtain
-* Determines readable buffer section via pointer arithmetic on reader
-* and writer pointers
-* Similarly, LOG_AUDIO_STATE() is called by onStateChange whenever audio is
-* turned on or off, and writes this notification to the FIFO.
-*
-* 2) reading the data from shared memory
-* Thread::threadloop()
-* TODO: add description?
-* NBLog::MergeThread::threadLoop()
-* calls NBLog::Merger::merge
-* NBLog::Merger::merge
-* Merges snapshots sorted by timestamp
-* for each reader in vector of class NamedReader,
-* callsNamedReader::reader()->getSnapshot
-* TODO: check whether the rest of this function is relevant
-* NBLog::Reader::getSnapshot
-* copies snapshot of reader's fifo buffer into its own buffer
-* calls mFifoReader->obtain to find readable data
-* sets snapshot.begin() and .end() iterators to boundaries of valid entries
-* moves the fifo reader index to after the last entry read
-* in this case, the buffer is in shared memory. in (4), the buffer is private
-*
-* 3) reading the data from private buffer
-* MediaLogService::dump
-* calls NBLog::Reader::dump(CONSOLE)
-* The private buffer contains all logs for all readers in shared memory
-* NBLog::Reader::dump(int)
-* calls getSnapshot on the current reader
-* calls dump(int, size_t, Snapshot)
-* NBLog::Reader::dump(int, size, snapshot)
-* iterates through snapshot's events and switches based on their type
-* (string, timestamp, etc...)
-* In the case of EVENT_HISTOGRAM_ENTRY_TS, adds a list of timestamp sequences
-* (histogram entry) to NBLog::mHists
-* TODO: add every HISTOGRAM_ENTRY_TS to two
-* circular buffers: one short-term and one long-term (can add even longer-term
-* structures in the future). When dump is called, print everything currently
-* in the buffer.
-* NBLog::drawHistogram
-* input: timestamp array
-* buckets this to a histogram and prints
-*
-*/
-
-#define LOG_TAG "NBLog"
-// #define LOG_NDEBUG 0
-
-#include <algorithm>
-#include <climits>
-#include <deque>
-#include <fstream>
-// #include <inttypes.h>
-#include <iostream>
-#include <math.h>
-#include <numeric>
-#include <vector>
-#include <stdarg.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/prctl.h>
-#include <time.h>
-#include <new>
-#include <audio_utils/roundup.h>
-#include <media/nbaio/NBLog.h>
-#include <media/nbaio/PerformanceAnalysis.h>
-#include <media/nbaio/ReportPerformance.h>
-// #include <utils/CallStack.h> // used to print callstack
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <queue>
-#include <utility>
-
-namespace android {
-
-int NBLog::Entry::copyEntryDataAt(size_t offset) const
-{
- // FIXME This is too slow
- if (offset == 0)
- return mEvent;
- else if (offset == 1)
- return mLength;
- else if (offset < (size_t) (mLength + 2))
- return ((char *) mData)[offset - 2];
- else if (offset == (size_t) (mLength + 2))
- return mLength;
- else
- return 0;
-}
-
-// ---------------------------------------------------------------------------
-
-/*static*/
-std::unique_ptr<NBLog::AbstractEntry> NBLog::AbstractEntry::buildEntry(const uint8_t *ptr) {
- const uint8_t type = EntryIterator(ptr)->type;
- switch (type) {
- case EVENT_START_FMT:
- return std::make_unique<FormatEntry>(FormatEntry(ptr));
- case EVENT_AUDIO_STATE:
- case EVENT_HISTOGRAM_ENTRY_TS:
- return std::make_unique<HistogramEntry>(HistogramEntry(ptr));
- default:
- ALOGW("Tried to create AbstractEntry of type %d", type);
- return nullptr;
- }
-}
-
-NBLog::AbstractEntry::AbstractEntry(const uint8_t *entry) : mEntry(entry) {
-}
-
-// ---------------------------------------------------------------------------
-
-NBLog::EntryIterator NBLog::FormatEntry::begin() const {
- return EntryIterator(mEntry);
-}
-
-const char *NBLog::FormatEntry::formatString() const {
- return (const char*) mEntry + offsetof(entry, data);
-}
-
-size_t NBLog::FormatEntry::formatStringLength() const {
- return mEntry[offsetof(entry, length)];
-}
-
-NBLog::EntryIterator NBLog::FormatEntry::args() const {
- auto it = begin();
- // skip start fmt
- ++it;
- // skip timestamp
- ++it;
- // skip hash
- ++it;
- // Skip author if present
- if (it->type == EVENT_AUTHOR) {
- ++it;
- }
- return it;
-}
-
-int64_t NBLog::FormatEntry::timestamp() const {
- auto it = begin();
- // skip start fmt
- ++it;
- return it.payload<int64_t>();
-}
-
-NBLog::log_hash_t NBLog::FormatEntry::hash() const {
- auto it = begin();
- // skip start fmt
- ++it;
- // skip timestamp
- ++it;
- // unaligned 64-bit read not supported
- log_hash_t hash;
- memcpy(&hash, it->data, sizeof(hash));
- return hash;
-}
-
-int NBLog::FormatEntry::author() const {
- auto it = begin();
- // skip start fmt
- ++it;
- // skip timestamp
- ++it;
- // skip hash
- ++it;
- // if there is an author entry, return it, return -1 otherwise
- if (it->type == EVENT_AUTHOR) {
- return it.payload<int>();
- }
- return -1;
-}
-
-NBLog::EntryIterator NBLog::FormatEntry::copyWithAuthor(
- std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
- auto it = begin();
- // copy fmt start entry
- it.copyTo(dst);
- // copy timestamp
- (++it).copyTo(dst); // copy hash
- (++it).copyTo(dst);
- // insert author entry
- size_t authorEntrySize = NBLog::Entry::kOverhead + sizeof(author);
- uint8_t authorEntry[authorEntrySize];
- authorEntry[offsetof(entry, type)] = EVENT_AUTHOR;
- authorEntry[offsetof(entry, length)] =
- authorEntry[authorEntrySize + NBLog::Entry::kPreviousLengthOffset] =
- sizeof(author);
- *(int*) (&authorEntry[offsetof(entry, data)]) = author;
- dst->write(authorEntry, authorEntrySize);
- // copy rest of entries
- while ((++it)->type != EVENT_END_FMT) {
- it.copyTo(dst);
- }
- it.copyTo(dst);
- ++it;
- return it;
-}
-
-void NBLog::EntryIterator::copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const {
- size_t length = ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
- dst->write(ptr, length);
-}
-
-void NBLog::EntryIterator::copyData(uint8_t *dst) const {
- memcpy((void*) dst, ptr + offsetof(entry, data), ptr[offsetof(entry, length)]);
-}
-
-NBLog::EntryIterator::EntryIterator()
- : ptr(nullptr) {}
-
-NBLog::EntryIterator::EntryIterator(const uint8_t *entry)
- : ptr(entry) {}
-
-NBLog::EntryIterator::EntryIterator(const NBLog::EntryIterator &other)
- : ptr(other.ptr) {}
-
-const NBLog::entry& NBLog::EntryIterator::operator*() const {
- return *(entry*) ptr;
-}
-
-const NBLog::entry* NBLog::EntryIterator::operator->() const {
- return (entry*) ptr;
-}
-
-NBLog::EntryIterator& NBLog::EntryIterator::operator++() {
- ptr += ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
- return *this;
-}
-
-NBLog::EntryIterator& NBLog::EntryIterator::operator--() {
- ptr -= ptr[NBLog::Entry::kPreviousLengthOffset] + NBLog::Entry::kOverhead;
- return *this;
-}
-
-NBLog::EntryIterator NBLog::EntryIterator::next() const {
- EntryIterator aux(*this);
- return ++aux;
-}
-
-NBLog::EntryIterator NBLog::EntryIterator::prev() const {
- EntryIterator aux(*this);
- return --aux;
-}
-
-int NBLog::EntryIterator::operator-(const NBLog::EntryIterator &other) const {
- return ptr - other.ptr;
-}
-
-bool NBLog::EntryIterator::operator!=(const EntryIterator &other) const {
- return ptr != other.ptr;
-}
-
-bool NBLog::EntryIterator::hasConsistentLength() const {
- return ptr[offsetof(entry, length)] == ptr[ptr[offsetof(entry, length)] +
- NBLog::Entry::kOverhead + NBLog::Entry::kPreviousLengthOffset];
-}
-
-// ---------------------------------------------------------------------------
-
-int64_t NBLog::HistogramEntry::timestamp() const {
- return EntryIterator(mEntry).payload<HistTsEntry>().ts;
-}
-
-NBLog::log_hash_t NBLog::HistogramEntry::hash() const {
- return EntryIterator(mEntry).payload<HistTsEntry>().hash;
-}
-
-int NBLog::HistogramEntry::author() const {
- EntryIterator it(mEntry);
- if (it->length == sizeof(HistTsEntryWithAuthor)) {
- return it.payload<HistTsEntryWithAuthor>().author;
- } else {
- return -1;
- }
-}
-
-NBLog::EntryIterator NBLog::HistogramEntry::copyWithAuthor(
- std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
- // Current histogram entry has {type, length, struct HistTsEntry, length}.
- // We now want {type, length, struct HistTsEntryWithAuthor, length}
- uint8_t buffer[Entry::kOverhead + sizeof(HistTsEntryWithAuthor)];
- // Copy content until the point we want to add the author
- memcpy(buffer, mEntry, sizeof(entry) + sizeof(HistTsEntry));
- // Copy the author
- *(int*) (buffer + sizeof(entry) + sizeof(HistTsEntry)) = author;
- // Update lengths
- buffer[offsetof(entry, length)] = sizeof(HistTsEntryWithAuthor);
- buffer[offsetof(entry, data) + sizeof(HistTsEntryWithAuthor) + offsetof(ending, length)]
- = sizeof(HistTsEntryWithAuthor);
- // Write new buffer into FIFO
- dst->write(buffer, sizeof(buffer));
- return EntryIterator(mEntry).next();
-}
-
-// ---------------------------------------------------------------------------
-
-#if 0 // FIXME see note in NBLog.h
-NBLog::Timeline::Timeline(size_t size, void *shared)
- : mSize(roundup(size)), mOwn(shared == NULL),
- mShared((Shared *) (mOwn ? new char[sharedSize(size)] : shared))
-{
- new (mShared) Shared;
-}
-
-NBLog::Timeline::~Timeline()
-{
- mShared->~Shared();
- if (mOwn) {
- delete[] (char *) mShared;
- }
-}
-#endif
-
-/*static*/
-size_t NBLog::Timeline::sharedSize(size_t size)
-{
- // TODO fifo now supports non-power-of-2 buffer sizes, so could remove the roundup
- return sizeof(Shared) + roundup(size);
-}
-
-// ---------------------------------------------------------------------------
-
-NBLog::Writer::Writer()
- : mShared(NULL), mFifo(NULL), mFifoWriter(NULL), mEnabled(false), mPidTag(NULL), mPidTagSize(0)
-{
-}
-
-NBLog::Writer::Writer(void *shared, size_t size)
- : mShared((Shared *) shared),
- mFifo(mShared != NULL ?
- new audio_utils_fifo(size, sizeof(uint8_t),
- mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
- mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL),
- mEnabled(mFifoWriter != NULL)
-{
- // caching pid and process name
- pid_t id = ::getpid();
- char procName[16];
- int status = prctl(PR_GET_NAME, procName);
- if (status) { // error getting process name
- procName[0] = '\0';
- }
- size_t length = strlen(procName);
- mPidTagSize = length + sizeof(pid_t);
- mPidTag = new char[mPidTagSize];
- memcpy(mPidTag, &id, sizeof(pid_t));
- memcpy(mPidTag + sizeof(pid_t), procName, length);
-}
-
-NBLog::Writer::Writer(const sp<IMemory>& iMemory, size_t size)
- : Writer(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
-{
- mIMemory = iMemory;
-}
-
-NBLog::Writer::~Writer()
-{
- delete mFifoWriter;
- delete mFifo;
- delete[] mPidTag;
-}
-
-void NBLog::Writer::log(const char *string)
-{
- if (!mEnabled) {
- return;
- }
- LOG_ALWAYS_FATAL_IF(string == NULL, "Attempted to log NULL string");
- size_t length = strlen(string);
- if (length > Entry::kMaxLength) {
- length = Entry::kMaxLength;
- }
- log(EVENT_STRING, string, length);
-}
-
-void NBLog::Writer::logf(const char *fmt, ...)
-{
- if (!mEnabled) {
- return;
- }
- va_list ap;
- va_start(ap, fmt);
- Writer::logvf(fmt, ap); // the Writer:: is needed to avoid virtual dispatch for LockedWriter
- va_end(ap);
-}
-
-void NBLog::Writer::logvf(const char *fmt, va_list ap)
-{
- if (!mEnabled) {
- return;
- }
- char buffer[Entry::kMaxLength + 1 /*NUL*/];
- int length = vsnprintf(buffer, sizeof(buffer), fmt, ap);
- if (length >= (int) sizeof(buffer)) {
- length = sizeof(buffer) - 1;
- // NUL termination is not required
- // buffer[length] = '\0';
- }
- if (length >= 0) {
- log(EVENT_STRING, buffer, length);
- }
-}
-
-void NBLog::Writer::logTimestamp()
-{
- if (!mEnabled) {
- return;
- }
- int64_t ts = get_monotonic_ns();
- if (ts > 0) {
- log(EVENT_TIMESTAMP, &ts, sizeof(ts));
- } else {
- ALOGE("Failed to get timestamp");
- }
-}
-
-void NBLog::Writer::logTimestamp(const int64_t ts)
-{
- if (!mEnabled) {
- return;
- }
- log(EVENT_TIMESTAMP, &ts, sizeof(ts));
-}
-
-void NBLog::Writer::logInteger(const int x)
-{
- if (!mEnabled) {
- return;
- }
- log(EVENT_INTEGER, &x, sizeof(x));
-}
-
-void NBLog::Writer::logFloat(const float x)
-{
- if (!mEnabled) {
- return;
- }
- log(EVENT_FLOAT, &x, sizeof(x));
-}
-
-void NBLog::Writer::logPID()
-{
- if (!mEnabled) {
- return;
- }
- log(EVENT_PID, mPidTag, mPidTagSize);
-}
-
-void NBLog::Writer::logStart(const char *fmt)
-{
- if (!mEnabled) {
- return;
- }
- size_t length = strlen(fmt);
- if (length > Entry::kMaxLength) {
- length = Entry::kMaxLength;
- }
- log(EVENT_START_FMT, fmt, length);
-}
-
-void NBLog::Writer::logEnd()
-{
- if (!mEnabled) {
- return;
- }
- Entry entry = Entry(EVENT_END_FMT, NULL, 0);
- log(&entry, true);
-}
-
-void NBLog::Writer::logHash(log_hash_t hash)
-{
- if (!mEnabled) {
- return;
- }
- log(EVENT_HASH, &hash, sizeof(hash));
-}
-
-void NBLog::Writer::logEventHistTs(Event event, log_hash_t hash)
-{
- if (!mEnabled) {
- return;
- }
- HistTsEntry data;
- data.hash = hash;
- data.ts = get_monotonic_ns();
- if (data.ts > 0) {
- log(event, &data, sizeof(data));
- } else {
- ALOGE("Failed to get timestamp");
- }
-}
-
-void NBLog::Writer::logFormat(const char *fmt, log_hash_t hash, ...)
-{
- if (!mEnabled) {
- return;
- }
-
- va_list ap;
- va_start(ap, hash);
- Writer::logVFormat(fmt, hash, ap);
- va_end(ap);
-}
-
-void NBLog::Writer::logVFormat(const char *fmt, log_hash_t hash, va_list argp)
-{
- if (!mEnabled) {
- return;
- }
- Writer::logStart(fmt);
- int i;
- double f;
- char* s;
- int64_t t;
- Writer::logTimestamp();
- Writer::logHash(hash);
- for (const char *p = fmt; *p != '\0'; p++) {
- // TODO: implement more complex formatting such as %.3f
- if (*p != '%') {
- continue;
- }
- switch(*++p) {
- case 's': // string
- s = va_arg(argp, char *);
- Writer::log(s);
- break;
-
- case 't': // timestamp
- t = va_arg(argp, int64_t);
- Writer::logTimestamp(t);
- break;
-
- case 'd': // integer
- i = va_arg(argp, int);
- Writer::logInteger(i);
- break;
-
- case 'f': // float
- f = va_arg(argp, double); // float arguments are promoted to double in vararg lists
- Writer::logFloat((float)f);
- break;
-
- case 'p': // pid
- Writer::logPID();
- break;
-
- // the "%\0" case finishes parsing
- case '\0':
- --p;
- break;
-
- case '%':
- break;
-
- default:
- ALOGW("NBLog Writer parsed invalid format specifier: %c", *p);
- break;
- }
- }
- Writer::logEnd();
-}
-
-void NBLog::Writer::log(Event event, const void *data, size_t length)
-{
- if (!mEnabled) {
- return;
- }
- if (data == NULL || length > Entry::kMaxLength) {
- // TODO Perhaps it makes sense to display truncated data or at least a
- // message that the data is too long? The current behavior can create
- // a confusion for a programmer debugging their code.
- return;
- }
- // Ignore if invalid event
- if (event == EVENT_RESERVED || event >= EVENT_UPPER_BOUND) {
- return;
- }
- Entry etr(event, data, length);
- log(&etr, true /*trusted*/);
-}
-
-void NBLog::Writer::log(const NBLog::Entry *etr, bool trusted)
-{
- if (!mEnabled) {
- return;
- }
- if (!trusted) {
- log(etr->mEvent, etr->mData, etr->mLength);
- return;
- }
- size_t need = etr->mLength + Entry::kOverhead; // mEvent, mLength, data[mLength], mLength
- // need = number of bytes written to FIFO
-
- // FIXME optimize this using memcpy for the data part of the Entry.
- // The Entry could have a method copyTo(ptr, offset, size) to optimize the copy.
- // checks size of a single log Entry: type, length, data pointer and ending
- uint8_t temp[Entry::kMaxLength + Entry::kOverhead];
- // write this data to temp array
- for (size_t i = 0; i < need; i++) {
- temp[i] = etr->copyEntryDataAt(i);
- }
- // write to circular buffer
- mFifoWriter->write(temp, need);
-}
-
-bool NBLog::Writer::isEnabled() const
-{
- return mEnabled;
-}
-
-bool NBLog::Writer::setEnabled(bool enabled)
-{
- bool old = mEnabled;
- mEnabled = enabled && mShared != NULL;
- return old;
-}
-
-// ---------------------------------------------------------------------------
-
-NBLog::LockedWriter::LockedWriter()
- : Writer()
-{
-}
-
-NBLog::LockedWriter::LockedWriter(void *shared, size_t size)
- : Writer(shared, size)
-{
-}
-
-void NBLog::LockedWriter::log(const char *string)
-{
- Mutex::Autolock _l(mLock);
- Writer::log(string);
-}
-
-void NBLog::LockedWriter::logf(const char *fmt, ...)
-{
- // FIXME should not take the lock until after formatting is done
- Mutex::Autolock _l(mLock);
- va_list ap;
- va_start(ap, fmt);
- Writer::logvf(fmt, ap);
- va_end(ap);
-}
-
-void NBLog::LockedWriter::logvf(const char *fmt, va_list ap)
-{
- // FIXME should not take the lock until after formatting is done
- Mutex::Autolock _l(mLock);
- Writer::logvf(fmt, ap);
-}
-
-void NBLog::LockedWriter::logTimestamp()
-{
- // FIXME should not take the lock until after the clock_gettime() syscall
- Mutex::Autolock _l(mLock);
- Writer::logTimestamp();
-}
-
-void NBLog::LockedWriter::logTimestamp(const int64_t ts)
-{
- Mutex::Autolock _l(mLock);
- Writer::logTimestamp(ts);
-}
-
-void NBLog::LockedWriter::logInteger(const int x)
-{
- Mutex::Autolock _l(mLock);
- Writer::logInteger(x);
-}
-
-void NBLog::LockedWriter::logFloat(const float x)
-{
- Mutex::Autolock _l(mLock);
- Writer::logFloat(x);
-}
-
-void NBLog::LockedWriter::logPID()
-{
- Mutex::Autolock _l(mLock);
- Writer::logPID();
-}
-
-void NBLog::LockedWriter::logStart(const char *fmt)
-{
- Mutex::Autolock _l(mLock);
- Writer::logStart(fmt);
-}
-
-
-void NBLog::LockedWriter::logEnd()
-{
- Mutex::Autolock _l(mLock);
- Writer::logEnd();
-}
-
-void NBLog::LockedWriter::logHash(log_hash_t hash)
-{
- Mutex::Autolock _l(mLock);
- Writer::logHash(hash);
-}
-
-bool NBLog::LockedWriter::isEnabled() const
-{
- Mutex::Autolock _l(mLock);
- return Writer::isEnabled();
-}
-
-bool NBLog::LockedWriter::setEnabled(bool enabled)
-{
- Mutex::Autolock _l(mLock);
- return Writer::setEnabled(enabled);
-}
-
-// ---------------------------------------------------------------------------
-
-const std::set<NBLog::Event> NBLog::Reader::startingTypes {NBLog::Event::EVENT_START_FMT,
- NBLog::Event::EVENT_HISTOGRAM_ENTRY_TS};
-const std::set<NBLog::Event> NBLog::Reader::endingTypes {NBLog::Event::EVENT_END_FMT,
- NBLog::Event::EVENT_HISTOGRAM_ENTRY_TS,
- NBLog::Event::EVENT_AUDIO_STATE};
-
-NBLog::Reader::Reader(const void *shared, size_t size)
- : mShared((/*const*/ Shared *) shared), /*mIMemory*/
- mFd(-1), mIndent(0),
- mFifo(mShared != NULL ?
- new audio_utils_fifo(size, sizeof(uint8_t),
- mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
- mFifoReader(mFifo != NULL ? new audio_utils_fifo_reader(*mFifo) : NULL)
-{
-}
-
-NBLog::Reader::Reader(const sp<IMemory>& iMemory, size_t size)
- : Reader(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
-{
- mIMemory = iMemory;
-}
-
-NBLog::Reader::~Reader()
-{
- delete mFifoReader;
- delete mFifo;
-}
-
-const uint8_t *NBLog::Reader::findLastEntryOfTypes(const uint8_t *front, const uint8_t *back,
- const std::set<Event> &types) {
- while (back + Entry::kPreviousLengthOffset >= front) {
- const uint8_t *prev = back - back[Entry::kPreviousLengthOffset] - Entry::kOverhead;
- if (prev < front || prev + prev[offsetof(entry, length)] +
- Entry::kOverhead != back) {
-
- // prev points to an out of limits or inconsistent entry
- return nullptr;
- }
- if (types.find((const Event) prev[offsetof(entry, type)]) != types.end()) {
- return prev;
- }
- back = prev;
- }
- return nullptr; // no entry found
-}
-
-std::unique_ptr<NBLog::Reader::Snapshot> NBLog::Reader::getSnapshot()
-{
- if (mFifoReader == NULL) {
- return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
- }
- // make a copy to avoid race condition with writer
- size_t capacity = mFifo->capacity();
-
- // This emulates the behaviour of audio_utils_fifo_reader::read, but without incrementing the
- // reader index. The index is incremented after handling corruption, to after the last complete
- // entry of the buffer
- size_t lost;
- audio_utils_iovec iovec[2];
- ssize_t availToRead = mFifoReader->obtain(iovec, capacity, NULL /*timeout*/, &lost);
- if (availToRead <= 0) {
- return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
- }
-
- std::unique_ptr<Snapshot> snapshot(new Snapshot(availToRead));
- memcpy(snapshot->mData, (const char *) mFifo->buffer() + iovec[0].mOffset, iovec[0].mLength);
- if (iovec[1].mLength > 0) {
- memcpy(snapshot->mData + (iovec[0].mLength),
- (const char *) mFifo->buffer() + iovec[1].mOffset, iovec[1].mLength);
- }
-
- // Handle corrupted buffer
- // Potentially, a buffer has corrupted data on both beginning (due to overflow) and end
- // (due to incomplete format entry). But even if the end format entry is incomplete,
- // it ends in a complete entry (which is not an END_FMT). So is safe to traverse backwards.
- // TODO: handle client corruption (in the middle of a buffer)
-
- const uint8_t *back = snapshot->mData + availToRead;
- const uint8_t *front = snapshot->mData;
-
- // Find last END_FMT. <back> is sitting on an entry which might be the middle of a FormatEntry.
- // We go backwards until we find an EVENT_END_FMT.
- const uint8_t *lastEnd = findLastEntryOfTypes(front, back, endingTypes);
- if (lastEnd == nullptr) {
- snapshot->mEnd = snapshot->mBegin = EntryIterator(front);
- } else {
- // end of snapshot points to after last END_FMT entry
- snapshot->mEnd = EntryIterator(lastEnd).next();
- // find first START_FMT
- const uint8_t *firstStart = nullptr;
- const uint8_t *firstStartTmp = snapshot->mEnd;
- while ((firstStartTmp = findLastEntryOfTypes(front, firstStartTmp, startingTypes))
- != nullptr) {
- firstStart = firstStartTmp;
- }
- // firstStart is null if no START_FMT entry was found before lastEnd
- if (firstStart == nullptr) {
- snapshot->mBegin = snapshot->mEnd;
- } else {
- snapshot->mBegin = EntryIterator(firstStart);
- }
- }
-
- // advance fifo reader index to after last entry read.
- mFifoReader->release(snapshot->mEnd - front);
-
- snapshot->mLost = lost;
- return snapshot;
-
-}
-
-// TODO: move this to PerformanceAnalysis
-// TODO: make call to dump periodic so that data in shared FIFO does not get overwritten
-void NBLog::Reader::dump(int fd, size_t indent, NBLog::Reader::Snapshot &snapshot)
-{
- mFd = fd;
- mIndent = indent;
- String8 timestamp, body;
- // FIXME: this is not thread safe
- // TODO: need a separate instance of performanceAnalysis for each thread
- // used to store data and to call analysis functions
- static ReportPerformance::PerformanceAnalysis performanceAnalysis;
- size_t lost = snapshot.lost() + (snapshot.begin() - EntryIterator(snapshot.data()));
- if (lost > 0) {
- body.appendFormat("warning: lost %zu bytes worth of events", lost);
- // TODO timestamp empty here, only other choice to wait for the first timestamp event in the
- // log to push it out. Consider keeping the timestamp/body between calls to copyEntryDataAt().
- dumpLine(timestamp, body);
- }
-
- for (auto entry = snapshot.begin(); entry != snapshot.end();) {
- switch (entry->type) {
- case EVENT_START_FMT:
- entry = handleFormat(FormatEntry(entry), &timestamp, &body);
- break;
- case EVENT_HISTOGRAM_ENTRY_TS: {
- HistTsEntryWithAuthor *data = (HistTsEntryWithAuthor *) (entry->data);
- // TODO This memcpies are here to avoid unaligned memory access crash.
- // There's probably a more efficient way to do it
- log_hash_t hash;
- memcpy(&hash, &(data->hash), sizeof(hash));
- int64_t ts;
- memcpy(&ts, &data->ts, sizeof(ts));
- performanceAnalysis.logTsEntry(ts);
- ++entry;
- break;
- }
- case EVENT_AUDIO_STATE: {
- performanceAnalysis.handleStateChange();
- ++entry;
- break;
- }
- case EVENT_END_FMT:
- body.appendFormat("warning: got to end format event");
- ++entry;
- break;
- case EVENT_RESERVED:
- default:
- body.appendFormat("warning: unexpected event %d", entry->type);
- ++entry;
- break;
- }
- }
- performanceAnalysis.reportPerformance(&body);
- if (!body.isEmpty()) {
- dumpLine(timestamp, body);
- }
-}
-
-void NBLog::Reader::dump(int fd, size_t indent)
-{
- // get a snapshot, dump it
- std::unique_ptr<Snapshot> snap = getSnapshot();
- dump(fd, indent, *snap);
-}
-
-void NBLog::Reader::dumpLine(const String8 &timestamp, String8 &body)
-{
- if (mFd >= 0) {
- dprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp.string(), body.string());
- } else {
- ALOGI("%.*s%s %s", mIndent, "", timestamp.string(), body.string());
- }
- body.clear();
-}
-
-bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const
-{
- return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
-}
-
-// ---------------------------------------------------------------------------
-
-void NBLog::appendTimestamp(String8 *body, const void *data) {
- int64_t ts;
- memcpy(&ts, data, sizeof(ts));
- body->appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
- (int) ((ts / (1000 * 1000)) % 1000));
-}
-
-void NBLog::appendInt(String8 *body, const void *data) {
- int x = *((int*) data);
- body->appendFormat("<%d>", x);
-}
-
-void NBLog::appendFloat(String8 *body, const void *data) {
- float f;
- memcpy(&f, data, sizeof(float));
- body->appendFormat("<%f>", f);
-}
-
-void NBLog::appendPID(String8 *body, const void* data, size_t length) {
- pid_t id = *((pid_t*) data);
- char * name = &((char*) data)[sizeof(pid_t)];
- body->appendFormat("<PID: %d, name: %.*s>", id, (int) (length - sizeof(pid_t)), name);
-}
-
-String8 NBLog::bufferDump(const uint8_t *buffer, size_t size)
-{
- String8 str;
- str.append("[ ");
- for(size_t i = 0; i < size; i++)
- {
- str.appendFormat("%d ", buffer[i]);
- }
- str.append("]");
- return str;
-}
-
-String8 NBLog::bufferDump(const EntryIterator &it)
-{
- return bufferDump(it, it->length + Entry::kOverhead);
-}
-
-NBLog::EntryIterator NBLog::Reader::handleFormat(const FormatEntry &fmtEntry,
- String8 *timestamp,
- String8 *body) {
- // log timestamp
- int64_t ts = fmtEntry.timestamp();
- timestamp->clear();
- timestamp->appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
- (int) ((ts / (1000 * 1000)) % 1000));
-
- // log unique hash
- log_hash_t hash = fmtEntry.hash();
- // print only lower 16bit of hash as hex and line as int to reduce spam in the log
- body->appendFormat("%.4X-%d ", (int)(hash >> 16) & 0xFFFF, (int) hash & 0xFFFF);
-
- // log author (if present)
- handleAuthor(fmtEntry, body);
-
- // log string
- NBLog::EntryIterator arg = fmtEntry.args();
-
- const char* fmt = fmtEntry.formatString();
- size_t fmt_length = fmtEntry.formatStringLength();
-
- for (size_t fmt_offset = 0; fmt_offset < fmt_length; ++fmt_offset) {
- if (fmt[fmt_offset] != '%') {
- body->append(&fmt[fmt_offset], 1); // TODO optimize to write consecutive strings at once
- continue;
- }
- // case "%%""
- if (fmt[++fmt_offset] == '%') {
- body->append("%");
- continue;
- }
- // case "%\0"
- if (fmt_offset == fmt_length) {
- continue;
- }
-
- NBLog::Event event = (NBLog::Event) arg->type;
- size_t length = arg->length;
-
- // TODO check length for event type is correct
-
- if (event == EVENT_END_FMT) {
- break;
- }
-
- // TODO: implement more complex formatting such as %.3f
- const uint8_t *datum = arg->data; // pointer to the current event args
- switch(fmt[fmt_offset])
- {
- case 's': // string
- ALOGW_IF(event != EVENT_STRING,
- "NBLog Reader incompatible event for string specifier: %d", event);
- body->append((const char*) datum, length);
- break;
-
- case 't': // timestamp
- ALOGW_IF(event != EVENT_TIMESTAMP,
- "NBLog Reader incompatible event for timestamp specifier: %d", event);
- appendTimestamp(body, datum);
- break;
-
- case 'd': // integer
- ALOGW_IF(event != EVENT_INTEGER,
- "NBLog Reader incompatible event for integer specifier: %d", event);
- appendInt(body, datum);
- break;
-
- case 'f': // float
- ALOGW_IF(event != EVENT_FLOAT,
- "NBLog Reader incompatible event for float specifier: %d", event);
- appendFloat(body, datum);
- break;
-
- case 'p': // pid
- ALOGW_IF(event != EVENT_PID,
- "NBLog Reader incompatible event for pid specifier: %d", event);
- appendPID(body, datum, length);
- break;
-
- default:
- ALOGW("NBLog Reader encountered unknown character %c", fmt[fmt_offset]);
- }
- ++arg;
- }
- ALOGW_IF(arg->type != EVENT_END_FMT, "Expected end of format, got %d", arg->type);
- ++arg;
- return arg;
-}
-
-NBLog::Merger::Merger(const void *shared, size_t size):
- mShared((Shared *) shared),
- mFifo(mShared != NULL ?
- new audio_utils_fifo(size, sizeof(uint8_t),
- mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
- mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL)
- {}
-
-void NBLog::Merger::addReader(const NBLog::NamedReader &reader) {
- // FIXME This is called by binder thread in MediaLogService::registerWriter
- // but the access to shared variable mNamedReaders is not yet protected by a lock.
- mNamedReaders.push_back(reader);
-}
-
-// items placed in priority queue during merge
-// composed by a timestamp and the index of the snapshot where the timestamp came from
-struct MergeItem
-{
- int64_t ts;
- int index;
- MergeItem(int64_t ts, int index): ts(ts), index(index) {}
-};
-
-// operators needed for priority queue in merge
-// bool operator>(const int64_t &t1, const int64_t &t2) {
-// return t1.tv_sec > t2.tv_sec || (t1.tv_sec == t2.tv_sec && t1.tv_nsec > t2.tv_nsec);
-// }
-
-bool operator>(const struct MergeItem &i1, const struct MergeItem &i2) {
- return i1.ts > i2.ts || (i1.ts == i2.ts && i1.index > i2.index);
-}
-
-// Merge registered readers, sorted by timestamp
-void NBLog::Merger::merge() {
- // FIXME This is called by merge thread
- // but the access to shared variable mNamedReaders is not yet protected by a lock.
- int nLogs = mNamedReaders.size();
- std::vector<std::unique_ptr<NBLog::Reader::Snapshot>> snapshots(nLogs);
- std::vector<NBLog::EntryIterator> offsets(nLogs);
- for (int i = 0; i < nLogs; ++i) {
- snapshots[i] = mNamedReaders[i].reader()->getSnapshot();
- offsets[i] = snapshots[i]->begin();
- }
- // initialize offsets
- // TODO custom heap implementation could allow to update top, improving performance
- // for bursty buffers
- std::priority_queue<MergeItem, std::vector<MergeItem>, std::greater<MergeItem>> timestamps;
- for (int i = 0; i < nLogs; ++i)
- {
- if (offsets[i] != snapshots[i]->end()) {
- int64_t ts = AbstractEntry::buildEntry(offsets[i])->timestamp();
- timestamps.emplace(ts, i);
- }
- }
-
- while (!timestamps.empty()) {
- // find minimum timestamp
- int index = timestamps.top().index;
- // copy it to the log, increasing offset
- offsets[index] = AbstractEntry::buildEntry(offsets[index])->copyWithAuthor(mFifoWriter,
- index);
- // update data structures
- timestamps.pop();
- if (offsets[index] != snapshots[index]->end()) {
- int64_t ts = AbstractEntry::buildEntry(offsets[index])->timestamp();
- timestamps.emplace(ts, index);
- }
- }
-}
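The loop above is a textbook k-way merge driven by a min-heap. A self-contained sketch of the same idea over plain timestamp vectors (the snapshot and FIFO plumbing is stripped out; all names here are illustrative):

    #include <cstdint>
    #include <queue>
    #include <vector>

    // Merge already-sorted timestamp sequences into one sorted output.
    std::vector<int64_t> kWayMerge(const std::vector<std::vector<int64_t>>& logs) {
        struct Item { int64_t ts; size_t log; size_t pos; };
        auto greater = [](const Item& a, const Item& b) {
            return a.ts > b.ts || (a.ts == b.ts && a.log > b.log);
        };
        std::priority_queue<Item, std::vector<Item>, decltype(greater)> heap(greater);
        for (size_t i = 0; i < logs.size(); ++i) {
            if (!logs[i].empty()) heap.push({logs[i][0], i, 0});   // seed with each head
        }
        std::vector<int64_t> merged;
        while (!heap.empty()) {
            Item top = heap.top();          // smallest timestamp across all logs
            heap.pop();
            merged.push_back(top.ts);
            if (top.pos + 1 < logs[top.log].size()) {              // advance within that log
                heap.push({logs[top.log][top.pos + 1], top.log, top.pos + 1});
            }
        }
        return merged;
    }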
-
-const std::vector<NBLog::NamedReader>& NBLog::Merger::getNamedReaders() const {
- // FIXME This is returning a reference to a shared variable that needs a lock
- return mNamedReaders;
-}
-
-// ---------------------------------------------------------------------------
-
-NBLog::MergeReader::MergeReader(const void *shared, size_t size, Merger &merger)
- : Reader(shared, size), mNamedReaders(merger.getNamedReaders()) {}
-
-void NBLog::MergeReader::handleAuthor(const NBLog::AbstractEntry &entry, String8 *body) {
- int author = entry.author();
- // FIXME Needs a lock
- const char* name = mNamedReaders[author].name();
- body->appendFormat("%s: ", name);
-}
-
-// ---------------------------------------------------------------------------
-
-NBLog::MergeThread::MergeThread(NBLog::Merger &merger)
- : mMerger(merger),
- mTimeoutUs(0) {}
-
-NBLog::MergeThread::~MergeThread() {
- // set exit flag, set timeout to 0 to force threadLoop to exit and wait for the thread to join
- requestExit();
- setTimeoutUs(0);
- join();
-}
-
-bool NBLog::MergeThread::threadLoop() {
- bool doMerge;
- {
- AutoMutex _l(mMutex);
- // If mTimeoutUs is not positive, wait on the condition variable until signaled.
- // If it's positive, wait kThreadSleepPeriodUs and then merge
- nsecs_t waitTime = mTimeoutUs > 0 ? kThreadSleepPeriodUs * 1000 : LLONG_MAX;
- mCond.waitRelative(mMutex, waitTime);
- doMerge = mTimeoutUs > 0;
- mTimeoutUs -= kThreadSleepPeriodUs;
- }
- if (doMerge) {
- mMerger.merge();
- }
- return true;
-}
-
-void NBLog::MergeThread::wakeup() {
- setTimeoutUs(kThreadWakeupPeriodUs);
-}
-
-void NBLog::MergeThread::setTimeoutUs(int time) {
- AutoMutex _l(mMutex);
- mTimeoutUs = time;
- mCond.signal();
-}
-
-} // namespace android
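The MergeThread removed above is a retriggerable one-shot: wakeup() reloads a timeout, threadLoop() decrements it once per sleep period, and merging only happens while the remaining timeout is positive. A standalone sketch of the same timing pattern using std::condition_variable (the class name and the work() callback are made up; this is not the original implementation):

    #include <chrono>
    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <thread>

    class RetriggerableWorker {
    public:
        explicit RetriggerableWorker(std::function<void()> work)
            : mWork(std::move(work)), mThread([this] { loop(); }) {}
        ~RetriggerableWorker() {
            { std::lock_guard<std::mutex> l(mMutex); mExit = true; mTimeoutUs = 0; }
            mCond.notify_one();
            mThread.join();
        }
        // Reload the timeout; the worker keeps calling work() periodically until it expires.
        void wakeup(int timeoutUs = 3'000'000) {
            std::lock_guard<std::mutex> l(mMutex);
            mTimeoutUs = timeoutUs;
            mCond.notify_one();
        }
    private:
        void loop() {
            constexpr int kPeriodUs = 1'000'000;
            for (;;) {
                bool doWork;
                {
                    std::unique_lock<std::mutex> l(mMutex);
                    if (mTimeoutUs > 0) {
                        // awake: sleep one merge period (or until retriggered/asked to exit)
                        mCond.wait_for(l, std::chrono::microseconds(kPeriodUs));
                    } else {
                        // idle: block until the timeout is reloaded or exit is requested
                        mCond.wait(l, [this] { return mTimeoutUs > 0 || mExit; });
                    }
                    if (mExit) return;
                    doWork = mTimeoutUs > 0;
                    mTimeoutUs -= kPeriodUs;
                }
                if (doWork) mWork();
            }
        }
        std::function<void()> mWork;
        std::mutex mMutex;
        std::condition_variable mCond;
        int mTimeoutUs = 0;
        bool mExit = false;
        std::thread mThread;   // declared last so it starts after the other members are ready
    };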
diff --git a/media/libnbaio/OWNERS b/media/libnbaio/OWNERS
index f9cb567..eece71f 100644
--- a/media/libnbaio/OWNERS
+++ b/media/libnbaio/OWNERS
@@ -1 +1,2 @@
gkasten@google.com
+hunga@google.com
diff --git a/media/libnbaio/PerformanceAnalysis.cpp b/media/libnbaio/PerformanceAnalysis.cpp
deleted file mode 100644
index fb3bddc..0000000
--- a/media/libnbaio/PerformanceAnalysis.cpp
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#define LOG_TAG "PerformanceAnalysis"
-// #define LOG_NDEBUG 0
-
-#include <algorithm>
-#include <climits>
-#include <deque>
-#include <iostream>
-#include <math.h>
-#include <numeric>
-#include <vector>
-#include <stdarg.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/prctl.h>
-#include <time.h>
-#include <new>
-#include <audio_utils/roundup.h>
-#include <media/nbaio/NBLog.h>
-#include <media/nbaio/PerformanceAnalysis.h>
-#include <media/nbaio/ReportPerformance.h>
-// #include <utils/CallStack.h> // used to print callstack
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <queue>
-#include <utility>
-
-namespace android {
-
-namespace ReportPerformance {
-
-PerformanceAnalysis::PerformanceAnalysis() {
- // These variables will be (FIXME) learned from the data
- kPeriodMs = 4; // typical buffer period (mode)
- // average number of ms spent processing a buffer
- kPeriodMsCPU = static_cast<int>(kPeriodMs * kRatio);
-}
-
-// converts a time series into a map. key: buffer period length. value: count
-static std::map<int, int> buildBuckets(const std::vector<int64_t> &samples) {
- // TODO allow buckets of variable resolution
- std::map<int, int> buckets;
- for (size_t i = 1; i < samples.size(); ++i) {
- ++buckets[deltaMs(samples[i - 1], samples[i])];
- }
- return buckets;
-}
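For example (values fabricated), feeding buildBuckets() five wakeups that are 4 ms apart except for one 12 ms gap:

    std::vector<int64_t> wakeupsNs = {0, 4'000'000, 8'000'000, 20'000'000, 24'000'000};
    std::map<int, int> buckets = buildBuckets(wakeupsNs);
    // buckets == { {4, 3}, {12, 1} }: three 4 ms periods and one 12 ms period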
-
-static int widthOf(int x) {
- int width = 0;
- while (x > 0) {
- ++width;
- x /= 10;
- }
- return width;
-}
-
-// Given a series of audio processing wakeup timestamps,
-// buckets the time intervals into a histogram, searches for
-// outliers, analyzes the outlier series for unexpectedly
-// small or large values and stores these as peaks, and flushes
-// the timestamp series from memory.
-void PerformanceAnalysis::processAndFlushTimeStampSeries() {
- // 1) analyze the series to store all outliers and their exact timestamps:
- storeOutlierData(mTimeStampSeries);
-
- // 2) detect peaks in the outlier series
- detectPeaks();
-
- // 3) compute its histogram, append to mRecentHists and clear the time series
- mRecentHists.emplace_back(static_cast<timestamp>(mTimeStampSeries[0]),
- buildBuckets(mTimeStampSeries));
- // do not let mRecentHists exceed capacity
- // ALOGD("mRecentHists size: %d", static_cast<int>(mRecentHists.size()));
- if (mRecentHists.size() >= kRecentHistsCapacity) {
- // ALOGD("popped back mRecentHists");
- mRecentHists.pop_front();
- }
- mTimeStampSeries.clear();
-}
-
-// forces short-term histogram storage to avoid adding idle audio time interval
-// to buffer period data
-void PerformanceAnalysis::handleStateChange() {
- ALOGD("handleStateChange");
- processAndFlushTimeStampSeries();
- return;
-}
-
-// Takes a single buffer period timestamp and stores it in a
-// temporary series of timestamps. Once the series is full, the data is analyzed,
-// stored, and emptied.
-void PerformanceAnalysis::logTsEntry(int64_t ts) {
- // TODO might want to filter excessively high outliers, which are usually caused
- // by the thread being inactive.
- // Store time series data for each reader in order to bucket it once there
- // is enough data. Then, write to recentHists as a histogram.
- mTimeStampSeries.push_back(ts);
- // if length of the time series has reached kShortHistSize samples,
- // analyze the data and flush the timestamp series from memory
- if (mTimeStampSeries.size() >= kShortHistSize) {
- processAndFlushTimeStampSeries();
- }
-}
-
-// When the short-term histogram array mRecentHists has reached capacity,
-// merges histograms for data compression, stores them in mLongTermHists,
-// and clears mRecentHists
-// TODO: have logTsEntry write directly to mLongTermHists, discard mRecentHists,
-// start a new histogram when a peak occurs
-void PerformanceAnalysis::processAndFlushRecentHists() {
-
- // Buckets is used to aggregate short-term histograms.
- Histogram buckets;
- timestamp startingTs = mRecentHists[0].first;
-
- for (const auto &shortHist: mRecentHists) {
- // If the time between starting and ending timestamps has reached the maximum,
- // add the current histogram (buckets) to the long-term histogram buffer,
- // clear buckets, and start a new long-term histogram aggregation process.
- if (deltaMs(startingTs, shortHist.first) >= kMaxHistTimespanMs) {
- mLongTermHists.emplace_back(startingTs, std::move(buckets));
- buckets.clear();
- startingTs = shortHist.first;
- // When memory is full, delete oldest histogram
- // TODO use a circular buffer
- if (mLongTermHists.size() >= kLongTermHistsCapacity) {
- mLongTermHists.pop_front();
- }
- }
-
- // add current histogram to buckets
- for (const auto &countPair : shortHist.second) {
- buckets[countPair.first] += countPair.second;
- }
- }
- mRecentHists.clear();
- // TODO: decide when/where to call writeToFile
- // TODO: add a thread-specific extension to the file name
- static const char* const kName = (const char *) "/data/misc/audioserver/sample_results.txt";
- writeToFile(mOutlierData, mLongTermHists, kName, false);
-}
-
-// Given a series of outlier intervals (mOutlierData),
-// looks for changes in distribution (peaks), which can be either positive or negative.
-// The function sets the mean to the starting value and sigma to 0, and updates
-// them as long as no peak is detected. When a value is more than 'threshold'
-// standard deviations from the mean, a peak is detected and the mean and sigma
-// are set to the peak value and 0.
-void PerformanceAnalysis::detectPeaks() {
- if (mOutlierData.empty()) {
- return;
- }
-
- // compute mean of the distribution. Used to check whether a value is large
- const double kTypicalDiff = std::accumulate(
- mOutlierData.begin(), mOutlierData.end(), 0,
- [](auto &a, auto &b){return a + b.first;}) / mOutlierData.size();
- // ALOGD("typicalDiff %f", kTypicalDiff);
-
- // iterator at the beginning of a sequence, or updated to the most recent peak
- std::deque<std::pair<uint64_t, uint64_t>>::iterator start = mOutlierData.begin();
- // The mean and standard deviation are updated every time a peak is detected.
- // They are carried over from the previous sequence to preserve continuity;
- // here, they are initialized for the first time.
- if (mPeakDetectorMean < 0) {
- mPeakDetectorMean = static_cast<double>(start->first);
- mPeakDetectorSd = 0;
- }
- auto sqr = [](auto x){ return x * x; };
- for (auto it = mOutlierData.begin(); it != mOutlierData.end(); ++it) {
- // no surprise occurred:
- // the new element is a small number of standard deviations from the mean
- if ((fabs(it->first - mPeakDetectorMean) < kStddevThreshold * mPeakDetectorSd) ||
- // or: right after peak has been detected, the delta is smaller than average
- (mPeakDetectorSd == 0 && fabs(it->first - mPeakDetectorMean) < kTypicalDiff)) {
- // update the mean and sd:
- // count number of elements (distance between start iterator and current)
- const int kN = std::distance(start, it) + 1;
- // usual formulas for mean and sd
- mPeakDetectorMean = std::accumulate(start, it + 1, 0.0,
- [](auto &a, auto &b){return a + b.first;}) / kN;
- mPeakDetectorSd = sqrt(std::accumulate(start, it + 1, 0.0,
- [=](auto &a, auto &b){ return a + sqr(b.first - mPeakDetectorMean);})) /
- ((kN > 1)? kN - 1 : kN); // kN - 1: mean is correlated with variance
- }
- // surprising value: store peak timestamp and reset mean, sd, and start iterator
- else {
- mPeakTimestamps.emplace_back(it->second);
- // TODO: remove pop_front once a circular buffer is in place
- if (mPeakTimestamps.size() >= kPeakSeriesSize) {
- mPeakTimestamps.pop_front();
- }
- mPeakDetectorMean = static_cast<double>(it->first);
- mPeakDetectorSd = 0;
- start = it;
- }
- }
- return;
-}
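Not the exact update rules above (which also carry the mean across calls and fall back to kTypicalDiff right after a reset), but a simplified, self-contained version of the same peak criterion using Welford's online mean and variance:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Flag index i as a peak when v[i] is more than kThreshold standard deviations
    // away from the running mean of the current run; then restart the run at v[i].
    std::vector<std::size_t> findPeaks(const std::vector<double>& v, double kThreshold = 5.0) {
        std::vector<std::size_t> peaks;
        double mean = 0.0, m2 = 0.0;   // running mean and sum of squared deviations (Welford)
        std::size_t n = 0;
        for (std::size_t i = 0; i < v.size(); ++i) {
            const double sd = (n > 1) ? std::sqrt(m2 / (n - 1)) : 0.0;
            if (sd > 0.0 && std::fabs(v[i] - mean) > kThreshold * sd) {
                peaks.push_back(i);    // surprising value: record it and restart the run
                mean = v[i];
                m2 = 0.0;
                n = 1;
                continue;
            }
            ++n;                       // unsurprising value: fold it into the running stats
            const double delta = v[i] - mean;
            mean += delta / n;
            m2 += delta * (v[i] - mean);
        }
        return peaks;
    }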
-
-// Called by logTsEntry. The input is a vector of timestamps.
-// Finds outliers and writes to mOutlierData.
-// Each value in mOutlierData consists of: <time elapsed since previous outlier, outlier timestamp>.
-// e.g. timestamps (ms) 1, 4, 5, 16, 18, 28 will produce pairs (4, 5), (13, 18).
-// This function is applied to the time series before it is converted into a histogram.
-void PerformanceAnalysis::storeOutlierData(const std::vector<int64_t> &timestamps) {
- if (timestamps.size() < 1) {
- return;
- }
- // first pass: need to initialize
- if (mElapsed == 0) {
- mPrevNs = timestamps[0];
- }
- for (const auto &ts: timestamps) {
- const uint64_t diffMs = static_cast<uint64_t>(deltaMs(mPrevNs, ts));
- if (diffMs >= static_cast<uint64_t>(kOutlierMs)) {
- mOutlierData.emplace_back(mElapsed, static_cast<uint64_t>(mPrevNs));
- // Remove oldest value if the vector is full
- // TODO: remove pop_front once circular buffer is in place
- // FIXME: make sure kShortHistSize is large enough that data will never be lost
- // before being written to file or to a FIFO
- if (mOutlierData.size() >= kOutlierSeriesSize) {
- mOutlierData.pop_front();
- }
- mElapsed = 0;
- }
- mElapsed += diffMs;
- mPrevNs = ts;
- }
-}
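Hand-tracing the documented example through the loop above (kOutlierMs = 7; timestamps shown in ms for readability, derived by hand rather than taken from a run):

    //   ts=1:  diff=0   -> mElapsed=0
    //   ts=4:  diff=3   -> mElapsed=3
    //   ts=5:  diff=1   -> mElapsed=4
    //   ts=16: diff=11  -> outlier: push (mElapsed=4, mPrevNs=5); mElapsed becomes 11
    //   ts=18: diff=2   -> mElapsed=13
    //   ts=28: diff=10  -> outlier: push (mElapsed=13, mPrevNs=18); mElapsed becomes 10
    // mOutlierData ends up holding (4, 5) and (13, 18), matching testFunction() below.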
-
-
-// FIXME: delete this temporary test code, recycled for various new functions
-void PerformanceAnalysis::testFunction() {
- // produces values (4: 5000000), (13: 18000000)
- // ns timestamps of buffer periods
- const std::vector<int64_t>kTempTestData = {1000000, 4000000, 5000000,
- 16000000, 18000000, 28000000};
- PerformanceAnalysis::storeOutlierData(kTempTestData);
- for (const auto &outlier: mOutlierData) {
- ALOGE("PerformanceAnalysis test %lld: %lld",
- static_cast<long long>(outlier.first), static_cast<long long>(outlier.second));
- }
- detectPeaks();
-}
-
-// TODO Make it return a std::string instead of modifying body --> is this still relevant?
-// TODO consider changing all ints to uint32_t or uint64_t
-// TODO: move this to ReportPerformance, probably make it a friend function of PerformanceAnalysis
-void PerformanceAnalysis::reportPerformance(String8 *body, int maxHeight) {
- if (mRecentHists.size() < 1) {
- ALOGD("reportPerformance: mRecentHists is empty");
- return;
- }
- ALOGD("reportPerformance: hists size %d", static_cast<int>(mRecentHists.size()));
- // TODO: more elaborate data analysis
- std::map<int, int> buckets;
- for (const auto &shortHist: mRecentHists) {
- for (const auto &countPair : shortHist.second) {
- buckets[countPair.first] += countPair.second;
- }
- }
-
- // the lengths of the underscore and space strings correspond to the maximum width of the histogram
- static const int kLen = 40;
- std::string underscores(kLen, '_');
- std::string spaces(kLen, ' ');
-
- auto it = buckets.begin();
- int maxDelta = it->first;
- int maxCount = it->second;
- // Compute maximum values
- while (++it != buckets.end()) {
- if (it->first > maxDelta) {
- maxDelta = it->first;
- }
- if (it->second > maxCount) {
- maxCount = it->second;
- }
- }
- int height = log2(maxCount) + 1; // maxCount > 0, safe to call log2
- const int leftPadding = widthOf(1 << height);
- const int colWidth = std::max(std::max(widthOf(maxDelta) + 1, 3), leftPadding + 2);
- int scalingFactor = 1;
- // scale data if it exceeds maximum height
- if (height > maxHeight) {
- scalingFactor = (height + maxHeight) / maxHeight;
- height /= scalingFactor;
- }
- body->appendFormat("\n%*s", leftPadding + 11, "Occurrences");
- // write histogram label line with bucket values
- body->appendFormat("\n%s", " ");
- body->appendFormat("%*s", leftPadding, " ");
- for (auto const &x : buckets) {
- body->appendFormat("%*d", colWidth, x.second);
- }
- // write histogram ascii art
- body->appendFormat("\n%s", " ");
- for (int row = height * scalingFactor; row >= 0; row -= scalingFactor) {
- const int value = 1 << row;
- body->appendFormat("%.*s", leftPadding, spaces.c_str());
- for (auto const &x : buckets) {
- body->appendFormat("%.*s%s", colWidth - 1, spaces.c_str(), x.second < value ? " " : "|");
- }
- body->appendFormat("\n%s", " ");
- }
- // print x-axis
- const int columns = static_cast<int>(buckets.size());
- body->appendFormat("%*c", leftPadding, ' ');
- body->appendFormat("%.*s", (columns + 1) * colWidth, underscores.c_str());
- body->appendFormat("\n%s", " ");
-
- // write footer with bucket labels
- body->appendFormat("%*s", leftPadding, " ");
- for (auto const &x : buckets) {
- body->appendFormat("%*d", colWidth, x.first);
- }
- body->appendFormat("%.*s%s", colWidth, spaces.c_str(), "ms\n");
-
- // Now report glitches
- body->appendFormat("\ntime elapsed between glitches and glitch timestamps\n");
- for (const auto &outlier: mOutlierData) {
- body->appendFormat("%lld: %lld\n", static_cast<long long>(outlier.first),
- static_cast<long long>(outlier.second));
- }
-
-}
-
-
-// Produces a log warning if the timing of recent buffer periods caused a glitch
-// Computes sum of running window of three buffer periods
-// Checks whether the buffer periods leave enough CPU time for the next one
-// e.g. if a buffer period is expected to be 4 ms and a buffer requires 3 ms of CPU time,
-// here are some glitch cases:
-// 4 + 4 + 6 ; 5 + 4 + 5; 2 + 2 + 10
-// TODO: develop this code to track changes in histogram distribution in addition
-// to / instead of glitches.
-void PerformanceAnalysis::alertIfGlitch(const std::vector<int64_t> &samples) {
- std::deque<int> periods(kNumBuff, kPeriodMs);
- for (size_t i = 2; i < samples.size(); ++i) { // skip first time entry
- periods.push_front(deltaMs(samples[i - 1], samples[i]));
- periods.pop_back();
- // TODO: check that all glitch cases are covered
- if (std::accumulate(periods.begin(), periods.end(), 0) > kNumBuff * kPeriodMs +
- kPeriodMs - kPeriodMsCPU) {
- ALOGW("A glitch occurred");
- periods.assign(kNumBuff, kPeriodMs);
- }
- }
- return;
-}
-
-} // namespace ReportPerformance
-
-} // namespace android
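Everything in the deleted class is driven through logTsEntry(); a minimal usage sketch of the flow, where the wakeup-timestamp source and variable names are fabricated for illustration (utils/String8.h and the PerformanceAnalysis header are assumed to be included):

    using android::ReportPerformance::PerformanceAnalysis;

    PerformanceAnalysis analysis;
    // One timestamp per audio callback; every kShortHistSize samples the series is
    // bucketed into a short-term histogram, outliers extracted, and peaks detected.
    for (int64_t wakeupNs : wakeupTimestampsNs) {   // hypothetical CLOCK_MONOTONIC samples
        analysis.logTsEntry(wakeupNs);
    }
    // On an audio on/off transition, flush early so idle time does not skew the periods.
    analysis.handleStateChange();
    // Render the accumulated short-term histograms as ASCII art into a dump body.
    android::String8 body;
    analysis.reportPerformance(&body);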
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index 2486b76..35a43d8 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -26,7 +26,7 @@
PipeReader::PipeReader(Pipe& pipe) :
NBAIO_Source(pipe.mFormat),
- mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/, true /*flush*/),
+ mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/, false /*flush*/),
mFramesOverrun(0),
mOverruns(0)
{
diff --git a/media/libnbaio/ReportPerformance.cpp b/media/libnbaio/ReportPerformance.cpp
deleted file mode 100644
index dc50ada..0000000
--- a/media/libnbaio/ReportPerformance.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "ReportPerformance"
-
-#include <fstream>
-#include <iostream>
-#include <queue>
-#include <stdarg.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/prctl.h>
-#include <utility>
-#include <media/nbaio/NBLog.h>
-#include <media/nbaio/PerformanceAnalysis.h>
-#include <media/nbaio/ReportPerformance.h>
-// #include <utils/CallStack.h> // used to print callstack
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-namespace android {
-
-namespace ReportPerformance {
-
-// Writes outlier intervals, timestamps, and histograms spanning long time intervals to a file.
-// TODO: format the data efficiently and write different types of data to different files
-void writeToFile(std::deque<std::pair<outlierInterval, timestamp>> &outlierData,
- std::deque<std::pair<timestamp, Histogram>> &hists,
- const char * kName,
- bool append) {
- ALOGD("writing performance data to file");
- if (outlierData.empty() || hists.empty()) {
- return;
- }
-
- std::ofstream ofs;
- ofs.open(kName, append ? std::ios::app : std::ios::trunc);
- if (!ofs.is_open()) {
- ALOGW("couldn't open file %s", kName);
- return;
- }
- ofs << "Outlier data: interval and timestamp\n";
- for (const auto &outlier : outlierData) {
- ofs << outlier.first << ": " << outlier.second << "\n";
- }
- ofs << "Histogram data\n";
- for (const auto &hist : hists) {
- ofs << "\ttimestamp\n";
- ofs << hist.first << "\n";
- ofs << "\tbuckets and counts\n";
- for (const auto &bucket : hist.second) {
- ofs << bucket.first << ": " << bucket.second << "\n";
- }
- }
- ofs.close();
-}
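For reference, the layout this produces in sample_results.txt looks like the following (all values fabricated):

    Outlier data: interval and timestamp
    4: 5000000
    13: 18000000
    Histogram data
        timestamp
    1000000
        buckets and counts
    4: 3
    12: 1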
-
-} // namespace ReportPerformance
-
-} // namespace android
diff --git a/media/libnbaio/include/media/nbaio/NBLog.h b/media/libnbaio/include/media/nbaio/NBLog.h
deleted file mode 100644
index 3e48ee1..0000000
--- a/media/libnbaio/include/media/nbaio/NBLog.h
+++ /dev/null
@@ -1,595 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Non-blocking event logger intended for safe communication between processes via shared memory
-
-#ifndef ANDROID_MEDIA_NBLOG_H
-#define ANDROID_MEDIA_NBLOG_H
-
-#include <binder/IMemory.h>
-#include <audio_utils/fifo.h>
-#include <utils/Mutex.h>
-#include <utils/threads.h>
-
-#include <map>
-#include <deque>
-#include <set>
-#include <vector>
-
-namespace android {
-
-class String8;
-
-class NBLog {
-
-public:
-
-typedef uint64_t log_hash_t;
-
-// FIXME Everything needed for client (writer API and registration) should be isolated
-// from the rest of the implementation.
-class Writer;
-class Reader;
-
-enum Event : uint8_t {
- EVENT_RESERVED,
- EVENT_STRING, // ASCII string, not NUL-terminated
- // TODO: make timestamp optional
- EVENT_TIMESTAMP, // clock_gettime(CLOCK_MONOTONIC)
- EVENT_INTEGER, // integer value entry
- EVENT_FLOAT, // floating point value entry
- EVENT_PID, // process ID and process name
- EVENT_AUTHOR, // author index (present in merged logs) tracks entry's original log
- EVENT_START_FMT, // logFormat start event: entry includes format string, following
- // entries contain format arguments
- EVENT_HASH, // unique HASH of log origin, originates from hash of file name
- // and line number
- EVENT_HISTOGRAM_ENTRY_TS, // single datum for timestamp histogram
- EVENT_AUDIO_STATE, // audio on/off event: logged upon FastMixer::onStateChange() call
- EVENT_END_FMT, // end of logFormat argument list
-
- EVENT_UPPER_BOUND, // to check for invalid events
-};
-
-private:
-
-// ---------------------------------------------------------------------------
-// API for handling format entry operations
-
-// a formatted entry has the following structure:
-// * START_FMT entry, containing the format string
-// * TIMESTAMP entry
-// * HASH entry
-// * author entry of the thread that generated it (optional, present in merged log)
-// * format arg1
-// * format arg2
-// * ...
-// * END_FMT entry
-
-// entry representation in memory
-struct entry {
- const uint8_t type;
- const uint8_t length;
- const uint8_t data[0];
-};
-
-// entry tail representation (after data)
-struct ending {
- uint8_t length;
- uint8_t next[0];
-};
-
-// entry iterator
-class EntryIterator {
-public:
- EntryIterator();
- explicit EntryIterator(const uint8_t *entry);
- EntryIterator(const EntryIterator &other);
-
- // dereference underlying entry
- const entry& operator*() const;
- const entry* operator->() const;
- // advance to next entry
- EntryIterator& operator++(); // ++i
- // back to previous entry
- EntryIterator& operator--(); // --i
- EntryIterator next() const;
- EntryIterator prev() const;
- bool operator!=(const EntryIterator &other) const;
- int operator-(const EntryIterator &other) const;
-
- bool hasConsistentLength() const;
- void copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const;
- void copyData(uint8_t *dst) const;
-
- template<typename T>
- inline const T& payload() {
- return *reinterpret_cast<const T *>(ptr + offsetof(entry, data));
- }
-
- inline operator const uint8_t*() const {
- return ptr;
- }
-
-private:
- const uint8_t *ptr;
-};
-
-class AbstractEntry {
-public:
-
- // Entry starting in the given pointer
- explicit AbstractEntry(const uint8_t *entry);
- virtual ~AbstractEntry() {}
-
- // build concrete entry of appropriate class from pointer
- static std::unique_ptr<AbstractEntry> buildEntry(const uint8_t *ptr);
-
- // get format entry timestamp
- // TODO consider changing to uint64_t
- virtual int64_t timestamp() const = 0;
-
- // get format entry's unique id
- virtual log_hash_t hash() const = 0;
-
- // entry's author index (-1 if none present)
- // a Merger has a vector of Readers, author simply points to the index of the
- // Reader that originated the entry
- // TODO consider changing to uint32_t
- virtual int author() const = 0;
-
- // copy entry, adding author before timestamp, returns iterator to end of entry
- virtual EntryIterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
- int author) const = 0;
-
-protected:
- // copies ordinary entry from src to dst, and returns length of entry
- // size_t copyEntry(audio_utils_fifo_writer *dst, const iterator &it);
- const uint8_t *mEntry;
-};
-
-class FormatEntry : public AbstractEntry {
-public:
- // explicit FormatEntry(const EntryIterator &it);
- explicit FormatEntry(const uint8_t *ptr) : AbstractEntry(ptr) {}
- virtual ~FormatEntry() {}
-
- EntryIterator begin() const;
-
- // Entry's format string
- const char* formatString() const;
-
- // Entry's format string length
- size_t formatStringLength() const;
-
- // Format arguments (excluding format string, timestamp and author)
- EntryIterator args() const;
-
- // get format entry timestamp
- virtual int64_t timestamp() const override;
-
- // get format entry's unique id
- virtual log_hash_t hash() const override;
-
- // entry's author index (-1 if none present)
- // a Merger has a vector of Readers, author simply points to the index of the
- // Reader that originated the entry
- virtual int author() const override;
-
- // copy entry, adding author before timestamp, returns size of original entry
- virtual EntryIterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
- int author) const override;
-
-};
-
-class HistogramEntry : public AbstractEntry {
-public:
- explicit HistogramEntry(const uint8_t *ptr) : AbstractEntry(ptr) {
- }
- virtual ~HistogramEntry() {}
-
- virtual int64_t timestamp() const override;
-
- virtual log_hash_t hash() const override;
-
- virtual int author() const override;
-
- virtual EntryIterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
- int author) const override;
-
-};
-
-// ---------------------------------------------------------------------------
-
-// representation of a single log entry in private memory
-struct Entry {
- Entry(Event event, const void *data, size_t length)
- : mEvent(event), mLength(length), mData(data) { }
- /*virtual*/ ~Entry() { }
-
- // used during writing to format Entry information as follows: [type][length][data ... ][length]
- int copyEntryDataAt(size_t offset) const;
-
-private:
- friend class Writer;
- Event mEvent; // event type
- uint8_t mLength; // length of additional data, 0 <= mLength <= kMaxLength
- const void *mData; // event type-specific data
- static const size_t kMaxLength = 255;
-public:
- // mEvent, mLength, mData[...], duplicate mLength
- static const size_t kOverhead = sizeof(entry) + sizeof(ending);
- // ending length of previous entry
- static const size_t kPreviousLengthOffset = - sizeof(ending) +
- offsetof(ending, length);
-};
-
-struct HistTsEntry {
- log_hash_t hash;
- int64_t ts;
-}; //TODO __attribute__((packed));
-
-struct HistTsEntryWithAuthor {
- log_hash_t hash;
- int64_t ts;
- int author;
-}; //TODO __attribute__((packed));
-
-using StateTsEntryWithAuthor = HistTsEntryWithAuthor;
-
-struct HistIntEntry {
- log_hash_t hash;
- int value;
-}; //TODO __attribute__((packed));
-
-// representation of a single log entry in shared memory
-// byte[0] mEvent
-// byte[1] mLength
-// byte[2] mData[0]
-// ...
-// byte[2+i] mData[i]
-// ...
-// byte[2+mLength-1] mData[mLength-1]
-// byte[2+mLength] duplicate copy of mLength to permit reverse scan
-// byte[3+mLength] start of next log entry
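The duplicated trailing length is what lets EntryIterator's operator++ and operator-- declared above step in either direction in O(1). A sketch of the pointer arithmetic on the layout just described (free functions for illustration, not part of the class):

    // layout: [type][length][data[0] .. data[length-1]][length]
    const uint8_t* nextEntry(const uint8_t* p) {
        return p + 2 + p[1] + 1;    // skip type, length, the data bytes, and the trailing length
    }
    const uint8_t* prevEntry(const uint8_t* p) {
        return p - 1 - p[-1] - 2;   // p[-1] is the previous entry's trailing copy of its length
    }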
-
- static void appendInt(String8 *body, const void *data);
- static void appendFloat(String8 *body, const void *data);
- static void appendPID(String8 *body, const void *data, size_t length);
- static void appendTimestamp(String8 *body, const void *data);
- static size_t fmtEntryLength(const uint8_t *data);
- static String8 bufferDump(const uint8_t *buffer, size_t size);
- static String8 bufferDump(const EntryIterator &it);
-public:
-
-// Located in shared memory, must be POD.
-// Exactly one process must explicitly call the constructor or use placement new.
-// Since this is a POD, the destructor is empty and it is unnecessary to call it explicitly.
-struct Shared {
- Shared() /* mRear initialized via default constructor */ { }
- /*virtual*/ ~Shared() { }
-
- audio_utils_fifo_index mRear; // index one byte past the end of most recent Entry
- char mBuffer[0]; // circular buffer for entries
-};
-
-public:
-
-// ---------------------------------------------------------------------------
-
-// FIXME Timeline was intended to wrap Writer and Reader, but isn't actually used yet.
-// For now it is just a namespace for sharedSize().
-class Timeline : public RefBase {
-public:
-#if 0
- Timeline(size_t size, void *shared = NULL);
- virtual ~Timeline();
-#endif
-
- // Input parameter 'size' is the desired size of the timeline in byte units.
- // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices.
- static size_t sharedSize(size_t size);
-
-#if 0
-private:
- friend class Writer;
- friend class Reader;
-
- const size_t mSize; // circular buffer size in bytes, must be a power of 2
- bool mOwn; // whether I own the memory at mShared
- Shared* const mShared; // pointer to shared memory
-#endif
-};
-
-// ---------------------------------------------------------------------------
-
-// Writer is thread-safe with respect to Reader, but not with respect to multiple threads
-// calling Writer methods. If you need multi-thread safety for writing, use LockedWriter.
-class Writer : public RefBase {
-public:
- Writer(); // dummy nop implementation without shared memory
-
- // Input parameter 'size' is the desired size of the timeline in byte units.
- // The size of the shared memory must be at least Timeline::sharedSize(size).
- Writer(void *shared, size_t size);
- Writer(const sp<IMemory>& iMemory, size_t size);
-
- virtual ~Writer();
-
- // FIXME needs comments, and some should be private
- virtual void log(const char *string);
- virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
- virtual void logvf(const char *fmt, va_list ap);
- virtual void logTimestamp();
- virtual void logTimestamp(const int64_t ts);
- virtual void logInteger(const int x);
- virtual void logFloat(const float x);
- virtual void logPID();
- virtual void logFormat(const char *fmt, log_hash_t hash, ...);
- virtual void logVFormat(const char *fmt, log_hash_t hash, va_list ap);
- virtual void logStart(const char *fmt);
- virtual void logEnd();
- virtual void logHash(log_hash_t hash);
- virtual void logEventHistTs(Event event, log_hash_t hash);
-
- virtual bool isEnabled() const;
-
- // return value for all of these is the previous isEnabled()
- virtual bool setEnabled(bool enabled); // but won't enable if no shared memory
- bool enable() { return setEnabled(true); }
- bool disable() { return setEnabled(false); }
-
- sp<IMemory> getIMemory() const { return mIMemory; }
-
-private:
- // 0 <= length <= kMaxLength
- // writes a single Entry to the FIFO
- void log(Event event, const void *data, size_t length);
- // checks validity of an event before calling log above this one
- void log(const Entry *entry, bool trusted = false);
-
- Shared* const mShared; // raw pointer to shared memory
- sp<IMemory> mIMemory; // ref-counted version, initialized in constructor and then const
- audio_utils_fifo * const mFifo; // FIFO itself,
- // non-NULL unless constructor fails
- audio_utils_fifo_writer * const mFifoWriter; // used to write to FIFO,
- // non-NULL unless dummy constructor used
- bool mEnabled; // whether to actually log
-
- // cached pid and process name to use in %p format specifier
- // total tag length is mPidTagSize and process name is not zero terminated
- char *mPidTag;
- size_t mPidTagSize;
-};
-
-// ---------------------------------------------------------------------------
-
-// Similar to Writer, but safe for multiple threads to call concurrently
-class LockedWriter : public Writer {
-public:
- LockedWriter();
- LockedWriter(void *shared, size_t size);
-
- virtual void log(const char *string);
- virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
- virtual void logvf(const char *fmt, va_list ap);
- virtual void logTimestamp();
- virtual void logTimestamp(const int64_t ts);
- virtual void logInteger(const int x);
- virtual void logFloat(const float x);
- virtual void logPID();
- virtual void logStart(const char *fmt);
- virtual void logEnd();
- virtual void logHash(log_hash_t hash);
-
- virtual bool isEnabled() const;
- virtual bool setEnabled(bool enabled);
-
-private:
- mutable Mutex mLock;
-};
-
-// ---------------------------------------------------------------------------
-
-class Reader : public RefBase {
-public:
-
- // A snapshot of a reader's buffer
- // This is raw data. No analysis has been done on it
- class Snapshot {
- public:
- Snapshot() : mData(NULL), mLost(0) {}
-
- Snapshot(size_t bufferSize) : mData(new uint8_t[bufferSize]) {}
-
- ~Snapshot() { delete[] mData; }
-
- // copy of the buffer
- uint8_t *data() const { return mData; }
-
- // amount of data lost (given by audio_utils_fifo_reader)
- size_t lost() const { return mLost; }
-
- // iterator to beginning of readable segment of snapshot
- // data between begin and end has valid entries
- EntryIterator begin() { return mBegin; }
-
- // iterator to end of readable segment of snapshot
- EntryIterator end() { return mEnd; }
-
- private:
- friend class Reader;
- uint8_t *mData;
- size_t mLost;
- EntryIterator mBegin;
- EntryIterator mEnd;
- };
-
- // Input parameter 'size' is the desired size of the timeline in byte units.
- // The size of the shared memory must be at least Timeline::sharedSize(size).
- Reader(const void *shared, size_t size);
- Reader(const sp<IMemory>& iMemory, size_t size);
-
- virtual ~Reader();
-
- // get a snapshot of the reader's FIFO buffer, effectively consuming the buffer
- std::unique_ptr<Snapshot> getSnapshot();
- // dump a particular snapshot of the reader
- // TODO: move dump to PerformanceAnalysis. Model/view/controller design
- void dump(int fd, size_t indent, Snapshot & snap);
- // dump the current content of the reader's buffer (calls getSnapshot() and the dump() above)
- void dump(int fd, size_t indent = 0);
- bool isIMemory(const sp<IMemory>& iMemory) const;
-
-private:
-
- static const std::set<Event> startingTypes;
- static const std::set<Event> endingTypes;
- /*const*/ Shared* const mShared; // raw pointer to shared memory, actually const but not
- // declared as const because the audio_utils_fifo() constructor requires a non-const pointer
- sp<IMemory> mIMemory; // ref-counted version, assigned only in constructor
- int mFd; // file descriptor
- int mIndent; // indentation level
- audio_utils_fifo * const mFifo; // FIFO itself,
- // non-NULL unless constructor fails
- audio_utils_fifo_reader * const mFifoReader; // used to read from FIFO,
- // non-NULL unless constructor fails
-
- // TODO: it might be clearer, instead of a direct map from source location to vector of
- // timestamps, if we instead first mapped from source location to an object that
- // represented that location. And one of its fields would be a vector of timestamps.
- // That would allow us to record other information about the source location beyond timestamps.
- void dumpLine(const String8& timestamp, String8& body);
-
- EntryIterator handleFormat(const FormatEntry &fmtEntry,
- String8 *timestamp,
- String8 *body);
- // dummy method for handling absent author entry
- virtual void handleAuthor(const AbstractEntry& /*fmtEntry*/, String8* /*body*/) {}
-
- // Searches for the last entry of type <type> in the range [front, back)
- // back has to be entry-aligned. Returns nullptr if none encountered.
- static const uint8_t *findLastEntryOfTypes(const uint8_t *front, const uint8_t *back,
- const std::set<Event> &types);
-
- static const size_t kSquashTimestamp = 5; // squash this many or more adjacent timestamps
-};
-
-// Wrapper for a reader with a name. Contains a pointer to the reader and a copy of the name
-class NamedReader {
-public:
- NamedReader() { mName[0] = '\0'; } // for Vector
- NamedReader(const sp<NBLog::Reader>& reader, const char *name) :
- mReader(reader)
- { strlcpy(mName, name, sizeof(mName)); }
- ~NamedReader() { }
- const sp<NBLog::Reader>& reader() const { return mReader; }
- const char* name() const { return mName; }
-
-private:
- sp<NBLog::Reader> mReader;
- static const size_t kMaxName = 32;
- char mName[kMaxName];
-};
-
-// ---------------------------------------------------------------------------
-
-class Merger : public RefBase {
-public:
- Merger(const void *shared, size_t size);
-
- virtual ~Merger() {}
-
- void addReader(const NamedReader &reader);
- // TODO add removeReader
- void merge();
- // FIXME This is returning a reference to a shared variable that needs a lock
- const std::vector<NamedReader>& getNamedReaders() const;
-private:
- // vector of the readers the merger is supposed to merge from.
- // every reader reads from a writer's buffer
- // FIXME Needs to be protected by a lock
- std::vector<NamedReader> mNamedReaders;
-
- // TODO Need comments on all of these
- Shared * const mShared;
- std::unique_ptr<audio_utils_fifo> mFifo;
- std::unique_ptr<audio_utils_fifo_writer> mFifoWriter;
-};
-
-class MergeReader : public Reader {
-public:
- MergeReader(const void *shared, size_t size, Merger &merger);
-private:
- // FIXME Needs to be protected by a lock,
- // because even though our use of it is read-only there may be asynchronous updates
- const std::vector<NamedReader>& mNamedReaders;
- // handle author entry by looking up the author's name and appending it to the body
- // returns number of bytes read from fmtEntry
- void handleAuthor(const AbstractEntry &fmtEntry, String8 *body);
-};
-
-// MergeThread is a thread that contains a Merger. It works as a retriggerable one-shot:
-// when triggered, it stays awake for a period of time, during which it merges periodically; if
-// retriggered, the timeout is reset.
-// The thread is triggered on AudioFlinger binder activity.
-class MergeThread : public Thread {
-public:
- MergeThread(Merger &merger);
- virtual ~MergeThread() override;
-
- // Reset timeout and activate thread to merge periodically if it's idle
- void wakeup();
-
- // Set timeout period until the merging thread goes idle again
- void setTimeoutUs(int time);
-
-private:
- virtual bool threadLoop() override;
-
- // the merger who actually does the work of merging the logs
- Merger& mMerger;
-
- // mutex for the condition variable
- Mutex mMutex;
-
- // condition variable to activate merging on timeout >= 0
- Condition mCond;
-
- // time left until the thread blocks again (in microseconds)
- int mTimeoutUs;
-
- // merging period when the thread is awake
- static const int kThreadSleepPeriodUs = 1000000 /*1s*/;
-
- // initial timeout value when triggered
- static const int kThreadWakeupPeriodUs = 3000000 /*3s*/;
-};
-
-}; // class NBLog
-
-// TODO put somewhere else
-static inline int64_t get_monotonic_ns() {
- timespec ts;
- if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
- return (uint64_t) ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
- }
- return 0; // should not happen.
-}
-
-} // namespace android
-
-#endif // ANDROID_MEDIA_NBLOG_H
diff --git a/media/libnbaio/include/media/nbaio/PerformanceAnalysis.h b/media/libnbaio/include/media/nbaio/PerformanceAnalysis.h
deleted file mode 100644
index b0dc148..0000000
--- a/media/libnbaio/include/media/nbaio/PerformanceAnalysis.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Non-blocking event logger intended for safe communication between processes via shared memory
-
-#ifndef ANDROID_MEDIA_PERFORMANCEANALYSIS_H
-#define ANDROID_MEDIA_PERFORMANCEANALYSIS_H
-
-#include <map>
-#include <deque>
-#include <vector>
-#include "NBLog.h"
-#include "ReportPerformance.h"
-
-namespace android {
-
-namespace ReportPerformance {
-
-class PerformanceAnalysis {
- // This class stores and analyzes audio processing wakeup timestamps from NBLog
- // FIXME: currently, all performance data is stored in deques. Need to add a mutex.
- // FIXME: continue this way until analysis is done in a separate thread. Then, use
- // the fifo writer utilities.
-public:
-
- PerformanceAnalysis();
-
- // Given a series of audio processing wakeup timestamps,
- // compresses and analyzes the data, and flushes
- // the timestamp series from memory.
- void processAndFlushTimeStampSeries();
-
- // Called when an audio on/off event is read from the buffer,
- // e.g. EVENT_AUDIO_STATE.
- // calls processAndFlushTimeStampSeries on the data up to the event,
- // effectively discarding the idle audio time interval
- void handleStateChange();
-
- // When the short-term histogram array mRecentHists has reached capacity,
- // merges histograms for data compression and stores them in mLongTermHists
- void processAndFlushRecentHists();
-
- // Writes wakeup timestamp entry to log and runs analysis
- // TODO: make this thread safe. Each thread should have its own instance
- // of PerformanceAnalysis.
- void logTsEntry(timestamp_raw ts);
-
- // FIXME: make detectPeaks and storeOutlierData a single function
- // Input: mOutlierData. Looks at time elapsed between outliers
- // finds significant changes in the distribution
- // writes timestamps of significant changes to mPeakTimestamps
- void detectPeaks();
-
- // runs analysis on timestamp series before it is converted to a histogram
- // finds outliers
- // writes to mOutlierData <time elapsed since previous outlier, outlier timestamp>
- void storeOutlierData(const std::vector<timestamp_raw> &timestamps);
-
- // input: series of short histograms. Generates a string of analysis of the buffer periods
- // TODO: WIP write more detailed analysis
- // FIXME: move this data visualization to a separate class. Model/view/controller
- void reportPerformance(String8 *body, int maxHeight = 10);
-
- // TODO: delete this. temp for testing
- void testFunction();
-
- // This function is used to detect glitches in a time series
- // TODO incorporate this into the analysis (currently unused)
- void alertIfGlitch(const std::vector<timestamp_raw> &samples);
-
-private:
-
- // stores outlier analysis: <elapsed time between outliers in ms, outlier timestamp>
- std::deque<std::pair<outlierInterval, timestamp>> mOutlierData;
-
- // stores each timestamp at which a peak was detected
- // a peak is a moment at which the average outlier interval changed significantly
- std::deque<timestamp> mPeakTimestamps;
-
- // TODO: turn these into circular buffers for better data flow
- // FIFO of small histograms
- // stores fixed-size short buffer period histograms with timestamp of first sample
- std::deque<std::pair<timestamp, Histogram>> mRecentHists;
-
- // FIFO of small histograms
- // stores fixed-size long-term buffer period histograms with timestamp of first sample
- std::deque<std::pair<timestamp, Histogram>> mLongTermHists;
-
- // vector of timestamps, collected from NBLog for a (TODO) specific thread
- // when a vector reaches its maximum size, the data is processed and flushed
- std::vector<timestamp_raw> mTimeStampSeries;
-
- static const int kMsPerSec = 1000;
-
- // Parameters used when detecting outliers
- // TODO: learn some of these from the data, delete unused ones
- // FIXME: decide whether to make kPeriodMs static.
- static const int kNumBuff = 3; // number of buffers considered in local history
- int kPeriodMs; // current period length is ideally 4 ms
- static const int kOutlierMs = 7; // values greater or equal to this cause glitches
- // DAC processing time for 4 ms buffer
- static constexpr double kRatio = 0.75; // estimate of CPU time as ratio of period length
- int kPeriodMsCPU; // computed from kPeriodMs and kRatio
-
- // Peak detection: number of standard deviations from mean considered a significant change
- static const int kStddevThreshold = 5;
-
- // capacity allocated to data structures
- // TODO: adjust all of these values
- static const int kRecentHistsCapacity = 100; // number of short-term histograms stored in memory
- static const int kShortHistSize = 50; // number of samples in a short-term histogram
- static const int kOutlierSeriesSize = 100; // number of values stored in outlier array
- static const int kPeakSeriesSize = 100; // number of values stored in peak array
- static const int kLongTermHistsCapacity = 20; // number of long-term histograms stored in memory
- // maximum elapsed time between first and last timestamp of a long-term histogram
- static const int kMaxHistTimespanMs = 5 * kMsPerSec;
-
- // these variables are stored in-class to ensure continuity while analyzing the timestamp
- // series one short sequence at a time: the variables are not re-initialized every time.
- // FIXME: create inner class for these variables and decide which other ones to add to it
- double mPeakDetectorMean = -1;
- double mPeakDetectorSd = -1;
- // variables for storeOutlierData
- uint64_t mElapsed = 0;
- int64_t mPrevNs = -1;
-
-};
-
-} // namespace ReportPerformance
-
-} // namespace android
-
-#endif // ANDROID_MEDIA_PERFORMANCEANALYSIS_H
diff --git a/media/libnbaio/include/media/nbaio/ReportPerformance.h b/media/libnbaio/include/media/nbaio/ReportPerformance.h
deleted file mode 100644
index 27d2810..0000000
--- a/media/libnbaio/include/media/nbaio/ReportPerformance.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIA_REPORTPERFORMANCE_H
-#define ANDROID_MEDIA_REPORTPERFORMANCE_H
-
-#include <deque>
-#include <map>
-#include <vector>
-
-namespace android {
-
-// This class is used by the reportPerformance function
-// TODO move reportPerformance function to ReportPerformance.cpp
-class String8;
-
-namespace ReportPerformance {
-
-// stores a histogram: key: observed buffer period. value: count
-// TODO: unsigned, unsigned
-using Histogram = std::map<int, int>;
-
-using outlierInterval = uint64_t;
-// int64_t timestamps are converted to uint64_t in PerformanceAnalysis::storeOutlierData,
-// and all analysis functions use uint64_t.
-using timestamp = uint64_t;
-using timestamp_raw = int64_t;
-
-// FIXME: decide whether to use 64 or 32 bits
-// TODO: the code has a mix of typedef and using. Standardize to one or the other.
-typedef uint64_t log_hash_t;
-
-static inline int deltaMs(int64_t ns1, int64_t ns2) {
- return (ns2 - ns1) / (1000 * 1000);
-}
-
-static inline uint32_t log2(uint32_t x) {
- // This works for x > 0
- return 31 - __builtin_clz(x);
-}
-
-// Writes outlier intervals, timestamps, and histograms spanning long time
-// intervals to a file.
-void writeToFile(std::deque<std::pair<outlierInterval, timestamp>> &outlierData,
- std::deque<std::pair<timestamp, Histogram>> &hists,
- const char * kName,
- bool append);
-
-} // namespace ReportPerformance
-
-} // namespace android
-
-#endif // ANDROID_MEDIA_REPORTPERFORMANCE_H
diff --git a/media/libnblog/Android.bp b/media/libnblog/Android.bp
new file mode 100644
index 0000000..74aaf77
--- /dev/null
+++ b/media/libnblog/Android.bp
@@ -0,0 +1,28 @@
+cc_library_shared {
+
+ name: "libnblog",
+
+ srcs: [
+ "NBLog.cpp",
+ "PerformanceAnalysis.cpp",
+ "ReportPerformance.cpp",
+ ],
+
+ shared_libs: [
+ "libaudioutils",
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libutils",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ include_dirs: ["system/media/audio_utils/include"],
+
+ export_include_dirs: ["include"],
+
+}
diff --git a/media/libnblog/NBLog.cpp b/media/libnblog/NBLog.cpp
new file mode 100644
index 0000000..d6fa3e3
--- /dev/null
+++ b/media/libnblog/NBLog.cpp
@@ -0,0 +1,1157 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+#define LOG_TAG "NBLog"
+
+#include <algorithm>
+#include <climits>
+#include <deque>
+#include <fstream>
+#include <iostream>
+#include <math.h>
+#include <numeric>
+#include <vector>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <time.h>
+#include <new>
+#include <audio_utils/roundup.h>
+#include <media/nblog/NBLog.h>
+#include <media/nblog/PerformanceAnalysis.h>
+#include <media/nblog/ReportPerformance.h>
+#include <utils/CallStack.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include <queue>
+#include <utility>
+
+namespace android {
+
+int NBLog::Entry::copyEntryDataAt(size_t offset) const
+{
+ // FIXME This is too slow
+ if (offset == 0)
+ return mEvent;
+ else if (offset == 1)
+ return mLength;
+ else if (offset < (size_t) (mLength + 2))
+ return ((char *) mData)[offset - 2];
+ else if (offset == (size_t) (mLength + 2))
+ return mLength;
+ else
+ return 0;
+}
+
+// ---------------------------------------------------------------------------
+
+/*static*/
+std::unique_ptr<NBLog::AbstractEntry> NBLog::AbstractEntry::buildEntry(const uint8_t *ptr) {
+ const uint8_t type = EntryIterator(ptr)->type;
+ switch (type) {
+ case EVENT_START_FMT:
+ return std::make_unique<FormatEntry>(FormatEntry(ptr));
+ case EVENT_AUDIO_STATE:
+ case EVENT_HISTOGRAM_ENTRY_TS:
+ return std::make_unique<HistogramEntry>(HistogramEntry(ptr));
+ default:
+ ALOGW("Tried to create AbstractEntry of type %d", type);
+ return nullptr;
+ }
+}
+
+NBLog::AbstractEntry::AbstractEntry(const uint8_t *entry) : mEntry(entry) {
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::EntryIterator NBLog::FormatEntry::begin() const {
+ return EntryIterator(mEntry);
+}
+
+const char *NBLog::FormatEntry::formatString() const {
+ return (const char*) mEntry + offsetof(entry, data);
+}
+
+size_t NBLog::FormatEntry::formatStringLength() const {
+ return mEntry[offsetof(entry, length)];
+}
+
+NBLog::EntryIterator NBLog::FormatEntry::args() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ // skip timestamp
+ ++it;
+ // skip hash
+ ++it;
+ // Skip author if present
+ if (it->type == EVENT_AUTHOR) {
+ ++it;
+ }
+ return it;
+}
+
+int64_t NBLog::FormatEntry::timestamp() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ return it.payload<int64_t>();
+}
+
+NBLog::log_hash_t NBLog::FormatEntry::hash() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ // skip timestamp
+ ++it;
+ // unaligned 64-bit read not supported
+ log_hash_t hash;
+ memcpy(&hash, it->data, sizeof(hash));
+ return hash;
+}
+
+int NBLog::FormatEntry::author() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ // skip timestamp
+ ++it;
+ // skip hash
+ ++it;
+ // if there is an author entry, return it; otherwise return -1
+ if (it->type == EVENT_AUTHOR) {
+ return it.payload<int>();
+ }
+ return -1;
+}
+
+NBLog::EntryIterator NBLog::FormatEntry::copyWithAuthor(
+ std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
+ auto it = begin();
+ // copy fmt start entry
+ it.copyTo(dst);
+ // copy timestamp
+ (++it).copyTo(dst);
+ // copy hash
+ (++it).copyTo(dst);
+ // insert author entry
+ size_t authorEntrySize = NBLog::Entry::kOverhead + sizeof(author);
+ uint8_t authorEntry[authorEntrySize];
+ authorEntry[offsetof(entry, type)] = EVENT_AUTHOR;
+ authorEntry[offsetof(entry, length)] =
+ authorEntry[authorEntrySize + NBLog::Entry::kPreviousLengthOffset] =
+ sizeof(author);
+ *(int*) (&authorEntry[offsetof(entry, data)]) = author;
+ dst->write(authorEntry, authorEntrySize);
+ // copy rest of entries
+ while ((++it)->type != EVENT_END_FMT) {
+ it.copyTo(dst);
+ }
+ it.copyTo(dst);
+ ++it;
+ return it;
+}
+
+void NBLog::EntryIterator::copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const {
+ size_t length = ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
+ dst->write(ptr, length);
+}
+
+void NBLog::EntryIterator::copyData(uint8_t *dst) const {
+ memcpy((void*) dst, ptr + offsetof(entry, data), ptr[offsetof(entry, length)]);
+}
+
+NBLog::EntryIterator::EntryIterator()
+ : ptr(nullptr) {}
+
+NBLog::EntryIterator::EntryIterator(const uint8_t *entry)
+ : ptr(entry) {}
+
+NBLog::EntryIterator::EntryIterator(const NBLog::EntryIterator &other)
+ : ptr(other.ptr) {}
+
+const NBLog::entry& NBLog::EntryIterator::operator*() const {
+ return *(entry*) ptr;
+}
+
+const NBLog::entry* NBLog::EntryIterator::operator->() const {
+ return (entry*) ptr;
+}
+
+NBLog::EntryIterator& NBLog::EntryIterator::operator++() {
+ ptr += ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
+ return *this;
+}
+
+NBLog::EntryIterator& NBLog::EntryIterator::operator--() {
+ ptr -= ptr[NBLog::Entry::kPreviousLengthOffset] + NBLog::Entry::kOverhead;
+ return *this;
+}
+
+NBLog::EntryIterator NBLog::EntryIterator::next() const {
+ EntryIterator aux(*this);
+ return ++aux;
+}
+
+NBLog::EntryIterator NBLog::EntryIterator::prev() const {
+ EntryIterator aux(*this);
+ return --aux;
+}
+
+int NBLog::EntryIterator::operator-(const NBLog::EntryIterator &other) const {
+ return ptr - other.ptr;
+}
+
+bool NBLog::EntryIterator::operator!=(const EntryIterator &other) const {
+ return ptr != other.ptr;
+}
+
+bool NBLog::EntryIterator::hasConsistentLength() const {
+ return ptr[offsetof(entry, length)] == ptr[ptr[offsetof(entry, length)] +
+ NBLog::Entry::kOverhead + NBLog::Entry::kPreviousLengthOffset];
+}
+
+// ---------------------------------------------------------------------------
+
+int64_t NBLog::HistogramEntry::timestamp() const {
+ return EntryIterator(mEntry).payload<HistTsEntry>().ts;
+}
+
+NBLog::log_hash_t NBLog::HistogramEntry::hash() const {
+ return EntryIterator(mEntry).payload<HistTsEntry>().hash;
+}
+
+int NBLog::HistogramEntry::author() const {
+ EntryIterator it(mEntry);
+ if (it->length == sizeof(HistTsEntryWithAuthor)) {
+ return it.payload<HistTsEntryWithAuthor>().author;
+ } else {
+ return -1;
+ }
+}
+
+NBLog::EntryIterator NBLog::HistogramEntry::copyWithAuthor(
+ std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
+ // Current histogram entry has {type, length, struct HistTsEntry, length}.
+ // We now want {type, length, struct HistTsEntryWithAuthor, length}
+ uint8_t buffer[Entry::kOverhead + sizeof(HistTsEntryWithAuthor)];
+ // Copy content until the point we want to add the author
+ memcpy(buffer, mEntry, sizeof(entry) + sizeof(HistTsEntry));
+ // Copy the author
+ *(int*) (buffer + sizeof(entry) + sizeof(HistTsEntry)) = author;
+ // Update lengths
+ buffer[offsetof(entry, length)] = sizeof(HistTsEntryWithAuthor);
+ buffer[offsetof(entry, data) + sizeof(HistTsEntryWithAuthor) + offsetof(ending, length)]
+ = sizeof(HistTsEntryWithAuthor);
+ // Write new buffer into FIFO
+ dst->write(buffer, sizeof(buffer));
+ return EntryIterator(mEntry).next();
+}
+
+// ---------------------------------------------------------------------------
+
+#if 0 // FIXME see note in NBLog.h
+NBLog::Timeline::Timeline(size_t size, void *shared)
+ : mSize(roundup(size)), mOwn(shared == NULL),
+ mShared((Shared *) (mOwn ? new char[sharedSize(size)] : shared))
+{
+ new (mShared) Shared;
+}
+
+NBLog::Timeline::~Timeline()
+{
+ mShared->~Shared();
+ if (mOwn) {
+ delete[] (char *) mShared;
+ }
+}
+#endif
+
+/*static*/
+size_t NBLog::Timeline::sharedSize(size_t size)
+{
+ // TODO fifo now supports non-power-of-2 buffer sizes, so could remove the roundup
+ return sizeof(Shared) + roundup(size);
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::Writer::Writer()
+ : mShared(NULL), mFifo(NULL), mFifoWriter(NULL), mEnabled(false), mPidTag(NULL), mPidTagSize(0)
+{
+}
+
+NBLog::Writer::Writer(void *shared, size_t size)
+ : mShared((Shared *) shared),
+ mFifo(mShared != NULL ?
+ new audio_utils_fifo(size, sizeof(uint8_t),
+ mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+ mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL),
+ mEnabled(mFifoWriter != NULL)
+{
+ // caching pid and process name
+ pid_t id = ::getpid();
+ char procName[16];
+ int status = prctl(PR_GET_NAME, procName);
+ if (status) { // error getting process name
+ procName[0] = '\0';
+ }
+ size_t length = strlen(procName);
+ mPidTagSize = length + sizeof(pid_t);
+ mPidTag = new char[mPidTagSize];
+ memcpy(mPidTag, &id, sizeof(pid_t));
+ memcpy(mPidTag + sizeof(pid_t), procName, length);
+}
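+
+// Note on the pid tag built above: mPidTag holds the raw pid_t bytes followed immediately
+// by the process name with no NUL terminator; mPidTagSize is the total length. logPID()
+// writes this buffer verbatim and appendPID() splits it again at sizeof(pid_t).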
+
+NBLog::Writer::Writer(const sp<IMemory>& iMemory, size_t size)
+ : Writer(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
+{
+ mIMemory = iMemory;
+}
+
+NBLog::Writer::~Writer()
+{
+ delete mFifoWriter;
+ delete mFifo;
+ delete[] mPidTag;
+}
+
+void NBLog::Writer::log(const char *string)
+{
+ if (!mEnabled) {
+ return;
+ }
+ LOG_ALWAYS_FATAL_IF(string == NULL, "Attempted to log NULL string");
+ size_t length = strlen(string);
+ if (length > Entry::kMaxLength) {
+ length = Entry::kMaxLength;
+ }
+ log(EVENT_STRING, string, length);
+}
+
+void NBLog::Writer::logf(const char *fmt, ...)
+{
+ if (!mEnabled) {
+ return;
+ }
+ va_list ap;
+ va_start(ap, fmt);
+ Writer::logvf(fmt, ap); // the Writer:: is needed to avoid virtual dispatch for LockedWriter
+ va_end(ap);
+}
+
+void NBLog::Writer::logvf(const char *fmt, va_list ap)
+{
+ if (!mEnabled) {
+ return;
+ }
+ char buffer[Entry::kMaxLength + 1 /*NUL*/];
+ int length = vsnprintf(buffer, sizeof(buffer), fmt, ap);
+ if (length >= (int) sizeof(buffer)) {
+ length = sizeof(buffer) - 1;
+ // NUL termination is not required
+ // buffer[length] = '\0';
+ }
+ if (length >= 0) {
+ log(EVENT_STRING, buffer, length);
+ }
+}
+
+void NBLog::Writer::logTimestamp()
+{
+ if (!mEnabled) {
+ return;
+ }
+ int64_t ts = get_monotonic_ns();
+ if (ts > 0) {
+ log(EVENT_TIMESTAMP, &ts, sizeof(ts));
+ } else {
+ ALOGE("Failed to get timestamp");
+ }
+}
+
+void NBLog::Writer::logTimestamp(const int64_t ts)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_TIMESTAMP, &ts, sizeof(ts));
+}
+
+void NBLog::Writer::logInteger(const int x)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_INTEGER, &x, sizeof(x));
+}
+
+void NBLog::Writer::logFloat(const float x)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_FLOAT, &x, sizeof(x));
+}
+
+void NBLog::Writer::logPID()
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_PID, mPidTag, mPidTagSize);
+}
+
+void NBLog::Writer::logStart(const char *fmt)
+{
+ if (!mEnabled) {
+ return;
+ }
+ size_t length = strlen(fmt);
+ if (length > Entry::kMaxLength) {
+ length = Entry::kMaxLength;
+ }
+ log(EVENT_START_FMT, fmt, length);
+}
+
+void NBLog::Writer::logEnd()
+{
+ if (!mEnabled) {
+ return;
+ }
+ Entry entry = Entry(EVENT_END_FMT, NULL, 0);
+ log(&entry, true);
+}
+
+void NBLog::Writer::logHash(log_hash_t hash)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_HASH, &hash, sizeof(hash));
+}
+
+void NBLog::Writer::logEventHistTs(Event event, log_hash_t hash)
+{
+ if (!mEnabled) {
+ return;
+ }
+ HistTsEntry data;
+ data.hash = hash;
+ data.ts = get_monotonic_ns();
+ if (data.ts > 0) {
+ log(event, &data, sizeof(data));
+ } else {
+ ALOGE("Failed to get timestamp");
+ }
+}
+
+void NBLog::Writer::logFormat(const char *fmt, log_hash_t hash, ...)
+{
+ if (!mEnabled) {
+ return;
+ }
+
+ va_list ap;
+ va_start(ap, hash);
+ Writer::logVFormat(fmt, hash, ap);
+ va_end(ap);
+}
+
+void NBLog::Writer::logVFormat(const char *fmt, log_hash_t hash, va_list argp)
+{
+ if (!mEnabled) {
+ return;
+ }
+ Writer::logStart(fmt);
+ int i;
+ double f;
+ char* s;
+ int64_t t;
+ Writer::logTimestamp();
+ Writer::logHash(hash);
+ for (const char *p = fmt; *p != '\0'; p++) {
+ // TODO: implement more complex formatting such as %.3f
+ if (*p != '%') {
+ continue;
+ }
+ switch(*++p) {
+ case 's': // string
+ s = va_arg(argp, char *);
+ Writer::log(s);
+ break;
+
+ case 't': // timestamp
+ t = va_arg(argp, int64_t);
+ Writer::logTimestamp(t);
+ break;
+
+ case 'd': // integer
+ i = va_arg(argp, int);
+ Writer::logInteger(i);
+ break;
+
+ case 'f': // float
+ f = va_arg(argp, double); // float arguments are promoted to double in vararg lists
+ Writer::logFloat((float)f);
+ break;
+
+ case 'p': // pid
+ Writer::logPID();
+ break;
+
+ // the "%\0" case finishes parsing
+ case '\0':
+ --p;
+ break;
+
+ case '%':
+ break;
+
+ default:
+ ALOGW("NBLog Writer parsed invalid format specifier: %c", *p);
+ break;
+ }
+ }
+ Writer::logEnd();
+}
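+
+// For example (illustrative, variable names hypothetical), a call such as
+//     logFormat("period %d frames took %t", hash, frames, measuredNs);
+// produces the entry sequence START_FMT, TIMESTAMP, HASH, INTEGER, TIMESTAMP, END_FMT,
+// which the reader side reassembles in handleFormat().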
+
+void NBLog::Writer::log(Event event, const void *data, size_t length)
+{
+ if (!mEnabled) {
+ return;
+ }
+ if (data == NULL || length > Entry::kMaxLength) {
+ // TODO Perhaps it makes sense to display truncated data, or at least a
+ // message that the data is too long? The current behavior can confuse
+ // a programmer debugging their code.
+ return;
+ }
+ // Ignore if invalid event
+ if (event == EVENT_RESERVED || event >= EVENT_UPPER_BOUND) {
+ return;
+ }
+ Entry etr(event, data, length);
+ log(&etr, true /*trusted*/);
+}
+
+void NBLog::Writer::log(const NBLog::Entry *etr, bool trusted)
+{
+ if (!mEnabled) {
+ return;
+ }
+ if (!trusted) {
+ log(etr->mEvent, etr->mData, etr->mLength);
+ return;
+ }
+ size_t need = etr->mLength + Entry::kOverhead; // mEvent, mLength, data[mLength], mLength
+ // need = number of bytes written to FIFO
+
+ // FIXME optimize this using memcpy for the data part of the Entry.
+ // The Entry could have a method copyTo(ptr, offset, size) to optimize the copy.
+ // temp holds a single log Entry of maximum size: type, length, data, and ending length
+ uint8_t temp[Entry::kMaxLength + Entry::kOverhead];
+ // write this data to temp array
+ for (size_t i = 0; i < need; i++) {
+ temp[i] = etr->copyEntryDataAt(i);
+ }
+ // write to circular buffer
+ mFifoWriter->write(temp, need);
+}
+
+bool NBLog::Writer::isEnabled() const
+{
+ return mEnabled;
+}
+
+bool NBLog::Writer::setEnabled(bool enabled)
+{
+ bool old = mEnabled;
+ mEnabled = enabled && mShared != NULL;
+ return old;
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::LockedWriter::LockedWriter()
+ : Writer()
+{
+}
+
+NBLog::LockedWriter::LockedWriter(void *shared, size_t size)
+ : Writer(shared, size)
+{
+}
+
+void NBLog::LockedWriter::log(const char *string)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::log(string);
+}
+
+void NBLog::LockedWriter::logf(const char *fmt, ...)
+{
+ // FIXME should not take the lock until after formatting is done
+ Mutex::Autolock _l(mLock);
+ va_list ap;
+ va_start(ap, fmt);
+ Writer::logvf(fmt, ap);
+ va_end(ap);
+}
+
+void NBLog::LockedWriter::logvf(const char *fmt, va_list ap)
+{
+ // FIXME should not take the lock until after formatting is done
+ Mutex::Autolock _l(mLock);
+ Writer::logvf(fmt, ap);
+}
+
+void NBLog::LockedWriter::logTimestamp()
+{
+ // FIXME should not take the lock until after the clock_gettime() syscall
+ Mutex::Autolock _l(mLock);
+ Writer::logTimestamp();
+}
+
+void NBLog::LockedWriter::logTimestamp(const int64_t ts)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logTimestamp(ts);
+}
+
+void NBLog::LockedWriter::logInteger(const int x)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logInteger(x);
+}
+
+void NBLog::LockedWriter::logFloat(const float x)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logFloat(x);
+}
+
+void NBLog::LockedWriter::logPID()
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logPID();
+}
+
+void NBLog::LockedWriter::logStart(const char *fmt)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logStart(fmt);
+}
+
+
+void NBLog::LockedWriter::logEnd()
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logEnd();
+}
+
+void NBLog::LockedWriter::logHash(log_hash_t hash)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logHash(hash);
+}
+
+bool NBLog::LockedWriter::isEnabled() const
+{
+ Mutex::Autolock _l(mLock);
+ return Writer::isEnabled();
+}
+
+bool NBLog::LockedWriter::setEnabled(bool enabled)
+{
+ Mutex::Autolock _l(mLock);
+ return Writer::setEnabled(enabled);
+}
+
+// ---------------------------------------------------------------------------
+
+const std::set<NBLog::Event> NBLog::Reader::startingTypes {NBLog::Event::EVENT_START_FMT,
+ NBLog::Event::EVENT_HISTOGRAM_ENTRY_TS,
+ NBLog::Event::EVENT_AUDIO_STATE};
+const std::set<NBLog::Event> NBLog::Reader::endingTypes {NBLog::Event::EVENT_END_FMT,
+ NBLog::Event::EVENT_HISTOGRAM_ENTRY_TS,
+ NBLog::Event::EVENT_AUDIO_STATE};
+
+NBLog::Reader::Reader(const void *shared, size_t size)
+ : mFd(-1), mIndent(0), mLost(0),
+ mShared((/*const*/ Shared *) shared), /*mIMemory*/
+ mFifo(mShared != NULL ?
+ new audio_utils_fifo(size, sizeof(uint8_t),
+ mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+ mFifoReader(mFifo != NULL ? new audio_utils_fifo_reader(*mFifo) : NULL)
+{
+}
+
+NBLog::Reader::Reader(const sp<IMemory>& iMemory, size_t size)
+ : Reader(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
+{
+ mIMemory = iMemory;
+}
+
+NBLog::Reader::~Reader()
+{
+ delete mFifoReader;
+ delete mFifo;
+}
+
+const uint8_t *NBLog::Reader::findLastEntryOfTypes(const uint8_t *front, const uint8_t *back,
+ const std::set<Event> &types) {
+ while (back + Entry::kPreviousLengthOffset >= front) {
+ const uint8_t *prev = back - back[Entry::kPreviousLengthOffset] - Entry::kOverhead;
+ if (prev < front || prev + prev[offsetof(entry, length)] +
+ Entry::kOverhead != back) {
+
+ // prev points to an out of limits or inconsistent entry
+ return nullptr;
+ }
+ if (types.find((const Event) prev[offsetof(entry, type)]) != types.end()) {
+ return prev;
+ }
+ back = prev;
+ }
+ return nullptr; // no entry found
+}
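+
+// Illustrative example of the backward scan above: because every entry stores a duplicate
+// of its length in its trailing byte, back[Entry::kPreviousLengthOffset] (the byte just
+// before <back>) gives the previous entry's data length, so
+//     prev = back - prevLength - Entry::kOverhead
+// lands on the previous entry's header; the consistency check rejects prev if walking
+// forward from it does not arrive exactly back at <back>.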
+
+// Copies content of a Reader FIFO into its Snapshot
+// The Snapshot has the same raw data, but represented as a sequence of entries
+// and an EntryIterator making it possible to process the data.
+std::unique_ptr<NBLog::Reader::Snapshot> NBLog::Reader::getSnapshot()
+{
+ if (mFifoReader == NULL) {
+ return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
+ }
+ // make a copy to avoid race condition with writer
+ size_t capacity = mFifo->capacity();
+
+ // This emulates the behaviour of audio_utils_fifo_reader::read, but without incrementing
+ // the reader index. The index is advanced only after corruption has been handled, so that
+ // it ends up just past the last complete entry in the buffer.
+ size_t lost;
+ audio_utils_iovec iovec[2];
+ ssize_t availToRead = mFifoReader->obtain(iovec, capacity, NULL /*timeout*/, &lost);
+ if (availToRead <= 0) {
+ return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
+ }
+
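+ // The readable region may wrap around the end of the circular buffer, in which case
+ // obtain() describes it as two iovecs; both pieces are copied back-to-back into the
+ // snapshot so that entries can be iterated over a contiguous byte range.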
+ std::unique_ptr<Snapshot> snapshot(new Snapshot(availToRead));
+ memcpy(snapshot->mData, (const char *) mFifo->buffer() + iovec[0].mOffset, iovec[0].mLength);
+ if (iovec[1].mLength > 0) {
+ memcpy(snapshot->mData + (iovec[0].mLength),
+ (const char *) mFifo->buffer() + iovec[1].mOffset, iovec[1].mLength);
+ }
+
+ // Handle corrupted buffer
+ // A buffer may have corrupted data at both the beginning (due to overflow) and the end
+ // (due to an incomplete format entry). Even if the final format entry is incomplete,
+ // the buffer still ends in a complete entry (which is not an END_FMT), so it is safe
+ // to traverse backwards.
+ // TODO: handle client corruption (in the middle of a buffer)
+
+ const uint8_t *back = snapshot->mData + availToRead;
+ const uint8_t *front = snapshot->mData;
+
+ // Find last END_FMT. <back> is sitting on an entry which might be the middle of a FormatEntry.
+ // We go backwards until we find an EVENT_END_FMT.
+ const uint8_t *lastEnd = findLastEntryOfTypes(front, back, endingTypes);
+ if (lastEnd == nullptr) {
+ snapshot->mEnd = snapshot->mBegin = EntryIterator(front);
+ } else {
+ // end of snapshot points to after last END_FMT entry
+ snapshot->mEnd = EntryIterator(lastEnd).next();
+ // find first START_FMT
+ const uint8_t *firstStart = nullptr;
+ const uint8_t *firstStartTmp = snapshot->mEnd;
+ while ((firstStartTmp = findLastEntryOfTypes(front, firstStartTmp, startingTypes))
+ != nullptr) {
+ firstStart = firstStartTmp;
+ }
+ // firstStart is null if no START_FMT entry was found before lastEnd
+ if (firstStart == nullptr) {
+ snapshot->mBegin = snapshot->mEnd;
+ } else {
+ snapshot->mBegin = EntryIterator(firstStart);
+ }
+ }
+
+ // advance fifo reader index to after last entry read.
+ mFifoReader->release(snapshot->mEnd - front);
+
+ snapshot->mLost = lost;
+ return snapshot;
+
+}
+
+// Takes raw content of the local merger FIFO, processes log entries, and
+// writes the data to a map of class PerformanceAnalysis, based on their thread ID.
+void NBLog::MergeReader::getAndProcessSnapshot(NBLog::Reader::Snapshot &snapshot)
+{
+ String8 timestamp, body;
+
+ for (auto entry = snapshot.begin(); entry != snapshot.end();) {
+ switch (entry->type) {
+ case EVENT_START_FMT:
+ entry = handleFormat(FormatEntry(entry), &timestamp, &body);
+ break;
+ case EVENT_HISTOGRAM_ENTRY_TS: {
+ HistTsEntryWithAuthor *data = (HistTsEntryWithAuthor *) (entry->data);
+ // TODO These memcpy calls are here to avoid an unaligned memory access crash.
+ // There's probably a more efficient way to do it.
+ log_hash_t hash;
+ memcpy(&hash, &(data->hash), sizeof(hash));
+ int64_t ts;
+ memcpy(&ts, &data->ts, sizeof(ts));
+ // TODO: hash for histogram ts and audio state need to match
+ // and correspond to audio production source file location
+ mThreadPerformanceAnalysis[data->author][0 /*hash*/].logTsEntry(ts);
+ ++entry;
+ break;
+ }
+ case EVENT_AUDIO_STATE: {
+ HistTsEntryWithAuthor *data = (HistTsEntryWithAuthor *) (entry->data);
+ // TODO These memcpy calls are here to avoid an unaligned memory access crash.
+ // There's probably a more efficient way to do it.
+ log_hash_t hash;
+ memcpy(&hash, &(data->hash), sizeof(hash));
+ // TODO: remove ts if unused
+ int64_t ts;
+ memcpy(&ts, &data->ts, sizeof(ts));
+ mThreadPerformanceAnalysis[data->author][0 /*hash*/].handleStateChange();
+ ++entry;
+ break;
+ }
+ case EVENT_END_FMT:
+ body.appendFormat("warning: got to end format event");
+ ++entry;
+ break;
+ case EVENT_RESERVED:
+ default:
+ body.appendFormat("warning: unexpected event %d", entry->type);
+ ++entry;
+ break;
+ }
+ }
+ // FIXME: decide whether to print the warnings here or elsewhere
+ if (!body.isEmpty()) {
+ dumpLine(timestamp, body);
+ }
+}
+
+void NBLog::MergeReader::getAndProcessSnapshot()
+{
+ // get a snapshot, process it
+ std::unique_ptr<Snapshot> snap = getSnapshot();
+ getAndProcessSnapshot(*snap);
+}
+
+void NBLog::MergeReader::dump(int fd, int indent) {
+ // TODO: add a mutex around media.log dump
+ ReportPerformance::dump(fd, indent, mThreadPerformanceAnalysis);
+}
+
+// Writes a line (timestamp and body) to mFd if it is valid, otherwise to the system log
+void NBLog::Reader::dumpLine(const String8 &timestamp, String8 &body)
+{
+ if (mFd >= 0) {
+ dprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp.string(), body.string());
+ } else {
+ ALOGI("%.*s%s %s", mIndent, "", timestamp.string(), body.string());
+ }
+ body.clear();
+}
+
+bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const
+{
+ return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
+}
+
+// ---------------------------------------------------------------------------
+
+void NBLog::appendTimestamp(String8 *body, const void *data) {
+ int64_t ts;
+ memcpy(&ts, data, sizeof(ts));
+ body->appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
+ (int) ((ts / (1000 * 1000)) % 1000));
+}
+
+void NBLog::appendInt(String8 *body, const void *data) {
+ int x = *((int*) data);
+ body->appendFormat("<%d>", x);
+}
+
+void NBLog::appendFloat(String8 *body, const void *data) {
+ float f;
+ memcpy(&f, data, sizeof(float));
+ body->appendFormat("<%f>", f);
+}
+
+void NBLog::appendPID(String8 *body, const void* data, size_t length) {
+ pid_t id = *((pid_t*) data);
+ char * name = &((char*) data)[sizeof(pid_t)];
+ body->appendFormat("<PID: %d, name: %.*s>", id, (int) (length - sizeof(pid_t)), name);
+}
+
+String8 NBLog::bufferDump(const uint8_t *buffer, size_t size)
+{
+ String8 str;
+ str.append("[ ");
+ for(size_t i = 0; i < size; i++)
+ {
+ str.appendFormat("%d ", buffer[i]);
+ }
+ str.append("]");
+ return str;
+}
+
+String8 NBLog::bufferDump(const EntryIterator &it)
+{
+ return bufferDump(it, it->length + Entry::kOverhead);
+}
+
+NBLog::EntryIterator NBLog::Reader::handleFormat(const FormatEntry &fmtEntry,
+ String8 *timestamp,
+ String8 *body) {
+ // log timestamp
+ int64_t ts = fmtEntry.timestamp();
+ timestamp->clear();
+ timestamp->appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
+ (int) ((ts / (1000 * 1000)) % 1000));
+
+ // log unique hash
+ log_hash_t hash = fmtEntry.hash();
+ // print only 16 bits of the hash as hex and the line number as int to reduce spam in the log
+ body->appendFormat("%.4X-%d ", (int)(hash >> 16) & 0xFFFF, (int) hash & 0xFFFF);
+
+ // log author (if present)
+ handleAuthor(fmtEntry, body);
+
+ // log string
+ NBLog::EntryIterator arg = fmtEntry.args();
+
+ const char* fmt = fmtEntry.formatString();
+ size_t fmt_length = fmtEntry.formatStringLength();
+
+ for (size_t fmt_offset = 0; fmt_offset < fmt_length; ++fmt_offset) {
+ if (fmt[fmt_offset] != '%') {
+ body->append(&fmt[fmt_offset], 1); // TODO optimize to write consecutive strings at once
+ continue;
+ }
+ // case "%%""
+ if (fmt[++fmt_offset] == '%') {
+ body->append("%");
+ continue;
+ }
+ // case "%\0"
+ if (fmt_offset == fmt_length) {
+ continue;
+ }
+
+ NBLog::Event event = (NBLog::Event) arg->type;
+ size_t length = arg->length;
+
+ // TODO check length for event type is correct
+
+ if (event == EVENT_END_FMT) {
+ break;
+ }
+
+ // TODO: implement more complex formatting such as %.3f
+ const uint8_t *datum = arg->data; // pointer to the current event args
+ switch(fmt[fmt_offset])
+ {
+ case 's': // string
+ ALOGW_IF(event != EVENT_STRING,
+ "NBLog Reader incompatible event for string specifier: %d", event);
+ body->append((const char*) datum, length);
+ break;
+
+ case 't': // timestamp
+ ALOGW_IF(event != EVENT_TIMESTAMP,
+ "NBLog Reader incompatible event for timestamp specifier: %d", event);
+ appendTimestamp(body, datum);
+ break;
+
+ case 'd': // integer
+ ALOGW_IF(event != EVENT_INTEGER,
+ "NBLog Reader incompatible event for integer specifier: %d", event);
+ appendInt(body, datum);
+ break;
+
+ case 'f': // float
+ ALOGW_IF(event != EVENT_FLOAT,
+ "NBLog Reader incompatible event for float specifier: %d", event);
+ appendFloat(body, datum);
+ break;
+
+ case 'p': // pid
+ ALOGW_IF(event != EVENT_PID,
+ "NBLog Reader incompatible event for pid specifier: %d", event);
+ appendPID(body, datum, length);
+ break;
+
+ default:
+ ALOGW("NBLog Reader encountered unknown character %c", fmt[fmt_offset]);
+ }
+ ++arg;
+ }
+ ALOGW_IF(arg->type != EVENT_END_FMT, "Expected end of format, got %d", arg->type);
+ ++arg;
+ return arg;
+}
+
+NBLog::Merger::Merger(const void *shared, size_t size):
+ mShared((Shared *) shared),
+ mFifo(mShared != NULL ?
+ new audio_utils_fifo(size, sizeof(uint8_t),
+ mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+ mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL)
+ {}
+
+void NBLog::Merger::addReader(const NBLog::NamedReader &reader) {
+
+ // FIXME This is called by binder thread in MediaLogService::registerWriter
+ // but the access to shared variable mNamedReaders is not yet protected by a lock.
+ mNamedReaders.push_back(reader);
+}
+
+// items placed in priority queue during merge
+// composed of a timestamp and the index of the snapshot the timestamp came from
+struct MergeItem
+{
+ int64_t ts;
+ int index;
+ MergeItem(int64_t ts, int index): ts(ts), index(index) {}
+};
+
+// operators needed for priority queue in merge
+// bool operator>(const int64_t &t1, const int64_t &t2) {
+// return t1.tv_sec > t2.tv_sec || (t1.tv_sec == t2.tv_sec && t1.tv_nsec > t2.tv_nsec);
+// }
+
+bool operator>(const struct MergeItem &i1, const struct MergeItem &i2) {
+ return i1.ts > i2.ts || (i1.ts == i2.ts && i1.index > i2.index);
+}
+
+// Merge registered readers, sorted by timestamp, and write data to a single FIFO in local memory
+void NBLog::Merger::merge() {
+ // FIXME This is called by merge thread
+ // but the access to shared variable mNamedReaders is not yet protected by a lock.
+ int nLogs = mNamedReaders.size();
+ std::vector<std::unique_ptr<NBLog::Reader::Snapshot>> snapshots(nLogs);
+ std::vector<NBLog::EntryIterator> offsets(nLogs);
+ for (int i = 0; i < nLogs; ++i) {
+ snapshots[i] = mNamedReaders[i].reader()->getSnapshot();
+ offsets[i] = snapshots[i]->begin();
+ }
+ // initialize offsets
+ // TODO custom heap implementation could allow to update top, improving performance
+ // for bursty buffers
+ std::priority_queue<MergeItem, std::vector<MergeItem>, std::greater<MergeItem>> timestamps;
+ for (int i = 0; i < nLogs; ++i)
+ {
+ if (offsets[i] != snapshots[i]->end()) {
+ int64_t ts = AbstractEntry::buildEntry(offsets[i])->timestamp();
+ timestamps.emplace(ts, i);
+ }
+ }
+
+ while (!timestamps.empty()) {
+ // find minimum timestamp
+ int index = timestamps.top().index;
+ // copy it to the log, increasing offset
+ offsets[index] = AbstractEntry::buildEntry(offsets[index])->copyWithAuthor(mFifoWriter,
+ index);
+ // update data structures
+ timestamps.pop();
+ if (offsets[index] != snapshots[index]->end()) {
+ int64_t ts = AbstractEntry::buildEntry(offsets[index])->timestamp();
+ timestamps.emplace(ts, index);
+ }
+ }
+}
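+
+// Illustrative example of the merge above: with two registered readers A and B whose
+// snapshots contain entries timestamped A:{1, 5} and B:{3}, the priority queue pops
+// timestamps in the order 1, 3, 5, and each popped entry is copied into the local FIFO
+// via copyWithAuthor() with its reader index (0 for A, 1 for B) inserted as the author.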
+
+const std::vector<NBLog::NamedReader>& NBLog::Merger::getNamedReaders() const {
+ // FIXME This is returning a reference to a shared variable that needs a lock
+ return mNamedReaders;
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::MergeReader::MergeReader(const void *shared, size_t size, Merger &merger)
+ : Reader(shared, size), mNamedReaders(merger.getNamedReaders()) {}
+
+void NBLog::MergeReader::handleAuthor(const NBLog::AbstractEntry &entry, String8 *body) {
+ int author = entry.author();
+ // FIXME Needs a lock
+ const char* name = mNamedReaders[author].name();
+ body->appendFormat("%s: ", name);
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::MergeThread::MergeThread(NBLog::Merger &merger, NBLog::MergeReader &mergeReader)
+ : mMerger(merger),
+ mMergeReader(mergeReader),
+ mTimeoutUs(0) {}
+
+NBLog::MergeThread::~MergeThread() {
+ // set exit flag, set timeout to 0 to force threadLoop to exit and wait for the thread to join
+ requestExit();
+ setTimeoutUs(0);
+ join();
+}
+
+bool NBLog::MergeThread::threadLoop() {
+ bool doMerge;
+ {
+ AutoMutex _l(mMutex);
+ // If mTimeoutUs is not positive, wait on the condition variable until signalled.
+ // If it is positive, sleep for kThreadSleepPeriodUs and then merge.
+ nsecs_t waitTime = mTimeoutUs > 0 ? kThreadSleepPeriodUs * 1000 : LLONG_MAX;
+ mCond.waitRelative(mMutex, waitTime);
+ doMerge = mTimeoutUs > 0;
+ mTimeoutUs -= kThreadSleepPeriodUs;
+ }
+ if (doMerge) {
+ // Merge data from all the readers
+ mMerger.merge();
+ // Process the data collected by mMerger and write it to PerformanceAnalysis
+ // FIXME: decide whether to call getAndProcessSnapshot every time
+ // or whether to have a separate thread that calls it with a lower frequency
+ mMergeReader.getAndProcessSnapshot();
+ }
+ return true;
+}
+
+void NBLog::MergeThread::wakeup() {
+ setTimeoutUs(kThreadWakeupPeriodUs);
+}
+
+void NBLog::MergeThread::setTimeoutUs(int time) {
+ AutoMutex _l(mMutex);
+ mTimeoutUs = time;
+ mCond.signal();
+}
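+
+// Illustrative timing note: wakeup() arms the merge thread by setting mTimeoutUs to
+// kThreadWakeupPeriodUs; each pass of threadLoop() then waits kThreadSleepPeriodUs,
+// decrements mTimeoutUs, and merges, so merging continues for roughly
+// kThreadWakeupPeriodUs / kThreadSleepPeriodUs passes after the last wakeup() call
+// before the thread goes back to waiting indefinitely on the condition variable.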
+
+} // namespace android
diff --git a/media/libnblog/PerformanceAnalysis.cpp b/media/libnblog/PerformanceAnalysis.cpp
new file mode 100644
index 0000000..f09e93d
--- /dev/null
+++ b/media/libnblog/PerformanceAnalysis.cpp
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#define LOG_TAG "PerformanceAnalysis"
+// #define LOG_NDEBUG 0
+
+#include <algorithm>
+#include <climits>
+#include <deque>
+#include <iostream>
+#include <math.h>
+#include <numeric>
+#include <vector>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <time.h>
+#include <new>
+#include <audio_utils/roundup.h>
+#include <media/nblog/NBLog.h>
+#include <media/nblog/PerformanceAnalysis.h>
+#include <media/nblog/ReportPerformance.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include <queue>
+#include <utility>
+
+namespace android {
+
+namespace ReportPerformance {
+
+// Given an audio processing wakeup timestamp, buckets the time interval
+// since the previous timestamp into a histogram, searches for
+// outliers, analyzes the outlier series for unexpectedly
+// small or large values and stores these as peaks
+void PerformanceAnalysis::logTsEntry(timestamp ts) {
+ // after a state change, start a new series and do not
+ // record time intervals in-between
+ if (mBufferPeriod.mPrevTs == 0) {
+ mBufferPeriod.mPrevTs = ts;
+ return;
+ }
+
+ // calculate time interval between current and previous timestamp
+ const msInterval diffMs = static_cast<msInterval>(
+ deltaMs(mBufferPeriod.mPrevTs, ts));
+
+ const int diffJiffy = deltaJiffy(mBufferPeriod.mPrevTs, ts);
+
+ // old versus new weight ratio when updating the buffer period mean
+ static constexpr double exponentialWeight = 0.999;
+ // update buffer period mean with exponential weighting
+ mBufferPeriod.mMean = (mBufferPeriod.mMean < 0) ? diffMs :
+ exponentialWeight * mBufferPeriod.mMean + (1.0 - exponentialWeight) * diffMs;
+ // set mOutlierFactor to a smaller value for the fastmixer thread
+ const int kFastMixerMax = 10;
+ // NormalMixer times vary much more than FastMixer times.
+ // TODO: mOutlierFactor values are set empirically based on what appears to be
+ // an outlier. Learn these values from the data.
+ mBufferPeriod.mOutlierFactor = mBufferPeriod.mMean < kFastMixerMax ? 1.8 : 2.0;
+ // set outlier threshold
+ mBufferPeriod.mOutlier = mBufferPeriod.mMean * mBufferPeriod.mOutlierFactor;
+
+ // Check whether the time interval between the current timestamp
+ // and the previous one is long enough to count as an outlier
+ const bool isOutlier = detectAndStoreOutlier(diffMs);
+ // If an outlier was found, check whether it was a peak
+ if (isOutlier) {
+ /*bool isPeak =*/ detectAndStorePeak(
+ mOutlierData[0].first, mOutlierData[0].second);
+ // TODO: decide whether to insert a new empty histogram if a peak
+ // occurred at the current timestamp
+ // TODO: remove isPeak if unused to avoid "unused variable" error
+ }
+
+ // Insert a histogram into mHists if mHists is empty, or
+ // close the current histogram and insert a new empty one
+ // if the current histogram has spanned its maximum time interval.
+ if (mHists.empty() ||
+ deltaMs(mHists[0].first, ts) >= kMaxLength.HistTimespanMs) {
+ mHists.emplace_front(ts, std::map<int, int>());
+ // When memory is full, delete oldest histogram
+ // TODO: use a circular buffer
+ if (mHists.size() >= kMaxLength.Hists) {
+ mHists.resize(kMaxLength.Hists);
+ }
+ }
+ // add current time intervals to histogram
+ ++mHists[0].second[diffJiffy];
+ // update previous timestamp
+ mBufferPeriod.mPrevTs = ts;
+}
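+
+// Illustrative numeric example of the exponential weighting above: with
+// exponentialWeight = 0.999, a running mean of 4.0 ms and a new interval of 20 ms give
+// 0.999 * 4.0 + 0.001 * 20 = 4.016 ms, so a single outlier barely moves the mean.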
+
+
+// forces short-term histogram storage to avoid adding idle audio time interval
+// to buffer period data
+void PerformanceAnalysis::handleStateChange() {
+ mBufferPeriod.mPrevTs = 0;
+ return;
+}
+
+
+ // Checks whether the time interval between two outliers is far enough from
+ // a typical delta to be considered a peak.
+ // Looks for changes in the distribution (peaks), which can be either positive or negative.
+// The function sets the mean to the starting value and sigma to 0, and updates
+// them as long as no peak is detected. When a value is more than 'threshold'
+// standard deviations from the mean, a peak is detected and the mean and sigma
+// are set to the peak value and 0.
+bool PerformanceAnalysis::detectAndStorePeak(msInterval diff, timestamp ts) {
+ bool isPeak = false;
+ if (mOutlierData.empty()) {
+ return false;
+ }
+ // Update mean of the distribution
+ // TypicalDiff is used to check whether a value is unusually large
+ // when we cannot use standard deviations from the mean because the sd is set to 0.
+ mOutlierDistribution.mTypicalDiff = (mOutlierDistribution.mTypicalDiff *
+ (mOutlierData.size() - 1) + diff) / mOutlierData.size();
+
+ // Initialize short-term mean at start of program
+ if (mOutlierDistribution.mMean == 0) {
+ mOutlierDistribution.mMean = diff;
+ }
+ // Update length of current sequence of outliers
+ mOutlierDistribution.mN++;
+
+ // Check whether a large deviation from the mean occurred.
+ // If the standard deviation has been reset to zero, the comparison is
+ // instead to the mean of the full mOutlierInterval sequence.
+ if ((fabs(diff - mOutlierDistribution.mMean) <
+ mOutlierDistribution.kMaxDeviation * mOutlierDistribution.mSd) ||
+ (mOutlierDistribution.mSd == 0 &&
+ fabs(diff - mOutlierDistribution.mMean) <
+ mOutlierDistribution.mTypicalDiff)) {
+ // update the mean and sd using online algorithm
+ // https://en.wikipedia.org/wiki/
+ // Algorithms_for_calculating_variance#Online_algorithm
+ mOutlierDistribution.mN++;
+ const double kDelta = diff - mOutlierDistribution.mMean;
+ mOutlierDistribution.mMean += kDelta / mOutlierDistribution.mN;
+ const double kDelta2 = diff - mOutlierDistribution.mMean;
+ mOutlierDistribution.mM2 += kDelta * kDelta2;
+ mOutlierDistribution.mSd = (mOutlierDistribution.mN < 2) ? 0 :
+ sqrt(mOutlierDistribution.mM2 / (mOutlierDistribution.mN - 1));
+ } else {
+ // new value is far from the mean:
+ // store peak timestamp and reset mean, sd, and short-term sequence
+ isPeak = true;
+ mPeakTimestamps.emplace_front(ts);
+ // if mPeaks has reached capacity, delete oldest data
+ // Note: this means that mOutlierDistribution values do not exactly
+ // match the data we have in mPeakTimestamps, but this is not an issue
+ // in practice for estimating future peaks.
+ // TODO: turn this into a circular buffer
+ if (mPeakTimestamps.size() >= kMaxLength.Peaks) {
+ mPeakTimestamps.resize(kMaxLength.Peaks);
+ }
+ mOutlierDistribution.mMean = 0;
+ mOutlierDistribution.mSd = 0;
+ mOutlierDistribution.mN = 0;
+ mOutlierDistribution.mM2 = 0;
+ }
+ return isPeak;
+}
+
+
+// Determines whether the difference between a timestamp and the previous
+// one is beyond a threshold. If yes, stores the timestamp as an outlier
+ // and writes to mOutlierData in the following format:
+// Time elapsed since previous outlier: Timestamp of start of outlier
+// e.g. timestamps (ms) 1, 4, 5, 16, 18, 28 will produce pairs (4, 5), (13, 18).
+// TODO: learn what timestamp sequences correlate with glitches instead of
+// manually designing a heuristic.
+bool PerformanceAnalysis::detectAndStoreOutlier(const msInterval diffMs) {
+ bool isOutlier = false;
+ if (diffMs >= mBufferPeriod.mOutlier) {
+ isOutlier = true;
+ mOutlierData.emplace_front(
+ mOutlierDistribution.mElapsed, mBufferPeriod.mPrevTs);
+ // Remove oldest value if the vector is full
+ // TODO: turn this into a circular buffer
+ // TODO: make sure kShortHistSize is large enough that the data will never be lost
+ // before being written to file or to a FIFO
+ if (mOutlierData.size() >= kMaxLength.Outliers) {
+ mOutlierData.resize(kMaxLength.Outliers);
+ }
+ mOutlierDistribution.mElapsed = 0;
+ }
+ mOutlierDistribution.mElapsed += diffMs;
+ return isOutlier;
+}
+
+static int widthOf(int x) {
+ int width = 0;
+ if (x < 0) {
+ width++;
+ x = x == INT_MIN ? INT_MAX : -x;
+ }
+ // assert (x >= 0)
+ do {
+ ++width;
+ x /= 10;
+ } while (x > 0);
+ return width;
+}
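+
+// e.g. widthOf(0) == 1, widthOf(57) == 2, widthOf(-123) == 4 (the minus sign counts).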
+
+// computes the column width required for a specific histogram value
+inline int numberWidth(double number, int leftPadding) {
+ // Added values account for whitespaces needed around numbers, and for the
+ // dot and decimal digit not accounted for by widthOf
+ return std::max(std::max(widthOf(static_cast<int>(number)) + 3, 2), leftPadding + 1);
+}
+
+// rounds value to precision based on log-distance from mean
+__attribute__((no_sanitize("signed-integer-overflow")))
+inline double logRound(double x, double mean) {
+ // Larger values decrease range of high resolution and prevent overflow
+ // of a histogram on the console.
+ // The following formula adjusts kBase based on the buffer period length.
+ // Different threads have buffer periods ranging from 2 to 40. The
+ // formula below maps buffer period 2 to kBase = ~1, 4 to ~2, 20 to ~3, 40 to ~4.
+ // TODO: tighten this for higher means, the data still overflows
+ const double kBase = log(mean) / log(2.2);
+ const double power = floor(
+ log(abs(x - mean) / mean) / log(kBase)) + 2;
+ // do not round values close to the mean
+ if (power < 1) {
+ return x;
+ }
+ const int factor = static_cast<int>(pow(10, power));
+ return (static_cast<int>(x) * factor) / factor;
+}
+
+// TODO Make it return a std::string instead of modifying body
+// TODO: move this to ReportPerformance, probably make it a friend function
+// of PerformanceAnalysis
+void PerformanceAnalysis::reportPerformance(String8 *body, int author, log_hash_t hash,
+ int maxHeight) {
+ if (mHists.empty()) {
+ return;
+ }
+
+ // ms of active audio in displayed histogram
+ double elapsedMs = 0;
+ // starting timestamp of histogram
+ timestamp startingTs = mHists[0].first;
+
+ // histogram that stores counts keyed by time in ms (0.1 ms precision) instead of by Jiffy multiples
+ std::map<double, int> buckets;
+ for (const auto &shortHist: mHists) {
+ for (const auto &countPair : shortHist.second) {
+ const double ms = static_cast<double>(countPair.first) / kJiffyPerMs;
+ buckets[logRound(ms, mBufferPeriod.mMean)] += countPair.second;
+ elapsedMs += ms * countPair.second;
+ }
+ }
+
+ // the lengths of the underscore and space strings correspond to the maximum width of the histogram
+ static const int kLen = 200;
+ std::string underscores(kLen, '_');
+ std::string spaces(kLen, ' ');
+
+ auto it = buckets.begin();
+ double maxDelta = it->first;
+ int maxCount = it->second;
+ // Compute maximum values
+ while (++it != buckets.end()) {
+ if (it->first > maxDelta) {
+ maxDelta = it->first;
+ }
+ if (it->second > maxCount) {
+ maxCount = it->second;
+ }
+ }
+ int height = log2(maxCount) + 1; // maxCount > 0, safe to call log2
+ const int leftPadding = widthOf(1 << height);
+ const int bucketWidth = numberWidth(maxDelta, leftPadding);
+ int scalingFactor = 1;
+ // scale data if it exceeds maximum height
+ if (height > maxHeight) {
+ scalingFactor = (height + maxHeight) / maxHeight;
+ height /= scalingFactor;
+ }
+ body->appendFormat("\n%*s %3.2f %s", leftPadding + 11,
+ "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:");
+ body->appendFormat("\n%*s%d, %lld, %lld\n", leftPadding + 11,
+ "Thread, hash, starting timestamp: ", author,
+ static_cast<long long int>(hash), static_cast<long long int>(startingTs));
+ // write histogram label line with bucket values
+ body->appendFormat("\n%s", " ");
+ body->appendFormat("%*s", leftPadding, " ");
+ for (auto const &x : buckets) {
+ const int colWidth = numberWidth(x.first, leftPadding);
+ body->appendFormat("%*d", colWidth, x.second);
+ }
+ // write histogram ascii art
+ body->appendFormat("\n%s", " ");
+ for (int row = height * scalingFactor; row >= 0; row -= scalingFactor) {
+ const int value = 1 << row;
+ body->appendFormat("%.*s", leftPadding, spaces.c_str());
+ for (auto const &x : buckets) {
+ const int colWidth = numberWidth(x.first, leftPadding);
+ body->appendFormat("%.*s%s", colWidth - 1,
+ spaces.c_str(), x.second < value ? " " : "|");
+ }
+ body->appendFormat("\n%s", " ");
+ }
+ // print x-axis
+ const int columns = static_cast<int>(buckets.size());
+ body->appendFormat("%*c", leftPadding, ' ');
+ body->appendFormat("%.*s", (columns + 1) * bucketWidth, underscores.c_str());
+ body->appendFormat("\n%s", " ");
+
+ // write footer with bucket labels
+ body->appendFormat("%*s", leftPadding, " ");
+ for (auto const &x : buckets) {
+ const int colWidth = numberWidth(x.first, leftPadding);
+ body->appendFormat("%*.*f", colWidth, 1, x.first);
+ }
+ body->appendFormat("%.*s%s", bucketWidth, spaces.c_str(), "ms\n");
+
+ // Now report glitches
+ body->appendFormat("\ntime elapsed between glitches and glitch timestamps:\n");
+ for (const auto &outlier: mOutlierData) {
+ body->appendFormat("%lld: %lld\n", static_cast<long long>(outlier.first),
+ static_cast<long long>(outlier.second));
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// writes summary of performance into specified file descriptor
+void dump(int fd, int indent, PerformanceAnalysisMap &threadPerformanceAnalysis) {
+ String8 body;
+ const char* const kDirectory = "/data/misc/audioserver/";
+ for (auto & thread : threadPerformanceAnalysis) {
+ for (auto & hash: thread.second) {
+ PerformanceAnalysis& curr = hash.second;
+ // write performance data to console
+ curr.reportPerformance(&body, thread.first, hash.first);
+ if (!body.isEmpty()) {
+ dumpLine(fd, indent, body);
+ body.clear();
+ }
+ // write to file
+ writeToFile(curr.mHists, curr.mOutlierData, curr.mPeakTimestamps,
+ kDirectory, false, thread.first, hash.first);
+ }
+ }
+}
+
+
+// Writes a string into specified file descriptor
+void dumpLine(int fd, int indent, const String8 &body) {
+ dprintf(fd, "%.*s%s \n", indent, "", body.string());
+}
+
+} // namespace ReportPerformance
+
+} // namespace android
diff --git a/media/libnblog/ReportPerformance.cpp b/media/libnblog/ReportPerformance.cpp
new file mode 100644
index 0000000..827e731
--- /dev/null
+++ b/media/libnblog/ReportPerformance.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ReportPerformance"
+
+#include <fstream>
+#include <iostream>
+#include <queue>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sstream>
+#include <sys/prctl.h>
+#include <sys/time.h>
+#include <utility>
+#include <media/nblog/NBLog.h>
+#include <media/nblog/PerformanceAnalysis.h>
+#include <media/nblog/ReportPerformance.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+namespace android {
+
+namespace ReportPerformance {
+
+
+// TODO: use a function like this to extract logic from writeToFile
+// https://stackoverflow.com/a/9279620
+
+// Writes outlier intervals, timestamps, and histograms spanning long time intervals to file.
+// TODO: write data in binary format
+void writeToFile(const std::deque<std::pair<timestamp, Histogram>> &hists,
+ const std::deque<std::pair<msInterval, timestamp>> &outlierData,
+ const std::deque<timestamp> &peakTimestamps,
+ const char * directory, bool append, int author, log_hash_t hash) {
+
+ // TODO: remove old files, implement rotating files as in AudioFlinger.cpp
+
+ if (outlierData.empty() && hists.empty() && peakTimestamps.empty()) {
+ ALOGW("No data, returning.");
+ return;
+ }
+
+ std::stringstream outlierName;
+ std::stringstream histogramName;
+ std::stringstream peakName;
+
+ // get current time
+ char currTime[16]; //YYYYMMDDHHMMSS + '\0' + one unused
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ struct tm tm;
+ localtime_r(&tv.tv_sec, &tm);
+ strftime(currTime, sizeof(currTime), "%Y%m%d%H%M%S", &tm);
+
+ // generate file names
+ std::stringstream common;
+ common << author << "_" << hash << "_" << currTime << ".csv";
+
+ histogramName << directory << "histograms_" << common.str();
+ outlierName << directory << "outliers_" << common.str();
+ peakName << directory << "peaks_" << common.str();
+
+ std::ofstream hfs;
+ hfs.open(histogramName.str(), append ? std::ios::app : std::ios::trunc);
+ if (!hfs.is_open()) {
+ ALOGW("couldn't open file %s", histogramName.str().c_str());
+ return;
+ }
+ // each histogram is written as a line where the first value is the timestamp and
+ // subsequent values are pairs of buckets and counts. Each value is separated
+ // by a comma, and each histogram is separated by a newline.
+ for (auto hist = hists.begin(); hist != hists.end(); ++hist) {
+ hfs << hist->first << ", ";
+ for (auto bucket = hist->second.begin(); bucket != hist->second.end(); ++bucket) {
+ hfs << bucket->first / static_cast<double>(kJiffyPerMs)
+ << ", " << bucket->second;
+ if (std::next(bucket) != end(hist->second)) {
+ hfs << ", ";
+ }
+ }
+ if (std::next(hist) != end(hists)) {
+ hfs << "\n";
+ }
+ }
+ hfs.close();
+
+ std::ofstream ofs;
+ ofs.open(outlierName.str(), append ? std::ios::app : std::ios::trunc);
+ if (!ofs.is_open()) {
+ ALOGW("couldn't open file %s", outlierName.str().c_str());
+ return;
+ }
+ // outliers are written as pairs separated by newlines, where each
+ // pair's values are separated by a comma
+ for (const auto &outlier : outlierData) {
+ ofs << outlier.first << ", " << outlier.second << "\n";
+ }
+ ofs.close();
+
+ std::ofstream pfs;
+ pfs.open(peakName.str(), append ? std::ios::app : std::ios::trunc);
+ if (!pfs.is_open()) {
+ ALOGW("couldn't open file %s", peakName.str().c_str());
+ return;
+ }
+ // peaks are simply timestamps separated by commas
+ for (auto peak = peakTimestamps.begin(); peak != peakTimestamps.end(); ++peak) {
+ pfs << *peak;
+ if (std::next(peak) != end(peakTimestamps)) {
+ pfs << ", ";
+ }
+ }
+ pfs.close();
+}
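+
+// Illustrative output of writeToFile (file names depend on author, hash and wall-clock time):
+//   <directory>histograms_<author>_<hash>_<YYYYMMDDHHMMSS>.csv
+//     one line per histogram: "<timestamp>, <bucketMs>, <count>, <bucketMs>, <count>, ..."
+//   <directory>outliers_<author>_<hash>_<YYYYMMDDHHMMSS>.csv
+//     one "<elapsed>, <timestamp>" pair per line
+//   <directory>peaks_<author>_<hash>_<YYYYMMDDHHMMSS>.csv
+//     a single comma-separated list of peak timestamps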
+
+} // namespace ReportPerformance
+
+} // namespace android
diff --git a/media/libnblog/include/media/nblog/NBLog.h b/media/libnblog/include/media/nblog/NBLog.h
new file mode 100644
index 0000000..fb6f179
--- /dev/null
+++ b/media/libnblog/include/media/nblog/NBLog.h
@@ -0,0 +1,613 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Non-blocking event logger intended for safe communication between processes via shared memory
+
+#ifndef ANDROID_MEDIA_NBLOG_H
+#define ANDROID_MEDIA_NBLOG_H
+
+#include <deque>
+#include <map>
+#include <set>
+#include <vector>
+
+#include <audio_utils/fifo.h>
+#include <binder/IMemory.h>
+#include <media/nblog/PerformanceAnalysis.h>
+#include <media/nblog/ReportPerformance.h>
+#include <utils/Mutex.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class String8;
+
+class NBLog {
+
+public:
+
+ using log_hash_t = ReportPerformance::log_hash_t;
+
+ // FIXME Everything needed for client (writer API and registration) should be isolated
+ // from the rest of the implementation.
+ class Writer;
+ class Reader;
+
+ enum Event : uint8_t {
+ EVENT_RESERVED,
+ EVENT_STRING, // ASCII string, not NUL-terminated
+ // TODO: make timestamp optional
+ EVENT_TIMESTAMP, // clock_gettime(CLOCK_MONOTONIC)
+ EVENT_INTEGER, // integer value entry
+ EVENT_FLOAT, // floating point value entry
+ EVENT_PID, // process ID and process name
+ EVENT_AUTHOR, // author index (present in merged logs) tracks entry's
+ // original log
+ EVENT_START_FMT, // logFormat start event: entry includes format string,
+ // following entries contain format arguments
+ EVENT_HASH, // unique HASH of log origin, originates from hash of file name
+ // and line number
+ EVENT_HISTOGRAM_ENTRY_TS, // single datum for timestamp histogram
+ EVENT_AUDIO_STATE, // audio on/off event: logged on FastMixer::onStateChange call
+ EVENT_END_FMT, // end of logFormat argument list
+
+ EVENT_UPPER_BOUND, // to check for invalid events
+ };
+
+private:
+
+ // ---------------------------------------------------------------------------
+ // API for handling format entry operations
+
+ // a formatted entry has the following structure:
+ // * START_FMT entry, containing the format string
+ // * TIMESTAMP entry
+ // * HASH entry
+ // * author entry of the thread that generated it (optional, present in merged log)
+ // * format arg1
+ // * format arg2
+ // * ...
+ // * END_FMT entry
+
+ // entry representation in memory
+ struct entry {
+ const uint8_t type;
+ const uint8_t length;
+ const uint8_t data[0];
+ };
+
+ // entry tail representation (after data)
+ struct ending {
+ uint8_t length;
+ uint8_t next[0];
+ };
+
+ // entry iterator
+ class EntryIterator {
+ public:
+ EntryIterator();
+ explicit EntryIterator(const uint8_t *entry);
+ EntryIterator(const EntryIterator &other);
+
+ // dereference underlying entry
+ const entry& operator*() const;
+ const entry* operator->() const;
+ // advance to next entry
+ EntryIterator& operator++(); // ++i
+ // back to previous entry
+ EntryIterator& operator--(); // --i
+ EntryIterator next() const;
+ EntryIterator prev() const;
+ bool operator!=(const EntryIterator &other) const;
+ int operator-(const EntryIterator &other) const;
+
+ bool hasConsistentLength() const;
+ void copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const;
+ void copyData(uint8_t *dst) const;
+
+ template<typename T>
+ inline const T& payload() {
+ return *reinterpret_cast<const T *>(ptr + offsetof(entry, data));
+ }
+
+ inline operator const uint8_t*() const {
+ return ptr;
+ }
+
+ private:
+ const uint8_t *ptr;
+ };
+
+ class AbstractEntry {
+ public:
+
+ // Entry starting in the given pointer
+ explicit AbstractEntry(const uint8_t *entry);
+ virtual ~AbstractEntry() {}
+
+ // build concrete entry of appropriate class from pointer
+ static std::unique_ptr<AbstractEntry> buildEntry(const uint8_t *ptr);
+
+ // get format entry timestamp
+ virtual int64_t timestamp() const = 0;
+
+ // get format entry's unique id
+ virtual log_hash_t hash() const = 0;
+
+ // entry's author index (-1 if none present)
+ // a Merger has a vector of Readers, author simply points to the index of the
+ // Reader that originated the entry
+ // TODO consider changing to uint32_t
+ virtual int author() const = 0;
+
+ // copy entry, adding author before timestamp, returns iterator to end of entry
+ virtual EntryIterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
+ int author) const = 0;
+
+ protected:
+ // copies ordinary entry from src to dst, and returns length of entry
+ // size_t copyEntry(audio_utils_fifo_writer *dst, const iterator &it);
+ const uint8_t *mEntry;
+ };
+
+ class FormatEntry : public AbstractEntry {
+ public:
+ // explicit FormatEntry(const EntryIterator &it);
+ explicit FormatEntry(const uint8_t *ptr) : AbstractEntry(ptr) {}
+ virtual ~FormatEntry() {}
+
+ EntryIterator begin() const;
+
+ // Entry's format string
+ const char* formatString() const;
+
+ // Entry's format string length
+ size_t formatStringLength() const;
+
+ // Format arguments (excluding format string, timestamp and author)
+ EntryIterator args() const;
+
+ // get format entry timestamp
+ virtual int64_t timestamp() const override;
+
+ // get format entry's unique id
+ virtual log_hash_t hash() const override;
+
+ // entry's author index (-1 if none present)
+ // a Merger has a vector of Readers, author simply points to the index of the
+ // Reader that originated the entry
+ virtual int author() const override;
+
+ // copy entry, adding author before timestamp, returns size of original entry
+ virtual EntryIterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
+ int author) const override;
+
+ };
+
+ class HistogramEntry : public AbstractEntry {
+ public:
+ explicit HistogramEntry(const uint8_t *ptr) : AbstractEntry(ptr) {
+ }
+ virtual ~HistogramEntry() {}
+
+ virtual int64_t timestamp() const override;
+
+ virtual log_hash_t hash() const override;
+
+ virtual int author() const override;
+
+ virtual EntryIterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
+ int author) const override;
+
+ };
+
+ // ---------------------------------------------------------------------------
+
+ // representation of a single log entry in private memory
+ struct Entry {
+ Entry(Event event, const void *data, size_t length)
+ : mEvent(event), mLength(length), mData(data) { }
+ /*virtual*/ ~Entry() { }
+
+ // used during writing to format Entry information as follows:
+ // [type][length][data ... ][length]
+ int copyEntryDataAt(size_t offset) const;
+
+ private:
+ friend class Writer;
+ Event mEvent; // event type
+ uint8_t mLength; // length of additional data, 0 <= mLength <= kMaxLength
+ const void *mData; // event type-specific data
+ static const size_t kMaxLength = 255;
+ public:
+ // mEvent, mLength, mData[...], duplicate mLength
+ static const size_t kOverhead = sizeof(entry) + sizeof(ending);
+ // ending length of previous entry
+ static const ssize_t kPreviousLengthOffset = - sizeof(ending) +
+ offsetof(ending, length);
+ };
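+
+ // Size note (assuming no struct padding, which holds since entry and ending contain only
+ // uint8_t fields): sizeof(entry) == 2 and sizeof(ending) == 1, so kOverhead == 3 and
+ // kPreviousLengthOffset == -1, i.e. the byte immediately before an entry holds the previous
+ // entry's duplicated length, which is what enables backward iteration.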
+
+ struct HistTsEntry {
+ log_hash_t hash;
+ int64_t ts;
+ }; //TODO __attribute__((packed));
+
+ struct HistTsEntryWithAuthor {
+ log_hash_t hash;
+ int64_t ts;
+ int author;
+ }; //TODO __attribute__((packed));
+
+ struct HistIntEntry {
+ log_hash_t hash;
+ int value;
+ }; //TODO __attribute__((packed));
+
+ // representation of a single log entry in shared memory
+ // byte[0] mEvent
+ // byte[1] mLength
+ // byte[2] mData[0]
+ // ...
+ // byte[2+i] mData[i]
+ // ...
+ // byte[2+mLength-1] mData[mLength-1]
+ // byte[2+mLength] duplicate copy of mLength to permit reverse scan
+ // byte[3+mLength] start of next log entry
+
+ static void appendInt(String8 *body, const void *data);
+ static void appendFloat(String8 *body, const void *data);
+ static void appendPID(String8 *body, const void *data, size_t length);
+ static void appendTimestamp(String8 *body, const void *data);
+ static size_t fmtEntryLength(const uint8_t *data);
+ static String8 bufferDump(const uint8_t *buffer, size_t size);
+ static String8 bufferDump(const EntryIterator &it);
+public:
+
+ // Located in shared memory, must be POD.
+ // Exactly one process must explicitly call the constructor or use placement new.
+ // Since this is a POD, the destructor is empty and it is unnecessary to call it explicitly.
+ struct Shared {
+ Shared() /* mRear initialized via default constructor */ { }
+ /*virtual*/ ~Shared() { }
+
+ audio_utils_fifo_index mRear; // index one byte past the end of most recent Entry
+ char mBuffer[0]; // circular buffer for entries
+ };
+
+public:
+
+ // ---------------------------------------------------------------------------
+
+ // FIXME Timeline was intended to wrap Writer and Reader, but isn't actually used yet.
+ // For now it is just a namespace for sharedSize().
+ class Timeline : public RefBase {
+ public:
+#if 0
+ Timeline(size_t size, void *shared = NULL);
+ virtual ~Timeline();
+#endif
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices.
+ static size_t sharedSize(size_t size);
+
+#if 0
+ private:
+ friend class Writer;
+ friend class Reader;
+
+ const size_t mSize; // circular buffer size in bytes, must be a power of 2
+ bool mOwn; // whether I own the memory at mShared
+ Shared* const mShared; // pointer to shared memory
+#endif
+ };
+
+ // ---------------------------------------------------------------------------
+
+ // Writer is thread-safe with respect to Reader, but not with respect to multiple threads
+ // calling Writer methods. If you need multi-thread safety for writing, use LockedWriter.
+ class Writer : public RefBase {
+ public:
+ Writer(); // dummy nop implementation without shared memory
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
+ Writer(void *shared, size_t size);
+ Writer(const sp<IMemory>& iMemory, size_t size);
+
+ virtual ~Writer();
+
+ // FIXME needs comments, and some should be private
+ virtual void log(const char *string);
+ virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+ virtual void logvf(const char *fmt, va_list ap);
+ virtual void logTimestamp();
+ virtual void logTimestamp(const int64_t ts);
+ virtual void logInteger(const int x);
+ virtual void logFloat(const float x);
+ virtual void logPID();
+ virtual void logFormat(const char *fmt, log_hash_t hash, ...);
+ virtual void logVFormat(const char *fmt, log_hash_t hash, va_list ap);
+ virtual void logStart(const char *fmt);
+ virtual void logEnd();
+ virtual void logHash(log_hash_t hash);
+ virtual void logEventHistTs(Event event, log_hash_t hash);
+
+ virtual bool isEnabled() const;
+
+ // return value for all of these is the previous isEnabled()
+ virtual bool setEnabled(bool enabled); // but won't enable if no shared memory
+ bool enable() { return setEnabled(true); }
+ bool disable() { return setEnabled(false); }
+
+ sp<IMemory> getIMemory() const { return mIMemory; }
+
+ private:
+ // 0 <= length <= kMaxLength
+ // writes a single Entry to the FIFO
+ void log(Event event, const void *data, size_t length);
+ // checks validity of an event before calling log above this one
+ void log(const Entry *entry, bool trusted = false);
+
+ Shared* const mShared; // raw pointer to shared memory
+ sp<IMemory> mIMemory; // ref-counted version, initialized in constructor
+ // and then const
+ audio_utils_fifo * const mFifo; // FIFO itself, non-NULL
+ // unless constructor fails
+ audio_utils_fifo_writer * const mFifoWriter; // used to write to FIFO, non-NULL
+ // unless dummy constructor used
+ bool mEnabled; // whether to actually log
+
+ // cached pid and process name to use in %p format specifier
+ // total tag length is mPidTagSize and process name is not zero terminated
+ char *mPidTag;
+ size_t mPidTagSize;
+ };
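+
+ // Illustrative Writer usage (sketch only; names are hypothetical, not part of this API):
+ //     sp<IMemory> shared = ...; // at least Timeline::sharedSize(kLogSize) bytes,
+ //                               // with NBLog::Shared constructed in it exactly once
+ //     sp<NBLog::Writer> writer = new NBLog::Writer(shared, kLogSize);
+ //     writer->logTimestamp();
+ //     writer->logf("underruns=%d", nUnderruns);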
+
+ // ---------------------------------------------------------------------------
+
+ // Similar to Writer, but safe for multiple threads to call concurrently
+ class LockedWriter : public Writer {
+ public:
+ LockedWriter();
+ LockedWriter(void *shared, size_t size);
+
+ virtual void log(const char *string);
+ virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+ virtual void logvf(const char *fmt, va_list ap);
+ virtual void logTimestamp();
+ virtual void logTimestamp(const int64_t ts);
+ virtual void logInteger(const int x);
+ virtual void logFloat(const float x);
+ virtual void logPID();
+ virtual void logStart(const char *fmt);
+ virtual void logEnd();
+ virtual void logHash(log_hash_t hash);
+
+ virtual bool isEnabled() const;
+ virtual bool setEnabled(bool enabled);
+
+ private:
+ mutable Mutex mLock;
+ };
+
+ // ---------------------------------------------------------------------------
+
+ class Reader : public RefBase {
+ public:
+ // A snapshot of a reader's buffer
+ // This is raw data. No analysis has been done on it
+ class Snapshot {
+ public:
+ Snapshot() : mData(NULL), mLost(0) {}
+
+ Snapshot(size_t bufferSize) : mData(new uint8_t[bufferSize]) {}
+
+ ~Snapshot() { delete[] mData; }
+
+ // copy of the buffer
+ uint8_t *data() const { return mData; }
+
+ // amount of data lost (given by audio_utils_fifo_reader)
+ size_t lost() const { return mLost; }
+
+ // iterator to beginning of readable segment of snapshot
+ // data between begin and end has valid entries
+ EntryIterator begin() { return mBegin; }
+
+ // iterator to end of readable segment of snapshot
+ EntryIterator end() { return mEnd; }
+
+ private:
+ friend class MergeReader;
+ friend class Reader;
+ uint8_t *mData;
+ size_t mLost;
+ EntryIterator mBegin;
+ EntryIterator mEnd;
+ };
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
+ Reader(const void *shared, size_t size);
+ Reader(const sp<IMemory>& iMemory, size_t size);
+
+ virtual ~Reader();
+
+ // get a snapshot of the reader's FIFO buffer, effectively consuming the buffer
+ std::unique_ptr<Snapshot> getSnapshot();
+
+ bool isIMemory(const sp<IMemory>& iMemory) const;
+
+ protected:
+ // print a summary of the performance to the console
+ void dumpLine(const String8& timestamp, String8& body);
+ EntryIterator handleFormat(const FormatEntry &fmtEntry,
+ String8 *timestamp,
+ String8 *body);
+ int mFd; // file descriptor
+ int mIndent; // indentation level
+ int mLost; // bytes of data lost before buffer was read
+
+ private:
+ static const std::set<Event> startingTypes;
+ static const std::set<Event> endingTypes;
+
+ sp<IMemory> mIMemory; // ref-counted version, assigned only in constructor
+
+ /*const*/ Shared* const mShared; // raw pointer to shared memory; actually const, but not
+ // declared as const because the audio_utils_fifo() constructor takes a non-const pointer
+ audio_utils_fifo * const mFifo; // FIFO itself,
+ // non-NULL unless constructor fails
+ audio_utils_fifo_reader * const mFifoReader; // used to read from FIFO,
+ // non-NULL unless constructor fails
+
+ // Searches for the last entry whose type is in <types> in the range [front, back).
+ // back has to be entry-aligned. Returns nullptr if none is encountered.
+ static const uint8_t *findLastEntryOfTypes(const uint8_t *front, const uint8_t *back,
+ const std::set<Event> &types);
+
+ // dummy method for handling absent author entry
+ virtual void handleAuthor(const AbstractEntry& /*fmtEntry*/, String8* /*body*/) {}
+ };
+
+ // Wrapper for a reader with a name. Contains a pointer to the reader and a copy of the name.
+ class NamedReader {
+ public:
+ NamedReader() { mName[0] = '\0'; } // for Vector
+ NamedReader(const sp<NBLog::Reader>& reader, const char *name) :
+ mReader(reader)
+ { strlcpy(mName, name, sizeof(mName)); }
+ ~NamedReader() { }
+ const sp<NBLog::Reader>& reader() const { return mReader; }
+ const char* name() const { return mName; }
+
+ private:
+ sp<NBLog::Reader> mReader;
+ static const size_t kMaxName = 32;
+ char mName[kMaxName];
+ };
+
+ // ---------------------------------------------------------------------------
+
+ // This class is used to read data from each thread's individual FIFO in shared memory
+ // and write it to a single FIFO in local memory.
+ class Merger : public RefBase {
+ public:
+ Merger(const void *shared, size_t size);
+
+ virtual ~Merger() {}
+
+ void addReader(const NamedReader &reader);
+ // TODO add removeReader
+ void merge();
+
+ // FIXME This is returning a reference to a shared variable that needs a lock
+ const std::vector<NamedReader>& getNamedReaders() const;
+
+ private:
+ // vector of the readers the merger is supposed to merge from.
+ // every reader reads from a writer's buffer
+ // FIXME Needs to be protected by a lock
+ std::vector<NamedReader> mNamedReaders;
+
+ Shared * const mShared; // raw pointer to shared memory
+ std::unique_ptr<audio_utils_fifo> mFifo; // FIFO itself
+ std::unique_ptr<audio_utils_fifo_writer> mFifoWriter; // used to write to FIFO
+ };
+
+ // This class has a pointer to the FIFO in local memory which stores the merged
+ // data collected by NBLog::Merger from all NamedReaders. It is used to process
+ // this data and write the result to PerformanceAnalysis.
+ class MergeReader : public Reader {
+ public:
+ MergeReader(const void *shared, size_t size, Merger &merger);
+
+ void dump(int fd, int indent = 0);
+ // process a particular snapshot of the reader
+ void getAndProcessSnapshot(Snapshot & snap);
+ // call getSnapshot of the content of the reader's buffer and process the data
+ void getAndProcessSnapshot();
+
+ private:
+ // FIXME Needs to be protected by a lock,
+ // because even though our use of it is read-only there may be asynchronous updates
+ const std::vector<NamedReader>& mNamedReaders;
+
+ // analyzes, compresses and stores the merged data
+ // contains a separate instance for every author (thread), and for every source file
+ // location within each author
+ ReportPerformance::PerformanceAnalysisMap mThreadPerformanceAnalysis;
+
+ // handle author entry by looking up the author's name and appending it to the body
+ void handleAuthor(const AbstractEntry &fmtEntry, String8 *body);
+ };
+
+ // MergeThread is a thread that contains a Merger. It works as a retriggerable one-shot:
+ // when triggered, it stays awake for a period of time, during which it merges periodically;
+ // if retriggered, the timeout is reset.
+ // The thread is triggered on AudioFlinger binder activity.
+ class MergeThread : public Thread {
+ public:
+ MergeThread(Merger &merger, MergeReader &mergeReader);
+ virtual ~MergeThread() override;
+
+ // Reset timeout and activate thread to merge periodically if it's idle
+ void wakeup();
+
+ // Set timeout period until the merging thread goes idle again
+ void setTimeoutUs(int time);
+
+ private:
+ virtual bool threadLoop() override;
+
+ // the Merger that actually does the work of merging the logs
+ Merger& mMerger;
+
+ // the MergeReader used to process data merged by mMerger
+ MergeReader& mMergeReader;
+
+ // mutex for the condition variable
+ Mutex mMutex;
+
+ // condition variable to activate merging on timeout >= 0
+ Condition mCond;
+
+ // time left until the thread blocks again (in microseconds)
+ int mTimeoutUs;
+
+ // merging period when the thread is awake
+ static const int kThreadSleepPeriodUs = 1000000 /*1s*/;
+
+ // initial timeout value when triggered
+ static const int kThreadWakeupPeriodUs = 3000000 /*3s*/;
+ };
+
+}; // class NBLog
+
+// TODO put somewhere else
+static inline int64_t get_monotonic_ns() {
+ timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
+ return (uint64_t) ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
+ }
+ return 0; // should not happen.
+}
+
+} // namespace android
+
+#endif // ANDROID_MEDIA_NBLOG_H
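The Writer and Reader declared above talk through a lock-free FIFO placed in shared memory: a writer thread appends entries, and a reader later consumes them as a Snapshot. The following is a minimal sketch of that round trip, using only the declarations visible in this header; it is an illustration under stated assumptions, not code from the patch.

    // Minimal usage sketch (illustration only, not part of this patch).
    // Assumes "shared" points to a mapped region of at least
    // NBLog::Timeline::sharedSize(kSize) bytes; all names here are hypothetical.
    void exampleLogRoundTrip(void *shared, size_t kSize) {
        NBLog::LockedWriter writer(shared, kSize);  // Writer variant safe for concurrent callers
        writer.enable();                            // has no effect without shared memory
        writer.logTimestamp();                      // record a wakeup time
        writer.logf("period %d us", 5333);          // printf-style entry

        sp<NBLog::Reader> reader = new NBLog::Reader(shared, kSize);
        std::unique_ptr<NBLog::Reader::Snapshot> snap = reader->getSnapshot();
        // Entries between snap->begin() and snap->end() are valid; snap->lost()
        // reports how much data was overwritten before this read.
    }
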
diff --git a/media/libnblog/include/media/nblog/PerformanceAnalysis.h b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
new file mode 100644
index 0000000..ddfe9d6
--- /dev/null
+++ b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_PERFORMANCEANALYSIS_H
+#define ANDROID_MEDIA_PERFORMANCEANALYSIS_H
+
+#include <deque>
+#include <map>
+#include <vector>
+
+#include <media/nblog/ReportPerformance.h>
+
+namespace android {
+
+namespace ReportPerformance {
+
+class PerformanceAnalysis;
+
+// a map of PerformanceAnalysis instances
+// The outer key is for the thread, the inner key for the source file location.
+using PerformanceAnalysisMap = std::map<int, std::map<log_hash_t, PerformanceAnalysis>>;
+
+class PerformanceAnalysis {
+ // This class stores and analyzes audio processing wakeup timestamps from NBLog
+ // FIXME: currently, all performance data is stored in deques. Turn these into circular
+ // buffers.
+ // TODO: add a mutex.
+public:
+
+ PerformanceAnalysis() {};
+
+ friend void dump(int fd, int indent,
+ PerformanceAnalysisMap &threadPerformanceAnalysis);
+
+ // Called in the case of an audio on/off event, e.g., EVENT_AUDIO_STATE.
+ // Used to discard idle time intervals
+ void handleStateChange();
+
+ // Writes wakeup timestamp entry to log and runs analysis
+ void logTsEntry(timestamp ts);
+
+ // FIXME: make the peak detector and storeOutlierData a single function
+ // Input: mOutlierData. Looks at the time elapsed between outliers,
+ // finds significant changes in the distribution,
+ // and writes the timestamps of significant changes to mPeakTimestamps
+ bool detectAndStorePeak(msInterval delta, timestamp ts);
+
+ // stores timestamps of intervals above a threshold: these are assumed outliers.
+ // writes to mOutlierData <time elapsed since previous outlier, outlier timestamp>
+ bool detectAndStoreOutlier(const msInterval diffMs);
+
+ // Generates a string of analysis of the buffer periods and prints to console
+ // FIXME: move this data visualization to a separate class. Model/view/controller
+ void reportPerformance(String8 *body, int author, log_hash_t hash,
+ int maxHeight = 10);
+
+private:
+
+ // TODO use a circular buffer for the deques and vectors below
+
+ // stores outlier analysis:
+ // <elapsed time between outliers in ms, outlier beginning timestamp>
+ std::deque<std::pair<msInterval, timestamp>> mOutlierData;
+
+ // stores each timestamp at which a peak was detected
+ // a peak is a moment at which the average outlier interval changed significantly
+ std::deque<timestamp> mPeakTimestamps;
+
+ // stores buffer period histograms with timestamp of first sample
+ std::deque<std::pair<timestamp, Histogram>> mHists;
+
+ // Parameters used when detecting outliers
+ struct BufferPeriod {
+ double mMean = -1; // average time between audio processing wakeups
+ double mOutlierFactor = -1; // values > mMean * mOutlierFactor are outliers
+ double mOutlier = -1; // this is set to mMean * mOutlierFactor
+ timestamp mPrevTs = -1; // previous timestamp
+ } mBufferPeriod;
+
+ // capacity allocated to data structures
+ struct MaxLength {
+ size_t Hists; // number of histograms stored in memory
+ size_t Outliers; // number of values stored in outlier array
+ size_t Peaks; // number of values stored in peak array
+ int HistTimespanMs; // maximum histogram timespan
+ };
+ // These values allow for 10 hours of data, assuming a glitch and a peak
+ // as often as every 3 seconds
+ static constexpr MaxLength kMaxLength = {.Hists = 60, .Outliers = 12000,
+ .Peaks = 12000, .HistTimespanMs = 10 * kSecPerMin * kMsPerSec };
+
+ // these variables ensure continuity while analyzing the timestamp
+ // series one sample at a time.
+ // TODO: change this to a running variance/mean class
+ struct OutlierDistribution {
+ msInterval mMean = 0; // sample mean since previous peak
+ msInterval mSd = 0; // sample sd since previous peak
+ msInterval mElapsed = 0; // time since previous detected outlier
+ const int kMaxDeviation = 5; // peak threshold, in standard deviations from the mean
+ msInterval mTypicalDiff = 0; // global mean of outliers
+ double mN = 0; // length of sequence since the last peak
+ double mM2 = 0; // used to calculate sd
+ } mOutlierDistribution;
+};
+
+void dump(int fd, int indent, PerformanceAnalysisMap &threadPerformanceAnalysis);
+void dumpLine(int fd, int indent, const String8 &body);
+
+} // namespace ReportPerformance
+
+} // namespace android
+
+#endif // ANDROID_MEDIA_PERFORMANCEANALYSIS_H
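The OutlierDistribution fields (mN, mMean, mM2, mSd) suggest a Welford-style running mean and variance over the interval series, with a peak flagged when a value strays more than kMaxDeviation standard deviations from the running mean. The sketch below spells out that update in isolation; it is only an illustration of the math implied by the struct, not the actual detectAndStorePeak() implementation.

    // Welford-style running statistics, as implied by OutlierDistribution.
    // Illustration only; the real peak detector may differ in details.
    #include <cmath>

    struct RunningStats {
        double n = 0, mean = 0, m2 = 0;

        // Returns true if x lies more than kMaxDeviation standard deviations
        // from the mean of the samples seen so far, then folds x in.
        bool addAndCheckPeak(double x, double kMaxDeviation = 5) {
            bool peak = false;
            if (n >= 2) {
                double sd = std::sqrt(m2 / (n - 1));
                peak = sd > 0 && std::fabs(x - mean) > kMaxDeviation * sd;
            }
            n += 1;
            double delta = x - mean;
            mean += delta / n;
            m2 += delta * (x - mean);   // second factor uses the updated mean
            return peak;
        }
    };
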
diff --git a/media/libnblog/include/media/nblog/ReportPerformance.h b/media/libnblog/include/media/nblog/ReportPerformance.h
new file mode 100644
index 0000000..ec0842f
--- /dev/null
+++ b/media/libnblog/include/media/nblog/ReportPerformance.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_REPORTPERFORMANCE_H
+#define ANDROID_MEDIA_REPORTPERFORMANCE_H
+
+#include <deque>
+#include <map>
+#include <vector>
+
+namespace android {
+
+// The String8 class is used by the reportPerformance function
+class String8;
+
+namespace ReportPerformance {
+
+constexpr int kMsPerSec = 1000;
+constexpr int kSecPerMin = 60;
+
+constexpr int kJiffyPerMs = 10; // number of jiffies (histogram time units) per millisecond
+
+// stores a histogram: key: observed buffer period (multiple of jiffy). value: count
+using Histogram = std::map<int, int>;
+
+using msInterval = double;
+using jiffyInterval = double;
+
+using timestamp = int64_t;
+
+using log_hash_t = uint64_t;
+
+static inline int deltaMs(int64_t ns1, int64_t ns2) {
+ return (ns2 - ns1) / (1000 * 1000);
+}
+
+static inline int deltaJiffy(int64_t ns1, int64_t ns2) {
+ return (kJiffyPerMs * (ns2 - ns1)) / (1000 * 1000);
+}
+
+static inline uint32_t log2(uint32_t x) {
+ // This works for x > 0
+ return 31 - __builtin_clz(x);
+}
+
+// Writes outlier intervals, timestamps, peaks timestamps, and histograms to a file.
+void writeToFile(const std::deque<std::pair<timestamp, Histogram>> &hists,
+ const std::deque<std::pair<msInterval, timestamp>> &outlierData,
+ const std::deque<timestamp> &peakTimestamps,
+ const char * kDirectory, bool append, int author, log_hash_t hash);
+
+} // namespace ReportPerformance
+
+} // namespace android
+
+#endif // ANDROID_MEDIA_REPORTPERFORMANCE_H
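Here deltaJiffy() converts a nanosecond interval into the histogram's time unit (kJiffyPerMs buckets per millisecond), and a Histogram simply counts how often each bucket is observed. A short sketch of folding consecutive wakeup timestamps into a Histogram, assuming this header is included; the helper name below is hypothetical.

    // Hypothetical helper: bucket consecutive wakeup timestamps (in ns) into a
    // Histogram keyed by buffer period measured in jiffies.
    #include <vector>

    ReportPerformance::Histogram makeHistogram(const std::vector<int64_t> &wakeupsNs) {
        ReportPerformance::Histogram hist;
        for (size_t i = 1; i < wakeupsNs.size(); i++) {
            const int period = ReportPerformance::deltaJiffy(wakeupsNs[i - 1], wakeupsNs[i]);
            ++hist[period];   // std::map value-initializes missing keys to 0
        }
        return hist;
    }
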
diff --git a/media/libstagefright/AACExtractor.cpp b/media/libstagefright/AACExtractor.cpp
deleted file mode 100644
index 7449aa7..0000000
--- a/media/libstagefright/AACExtractor.cpp
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "AACExtractor"
-#include <utils/Log.h>
-
-#include "include/AACExtractor.h"
-#include "include/avc_utils.h"
-
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class AACSource : public MediaSource {
-public:
- AACSource(const sp<DataSource> &source,
- const sp<MetaData> &meta,
- const Vector<uint64_t> &offset_vector,
- int64_t frame_duration_us);
-
- virtual status_t start(MetaData *params = NULL);
- virtual status_t stop();
-
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
- virtual ~AACSource();
-
-private:
- static const size_t kMaxFrameSize;
- sp<DataSource> mDataSource;
- sp<MetaData> mMeta;
-
- off64_t mOffset;
- int64_t mCurrentTimeUs;
- bool mStarted;
- MediaBufferGroup *mGroup;
-
- Vector<uint64_t> mOffsetVector;
- int64_t mFrameDurationUs;
-
- AACSource(const AACSource &);
- AACSource &operator=(const AACSource &);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-// Returns the sample rate based on the sampling frequency index
-uint32_t get_sample_rate(const uint8_t sf_index)
-{
- static const uint32_t sample_rates[] =
- {
- 96000, 88200, 64000, 48000, 44100, 32000,
- 24000, 22050, 16000, 12000, 11025, 8000
- };
-
- if (sf_index < sizeof(sample_rates) / sizeof(sample_rates[0])) {
- return sample_rates[sf_index];
- }
-
- return 0;
-}
-
-// Returns the frame length in bytes as described in an ADTS header starting at the given offset,
-// or 0 if the size can't be read due to an error in the header or a read failure.
-// The returned value is the AAC frame size with the ADTS header length (regardless of
-// the presence of the CRC).
-// If headerSize is non-NULL, it will be used to return the size of the header of this ADTS frame.
-static size_t getAdtsFrameLength(const sp<DataSource> &source, off64_t offset, size_t* headerSize) {
-
- const size_t kAdtsHeaderLengthNoCrc = 7;
- const size_t kAdtsHeaderLengthWithCrc = 9;
-
- size_t frameSize = 0;
-
- uint8_t syncword[2];
- if (source->readAt(offset, &syncword, 2) != 2) {
- return 0;
- }
- if ((syncword[0] != 0xff) || ((syncword[1] & 0xf6) != 0xf0)) {
- return 0;
- }
-
- uint8_t protectionAbsent;
- if (source->readAt(offset + 1, &protectionAbsent, 1) < 1) {
- return 0;
- }
- protectionAbsent &= 0x1;
-
- uint8_t header[3];
- if (source->readAt(offset + 3, &header, 3) < 3) {
- return 0;
- }
-
- frameSize = (header[0] & 0x3) << 11 | header[1] << 3 | header[2] >> 5;
-
- // protectionAbsent is 0 if there is CRC
- size_t headSize = protectionAbsent ? kAdtsHeaderLengthNoCrc : kAdtsHeaderLengthWithCrc;
- if (headSize > frameSize) {
- return 0;
- }
- if (headerSize != NULL) {
- *headerSize = headSize;
- }
-
- return frameSize;
-}
-
-AACExtractor::AACExtractor(
- const sp<DataSource> &source, const sp<AMessage> &_meta)
- : mDataSource(source),
- mInitCheck(NO_INIT),
- mFrameDurationUs(0) {
- sp<AMessage> meta = _meta;
-
- if (meta == NULL) {
- String8 mimeType;
- float confidence;
- sp<AMessage> _meta;
-
- if (!SniffAAC(mDataSource, &mimeType, &confidence, &meta)) {
- return;
- }
- }
-
- int64_t offset;
- CHECK(meta->findInt64("offset", &offset));
-
- uint8_t profile, sf_index, channel, header[2];
- if (mDataSource->readAt(offset + 2, &header, 2) < 2) {
- return;
- }
-
- profile = (header[0] >> 6) & 0x3;
- sf_index = (header[0] >> 2) & 0xf;
- uint32_t sr = get_sample_rate(sf_index);
- if (sr == 0) {
- return;
- }
- channel = (header[0] & 0x1) << 2 | (header[1] >> 6);
-
- mMeta = MakeAACCodecSpecificData(profile, sf_index, channel);
-
- off64_t streamSize, numFrames = 0;
- size_t frameSize = 0;
- int64_t duration = 0;
-
- if (mDataSource->getSize(&streamSize) == OK) {
- while (offset < streamSize) {
- if ((frameSize = getAdtsFrameLength(source, offset, NULL)) == 0) {
- ALOGW("prematured AAC stream (%lld vs %lld)",
- (long long)offset, (long long)streamSize);
- break;
- }
-
- mOffsetVector.push(offset);
-
- offset += frameSize;
- numFrames ++;
- }
-
- // Round up and get the duration
- mFrameDurationUs = (1024 * 1000000ll + (sr - 1)) / sr;
- duration = numFrames * mFrameDurationUs;
- mMeta->setInt64(kKeyDuration, duration);
- }
-
- mInitCheck = OK;
-}
-
-AACExtractor::~AACExtractor() {
-}
-
-sp<MetaData> AACExtractor::getMetaData() {
- sp<MetaData> meta = new MetaData;
-
- if (mInitCheck != OK) {
- return meta;
- }
-
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC_ADTS);
-
- return meta;
-}
-
-size_t AACExtractor::countTracks() {
- return mInitCheck == OK ? 1 : 0;
-}
-
-sp<IMediaSource> AACExtractor::getTrack(size_t index) {
- if (mInitCheck != OK || index != 0) {
- return NULL;
- }
-
- return new AACSource(mDataSource, mMeta, mOffsetVector, mFrameDurationUs);
-}
-
-sp<MetaData> AACExtractor::getTrackMetaData(size_t index, uint32_t /* flags */) {
- if (mInitCheck != OK || index != 0) {
- return NULL;
- }
-
- return mMeta;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-// 8192 = 2^13, 13bit AAC frame size (in bytes)
-const size_t AACSource::kMaxFrameSize = 8192;
-
-AACSource::AACSource(
- const sp<DataSource> &source, const sp<MetaData> &meta,
- const Vector<uint64_t> &offset_vector,
- int64_t frame_duration_us)
- : mDataSource(source),
- mMeta(meta),
- mOffset(0),
- mCurrentTimeUs(0),
- mStarted(false),
- mGroup(NULL),
- mOffsetVector(offset_vector),
- mFrameDurationUs(frame_duration_us) {
-}
-
-AACSource::~AACSource() {
- if (mStarted) {
- stop();
- }
-}
-
-status_t AACSource::start(MetaData * /* params */) {
- CHECK(!mStarted);
-
- if (mOffsetVector.empty()) {
- mOffset = 0;
- } else {
- mOffset = mOffsetVector.itemAt(0);
- }
-
- mCurrentTimeUs = 0;
- mGroup = new MediaBufferGroup;
- mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
- mStarted = true;
-
- return OK;
-}
-
-status_t AACSource::stop() {
- CHECK(mStarted);
-
- delete mGroup;
- mGroup = NULL;
-
- mStarted = false;
- return OK;
-}
-
-sp<MetaData> AACSource::getFormat() {
- return mMeta;
-}
-
-status_t AACSource::read(
- MediaBuffer **out, const ReadOptions *options) {
- *out = NULL;
-
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- if (options && options->getSeekTo(&seekTimeUs, &mode)) {
- if (mFrameDurationUs > 0) {
- int64_t seekFrame = seekTimeUs / mFrameDurationUs;
- mCurrentTimeUs = seekFrame * mFrameDurationUs;
-
- mOffset = mOffsetVector.itemAt(seekFrame);
- }
- }
-
- size_t frameSize, frameSizeWithoutHeader, headerSize;
- if ((frameSize = getAdtsFrameLength(mDataSource, mOffset, &headerSize)) == 0) {
- return ERROR_END_OF_STREAM;
- }
-
- MediaBuffer *buffer;
- status_t err = mGroup->acquire_buffer(&buffer);
- if (err != OK) {
- return err;
- }
-
- frameSizeWithoutHeader = frameSize - headerSize;
- if (mDataSource->readAt(mOffset + headerSize, buffer->data(),
- frameSizeWithoutHeader) != (ssize_t)frameSizeWithoutHeader) {
- buffer->release();
- buffer = NULL;
-
- return ERROR_IO;
- }
-
- buffer->set_range(0, frameSizeWithoutHeader);
- buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
- buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
-
- mOffset += frameSize;
- mCurrentTimeUs += mFrameDurationUs;
-
- *out = buffer;
- return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-bool SniffAAC(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *meta) {
- off64_t pos = 0;
-
- for (;;) {
- uint8_t id3header[10];
- if (source->readAt(pos, id3header, sizeof(id3header))
- < (ssize_t)sizeof(id3header)) {
- return false;
- }
-
- if (memcmp("ID3", id3header, 3)) {
- break;
- }
-
- // Skip the ID3v2 header.
-
- size_t len =
- ((id3header[6] & 0x7f) << 21)
- | ((id3header[7] & 0x7f) << 14)
- | ((id3header[8] & 0x7f) << 7)
- | (id3header[9] & 0x7f);
-
- len += 10;
-
- pos += len;
-
- ALOGV("skipped ID3 tag, new starting offset is %lld (0x%016llx)",
- (long long)pos, (long long)pos);
- }
-
- uint8_t header[2];
-
- if (source->readAt(pos, &header, 2) != 2) {
- return false;
- }
-
- // ADTS syncword
- if ((header[0] == 0xff) && ((header[1] & 0xf6) == 0xf0)) {
- *mimeType = MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
- *confidence = 0.2;
-
- *meta = new AMessage;
- (*meta)->setInt64("offset", pos);
-
- return true;
- }
-
- return false;
-}
-
-} // namespace android
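For reference, the frame-size computation in the removed getAdtsFrameLength() follows the ADTS layout: the 13-bit aac_frame_length field, which includes the ADTS header itself, spans the low 2 bits of header byte 3, all of byte 4, and the high 3 bits of byte 5. Restated as a standalone sketch (not part of the patch):

    // Standalone restatement of the bit extraction done by the removed
    // getAdtsFrameLength(); hdr points at the start of a 7-byte ADTS header.
    static inline size_t adtsFrameLength(const uint8_t *hdr) {
        return ((hdr[3] & 0x3) << 11) | (hdr[4] << 3) | (hdr[5] >> 5);
    }
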
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index 8b1e1c3..2ea5fcd 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -30,8 +30,8 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
+#include <media/MediaSource.h>
#include <media/mediarecorder.h>
namespace android {
@@ -67,7 +67,7 @@
}
-status_t AACWriter::addSource(const sp<IMediaSource> &source) {
+status_t AACWriter::addSource(const sp<MediaSource> &source) {
if (mInitCheck != OK) {
return mInitCheck;
}
@@ -294,7 +294,7 @@
prctl(PR_SET_NAME, (unsigned long)"AACWriterThread", 0, 0, 0);
while (!mDone && err == OK) {
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
err = mSource->read(&buffer);
if (err != OK) {
@@ -316,7 +316,7 @@
}
int32_t isCodecSpecific = 0;
- if (buffer->meta_data()->findInt32(kKeyIsCodecConfig, &isCodecSpecific) && isCodecSpecific) {
+ if (buffer->meta_data().findInt32(kKeyIsCodecConfig, &isCodecSpecific) && isCodecSpecific) {
ALOGV("Drop codec specific info buffer");
buffer->release();
buffer = NULL;
@@ -324,7 +324,7 @@
}
int64_t timestampUs;
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &timestampUs));
+ CHECK(buffer->meta_data().findInt64(kKeyTime, &timestampUs));
if (timestampUs > mEstimatedDurationUs) {
mEstimatedDurationUs = timestampUs;
}
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index c44e868..7f39d10 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -28,8 +28,7 @@
#include <media/stagefright/ACodec.h>
-#include <binder/MemoryDealer.h>
-
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -38,12 +37,12 @@
#include <media/stagefright/BufferProducerWrapper.h>
#include <media/stagefright/MediaCodec.h>
-#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/PersistentSurface.h>
#include <media/stagefright/SurfaceUtils.h>
#include <media/hardware/HardwareAPI.h>
+#include <media/MediaBufferHolder.h>
#include <media/OMXBuffer.h>
#include <media/omx/1.0/WOmxNode.h>
@@ -55,7 +54,6 @@
#include <media/openmax/OMX_IndexExt.h>
#include <media/openmax/OMX_AsString.h>
-#include "include/avc_utils.h"
#include "include/ACodecBufferChannel.h"
#include "include/DataConverter.h"
#include "include/SecureBuffer.h"
@@ -127,6 +125,32 @@
}
}
+static OMX_VIDEO_CONTROLRATETYPE getVideoBitrateMode(const sp<AMessage> &msg) {
+ int32_t tmp;
+ if (msg->findInt32("bitrate-mode", &tmp)) {
+ // explicitly translate from MediaCodecInfo.EncoderCapabilities.
+ // BITRATE_MODE_* into OMX bitrate mode.
+ switch (tmp) {
+ //BITRATE_MODE_CQ
+ case 0: return OMX_Video_ControlRateConstantQuality;
+ //BITRATE_MODE_VBR
+ case 1: return OMX_Video_ControlRateVariable;
+ //BITRATE_MODE_CBR
+ case 2: return OMX_Video_ControlRateConstant;
+ default: break;
+ }
+ }
+ return OMX_Video_ControlRateVariable;
+}
+
+static bool findVideoBitrateControlInfo(const sp<AMessage> &msg,
+ OMX_VIDEO_CONTROLRATETYPE *mode, int32_t *bitrate, int32_t *quality) {
+ *mode = getVideoBitrateMode(msg);
+ bool isCQ = (*mode == OMX_Video_ControlRateConstantQuality);
+ return (!isCQ && msg->findInt32("bitrate", bitrate))
+ || (isCQ && msg->findInt32("quality", quality));
+}
+
struct MessageList : public RefBase {
MessageList() {
}
@@ -528,6 +552,7 @@
mNativeWindowUsageBits(0),
mLastNativeWindowDataSpace(HAL_DATASPACE_UNKNOWN),
mIsVideo(false),
+ mIsImage(false),
mIsEncoder(false),
mFatalError(false),
mShutdownInProgress(false),
@@ -542,7 +567,7 @@
mMetadataBuffersToSubmit(0),
mNumUndequeuedBuffers(0),
mRepeatFrameDelayUs(-1ll),
- mMaxPtsGapUs(-1ll),
+ mMaxPtsGapUs(0ll),
mMaxFps(-1),
mFps(-1.0),
mCaptureFps(-1.0),
@@ -553,6 +578,8 @@
mDescribeHDRStaticInfoIndex((OMX_INDEXTYPE)0),
mStateGeneration(0),
mVendorExtensionsStatus(kExtensionsUnchecked) {
+ memset(&mLastHDRStaticInfo, 0, sizeof(mLastHDRStaticInfo));
+
mUninitializedState = new UninitializedState(this);
mLoadedState = new LoadedState(this);
mLoadedToIdleState = new LoadedToIdleState(this);
@@ -575,8 +602,6 @@
memset(&mLastNativeWindowCrop, 0, sizeof(mLastNativeWindowCrop));
changeState(mUninitializedState);
-
- mTrebleFlag = false;
}
ACodec::~ACodec() {
@@ -828,11 +853,7 @@
status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
- if (getTrebleFlag()) {
- CHECK(mAllocator[portIndex] == NULL);
- } else {
- CHECK(mDealer[portIndex] == NULL);
- }
+ CHECK(mAllocator[portIndex] == NULL);
CHECK(mBuffers[portIndex].isEmpty());
status_t err;
@@ -874,7 +895,10 @@
}
}
- size_t alignment = MemoryDealer::getAllocationAlignment();
+ size_t alignment = 32; // This is the value currently returned by
+ // MemoryDealer::getAllocationAlignment().
+ // TODO: Fix this when Treble has
+ // MemoryHeap/MemoryDealer.
ALOGV("[%s] Allocating %u buffers of size %zu (from %u using %s) on %s port",
mComponentName.c_str(),
@@ -896,18 +920,15 @@
}
if (mode != IOMX::kPortModePresetSecureBuffer) {
- if (getTrebleFlag()) {
- mAllocator[portIndex] = TAllocator::getService("ashmem");
- if (mAllocator[portIndex] == nullptr) {
- ALOGE("hidl allocator on port %d is null",
- (int)portIndex);
- return NO_MEMORY;
- }
- } else {
- size_t totalSize = def.nBufferCountActual *
- (alignedSize + alignedConvSize);
- mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+ mAllocator[portIndex] = TAllocator::getService("ashmem");
+ if (mAllocator[portIndex] == nullptr) {
+ ALOGE("hidl allocator on port %d is null",
+ (int)portIndex);
+ return NO_MEMORY;
}
+ // TODO: When Treble has MemoryHeap/MemoryDealer, we should
+ // specify the heap size to be
+ // def.nBufferCountActual * (alignedSize + alignedConvSize).
}
const sp<AMessage> &format =
@@ -936,23 +957,55 @@
: new SecureBuffer(format, native_handle, bufSize);
info.mCodecData = info.mData;
} else {
- if (getTrebleFlag()) {
+ bool success;
+ auto transStatus = mAllocator[portIndex]->allocate(
+ bufSize,
+ [&success, &hidlMemToken](
+ bool s,
+ hidl_memory const& m) {
+ success = s;
+ hidlMemToken = m;
+ });
+
+ if (!transStatus.isOk()) {
+ ALOGE("hidl's AshmemAllocator failed at the "
+ "transport: %s",
+ transStatus.description().c_str());
+ return NO_MEMORY;
+ }
+ if (!success) {
+ return NO_MEMORY;
+ }
+ hidlMem = mapMemory(hidlMemToken);
+ if (hidlMem == nullptr) {
+ return NO_MEMORY;
+ }
+ err = mOMXNode->useBuffer(
+ portIndex, hidlMemToken, &info.mBufferID);
+
+ if (mode == IOMX::kPortModeDynamicANWBuffer) {
+ VideoNativeMetadata* metaData = (VideoNativeMetadata*)(
+ (void*)hidlMem->getPointer());
+ metaData->nFenceFd = -1;
+ }
+
+ info.mCodecData = new SharedMemoryBuffer(
+ format, hidlMem);
+ info.mCodecRef = hidlMem;
+
+ // if we require conversion, allocate conversion buffer for client use;
+ // otherwise, reuse codec buffer
+ if (mConverter[portIndex] != NULL) {
+ CHECK_GT(conversionBufferSize, (size_t)0);
bool success;
- auto transStatus = mAllocator[portIndex]->allocate(
- bufSize,
+ mAllocator[portIndex]->allocate(
+ conversionBufferSize,
[&success, &hidlMemToken](
bool s,
hidl_memory const& m) {
success = s;
hidlMemToken = m;
});
-
- if (!transStatus.isOk()) {
- ALOGE("hidl's AshmemAllocator failed at the "
- "transport: %s",
- transStatus.description().c_str());
- return NO_MEMORY;
- }
if (!success) {
return NO_MEMORY;
}
@@ -960,67 +1013,8 @@
if (hidlMem == nullptr) {
return NO_MEMORY;
}
- err = mOMXNode->useBuffer(
- portIndex, hidlMemToken, &info.mBufferID);
- } else {
- mem = mDealer[portIndex]->allocate(bufSize);
- if (mem == NULL || mem->pointer() == NULL) {
- return NO_MEMORY;
- }
-
- err = mOMXNode->useBuffer(
- portIndex, mem, &info.mBufferID);
- }
-
- if (mode == IOMX::kPortModeDynamicANWBuffer) {
- VideoNativeMetadata* metaData = (VideoNativeMetadata*)(
- getTrebleFlag() ?
- (void*)hidlMem->getPointer() : mem->pointer());
- metaData->nFenceFd = -1;
- }
-
- if (getTrebleFlag()) {
- info.mCodecData = new SharedMemoryBuffer(
- format, hidlMem);
- info.mCodecRef = hidlMem;
- } else {
- info.mCodecData = new SharedMemoryBuffer(
- format, mem);
- info.mCodecRef = mem;
- }
-
- // if we require conversion, allocate conversion buffer for client use;
- // otherwise, reuse codec buffer
- if (mConverter[portIndex] != NULL) {
- CHECK_GT(conversionBufferSize, (size_t)0);
- if (getTrebleFlag()) {
- bool success;
- mAllocator[portIndex]->allocate(
- conversionBufferSize,
- [&success, &hidlMemToken](
- bool s,
- hidl_memory const& m) {
- success = s;
- hidlMemToken = m;
- });
- if (!success) {
- return NO_MEMORY;
- }
- hidlMem = mapMemory(hidlMemToken);
- if (hidlMem == nullptr) {
- return NO_MEMORY;
- }
- info.mData = new SharedMemoryBuffer(format, hidlMem);
- info.mMemRef = hidlMem;
- } else {
- mem = mDealer[portIndex]->allocate(
- conversionBufferSize);
- if (mem == NULL|| mem->pointer() == NULL) {
- return NO_MEMORY;
- }
- info.mData = new SharedMemoryBuffer(format, mem);
- info.mMemRef = mem;
- }
+ info.mData = new SharedMemoryBuffer(format, hidlMem);
+ info.mMemRef = hidlMem;
} else {
info.mData = info.mCodecData;
info.mMemRef = info.mCodecRef;
@@ -1581,11 +1575,7 @@
}
}
- if (getTrebleFlag()) {
- mAllocator[portIndex].clear();
- } else {
- mDealer[portIndex].clear();
- }
+ mAllocator[portIndex].clear();
return err;
}
@@ -1722,6 +1712,8 @@
mConfigFormat = msg;
mIsEncoder = encoder;
+ mIsVideo = !strncasecmp(mime, "video/", 6);
+ mIsImage = !strncasecmp(mime, "image/", 6);
mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
@@ -1732,19 +1724,27 @@
return err;
}
- int32_t bitRate = 0;
- // FLAC encoder doesn't need a bitrate, other encoders do
- if (encoder && strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)
- && !msg->findInt32("bitrate", &bitRate)) {
- return INVALID_OPERATION;
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode;
+ int32_t bitrate = 0, quality;
+ // FLAC encoder or video encoder in constant quality mode doesn't need a
+ // bitrate, other encoders do.
+ if (encoder) {
+ if (mIsVideo || mIsImage) {
+ if (!findVideoBitrateControlInfo(msg, &bitrateMode, &bitrate, &quality)) {
+ return INVALID_OPERATION;
+ }
+ } else if (strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)
+ && !msg->findInt32("bitrate", &bitrate)) {
+ return INVALID_OPERATION;
+ }
}
// propagate bitrate to the output so that the muxer has it
- if (encoder && msg->findInt32("bitrate", &bitRate)) {
+ if (encoder && msg->findInt32("bitrate", &bitrate)) {
// Technically ISO spec says that 'bitrate' should be 0 for VBR even though it is the
// average bitrate. We've been setting both bitrate and max-bitrate to this same value.
- outputFormat->setInt32("bitrate", bitRate);
- outputFormat->setInt32("max-bitrate", bitRate);
+ outputFormat->setInt32("bitrate", bitrate);
+ outputFormat->setInt32("max-bitrate", bitrate);
}
int32_t storeMeta;
@@ -1801,9 +1801,7 @@
// Only enable metadata mode on encoder output if encoder can prepend
// sps/pps to idr frames, since in metadata mode the bitstream is in an
// opaque handle, to which we don't have access.
- int32_t video = !strncasecmp(mime, "video/", 6);
- mIsVideo = video;
- if (encoder && video) {
+ if (encoder && mIsVideo) {
OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
&& msg->findInt32("android._store-metadata-in-buffers-output", &storeMeta)
&& storeMeta != 0);
@@ -1825,16 +1823,21 @@
// only allow 32-bit value, since we pass it as U32 to OMX.
if (!msg->findInt64("max-pts-gap-to-encoder", &mMaxPtsGapUs)) {
- mMaxPtsGapUs = -1ll;
- } else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < 0) {
+ mMaxPtsGapUs = 0ll;
+ } else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < INT32_MIN) {
ALOGW("Unsupported value for max pts gap %lld", (long long) mMaxPtsGapUs);
- mMaxPtsGapUs = -1ll;
+ mMaxPtsGapUs = 0ll;
}
if (!msg->findFloat("max-fps-to-encoder", &mMaxFps)) {
mMaxFps = -1;
}
+ // notify GraphicBufferSource to allow backward frames
+ if (mMaxPtsGapUs < 0ll) {
+ mMaxFps = -1;
+ }
+
if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
mCaptureFps = -1.0;
}
@@ -1849,9 +1852,9 @@
// NOTE: we only use native window for video decoders
sp<RefBase> obj;
bool haveNativeWindow = msg->findObject("native-window", &obj)
- && obj != NULL && video && !encoder;
+ && obj != NULL && mIsVideo && !encoder;
mUsingNativeWindow = haveNativeWindow;
- if (video && !encoder) {
+ if (mIsVideo && !encoder) {
inputFormat->setInt32("adaptive-playback", false);
int32_t usageProtected;
@@ -2014,7 +2017,7 @@
(void)msg->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
// invalid encodings will default to PCM-16bit in setupRawAudioFormat.
- if (video) {
+ if (mIsVideo || mIsImage) {
// determine need for software renderer
bool usingSwRenderer = false;
if (haveNativeWindow && mComponentName.startsWith("OMX.google.")) {
@@ -2146,16 +2149,20 @@
// value is unknown
drc.targetRefLevel = -1;
}
+ if (!msg->findInt32("aac-drc-effect-type", &drc.effectType)) {
+ // value is unknown
+ drc.effectType = -2; // valid values are -1 and over
+ }
err = setupAACCodec(
- encoder, numChannels, sampleRate, bitRate, aacProfile,
+ encoder, numChannels, sampleRate, bitrate, aacProfile,
isADTS != 0, sbrMode, maxOutputChannelCount, drc,
pcmLimiterEnable);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
- err = setupAMRCodec(encoder, false /* isWAMR */, bitRate);
+ err = setupAMRCodec(encoder, false /* isWAMR */, bitrate);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
- err = setupAMRCodec(encoder, true /* isWAMR */, bitRate);
+ err = setupAMRCodec(encoder, true /* isWAMR */, bitrate);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_ALAW)
|| !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_MLAW)) {
// These are PCM-like formats with a fixed sample rate but
@@ -2269,7 +2276,7 @@
rateFloat = (float)rateInt; // 16MHz (FLINTMAX) is OK for upper bound.
}
if (rateFloat > 0) {
- err = setOperatingRate(rateFloat, video);
+ err = setOperatingRate(rateFloat, mIsVideo);
err = OK; // ignore errors
}
@@ -2294,7 +2301,7 @@
}
// create data converters if needed
- if (!video && err == OK) {
+ if (!mIsVideo && !mIsImage && err == OK) {
AudioEncoding codecPcmEncoding = kAudioEncodingPcm16bit;
if (encoder) {
(void)mInputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
@@ -2780,7 +2787,7 @@
? OMX_AUDIO_AACStreamFormatMP4ADTS
: OMX_AUDIO_AACStreamFormatMP4FF;
- OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE presentation;
+ OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE presentation;
InitOMXParams(&presentation);
presentation.nMaxOutputChannels = maxOutputChannelCount;
presentation.nDrcCut = drc.drcCut;
@@ -2789,14 +2796,29 @@
presentation.nTargetReferenceLevel = drc.targetRefLevel;
presentation.nEncodedTargetLevel = drc.encodedTargetLevel;
presentation.nPCMLimiterEnable = pcmLimiterEnable;
+ presentation.nDrcEffectType = drc.effectType;
status_t res = mOMXNode->setParameter(
OMX_IndexParamAudioAac, &profile, sizeof(profile));
if (res == OK) {
// optional parameters, will not cause configuration failure
- mOMXNode->setParameter(
+ if (mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation,
+ &presentation, sizeof(presentation)) == ERROR_UNSUPPORTED) {
+ // prior to 9.0 we used a different config structure and index
+ OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE presentation8;
+ InitOMXParams(&presentation8);
+ presentation8.nMaxOutputChannels = presentation.nMaxOutputChannels;
+ presentation8.nDrcCut = presentation.nDrcCut;
+ presentation8.nDrcBoost = presentation.nDrcBoost;
+ presentation8.nHeavyCompression = presentation.nHeavyCompression;
+ presentation8.nTargetReferenceLevel = presentation.nTargetReferenceLevel;
+ presentation8.nEncodedTargetLevel = presentation.nEncodedTargetLevel;
+ presentation8.nPCMLimiterEnable = presentation.nPCMLimiterEnable;
+ (void)mOMXNode->setParameter(
(OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacPresentation,
- &presentation, sizeof(presentation));
+ &presentation8, sizeof(presentation8));
+ }
} else {
ALOGW("did not set AudioAndroidAacPresentation due to error %d when setting AudioAac", res);
}
@@ -3219,6 +3241,7 @@
{ MEDIA_MIMETYPE_VIDEO_VP8, OMX_VIDEO_CodingVP8 },
{ MEDIA_MIMETYPE_VIDEO_VP9, OMX_VIDEO_CodingVP9 },
{ MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, OMX_VIDEO_CodingDolbyVision },
+ { MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, OMX_VIDEO_CodingImageHEIC },
};
static status_t GetVideoCodingTypeFromMime(
@@ -3292,6 +3315,22 @@
return err;
}
+ if (compressionFormat == OMX_VIDEO_CodingHEVC) {
+ int32_t profile;
+ if (msg->findInt32("profile", &profile)) {
+ // verify if Main10 profile is supported at all, and fail
+ // immediately if it's not supported.
+ if (profile == OMX_VIDEO_HEVCProfileMain10 ||
+ profile == OMX_VIDEO_HEVCProfileMain10HDR10) {
+ err = verifySupportForProfileAndLevel(
+ kPortIndexInput, profile, 0);
+ if (err != OK) {
+ return err;
+ }
+ }
+ }
+ }
+
if (compressionFormat == OMX_VIDEO_CodingVP9) {
OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
InitOMXParams(&params);
@@ -3745,10 +3784,12 @@
return err;
}
- int32_t width, height, bitrate;
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode;
+ int32_t width, height, bitrate = 0, quality;
if (!msg->findInt32("width", &width)
|| !msg->findInt32("height", &height)
- || !msg->findInt32("bitrate", &bitrate)) {
+ || !findVideoBitrateControlInfo(
+ msg, &bitrateMode, &bitrate, &quality)) {
return INVALID_OPERATION;
}
@@ -3781,6 +3822,8 @@
} else {
mFps = (double)framerate;
}
+ // propagate framerate to the output so that the muxer has it
+ outputFormat->setInt32("frame-rate", (int32_t)mFps);
video_def->xFramerate = (OMX_U32)(mFps * 65536);
video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
@@ -3872,7 +3915,8 @@
break;
case OMX_VIDEO_CodingHEVC:
- err = setupHEVCEncoderParameters(msg);
+ case OMX_VIDEO_CodingImageHEIC:
+ err = setupHEVCEncoderParameters(msg, outputFormat);
break;
case OMX_VIDEO_CodingVP8:
@@ -3999,15 +4043,6 @@
return ret > 0 ? ret - 1 : 0;
}
-static OMX_VIDEO_CONTROLRATETYPE getBitrateMode(const sp<AMessage> &msg) {
- int32_t tmp;
- if (!msg->findInt32("bitrate-mode", &tmp)) {
- return OMX_Video_ControlRateVariable;
- }
-
- return static_cast<OMX_VIDEO_CONTROLRATETYPE>(tmp);
-}
-
status_t ACodec::setupMPEG4EncoderParameters(const sp<AMessage> &msg) {
int32_t bitrate;
float iFrameInterval;
@@ -4016,7 +4051,7 @@
return INVALID_OPERATION;
}
- OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getVideoBitrateMode(msg);
float frameRate;
if (!msg->findFloat("frame-rate", &frameRate)) {
@@ -4064,7 +4099,7 @@
return INVALID_OPERATION;
}
- err = verifySupportForProfileAndLevel(profile, level);
+ err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
if (err != OK) {
return err;
@@ -4081,7 +4116,7 @@
return err;
}
- err = configureBitrate(bitrate, bitrateMode);
+ err = configureBitrate(bitrateMode, bitrate);
if (err != OK) {
return err;
@@ -4098,7 +4133,7 @@
return INVALID_OPERATION;
}
- OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getVideoBitrateMode(msg);
float frameRate;
if (!msg->findFloat("frame-rate", &frameRate)) {
@@ -4136,7 +4171,7 @@
return INVALID_OPERATION;
}
- err = verifySupportForProfileAndLevel(profile, level);
+ err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
if (err != OK) {
return err;
@@ -4158,7 +4193,7 @@
return err;
}
- err = configureBitrate(bitrate, bitrateMode);
+ err = configureBitrate(bitrateMode, bitrate);
if (err != OK) {
return err;
@@ -4228,7 +4263,7 @@
return INVALID_OPERATION;
}
- OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getVideoBitrateMode(msg);
float frameRate;
if (!msg->findFloat("frame-rate", &frameRate)) {
@@ -4271,7 +4306,7 @@
return INVALID_OPERATION;
}
- err = verifySupportForProfileAndLevel(profile, level);
+ err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
if (err != OK) {
return err;
@@ -4285,7 +4320,7 @@
// Use largest supported profile for AVC recording if profile is not specified.
for (OMX_VIDEO_AVCPROFILETYPE profile : {
OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCProfileMain }) {
- if (verifySupportForProfileAndLevel(profile, 0) == OK) {
+ if (verifySupportForProfileAndLevel(kPortIndexOutput, profile, 0) == OK) {
h264type.eProfile = profile;
break;
}
@@ -4384,26 +4419,64 @@
}
}
- return configureBitrate(bitrate, bitrateMode);
+ return configureBitrate(bitrateMode, bitrate);
}
-status_t ACodec::setupHEVCEncoderParameters(const sp<AMessage> &msg) {
- int32_t bitrate;
- float iFrameInterval;
- if (!msg->findInt32("bitrate", &bitrate)
- || !msg->findAsFloat("i-frame-interval", &iFrameInterval)) {
- return INVALID_OPERATION;
+status_t ACodec::configureImageGrid(
+ const sp<AMessage> &msg, sp<AMessage> &outputFormat) {
+ int32_t tileWidth, tileHeight, gridRows, gridCols;
+ if (!msg->findInt32("tile-width", &tileWidth) ||
+ !msg->findInt32("tile-height", &tileHeight) ||
+ !msg->findInt32("grid-rows", &gridRows) ||
+ !msg->findInt32("grid-cols", &gridCols)) {
+ return OK;
}
- OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+ OMX_VIDEO_PARAM_ANDROID_IMAGEGRIDTYPE gridType;
+ InitOMXParams(&gridType);
+ gridType.nPortIndex = kPortIndexOutput;
+ gridType.bEnabled = OMX_TRUE;
+ gridType.nTileWidth = tileWidth;
+ gridType.nTileHeight = tileHeight;
+ gridType.nGridRows = gridRows;
+ gridType.nGridCols = gridCols;
- float frameRate;
- if (!msg->findFloat("frame-rate", &frameRate)) {
- int32_t tmp;
- if (!msg->findInt32("frame-rate", &tmp)) {
- return INVALID_OPERATION;
- }
- frameRate = (float)tmp;
+ status_t err = mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidImageGrid,
+ &gridType, sizeof(gridType));
+
+ // for video encoders, grid config is only a hint.
+ if (!mIsImage) {
+ return OK;
+ }
+
+ // image encoders must support grid config.
+ if (err != OK) {
+ return err;
+ }
+
+ // query to get the image encoder's real grid config as it might be
+ // different from the requested, and transfer that to the output.
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidImageGrid,
+ &gridType, sizeof(gridType));
+
+ if (err == OK && gridType.bEnabled) {
+ outputFormat->setInt32("tile-width", gridType.nTileWidth);
+ outputFormat->setInt32("tile-height", gridType.nTileHeight);
+ outputFormat->setInt32("grid-rows", gridType.nGridRows);
+ outputFormat->setInt32("grid-cols", gridType.nGridCols);
+ }
+
+ return err;
+}
+
+status_t ACodec::setupHEVCEncoderParameters(
+ const sp<AMessage> &msg, sp<AMessage> &outputFormat) {
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode;
+ int32_t bitrate, quality;
+ if (!findVideoBitrateControlInfo(msg, &bitrateMode, &bitrate, &quality)) {
+ return INVALID_OPERATION;
}
OMX_VIDEO_PARAM_HEVCTYPE hevcType;
@@ -4424,7 +4497,7 @@
return INVALID_OPERATION;
}
- err = verifySupportForProfileAndLevel(profile, level);
+ err = verifySupportForProfileAndLevel(kPortIndexOutput, profile, level);
if (err != OK) {
return err;
}
@@ -4433,7 +4506,27 @@
hevcType.eLevel = static_cast<OMX_VIDEO_HEVCLEVELTYPE>(level);
}
// TODO: finer control?
- hevcType.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate) + 1;
+ if (mIsImage) {
+ hevcType.nKeyFrameInterval = 1;
+ } else {
+ float iFrameInterval;
+ if (!msg->findAsFloat("i-frame-interval", &iFrameInterval)) {
+ return INVALID_OPERATION;
+ }
+
+ float frameRate;
+ if (!msg->findFloat("frame-rate", &frameRate)) {
+ int32_t tmp;
+ if (!msg->findInt32("frame-rate", &tmp)) {
+ return INVALID_OPERATION;
+ }
+ frameRate = (float)tmp;
+ }
+
+ hevcType.nKeyFrameInterval =
+ setPFramesSpacing(iFrameInterval, frameRate) + 1;
+ }
+
err = mOMXNode->setParameter(
(OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
@@ -4441,7 +4534,13 @@
return err;
}
- return configureBitrate(bitrate, bitrateMode);
+ err = configureImageGrid(msg, outputFormat);
+
+ if (err != OK) {
+ return err;
+ }
+
+ return configureBitrate(bitrateMode, bitrate, quality);
}
status_t ACodec::setupVPXEncoderParameters(const sp<AMessage> &msg, sp<AMessage> &outputFormat) {
@@ -4462,7 +4561,7 @@
}
msg->findAsFloat("i-frame-interval", &iFrameInterval);
- OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode = getVideoBitrateMode(msg);
float frameRate;
if (!msg->findFloat("frame-rate", &frameRate)) {
@@ -4539,14 +4638,14 @@
}
}
- return configureBitrate(bitrate, bitrateMode);
+ return configureBitrate(bitrateMode, bitrate);
}
status_t ACodec::verifySupportForProfileAndLevel(
- int32_t profile, int32_t level) {
+ OMX_U32 portIndex, int32_t profile, int32_t level) {
OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
InitOMXParams(&params);
- params.nPortIndex = kPortIndexOutput;
+ params.nPortIndex = portIndex;
for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
params.nProfileIndex = index;
@@ -4575,7 +4674,7 @@
}
status_t ACodec::configureBitrate(
- int32_t bitrate, OMX_VIDEO_CONTROLRATETYPE bitrateMode) {
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode, int32_t bitrate, int32_t quality) {
OMX_VIDEO_PARAM_BITRATETYPE bitrateType;
InitOMXParams(&bitrateType);
bitrateType.nPortIndex = kPortIndexOutput;
@@ -4588,7 +4687,13 @@
}
bitrateType.eControlRate = bitrateMode;
- bitrateType.nTargetBitrate = bitrate;
+
+ // write it out explicitly even if it's a union
+ if (bitrateMode == OMX_Video_ControlRateConstantQuality) {
+ bitrateType.nQualityFactor = quality;
+ } else {
+ bitrateType.nTargetBitrate = bitrate;
+ }
return mOMXNode->setParameter(
OMX_IndexParamVideoBitrate, &bitrateType, sizeof(bitrateType));
@@ -4841,8 +4946,8 @@
rect.nHeight = videoDef->nFrameHeight;
}
- if (rect.nLeft < 0 ||
- rect.nTop < 0 ||
+ if (rect.nLeft < 0 || rect.nTop < 0 ||
+ rect.nWidth == 0 || rect.nHeight == 0 ||
rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
ALOGE("Wrong cropped rect (%d, %d, %u, %u) vs. frame (%u, %u)",
@@ -4876,7 +4981,8 @@
(void)getHDRStaticInfoForVideoCodec(kPortIndexInput, notify);
}
uint32_t latency = 0;
- if (mIsEncoder && getLatency(&latency) == OK && latency > 0) {
+ if (mIsEncoder && !mIsImage &&
+ getLatency(&latency) == OK && latency > 0) {
notify->setInt32("latency", latency);
}
}
@@ -4932,7 +5038,8 @@
notify->setString("mime", mime.c_str());
}
uint32_t intraRefreshPeriod = 0;
- if (mIsEncoder && getIntraRefreshPeriod(&intraRefreshPeriod) == OK
+ if (mIsEncoder && !mIsImage &&
+ getIntraRefreshPeriod(&intraRefreshPeriod) == OK
&& intraRefreshPeriod > 0) {
notify->setInt32("intra-refresh-period", intraRefreshPeriod);
}
@@ -5220,13 +5327,13 @@
convertCodecColorAspectsToPlatformAspects(aspects, &range, &standard, &transfer);
// if some aspects are unspecified, use dataspace fields
- if (range != 0) {
+ if (range == 0) {
range = (dataSpace & HAL_DATASPACE_RANGE_MASK) >> HAL_DATASPACE_RANGE_SHIFT;
}
- if (standard != 0) {
+ if (standard == 0) {
standard = (dataSpace & HAL_DATASPACE_STANDARD_MASK) >> HAL_DATASPACE_STANDARD_SHIFT;
}
- if (transfer != 0) {
+ if (transfer == 0) {
transfer = (dataSpace & HAL_DATASPACE_TRANSFER_MASK) >> HAL_DATASPACE_TRANSFER_SHIFT;
}
@@ -5298,8 +5405,9 @@
CHECK(mOutputFormat->findInt32("channel-count", &channelCount));
CHECK(mOutputFormat->findInt32("sample-rate", &sampleRate));
if (mSampleRate != 0 && sampleRate != 0) {
- mEncoderDelay = mEncoderDelay * sampleRate / mSampleRate;
- mEncoderPadding = mEncoderPadding * sampleRate / mSampleRate;
+ // avoiding 32-bit overflows in intermediate values
+ mEncoderDelay = (int32_t)((((int64_t)mEncoderDelay) * sampleRate) / mSampleRate);
+ mEncoderPadding = (int32_t)((((int64_t)mEncoderPadding) * sampleRate) / mSampleRate);
mSampleRate = sampleRate;
}
if (mSkipCutBuffer != NULL) {
@@ -5640,7 +5748,7 @@
// by this "MediaBuffer" object. Now that the OMX component has
// told us that it's done with the input buffer, we can decrement
// the mediaBuffer's reference count.
- info->mData->setMediaBufferBase(NULL);
+ info->mData->meta()->setObject("mediaBufferHolder", sp<MediaBufferHolder>(nullptr));
PortMode mode = getPortMode(kPortIndexInput);
@@ -6051,7 +6159,7 @@
}
#if 0
if (mCodec->mNativeWindow == NULL) {
- if (IsIDR(info->mData)) {
+ if (IsIDR(info->mData->data(), info->mData->size())) {
ALOGI("IDR frame");
}
}
@@ -6139,6 +6247,14 @@
mCodec->mLastNativeWindowDataSpace = dataSpace;
ALOGW_IF(err != NO_ERROR, "failed to set dataspace: %d", err);
}
+ if (buffer->format()->contains("hdr-static-info")) {
+ HDRStaticInfo info;
+ if (ColorUtils::getHDRStaticInfoFromFormat(buffer->format(), &info)
+ && memcmp(&mCodec->mLastHDRStaticInfo, &info, sizeof(info))) {
+ setNativeWindowHdrMetadata(mCodec->mNativeWindow.get(), &info);
+ mCodec->mLastHDRStaticInfo = info;
+ }
+ }
// save buffers sent to the surface so we can get render time when they return
int64_t mediaTimeUs = -1;
@@ -6248,13 +6364,8 @@
if (mDeathNotifier != NULL) {
if (mCodec->mOMXNode != NULL) {
- if (mCodec->getTrebleFlag()) {
- auto tOmxNode = mCodec->mOMXNode->getHalInterface();
- tOmxNode->unlinkToDeath(mDeathNotifier);
- } else {
- sp<IBinder> binder = IInterface::asBinder(mCodec->mOMXNode);
- binder->unlinkToDeath(mDeathNotifier);
- }
+ auto tOmxNode = mCodec->mOMXNode->getHalInterface();
+ tOmxNode->unlinkToDeath(mDeathNotifier);
}
mDeathNotifier.clear();
}
@@ -6343,112 +6454,48 @@
sp<AMessage> notify = new AMessage(kWhatOMXDied, mCodec);
- Vector<AString> matchingCodecs;
- Vector<AString> owners;
-
- AString mime;
+ sp<RefBase> obj;
+ CHECK(msg->findObject("codecInfo", &obj));
+ sp<MediaCodecInfo> info = (MediaCodecInfo *)obj.get();
+ if (info == nullptr) {
+ ALOGE("Unexpected nullptr for codec information");
+ mCodec->signalError(OMX_ErrorUndefined, UNKNOWN_ERROR);
+ return false;
+ }
+ AString owner = (info->getOwnerName() == nullptr) ? "default" : info->getOwnerName();
AString componentName;
- int32_t encoder = false;
- if (msg->findString("componentName", &componentName)) {
- sp<IMediaCodecList> list = MediaCodecList::getInstance();
- if (list == nullptr) {
- ALOGE("Unable to obtain MediaCodecList while "
- "attempting to create codec \"%s\"",
- componentName.c_str());
- mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
- return false;
- }
- ssize_t index = list->findCodecByName(componentName.c_str());
- if (index < 0) {
- ALOGE("Unable to find codec \"%s\"",
- componentName.c_str());
- mCodec->signalError(OMX_ErrorInvalidComponent, NAME_NOT_FOUND);
- return false;
- }
- sp<MediaCodecInfo> info = list->getCodecInfo(index);
- if (info == nullptr) {
- ALOGE("Unexpected error (index out-of-bound) while "
- "retrieving information for codec \"%s\"",
- componentName.c_str());
- mCodec->signalError(OMX_ErrorUndefined, UNKNOWN_ERROR);
- return false;
- }
- matchingCodecs.add(info->getCodecName());
- owners.add(info->getOwnerName() == nullptr ?
- "default" : info->getOwnerName());
- } else {
- CHECK(msg->findString("mime", &mime));
-
- if (!msg->findInt32("encoder", &encoder)) {
- encoder = false;
- }
-
- MediaCodecList::findMatchingCodecs(
- mime.c_str(),
- encoder, // createEncoder
- 0, // flags
- &matchingCodecs,
- &owners);
- }
+ CHECK(msg->findString("componentName", &componentName));
sp<CodecObserver> observer = new CodecObserver;
sp<IOMX> omx;
sp<IOMXNode> omxNode;
status_t err = NAME_NOT_FOUND;
- for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
- ++matchIndex) {
- componentName = matchingCodecs[matchIndex];
-
- OMXClient client;
- bool trebleFlag;
- if (client.connect(owners[matchIndex].c_str(), &trebleFlag) != OK) {
- mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
- return false;
- }
- omx = client.interface();
-
- pid_t tid = gettid();
- int prevPriority = androidGetThreadPriority(tid);
- androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
- err = omx->allocateNode(componentName.c_str(), observer, &omxNode);
- androidSetThreadPriority(tid, prevPriority);
-
- if (err == OK) {
- mCodec->setTrebleFlag(trebleFlag);
- break;
- } else {
- ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
- }
-
- omxNode = NULL;
+ OMXClient client;
+ if (client.connect(owner.c_str()) != OK) {
+ mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
+ return false;
}
+ omx = client.interface();
- if (omxNode == NULL) {
- if (!mime.empty()) {
- ALOGE("Unable to instantiate a %scoder for type '%s' with err %#x.",
- encoder ? "en" : "de", mime.c_str(), err);
- } else {
- ALOGE("Unable to instantiate codec '%s' with err %#x.", componentName.c_str(), err);
- }
+ pid_t tid = gettid();
+ int prevPriority = androidGetThreadPriority(tid);
+ androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
+ err = omx->allocateNode(componentName.c_str(), observer, &omxNode);
+ androidSetThreadPriority(tid, prevPriority);
+
+ if (err != OK) {
+ ALOGE("Unable to instantiate codec '%s' with err %#x.", componentName.c_str(), err);
mCodec->signalError((OMX_ERRORTYPE)err, makeNoSideEffectStatus(err));
return false;
}
mDeathNotifier = new DeathNotifier(notify);
- if (mCodec->getTrebleFlag()) {
- auto tOmxNode = omxNode->getHalInterface();
- if (!tOmxNode->linkToDeath(mDeathNotifier, 0)) {
- mDeathNotifier.clear();
- }
- } else {
- if (IInterface::asBinder(omxNode)->linkToDeath(mDeathNotifier) != OK) {
- // This was a local binder, if it dies so do we, we won't care
- // about any notifications in the afterlife.
- mDeathNotifier.clear();
- }
+ auto tOmxNode = omxNode->getHalInterface();
+ if (!tOmxNode->linkToDeath(mDeathNotifier, 0)) {
+ mDeathNotifier.clear();
}
notify = new AMessage(kWhatOMXMessageList, mCodec);
@@ -6644,11 +6691,11 @@
}
}
- if (mCodec->mMaxPtsGapUs > 0ll) {
+ if (mCodec->mMaxPtsGapUs != 0ll) {
OMX_PARAM_U32TYPE maxPtsGapParams;
InitOMXParams(&maxPtsGapParams);
maxPtsGapParams.nPortIndex = kPortIndexInput;
- maxPtsGapParams.nU32 = (uint32_t) mCodec->mMaxPtsGapUs;
+ maxPtsGapParams.nU32 = (uint32_t)mCodec->mMaxPtsGapUs;
err = mCodec->mOMXNode->setParameter(
(OMX_INDEXTYPE)OMX_IndexParamMaxFrameDurationForBitrateControl,
@@ -6661,7 +6708,7 @@
}
}
- if (mCodec->mMaxFps > 0) {
+ if (mCodec->mMaxFps > 0 || mCodec->mMaxPtsGapUs < 0) {
err = statusFromBinderStatus(
mCodec->mGraphicBufferSource->setMaxFps(mCodec->mMaxFps));
@@ -6754,9 +6801,14 @@
sp<RefBase> obj;
CHECK(msg->findObject("input-surface", &obj));
+ if (obj == NULL) {
+ ALOGE("[%s] NULL input surface", mCodec->mComponentName.c_str());
+ mCodec->mCallback->onInputSurfaceDeclined(BAD_VALUE);
+ return;
+ }
+
sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
mCodec->mGraphicBufferSource = surface->getBufferSource();
-
status_t err = setupInputSurface();
if (err == OK) {
@@ -7296,12 +7348,16 @@
}
}
- float rate;
- if (params->findFloat("operating-rate", &rate) && rate > 0) {
- status_t err = setOperatingRate(rate, mIsVideo);
+ int32_t rateInt = -1;
+ float rateFloat = -1;
+ if (!params->findFloat("operating-rate", &rateFloat)) {
+ params->findInt32("operating-rate", &rateInt);
+ rateFloat = (float) rateInt; // 16MHz (FLINTMAX) is OK for upper bound.
+ }
+ if (rateFloat > 0) {
+ status_t err = setOperatingRate(rateFloat, mIsVideo);
if (err != OK) {
- ALOGE("Failed to set parameter 'operating-rate' (err %d)", err);
- return err;
+ ALOGI("Failed to set parameter 'operating-rate' (err %d)", err);
}
}
@@ -7326,10 +7382,8 @@
}
}
- status_t err = configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
- if (err != OK) {
- err = OK; // ignore failure
- }
+ // Ignore errors as failure is expected for codecs that aren't video encoders.
+ (void)configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
return setVendorParameters(params);
}
@@ -7585,8 +7639,10 @@
config->param[paramIndex].bSet =
(OMX_BOOL)params->findString(existingKey->second.c_str(), &value);
if (config->param[paramIndex].bSet) {
- strncpy((char *)config->param[paramIndex].cString, value.c_str(),
- sizeof(OMX_CONFIG_ANDROID_VENDOR_PARAMTYPE::cString));
+ size_t dstSize = sizeof(config->param[paramIndex].cString);
+ strncpy((char *)config->param[paramIndex].cString, value.c_str(), dstSize - 1);
+ // null terminate value
+ config->param[paramIndex].cString[dstSize - 1] = '\0';
}
break;
}
@@ -7855,11 +7911,7 @@
mCodec->mBuffers[kPortIndexOutput].size());
err = FAILED_TRANSACTION;
} else {
- if (mCodec->getTrebleFlag()) {
- mCodec->mAllocator[kPortIndexOutput].clear();
- } else {
- mCodec->mDealer[kPortIndexOutput].clear();
- }
+ mCodec->mAllocator[kPortIndexOutput].clear();
}
if (err == OK) {
@@ -8271,8 +8323,9 @@
}
bool isVideo = strncasecmp(mime, "video/", 6) == 0;
+ bool isImage = strncasecmp(mime, "image/", 6) == 0;
- if (isVideo) {
+ if (isVideo || isImage) {
OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
InitOMXParams(&param);
param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
@@ -8461,12 +8514,4 @@
return OK;
}
-void ACodec::setTrebleFlag(bool trebleFlag) {
- mTrebleFlag = trebleFlag;
-}
-
-bool ACodec::getTrebleFlag() const {
- return mTrebleFlag;
-}
-
} // namespace android
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 3c7ae3e..710ae68 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -22,6 +22,7 @@
#include <android/hardware/cas/native/1.0/IDescrambler.h>
#include <binder/MemoryDealer.h>
+#include <hidlmemory/FrameworkUtils.h>
#include <media/openmax/OMX_Core.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
@@ -34,6 +35,7 @@
#include "include/SharedMemoryBuffer.h"
namespace android {
+using hardware::fromHeap;
using hardware::hidl_handle;
using hardware::hidl_string;
using hardware::hidl_vec;
@@ -162,7 +164,7 @@
size_t size;
it->mSharedEncryptedBuffer->getMemory(&offset, &size);
hardware::cas::native::V1_0::SharedBuffer srcBuffer = {
- .heapBase = mHidlMemory,
+ .heapBase = *mHidlMemory,
.offset = (uint64_t) offset,
.size = size
};
@@ -308,11 +310,8 @@
}
} else if (mDescrambler != nullptr) {
sp<IMemoryHeap> heap = dealer->getMemoryHeap();
- native_handle_t* nativeHandle = native_handle_create(1, 0);
- if (nativeHandle != nullptr) {
- int fd = heap->getHeapID();
- nativeHandle->data[0] = fd;
- mHidlMemory = hidl_memory("ashmem", hidl_handle(nativeHandle), heap->getSize());
+ mHidlMemory = fromHeap(heap);
+ if (mHidlMemory != NULL) {
ALOGV("created hidl_memory for descrambler");
} else {
ALOGE("failed to create hidl_memory for descrambler");
diff --git a/media/libstagefright/AMRExtractor.cpp b/media/libstagefright/AMRExtractor.cpp
deleted file mode 100644
index 2892520..0000000
--- a/media/libstagefright/AMRExtractor.cpp
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "AMRExtractor"
-#include <utils/Log.h>
-
-#include "include/AMRExtractor.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class AMRSource : public MediaSource {
-public:
- AMRSource(const sp<DataSource> &source,
- const sp<MetaData> &meta,
- bool isWide,
- const off64_t *offset_table,
- size_t offset_table_length);
-
- virtual status_t start(MetaData *params = NULL);
- virtual status_t stop();
-
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
- virtual ~AMRSource();
-
-private:
- sp<DataSource> mDataSource;
- sp<MetaData> mMeta;
- bool mIsWide;
-
- off64_t mOffset;
- int64_t mCurrentTimeUs;
- bool mStarted;
- MediaBufferGroup *mGroup;
-
- off64_t mOffsetTable[OFFSET_TABLE_LEN];
- size_t mOffsetTableLength;
-
- AMRSource(const AMRSource &);
- AMRSource &operator=(const AMRSource &);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-static size_t getFrameSize(bool isWide, unsigned FT) {
- static const size_t kFrameSizeNB[16] = {
- 95, 103, 118, 134, 148, 159, 204, 244,
- 39, 43, 38, 37, // SID
- 0, 0, 0, // future use
- 0 // no data
- };
- static const size_t kFrameSizeWB[16] = {
- 132, 177, 253, 285, 317, 365, 397, 461, 477,
- 40, // SID
- 0, 0, 0, 0, // future use
- 0, // speech lost
- 0 // no data
- };
-
- if (FT > 15 || (isWide && FT > 9 && FT < 14) || (!isWide && FT > 11 && FT < 15)) {
- ALOGE("illegal AMR frame type %d", FT);
- return 0;
- }
-
- size_t frameSize = isWide ? kFrameSizeWB[FT] : kFrameSizeNB[FT];
-
- // Round up bits to bytes and add 1 for the header byte.
- frameSize = (frameSize + 7) / 8 + 1;
-
- return frameSize;
-}
-
-static status_t getFrameSizeByOffset(const sp<DataSource> &source,
- off64_t offset, bool isWide, size_t *frameSize) {
- uint8_t header;
- ssize_t count = source->readAt(offset, &header, 1);
- if (count == 0) {
- return ERROR_END_OF_STREAM;
- } else if (count < 0) {
- return ERROR_IO;
- }
-
- unsigned FT = (header >> 3) & 0x0f;
-
- *frameSize = getFrameSize(isWide, FT);
- if (*frameSize == 0) {
- return ERROR_MALFORMED;
- }
- return OK;
-}
-
-AMRExtractor::AMRExtractor(const sp<DataSource> &source)
- : mDataSource(source),
- mInitCheck(NO_INIT),
- mOffsetTableLength(0) {
- String8 mimeType;
- float confidence;
- if (!SniffAMR(mDataSource, &mimeType, &confidence, NULL)) {
- return;
- }
-
- mIsWide = (mimeType == MEDIA_MIMETYPE_AUDIO_AMR_WB);
-
- mMeta = new MetaData;
- mMeta->setCString(
- kKeyMIMEType, mIsWide ? MEDIA_MIMETYPE_AUDIO_AMR_WB
- : MEDIA_MIMETYPE_AUDIO_AMR_NB);
-
- mMeta->setInt32(kKeyChannelCount, 1);
- mMeta->setInt32(kKeySampleRate, mIsWide ? 16000 : 8000);
-
- off64_t offset = mIsWide ? 9 : 6;
- off64_t streamSize;
- size_t frameSize, numFrames = 0;
- int64_t duration = 0;
-
- if (mDataSource->getSize(&streamSize) == OK) {
- while (offset < streamSize) {
- status_t status = getFrameSizeByOffset(source, offset, mIsWide, &frameSize);
- if (status == ERROR_END_OF_STREAM) {
- break;
- } else if (status != OK) {
- return;
- }
-
- if ((numFrames % 50 == 0) && (numFrames / 50 < OFFSET_TABLE_LEN)) {
- CHECK_EQ(mOffsetTableLength, numFrames / 50);
- mOffsetTable[mOffsetTableLength] = offset - (mIsWide ? 9: 6);
- mOffsetTableLength ++;
- }
-
- offset += frameSize;
- duration += 20000; // Each frame is 20ms
- numFrames ++;
- }
-
- mMeta->setInt64(kKeyDuration, duration);
- }
-
- mInitCheck = OK;
-}
-
-AMRExtractor::~AMRExtractor() {
-}
-
-sp<MetaData> AMRExtractor::getMetaData() {
- sp<MetaData> meta = new MetaData;
-
- if (mInitCheck != OK) {
- return meta;
- }
-
- meta->setCString(kKeyMIMEType, mIsWide ? "audio/amr-wb" : "audio/amr");
-
- return meta;
-}
-
-size_t AMRExtractor::countTracks() {
- return mInitCheck == OK ? 1 : 0;
-}
-
-sp<IMediaSource> AMRExtractor::getTrack(size_t index) {
- if (mInitCheck != OK || index != 0) {
- return NULL;
- }
-
- return new AMRSource(mDataSource, mMeta, mIsWide,
- mOffsetTable, mOffsetTableLength);
-}
-
-sp<MetaData> AMRExtractor::getTrackMetaData(size_t index, uint32_t /* flags */) {
- if (mInitCheck != OK || index != 0) {
- return NULL;
- }
-
- return mMeta;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-AMRSource::AMRSource(
- const sp<DataSource> &source, const sp<MetaData> &meta,
- bool isWide, const off64_t *offset_table, size_t offset_table_length)
- : mDataSource(source),
- mMeta(meta),
- mIsWide(isWide),
- mOffset(mIsWide ? 9 : 6),
- mCurrentTimeUs(0),
- mStarted(false),
- mGroup(NULL),
- mOffsetTableLength(offset_table_length) {
- if (mOffsetTableLength > 0 && mOffsetTableLength <= OFFSET_TABLE_LEN) {
- memcpy ((char*)mOffsetTable, (char*)offset_table, sizeof(off64_t) * mOffsetTableLength);
- }
-}
-
-AMRSource::~AMRSource() {
- if (mStarted) {
- stop();
- }
-}
-
-status_t AMRSource::start(MetaData * /* params */) {
- CHECK(!mStarted);
-
- mOffset = mIsWide ? 9 : 6;
- mCurrentTimeUs = 0;
- mGroup = new MediaBufferGroup;
- mGroup->add_buffer(new MediaBuffer(128));
- mStarted = true;
-
- return OK;
-}
-
-status_t AMRSource::stop() {
- CHECK(mStarted);
-
- delete mGroup;
- mGroup = NULL;
-
- mStarted = false;
- return OK;
-}
-
-sp<MetaData> AMRSource::getFormat() {
- return mMeta;
-}
-
-status_t AMRSource::read(
- MediaBuffer **out, const ReadOptions *options) {
- *out = NULL;
-
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- if (mOffsetTableLength > 0 && options && options->getSeekTo(&seekTimeUs, &mode)) {
- size_t size;
- int64_t seekFrame = seekTimeUs / 20000ll; // 20ms per frame.
- mCurrentTimeUs = seekFrame * 20000ll;
-
- size_t index = seekFrame < 0 ? 0 : seekFrame / 50;
- if (index >= mOffsetTableLength) {
- index = mOffsetTableLength - 1;
- }
-
- mOffset = mOffsetTable[index] + (mIsWide ? 9 : 6);
-
- for (size_t i = 0; i< seekFrame - index * 50; i++) {
- status_t err;
- if ((err = getFrameSizeByOffset(mDataSource, mOffset,
- mIsWide, &size)) != OK) {
- return err;
- }
- mOffset += size;
- }
- }
-
- uint8_t header;
- ssize_t n = mDataSource->readAt(mOffset, &header, 1);
-
- if (n < 1) {
- return ERROR_END_OF_STREAM;
- }
-
- if (header & 0x83) {
- // Padding bits must be 0.
-
- ALOGE("padding bits must be 0, header is 0x%02x", header);
-
- return ERROR_MALFORMED;
- }
-
- unsigned FT = (header >> 3) & 0x0f;
-
- size_t frameSize = getFrameSize(mIsWide, FT);
- if (frameSize == 0) {
- return ERROR_MALFORMED;
- }
-
- MediaBuffer *buffer;
- status_t err = mGroup->acquire_buffer(&buffer);
- if (err != OK) {
- return err;
- }
-
- n = mDataSource->readAt(mOffset, buffer->data(), frameSize);
-
- if (n != (ssize_t)frameSize) {
- buffer->release();
- buffer = NULL;
-
- if (n < 0) {
- return ERROR_IO;
- } else {
- // only partial frame is available, treat it as EOS.
- mOffset += n;
- return ERROR_END_OF_STREAM;
- }
- }
-
- buffer->set_range(0, frameSize);
- buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
- buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
-
- mOffset += frameSize;
- mCurrentTimeUs += 20000; // Each frame is 20ms
-
- *out = buffer;
-
- return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-bool SniffAMR(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *) {
- char header[9];
-
- if (source->readAt(0, header, sizeof(header)) != sizeof(header)) {
- return false;
- }
-
- if (!memcmp(header, "#!AMR\n", 6)) {
- *mimeType = MEDIA_MIMETYPE_AUDIO_AMR_NB;
- *confidence = 0.5;
-
- return true;
- } else if (!memcmp(header, "#!AMR-WB\n", 9)) {
- *mimeType = MEDIA_MIMETYPE_AUDIO_AMR_WB;
- *confidence = 0.5;
-
- return true;
- }
-
- return false;
-}
-
-} // namespace android
diff --git a/media/libstagefright/AMRWriter.cpp b/media/libstagefright/AMRWriter.cpp
index 961b57f..41106a1 100644
--- a/media/libstagefright/AMRWriter.cpp
+++ b/media/libstagefright/AMRWriter.cpp
@@ -25,8 +25,8 @@
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
+#include <media/MediaSource.h>
#include <media/mediarecorder.h>
namespace android {
@@ -54,7 +54,7 @@
return mInitCheck;
}
-status_t AMRWriter::addSource(const sp<IMediaSource> &source) {
+status_t AMRWriter::addSource(const sp<MediaSource> &source) {
if (mInitCheck != OK) {
return mInitCheck;
}
@@ -193,7 +193,7 @@
prctl(PR_SET_NAME, (unsigned long)"AMRWriter", 0, 0, 0);
while (!mDone) {
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
err = mSource->read(&buffer);
if (err != OK) {
@@ -215,7 +215,7 @@
}
int64_t timestampUs;
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &timestampUs));
+ CHECK(buffer->meta_data().findInt64(kKeyTime, &timestampUs));
if (timestampUs > mEstimatedDurationUs) {
mEstimatedDurationUs = timestampUs;
}
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index b764c98..48e351b 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -4,37 +4,114 @@
vendor_available: true,
}
+cc_library_static {
+ name: "libstagefright_esds",
+
+ srcs: ["ESDS.cpp"],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+ sanitize: {
+ misc_undefined: [
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+
+ shared_libs: ["libmedia"],
+}
+
+cc_library_static {
+ name: "libstagefright_metadatautils",
+
+ srcs: ["MetaDataUtils.cpp"],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+ sanitize: {
+ misc_undefined: [
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+
+ shared_libs: ["libmedia"],
+}
+
+cc_library_shared {
+ name: "libstagefright_codecbase",
+
+ export_include_dirs: ["include"],
+
+ srcs: [
+ "CodecBase.cpp",
+ "FrameRenderTracker.cpp",
+ "MediaCodecListWriter.cpp",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ shared_libs: [
+ "libgui",
+ "liblog",
+ "libmedia_omx",
+ "libstagefright_foundation",
+ "libui",
+ "libutils",
+ "android.hardware.cas.native@1.0",
+ ],
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+}
+
cc_library_shared {
name: "libstagefright",
srcs: [
"ACodec.cpp",
"ACodecBufferChannel.cpp",
- "AACExtractor.cpp",
"AACWriter.cpp",
- "AMRExtractor.cpp",
"AMRWriter.cpp",
"AudioPlayer.cpp",
+ "AudioPresentationInfo.cpp",
"AudioSource.cpp",
"BufferImpl.cpp",
- "CodecBase.cpp",
"CallbackDataSource.cpp",
+ "CallbackMediaSource.cpp",
"CameraSource.cpp",
"CameraSourceTimeLapse.cpp",
"DataConverter.cpp",
- "DataSource.cpp",
+ "DataSourceFactory.cpp",
"DataURISource.cpp",
- "ESDS.cpp",
"FileSource.cpp",
- "FLACExtractor.cpp",
- "FrameRenderTracker.cpp",
+ "FrameDecoder.cpp",
"HTTPBase.cpp",
"HevcUtils.cpp",
- "ItemTable.cpp",
+ "InterfaceUtils.cpp",
"JPEGSource.cpp",
- "MP3Extractor.cpp",
"MPEG2TSWriter.cpp",
- "MPEG4Extractor.cpp",
"MPEG4Writer.cpp",
"MediaAdapter.cpp",
"MediaClock.cpp",
@@ -42,83 +119,72 @@
"MediaCodecList.cpp",
"MediaCodecListOverrides.cpp",
"MediaCodecSource.cpp",
- "MediaExtractor.cpp",
+ "MediaExtractorFactory.cpp",
"MediaSync.cpp",
- "MidiExtractor.cpp",
"http/MediaHTTP.cpp",
"MediaMuxer.cpp",
- "MediaSource.cpp",
"NuCachedSource2.cpp",
"NuMediaExtractor.cpp",
"OMXClient.cpp",
"OmxInfoBuilder.cpp",
- "OggExtractor.cpp",
- "SampleIterator.cpp",
- "SampleTable.cpp",
+ "RemoteMediaExtractor.cpp",
+ "RemoteMediaSource.cpp",
"SimpleDecodingSource.cpp",
"SkipCutBuffer.cpp",
"StagefrightMediaScanner.cpp",
"StagefrightMetadataRetriever.cpp",
- "SurfaceMediaSource.cpp",
+ "StagefrightPluginLoader.cpp",
"SurfaceUtils.cpp",
- "ThrottledSource.cpp",
"Utils.cpp",
- "VBRISeeker.cpp",
+ "ThrottledSource.cpp",
"VideoFrameScheduler.cpp",
- "WAVExtractor.cpp",
- "XINGSeeker.cpp",
- "avc_utils.cpp",
],
shared_libs: [
"libaudioutils",
"libbinder",
"libcamera_client",
- "libcrypto",
"libcutils",
"libdl",
"libdrmframework",
- "libexpat",
"libgui",
+ "libion",
"liblog",
"libmedia",
+ "libmedia_omx",
"libaudioclient",
+ "libmediaextractor",
"libmediametrics",
"libmediautils",
"libnetd_client",
- "libsonivox",
"libui",
"libutils",
- "libvorbisidec",
- "libmediadrm",
- "libnativewindow",
-
"libmedia_helper",
- "libstagefright_omx_utils",
- "libstagefright_flacdec",
+ "libstagefright_codecbase",
"libstagefright_foundation",
+ "libstagefright_omx_utils",
"libstagefright_xmlparser",
"libRScpp",
+ "libhidlallocatorutils",
"libhidlbase",
"libhidlmemory",
+ "libziparchive",
"android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
- "android.hidl.token@1.0-utils",
- "android.hardware.cas@1.0",
"android.hardware.cas.native@1.0",
"android.hardware.media.omx@1.0",
+ "android.hardware.graphics.allocator@2.0",
+ "android.hardware.graphics.mapper@2.0",
],
static_libs: [
"libstagefright_color_conversion",
"libyuv_static",
- "libstagefright_matroska",
"libstagefright_mediafilter",
"libstagefright_webm",
"libstagefright_timedtext",
"libvpx",
"libwebm",
- "libstagefright_mpeg2ts",
+ "libstagefright_esds",
"libstagefright_id3",
"libFLAC",
],
@@ -139,6 +205,92 @@
"-Wall",
],
+ version_script: "exports.lds",
+
+ product_variables: {
+ debuggable: {
+ // enable experiments only in userdebug and eng builds
+ cflags: ["-DENABLE_STAGEFRIGHT_EXPERIMENTS"],
+ },
+ },
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ diag: {
+ cfi: true,
+ },
+ },
+}
+
+cc_library {
+ name: "libstagefright_player2",
+
+ srcs: [
+ "CallbackDataSource.cpp",
+ "CallbackMediaSource.cpp",
+ "DataSourceFactory.cpp",
+ "DataURISource.cpp",
+ "FileSource.cpp",
+ "HTTPBase.cpp",
+ "HevcUtils.cpp",
+ "InterfaceUtils.cpp",
+ "MediaClock.cpp",
+ "MediaExtractorFactory.cpp",
+ "NdkUtils.cpp",
+ "NuCachedSource2.cpp",
+ "RemoteMediaExtractor.cpp",
+ "RemoteMediaSource.cpp",
+ "Utils.cpp",
+ "VideoFrameScheduler.cpp",
+ "http/MediaHTTP.cpp",
+ ],
+
+ shared_libs: [
+ "libbinder",
+ "libcutils",
+ "libdrmframework",
+ "libgui",
+ "liblog",
+ "libmedia_player2_util",
+ "libaudioclient",
+ "libmediaextractor",
+ "libmediametrics",
+ "libmediautils",
+ "libnetd_client",
+ "libui",
+ "libutils",
+ "libmedia_helper",
+ "libstagefright_foundation",
+ "libziparchive",
+ ],
+
+ static_libs: [
+ "libstagefright_esds",
+ ],
+
+ header_libs:[
+ "media_plugin_headers",
+ ],
+
+ export_shared_lib_headers: [
+ "libmedia_player2_util",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ cflags: [
+ "-Wno-multichar",
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
product_variables: {
debuggable: {
// enable experiments only in userdebug and eng builds
@@ -159,6 +311,7 @@
}
subdirs = [
+ "codec2",
"codecs/*",
"colorconversion",
"filters",
@@ -167,13 +320,11 @@
"http",
"httplive",
"id3",
- "matroska",
"mpeg2ts",
"omx",
"rtsp",
"tests",
"timedtext",
"webm",
- "wifi-display",
"xmlparser",
]
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index b3fb8d4..a6f0a0b 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -23,6 +23,7 @@
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
+#include <media/MediaSource.h>
#include <media/openmax/OMX_Audio.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALookup.h>
@@ -30,7 +31,6 @@
#include <media/stagefright/AudioPlayer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
@@ -67,7 +67,7 @@
}
}
-void AudioPlayer::setSource(const sp<IMediaSource> &source) {
+void AudioPlayer::setSource(const sp<MediaSource> &source) {
CHECK(mSource == NULL);
mSource = source;
}
@@ -363,7 +363,7 @@
// When offloading, the OMX component is not used so this hack
// is not needed
if (!useOffload()) {
- wp<IMediaSource> tmp = mSource;
+ wp<MediaSource> tmp = mSource;
mSource.clear();
while (tmp.promote() != NULL) {
usleep(1000);
@@ -543,7 +543,7 @@
}
if(mInputBuffer->range_length() != 0) {
- CHECK(mInputBuffer->meta_data()->findInt64(
+ CHECK(mInputBuffer->meta_data().findInt64(
kKeyTime, &mPositionTimeMediaUs));
}
diff --git a/media/libstagefright/AudioPresentationInfo.cpp b/media/libstagefright/AudioPresentationInfo.cpp
new file mode 100644
index 0000000..86e1859
--- /dev/null
+++ b/media/libstagefright/AudioPresentationInfo.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "AudioPresentationInfo"
+
+#include <media/AudioPresentationInfo.h>
+
+namespace android {
+
+AudioPresentationInfo::AudioPresentationInfo() {
+}
+
+AudioPresentationInfo::~AudioPresentationInfo() {
+ mPresentations.clear();
+}
+
+void AudioPresentationInfo::addPresentation(sp<AudioPresentation> presentation) {
+ mPresentations.push(presentation);
+}
+
+size_t AudioPresentationInfo::countPresentations() const {
+ return mPresentations.size();
+}
+
+// Returns an AudioPresentation for the given valid index
+// index must be >=0 and < countPresentations()
+const sp<AudioPresentation> AudioPresentationInfo::getPresentation(size_t index) const {
+ return mPresentations[index];
+}
+
+} // namespace android
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index f2b1f10..2ae3218 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -52,7 +52,7 @@
AudioSource::AudioSource(
audio_source_t inputSource, const String16 &opPackageName,
uint32_t sampleRate, uint32_t channelCount, uint32_t outSampleRate,
- uid_t uid, pid_t pid)
+ uid_t uid, pid_t pid, audio_port_handle_t selectedDeviceId)
: mStarted(false),
mSampleRate(sampleRate),
mOutSampleRate(outSampleRate > 0 ? outSampleRate : sampleRate),
@@ -101,7 +101,9 @@
AudioRecord::TRANSFER_DEFAULT,
AUDIO_INPUT_FLAG_NONE,
uid,
- pid);
+ pid,
+ NULL /*pAttributes*/,
+ selectedDeviceId);
mInitCheck = mRecord->initCheck();
if (mInitCheck != OK) {
mRecord.clear();
@@ -238,7 +240,7 @@
}
status_t AudioSource::read(
- MediaBuffer **out, const ReadOptions * /* options */) {
+ MediaBufferBase **out, const ReadOptions * /* options */) {
Mutex::Autolock autoLock(mLock);
*out = NULL;
@@ -263,7 +265,7 @@
// Mute/suppress the recording sound
int64_t timeUs;
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(buffer->meta_data().findInt64(kKeyTime, &timeUs));
int64_t elapsedTimeUs = timeUs - mStartTimeUs;
if (elapsedTimeUs < kAutoRampStartUs) {
memset((uint8_t *) buffer->data(), 0, buffer->range_length());
@@ -287,7 +289,7 @@
if (mSampleRate != mOutSampleRate) {
timeUs *= (int64_t)mSampleRate / (int64_t)mOutSampleRate;
- buffer->meta_data()->setInt64(kKeyTime, timeUs);
+ buffer->meta_data().setInt64(kKeyTime, timeUs);
}
*out = buffer;
@@ -309,7 +311,7 @@
return OK;
}
-void AudioSource::signalBufferReturned(MediaBuffer *buffer) {
+void AudioSource::signalBufferReturned(MediaBufferBase *buffer) {
ALOGV("signalBufferReturned: %p", buffer->data());
Mutex::Autolock autoLock(mLock);
--mNumClientOwnedBuffers;
@@ -431,11 +433,11 @@
(mSampleRate >> 1)) / mSampleRate;
if (mNumFramesReceived == 0) {
- buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs);
+ buffer->meta_data().setInt64(kKeyAnchorTime, mStartTimeUs);
}
- buffer->meta_data()->setInt64(kKeyTime, mPrevSampleTimeUs);
- buffer->meta_data()->setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
+ buffer->meta_data().setInt64(kKeyTime, mPrevSampleTimeUs);
+ buffer->meta_data().setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
mPrevSampleTimeUs = timestampUs;
mNumFramesReceived += bufferSize / frameSize;
mBuffersReceived.push_back(buffer);
@@ -465,4 +467,43 @@
return value;
}
+status_t AudioSource::setInputDevice(audio_port_handle_t deviceId) {
+ if (mRecord != 0) {
+ return mRecord->setInputDevice(deviceId);
+ }
+ return NO_INIT;
+}
+
+status_t AudioSource::getRoutedDeviceId(audio_port_handle_t* deviceId) {
+ if (mRecord != 0) {
+ *deviceId = mRecord->getRoutedDeviceId();
+ return NO_ERROR;
+ }
+ return NO_INIT;
+}
+
+status_t AudioSource::addAudioDeviceCallback(
+ const sp<AudioSystem::AudioDeviceCallback>& callback) {
+ if (mRecord != 0) {
+ return mRecord->addAudioDeviceCallback(callback);
+ }
+ return NO_INIT;
+}
+
+status_t AudioSource::removeAudioDeviceCallback(
+ const sp<AudioSystem::AudioDeviceCallback>& callback) {
+ if (mRecord != 0) {
+ return mRecord->removeAudioDeviceCallback(callback);
+ }
+ return NO_INIT;
+}
+
+status_t AudioSource::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones) {
+ if (mRecord != 0) {
+ return mRecord->getActiveMicrophones(activeMicrophones);
+ }
+ return NO_INIT;
+}
+
} // namespace android
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
index fee3739..b760273 100644
--- a/media/libstagefright/BufferImpl.cpp
+++ b/media/libstagefright/BufferImpl.cpp
@@ -29,6 +29,8 @@
namespace android {
+// SharedMemoryBuffer
+
SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem)
: MediaCodecBuffer(format, new ABuffer(mem->pointer(), mem->size())),
mMemory(mem) {
@@ -39,6 +41,8 @@
mTMemory(mem) {
}
+// SecureBuffer
+
SecureBuffer::SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size)
: MediaCodecBuffer(format, new ABuffer(nullptr, size)),
mPointer(ptr) {
diff --git a/media/libstagefright/CallbackDataSource.cpp b/media/libstagefright/CallbackDataSource.cpp
index 6dfe2de..92e6eb9 100644
--- a/media/libstagefright/CallbackDataSource.cpp
+++ b/media/libstagefright/CallbackDataSource.cpp
@@ -21,6 +21,7 @@
#include "include/CallbackDataSource.h"
#include <binder/IMemory.h>
+#include <binder/IPCThreadState.h>
#include <media/IDataSource.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -34,7 +35,10 @@
mIsClosed(false) {
// Set up the buffer to read into.
mMemory = mIDataSource->getIMemory();
- mName = String8::format("CallbackDataSource(%s)", mIDataSource->toString().string());
+ mName = String8::format("CallbackDataSource(%d->%d, %s)",
+ getpid(),
+ IPCThreadState::self()->getCallingPid(),
+ mIDataSource->toString().string());
}
diff --git a/media/libstagefright/CallbackMediaSource.cpp b/media/libstagefright/CallbackMediaSource.cpp
new file mode 100644
index 0000000..ea7392e
--- /dev/null
+++ b/media/libstagefright/CallbackMediaSource.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/CallbackMediaSource.h>
+#include <media/IMediaSource.h>
+
+namespace android {
+
+CallbackMediaSource::CallbackMediaSource(const sp<IMediaSource> &source)
+ :mSource(source) {}
+
+CallbackMediaSource::~CallbackMediaSource() {}
+
+status_t CallbackMediaSource::start(MetaData *params) {
+ return mSource->start(params);
+}
+
+status_t CallbackMediaSource::stop() {
+ return mSource->stop();
+}
+
+sp<MetaData> CallbackMediaSource::getFormat() {
+ return mSource->getFormat();
+}
+
+status_t CallbackMediaSource::read(MediaBufferBase **buffer, const ReadOptions *options) {
+ return mSource->read(buffer, reinterpret_cast<const ReadOptions*>(options));
+}
+
+status_t CallbackMediaSource::pause() {
+ return mSource->pause();
+}
+
+} // namespace android
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 6ed0d0e..db37021 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -1040,7 +1040,7 @@
releaseRecordingFrame(frame);
}
-void CameraSource::signalBufferReturned(MediaBuffer *buffer) {
+void CameraSource::signalBufferReturned(MediaBufferBase *buffer) {
ALOGV("signalBufferReturned: %p", buffer->data());
Mutex::Autolock autoLock(mLock);
for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin();
@@ -1059,7 +1059,7 @@
}
status_t CameraSource::read(
- MediaBuffer **buffer, const ReadOptions *options) {
+ MediaBufferBase **buffer, const ReadOptions *options) {
ALOGV("read");
*buffer = NULL;
@@ -1100,7 +1100,7 @@
*buffer = new MediaBuffer(frame->pointer(), frame->size());
(*buffer)->setObserver(this);
(*buffer)->add_ref();
- (*buffer)->meta_data()->setInt64(kKeyTime, frameTime);
+ (*buffer)->meta_data().setInt64(kKeyTime, frameTime);
}
return OK;
}
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 970526a..3ad82d9 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -168,7 +168,7 @@
return isSuccessful;
}
-void CameraSourceTimeLapse::signalBufferReturned(MediaBuffer* buffer) {
+void CameraSourceTimeLapse::signalBufferReturned(MediaBufferBase* buffer) {
ALOGV("signalBufferReturned");
Mutex::Autolock autoLock(mQuickStopLock);
if (mQuickStop && (buffer == mLastReadBufferCopy)) {
@@ -180,9 +180,9 @@
}
void createMediaBufferCopy(
- const MediaBuffer& sourceBuffer,
+ const MediaBufferBase& sourceBuffer,
int64_t frameTime,
- MediaBuffer **newBuffer) {
+ MediaBufferBase **newBuffer) {
ALOGV("createMediaBufferCopy");
size_t sourceSize = sourceBuffer.size();
@@ -191,20 +191,20 @@
(*newBuffer) = new MediaBuffer(sourceSize);
memcpy((*newBuffer)->data(), sourcePointer, sourceSize);
- (*newBuffer)->meta_data()->setInt64(kKeyTime, frameTime);
+ (*newBuffer)->meta_data().setInt64(kKeyTime, frameTime);
}
-void CameraSourceTimeLapse::fillLastReadBufferCopy(MediaBuffer& sourceBuffer) {
+void CameraSourceTimeLapse::fillLastReadBufferCopy(MediaBufferBase& sourceBuffer) {
ALOGV("fillLastReadBufferCopy");
int64_t frameTime;
- CHECK(sourceBuffer.meta_data()->findInt64(kKeyTime, &frameTime));
+ CHECK(sourceBuffer.meta_data().findInt64(kKeyTime, &frameTime));
createMediaBufferCopy(sourceBuffer, frameTime, &mLastReadBufferCopy);
mLastReadBufferCopy->add_ref();
mLastReadBufferCopy->setObserver(this);
}
status_t CameraSourceTimeLapse::read(
- MediaBuffer **buffer, const ReadOptions *options) {
+ MediaBufferBase **buffer, const ReadOptions *options) {
ALOGV("read");
if (mLastReadBufferCopy == NULL) {
mLastReadStatus = CameraSource::read(buffer, options);
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
deleted file mode 100644
index c22053e..0000000
--- a/media/libstagefright/DataSource.cpp
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-//#define LOG_NDEBUG 0
-#define LOG_TAG "DataSource"
-
-#include "include/CallbackDataSource.h"
-#include "include/HTTPBase.h"
-#include "include/NuCachedSource2.h"
-
-#include <media/IDataSource.h>
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/DataURISource.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaHTTP.h>
-#include <media/stagefright/RemoteDataSource.h>
-#include <media/stagefright/Utils.h>
-#include <utils/String8.h>
-
-#include <cutils/properties.h>
-
-#include <private/android_filesystem_config.h>
-
-namespace android {
-
-bool DataSource::getUInt16(off64_t offset, uint16_t *x) {
- *x = 0;
-
- uint8_t byte[2];
- if (readAt(offset, byte, 2) != 2) {
- return false;
- }
-
- *x = (byte[0] << 8) | byte[1];
-
- return true;
-}
-
-bool DataSource::getUInt24(off64_t offset, uint32_t *x) {
- *x = 0;
-
- uint8_t byte[3];
- if (readAt(offset, byte, 3) != 3) {
- return false;
- }
-
- *x = (byte[0] << 16) | (byte[1] << 8) | byte[2];
-
- return true;
-}
-
-bool DataSource::getUInt32(off64_t offset, uint32_t *x) {
- *x = 0;
-
- uint32_t tmp;
- if (readAt(offset, &tmp, 4) != 4) {
- return false;
- }
-
- *x = ntohl(tmp);
-
- return true;
-}
-
-bool DataSource::getUInt64(off64_t offset, uint64_t *x) {
- *x = 0;
-
- uint64_t tmp;
- if (readAt(offset, &tmp, 8) != 8) {
- return false;
- }
-
- *x = ntoh64(tmp);
-
- return true;
-}
-
-bool DataSource::getUInt16Var(off64_t offset, uint16_t *x, size_t size) {
- if (size == 2) {
- return getUInt16(offset, x);
- }
- if (size == 1) {
- uint8_t tmp;
- if (readAt(offset, &tmp, 1) == 1) {
- *x = tmp;
- return true;
- }
- }
- return false;
-}
-
-bool DataSource::getUInt32Var(off64_t offset, uint32_t *x, size_t size) {
- if (size == 4) {
- return getUInt32(offset, x);
- }
- if (size == 2) {
- uint16_t tmp;
- if (getUInt16(offset, &tmp)) {
- *x = tmp;
- return true;
- }
- }
- return false;
-}
-
-bool DataSource::getUInt64Var(off64_t offset, uint64_t *x, size_t size) {
- if (size == 8) {
- return getUInt64(offset, x);
- }
- if (size == 4) {
- uint32_t tmp;
- if (getUInt32(offset, &tmp)) {
- *x = tmp;
- return true;
- }
- }
- return false;
-}
-
-status_t DataSource::getSize(off64_t *size) {
- *size = 0;
-
- return ERROR_UNSUPPORTED;
-}
-
-sp<IDataSource> DataSource::getIDataSource() const {
- return nullptr;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-// static
-sp<DataSource> DataSource::CreateFromURI(
- const sp<IMediaHTTPService> &httpService,
- const char *uri,
- const KeyedVector<String8, String8> *headers,
- String8 *contentType,
- HTTPBase *httpSource) {
- if (contentType != NULL) {
- *contentType = "";
- }
-
- sp<DataSource> source;
- if (!strncasecmp("file://", uri, 7)) {
- source = new FileSource(uri + 7);
- } else if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
- if (httpService == NULL) {
- ALOGE("Invalid http service!");
- return NULL;
- }
-
- if (httpSource == NULL) {
- sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection();
- if (conn == NULL) {
- ALOGE("Failed to make http connection from http service!");
- return NULL;
- }
- httpSource = new MediaHTTP(conn);
- }
-
- String8 cacheConfig;
- bool disconnectAtHighwatermark = false;
- KeyedVector<String8, String8> nonCacheSpecificHeaders;
- if (headers != NULL) {
- nonCacheSpecificHeaders = *headers;
- NuCachedSource2::RemoveCacheSpecificHeaders(
- &nonCacheSpecificHeaders,
- &cacheConfig,
- &disconnectAtHighwatermark);
- }
-
- if (httpSource->connect(uri, &nonCacheSpecificHeaders) != OK) {
- ALOGE("Failed to connect http source!");
- return NULL;
- }
-
- if (contentType != NULL) {
- *contentType = httpSource->getMIMEType();
- }
-
- source = NuCachedSource2::Create(
- httpSource,
- cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
- disconnectAtHighwatermark);
- } else if (!strncasecmp("data:", uri, 5)) {
- source = DataURISource::Create(uri);
- } else {
- // Assume it's a filename.
- source = new FileSource(uri);
- }
-
- if (source == NULL || source->initCheck() != OK) {
- return NULL;
- }
-
- return source;
-}
-
-sp<DataSource> DataSource::CreateFromFd(int fd, int64_t offset, int64_t length) {
- sp<FileSource> source = new FileSource(fd, offset, length);
- return source->initCheck() != OK ? nullptr : source;
-}
-
-sp<DataSource> DataSource::CreateMediaHTTP(const sp<IMediaHTTPService> &httpService) {
- if (httpService == NULL) {
- return NULL;
- }
-
- sp<IMediaHTTPConnection> conn = httpService->makeHTTPConnection();
- if (conn == NULL) {
- return NULL;
- } else {
- return new MediaHTTP(conn);
- }
-}
-
-sp<DataSource> DataSource::CreateFromIDataSource(const sp<IDataSource> &source) {
- return new TinyCacheSource(new CallbackDataSource(source));
-}
-
-String8 DataSource::getMIMEType() const {
- return String8("application/octet-stream");
-}
-
-sp<IDataSource> DataSource::asIDataSource() {
- return RemoteDataSource::wrap(sp<DataSource>(this));
-}
-
-} // namespace android
diff --git a/media/libstagefright/DataSourceFactory.cpp b/media/libstagefright/DataSourceFactory.cpp
new file mode 100644
index 0000000..54bf0cc
--- /dev/null
+++ b/media/libstagefright/DataSourceFactory.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DataSource"
+
+#include "include/HTTPBase.h"
+#include "include/NuCachedSource2.h"
+
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
+#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/DataURISource.h>
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaHTTP.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// static
+sp<DataSource> DataSourceFactory::CreateFromURI(
+ const sp<MediaHTTPService> &httpService,
+ const char *uri,
+ const KeyedVector<String8, String8> *headers,
+ String8 *contentType,
+ HTTPBase *httpSource) {
+ if (contentType != NULL) {
+ *contentType = "";
+ }
+
+ sp<DataSource> source;
+ if (!strncasecmp("file://", uri, 7)) {
+ source = new FileSource(uri + 7);
+ } else if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
+ if (httpService == NULL) {
+ ALOGE("Invalid http service!");
+ return NULL;
+ }
+
+ if (httpSource == NULL) {
+ sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ if (conn == NULL) {
+ ALOGE("Failed to make http connection from http service!");
+ return NULL;
+ }
+ httpSource = new MediaHTTP(conn);
+ }
+
+ String8 cacheConfig;
+ bool disconnectAtHighwatermark = false;
+ KeyedVector<String8, String8> nonCacheSpecificHeaders;
+ if (headers != NULL) {
+ nonCacheSpecificHeaders = *headers;
+ NuCachedSource2::RemoveCacheSpecificHeaders(
+ &nonCacheSpecificHeaders,
+ &cacheConfig,
+ &disconnectAtHighwatermark);
+ }
+
+ if (httpSource->connect(uri, &nonCacheSpecificHeaders) != OK) {
+ ALOGE("Failed to connect http source!");
+ return NULL;
+ }
+
+ if (contentType != NULL) {
+ *contentType = httpSource->getMIMEType();
+ }
+
+ source = NuCachedSource2::Create(
+ httpSource,
+ cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+ disconnectAtHighwatermark);
+ } else if (!strncasecmp("data:", uri, 5)) {
+ source = DataURISource::Create(uri);
+ } else {
+ // Assume it's a filename.
+ source = new FileSource(uri);
+ }
+
+ if (source == NULL || source->initCheck() != OK) {
+ return NULL;
+ }
+
+ return source;
+}
+
+sp<DataSource> DataSourceFactory::CreateFromFd(int fd, int64_t offset, int64_t length) {
+ sp<FileSource> source = new FileSource(fd, offset, length);
+ return source->initCheck() != OK ? nullptr : source;
+}
+
+sp<DataSource> DataSourceFactory::CreateMediaHTTP(const sp<MediaHTTPService> &httpService) {
+ if (httpService == NULL) {
+ return NULL;
+ }
+
+ sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ if (conn == NULL) {
+ return NULL;
+ } else {
+ return new MediaHTTP(conn);
+ }
+}
+
+} // namespace android
diff --git a/media/libstagefright/ESDS.cpp b/media/libstagefright/ESDS.cpp
index c31720d..ea059e8 100644
--- a/media/libstagefright/ESDS.cpp
+++ b/media/libstagefright/ESDS.cpp
@@ -18,7 +18,7 @@
#define LOG_TAG "ESDS"
#include <utils/Log.h>
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include "include/ESDS.h"
diff --git a/media/libstagefright/FLACExtractor.cpp b/media/libstagefright/FLACExtractor.cpp
deleted file mode 100644
index 1b88e5d..0000000
--- a/media/libstagefright/FLACExtractor.cpp
+++ /dev/null
@@ -1,867 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "FLACExtractor"
-#include <utils/Log.h>
-
-#include "include/FLACExtractor.h"
-// Vorbis comments
-#include "include/OggExtractor.h"
-// libFLAC parser
-#include "FLAC/stream_decoder.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaBuffer.h>
-
-namespace android {
-
-class FLACParser;
-
-class FLACSource : public MediaSource {
-
-public:
- FLACSource(
- const sp<DataSource> &dataSource,
- const sp<MetaData> &trackMetadata);
-
- virtual status_t start(MetaData *params);
- virtual status_t stop();
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
- virtual ~FLACSource();
-
-private:
- sp<DataSource> mDataSource;
- sp<MetaData> mTrackMetadata;
- sp<FLACParser> mParser;
- bool mInitCheck;
- bool mStarted;
-
- status_t init();
-
- // no copy constructor or assignment
- FLACSource(const FLACSource &);
- FLACSource &operator=(const FLACSource &);
-
-};
-
-// FLACParser wraps a C libFLAC parser aka stream decoder
-
-class FLACParser : public RefBase {
-
-public:
- enum {
- kMaxChannels = 8,
- };
-
- explicit FLACParser(
- const sp<DataSource> &dataSource,
- // If metadata pointers aren't provided, we don't fill them
- const sp<MetaData> &fileMetadata = 0,
- const sp<MetaData> &trackMetadata = 0);
-
- status_t initCheck() const {
- return mInitCheck;
- }
-
- // stream properties
- unsigned getMaxBlockSize() const {
- return mStreamInfo.max_blocksize;
- }
- unsigned getSampleRate() const {
- return mStreamInfo.sample_rate;
- }
- unsigned getChannels() const {
- return mStreamInfo.channels;
- }
- unsigned getBitsPerSample() const {
- return mStreamInfo.bits_per_sample;
- }
- FLAC__uint64 getTotalSamples() const {
- return mStreamInfo.total_samples;
- }
-
- // media buffers
- void allocateBuffers();
- void releaseBuffers();
- MediaBuffer *readBuffer() {
- return readBuffer(false, 0LL);
- }
- MediaBuffer *readBuffer(FLAC__uint64 sample) {
- return readBuffer(true, sample);
- }
-
-protected:
- virtual ~FLACParser();
-
-private:
- sp<DataSource> mDataSource;
- sp<MetaData> mFileMetadata;
- sp<MetaData> mTrackMetadata;
- bool mInitCheck;
-
- // media buffers
- size_t mMaxBufferSize;
- MediaBufferGroup *mGroup;
- void (*mCopy)(short *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
-
- // handle to underlying libFLAC parser
- FLAC__StreamDecoder *mDecoder;
-
- // current position within the data source
- off64_t mCurrentPos;
- bool mEOF;
-
- // cached when the STREAMINFO metadata is parsed by libFLAC
- FLAC__StreamMetadata_StreamInfo mStreamInfo;
- bool mStreamInfoValid;
-
- // cached when a decoded PCM block is "written" by libFLAC parser
- bool mWriteRequested;
- bool mWriteCompleted;
- FLAC__FrameHeader mWriteHeader;
- FLAC__int32 const * mWriteBuffer[kMaxChannels];
-
- // most recent error reported by libFLAC parser
- FLAC__StreamDecoderErrorStatus mErrorStatus;
-
- status_t init();
- MediaBuffer *readBuffer(bool doSeek, FLAC__uint64 sample);
-
- // no copy constructor or assignment
- FLACParser(const FLACParser &);
- FLACParser &operator=(const FLACParser &);
-
- // FLAC parser callbacks as C++ instance methods
- FLAC__StreamDecoderReadStatus readCallback(
- FLAC__byte buffer[], size_t *bytes);
- FLAC__StreamDecoderSeekStatus seekCallback(
- FLAC__uint64 absolute_byte_offset);
- FLAC__StreamDecoderTellStatus tellCallback(
- FLAC__uint64 *absolute_byte_offset);
- FLAC__StreamDecoderLengthStatus lengthCallback(
- FLAC__uint64 *stream_length);
- FLAC__bool eofCallback();
- FLAC__StreamDecoderWriteStatus writeCallback(
- const FLAC__Frame *frame, const FLAC__int32 * const buffer[]);
- void metadataCallback(const FLAC__StreamMetadata *metadata);
- void errorCallback(FLAC__StreamDecoderErrorStatus status);
-
- // FLAC parser callbacks as C-callable functions
- static FLAC__StreamDecoderReadStatus read_callback(
- const FLAC__StreamDecoder *decoder,
- FLAC__byte buffer[], size_t *bytes,
- void *client_data);
- static FLAC__StreamDecoderSeekStatus seek_callback(
- const FLAC__StreamDecoder *decoder,
- FLAC__uint64 absolute_byte_offset,
- void *client_data);
- static FLAC__StreamDecoderTellStatus tell_callback(
- const FLAC__StreamDecoder *decoder,
- FLAC__uint64 *absolute_byte_offset,
- void *client_data);
- static FLAC__StreamDecoderLengthStatus length_callback(
- const FLAC__StreamDecoder *decoder,
- FLAC__uint64 *stream_length,
- void *client_data);
- static FLAC__bool eof_callback(
- const FLAC__StreamDecoder *decoder,
- void *client_data);
- static FLAC__StreamDecoderWriteStatus write_callback(
- const FLAC__StreamDecoder *decoder,
- const FLAC__Frame *frame, const FLAC__int32 * const buffer[],
- void *client_data);
- static void metadata_callback(
- const FLAC__StreamDecoder *decoder,
- const FLAC__StreamMetadata *metadata,
- void *client_data);
- static void error_callback(
- const FLAC__StreamDecoder *decoder,
- FLAC__StreamDecoderErrorStatus status,
- void *client_data);
-
-};
-
-// The FLAC parser calls our C++ static callbacks using C calling conventions,
-// inside FLAC__stream_decoder_process_until_end_of_metadata
-// and FLAC__stream_decoder_process_single.
-// We immediately then call our corresponding C++ instance methods
-// with the same parameter list, but discard redundant information.
-
-FLAC__StreamDecoderReadStatus FLACParser::read_callback(
- const FLAC__StreamDecoder * /* decoder */, FLAC__byte buffer[],
- size_t *bytes, void *client_data)
-{
- return ((FLACParser *) client_data)->readCallback(buffer, bytes);
-}
-
-FLAC__StreamDecoderSeekStatus FLACParser::seek_callback(
- const FLAC__StreamDecoder * /* decoder */,
- FLAC__uint64 absolute_byte_offset, void *client_data)
-{
- return ((FLACParser *) client_data)->seekCallback(absolute_byte_offset);
-}
-
-FLAC__StreamDecoderTellStatus FLACParser::tell_callback(
- const FLAC__StreamDecoder * /* decoder */,
- FLAC__uint64 *absolute_byte_offset, void *client_data)
-{
- return ((FLACParser *) client_data)->tellCallback(absolute_byte_offset);
-}
-
-FLAC__StreamDecoderLengthStatus FLACParser::length_callback(
- const FLAC__StreamDecoder * /* decoder */,
- FLAC__uint64 *stream_length, void *client_data)
-{
- return ((FLACParser *) client_data)->lengthCallback(stream_length);
-}
-
-FLAC__bool FLACParser::eof_callback(
- const FLAC__StreamDecoder * /* decoder */, void *client_data)
-{
- return ((FLACParser *) client_data)->eofCallback();
-}
-
-FLAC__StreamDecoderWriteStatus FLACParser::write_callback(
- const FLAC__StreamDecoder * /* decoder */, const FLAC__Frame *frame,
- const FLAC__int32 * const buffer[], void *client_data)
-{
- return ((FLACParser *) client_data)->writeCallback(frame, buffer);
-}
-
-void FLACParser::metadata_callback(
- const FLAC__StreamDecoder * /* decoder */,
- const FLAC__StreamMetadata *metadata, void *client_data)
-{
- ((FLACParser *) client_data)->metadataCallback(metadata);
-}
-
-void FLACParser::error_callback(
- const FLAC__StreamDecoder * /* decoder */,
- FLAC__StreamDecoderErrorStatus status, void *client_data)
-{
- ((FLACParser *) client_data)->errorCallback(status);
-}
-
-// These are the corresponding callbacks with C++ calling conventions
-
-FLAC__StreamDecoderReadStatus FLACParser::readCallback(
- FLAC__byte buffer[], size_t *bytes)
-{
- size_t requested = *bytes;
- ssize_t actual = mDataSource->readAt(mCurrentPos, buffer, requested);
- if (0 > actual) {
- *bytes = 0;
- return FLAC__STREAM_DECODER_READ_STATUS_ABORT;
- } else if (0 == actual) {
- *bytes = 0;
- mEOF = true;
- return FLAC__STREAM_DECODER_READ_STATUS_END_OF_STREAM;
- } else {
- assert(actual <= requested);
- *bytes = actual;
- mCurrentPos += actual;
- return FLAC__STREAM_DECODER_READ_STATUS_CONTINUE;
- }
-}
-
-FLAC__StreamDecoderSeekStatus FLACParser::seekCallback(
- FLAC__uint64 absolute_byte_offset)
-{
- mCurrentPos = absolute_byte_offset;
- mEOF = false;
- return FLAC__STREAM_DECODER_SEEK_STATUS_OK;
-}
-
-FLAC__StreamDecoderTellStatus FLACParser::tellCallback(
- FLAC__uint64 *absolute_byte_offset)
-{
- *absolute_byte_offset = mCurrentPos;
- return FLAC__STREAM_DECODER_TELL_STATUS_OK;
-}
-
-FLAC__StreamDecoderLengthStatus FLACParser::lengthCallback(
- FLAC__uint64 *stream_length)
-{
- off64_t size;
- if (OK == mDataSource->getSize(&size)) {
- *stream_length = size;
- return FLAC__STREAM_DECODER_LENGTH_STATUS_OK;
- } else {
- return FLAC__STREAM_DECODER_LENGTH_STATUS_UNSUPPORTED;
- }
-}
-
-FLAC__bool FLACParser::eofCallback()
-{
- return mEOF;
-}
-
-FLAC__StreamDecoderWriteStatus FLACParser::writeCallback(
- const FLAC__Frame *frame, const FLAC__int32 * const buffer[])
-{
- if (mWriteRequested) {
- mWriteRequested = false;
- // FLAC parser doesn't free or realloc buffer until next frame or finish
- mWriteHeader = frame->header;
- memmove(mWriteBuffer, buffer, sizeof(const FLAC__int32 * const) * getChannels());
- mWriteCompleted = true;
- return FLAC__STREAM_DECODER_WRITE_STATUS_CONTINUE;
- } else {
- ALOGE("FLACParser::writeCallback unexpected");
- return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
- }
-}
-
-void FLACParser::metadataCallback(const FLAC__StreamMetadata *metadata)
-{
- switch (metadata->type) {
- case FLAC__METADATA_TYPE_STREAMINFO:
- if (!mStreamInfoValid) {
- mStreamInfo = metadata->data.stream_info;
- mStreamInfoValid = true;
- } else {
- ALOGE("FLACParser::metadataCallback unexpected STREAMINFO");
- }
- break;
- case FLAC__METADATA_TYPE_VORBIS_COMMENT:
- {
- const FLAC__StreamMetadata_VorbisComment *vc;
- vc = &metadata->data.vorbis_comment;
- for (FLAC__uint32 i = 0; i < vc->num_comments; ++i) {
- FLAC__StreamMetadata_VorbisComment_Entry *vce;
- vce = &vc->comments[i];
- if (mFileMetadata != 0 && vce->entry != NULL) {
- parseVorbisComment(mFileMetadata, (const char *) vce->entry,
- vce->length);
- }
- }
- }
- break;
- case FLAC__METADATA_TYPE_PICTURE:
- if (mFileMetadata != 0) {
- const FLAC__StreamMetadata_Picture *p = &metadata->data.picture;
- mFileMetadata->setData(kKeyAlbumArt,
- MetaData::TYPE_NONE, p->data, p->data_length);
- mFileMetadata->setCString(kKeyAlbumArtMIME, p->mime_type);
- }
- break;
- default:
- ALOGW("FLACParser::metadataCallback unexpected type %u", metadata->type);
- break;
- }
-}
-
-void FLACParser::errorCallback(FLAC__StreamDecoderErrorStatus status)
-{
- ALOGE("FLACParser::errorCallback status=%d", status);
- mErrorStatus = status;
-}
-
-// Copy samples from FLAC native 32-bit non-interleaved to 16-bit interleaved.
-// These are candidates for optimization if needed.
-
-static void copyMono8(
- short *dst,
- const int * src[FLACParser::kMaxChannels],
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] << 8;
- }
-}
-
-static void copyStereo8(
- short *dst,
- const int * src[FLACParser::kMaxChannels],
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] << 8;
- *dst++ = src[1][i] << 8;
- }
-}
-
-static void copyMultiCh8(short *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
-{
- for (unsigned i = 0; i < nSamples; ++i) {
- for (unsigned c = 0; c < nChannels; ++c) {
- *dst++ = src[c][i] << 8;
- }
- }
-}
-
-static void copyMono16(
- short *dst,
- const int * src[FLACParser::kMaxChannels],
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i];
- }
-}
-
-static void copyStereo16(
- short *dst,
- const int * src[FLACParser::kMaxChannels],
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i];
- *dst++ = src[1][i];
- }
-}
-
-static void copyMultiCh16(short *dst, const int * src[FLACParser::kMaxChannels], unsigned nSamples, unsigned nChannels)
-{
- for (unsigned i = 0; i < nSamples; ++i) {
- for (unsigned c = 0; c < nChannels; ++c) {
- *dst++ = src[c][i];
- }
- }
-}
-
-// 24-bit versions should do dithering or noise-shaping, here or in AudioFlinger
-
-static void copyMono24(
- short *dst,
- const int * src[FLACParser::kMaxChannels],
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] >> 8;
- }
-}
-
-static void copyStereo24(
- short *dst,
- const int * src[FLACParser::kMaxChannels],
- unsigned nSamples,
- unsigned /* nChannels */) {
- for (unsigned i = 0; i < nSamples; ++i) {
- *dst++ = src[0][i] >> 8;
- *dst++ = src[1][i] >> 8;
- }
-}
-
-static void copyMultiCh24(
- short *dst,
- const int * src[FLACParser::kMaxChannels],
- unsigned nSamples,
- unsigned nChannels) {
- for (unsigned i = 0; i < nSamples; ++i) {
- for (unsigned c = 0; c < nChannels; ++c) {
- *dst++ = src[c][i] >> 8;
- }
- }
-}
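
The comment above notes that the 24-bit copy routines should eventually apply dithering or noise shaping before truncating to 16 bits. A minimal sketch of what a TPDF-dithered mono variant could look like -- illustrative only, not part of this change, and assuming the same 24-bit-in-int32 sample layout plus <stdlib.h> for rand():

    static void copyMono24Dithered(
            short *dst,
            const int * src[FLACParser::kMaxChannels],
            unsigned nSamples,
            unsigned /* nChannels */) {
        for (unsigned i = 0; i < nSamples; ++i) {
            // triangular-PDF dither: sum of two uniform values, roughly
            // +/- one 16-bit LSB expressed in the 24-bit domain
            int dither = (rand() & 0xFF) + (rand() & 0xFF) - 255;
            int sample = src[0][i] + dither;
            // clamp to the 24-bit range before truncating to 16 bits
            if (sample > 0x7FFFFF) sample = 0x7FFFFF;
            else if (sample < -0x800000) sample = -0x800000;
            *dst++ = sample >> 8;
        }
    }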
-
-static void copyTrespass(
- short * /* dst */,
- const int *[FLACParser::kMaxChannels] /* src */,
- unsigned /* nSamples */,
- unsigned /* nChannels */) {
- TRESPASS();
-}
-
-// FLACParser
-
-FLACParser::FLACParser(
- const sp<DataSource> &dataSource,
- const sp<MetaData> &fileMetadata,
- const sp<MetaData> &trackMetadata)
- : mDataSource(dataSource),
- mFileMetadata(fileMetadata),
- mTrackMetadata(trackMetadata),
- mInitCheck(false),
- mMaxBufferSize(0),
- mGroup(NULL),
- mCopy(copyTrespass),
- mDecoder(NULL),
- mCurrentPos(0LL),
- mEOF(false),
- mStreamInfoValid(false),
- mWriteRequested(false),
- mWriteCompleted(false),
- mErrorStatus((FLAC__StreamDecoderErrorStatus) -1)
-{
- ALOGV("FLACParser::FLACParser");
- memset(&mStreamInfo, 0, sizeof(mStreamInfo));
- memset(&mWriteHeader, 0, sizeof(mWriteHeader));
- mInitCheck = init();
-}
-
-FLACParser::~FLACParser()
-{
- ALOGV("FLACParser::~FLACParser");
- if (mDecoder != NULL) {
- FLAC__stream_decoder_delete(mDecoder);
- mDecoder = NULL;
- }
-}
-
-status_t FLACParser::init()
-{
- // setup libFLAC parser
- mDecoder = FLAC__stream_decoder_new();
- if (mDecoder == NULL) {
- // Creating the decoder should succeed, since it is probably just a malloc
- // that always succeeds on Android. But to avoid depending on libFLAC
- // internals, we check and log here.
- ALOGE("new failed");
- return NO_INIT;
- }
- FLAC__stream_decoder_set_md5_checking(mDecoder, false);
- FLAC__stream_decoder_set_metadata_ignore_all(mDecoder);
- FLAC__stream_decoder_set_metadata_respond(
- mDecoder, FLAC__METADATA_TYPE_STREAMINFO);
- FLAC__stream_decoder_set_metadata_respond(
- mDecoder, FLAC__METADATA_TYPE_PICTURE);
- FLAC__stream_decoder_set_metadata_respond(
- mDecoder, FLAC__METADATA_TYPE_VORBIS_COMMENT);
- FLAC__StreamDecoderInitStatus initStatus;
- initStatus = FLAC__stream_decoder_init_stream(
- mDecoder,
- read_callback, seek_callback, tell_callback,
- length_callback, eof_callback, write_callback,
- metadata_callback, error_callback, (void *) this);
- if (initStatus != FLAC__STREAM_DECODER_INIT_STATUS_OK) {
- // A failure here probably indicates a programming error and so is
- // unlikely to happen. But we check and log here similarly to above.
- ALOGE("init_stream failed %d", initStatus);
- return NO_INIT;
- }
- // parse all metadata
- if (!FLAC__stream_decoder_process_until_end_of_metadata(mDecoder)) {
- ALOGE("end_of_metadata failed");
- return NO_INIT;
- }
- if (mStreamInfoValid) {
- // check channel count
- if (getChannels() == 0 || getChannels() > kMaxChannels) {
- ALOGE("unsupported channel count %u", getChannels());
- return NO_INIT;
- }
- // check bit depth
- switch (getBitsPerSample()) {
- case 8:
- case 16:
- case 24:
- break;
- default:
- ALOGE("unsupported bits per sample %u", getBitsPerSample());
- return NO_INIT;
- }
- // check sample rate
- switch (getSampleRate()) {
- case 8000:
- case 11025:
- case 12000:
- case 16000:
- case 22050:
- case 24000:
- case 32000:
- case 44100:
- case 48000:
- case 88200:
- case 96000:
- break;
- default:
- ALOGE("unsupported sample rate %u", getSampleRate());
- return NO_INIT;
- }
- // configure the appropriate copy function, defaulting to trespass
- static const struct {
- unsigned mChannels;
- unsigned mBitsPerSample;
- void (*mCopy)(short *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
- } table[] = {
- { 1, 8, copyMono8 },
- { 2, 8, copyStereo8 },
- { 8, 8, copyMultiCh8 },
- { 1, 16, copyMono16 },
- { 2, 16, copyStereo16 },
- { 8, 16, copyMultiCh16 },
- { 1, 24, copyMono24 },
- { 2, 24, copyStereo24 },
- { 8, 24, copyMultiCh24 },
- };
- for (unsigned i = 0; i < sizeof(table)/sizeof(table[0]); ++i) {
- if (table[i].mChannels >= getChannels() &&
- table[i].mBitsPerSample == getBitsPerSample()) {
- mCopy = table[i].mCopy;
- break;
- }
- }
- // populate track metadata
- if (mTrackMetadata != 0) {
- mTrackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
- mTrackMetadata->setInt32(kKeyChannelCount, getChannels());
- mTrackMetadata->setInt32(kKeySampleRate, getSampleRate());
- mTrackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
- // sample rate is non-zero, so division by zero not possible
- mTrackMetadata->setInt64(kKeyDuration,
- (getTotalSamples() * 1000000LL) / getSampleRate());
- }
- } else {
- ALOGE("missing STREAMINFO");
- return NO_INIT;
- }
- if (mFileMetadata != 0) {
- mFileMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_FLAC);
- }
- return OK;
-}
-
-void FLACParser::allocateBuffers()
-{
- CHECK(mGroup == NULL);
- mGroup = new MediaBufferGroup;
- mMaxBufferSize = getMaxBlockSize() * getChannels() * sizeof(short);
- mGroup->add_buffer(new MediaBuffer(mMaxBufferSize));
-}
-
-void FLACParser::releaseBuffers()
-{
- CHECK(mGroup != NULL);
- delete mGroup;
- mGroup = NULL;
-}
-
-MediaBuffer *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
-{
- mWriteRequested = true;
- mWriteCompleted = false;
- if (doSeek) {
- // We implement the seek callback, so this works without explicit flush
- if (!FLAC__stream_decoder_seek_absolute(mDecoder, sample)) {
- ALOGE("FLACParser::readBuffer seek to sample %lld failed", (long long)sample);
- return NULL;
- }
- ALOGV("FLACParser::readBuffer seek to sample %lld succeeded", (long long)sample);
- } else {
- if (!FLAC__stream_decoder_process_single(mDecoder)) {
- ALOGE("FLACParser::readBuffer process_single failed");
- return NULL;
- }
- }
- if (!mWriteCompleted) {
- ALOGV("FLACParser::readBuffer write did not complete");
- return NULL;
- }
- // verify that block header keeps the promises made by STREAMINFO
- unsigned blocksize = mWriteHeader.blocksize;
- if (blocksize == 0 || blocksize > getMaxBlockSize()) {
- ALOGE("FLACParser::readBuffer write invalid blocksize %u", blocksize);
- return NULL;
- }
- if (mWriteHeader.sample_rate != getSampleRate() ||
- mWriteHeader.channels != getChannels() ||
- mWriteHeader.bits_per_sample != getBitsPerSample()) {
- ALOGE("FLACParser::readBuffer write changed parameters mid-stream: %d/%d/%d -> %d/%d/%d",
- getSampleRate(), getChannels(), getBitsPerSample(),
- mWriteHeader.sample_rate, mWriteHeader.channels, mWriteHeader.bits_per_sample);
- return NULL;
- }
- // acquire a media buffer
- CHECK(mGroup != NULL);
- MediaBuffer *buffer;
- status_t err = mGroup->acquire_buffer(&buffer);
- if (err != OK) {
- return NULL;
- }
- size_t bufferSize = blocksize * getChannels() * sizeof(short);
- CHECK(bufferSize <= mMaxBufferSize);
- short *data = (short *) buffer->data();
- buffer->set_range(0, bufferSize);
- // copy PCM from FLAC write buffer to our media buffer, with interleaving
- (*mCopy)(data, mWriteBuffer, blocksize, getChannels());
- // fill in buffer metadata
- CHECK(mWriteHeader.number_type == FLAC__FRAME_NUMBER_TYPE_SAMPLE_NUMBER);
- FLAC__uint64 sampleNumber = mWriteHeader.number.sample_number;
- int64_t timeUs = (1000000LL * sampleNumber) / getSampleRate();
- buffer->meta_data()->setInt64(kKeyTime, timeUs);
- buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
- return buffer;
-}
-
-// FLACSource
-
-FLACSource::FLACSource(
- const sp<DataSource> &dataSource,
- const sp<MetaData> &trackMetadata)
- : mDataSource(dataSource),
- mTrackMetadata(trackMetadata),
- mParser(0),
- mInitCheck(false),
- mStarted(false)
-{
- ALOGV("FLACSource::FLACSource");
- mInitCheck = init();
-}
-
-FLACSource::~FLACSource()
-{
- ALOGV("~FLACSource::FLACSource");
- if (mStarted) {
- stop();
- }
-}
-
-status_t FLACSource::start(MetaData * /* params */)
-{
- ALOGV("FLACSource::start");
-
- CHECK(!mStarted);
- mParser->allocateBuffers();
- mStarted = true;
-
- return OK;
-}
-
-status_t FLACSource::stop()
-{
- ALOGV("FLACSource::stop");
-
- CHECK(mStarted);
- mParser->releaseBuffers();
- mStarted = false;
-
- return OK;
-}
-
-sp<MetaData> FLACSource::getFormat()
-{
- return mTrackMetadata;
-}
-
-status_t FLACSource::read(
- MediaBuffer **outBuffer, const ReadOptions *options)
-{
- MediaBuffer *buffer;
- // process an optional seek request
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- if ((NULL != options) && options->getSeekTo(&seekTimeUs, &mode)) {
- FLAC__uint64 sample;
- if (seekTimeUs <= 0LL) {
- sample = 0LL;
- } else {
- // sample and total samples are both zero-based, and seek to EOF ok
- sample = (seekTimeUs * mParser->getSampleRate()) / 1000000LL;
- if (sample >= mParser->getTotalSamples()) {
- sample = mParser->getTotalSamples();
- }
- }
- buffer = mParser->readBuffer(sample);
- // otherwise read sequentially
- } else {
- buffer = mParser->readBuffer();
- }
- *outBuffer = buffer;
- return buffer != NULL ? (status_t) OK : (status_t) ERROR_END_OF_STREAM;
-}
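
As a worked example of the time-to-sample conversion above: seeking to seekTimeUs = 2500000 (2.5 s) in a 44100 Hz stream gives sample = (2500000 * 44100) / 1000000 = 110250. A request beyond the end of the track is clamped to getTotalSamples(), which libFLAC accepts as a seek to EOF.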
-
-status_t FLACSource::init()
-{
- ALOGV("FLACSource::init");
- // re-use the same track metadata passed into constructor from FLACExtractor
- mParser = new FLACParser(mDataSource);
- return mParser->initCheck();
-}
-
-// FLACExtractor
-
-FLACExtractor::FLACExtractor(
- const sp<DataSource> &dataSource)
- : mDataSource(dataSource),
- mInitCheck(false)
-{
- ALOGV("FLACExtractor::FLACExtractor");
- mInitCheck = init();
-}
-
-FLACExtractor::~FLACExtractor()
-{
- ALOGV("~FLACExtractor::FLACExtractor");
-}
-
-size_t FLACExtractor::countTracks()
-{
- return mInitCheck == OK ? 1 : 0;
-}
-
-sp<IMediaSource> FLACExtractor::getTrack(size_t index)
-{
- if (mInitCheck != OK || index > 0) {
- return NULL;
- }
- return new FLACSource(mDataSource, mTrackMetadata);
-}
-
-sp<MetaData> FLACExtractor::getTrackMetaData(
- size_t index, uint32_t /* flags */) {
- if (mInitCheck != OK || index > 0) {
- return NULL;
- }
- return mTrackMetadata;
-}
-
-status_t FLACExtractor::init()
-{
- mFileMetadata = new MetaData;
- mTrackMetadata = new MetaData;
- // FLACParser will fill in the metadata for us
- mParser = new FLACParser(mDataSource, mFileMetadata, mTrackMetadata);
- return mParser->initCheck();
-}
-
-sp<MetaData> FLACExtractor::getMetaData()
-{
- return mFileMetadata;
-}
-
-// Sniffer
-
-bool SniffFLAC(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *)
-{
- // The first 4 bytes are the "fLaC" stream marker.
- // The next 4 bytes are the header of the mandatory STREAMINFO metadata block,
- // whose length field must be 042 (octal) = 34 bytes.
- // No need to read the rest of the header; a premature EOF will be caught later.
- uint8_t header[4+4];
- if (source->readAt(0, header, sizeof(header)) != sizeof(header)
- || memcmp("fLaC\0\0\0\042", header, 4+4))
- {
- return false;
- }
-
- *mimeType = MEDIA_MIMETYPE_AUDIO_FLAC;
- *confidence = 0.5;
-
- return true;
-}
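
The eight bytes compared above are the layout the sniffer insists on (a breakdown, not part of the change):

    // offset 0..3: 'f' 'L' 'a' 'C'       -- FLAC stream marker
    // offset 4:    0x00                  -- is-last flag (0) + block type 0 (STREAMINFO)
    // offset 5..7: 0x00 0x00 0x22        -- block length 34 (octal 042), the fixed STREAMINFO size

Note that byte 4 also carries the is-last-metadata-block flag, so the exact match assumes STREAMINFO is not the only metadata block.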
-
-} // namespace android
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index 97d8988..eef5314 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -194,12 +194,6 @@
return mDecryptHandle;
}
-void FileSource::getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client) {
- handle = mDecryptHandle;
-
- *client = mDrmManagerClient;
-}
-
ssize_t FileSource::readAtDRM(off64_t offset, void *data, size_t size) {
size_t DRM_CACHE_SIZE = 1024;
if (mDrmBuf == NULL) {
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
new file mode 100644
index 0000000..29a219f
--- /dev/null
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -0,0 +1,741 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameDecoder"
+
+#include "include/FrameDecoder.h"
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <gui/Surface.h>
+#include <inttypes.h>
+#include <media/ICrypto.h>
+#include <media/IMediaSource.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/ColorConverter.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+#include <private/media/VideoFrame.h>
+#include <utils/Log.h>
+
+namespace android {
+
+static const int64_t kBufferTimeOutUs = 10000ll; // 10 msec
+static const size_t kRetryCount = 50; // must be >0
+
+sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
+ int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+ int32_t dstBpp, bool metaOnly = false) {
+ int32_t rotationAngle;
+ if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
+ rotationAngle = 0; // By default, no rotation
+ }
+ uint32_t type;
+ const void *iccData;
+ size_t iccSize;
+ if (!trackMeta->findData(kKeyIccProfile, &type, &iccData, &iccSize)){
+ iccData = NULL;
+ iccSize = 0;
+ }
+
+ int32_t sarWidth, sarHeight;
+ int32_t displayWidth, displayHeight;
+ if (trackMeta->findInt32(kKeySARWidth, &sarWidth)
+ && trackMeta->findInt32(kKeySARHeight, &sarHeight)
+ && sarHeight != 0) {
+ displayWidth = (width * sarWidth) / sarHeight;
+ displayHeight = height;
+ } else if (trackMeta->findInt32(kKeyDisplayWidth, &displayWidth)
+ && trackMeta->findInt32(kKeyDisplayHeight, &displayHeight)
+ && displayWidth > 0 && displayHeight > 0
+ && width > 0 && height > 0) {
+ ALOGV("found display size %dx%d", displayWidth, displayHeight);
+ } else {
+ displayWidth = width;
+ displayHeight = height;
+ }
+
+ VideoFrame frame(width, height, displayWidth, displayHeight,
+ tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
+
+ size_t size = frame.getFlattenedSize();
+ sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
+ if (heap == NULL) {
+ ALOGE("failed to create MemoryDealer");
+ return NULL;
+ }
+ sp<IMemory> frameMem = new MemoryBase(heap, 0, size);
+ if (frameMem == NULL) {
+ ALOGE("not enough memory for VideoFrame size=%zu", size);
+ return NULL;
+ }
+ VideoFrame* frameCopy = static_cast<VideoFrame*>(frameMem->pointer());
+ frameCopy->init(frame, iccData, iccSize);
+
+ return frameMem;
+}
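
A worked example of the sample-aspect-ratio branch above: a 720x480 coded frame carrying sarWidth = 32 and sarHeight = 27 yields displayWidth = (720 * 32) / 27 = 853 and displayHeight = 480, i.e. the anamorphic frame is widened to roughly 16:9 for display; when neither SAR nor an explicit display size is present, the coded dimensions are used as-is.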
+
+bool findThumbnailInfo(
+ const sp<MetaData> &trackMeta, int32_t *width, int32_t *height,
+ uint32_t *type = NULL, const void **data = NULL, size_t *size = NULL) {
+ uint32_t dummyType;
+ const void *dummyData;
+ size_t dummySize;
+ return trackMeta->findInt32(kKeyThumbnailWidth, width)
+ && trackMeta->findInt32(kKeyThumbnailHeight, height)
+ && trackMeta->findData(kKeyThumbnailHVCC,
+ type ?: &dummyType, data ?: &dummyData, size ?: &dummySize);
+}
+
+bool findGridInfo(const sp<MetaData> &trackMeta,
+ int32_t *tileWidth, int32_t *tileHeight, int32_t *gridRows, int32_t *gridCols) {
+ return trackMeta->findInt32(kKeyTileWidth, tileWidth) && (*tileWidth > 0)
+ && trackMeta->findInt32(kKeyTileHeight, tileHeight) && (*tileHeight > 0)
+ && trackMeta->findInt32(kKeyGridRows, gridRows) && (*gridRows > 0)
+ && trackMeta->findInt32(kKeyGridCols, gridCols) && (*gridCols > 0);
+}
+
+bool getDstColorFormat(
+ android_pixel_format_t colorFormat,
+ OMX_COLOR_FORMATTYPE *dstFormat,
+ int32_t *dstBpp) {
+ switch (colorFormat) {
+ case HAL_PIXEL_FORMAT_RGB_565:
+ {
+ *dstFormat = OMX_COLOR_Format16bitRGB565;
+ *dstBpp = 2;
+ return true;
+ }
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ {
+ *dstFormat = OMX_COLOR_Format32BitRGBA8888;
+ *dstBpp = 4;
+ return true;
+ }
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ {
+ *dstFormat = OMX_COLOR_Format32bitBGRA8888;
+ *dstBpp = 4;
+ return true;
+ }
+ default:
+ {
+ ALOGE("Unsupported color format: %d", colorFormat);
+ break;
+ }
+ }
+ return false;
+}
+
+//static
+sp<IMemory> FrameDecoder::getMetadataOnly(
+ const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
+ OMX_COLOR_FORMATTYPE dstFormat;
+ int32_t dstBpp;
+ if (!getDstColorFormat(
+ (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
+ return NULL;
+ }
+
+ int32_t width, height, tileWidth = 0, tileHeight = 0;
+ if (thumbnail) {
+ if (!findThumbnailInfo(trackMeta, &width, &height)) {
+ return NULL;
+ }
+ } else {
+ CHECK(trackMeta->findInt32(kKeyWidth, &width));
+ CHECK(trackMeta->findInt32(kKeyHeight, &height));
+
+ int32_t gridRows, gridCols;
+ if (!findGridInfo(trackMeta, &tileWidth, &tileHeight, &gridRows, &gridCols)) {
+ tileWidth = tileHeight = 0;
+ }
+ }
+ return allocVideoFrame(trackMeta,
+ width, height, tileWidth, tileHeight, dstBpp, true /*metaOnly*/);
+}
+
+FrameDecoder::FrameDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source)
+ : mComponentName(componentName),
+ mTrackMeta(trackMeta),
+ mSource(source),
+ mDstFormat(OMX_COLOR_Format16bitRGB565),
+ mDstBpp(2),
+ mHaveMoreInputs(true),
+ mFirstSample(true) {
+}
+
+FrameDecoder::~FrameDecoder() {
+ if (mDecoder != NULL) {
+ mDecoder->release();
+ mSource->stop();
+ }
+}
+
+status_t FrameDecoder::init(
+ int64_t frameTimeUs, size_t numFrames, int option, int colorFormat) {
+ if (!getDstColorFormat(
+ (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ sp<AMessage> videoFormat = onGetFormatAndSeekOptions(
+ frameTimeUs, numFrames, option, &mReadOptions);
+ if (videoFormat == NULL) {
+ ALOGE("video format or seek mode not supported");
+ return ERROR_UNSUPPORTED;
+ }
+
+ status_t err;
+ sp<ALooper> looper = new ALooper;
+ looper->start();
+ sp<MediaCodec> decoder = MediaCodec::CreateByComponentName(
+ looper, mComponentName, &err);
+ if (decoder.get() == NULL || err != OK) {
+ ALOGW("Failed to instantiate decoder [%s]", mComponentName.c_str());
+ return (decoder.get() == NULL) ? NO_MEMORY : err;
+ }
+
+ err = decoder->configure(
+ videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
+ if (err != OK) {
+ ALOGW("configure returned error %d (%s)", err, asString(err));
+ decoder->release();
+ return err;
+ }
+
+ err = decoder->start();
+ if (err != OK) {
+ ALOGW("start returned error %d (%s)", err, asString(err));
+ decoder->release();
+ return err;
+ }
+
+ err = mSource->start();
+ if (err != OK) {
+ ALOGW("source failed to start: %d (%s)", err, asString(err));
+ decoder->release();
+ return err;
+ }
+ mDecoder = decoder;
+
+ return OK;
+}
+
+sp<IMemory> FrameDecoder::extractFrame(FrameRect *rect) {
+ status_t err = onExtractRect(rect);
+ if (err == OK) {
+ err = extractInternal();
+ }
+ if (err != OK) {
+ return NULL;
+ }
+
+ return mFrames.size() > 0 ? mFrames[0] : NULL;
+}
+
+status_t FrameDecoder::extractFrames(std::vector<sp<IMemory> >* frames) {
+ status_t err = extractInternal();
+ if (err != OK) {
+ return err;
+ }
+
+ for (size_t i = 0; i < mFrames.size(); i++) {
+ frames->push_back(mFrames[i]);
+ }
+ return OK;
+}
+
+status_t FrameDecoder::extractInternal() {
+ status_t err = OK;
+ bool done = false;
+ size_t retriesLeft = kRetryCount;
+ do {
+ size_t index;
+ int64_t ptsUs = 0ll;
+ uint32_t flags = 0;
+
+ // Queue as many inputs as we possibly can, then block on dequeuing
+ // outputs. After getting each output, come back and queue the inputs
+ // again to keep the decoder busy.
+ while (mHaveMoreInputs) {
+ err = mDecoder->dequeueInputBuffer(&index, 0);
+ if (err != OK) {
+ ALOGV("Timed out waiting for input");
+ if (retriesLeft) {
+ err = OK;
+ }
+ break;
+ }
+ sp<MediaCodecBuffer> codecBuffer;
+ err = mDecoder->getInputBuffer(index, &codecBuffer);
+ if (err != OK) {
+ ALOGE("failed to get input buffer %zu", index);
+ break;
+ }
+
+ MediaBufferBase *mediaBuffer = NULL;
+
+ err = mSource->read(&mediaBuffer, &mReadOptions);
+ mReadOptions.clearSeekTo();
+ if (err != OK) {
+ ALOGW("Input Error or EOS");
+ mHaveMoreInputs = false;
+ if (!mFirstSample && err == ERROR_END_OF_STREAM) {
+ err = OK;
+ }
+ break;
+ }
+
+ if (mediaBuffer->range_length() > codecBuffer->capacity()) {
+ ALOGE("buffer size (%zu) too large for codec input size (%zu)",
+ mediaBuffer->range_length(), codecBuffer->capacity());
+ mHaveMoreInputs = false;
+ err = BAD_VALUE;
+ } else {
+ codecBuffer->setRange(0, mediaBuffer->range_length());
+
+ CHECK(mediaBuffer->meta_data().findInt64(kKeyTime, &ptsUs));
+ memcpy(codecBuffer->data(),
+ (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
+ mediaBuffer->range_length());
+
+ onInputReceived(codecBuffer, mediaBuffer->meta_data(), mFirstSample, &flags);
+ mFirstSample = false;
+ }
+
+ mediaBuffer->release();
+
+ if (mHaveMoreInputs) {
+ ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
+ codecBuffer->size(), ptsUs, flags);
+
+ err = mDecoder->queueInputBuffer(
+ index,
+ codecBuffer->offset(),
+ codecBuffer->size(),
+ ptsUs,
+ flags);
+
+ if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+ mHaveMoreInputs = false;
+ }
+ }
+ }
+
+ while (err == OK) {
+ size_t offset, size;
+ // wait for a decoded buffer
+ err = mDecoder->dequeueOutputBuffer(
+ &index,
+ &offset,
+ &size,
+ &ptsUs,
+ &flags,
+ kBufferTimeOutUs);
+
+ if (err == INFO_FORMAT_CHANGED) {
+ ALOGV("Received format change");
+ err = mDecoder->getOutputFormat(&mOutputFormat);
+ } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+ ALOGV("Output buffers changed");
+ err = OK;
+ } else {
+ if (err == -EAGAIN /* INFO_TRY_AGAIN_LATER */ && --retriesLeft > 0) {
+ ALOGV("Timed-out waiting for output.. retries left = %zu", retriesLeft);
+ err = OK;
+ } else if (err == OK) {
+ // If we're seeking with CLOSEST option and obtained a valid targetTimeUs
+ // from the extractor, decode to the specified frame. Otherwise we're done.
+ ALOGV("Received an output buffer, timeUs=%lld", (long long)ptsUs);
+ sp<MediaCodecBuffer> videoFrameBuffer;
+ err = mDecoder->getOutputBuffer(index, &videoFrameBuffer);
+ if (err != OK) {
+ ALOGE("failed to get output buffer %zu", index);
+ break;
+ }
+ err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+ mDecoder->releaseOutputBuffer(index);
+ } else {
+ ALOGW("Received error %d (%s) instead of output", err, asString(err));
+ done = true;
+ }
+ break;
+ }
+ }
+ } while (err == OK && !done);
+
+ if (err != OK) {
+ ALOGE("failed to get video frame (err %d)", err);
+ }
+
+ return err;
+}
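
The loop above is easier to follow as a condensed restatement (a sketch only; the retry accounting, size checks, and CLOSEST-seek handling are elided):

    while (err == OK && !done) {
        // 1. keep the codec fed: queue inputs until it stops handing out buffers
        while (mHaveMoreInputs && mDecoder->dequeueInputBuffer(&index, 0) == OK) {
            // read one sample from mSource, copy it into the codec buffer,
            // let onInputReceived() decide whether to set BUFFER_FLAG_EOS, then
            mDecoder->queueInputBuffer(index, offset, size, ptsUs, flags);
        }
        // 2. block (up to kBufferTimeOutUs) for one output
        err = mDecoder->dequeueOutputBuffer(&index, &offset, &size, &ptsUs, &flags,
                kBufferTimeOutUs);
        // INFO_FORMAT_CHANGED refreshes mOutputFormat, INFO_OUTPUT_BUFFERS_CHANGED is
        // benign; an actual buffer is handed to onOutputReceived(), which sets 'done',
        // and is then released with releaseOutputBuffer(index).
    }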
+
+//////////////////////////////////////////////////////////////////////
+
+VideoFrameDecoder::VideoFrameDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source)
+ : FrameDecoder(componentName, trackMeta, source),
+ mIsAvcOrHevc(false),
+ mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
+ mTargetTimeUs(-1ll),
+ mNumFrames(0),
+ mNumFramesDecoded(0) {
+}
+
+sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
+ int64_t frameTimeUs, size_t numFrames, int seekMode, MediaSource::ReadOptions *options) {
+ mSeekMode = static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
+ if (mSeekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
+ mSeekMode > MediaSource::ReadOptions::SEEK_FRAME_INDEX) {
+ ALOGE("Unknown seek mode: %d", mSeekMode);
+ return NULL;
+ }
+ mNumFrames = numFrames;
+
+ const char *mime;
+ if (!trackMeta()->findCString(kKeyMIMEType, &mime)) {
+ ALOGE("Could not find mime type");
+ return NULL;
+ }
+
+ mIsAvcOrHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+ || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+
+ if (frameTimeUs < 0) {
+ int64_t thumbNailTime;
+ if (!trackMeta()->findInt64(kKeyThumbnailTime, &thumbNailTime)
+ || thumbNailTime < 0) {
+ thumbNailTime = 0;
+ }
+ options->setSeekTo(thumbNailTime, mSeekMode);
+ } else {
+ options->setSeekTo(frameTimeUs, mSeekMode);
+ }
+
+ sp<AMessage> videoFormat;
+ if (convertMetaDataToMessage(trackMeta(), &videoFormat) != OK) {
+ ALOGE("b/23680780");
+ ALOGW("Failed to convert meta data to message");
+ return NULL;
+ }
+
+ // TODO: Use Flexible color instead
+ videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+
+ // For the thumbnail extraction case, try to allocate a single buffer on both
+ // the input and output ports if seeking to a sync frame. NOTE: this request
+ // may fail if the component requires more than one buffer for decoding.
+ bool isSeekingClosest = (mSeekMode == MediaSource::ReadOptions::SEEK_CLOSEST)
+ || (mSeekMode == MediaSource::ReadOptions::SEEK_FRAME_INDEX);
+ if (!isSeekingClosest) {
+ videoFormat->setInt32("android._num-input-buffers", 1);
+ videoFormat->setInt32("android._num-output-buffers", 1);
+ }
+ return videoFormat;
+}
+
+status_t VideoFrameDecoder::onInputReceived(
+ const sp<MediaCodecBuffer> &codecBuffer,
+ MetaDataBase &sampleMeta, bool firstSample, uint32_t *flags) {
+ bool isSeekingClosest = (mSeekMode == MediaSource::ReadOptions::SEEK_CLOSEST)
+ || (mSeekMode == MediaSource::ReadOptions::SEEK_FRAME_INDEX);
+
+ if (firstSample && isSeekingClosest) {
+ sampleMeta.findInt64(kKeyTargetTime, &mTargetTimeUs);
+ ALOGV("Seeking closest: targetTimeUs=%lld", (long long)mTargetTimeUs);
+ }
+
+ if (mIsAvcOrHevc && !isSeekingClosest
+ && IsIDR(codecBuffer->data(), codecBuffer->size())) {
+ // Only need to decode one IDR frame, unless we're seeking with CLOSEST
+ // option, in which case we need to actually decode to targetTimeUs.
+ *flags |= MediaCodec::BUFFER_FLAG_EOS;
+ }
+ return OK;
+}
+
+status_t VideoFrameDecoder::onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat,
+ int64_t timeUs, bool *done) {
+ bool shouldOutput = (mTargetTimeUs < 0ll) || (timeUs >= mTargetTimeUs);
+
+ // If this is not the target frame, skip color convert.
+ if (!shouldOutput) {
+ *done = false;
+ return OK;
+ }
+
+ *done = (++mNumFramesDecoded >= mNumFrames);
+
+ if (outputFormat == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ int32_t width, height;
+ CHECK(outputFormat->findInt32("width", &width));
+ CHECK(outputFormat->findInt32("height", &height));
+
+ int32_t crop_left, crop_top, crop_right, crop_bottom;
+ if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
+ crop_left = crop_top = 0;
+ crop_right = width - 1;
+ crop_bottom = height - 1;
+ }
+
+ sp<IMemory> frameMem = allocVideoFrame(
+ trackMeta(),
+ (crop_right - crop_left + 1),
+ (crop_bottom - crop_top + 1),
+ 0,
+ 0,
+ dstBpp());
+ addFrame(frameMem);
+ VideoFrame* frame = static_cast<VideoFrame*>(frameMem->pointer());
+
+ int32_t srcFormat;
+ CHECK(outputFormat->findInt32("color-format", &srcFormat));
+
+ ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
+
+ if (converter.isValid()) {
+ converter.convert(
+ (const uint8_t *)videoFrameBuffer->data(),
+ width, height,
+ crop_left, crop_top, crop_right, crop_bottom,
+ frame->getFlattenedData(),
+ frame->mWidth,
+ frame->mHeight,
+ crop_left, crop_top, crop_right, crop_bottom);
+ return OK;
+ }
+
+ ALOGE("Unable to convert from format 0x%08x to 0x%08x",
+ srcFormat, dstFormat());
+ return ERROR_UNSUPPORTED;
+}
+
+////////////////////////////////////////////////////////////////////////
+
+ImageDecoder::ImageDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source)
+ : FrameDecoder(componentName, trackMeta, source),
+ mFrame(NULL),
+ mWidth(0),
+ mHeight(0),
+ mGridRows(1),
+ mGridCols(1),
+ mTileWidth(0),
+ mTileHeight(0),
+ mTilesDecoded(0),
+ mTargetTiles(0) {
+}
+
+sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
+ int64_t frameTimeUs, size_t /*numFrames*/,
+ int /*seekMode*/, MediaSource::ReadOptions *options) {
+ sp<MetaData> overrideMeta;
+ if (frameTimeUs < 0) {
+ uint32_t type;
+ const void *data;
+ size_t size;
+
+ // if we have a stand-alone thumbnail, set up the override meta,
+ // and set seekTo time to -1.
+ if (!findThumbnailInfo(trackMeta(), &mWidth, &mHeight, &type, &data, &size)) {
+ ALOGE("Thumbnail not available");
+ return NULL;
+ }
+ overrideMeta = new MetaData(*(trackMeta()));
+ overrideMeta->remove(kKeyDisplayWidth);
+ overrideMeta->remove(kKeyDisplayHeight);
+ overrideMeta->setInt32(kKeyWidth, mWidth);
+ overrideMeta->setInt32(kKeyHeight, mHeight);
+ overrideMeta->setData(kKeyHVCC, type, data, size);
+ options->setSeekTo(-1);
+ } else {
+ CHECK(trackMeta()->findInt32(kKeyWidth, &mWidth));
+ CHECK(trackMeta()->findInt32(kKeyHeight, &mHeight));
+
+ options->setSeekTo(frameTimeUs);
+ }
+
+ mGridRows = mGridCols = 1;
+ if (overrideMeta == NULL) {
+ // check if we're dealing with a tiled heif
+ int32_t tileWidth, tileHeight, gridRows, gridCols;
+ if (findGridInfo(trackMeta(), &tileWidth, &tileHeight, &gridRows, &gridCols)) {
+ if (mWidth <= tileWidth * gridCols && mHeight <= tileHeight * gridRows) {
+ ALOGV("grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
+ gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
+
+ overrideMeta = new MetaData(*(trackMeta()));
+ overrideMeta->setInt32(kKeyWidth, tileWidth);
+ overrideMeta->setInt32(kKeyHeight, tileHeight);
+ mTileWidth = tileWidth;
+ mTileHeight = tileHeight;
+ mGridCols = gridCols;
+ mGridRows = gridRows;
+ } else {
+ ALOGW("ignore bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
+ gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
+ }
+ }
+ if (overrideMeta == NULL) {
+ overrideMeta = trackMeta();
+ }
+ }
+ mTargetTiles = mGridCols * mGridRows;
+
+ sp<AMessage> videoFormat;
+ if (convertMetaDataToMessage(overrideMeta, &videoFormat) != OK) {
+ ALOGE("b/23680780");
+ ALOGW("Failed to convert meta data to message");
+ return NULL;
+ }
+
+ // TODO: Use Flexible color instead
+ videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+
+ if ((mGridRows == 1) && (mGridCols == 1)) {
+ videoFormat->setInt32("android._num-input-buffers", 1);
+ videoFormat->setInt32("android._num-output-buffers", 1);
+ }
+ return videoFormat;
+}
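
A worked example of the grid check above: a 4000x3000 image stored as 512x512 tiles in an 8-column by 6-row grid passes the test (4000 <= 512 * 8 = 4096 and 3000 <= 512 * 6 = 3072), so the decoder is configured for 512x512 tile-sized buffers, mGridCols/mGridRows become 8 and 6, and mTargetTiles = 48; the single-buffer hint below is then skipped because the grid is larger than 1x1.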
+
+status_t ImageDecoder::onExtractRect(FrameRect *rect) {
+ // TODO:
+ // This callback is for verifying whether we can decode the rect,
+ // and if so, set up the internal variables for decoding.
+ // Currently, rect decoding is restricted to sequentially decoding one
+ // row of tiles at a time. We can't decode arbitrary rects, as the image
+ // track doesn't yet support seeking by tiles. So all we do here is to
+ // verify the rect against what we expect.
+ // When seeking by tile is supported, this code should be updated to
+ // set the seek parameters.
+ if (rect == NULL) {
+ if (mTilesDecoded > 0) {
+ return ERROR_UNSUPPORTED;
+ }
+ mTargetTiles = mGridRows * mGridCols;
+ return OK;
+ }
+
+ if (mTileWidth <= 0 || mTileHeight <=0) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ int32_t row = mTilesDecoded / mGridCols;
+ int32_t expectedTop = row * mTileHeight;
+ int32_t expectedBot = (row + 1) * mTileHeight;
+ if (expectedBot > mHeight) {
+ expectedBot = mHeight;
+ }
+ if (rect->left != 0 || rect->top != expectedTop
+ || rect->right != mWidth || rect->bottom != expectedBot) {
+ ALOGE("currently only support sequential decoding of slices");
+ return ERROR_UNSUPPORTED;
+ }
+
+ // advance one row
+ mTargetTiles = mTilesDecoded + mGridCols;
+ return OK;
+}
+
+status_t ImageDecoder::onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
+ if (outputFormat == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ int32_t width, height;
+ CHECK(outputFormat->findInt32("width", &width));
+ CHECK(outputFormat->findInt32("height", &height));
+
+ if (mFrame == NULL) {
+ sp<IMemory> frameMem = allocVideoFrame(
+ trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
+ mFrame = static_cast<VideoFrame*>(frameMem->pointer());
+
+ addFrame(frameMem);
+ }
+
+ int32_t srcFormat;
+ CHECK(outputFormat->findInt32("color-format", &srcFormat));
+
+ ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
+
+ int32_t dstLeft, dstTop, dstRight, dstBottom;
+ dstLeft = mTilesDecoded % mGridCols * width;
+ dstTop = mTilesDecoded / mGridCols * height;
+ dstRight = dstLeft + width - 1;
+ dstBottom = dstTop + height - 1;
+
+ int32_t crop_left, crop_top, crop_right, crop_bottom;
+ if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
+ crop_left = crop_top = 0;
+ crop_right = width - 1;
+ crop_bottom = height - 1;
+ }
+
+ // apply crop on bottom-right
+ // TODO: need to move this into the color converter itself.
+ if (dstRight >= mWidth) {
+ crop_right = mWidth - dstLeft - 1;
+ dstRight = dstLeft + crop_right;
+ }
+ if (dstBottom >= mHeight) {
+ crop_bottom = mHeight - dstTop - 1;
+ dstBottom = dstTop + crop_bottom;
+ }
+
+ *done = (++mTilesDecoded >= mTargetTiles);
+
+ if (converter.isValid()) {
+ converter.convert(
+ (const uint8_t *)videoFrameBuffer->data(),
+ width, height,
+ crop_left, crop_top, crop_right, crop_bottom,
+ mFrame->getFlattenedData(),
+ mFrame->mWidth,
+ mFrame->mHeight,
+ dstLeft, dstTop, dstRight, dstBottom);
+ return OK;
+ }
+
+ ALOGE("Unable to convert from format 0x%08x to 0x%08x",
+ srcFormat, dstFormat());
+ return ERROR_UNSUPPORTED;
+}
+
+} // namespace android
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
index 7d463a9..f152a38 100644
--- a/media/libstagefright/HevcUtils.cpp
+++ b/media/libstagefright/HevcUtils.cpp
@@ -21,12 +21,12 @@
#include <utility>
#include "include/HevcUtils.h"
-#include "include/avc_utils.h"
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
@@ -162,6 +162,8 @@
reader.skipBits(1);
// Skip vps_max_layers_minus_1
reader.skipBits(6);
+ // Skip vps_max_sub_layers_minus1
+ reader.skipBits(3);
// Skip vps_temporal_id_nesting_flags
reader.skipBits(1);
// Skip reserved
@@ -422,7 +424,7 @@
uint8_t *header = hvcc;
header[0] = 1;
- header[1] = (kGeneralProfileSpace << 6) | (kGeneralTierFlag << 5) | kGeneralProfileIdc;
+ header[1] = (generalProfileSpace << 6) | (generalTierFlag << 5) | generalProfileIdc;
header[2] = (compatibilityFlags >> 24) & 0xff;
header[3] = (compatibilityFlags >> 16) & 0xff;
header[4] = (compatibilityFlags >> 8) & 0xff;
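
The fix above packs the values parsed from the bitstream, rather than hard-coded constants, into byte 1 of the HEVCDecoderConfigurationRecord. As a worked example of the packing: generalProfileSpace = 0, generalTierFlag = 0, generalProfileIdc = 1 (Main profile) gives (0 << 6) | (0 << 5) | 1 = 0x01, while a Main-profile, high-tier stream would give (0 << 6) | (1 << 5) | 1 = 0x21.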
diff --git a/media/libstagefright/InterfaceUtils.cpp b/media/libstagefright/InterfaceUtils.cpp
new file mode 100644
index 0000000..56c5908
--- /dev/null
+++ b/media/libstagefright/InterfaceUtils.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "include/CallbackDataSource.h"
+
+#include <media/stagefright/CallbackMediaSource.h>
+#include <media/stagefright/InterfaceUtils.h>
+#include <media/stagefright/RemoteDataSource.h>
+#include <media/stagefright/RemoteMediaSource.h>
+
+namespace android {
+
+sp<DataSource> CreateDataSourceFromIDataSource(const sp<IDataSource> &source) {
+ if (source == nullptr) {
+ return nullptr;
+ }
+ return new TinyCacheSource(new CallbackDataSource(source));
+}
+
+sp<IDataSource> CreateIDataSourceFromDataSource(const sp<DataSource> &source) {
+ if (source == nullptr) {
+ return nullptr;
+ }
+ return RemoteDataSource::wrap(source);
+}
+
+sp<IMediaExtractor> CreateIMediaExtractorFromMediaExtractor(
+ MediaExtractor *extractor,
+ const sp<DataSource> &source,
+ const sp<RefBase> &plugin) {
+ if (extractor == nullptr) {
+ return nullptr;
+ }
+ return RemoteMediaExtractor::wrap(extractor, source, plugin);
+}
+
+sp<MediaSource> CreateMediaSourceFromIMediaSource(const sp<IMediaSource> &source) {
+ if (source == nullptr) {
+ return nullptr;
+ }
+ return new CallbackMediaSource(source);
+}
+
+sp<IMediaSource> CreateIMediaSourceFromMediaSourceBase(
+ const sp<RemoteMediaExtractor> &extractor,
+ MediaTrack *source, const sp<RefBase> &plugin) {
+ if (source == nullptr) {
+ return nullptr;
+ }
+ return RemoteMediaSource::wrap(extractor, source, plugin);
+}
+
+} // namespace android
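
These helpers convert between the in-process types and their binder-facing counterparts in both directions. A hypothetical round trip for a data source (a sketch; the client/server split and variable names are illustrative, not from this change):

    // process A: expose a local DataSource over binder
    sp<IDataSource> remote = CreateIDataSourceFromDataSource(localSource);

    // process B: wrap the received proxy back into a DataSource; the
    // TinyCacheSource layer added by the helper absorbs small remote reads
    sp<DataSource> usable = CreateDataSourceFromIDataSource(remote);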
diff --git a/media/libstagefright/ItemTable.cpp b/media/libstagefright/ItemTable.cpp
deleted file mode 100644
index 7bc4f3c..0000000
--- a/media/libstagefright/ItemTable.cpp
+++ /dev/null
@@ -1,1560 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ItemTable"
-
-#include <include/ItemTable.h>
-#include <media/MediaDefs.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <utils/Log.h>
-
-namespace android {
-
-namespace heif {
-
-/////////////////////////////////////////////////////////////////////
-//
-// struct to keep track of one image item
-//
-
-struct ImageItem {
- friend struct ItemReference;
- friend struct ItemProperty;
-
- ImageItem() : ImageItem(0) {}
- ImageItem(uint32_t _type) : type(_type),
- rows(0), columns(0), width(0), height(0), rotation(0),
- offset(0), size(0), nextTileIndex(0) {}
-
- bool isGrid() const {
- return type == FOURCC('g', 'r', 'i', 'd');
- }
-
- status_t getNextTileItemId(uint32_t *nextTileItemId, bool reset) {
- if (reset) {
- nextTileIndex = 0;
- }
- if (nextTileIndex >= dimgRefs.size()) {
- return ERROR_END_OF_STREAM;
- }
- *nextTileItemId = dimgRefs[nextTileIndex++];
- return OK;
- }
-
- uint32_t type;
- int32_t rows;
- int32_t columns;
- int32_t width;
- int32_t height;
- int32_t rotation;
- off64_t offset;
- size_t size;
- sp<ABuffer> hvcc;
- sp<ABuffer> icc;
-
- Vector<uint32_t> thumbnails;
- Vector<uint32_t> dimgRefs;
- size_t nextTileIndex;
-};
-
-
-/////////////////////////////////////////////////////////////////////
-//
-// ISO boxes
-//
-
-struct Box {
-protected:
- Box(const sp<DataSource> source, uint32_t type) :
- mDataSource(source), mType(type) {}
-
- virtual ~Box() {}
-
- virtual status_t onChunkData(
- uint32_t /*type*/, off64_t /*offset*/, size_t /*size*/) {
- return OK;
- }
-
- inline uint32_t type() const { return mType; }
-
- inline sp<DataSource> source() const { return mDataSource; }
-
- status_t parseChunk(off64_t *offset);
-
- status_t parseChunks(off64_t offset, size_t size);
-
-private:
- sp<DataSource> mDataSource;
- uint32_t mType;
-};
-
-status_t Box::parseChunk(off64_t *offset) {
- if (*offset < 0) {
- ALOGE("b/23540914");
- return ERROR_MALFORMED;
- }
- uint32_t hdr[2];
- if (mDataSource->readAt(*offset, hdr, 8) < 8) {
- return ERROR_IO;
- }
- uint64_t chunk_size = ntohl(hdr[0]);
- int32_t chunk_type = ntohl(hdr[1]);
- off64_t data_offset = *offset + 8;
-
- if (chunk_size == 1) {
- if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
- return ERROR_IO;
- }
- chunk_size = ntoh64(chunk_size);
- data_offset += 8;
-
- if (chunk_size < 16) {
- // The smallest valid chunk is 16 bytes long in this case.
- return ERROR_MALFORMED;
- }
- } else if (chunk_size == 0) {
- // This shouldn't happen since we should never be top level
- ALOGE("invalid chunk size 0 for non-top level box");
- return ERROR_MALFORMED;
- } else if (chunk_size < 8) {
- // The smallest valid chunk is 8 bytes long.
- ALOGE("invalid chunk size: %lld", (long long)chunk_size);
- return ERROR_MALFORMED;
- }
-
- char chunk[5];
- MakeFourCCString(chunk_type, chunk);
- ALOGV("chunk: %s @ %lld", chunk, (long long)*offset);
-
- off64_t chunk_data_size = chunk_size - (data_offset - *offset);
- if (chunk_data_size < 0) {
- ALOGE("b/23540914");
- return ERROR_MALFORMED;
- }
-
- status_t err = onChunkData(chunk_type, data_offset, chunk_data_size);
-
- if (err != OK) {
- return err;
- }
- *offset += chunk_size;
- return OK;
-}
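
parseChunk() follows the standard ISO BMFF box header layout. A worked example of the case handled above: a box that starts with the bytes 00 00 00 16 'i' 'l' 'o' 'c' has chunk_size = 0x16 = 22 and chunk_type = 'iloc', so data_offset = *offset + 8 and chunk_data_size = 14; a leading size of 1 instead switches to the 64-bit largesize that follows the type (16-byte minimum), and a size of 0 ("extends to end of file") is rejected because these boxes are never top-level.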
-
-status_t Box::parseChunks(off64_t offset, size_t size) {
- off64_t stopOffset = offset + size;
- while (offset < stopOffset) {
- status_t err = parseChunk(&offset);
- if (err != OK) {
- return err;
- }
- }
- if (offset != stopOffset) {
- return ERROR_MALFORMED;
- }
- return OK;
-}
-
-///////////////////////////////////////////////////////////////////////
-
-struct FullBox : public Box {
-protected:
- FullBox(const sp<DataSource> source, uint32_t type) :
- Box(source, type), mVersion(0), mFlags(0) {}
-
- inline uint8_t version() const { return mVersion; }
-
- inline uint32_t flags() const { return mFlags; }
-
- status_t parseFullBoxHeader(off64_t *offset, size_t *size);
-
-private:
- uint8_t mVersion;
- uint32_t mFlags;
-};
-
-status_t FullBox::parseFullBoxHeader(off64_t *offset, size_t *size) {
- if (*size < 4) {
- return ERROR_MALFORMED;
- }
- if (!source()->readAt(*offset, &mVersion, 1)) {
- return ERROR_IO;
- }
- if (!source()->getUInt24(*offset + 1, &mFlags)) {
- return ERROR_IO;
- }
- *offset += 4;
- *size -= 4;
- return OK;
-}
-
-/////////////////////////////////////////////////////////////////////
-//
-// PrimaryImage box
-//
-
-struct PitmBox : public FullBox {
- PitmBox(const sp<DataSource> source) :
- FullBox(source, FOURCC('p', 'i', 't', 'm')) {}
-
- status_t parse(off64_t offset, size_t size, uint32_t *primaryItemId);
-};
-
-status_t PitmBox::parse(off64_t offset, size_t size, uint32_t *primaryItemId) {
- status_t err = parseFullBoxHeader(&offset, &size);
- if (err != OK) {
- return err;
- }
-
- size_t itemIdSize = (version() == 0) ? 2 : 4;
- if (size < itemIdSize) {
- return ERROR_MALFORMED;
- }
- uint32_t itemId;
- if (!source()->getUInt32Var(offset, &itemId, itemIdSize)) {
- return ERROR_IO;
- }
-
- ALOGV("primary id %d", itemId);
- *primaryItemId = itemId;
-
- return OK;
-}
-
-/////////////////////////////////////////////////////////////////////
-//
-// ItemLocation related boxes
-//
-
-struct ExtentEntry {
- uint64_t extentIndex;
- uint64_t extentOffset;
- uint64_t extentLength;
-};
-
-struct ItemLoc {
- ItemLoc() : ItemLoc(0, 0, 0, 0) {}
- ItemLoc(uint32_t item_id, uint16_t construction_method,
- uint16_t data_reference_index, uint64_t base_offset) :
- itemId(item_id),
- constructionMethod(construction_method),
- dataReferenceIndex(data_reference_index),
- baseOffset(base_offset) {}
-
- void addExtent(const ExtentEntry& extent) {
- extents.push_back(extent);
- }
-
- status_t getLoc(off64_t *offset, size_t *size,
- off64_t idatOffset, size_t idatSize) const {
- // TODO: fix extent handling, fix constructionMethod = 2
- CHECK(extents.size() == 1);
- if (constructionMethod == 0) {
- *offset = baseOffset + extents[0].extentOffset;
- *size = extents[0].extentLength;
- return OK;
- } else if (constructionMethod == 1) {
- if (baseOffset + extents[0].extentOffset + extents[0].extentLength
- > idatSize) {
- return ERROR_MALFORMED;
- }
- *offset = baseOffset + extents[0].extentOffset + idatOffset;
- *size = extents[0].extentLength;
- return OK;
- }
- return ERROR_UNSUPPORTED;
- }
-
- // parsed info
- uint32_t itemId;
- uint16_t constructionMethod;
- uint16_t dataReferenceIndex;
- off64_t baseOffset;
- Vector<ExtentEntry> extents;
-};
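
A worked example of getLoc() above: an item with constructionMethod = 1, baseOffset = 0, a single extent with extentOffset = 100 and extentLength = 5000, and an 'idat' box whose payload starts at idatOffset = 8192 with idatSize = 6000 passes the bounds check (100 + 5000 <= 6000) and resolves to file offset 8192 + 100 = 8292 with size 5000; with constructionMethod = 0 the same extent would instead be read directly at file offset baseOffset + 100.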
-
-struct IlocBox : public FullBox {
- IlocBox(const sp<DataSource> source, KeyedVector<uint32_t, ItemLoc> *itemLocs) :
- FullBox(source, FOURCC('i', 'l', 'o', 'c')),
- mItemLocs(itemLocs), mHasConstructMethod1(false) {}
-
- status_t parse(off64_t offset, size_t size);
-
- bool hasConstructMethod1() { return mHasConstructMethod1; }
-
-private:
- static bool isSizeFieldValid(uint32_t offset_size) {
- return offset_size == 0 || offset_size == 4 || offset_size == 8;
- }
- KeyedVector<uint32_t, ItemLoc> *mItemLocs;
- bool mHasConstructMethod1;
-};
-
-status_t IlocBox::parse(off64_t offset, size_t size) {
- status_t err = parseFullBoxHeader(&offset, &size);
- if (err != OK) {
- return err;
- }
- if (version() > 2) {
- ALOGE("%s: invalid version %d", __FUNCTION__, version());
- return ERROR_MALFORMED;
- }
-
- if (size < 2) {
- return ERROR_MALFORMED;
- }
- uint8_t offset_size;
- if (!source()->readAt(offset++, &offset_size, 1)) {
- return ERROR_IO;
- }
- uint8_t length_size = (offset_size & 0xF);
- offset_size >>= 4;
-
- uint8_t base_offset_size;
- if (!source()->readAt(offset++, &base_offset_size, 1)) {
- return ERROR_IO;
- }
- uint8_t index_size = 0;
- if (version() == 1 || version() == 2) {
- index_size = (base_offset_size & 0xF);
- }
- base_offset_size >>= 4;
- size -= 2;
-
- if (!isSizeFieldValid(offset_size)
- || !isSizeFieldValid(length_size)
- || !isSizeFieldValid(base_offset_size)
- || !isSizeFieldValid((index_size))) {
- ALOGE("%s: offset size not valid: %d, %d, %d, %d", __FUNCTION__,
- offset_size, length_size, base_offset_size, index_size);
- return ERROR_MALFORMED;
- }
-
- uint32_t item_count;
- size_t itemFieldSize = version() < 2 ? 2 : 4;
- if (size < itemFieldSize) {
- return ERROR_MALFORMED;
- }
- if (!source()->getUInt32Var(offset, &item_count, itemFieldSize)) {
- return ERROR_IO;
- }
-
- ALOGV("item_count %lld", (long long) item_count);
- offset += itemFieldSize;
- size -= itemFieldSize;
-
- for (size_t i = 0; i < item_count; i++) {
- uint32_t item_id;
- if (!source()->getUInt32Var(offset, &item_id, itemFieldSize)) {
- return ERROR_IO;
- }
- ALOGV("item[%zu]: id %lld", i, (long long)item_id);
- offset += itemFieldSize;
-
- uint8_t construction_method = 0;
- if (version() == 1 || version() == 2) {
- uint8_t buf[2];
- if (!source()->readAt(offset, buf, 2)) {
- return ERROR_IO;
- }
- construction_method = (buf[1] & 0xF);
- ALOGV("construction_method %d", construction_method);
- if (construction_method == 1) {
- mHasConstructMethod1 = true;
- }
-
- offset += 2;
- }
-
- uint16_t data_reference_index;
- if (!source()->getUInt16(offset, &data_reference_index)) {
- return ERROR_IO;
- }
- ALOGV("data_reference_index %d", data_reference_index);
- if (data_reference_index != 0) {
- // we don't support references to other files
- return ERROR_UNSUPPORTED;
- }
- offset += 2;
-
- uint64_t base_offset = 0;
- if (base_offset_size != 0) {
- if (!source()->getUInt64Var(offset, &base_offset, base_offset_size)) {
- return ERROR_IO;
- }
- offset += base_offset_size;
- }
- ALOGV("base_offset %lld", (long long) base_offset);
-
- ssize_t index = mItemLocs->add(item_id, ItemLoc(
- item_id, construction_method, data_reference_index, base_offset));
- ItemLoc &item = mItemLocs->editValueAt(index);
-
- uint16_t extent_count;
- if (!source()->getUInt16(offset, &extent_count)) {
- return ERROR_IO;
- }
- ALOGV("extent_count %d", extent_count);
-
- if (extent_count > 1 && (offset_size == 0 || length_size == 0)) {
- // if the item is divided into more than one extent, offset and
- // length must be present.
- return ERROR_MALFORMED;
- }
- offset += 2;
-
- for (size_t j = 0; j < extent_count; j++) {
- uint64_t extent_index = 1; // default=1
- if ((version() == 1 || version() == 2) && (index_size > 0)) {
- if (!source()->getUInt64Var(offset, &extent_index, index_size)) {
- return ERROR_IO;
- }
- // TODO: add support for this mode
- offset += index_size;
- ALOGV("extent_index %lld", (long long)extent_index);
- }
-
- uint64_t extent_offset = 0; // default=0
- if (offset_size > 0) {
- if (!source()->getUInt64Var(offset, &extent_offset, offset_size)) {
- return ERROR_IO;
- }
- offset += offset_size;
- }
- ALOGV("extent_offset %lld", (long long)extent_offset);
-
- uint64_t extent_length = 0; // this indicates full length of file
- if (length_size > 0) {
- if (!source()->getUInt64Var(offset, &extent_length, length_size)) {
- return ERROR_IO;
- }
- offset += length_size;
- }
- ALOGV("extent_length %lld", (long long)extent_length);
-
- item.addExtent({ extent_index, extent_offset, extent_length });
- }
- }
- return OK;
-}
-
-/////////////////////////////////////////////////////////////////////
-//
-// ItemReference related boxes
-//
-
-struct ItemReference : public Box, public RefBase {
- ItemReference(const sp<DataSource> source, uint32_t type, uint32_t itemIdSize) :
- Box(source, type), mItemId(0), mRefIdSize(itemIdSize) {}
-
- status_t parse(off64_t offset, size_t size);
-
- uint32_t itemId() { return mItemId; }
-
- void apply(KeyedVector<uint32_t, ImageItem> &itemIdToImageMap) const {
- ssize_t imageIndex = itemIdToImageMap.indexOfKey(mItemId);
-
- // ignore non-image items
- if (imageIndex < 0) {
- return;
- }
-
- ALOGV("attach reference type 0x%x to item id %d)", type(), mItemId);
-
- if (type() == FOURCC('d', 'i', 'm', 'g')) {
- ImageItem &image = itemIdToImageMap.editValueAt(imageIndex);
- if (!image.dimgRefs.empty()) {
- ALOGW("dimgRefs if not clean!");
- }
- image.dimgRefs.appendVector(mRefs);
- } else if (type() == FOURCC('t', 'h', 'm', 'b')) {
- for (size_t i = 0; i < mRefs.size(); i++) {
- imageIndex = itemIdToImageMap.indexOfKey(mRefs[i]);
-
- // ignore non-image items
- if (imageIndex < 0) {
- continue;
- }
- ALOGV("Image item id %d uses thumbnail item id %d", mRefs[i], mItemId);
- ImageItem &image = itemIdToImageMap.editValueAt(imageIndex);
- if (!image.thumbnails.empty()) {
- ALOGW("already has thumbnails!");
- }
- image.thumbnails.push_back(mItemId);
- }
- } else {
- ALOGW("ignoring unsupported ref type 0x%x", type());
- }
- }
-
-private:
- uint32_t mItemId;
- uint32_t mRefIdSize;
- Vector<uint32_t> mRefs;
-
- DISALLOW_EVIL_CONSTRUCTORS(ItemReference);
-};
-
-status_t ItemReference::parse(off64_t offset, size_t size) {
- if (size < mRefIdSize + 2) {
- return ERROR_MALFORMED;
- }
- if (!source()->getUInt32Var(offset, &mItemId, mRefIdSize)) {
- return ERROR_IO;
- }
- offset += mRefIdSize;
-
- uint16_t count;
- if (!source()->getUInt16(offset, &count)) {
- return ERROR_IO;
- }
- offset += 2;
- size -= (mRefIdSize + 2);
-
- if (size < count * mRefIdSize) {
- return ERROR_MALFORMED;
- }
-
- for (size_t i = 0; i < count; i++) {
- uint32_t refItemId;
- if (!source()->getUInt32Var(offset, &refItemId, mRefIdSize)) {
- return ERROR_IO;
- }
- offset += mRefIdSize;
- mRefs.push_back(refItemId);
- ALOGV("item id %d: referencing item id %d", mItemId, refItemId);
- }
-
- return OK;
-}
-
-struct IrefBox : public FullBox {
- IrefBox(const sp<DataSource> source, Vector<sp<ItemReference> > *itemRefs) :
- FullBox(source, FOURCC('i', 'r', 'e', 'f')), mRefIdSize(0), mItemRefs(itemRefs) {}
-
- status_t parse(off64_t offset, size_t size);
-
-protected:
- status_t onChunkData(uint32_t type, off64_t offset, size_t size) override;
-
-private:
- uint32_t mRefIdSize;
- Vector<sp<ItemReference> > *mItemRefs;
-};
-
-status_t IrefBox::parse(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
- status_t err = parseFullBoxHeader(&offset, &size);
- if (err != OK) {
- return err;
- }
-
- mRefIdSize = (version() == 0) ? 2 : 4;
- return parseChunks(offset, size);
-}
-
-status_t IrefBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
- sp<ItemReference> itemRef = new ItemReference(source(), type, mRefIdSize);
-
- status_t err = itemRef->parse(offset, size);
- if (err != OK) {
- return err;
- }
- mItemRefs->push_back(itemRef);
- return OK;
-}
-
-/////////////////////////////////////////////////////////////////////
-//
-// ItemProperty related boxes
-//
-
-struct AssociationEntry {
- uint32_t itemId;
- bool essential;
- uint16_t index;
-};
-
-struct ItemProperty : public RefBase {
- ItemProperty() {}
-
- virtual void attachTo(ImageItem &/*image*/) const {
- ALOGW("Unrecognized property");
- }
- virtual status_t parse(off64_t /*offset*/, size_t /*size*/) {
- ALOGW("Unrecognized property");
- return OK;
- }
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(ItemProperty);
-};
-
-struct IspeBox : public FullBox, public ItemProperty {
- IspeBox(const sp<DataSource> source) :
- FullBox(source, FOURCC('i', 's', 'p', 'e')), mWidth(0), mHeight(0) {}
-
- status_t parse(off64_t offset, size_t size) override;
-
- void attachTo(ImageItem &image) const override {
- image.width = mWidth;
- image.height = mHeight;
- }
-
-private:
- uint32_t mWidth;
- uint32_t mHeight;
-};
-
-status_t IspeBox::parse(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- status_t err = parseFullBoxHeader(&offset, &size);
- if (err != OK) {
- return err;
- }
-
- if (size < 8) {
- return ERROR_MALFORMED;
- }
- if (!source()->getUInt32(offset, &mWidth)
- || !source()->getUInt32(offset + 4, &mHeight)) {
- return ERROR_IO;
- }
- ALOGV("property ispe: %dx%d", mWidth, mHeight);
-
- return OK;
-}
-
-struct HvccBox : public Box, public ItemProperty {
- HvccBox(const sp<DataSource> source) :
- Box(source, FOURCC('h', 'v', 'c', 'C')) {}
-
- status_t parse(off64_t offset, size_t size) override;
-
- void attachTo(ImageItem &image) const override {
- image.hvcc = mHVCC;
- }
-
-private:
- sp<ABuffer> mHVCC;
-};
-
-status_t HvccBox::parse(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- mHVCC = new ABuffer(size);
-
- if (mHVCC->data() == NULL) {
- ALOGE("b/28471206");
- return NO_MEMORY;
- }
-
- if (source()->readAt(offset, mHVCC->data(), size) < (ssize_t)size) {
- return ERROR_IO;
- }
-
- ALOGV("property hvcC");
-
- return OK;
-}
-
-struct IrotBox : public Box, public ItemProperty {
- IrotBox(const sp<DataSource> source) :
- Box(source, FOURCC('i', 'r', 'o', 't')), mAngle(0) {}
-
- status_t parse(off64_t offset, size_t size) override;
-
- void attachTo(ImageItem &image) const override {
- image.rotation = mAngle * 90;
- }
-
-private:
- uint8_t mAngle;
-};
-
-status_t IrotBox::parse(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- if (size < 1) {
- return ERROR_MALFORMED;
- }
- if (source()->readAt(offset, &mAngle, 1) != 1) {
- return ERROR_IO;
- }
- mAngle &= 0x3;
- ALOGV("property irot: %d", mAngle);
-
- return OK;
-}
-
-struct ColrBox : public Box, public ItemProperty {
- ColrBox(const sp<DataSource> source) :
- Box(source, FOURCC('c', 'o', 'l', 'r')) {}
-
- status_t parse(off64_t offset, size_t size) override;
-
- void attachTo(ImageItem &image) const override {
- image.icc = mICCData;
- }
-
-private:
- sp<ABuffer> mICCData;
-};
-
-status_t ColrBox::parse(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- if (size < 4) {
- return ERROR_MALFORMED;
- }
- uint32_t colour_type;
- if (!source()->getUInt32(offset, &colour_type)) {
- return ERROR_IO;
- }
- offset += 4;
- size -= 4;
- if (colour_type == FOURCC('n', 'c', 'l', 'x')) {
- return OK;
- }
- if ((colour_type != FOURCC('r', 'I', 'C', 'C')) &&
- (colour_type != FOURCC('p', 'r', 'o', 'f'))) {
- return ERROR_MALFORMED;
- }
-
- mICCData = new ABuffer(size);
- if (mICCData->data() == NULL) {
- ALOGE("b/28471206");
- return NO_MEMORY;
- }
-
- if (source()->readAt(offset, mICCData->data(), size) != (ssize_t)size) {
- return ERROR_IO;
- }
-
- ALOGV("property Colr: size %zd", size);
- return OK;
-}
-
-struct IpmaBox : public FullBox {
- IpmaBox(const sp<DataSource> source, Vector<AssociationEntry> *associations) :
- FullBox(source, FOURCC('i', 'p', 'm', 'a')), mAssociations(associations) {}
-
- status_t parse(off64_t offset, size_t size);
-private:
- Vector<AssociationEntry> *mAssociations;
-};
-
-status_t IpmaBox::parse(off64_t offset, size_t size) {
- status_t err = parseFullBoxHeader(&offset, &size);
- if (err != OK) {
- return err;
- }
-
- if (size < 4) {
- return ERROR_MALFORMED;
- }
- uint32_t entryCount;
- if (!source()->getUInt32(offset, &entryCount)) {
- return ERROR_IO;
- }
- offset += 4;
- size -= 4;
-
- for (size_t k = 0; k < entryCount; ++k) {
- uint32_t itemId = 0;
- size_t itemIdSize = (version() < 1) ? 2 : 4;
-
- if (size < itemIdSize + 1) {
- return ERROR_MALFORMED;
- }
-
- if (!source()->getUInt32Var(offset, &itemId, itemIdSize)) {
- return ERROR_IO;
- }
- offset += itemIdSize;
- size -= itemIdSize;
-
- uint8_t associationCount;
- if (!source()->readAt(offset, &associationCount, 1)) {
- return ERROR_IO;
- }
- offset++;
- size--;
-
- for (size_t i = 0; i < associationCount; ++i) {
- size_t propIndexSize = (flags() & 1) ? 2 : 1;
- if (size < propIndexSize) {
- return ERROR_MALFORMED;
- }
- uint16_t propIndex;
- if (!source()->getUInt16Var(offset, &propIndex, propIndexSize)) {
- return ERROR_IO;
- }
- offset += propIndexSize;
- size -= propIndexSize;
- uint16_t bitmask = (1 << (8 * propIndexSize - 1));
- AssociationEntry entry = {
- .itemId = itemId,
- .essential = !!(propIndex & bitmask),
- .index = (uint16_t) (propIndex & ~bitmask)
- };
-
- ALOGV("item id %d associated to property %d (essential %d)",
- itemId, entry.index, entry.essential);
-
- mAssociations->push_back(entry);
- }
- }
-
- return OK;
-}
-
-struct IpcoBox : public Box {
- IpcoBox(const sp<DataSource> source, Vector<sp<ItemProperty> > *properties) :
- Box(source, FOURCC('i', 'p', 'c', 'o')), mItemProperties(properties) {}
-
- status_t parse(off64_t offset, size_t size);
-protected:
- status_t onChunkData(uint32_t type, off64_t offset, size_t size) override;
-
-private:
- Vector<sp<ItemProperty> > *mItemProperties;
-};
-
-status_t IpcoBox::parse(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
- // push dummy as the index is 1-based
- mItemProperties->push_back(new ItemProperty());
- return parseChunks(offset, size);
-}
-
-status_t IpcoBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
- sp<ItemProperty> itemProperty;
- switch(type) {
- case FOURCC('h', 'v', 'c', 'C'):
- {
- itemProperty = new HvccBox(source());
- break;
- }
- case FOURCC('i', 's', 'p', 'e'):
- {
- itemProperty = new IspeBox(source());
- break;
- }
- case FOURCC('i', 'r', 'o', 't'):
- {
- itemProperty = new IrotBox(source());
- break;
- }
- case FOURCC('c', 'o', 'l', 'r'):
- {
- itemProperty = new ColrBox(source());
- break;
- }
- default:
- {
- // push dummy to maintain correct item property index
- itemProperty = new ItemProperty();
- break;
- }
- }
- status_t err = itemProperty->parse(offset, size);
- if (err != OK) {
- return err;
- }
- mItemProperties->push_back(itemProperty);
- return OK;
-}
-
-struct IprpBox : public Box {
- IprpBox(const sp<DataSource> source,
- Vector<sp<ItemProperty> > *properties,
- Vector<AssociationEntry> *associations) :
- Box(source, FOURCC('i', 'p', 'r', 'p')),
- mProperties(properties), mAssociations(associations) {}
-
- status_t parse(off64_t offset, size_t size);
-protected:
- status_t onChunkData(uint32_t type, off64_t offset, size_t size) override;
-
-private:
- Vector<sp<ItemProperty> > *mProperties;
- Vector<AssociationEntry> *mAssociations;
-};
-
-status_t IprpBox::parse(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- status_t err = parseChunks(offset, size);
- if (err != OK) {
- return err;
- }
- return OK;
-}
-
-status_t IprpBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
- switch(type) {
- case FOURCC('i', 'p', 'c', 'o'):
- {
- IpcoBox ipcoBox(source(), mProperties);
- return ipcoBox.parse(offset, size);
- }
- case FOURCC('i', 'p', 'm', 'a'):
- {
- IpmaBox ipmaBox(source(), mAssociations);
- return ipmaBox.parse(offset, size);
- }
- default:
- {
- ALOGW("Unrecognized box.");
- break;
- }
- }
- return OK;
-}
-
-/////////////////////////////////////////////////////////////////////
-//
-// ItemInfo related boxes
-//
-struct ItemInfo {
- uint32_t itemId;
- uint32_t itemType;
-};
-
-struct InfeBox : public FullBox {
- InfeBox(const sp<DataSource> source) :
- FullBox(source, FOURCC('i', 'n', 'f', 'e')) {}
-
- status_t parse(off64_t offset, size_t size, ItemInfo *itemInfo);
-
-private:
- bool parseNullTerminatedString(off64_t *offset, size_t *size, String8 *out);
-};
-
-bool InfeBox::parseNullTerminatedString(
- off64_t *offset, size_t *size, String8 *out) {
- char tmp[256];
- size_t len = 0;
- off64_t newOffset = *offset;
- off64_t stopOffset = *offset + *size;
- while (newOffset < stopOffset) {
- if (!source()->readAt(newOffset++, &tmp[len], 1)) {
- return false;
- }
- if (tmp[len] == 0) {
- out->append(tmp, len);
-
- *offset = newOffset;
- *size = stopOffset - newOffset;
-
- return true;
- }
- if (++len >= sizeof(tmp)) {
- out->append(tmp, len);
- len = 0;
- }
- }
- return false;
-}
-
-status_t InfeBox::parse(off64_t offset, size_t size, ItemInfo *itemInfo) {
- status_t err = parseFullBoxHeader(&offset, &size);
- if (err != OK) {
- return err;
- }
-
- if (version() == 0 || version() == 1) {
- if (size < 4) {
- return ERROR_MALFORMED;
- }
- uint16_t item_id;
- if (!source()->getUInt16(offset, &item_id)) {
- return ERROR_IO;
- }
- ALOGV("item_id %d", item_id);
- uint16_t item_protection_index;
- if (!source()->getUInt16(offset + 2, &item_protection_index)) {
- return ERROR_IO;
- }
- offset += 4;
- size -= 4;
-
- String8 item_name;
- if (!parseNullTerminatedString(&offset, &size, &item_name)) {
- return ERROR_MALFORMED;
- }
-
- String8 content_type;
- if (!parseNullTerminatedString(&offset, &size, &content_type)) {
- return ERROR_MALFORMED;
- }
-
- String8 content_encoding;
- if (!parseNullTerminatedString(&offset, &size, &content_encoding)) {
- return ERROR_MALFORMED;
- }
-
- if (version() == 1) {
- uint32_t extension_type;
- if (!source()->getUInt32(offset, &extension_type)) {
- return ERROR_IO;
- }
- offset++;
- size--;
- // TODO: handle this case
- }
- } else { // version >= 2
- uint32_t item_id;
- size_t itemIdSize = (version() == 2) ? 2 : 4;
- if (size < itemIdSize + 6) {
- return ERROR_MALFORMED;
- }
- if (!source()->getUInt32Var(offset, &item_id, itemIdSize)) {
- return ERROR_IO;
- }
- ALOGV("item_id %d", item_id);
- offset += itemIdSize;
- uint16_t item_protection_index;
- if (!source()->getUInt16(offset, &item_protection_index)) {
- return ERROR_IO;
- }
- ALOGV("item_protection_index %d", item_protection_index);
- offset += 2;
- uint32_t item_type;
- if (!source()->getUInt32(offset, &item_type)) {
- return ERROR_IO;
- }
-
- itemInfo->itemId = item_id;
- itemInfo->itemType = item_type;
-
- char itemTypeString[5];
- MakeFourCCString(item_type, itemTypeString);
- ALOGV("item_type %s", itemTypeString);
- offset += 4;
- size -= itemIdSize + 6;
-
- String8 item_name;
- if (!parseNullTerminatedString(&offset, &size, &item_name)) {
- return ERROR_MALFORMED;
- }
- ALOGV("item_name %s", item_name.c_str());
-
- if (item_type == FOURCC('m', 'i', 'm', 'e')) {
- String8 content_type;
- if (!parseNullTerminatedString(&offset, &size, &content_type)) {
- return ERROR_MALFORMED;
- }
-
- String8 content_encoding;
- if (!parseNullTerminatedString(&offset, &size, &content_encoding)) {
- return ERROR_MALFORMED;
- }
- } else if (item_type == FOURCC('u', 'r', 'i', ' ')) {
- String8 item_uri_type;
- if (!parseNullTerminatedString(&offset, &size, &item_uri_type)) {
- return ERROR_MALFORMED;
- }
- }
- }
- return OK;
-}
-
-struct IinfBox : public FullBox {
- IinfBox(const sp<DataSource> source, Vector<ItemInfo> *itemInfos) :
- FullBox(source, FOURCC('i', 'i', 'n', 'f')),
- mItemInfos(itemInfos), mHasGrids(false) {}
-
- status_t parse(off64_t offset, size_t size);
-
- bool hasGrids() { return mHasGrids; }
-
-protected:
- status_t onChunkData(uint32_t type, off64_t offset, size_t size) override;
-
-private:
- Vector<ItemInfo> *mItemInfos;
- bool mHasGrids;
-};
-
-status_t IinfBox::parse(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- status_t err = parseFullBoxHeader(&offset, &size);
- if (err != OK) {
- return err;
- }
-
- size_t entryCountSize = version() == 0 ? 2 : 4;
- if (size < entryCountSize) {
- return ERROR_MALFORMED;
- }
- uint32_t entry_count;
- if (!source()->getUInt32Var(offset, &entry_count, entryCountSize)) {
- return ERROR_IO;
- }
- ALOGV("entry_count %d", entry_count);
-
- off64_t stopOffset = offset + size;
- offset += entryCountSize;
- for (size_t i = 0; i < entry_count && offset < stopOffset; i++) {
- ALOGV("entry %zu", i);
- status_t err = parseChunk(&offset);
- if (err != OK) {
- return err;
- }
- }
- if (offset != stopOffset) {
- return ERROR_MALFORMED;
- }
-
- return OK;
-}
-
-status_t IinfBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
- if (type != FOURCC('i', 'n', 'f', 'e')) {
- return OK;
- }
-
- InfeBox infeBox(source());
- ItemInfo itemInfo;
- status_t err = infeBox.parse(offset, size, &itemInfo);
- if (err != OK) {
- return err;
- }
- mItemInfos->push_back(itemInfo);
- mHasGrids |= (itemInfo.itemType == FOURCC('g', 'r', 'i', 'd'));
- return OK;
-}
-
-//////////////////////////////////////////////////////////////////
-
-ItemTable::ItemTable(const sp<DataSource> &source)
- : mDataSource(source),
- mPrimaryItemId(0),
- mIdatOffset(0),
- mIdatSize(0),
- mImageItemsValid(false),
- mCurrentImageIndex(0) {
- mRequiredBoxes.insert('iprp');
- mRequiredBoxes.insert('iloc');
- mRequiredBoxes.insert('pitm');
- mRequiredBoxes.insert('iinf');
-}
-
-ItemTable::~ItemTable() {}
-
-status_t ItemTable::parse(uint32_t type, off64_t data_offset, size_t chunk_data_size) {
- switch(type) {
- case FOURCC('i', 'l', 'o', 'c'):
- {
- return parseIlocBox(data_offset, chunk_data_size);
- }
- case FOURCC('i', 'i', 'n', 'f'):
- {
- return parseIinfBox(data_offset, chunk_data_size);
- }
- case FOURCC('i', 'p', 'r', 'p'):
- {
- return parseIprpBox(data_offset, chunk_data_size);
- }
- case FOURCC('p', 'i', 't', 'm'):
- {
- return parsePitmBox(data_offset, chunk_data_size);
- }
- case FOURCC('i', 'd', 'a', 't'):
- {
- return parseIdatBox(data_offset, chunk_data_size);
- }
- case FOURCC('i', 'r', 'e', 'f'):
- {
- return parseIrefBox(data_offset, chunk_data_size);
- }
- case FOURCC('i', 'p', 'r', 'o'):
- {
- ALOGW("ipro box not supported!");
- break;
- }
- default:
- {
- ALOGW("unrecognized box type: 0x%x", type);
- break;
- }
- }
- return ERROR_UNSUPPORTED;
-}
-
-status_t ItemTable::parseIlocBox(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- IlocBox ilocBox(mDataSource, &mItemLocs);
- status_t err = ilocBox.parse(offset, size);
- if (err != OK) {
- return err;
- }
-
- if (ilocBox.hasConstructMethod1()) {
- mRequiredBoxes.insert('idat');
- }
-
- return buildImageItemsIfPossible('iloc');
-}
-
-status_t ItemTable::parseIinfBox(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- IinfBox iinfBox(mDataSource, &mItemInfos);
- status_t err = iinfBox.parse(offset, size);
- if (err != OK) {
- return err;
- }
-
- if (iinfBox.hasGrids()) {
- mRequiredBoxes.insert('iref');
- }
-
- return buildImageItemsIfPossible('iinf');
-}
-
-status_t ItemTable::parsePitmBox(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- PitmBox pitmBox(mDataSource);
- status_t err = pitmBox.parse(offset, size, &mPrimaryItemId);
- if (err != OK) {
- return err;
- }
-
- return buildImageItemsIfPossible('pitm');
-}
-
-status_t ItemTable::parseIprpBox(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- IprpBox iprpBox(mDataSource, &mItemProperties, &mAssociations);
- status_t err = iprpBox.parse(offset, size);
- if (err != OK) {
- return err;
- }
-
- return buildImageItemsIfPossible('iprp');
-}
-
-status_t ItemTable::parseIdatBox(off64_t offset, size_t size) {
- ALOGV("%s: idat offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- // only remember the offset and size of idat box for later use
- mIdatOffset = offset;
- mIdatSize = size;
-
- return buildImageItemsIfPossible('idat');
-}
-
-status_t ItemTable::parseIrefBox(off64_t offset, size_t size) {
- ALOGV("%s: offset %lld, size %zu", __FUNCTION__, (long long)offset, size);
-
- IrefBox irefBox(mDataSource, &mItemReferences);
- status_t err = irefBox.parse(offset, size);
- if (err != OK) {
- return err;
- }
-
- return buildImageItemsIfPossible('iref');
-}
-
-status_t ItemTable::buildImageItemsIfPossible(uint32_t type) {
- if (mImageItemsValid) {
- return OK;
- }
-
- mBoxesSeen.insert(type);
-
- // need at least 'iprp', 'iloc', 'pitm', 'iinf';
- // need 'idat' if any items used construction_method of 2;
- // need 'iref' if there are grids.
- if (!std::includes(
- mBoxesSeen.begin(), mBoxesSeen.end(),
- mRequiredBoxes.begin(), mRequiredBoxes.end())) {
- return OK;
- }
-
- ALOGV("building image table...");
-
- for (size_t i = 0; i < mItemInfos.size(); i++) {
- const ItemInfo &info = mItemInfos[i];
-
-
- // ignore non-image items
- if (info.itemType != FOURCC('g', 'r', 'i', 'd') &&
- info.itemType != FOURCC('h', 'v', 'c', '1')) {
- continue;
- }
-
- ssize_t imageIndex = mItemIdToImageMap.indexOfKey(info.itemId);
- if (imageIndex >= 0) {
- ALOGW("ignoring duplicate image item id %d", info.itemId);
- continue;
- }
-
- ssize_t ilocIndex = mItemLocs.indexOfKey(info.itemId);
- if (ilocIndex < 0) {
- ALOGE("iloc missing for image item id %d", info.itemId);
- continue;
- }
- const ItemLoc &iloc = mItemLocs[ilocIndex];
-
- off64_t offset;
- size_t size;
- if (iloc.getLoc(&offset, &size, mIdatOffset, mIdatSize) != OK) {
- return ERROR_MALFORMED;
- }
-
- ImageItem image(info.itemType);
-
- ALOGV("adding %s: itemId %d", image.isGrid() ? "grid" : "image", info.itemId);
-
- if (image.isGrid()) {
- if (size > 12) {
- return ERROR_MALFORMED;
- }
- uint8_t buf[12];
- if (!mDataSource->readAt(offset, buf, size)) {
- return ERROR_IO;
- }
-
- image.rows = buf[2] + 1;
- image.columns = buf[3] + 1;
-
- ALOGV("rows %d, columans %d", image.rows, image.columns);
- } else {
- image.offset = offset;
- image.size = size;
- }
- mItemIdToImageMap.add(info.itemId, image);
- }
-
- for (size_t i = 0; i < mAssociations.size(); i++) {
- attachProperty(mAssociations[i]);
- }
-
- for (size_t i = 0; i < mItemReferences.size(); i++) {
- mItemReferences[i]->apply(mItemIdToImageMap);
- }
-
- mImageItemsValid = true;
- return OK;
-}
-
-void ItemTable::attachProperty(const AssociationEntry &association) {
- ssize_t imageIndex = mItemIdToImageMap.indexOfKey(association.itemId);
-
- // ignore non-image items
- if (imageIndex < 0) {
- return;
- }
-
- uint16_t propertyIndex = association.index;
- if (propertyIndex >= mItemProperties.size()) {
- ALOGW("Ignoring invalid property index %d", propertyIndex);
- return;
- }
-
- ALOGV("attach property %d to item id %d)",
- propertyIndex, association.itemId);
-
- mItemProperties[propertyIndex]->attachTo(
- mItemIdToImageMap.editValueAt(imageIndex));
-}
-
-sp<MetaData> ItemTable::getImageMeta() {
- if (!mImageItemsValid) {
- return NULL;
- }
-
- ssize_t imageIndex = mItemIdToImageMap.indexOfKey(mPrimaryItemId);
- if (imageIndex < 0) {
- ALOGE("Primary item id %d not found!", mPrimaryItemId);
- return NULL;
- }
-
- ALOGV("primary image index %zu", imageIndex);
-
- const ImageItem *image = &mItemIdToImageMap[imageIndex];
-
- sp<MetaData> meta = new MetaData;
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_HEVC);
-
- ALOGV("setting image size %dx%d", image->width, image->height);
- meta->setInt32(kKeyWidth, image->width);
- meta->setInt32(kKeyHeight, image->height);
- if (image->rotation != 0) {
- // Rotation angle in HEIF is CCW, convert to CW here to be
- // consistent with the other media formats.
- switch(image->rotation) {
- case 90: meta->setInt32(kKeyRotation, 270); break;
- case 180: meta->setInt32(kKeyRotation, 180); break;
- case 270: meta->setInt32(kKeyRotation, 90); break;
- default: break; // don't set if invalid
- }
- }
- meta->setInt32(kKeyMaxInputSize, image->width * image->height * 1.5);
-
- if (!image->thumbnails.empty()) {
- ssize_t thumbnailIndex = mItemIdToImageMap.indexOfKey(image->thumbnails[0]);
- if (thumbnailIndex >= 0) {
- const ImageItem &thumbnail = mItemIdToImageMap[thumbnailIndex];
-
- meta->setInt32(kKeyThumbnailWidth, thumbnail.width);
- meta->setInt32(kKeyThumbnailHeight, thumbnail.height);
- meta->setData(kKeyThumbnailHVCC, kTypeHVCC,
- thumbnail.hvcc->data(), thumbnail.hvcc->size());
- ALOGV("thumbnail meta: %dx%d, index %zd",
- thumbnail.width, thumbnail.height, thumbnailIndex);
- } else {
- ALOGW("Referenced thumbnail does not exist!");
- }
- }
-
- if (image->isGrid()) {
- ssize_t tileIndex = mItemIdToImageMap.indexOfKey(image->dimgRefs[0]);
- if (tileIndex < 0) {
- return NULL;
- }
- // when there are tiles, (kKeyWidth, kKeyHeight) is the full tiled area,
- // and (kKeyDisplayWidth, kKeyDisplayHeight) may be smaller than that.
- meta->setInt32(kKeyDisplayWidth, image->width);
- meta->setInt32(kKeyDisplayHeight, image->height);
- int32_t gridRows = image->rows, gridCols = image->columns;
-
- // point image to the first tile for grid size and HVCC
- image = &mItemIdToImageMap.editValueAt(tileIndex);
- meta->setInt32(kKeyWidth, image->width * gridCols);
- meta->setInt32(kKeyHeight, image->height * gridRows);
- meta->setInt32(kKeyGridWidth, image->width);
- meta->setInt32(kKeyGridHeight, image->height);
- meta->setInt32(kKeyMaxInputSize, image->width * image->height * 1.5);
- }
-
- if (image->hvcc == NULL) {
- ALOGE("hvcc is missing!");
- return NULL;
- }
- meta->setData(kKeyHVCC, kTypeHVCC, image->hvcc->data(), image->hvcc->size());
-
- if (image->icc != NULL) {
- meta->setData(kKeyIccProfile, 0, image->icc->data(), image->icc->size());
- }
- return meta;
-}
-
-uint32_t ItemTable::countImages() const {
- return mImageItemsValid ? mItemIdToImageMap.size() : 0;
-}
-
-status_t ItemTable::findPrimaryImage(uint32_t *imageIndex) {
- if (!mImageItemsValid) {
- return INVALID_OPERATION;
- }
-
- ssize_t index = mItemIdToImageMap.indexOfKey(mPrimaryItemId);
- if (index < 0) {
- return ERROR_MALFORMED;
- }
-
- *imageIndex = index;
- return OK;
-}
-
-status_t ItemTable::findThumbnail(uint32_t *imageIndex) {
- if (!mImageItemsValid) {
- return INVALID_OPERATION;
- }
-
- ssize_t primaryIndex = mItemIdToImageMap.indexOfKey(mPrimaryItemId);
- if (primaryIndex < 0) {
- ALOGE("Primary item id %d not found!", mPrimaryItemId);
- return ERROR_MALFORMED;
- }
-
- const ImageItem &primaryImage = mItemIdToImageMap[primaryIndex];
- if (primaryImage.thumbnails.empty()) {
- ALOGW("Using primary in place of thumbnail.");
- *imageIndex = primaryIndex;
- return OK;
- }
-
- ssize_t thumbnailIndex = mItemIdToImageMap.indexOfKey(
- primaryImage.thumbnails[0]);
- if (thumbnailIndex < 0) {
- ALOGE("Thumbnail item id %d not found!", primaryImage.thumbnails[0]);
- return ERROR_MALFORMED;
- }
-
- *imageIndex = thumbnailIndex;
- return OK;
-}
-
-status_t ItemTable::getImageOffsetAndSize(
- uint32_t *imageIndex, off64_t *offset, size_t *size) {
- if (!mImageItemsValid) {
- return INVALID_OPERATION;
- }
-
- if (imageIndex != NULL) {
- if (*imageIndex >= mItemIdToImageMap.size()) {
- ALOGE("Bad image index!");
- return BAD_VALUE;
- }
- mCurrentImageIndex = *imageIndex;
- }
-
- ImageItem &image = mItemIdToImageMap.editValueAt(mCurrentImageIndex);
- if (image.isGrid()) {
- uint32_t tileItemId;
- status_t err = image.getNextTileItemId(&tileItemId, imageIndex != NULL);
- if (err != OK) {
- return err;
- }
- ssize_t tileImageIndex = mItemIdToImageMap.indexOfKey(tileItemId);
- if (tileImageIndex < 0) {
- return ERROR_END_OF_STREAM;
- }
- *offset = mItemIdToImageMap[tileImageIndex].offset;
- *size = mItemIdToImageMap[tileImageIndex].size;
- } else {
- if (imageIndex == NULL) {
- // For single images, we only allow it to be read once, after that
- // it's EOS. New image index must be requested each time.
- return ERROR_END_OF_STREAM;
- }
- *offset = mItemIdToImageMap[mCurrentImageIndex].offset;
- *size = mItemIdToImageMap[mCurrentImageIndex].size;
- }
-
- return OK;
-}
-
-} // namespace heif
-
-} // namespace android
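
For reference, the removed ItemTable::getImageMeta() above converts the 'irot' angle (stored by IrotBox in 90-degree counter-clockwise steps) into the clockwise kKeyRotation value. A minimal standalone sketch of that mapping follows; heifCcwToCwRotation is a hypothetical helper name, not a function from the removed file.

#include <cstdint>

// Sketch only: 'irot' carries 0..3, i.e. multiples of 90 degrees CCW;
// kKeyRotation expects clockwise degrees, so 90 and 270 swap.
static inline int32_t heifCcwToCwRotation(uint8_t irotAngle) {
    switch ((irotAngle & 0x3) * 90) {
        case 90:  return 270;
        case 180: return 180;
        case 270: return 90;
        default:  return 0;   // 0 degrees: no rotation set
    }
}
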
diff --git a/media/libstagefright/JPEGSource.cpp b/media/libstagefright/JPEGSource.cpp
index bafa4b2..10eb2d2 100644
--- a/media/libstagefright/JPEGSource.cpp
+++ b/media/libstagefright/JPEGSource.cpp
@@ -18,9 +18,10 @@
#define LOG_TAG "JPEGSource"
#include <utils/Log.h>
+#include <media/DataSource.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/JPEGSource.h>
+#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
@@ -108,7 +109,7 @@
}
status_t JPEGSource::read(
- MediaBuffer **out, const ReadOptions *options) {
+ MediaBufferBase **out, const ReadOptions *options) {
*out = NULL;
int64_t seekTimeUs;
@@ -117,7 +118,7 @@
return UNKNOWN_ERROR;
}
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
mGroup->acquire_buffer(&buffer);
ssize_t n = mSource->readAt(mOffset, buffer->data(), mSize - mOffset);
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
deleted file mode 100644
index 13f9928..0000000
--- a/media/libstagefright/MP3Extractor.cpp
+++ /dev/null
@@ -1,699 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MP3Extractor"
-#include <utils/Log.h>
-
-#include "include/MP3Extractor.h"
-
-#include "include/avc_utils.h"
-#include "include/ID3.h"
-#include "include/VBRISeeker.h"
-#include "include/XINGSeeker.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-#include <utils/String8.h>
-
-namespace android {
-
-// Everything must match except for
-// protection, bitrate, padding, private bits, mode, mode extension,
-// copyright bit, original bit and emphasis.
-// Yes ... there are things that must indeed match...
-static const uint32_t kMask = 0xfffe0c00;
-
-static bool Resync(
- const sp<DataSource> &source, uint32_t match_header,
- off64_t *inout_pos, off64_t *post_id3_pos, uint32_t *out_header) {
- if (post_id3_pos != NULL) {
- *post_id3_pos = 0;
- }
-
- if (*inout_pos == 0) {
- // Skip an optional ID3 header if syncing at the very beginning
- // of the datasource.
-
- for (;;) {
- uint8_t id3header[10];
- if (source->readAt(*inout_pos, id3header, sizeof(id3header))
- < (ssize_t)sizeof(id3header)) {
- // If we can't even read these 10 bytes, we might as well bail
- // out, even if there _were_ 10 bytes of valid mp3 audio data...
- return false;
- }
-
- if (memcmp("ID3", id3header, 3)) {
- break;
- }
-
- // Skip the ID3v2 header.
-
- size_t len =
- ((id3header[6] & 0x7f) << 21)
- | ((id3header[7] & 0x7f) << 14)
- | ((id3header[8] & 0x7f) << 7)
- | (id3header[9] & 0x7f);
-
- len += 10;
-
- *inout_pos += len;
-
- ALOGV("skipped ID3 tag, new starting offset is %lld (0x%016llx)",
- (long long)*inout_pos, (long long)*inout_pos);
- }
-
- if (post_id3_pos != NULL) {
- *post_id3_pos = *inout_pos;
- }
- }
-
- off64_t pos = *inout_pos;
- bool valid = false;
-
- const size_t kMaxReadBytes = 1024;
- const size_t kMaxBytesChecked = 128 * 1024;
- uint8_t buf[kMaxReadBytes];
- ssize_t bytesToRead = kMaxReadBytes;
- ssize_t totalBytesRead = 0;
- ssize_t remainingBytes = 0;
- bool reachEOS = false;
- uint8_t *tmp = buf;
-
- do {
- if (pos >= (off64_t)(*inout_pos + kMaxBytesChecked)) {
- // Don't scan forever.
- ALOGV("giving up at offset %lld", (long long)pos);
- break;
- }
-
- if (remainingBytes < 4) {
- if (reachEOS) {
- break;
- } else {
- memcpy(buf, tmp, remainingBytes);
- bytesToRead = kMaxReadBytes - remainingBytes;
-
- /*
- * The next read position should start from the end of
- * the last buffer, and thus should include the remaining
- * bytes in the buffer.
- */
- totalBytesRead = source->readAt(pos + remainingBytes,
- buf + remainingBytes,
- bytesToRead);
- if (totalBytesRead <= 0) {
- break;
- }
- reachEOS = (totalBytesRead != bytesToRead);
- totalBytesRead += remainingBytes;
- remainingBytes = totalBytesRead;
- tmp = buf;
- continue;
- }
- }
-
- uint32_t header = U32_AT(tmp);
-
- if (match_header != 0 && (header & kMask) != (match_header & kMask)) {
- ++pos;
- ++tmp;
- --remainingBytes;
- continue;
- }
-
- size_t frame_size;
- int sample_rate, num_channels, bitrate;
- if (!GetMPEGAudioFrameSize(
- header, &frame_size,
- &sample_rate, &num_channels, &bitrate)) {
- ++pos;
- ++tmp;
- --remainingBytes;
- continue;
- }
-
- ALOGV("found possible 1st frame at %lld (header = 0x%08x)", (long long)pos, header);
-
- // We found what looks like a valid frame,
- // now find its successors.
-
- off64_t test_pos = pos + frame_size;
-
- valid = true;
- for (int j = 0; j < 3; ++j) {
- uint8_t tmp[4];
- if (source->readAt(test_pos, tmp, 4) < 4) {
- valid = false;
- break;
- }
-
- uint32_t test_header = U32_AT(tmp);
-
- ALOGV("subsequent header is %08x", test_header);
-
- if ((test_header & kMask) != (header & kMask)) {
- valid = false;
- break;
- }
-
- size_t test_frame_size;
- if (!GetMPEGAudioFrameSize(
- test_header, &test_frame_size)) {
- valid = false;
- break;
- }
-
- ALOGV("found subsequent frame #%d at %lld", j + 2, (long long)test_pos);
-
- test_pos += test_frame_size;
- }
-
- if (valid) {
- *inout_pos = pos;
-
- if (out_header != NULL) {
- *out_header = header;
- }
- } else {
- ALOGV("no dice, no valid sequence of frames found.");
- }
-
- ++pos;
- ++tmp;
- --remainingBytes;
- } while (!valid);
-
- return valid;
-}
-
-class MP3Source : public MediaSource {
-public:
- MP3Source(
- const sp<MetaData> &meta, const sp<DataSource> &source,
- off64_t first_frame_pos, uint32_t fixed_header,
- const sp<MP3Seeker> &seeker);
-
- virtual status_t start(MetaData *params = NULL);
- virtual status_t stop();
-
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
- virtual ~MP3Source();
-
-private:
- static const size_t kMaxFrameSize;
- sp<MetaData> mMeta;
- sp<DataSource> mDataSource;
- off64_t mFirstFramePos;
- uint32_t mFixedHeader;
- off64_t mCurrentPos;
- int64_t mCurrentTimeUs;
- bool mStarted;
- sp<MP3Seeker> mSeeker;
- MediaBufferGroup *mGroup;
-
- int64_t mBasisTimeUs;
- int64_t mSamplesRead;
-
- MP3Source(const MP3Source &);
- MP3Source &operator=(const MP3Source &);
-};
-
-MP3Extractor::MP3Extractor(
- const sp<DataSource> &source, const sp<AMessage> &meta)
- : mInitCheck(NO_INIT),
- mDataSource(source),
- mFirstFramePos(-1),
- mFixedHeader(0) {
-
- off64_t pos = 0;
- off64_t post_id3_pos;
- uint32_t header;
- bool success;
-
- int64_t meta_offset;
- uint32_t meta_header;
- int64_t meta_post_id3_offset;
- if (meta != NULL
- && meta->findInt64("offset", &meta_offset)
- && meta->findInt32("header", (int32_t *)&meta_header)
- && meta->findInt64("post-id3-offset", &meta_post_id3_offset)) {
- // The sniffer has already done all the hard work for us, simply
- // accept its judgement.
- pos = (off64_t)meta_offset;
- header = meta_header;
- post_id3_pos = (off64_t)meta_post_id3_offset;
-
- success = true;
- } else {
- success = Resync(mDataSource, 0, &pos, &post_id3_pos, &header);
- }
-
- if (!success) {
- // mInitCheck will remain NO_INIT
- return;
- }
-
- mFirstFramePos = pos;
- mFixedHeader = header;
- mMeta = new MetaData;
- sp<XINGSeeker> seeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);
-
- if (seeker == NULL) {
- mSeeker = VBRISeeker::CreateFromSource(mDataSource, post_id3_pos);
- } else {
- mSeeker = seeker;
- int encd = seeker->getEncoderDelay();
- int encp = seeker->getEncoderPadding();
- if (encd != 0 || encp != 0) {
- mMeta->setInt32(kKeyEncoderDelay, encd);
- mMeta->setInt32(kKeyEncoderPadding, encp);
- }
- }
-
- if (mSeeker != NULL) {
- // While it is safe to send the XING/VBRI frame to the decoder, this will
- // result in an extra 1152 samples being output. In addition, the bitrate
- // of the Xing header might not match the rest of the file, which could
- // lead to problems when seeking. The real first frame to decode is after
- // the XING/VBRI frame, so skip there.
- size_t frame_size;
- int sample_rate;
- int num_channels;
- int bitrate;
- GetMPEGAudioFrameSize(
- header, &frame_size, &sample_rate, &num_channels, &bitrate);
- pos += frame_size;
- if (!Resync(mDataSource, 0, &pos, &post_id3_pos, &header)) {
- // mInitCheck will remain NO_INIT
- return;
- }
- mFirstFramePos = pos;
- mFixedHeader = header;
- }
-
- size_t frame_size;
- int sample_rate;
- int num_channels;
- int bitrate;
- GetMPEGAudioFrameSize(
- header, &frame_size, &sample_rate, &num_channels, &bitrate);
-
- unsigned layer = 4 - ((header >> 17) & 3);
-
- switch (layer) {
- case 1:
- mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I);
- break;
- case 2:
- mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II);
- break;
- case 3:
- mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
- break;
- default:
- TRESPASS();
- }
-
- mMeta->setInt32(kKeySampleRate, sample_rate);
- mMeta->setInt32(kKeyBitRate, bitrate * 1000);
- mMeta->setInt32(kKeyChannelCount, num_channels);
-
- int64_t durationUs;
-
- if (mSeeker == NULL || !mSeeker->getDuration(&durationUs)) {
- off64_t fileSize;
- if (mDataSource->getSize(&fileSize) == OK) {
- off64_t dataLength = fileSize - mFirstFramePos;
- if (dataLength > INT64_MAX / 8000LL) {
- // duration would overflow
- durationUs = INT64_MAX;
- } else {
- durationUs = 8000LL * dataLength / bitrate;
- }
- } else {
- durationUs = -1;
- }
- }
-
- if (durationUs >= 0) {
- mMeta->setInt64(kKeyDuration, durationUs);
- }
-
- mInitCheck = OK;
-
- // Get iTunes-style gapless info if present.
- // When getting the id3 tag, skip the V1 tags to prevent the source cache
- // from being iterated to the end of the file.
- ID3 id3(mDataSource, true);
- if (id3.isValid()) {
- ID3::Iterator *com = new ID3::Iterator(id3, "COM");
- if (com->done()) {
- delete com;
- com = new ID3::Iterator(id3, "COMM");
- }
- while(!com->done()) {
- String8 commentdesc;
- String8 commentvalue;
- com->getString(&commentdesc, &commentvalue);
- const char * desc = commentdesc.string();
- const char * value = commentvalue.string();
-
- // first 3 characters are the language, which we don't care about
- if(strlen(desc) > 3 && strcmp(desc + 3, "iTunSMPB") == 0) {
-
- int32_t delay, padding;
- if (sscanf(value, " %*x %x %x %*x", &delay, &padding) == 2) {
- mMeta->setInt32(kKeyEncoderDelay, delay);
- mMeta->setInt32(kKeyEncoderPadding, padding);
- }
- break;
- }
- com->next();
- }
- delete com;
- com = NULL;
- }
-}
-
-size_t MP3Extractor::countTracks() {
- return mInitCheck != OK ? 0 : 1;
-}
-
-sp<IMediaSource> MP3Extractor::getTrack(size_t index) {
- if (mInitCheck != OK || index != 0) {
- return NULL;
- }
-
- return new MP3Source(
- mMeta, mDataSource, mFirstFramePos, mFixedHeader,
- mSeeker);
-}
-
-sp<MetaData> MP3Extractor::getTrackMetaData(
- size_t index, uint32_t /* flags */) {
- if (mInitCheck != OK || index != 0) {
- return NULL;
- }
-
- return mMeta;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-// The theoretical maximum frame size for an MPEG audio stream should occur
-// while playing a Layer 2, MPEGv2.5 audio stream at 160kbps (with padding).
-// The size of this frame should be...
-// ((1152 samples/frame * 160000 bits/sec) /
-// (8000 samples/sec * 8 bits/byte)) + 1 padding byte/frame = 2881 bytes/frame.
-// Set our max frame size to the nearest power of 2 above this size (aka, 4kB)
-const size_t MP3Source::kMaxFrameSize = (1 << 12); /* 4096 bytes */
-MP3Source::MP3Source(
- const sp<MetaData> &meta, const sp<DataSource> &source,
- off64_t first_frame_pos, uint32_t fixed_header,
- const sp<MP3Seeker> &seeker)
- : mMeta(meta),
- mDataSource(source),
- mFirstFramePos(first_frame_pos),
- mFixedHeader(fixed_header),
- mCurrentPos(0),
- mCurrentTimeUs(0),
- mStarted(false),
- mSeeker(seeker),
- mGroup(NULL),
- mBasisTimeUs(0),
- mSamplesRead(0) {
-}
-
-MP3Source::~MP3Source() {
- if (mStarted) {
- stop();
- }
-}
-
-status_t MP3Source::start(MetaData *) {
- CHECK(!mStarted);
-
- mGroup = new MediaBufferGroup;
-
- mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
-
- mCurrentPos = mFirstFramePos;
- mCurrentTimeUs = 0;
-
- mBasisTimeUs = mCurrentTimeUs;
- mSamplesRead = 0;
-
- mStarted = true;
-
- return OK;
-}
-
-status_t MP3Source::stop() {
- CHECK(mStarted);
-
- delete mGroup;
- mGroup = NULL;
-
- mStarted = false;
-
- return OK;
-}
-
-sp<MetaData> MP3Source::getFormat() {
- return mMeta;
-}
-
-status_t MP3Source::read(
- MediaBuffer **out, const ReadOptions *options) {
- *out = NULL;
-
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- bool seekCBR = false;
-
- if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
- int64_t actualSeekTimeUs = seekTimeUs;
- if (mSeeker == NULL
- || !mSeeker->getOffsetForTime(&actualSeekTimeUs, &mCurrentPos)) {
- int32_t bitrate;
- if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
- // bitrate is in bits/sec.
- ALOGI("no bitrate");
-
- return ERROR_UNSUPPORTED;
- }
-
- mCurrentTimeUs = seekTimeUs;
- mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;
- seekCBR = true;
- } else {
- mCurrentTimeUs = actualSeekTimeUs;
- }
-
- mBasisTimeUs = mCurrentTimeUs;
- mSamplesRead = 0;
- }
-
- MediaBuffer *buffer;
- status_t err = mGroup->acquire_buffer(&buffer);
- if (err != OK) {
- return err;
- }
-
- size_t frame_size;
- int bitrate;
- int num_samples;
- int sample_rate;
- for (;;) {
- ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), 4);
- if (n < 4) {
- buffer->release();
- buffer = NULL;
-
- return (n < 0 ? n : ERROR_END_OF_STREAM);
- }
-
- uint32_t header = U32_AT((const uint8_t *)buffer->data());
-
- if ((header & kMask) == (mFixedHeader & kMask)
- && GetMPEGAudioFrameSize(
- header, &frame_size, &sample_rate, NULL,
- &bitrate, &num_samples)) {
-
- // re-calculate mCurrentTimeUs because we might have called Resync()
- if (seekCBR) {
- mCurrentTimeUs = (mCurrentPos - mFirstFramePos) * 8000 / bitrate;
- mBasisTimeUs = mCurrentTimeUs;
- }
-
- break;
- }
-
- // Lost sync.
- ALOGV("lost sync! header = 0x%08x, old header = 0x%08x\n", header, mFixedHeader);
-
- off64_t pos = mCurrentPos;
- if (!Resync(mDataSource, mFixedHeader, &pos, NULL, NULL)) {
- ALOGE("Unable to resync. Signalling end of stream.");
-
- buffer->release();
- buffer = NULL;
-
- return ERROR_END_OF_STREAM;
- }
-
- mCurrentPos = pos;
-
- // Try again with the new position.
- }
-
- CHECK(frame_size <= buffer->size());
-
- ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);
- if (n < (ssize_t)frame_size) {
- buffer->release();
- buffer = NULL;
-
- return (n < 0 ? n : ERROR_END_OF_STREAM);
- }
-
- buffer->set_range(0, frame_size);
-
- buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
- buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
-
- mCurrentPos += frame_size;
-
- mSamplesRead += num_samples;
- mCurrentTimeUs = mBasisTimeUs + ((mSamplesRead * 1000000) / sample_rate);
-
- *out = buffer;
-
- return OK;
-}
-
-sp<MetaData> MP3Extractor::getMetaData() {
- sp<MetaData> meta = new MetaData;
-
- if (mInitCheck != OK) {
- return meta;
- }
-
- meta->setCString(kKeyMIMEType, "audio/mpeg");
-
- ID3 id3(mDataSource);
-
- if (!id3.isValid()) {
- return meta;
- }
-
- struct Map {
- int key;
- const char *tag1;
- const char *tag2;
- };
- static const Map kMap[] = {
- { kKeyAlbum, "TALB", "TAL" },
- { kKeyArtist, "TPE1", "TP1" },
- { kKeyAlbumArtist, "TPE2", "TP2" },
- { kKeyComposer, "TCOM", "TCM" },
- { kKeyGenre, "TCON", "TCO" },
- { kKeyTitle, "TIT2", "TT2" },
- { kKeyYear, "TYE", "TYER" },
- { kKeyAuthor, "TXT", "TEXT" },
- { kKeyCDTrackNumber, "TRK", "TRCK" },
- { kKeyDiscNumber, "TPA", "TPOS" },
- { kKeyCompilation, "TCP", "TCMP" },
- };
- static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
-
- for (size_t i = 0; i < kNumMapEntries; ++i) {
- ID3::Iterator *it = new ID3::Iterator(id3, kMap[i].tag1);
- if (it->done()) {
- delete it;
- it = new ID3::Iterator(id3, kMap[i].tag2);
- }
-
- if (it->done()) {
- delete it;
- continue;
- }
-
- String8 s;
- it->getString(&s);
- delete it;
-
- meta->setCString(kMap[i].key, s);
- }
-
- size_t dataSize;
- String8 mime;
- const void *data = id3.getAlbumArt(&dataSize, &mime);
-
- if (data) {
- meta->setData(kKeyAlbumArt, MetaData::TYPE_NONE, data, dataSize);
- meta->setCString(kKeyAlbumArtMIME, mime.string());
- }
-
- return meta;
-}
-
-bool SniffMP3(
- const sp<DataSource> &source, String8 *mimeType,
- float *confidence, sp<AMessage> *meta) {
- off64_t pos = 0;
- off64_t post_id3_pos;
- uint32_t header;
- uint8_t mpeg_header[5];
- if (source->readAt(0, mpeg_header, sizeof(mpeg_header)) < (ssize_t)sizeof(mpeg_header)) {
- return false;
- }
-
- if (!memcmp("\x00\x00\x01\xba", mpeg_header, 4) && (mpeg_header[4] >> 4) == 2) {
- ALOGV("MPEG1PS container is not supported!");
- return false;
- }
- if (!Resync(source, 0, &pos, &post_id3_pos, &header)) {
- return false;
- }
-
- *meta = new AMessage;
- (*meta)->setInt64("offset", pos);
- (*meta)->setInt32("header", header);
- (*meta)->setInt64("post-id3-offset", post_id3_pos);
-
- *mimeType = MEDIA_MIMETYPE_AUDIO_MPEG;
- *confidence = 0.2f;
-
- return true;
-}
-
-} // namespace android
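
The Resync() routine deleted above skips a leading ID3v2 tag by decoding its 28-bit "syncsafe" length (four bytes, seven significant bits each) and adding the 10-byte header. A minimal standalone sketch of that computation follows; id3v2TagLength is a hypothetical helper name, not code from the removed extractor.

#include <cstddef>
#include <cstdint>

// Sketch only: mirrors the length math in the removed Resync(); 'header'
// is the first 10 bytes of the stream, already verified to start with "ID3".
static inline size_t id3v2TagLength(const uint8_t header[10]) {
    size_t len = ((header[6] & 0x7f) << 21)
               | ((header[7] & 0x7f) << 14)
               | ((header[8] & 0x7f) << 7)
               |  (header[9] & 0x7f);
    return len + 10;   // include the header itself, as the resync loop does
}
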
diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp
index 03ea959..770535c 100644
--- a/media/libstagefright/MPEG2TSWriter.cpp
+++ b/media/libstagefright/MPEG2TSWriter.cpp
@@ -16,18 +16,18 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MPEG2TSWriter"
-#include <media/stagefright/foundation/ADebug.h>
+#include <media/MediaSource.h>
+#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/MPEG2TSWriter.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
#include <arpa/inet.h>
#include "include/ESDS.h"
@@ -35,7 +35,7 @@
namespace android {
struct MPEG2TSWriter::SourceInfo : public AHandler {
- explicit SourceInfo(const sp<IMediaSource> &source);
+ explicit SourceInfo(const sp<MediaSource> &source);
void start(const sp<AMessage> &notify, const sp<MetaData> &params);
void stop();
@@ -69,7 +69,7 @@
kWhatRead = 'read',
};
- sp<IMediaSource> mSource;
+ sp<MediaSource> mSource;
sp<ALooper> mLooper;
sp<AMessage> mNotify;
@@ -85,13 +85,13 @@
void extractCodecSpecificData();
- void appendAACFrames(MediaBuffer *buffer);
- void appendAVCFrame(MediaBuffer *buffer);
+ void appendAACFrames(MediaBufferBase *buffer);
+ void appendAVCFrame(MediaBufferBase *buffer);
DISALLOW_EVIL_CONSTRUCTORS(SourceInfo);
};
-MPEG2TSWriter::SourceInfo::SourceInfo(const sp<IMediaSource> &source)
+MPEG2TSWriter::SourceInfo::SourceInfo(const sp<MediaSource> &source)
: mSource(source),
mLooper(new ALooper),
mEOSReceived(false),
@@ -249,7 +249,7 @@
notify->post();
}
-void MPEG2TSWriter::SourceInfo::appendAVCFrame(MediaBuffer *buffer) {
+void MPEG2TSWriter::SourceInfo::appendAVCFrame(MediaBufferBase *buffer) {
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kNotifyBuffer);
@@ -264,11 +264,11 @@
buffer->range_length());
int64_t timeUs;
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(buffer->meta_data().findInt64(kKeyTime, &timeUs));
mBuffer->meta()->setInt64("timeUs", timeUs);
int32_t isSync;
- if (buffer->meta_data()->findInt32(kKeyIsSyncFrame, &isSync)
+ if (buffer->meta_data().findInt32(kKeyIsSyncFrame, &isSync)
&& isSync != 0) {
mBuffer->meta()->setInt32("isSync", true);
}
@@ -279,7 +279,7 @@
notify->post();
}
-void MPEG2TSWriter::SourceInfo::appendAACFrames(MediaBuffer *buffer) {
+void MPEG2TSWriter::SourceInfo::appendAACFrames(MediaBufferBase *buffer) {
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kNotifyBuffer);
@@ -288,7 +288,7 @@
}
int64_t timeUs;
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(buffer->meta_data().findInt64(kKeyTime, &timeUs));
mBuffer->meta()->setInt64("timeUs", timeUs);
mBuffer->meta()->setInt32("isSync", true);
@@ -368,7 +368,7 @@
case kWhatRead:
{
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
status_t err = mSource->read(&buffer);
if (err != OK && err != INFO_FORMAT_CHANGED) {
@@ -499,7 +499,7 @@
}
}
-status_t MPEG2TSWriter::addSource(const sp<IMediaSource> &source) {
+status_t MPEG2TSWriter::addSource(const sp<MediaSource> &source) {
CHECK(!mStarted);
sp<MetaData> meta = source->getFormat();
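
This hunk is mostly a type migration: IMediaSource becomes MediaSource, MediaBuffer parameters become MediaBufferBase, and meta_data() is called through '.' rather than '->' because it now yields a reference. A stripped-down mock (not the real stagefright classes) sketching the accessor shape the call sites above migrate to:

#include <cstdint>

// Sketch only, with mock types standing in for MediaBufferBase/MetaData.
struct MockMetaData {
    bool findInt64(uint32_t /*key*/, int64_t *out) const { *out = 0; return true; }
};

struct MockBuffer {
    MockMetaData &meta_data() { return mMeta; }   // returns a reference now
private:
    MockMetaData mMeta;
};

static int64_t frameTimeUs(MockBuffer *buffer) {
    int64_t timeUs = 0;
    buffer->meta_data().findInt64(/*kKeyTime*/ 0, &timeUs);  // '.', not '->'
    return timeUs;
}
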
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
deleted file mode 100644
index a8b6614..0000000
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ /dev/null
@@ -1,5515 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MPEG4Extractor"
-
-#include <ctype.h>
-#include <inttypes.h>
-#include <memory>
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <utils/Log.h>
-
-#include "include/MPEG4Extractor.h"
-#include "include/SampleTable.h"
-#include "include/ItemTable.h"
-#include "include/ESDS.h"
-
-#include <media/stagefright/foundation/ABitReader.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/foundation/ColorUtils.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <utils/String8.h>
-
-#include <byteswap.h>
-#include "include/ID3.h"
-#include "include/avc_utils.h"
-
-#ifndef UINT32_MAX
-#define UINT32_MAX (4294967295U)
-#endif
-
-namespace android {
-
-enum {
- // max track header chunk to return
- kMaxTrackHeaderSize = 32,
-
- // maximum size of an atom. Some atoms can be bigger according to the spec,
- // but we only allow up to this size.
- kMaxAtomSize = 64 * 1024 * 1024,
-};
-
-class MPEG4Source : public MediaSource {
-public:
- // Caller retains ownership of both "dataSource" and "sampleTable".
- MPEG4Source(const sp<MPEG4Extractor> &owner,
- const sp<MetaData> &format,
- const sp<DataSource> &dataSource,
- int32_t timeScale,
- const sp<SampleTable> &sampleTable,
- Vector<SidxEntry> &sidx,
- const Trex *trex,
- off64_t firstMoofOffset,
- const sp<ItemTable> &itemTable);
- virtual status_t init();
-
- virtual status_t start(MetaData *params = NULL);
- virtual status_t stop();
-
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(MediaBuffer **buffer, const ReadOptions *options = NULL);
- virtual bool supportNonblockingRead() { return true; }
- virtual status_t fragmentedRead(MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
- virtual ~MPEG4Source();
-
-private:
- Mutex mLock;
-
- // keep the MPEG4Extractor around, since we're referencing its data
- sp<MPEG4Extractor> mOwner;
- sp<MetaData> mFormat;
- sp<DataSource> mDataSource;
- int32_t mTimescale;
- sp<SampleTable> mSampleTable;
- uint32_t mCurrentSampleIndex;
- uint32_t mCurrentFragmentIndex;
- Vector<SidxEntry> &mSegments;
- const Trex *mTrex;
- off64_t mFirstMoofOffset;
- off64_t mCurrentMoofOffset;
- off64_t mNextMoofOffset;
- uint32_t mCurrentTime;
- int32_t mLastParsedTrackId;
- int32_t mTrackId;
-
- int32_t mCryptoMode; // passed in from extractor
- int32_t mDefaultIVSize; // passed in from extractor
- uint8_t mCryptoKey[16]; // passed in from extractor
- uint32_t mCurrentAuxInfoType;
- uint32_t mCurrentAuxInfoTypeParameter;
- int32_t mCurrentDefaultSampleInfoSize;
- uint32_t mCurrentSampleInfoCount;
- uint32_t mCurrentSampleInfoAllocSize;
- uint8_t* mCurrentSampleInfoSizes;
- uint32_t mCurrentSampleInfoOffsetCount;
- uint32_t mCurrentSampleInfoOffsetsAllocSize;
- uint64_t* mCurrentSampleInfoOffsets;
-
- bool mIsAVC;
- bool mIsHEVC;
- size_t mNALLengthSize;
-
- bool mStarted;
-
- MediaBufferGroup *mGroup;
-
- MediaBuffer *mBuffer;
-
- bool mWantsNALFragments;
-
- uint8_t *mSrcBuffer;
-
- bool mIsHEIF;
- sp<ItemTable> mItemTable;
-
- size_t parseNALSize(const uint8_t *data) const;
- status_t parseChunk(off64_t *offset);
- status_t parseTrackFragmentHeader(off64_t offset, off64_t size);
- status_t parseTrackFragmentRun(off64_t offset, off64_t size);
- status_t parseSampleAuxiliaryInformationSizes(off64_t offset, off64_t size);
- status_t parseSampleAuxiliaryInformationOffsets(off64_t offset, off64_t size);
-
- struct TrackFragmentHeaderInfo {
- enum Flags {
- kBaseDataOffsetPresent = 0x01,
- kSampleDescriptionIndexPresent = 0x02,
- kDefaultSampleDurationPresent = 0x08,
- kDefaultSampleSizePresent = 0x10,
- kDefaultSampleFlagsPresent = 0x20,
- kDurationIsEmpty = 0x10000,
- };
-
- uint32_t mTrackID;
- uint32_t mFlags;
- uint64_t mBaseDataOffset;
- uint32_t mSampleDescriptionIndex;
- uint32_t mDefaultSampleDuration;
- uint32_t mDefaultSampleSize;
- uint32_t mDefaultSampleFlags;
-
- uint64_t mDataOffset;
- };
- TrackFragmentHeaderInfo mTrackFragmentHeaderInfo;
-
- struct Sample {
- off64_t offset;
- size_t size;
- uint32_t duration;
- int32_t compositionOffset;
- uint8_t iv[16];
- Vector<size_t> clearsizes;
- Vector<size_t> encryptedsizes;
- };
- Vector<Sample> mCurrentSamples;
-
- MPEG4Source(const MPEG4Source &);
- MPEG4Source &operator=(const MPEG4Source &);
-};
-
-// This custom data source wraps an existing one and satisfies requests
-// falling entirely within a cached range from the cache while forwarding
-// all remaining requests to the wrapped datasource.
-// This is used to cache the full sampletable metadata for a single track,
-// possibly wrapping multiple times to cover all tracks, i.e.
-// Each MPEG4DataSource caches the sampletable metadata for a single track.
-
-struct MPEG4DataSource : public DataSource {
- explicit MPEG4DataSource(const sp<DataSource> &source);
-
- virtual status_t initCheck() const;
- virtual ssize_t readAt(off64_t offset, void *data, size_t size);
- virtual status_t getSize(off64_t *size);
- virtual uint32_t flags();
-
- status_t setCachedRange(off64_t offset, size_t size);
-
-protected:
- virtual ~MPEG4DataSource();
-
-private:
- Mutex mLock;
-
- sp<DataSource> mSource;
- off64_t mCachedOffset;
- size_t mCachedSize;
- uint8_t *mCache;
-
- void clearCache();
-
- MPEG4DataSource(const MPEG4DataSource &);
- MPEG4DataSource &operator=(const MPEG4DataSource &);
-};
-
-MPEG4DataSource::MPEG4DataSource(const sp<DataSource> &source)
- : mSource(source),
- mCachedOffset(0),
- mCachedSize(0),
- mCache(NULL) {
-}
-
-MPEG4DataSource::~MPEG4DataSource() {
- clearCache();
-}
-
-void MPEG4DataSource::clearCache() {
- if (mCache) {
- free(mCache);
- mCache = NULL;
- }
-
- mCachedOffset = 0;
- mCachedSize = 0;
-}
-
-status_t MPEG4DataSource::initCheck() const {
- return mSource->initCheck();
-}
-
-ssize_t MPEG4DataSource::readAt(off64_t offset, void *data, size_t size) {
- Mutex::Autolock autoLock(mLock);
-
- if (isInRange(mCachedOffset, mCachedSize, offset, size)) {
- memcpy(data, &mCache[offset - mCachedOffset], size);
- return size;
- }
-
- return mSource->readAt(offset, data, size);
-}
-
-status_t MPEG4DataSource::getSize(off64_t *size) {
- return mSource->getSize(size);
-}
-
-uint32_t MPEG4DataSource::flags() {
- return mSource->flags();
-}
-
-status_t MPEG4DataSource::setCachedRange(off64_t offset, size_t size) {
- Mutex::Autolock autoLock(mLock);
-
- clearCache();
-
- mCache = (uint8_t *)malloc(size);
-
- if (mCache == NULL) {
- return -ENOMEM;
- }
-
- mCachedOffset = offset;
- mCachedSize = size;
-
- ssize_t err = mSource->readAt(mCachedOffset, mCache, mCachedSize);
-
- if (err < (ssize_t)size) {
- clearCache();
-
- return ERROR_IO;
- }
-
- return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-static const bool kUseHexDump = false;
-
-static const char *FourCC2MIME(uint32_t fourcc) {
- switch (fourcc) {
- case FOURCC('m', 'p', '4', 'a'):
- return MEDIA_MIMETYPE_AUDIO_AAC;
-
- case FOURCC('s', 'a', 'm', 'r'):
- return MEDIA_MIMETYPE_AUDIO_AMR_NB;
-
- case FOURCC('s', 'a', 'w', 'b'):
- return MEDIA_MIMETYPE_AUDIO_AMR_WB;
-
- case FOURCC('m', 'p', '4', 'v'):
- return MEDIA_MIMETYPE_VIDEO_MPEG4;
-
- case FOURCC('s', '2', '6', '3'):
- case FOURCC('h', '2', '6', '3'):
- case FOURCC('H', '2', '6', '3'):
- return MEDIA_MIMETYPE_VIDEO_H263;
-
- case FOURCC('a', 'v', 'c', '1'):
- return MEDIA_MIMETYPE_VIDEO_AVC;
-
- case FOURCC('h', 'v', 'c', '1'):
- case FOURCC('h', 'e', 'v', '1'):
- return MEDIA_MIMETYPE_VIDEO_HEVC;
- default:
- CHECK(!"should not be here.");
- return NULL;
- }
-}
-
-static bool AdjustChannelsAndRate(uint32_t fourcc, uint32_t *channels, uint32_t *rate) {
- if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, FourCC2MIME(fourcc))) {
- // AMR NB audio is always mono, 8kHz
- *channels = 1;
- *rate = 8000;
- return true;
- } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, FourCC2MIME(fourcc))) {
- // AMR WB audio is always mono, 16kHz
- *channels = 1;
- *rate = 16000;
- return true;
- }
- return false;
-}
-
-MPEG4Extractor::MPEG4Extractor(const sp<DataSource> &source)
- : mMoofOffset(0),
- mMoofFound(false),
- mMdatFound(false),
- mDataSource(source),
- mInitCheck(NO_INIT),
- mHeaderTimescale(0),
- mIsQT(false),
- mIsHEIF(false),
- mFirstTrack(NULL),
- mLastTrack(NULL),
- mFileMetaData(new MetaData),
- mFirstSINF(NULL),
- mIsDrm(false) {
-}
-
-MPEG4Extractor::~MPEG4Extractor() {
- release();
-}
-
-void MPEG4Extractor::release() {
- Track *track = mFirstTrack;
- while (track) {
- Track *next = track->next;
-
- delete track;
- track = next;
- }
- mFirstTrack = mLastTrack = NULL;
-
- SINF *sinf = mFirstSINF;
- while (sinf) {
- SINF *next = sinf->next;
- delete[] sinf->IPMPData;
- delete sinf;
- sinf = next;
- }
- mFirstSINF = NULL;
-
- for (size_t i = 0; i < mPssh.size(); i++) {
- delete [] mPssh[i].data;
- }
- mPssh.clear();
-
- if (mDataSource != NULL) {
- mDataSource->close();
- mDataSource.clear();
- }
-}
-
-uint32_t MPEG4Extractor::flags() const {
- return CAN_PAUSE |
- ((mMoofOffset == 0 || mSidxEntries.size() != 0) ?
- (CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK) : 0);
-}
-
-sp<MetaData> MPEG4Extractor::getMetaData() {
- status_t err;
- if ((err = readMetaData()) != OK) {
- return new MetaData;
- }
-
- return mFileMetaData;
-}
-
-size_t MPEG4Extractor::countTracks() {
- status_t err;
- if ((err = readMetaData()) != OK) {
- ALOGV("MPEG4Extractor::countTracks: no tracks");
- return 0;
- }
-
- size_t n = 0;
- Track *track = mFirstTrack;
- while (track) {
- ++n;
- track = track->next;
- }
-
- ALOGV("MPEG4Extractor::countTracks: %zu tracks", n);
- return n;
-}
-
-sp<MetaData> MPEG4Extractor::getTrackMetaData(
- size_t index, uint32_t flags) {
- status_t err;
- if ((err = readMetaData()) != OK) {
- return NULL;
- }
-
- Track *track = mFirstTrack;
- while (index > 0) {
- if (track == NULL) {
- return NULL;
- }
-
- track = track->next;
- --index;
- }
-
- if (track == NULL) {
- return NULL;
- }
-
- [=] {
- int64_t duration;
- int32_t samplerate;
- if (track->has_elst && mHeaderTimescale != 0 &&
- track->meta->findInt64(kKeyDuration, &duration) &&
- track->meta->findInt32(kKeySampleRate, &samplerate)) {
-
- track->has_elst = false;
-
- if (track->elst_segment_duration > INT64_MAX) {
- return;
- }
- int64_t segment_duration = track->elst_segment_duration;
- int64_t media_time = track->elst_media_time;
- int64_t halfscale = mHeaderTimescale / 2;
- ALOGV("segment_duration = %" PRId64 ", media_time = %" PRId64
- ", halfscale = %" PRId64 ", timescale = %d",
- segment_duration,
- media_time,
- halfscale,
- mHeaderTimescale);
-
- int64_t delay;
- // delay = ((media_time * samplerate) + halfscale) / mHeaderTimescale;
- if (__builtin_mul_overflow(media_time, samplerate, &delay) ||
- __builtin_add_overflow(delay, halfscale, &delay) ||
- (delay /= mHeaderTimescale, false) ||
- delay > INT32_MAX ||
- delay < INT32_MIN) {
- return;
- }
- ALOGV("delay = %" PRId64, delay);
- track->meta->setInt32(kKeyEncoderDelay, delay);
-
- int64_t scaled_duration;
- // scaled_duration = duration * mHeaderTimescale;
- if (__builtin_mul_overflow(duration, mHeaderTimescale, &scaled_duration)) {
- return;
- }
- ALOGV("scaled_duration = %" PRId64, scaled_duration);
-
- int64_t segment_end;
- int64_t padding;
- // padding = scaled_duration - ((segment_duration + media_time) * 1000000);
- if (__builtin_add_overflow(segment_duration, media_time, &segment_end) ||
- __builtin_mul_overflow(segment_end, 1000000, &segment_end) ||
- __builtin_sub_overflow(scaled_duration, segment_end, &padding)) {
- return;
- }
- ALOGV("segment_end = %" PRId64 ", padding = %" PRId64, segment_end, padding);
-
- if (padding < 0) {
- // track duration from media header (which is what kKeyDuration is) might
- // be slightly shorter than the segment duration, which would make the
- // padding negative. Clamp to zero.
- padding = 0;
- }
-
- int64_t paddingsamples;
- int64_t halfscale_e6;
- int64_t timescale_e6;
- // paddingsamples = ((padding * samplerate) + (halfscale * 1000000))
- // / (mHeaderTimescale * 1000000);
- if (__builtin_mul_overflow(padding, samplerate, &paddingsamples) ||
- __builtin_mul_overflow(halfscale, 1000000, &halfscale_e6) ||
- __builtin_mul_overflow(mHeaderTimescale, 1000000, &timescale_e6) ||
- __builtin_add_overflow(paddingsamples, halfscale_e6, &paddingsamples) ||
- (paddingsamples /= timescale_e6, false) ||
- paddingsamples > INT32_MAX) {
- return;
- }
- ALOGV("paddingsamples = %" PRId64, paddingsamples);
- track->meta->setInt32(kKeyEncoderPadding, paddingsamples);
- }
- }();
-
- if ((flags & kIncludeExtensiveMetaData)
- && !track->includes_expensive_metadata) {
- track->includes_expensive_metadata = true;
-
- const char *mime;
- CHECK(track->meta->findCString(kKeyMIMEType, &mime));
- if (!strncasecmp("video/", mime, 6)) {
- // MPEG2 tracks do not provide CSD, so read the stream header
- if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)) {
- off64_t offset;
- size_t size;
- if (track->sampleTable->getMetaDataForSample(
- 0 /* sampleIndex */, &offset, &size, NULL /* sampleTime */) == OK) {
- if (size > kMaxTrackHeaderSize) {
- size = kMaxTrackHeaderSize;
- }
- uint8_t header[kMaxTrackHeaderSize];
- if (mDataSource->readAt(offset, &header, size) == (ssize_t)size) {
- track->meta->setData(kKeyStreamHeader, 'mdat', header, size);
- }
- }
- }
-
- if (mMoofOffset > 0) {
- int64_t duration;
- if (track->meta->findInt64(kKeyDuration, &duration)) {
- // nothing fancy, just pick a frame near 1/4th of the duration
- track->meta->setInt64(
- kKeyThumbnailTime, duration / 4);
- }
- } else {
- uint32_t sampleIndex;
- uint32_t sampleTime;
- if (track->timescale != 0 &&
- track->sampleTable->findThumbnailSample(&sampleIndex) == OK
- && track->sampleTable->getMetaDataForSample(
- sampleIndex, NULL /* offset */, NULL /* size */,
- &sampleTime) == OK) {
- track->meta->setInt64(
- kKeyThumbnailTime,
- ((int64_t)sampleTime * 1000000) / track->timescale);
- }
- }
- }
- }
-
- return track->meta;
-}
-
-status_t MPEG4Extractor::readMetaData() {
- if (mInitCheck != NO_INIT) {
- return mInitCheck;
- }
-
- off64_t offset = 0;
- status_t err;
- bool sawMoovOrSidx = false;
-
- while (!((sawMoovOrSidx && (mMdatFound || mMoofFound)) ||
- (mIsHEIF && (mItemTable != NULL) && mItemTable->isValid()))) {
- off64_t orig_offset = offset;
- err = parseChunk(&offset, 0);
-
- if (err != OK && err != UNKNOWN_ERROR) {
- break;
- } else if (offset <= orig_offset) {
- // only continue parsing if the offset was advanced,
- // otherwise we might end up in an infinite loop
- ALOGE("did not advance: %lld->%lld", (long long)orig_offset, (long long)offset);
- err = ERROR_MALFORMED;
- break;
- } else if (err == UNKNOWN_ERROR) {
- sawMoovOrSidx = true;
- }
- }
-
- if (mInitCheck == OK) {
- if (findTrackByMimePrefix("video/") != NULL) {
- mFileMetaData->setCString(
- kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
- } else if (findTrackByMimePrefix("audio/") != NULL) {
- mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
- } else {
- mFileMetaData->setCString(kKeyMIMEType, "application/octet-stream");
- }
- } else {
- mInitCheck = err;
- }
-
- CHECK_NE(err, (status_t)NO_INIT);
-
- // copy pssh data into file metadata
- uint64_t psshsize = 0;
- for (size_t i = 0; i < mPssh.size(); i++) {
- psshsize += 20 + mPssh[i].datalen;
- }
- if (psshsize > 0 && psshsize <= UINT32_MAX) {
- char *buf = (char*)malloc(psshsize);
- if (!buf) {
- ALOGE("b/28471206");
- return NO_MEMORY;
- }
- char *ptr = buf;
- for (size_t i = 0; i < mPssh.size(); i++) {
- memcpy(ptr, mPssh[i].uuid, 20); // uuid + length
- memcpy(ptr + 20, mPssh[i].data, mPssh[i].datalen);
- ptr += (20 + mPssh[i].datalen);
- }
- mFileMetaData->setData(kKeyPssh, 'pssh', buf, psshsize);
- free(buf);
- }
-
- if (mIsHEIF) {
- sp<MetaData> meta = mItemTable->getImageMeta();
- if (meta == NULL) {
- return ERROR_MALFORMED;
- }
-
- Track *track = mLastTrack;
- if (track != NULL) {
- ALOGW("track is set before metadata is fully processed");
- } else {
- track = new Track;
- track->next = NULL;
- mFirstTrack = mLastTrack = track;
- }
-
- track->meta = meta;
- track->meta->setInt32(kKeyTrackID, 0);
- track->includes_expensive_metadata = false;
- track->skipTrack = false;
- track->timescale = 0;
- }
-
- return mInitCheck;
-}
-
-char* MPEG4Extractor::getDrmTrackInfo(size_t trackID, int *len) {
- if (mFirstSINF == NULL) {
- return NULL;
- }
-
- SINF *sinf = mFirstSINF;
- while (sinf && (trackID != sinf->trackID)) {
- sinf = sinf->next;
- }
-
- if (sinf == NULL) {
- return NULL;
- }
-
- *len = sinf->len;
- return sinf->IPMPData;
-}
-
-// Reads an encoded integer, 7 bits at a time, until it encounters a byte with the high bit clear.
-static int32_t readSize(off64_t offset,
- const sp<DataSource> &DataSource, uint8_t *numOfBytes) {
- uint32_t size = 0;
- uint8_t data;
- bool moreData = true;
- *numOfBytes = 0;
-
- while (moreData) {
- if (DataSource->readAt(offset, &data, 1) < 1) {
- return -1;
- }
- offset ++;
- moreData = (data >= 128) ? true : false;
- size = (size << 7) | (data & 0x7f); // Take last 7 bits
- (*numOfBytes) ++;
- }
-
- return size;
-}
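readSize() above decodes the MPEG-4 "expandable" size field: each byte contributes its low 7 bits, most significant first, and a set high bit means another byte follows. A small self-contained illustration of the same decoding over an in-memory buffer (hypothetical helper, assumptions mine):

#include <cstdint>
#include <cstdio>

// Decodes an expandable size field from buf; reports bytes consumed via *used.
static uint32_t decodeExpandableSize(const uint8_t *buf, size_t len, size_t *used) {
    uint32_t size = 0;
    size_t i = 0;
    while (i < len) {
        uint8_t b = buf[i++];
        size = (size << 7) | (b & 0x7f);  // take the low 7 bits
        if ((b & 0x80) == 0) break;       // high bit clear: last byte
    }
    *used = i;
    return size;
}

int main() {
    const uint8_t enc[] = { 0x81, 0x23 };  // (0x01 << 7) | 0x23 = 163
    size_t used = 0;
    printf("size=%u bytes=%zu\n", decodeExpandableSize(enc, sizeof(enc), &used), used);
    return 0;
}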
-
-status_t MPEG4Extractor::parseDrmSINF(
- off64_t * /* offset */, off64_t data_offset) {
- uint8_t updateIdTag;
- if (mDataSource->readAt(data_offset, &updateIdTag, 1) < 1) {
- return ERROR_IO;
- }
- data_offset ++;
-
- if (0x01/*OBJECT_DESCRIPTOR_UPDATE_ID_TAG*/ != updateIdTag) {
- return ERROR_MALFORMED;
- }
-
- uint8_t numOfBytes;
- int32_t size = readSize(data_offset, mDataSource, &numOfBytes);
- if (size < 0) {
- return ERROR_IO;
- }
- data_offset += numOfBytes;
-
- while(size >= 11 ) {
- uint8_t descriptorTag;
- if (mDataSource->readAt(data_offset, &descriptorTag, 1) < 1) {
- return ERROR_IO;
- }
- data_offset ++;
-
- if (0x11/*OBJECT_DESCRIPTOR_ID_TAG*/ != descriptorTag) {
- return ERROR_MALFORMED;
- }
-
- uint8_t buffer[8];
- //ObjectDescriptorID and ObjectDescriptor url flag
- if (mDataSource->readAt(data_offset, buffer, 2) < 2) {
- return ERROR_IO;
- }
- data_offset += 2;
-
- if ((buffer[1] >> 5) & 0x0001) { //url flag is set
- return ERROR_MALFORMED;
- }
-
- if (mDataSource->readAt(data_offset, buffer, 8) < 8) {
- return ERROR_IO;
- }
- data_offset += 8;
-
- if ((0x0F/*ES_ID_REF_TAG*/ != buffer[1])
- || ( 0x0A/*IPMP_DESCRIPTOR_POINTER_ID_TAG*/ != buffer[5])) {
- return ERROR_MALFORMED;
- }
-
- SINF *sinf = new SINF;
- sinf->trackID = U16_AT(&buffer[3]);
- sinf->IPMPDescriptorID = buffer[7];
- sinf->next = mFirstSINF;
- mFirstSINF = sinf;
-
- size -= (8 + 2 + 1);
- }
-
- if (size != 0) {
- return ERROR_MALFORMED;
- }
-
- if (mDataSource->readAt(data_offset, &updateIdTag, 1) < 1) {
- return ERROR_IO;
- }
- data_offset ++;
-
- if(0x05/*IPMP_DESCRIPTOR_UPDATE_ID_TAG*/ != updateIdTag) {
- return ERROR_MALFORMED;
- }
-
- size = readSize(data_offset, mDataSource, &numOfBytes);
- if (size < 0) {
- return ERROR_IO;
- }
- data_offset += numOfBytes;
-
- while (size > 0) {
- uint8_t tag;
- int32_t dataLen;
- if (mDataSource->readAt(data_offset, &tag, 1) < 1) {
- return ERROR_IO;
- }
- data_offset ++;
-
- if (0x0B/*IPMP_DESCRIPTOR_ID_TAG*/ == tag) {
- uint8_t id;
- dataLen = readSize(data_offset, mDataSource, &numOfBytes);
- if (dataLen < 0) {
- return ERROR_IO;
- } else if (dataLen < 4) {
- return ERROR_MALFORMED;
- }
- data_offset += numOfBytes;
-
- if (mDataSource->readAt(data_offset, &id, 1) < 1) {
- return ERROR_IO;
- }
- data_offset ++;
-
- SINF *sinf = mFirstSINF;
- while (sinf && (sinf->IPMPDescriptorID != id)) {
- sinf = sinf->next;
- }
- if (sinf == NULL) {
- return ERROR_MALFORMED;
- }
- sinf->len = dataLen - 3;
- sinf->IPMPData = new (std::nothrow) char[sinf->len];
- if (sinf->IPMPData == NULL) {
- return ERROR_MALFORMED;
- }
- data_offset += 2;
-
- if (mDataSource->readAt(data_offset, sinf->IPMPData, sinf->len) < sinf->len) {
- return ERROR_IO;
- }
- data_offset += sinf->len;
-
- size -= (dataLen + numOfBytes + 1);
- }
- }
-
- if (size != 0) {
- return ERROR_MALFORMED;
- }
-
- return UNKNOWN_ERROR; // Return a dummy error.
-}
-
-struct PathAdder {
- PathAdder(Vector<uint32_t> *path, uint32_t chunkType)
- : mPath(path) {
- mPath->push(chunkType);
- }
-
- ~PathAdder() {
- mPath->pop();
- }
-
-private:
- Vector<uint32_t> *mPath;
-
- PathAdder(const PathAdder &);
- PathAdder &operator=(const PathAdder &);
-};
-
-static bool underMetaDataPath(const Vector<uint32_t> &path) {
- return path.size() >= 5
- && path[0] == FOURCC('m', 'o', 'o', 'v')
- && path[1] == FOURCC('u', 'd', 't', 'a')
- && path[2] == FOURCC('m', 'e', 't', 'a')
- && path[3] == FOURCC('i', 'l', 's', 't');
-}
-
-static bool underQTMetaPath(const Vector<uint32_t> &path, int32_t depth) {
- return path.size() >= 2
- && path[0] == FOURCC('m', 'o', 'o', 'v')
- && path[1] == FOURCC('m', 'e', 't', 'a')
- && (depth == 2
- || (depth == 3
- && (path[2] == FOURCC('h', 'd', 'l', 'r')
- || path[2] == FOURCC('i', 'l', 's', 't')
- || path[2] == FOURCC('k', 'e', 'y', 's'))));
-}
-
-// Given a time in seconds since Jan 1 1904, produce a human-readable string.
-static bool convertTimeToDate(int64_t time_1904, String8 *s) {
- // delta between mpeg4 time and unix epoch time
- static const int64_t delta = (((66 * 365 + 17) * 24) * 3600);
- if (time_1904 < INT64_MIN + delta) {
- return false;
- }
- time_t time_1970 = time_1904 - delta;
-
- char tmp[32];
- struct tm* tm = gmtime(&time_1970);
- if (tm != NULL &&
- strftime(tmp, sizeof(tmp), "%Y%m%dT%H%M%S.000Z", tm) > 0) {
- s->setTo(tmp);
- return true;
- }
- return false;
-}
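The delta above is the offset between the MP4 epoch (1904-01-01) and the Unix epoch (1970-01-01): 66 years of 365 days plus 17 leap days (1904, 1908, ..., 1968), i.e. 2,082,844,800 seconds. A compile-time check of that arithmetic, purely as an illustration:

// (66 * 365 + 17) days * 24 h * 3600 s = 2,082,844,800 s
static_assert((((66LL * 365 + 17) * 24) * 3600) == 2082844800LL,
              "seconds between 1904-01-01 and 1970-01-01");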
-
-status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
- ALOGV("entering parseChunk %lld/%d", (long long)*offset, depth);
-
- if (*offset < 0) {
- ALOGE("b/23540914");
- return ERROR_MALFORMED;
- }
- if (depth > 100) {
- ALOGE("b/27456299");
- return ERROR_MALFORMED;
- }
- uint32_t hdr[2];
- if (mDataSource->readAt(*offset, hdr, 8) < 8) {
- return ERROR_IO;
- }
- uint64_t chunk_size = ntohl(hdr[0]);
- int32_t chunk_type = ntohl(hdr[1]);
- off64_t data_offset = *offset + 8;
-
- if (chunk_size == 1) {
- if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
- return ERROR_IO;
- }
- chunk_size = ntoh64(chunk_size);
- data_offset += 8;
-
- if (chunk_size < 16) {
- // The smallest valid chunk is 16 bytes long in this case.
- return ERROR_MALFORMED;
- }
- } else if (chunk_size == 0) {
- if (depth == 0) {
- // atom extends to end of file
- off64_t sourceSize;
- if (mDataSource->getSize(&sourceSize) == OK) {
- chunk_size = (sourceSize - *offset);
- } else {
- // XXX could we just pick a "sufficiently large" value here?
- ALOGE("atom size is 0, and data source has no size");
- return ERROR_MALFORMED;
- }
- } else {
- // not allowed for non-toplevel atoms, skip it
- *offset += 4;
- return OK;
- }
- } else if (chunk_size < 8) {
- // The smallest valid chunk is 8 bytes long.
- ALOGE("invalid chunk size: %" PRIu64, chunk_size);
- return ERROR_MALFORMED;
- }
-
- char chunk[5];
- MakeFourCCString(chunk_type, chunk);
- ALOGV("chunk: %s @ %lld, %d", chunk, (long long)*offset, depth);
-
- if (kUseHexDump) {
- static const char kWhitespace[] = " ";
- const char *indent = &kWhitespace[sizeof(kWhitespace) - 1 - 2 * depth];
- printf("%sfound chunk '%s' of size %" PRIu64 "\n", indent, chunk, chunk_size);
-
- char buffer[256];
- size_t n = chunk_size;
- if (n > sizeof(buffer)) {
- n = sizeof(buffer);
- }
- if (mDataSource->readAt(*offset, buffer, n)
- < (ssize_t)n) {
- return ERROR_IO;
- }
-
- hexdump(buffer, n);
- }
-
- PathAdder autoAdder(&mPath, chunk_type);
-
- // (data_offset - *offset) is either 8 or 16
- off64_t chunk_data_size = chunk_size - (data_offset - *offset);
- if (chunk_data_size < 0) {
- ALOGE("b/23540914");
- return ERROR_MALFORMED;
- }
- if (chunk_type != FOURCC('m', 'd', 'a', 't') && chunk_data_size > kMaxAtomSize) {
- char errMsg[100];
- sprintf(errMsg, "%s atom has size %" PRId64, chunk, chunk_data_size);
- ALOGE("%s (b/28615448)", errMsg);
- android_errorWriteWithInfoLog(0x534e4554, "28615448", -1, errMsg, strlen(errMsg));
- return ERROR_MALFORMED;
- }
-
- if (chunk_type != FOURCC('c', 'p', 'r', 't')
- && chunk_type != FOURCC('c', 'o', 'v', 'r')
- && mPath.size() == 5 && underMetaDataPath(mPath)) {
- off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset;
- while (*offset < stop_offset) {
- status_t err = parseChunk(offset, depth + 1);
- if (err != OK) {
- return err;
- }
- }
-
- if (*offset != stop_offset) {
- return ERROR_MALFORMED;
- }
-
- return OK;
- }
-
- switch(chunk_type) {
- case FOURCC('m', 'o', 'o', 'v'):
- case FOURCC('t', 'r', 'a', 'k'):
- case FOURCC('m', 'd', 'i', 'a'):
- case FOURCC('m', 'i', 'n', 'f'):
- case FOURCC('d', 'i', 'n', 'f'):
- case FOURCC('s', 't', 'b', 'l'):
- case FOURCC('m', 'v', 'e', 'x'):
- case FOURCC('m', 'o', 'o', 'f'):
- case FOURCC('t', 'r', 'a', 'f'):
- case FOURCC('m', 'f', 'r', 'a'):
- case FOURCC('u', 'd', 't', 'a'):
- case FOURCC('i', 'l', 's', 't'):
- case FOURCC('s', 'i', 'n', 'f'):
- case FOURCC('s', 'c', 'h', 'i'):
- case FOURCC('e', 'd', 't', 's'):
- case FOURCC('w', 'a', 'v', 'e'):
- {
- if (chunk_type == FOURCC('m', 'o', 'o', 'v') && depth != 0) {
- ALOGE("moov: depth %d", depth);
- return ERROR_MALFORMED;
- }
-
- if (chunk_type == FOURCC('m', 'o', 'o', 'v') && mInitCheck == OK) {
- ALOGE("duplicate moov");
- return ERROR_MALFORMED;
- }
-
- if (chunk_type == FOURCC('m', 'o', 'o', 'f') && !mMoofFound) {
- // store the offset of the first segment
- mMoofFound = true;
- mMoofOffset = *offset;
- }
-
- if (chunk_type == FOURCC('s', 't', 'b', 'l')) {
- ALOGV("sampleTable chunk is %" PRIu64 " bytes long.", chunk_size);
-
- if (mDataSource->flags()
- & (DataSource::kWantsPrefetching
- | DataSource::kIsCachingDataSource)) {
- sp<MPEG4DataSource> cachedSource =
- new MPEG4DataSource(mDataSource);
-
- if (cachedSource->setCachedRange(*offset, chunk_size) == OK) {
- mDataSource = cachedSource;
- }
- }
-
- if (mLastTrack == NULL) {
- return ERROR_MALFORMED;
- }
-
- mLastTrack->sampleTable = new SampleTable(mDataSource);
- }
-
- bool isTrack = false;
- if (chunk_type == FOURCC('t', 'r', 'a', 'k')) {
- if (depth != 1) {
- ALOGE("trak: depth %d", depth);
- return ERROR_MALFORMED;
- }
- isTrack = true;
-
- Track *track = new Track;
- track->next = NULL;
- if (mLastTrack) {
- mLastTrack->next = track;
- } else {
- mFirstTrack = track;
- }
- mLastTrack = track;
-
- track->meta = new MetaData;
- track->includes_expensive_metadata = false;
- track->skipTrack = false;
- track->timescale = 0;
- track->meta->setCString(kKeyMIMEType, "application/octet-stream");
- track->has_elst = false;
- }
-
- off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset;
- while (*offset < stop_offset) {
- status_t err = parseChunk(offset, depth + 1);
- if (err != OK) {
- if (isTrack) {
- mLastTrack->skipTrack = true;
- break;
- }
- return err;
- }
- }
-
- if (*offset != stop_offset) {
- return ERROR_MALFORMED;
- }
-
- if (isTrack) {
- int32_t trackId;
-                // There must be exactly one track header per track.
- if (!mLastTrack->meta->findInt32(kKeyTrackID, &trackId)) {
- mLastTrack->skipTrack = true;
- }
-
- status_t err = verifyTrack(mLastTrack);
- if (err != OK) {
- mLastTrack->skipTrack = true;
- }
-
- if (mLastTrack->skipTrack) {
- Track *cur = mFirstTrack;
-
- if (cur == mLastTrack) {
- delete cur;
- mFirstTrack = mLastTrack = NULL;
- } else {
- while (cur && cur->next != mLastTrack) {
- cur = cur->next;
- }
- if (cur) {
- cur->next = NULL;
- }
- delete mLastTrack;
- mLastTrack = cur;
- }
-
- return OK;
- }
- } else if (chunk_type == FOURCC('m', 'o', 'o', 'v')) {
- mInitCheck = OK;
-
- if (!mIsDrm) {
- return UNKNOWN_ERROR; // Return a dummy error.
- } else {
- return OK;
- }
- }
- break;
- }
-
- case FOURCC('e', 'l', 's', 't'):
- {
- *offset += chunk_size;
-
- if (!mLastTrack) {
- return ERROR_MALFORMED;
- }
-
- // See 14496-12 8.6.6
- uint8_t version;
- if (mDataSource->readAt(data_offset, &version, 1) < 1) {
- return ERROR_IO;
- }
-
- uint32_t entry_count;
- if (!mDataSource->getUInt32(data_offset + 4, &entry_count)) {
- return ERROR_IO;
- }
-
- if (entry_count != 1) {
- // we only support a single entry at the moment, for gapless playback
- ALOGW("ignoring edit list with %d entries", entry_count);
- } else {
- off64_t entriesoffset = data_offset + 8;
- uint64_t segment_duration;
- int64_t media_time;
-
- if (version == 1) {
- if (!mDataSource->getUInt64(entriesoffset, &segment_duration) ||
- !mDataSource->getUInt64(entriesoffset + 8, (uint64_t*)&media_time)) {
- return ERROR_IO;
- }
- } else if (version == 0) {
- uint32_t sd;
- int32_t mt;
- if (!mDataSource->getUInt32(entriesoffset, &sd) ||
- !mDataSource->getUInt32(entriesoffset + 4, (uint32_t*)&mt)) {
- return ERROR_IO;
- }
- segment_duration = sd;
- media_time = mt;
- } else {
- return ERROR_IO;
- }
-
- // save these for later, because the elst atom might precede
-                // the atoms that actually give us the duration and sample rate
- // needed to calculate the padding and delay values
- mLastTrack->has_elst = true;
- mLastTrack->elst_media_time = media_time;
- mLastTrack->elst_segment_duration = segment_duration;
- }
- break;
- }
-
- case FOURCC('f', 'r', 'm', 'a'):
- {
- *offset += chunk_size;
-
- uint32_t original_fourcc;
- if (mDataSource->readAt(data_offset, &original_fourcc, 4) < 4) {
- return ERROR_IO;
- }
- original_fourcc = ntohl(original_fourcc);
- ALOGV("read original format: %d", original_fourcc);
-
- if (mLastTrack == NULL) {
- return ERROR_MALFORMED;
- }
-
- mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(original_fourcc));
- uint32_t num_channels = 0;
- uint32_t sample_rate = 0;
- if (AdjustChannelsAndRate(original_fourcc, &num_channels, &sample_rate)) {
- mLastTrack->meta->setInt32(kKeyChannelCount, num_channels);
- mLastTrack->meta->setInt32(kKeySampleRate, sample_rate);
- }
- break;
- }
-
- case FOURCC('t', 'e', 'n', 'c'):
- {
- *offset += chunk_size;
-
- if (chunk_size < 32) {
- return ERROR_MALFORMED;
- }
-
-            // The tenc box contains a 1-byte version, 3-byte flags, a 3-byte default algorithm id,
-            // a 1-byte default IV size, and a 16-byte default KeyID
-            // (ISO 23001-7)
- char buf[4];
- memset(buf, 0, 4);
- if (mDataSource->readAt(data_offset + 4, buf + 1, 3) < 3) {
- return ERROR_IO;
- }
- uint32_t defaultAlgorithmId = ntohl(*((int32_t*)buf));
- if (defaultAlgorithmId > 1) {
- // only 0 (clear) and 1 (AES-128) are valid
- return ERROR_MALFORMED;
- }
-
- memset(buf, 0, 4);
- if (mDataSource->readAt(data_offset + 7, buf + 3, 1) < 1) {
- return ERROR_IO;
- }
- uint32_t defaultIVSize = ntohl(*((int32_t*)buf));
-
- if ((defaultAlgorithmId == 0 && defaultIVSize != 0) ||
- (defaultAlgorithmId != 0 && defaultIVSize == 0)) {
-                // the IV size must be 0 if and only if the data is unencrypted
- return ERROR_MALFORMED;
- } else if (defaultIVSize != 0 &&
- defaultIVSize != 8 &&
- defaultIVSize != 16) {
- // only supported sizes are 0, 8 and 16
- return ERROR_MALFORMED;
- }
-
- uint8_t defaultKeyId[16];
-
- if (mDataSource->readAt(data_offset + 8, &defaultKeyId, 16) < 16) {
- return ERROR_IO;
- }
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- mLastTrack->meta->setInt32(kKeyCryptoMode, defaultAlgorithmId);
- mLastTrack->meta->setInt32(kKeyCryptoDefaultIVSize, defaultIVSize);
- mLastTrack->meta->setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16);
- break;
- }
-
- case FOURCC('t', 'k', 'h', 'd'):
- {
- *offset += chunk_size;
-
- status_t err;
- if ((err = parseTrackHeader(data_offset, chunk_data_size)) != OK) {
- return err;
- }
-
- break;
- }
-
- case FOURCC('p', 's', 's', 'h'):
- {
- *offset += chunk_size;
-
- PsshInfo pssh;
-
- if (mDataSource->readAt(data_offset + 4, &pssh.uuid, 16) < 16) {
- return ERROR_IO;
- }
-
- uint32_t psshdatalen = 0;
- if (mDataSource->readAt(data_offset + 20, &psshdatalen, 4) < 4) {
- return ERROR_IO;
- }
- pssh.datalen = ntohl(psshdatalen);
- ALOGV("pssh data size: %d", pssh.datalen);
- if (chunk_size < 20 || pssh.datalen > chunk_size - 20) {
- // pssh data length exceeds size of containing box
- return ERROR_MALFORMED;
- }
-
- pssh.data = new (std::nothrow) uint8_t[pssh.datalen];
- if (pssh.data == NULL) {
- return ERROR_MALFORMED;
- }
- ALOGV("allocated pssh @ %p", pssh.data);
- ssize_t requested = (ssize_t) pssh.datalen;
- if (mDataSource->readAt(data_offset + 24, pssh.data, requested) < requested) {
- delete[] pssh.data;
- return ERROR_IO;
- }
- mPssh.push_back(pssh);
-
- break;
- }
-
- case FOURCC('m', 'd', 'h', 'd'):
- {
- *offset += chunk_size;
-
- if (chunk_data_size < 4 || mLastTrack == NULL) {
- return ERROR_MALFORMED;
- }
-
- uint8_t version;
- if (mDataSource->readAt(
- data_offset, &version, sizeof(version))
- < (ssize_t)sizeof(version)) {
- return ERROR_IO;
- }
-
- off64_t timescale_offset;
-
- if (version == 1) {
- timescale_offset = data_offset + 4 + 16;
- } else if (version == 0) {
- timescale_offset = data_offset + 4 + 8;
- } else {
- return ERROR_IO;
- }
-
- uint32_t timescale;
- if (mDataSource->readAt(
-                    timescale_offset, &timescale, sizeof(timescale))
- < (ssize_t)sizeof(timescale)) {
- return ERROR_IO;
- }
-
- if (!timescale) {
- ALOGE("timescale should not be ZERO.");
- return ERROR_MALFORMED;
- }
-
- mLastTrack->timescale = ntohl(timescale);
-
- // 14496-12 says all ones means indeterminate, but some files seem to use
- // 0 instead. We treat both the same.
- int64_t duration = 0;
- if (version == 1) {
- if (mDataSource->readAt(
- timescale_offset + 4, &duration, sizeof(duration))
- < (ssize_t)sizeof(duration)) {
- return ERROR_IO;
- }
- if (duration != -1) {
- duration = ntoh64(duration);
- }
- } else {
- uint32_t duration32;
- if (mDataSource->readAt(
- timescale_offset + 4, &duration32, sizeof(duration32))
- < (ssize_t)sizeof(duration32)) {
- return ERROR_IO;
- }
- if (duration32 != 0xffffffff) {
- duration = ntohl(duration32);
- }
- }
- if (duration != 0 && mLastTrack->timescale != 0) {
- mLastTrack->meta->setInt64(
- kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
- }
-
- uint8_t lang[2];
- off64_t lang_offset;
- if (version == 1) {
- lang_offset = timescale_offset + 4 + 8;
- } else if (version == 0) {
- lang_offset = timescale_offset + 4 + 4;
- } else {
- return ERROR_IO;
- }
-
- if (mDataSource->readAt(lang_offset, &lang, sizeof(lang))
- < (ssize_t)sizeof(lang)) {
- return ERROR_IO;
- }
-
-            // The ISO-639-2/T three-character language code is packed as a
-            // 1-bit pad followed by three 5-bit characters. Each character
-            // is stored as the difference between its ASCII value and 0x60.
- char lang_code[4];
- lang_code[0] = ((lang[0] >> 2) & 0x1f) + 0x60;
- lang_code[1] = ((lang[0] & 0x3) << 3 | (lang[1] >> 5)) + 0x60;
- lang_code[2] = (lang[1] & 0x1f) + 0x60;
- lang_code[3] = '\0';
-
- mLastTrack->meta->setCString(
- kKeyMediaLanguage, lang_code);
-
- break;
- }
-
- case FOURCC('s', 't', 's', 'd'):
- {
- uint8_t buffer[8];
- if (chunk_data_size < (off64_t)sizeof(buffer)) {
- return ERROR_MALFORMED;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer, 8) < 8) {
- return ERROR_IO;
- }
-
- if (U32_AT(buffer) != 0) {
- // Should be version 0, flags 0.
- return ERROR_MALFORMED;
- }
-
- uint32_t entry_count = U32_AT(&buffer[4]);
-
- if (entry_count > 1) {
-                // For 3GPP timed text, there could be multiple tx3g boxes containing
-                // multiple text display formats. These formats will be used to
-                // display the timed text.
- // For encrypted files, there may also be more than one entry.
- const char *mime;
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
- if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) &&
- strcasecmp(mime, "application/octet-stream")) {
- // For now we only support a single type of media per track.
- mLastTrack->skipTrack = true;
- *offset += chunk_size;
- break;
- }
- }
- off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset + 8;
- for (uint32_t i = 0; i < entry_count; ++i) {
- status_t err = parseChunk(offset, depth + 1);
- if (err != OK) {
- return err;
- }
- }
-
- if (*offset != stop_offset) {
- return ERROR_MALFORMED;
- }
- break;
- }
- case FOURCC('m', 'e', 't', 't'):
- {
- *offset += chunk_size;
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- sp<ABuffer> buffer = new ABuffer(chunk_data_size);
- if (buffer->data() == NULL) {
- return NO_MEMORY;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer->data(), chunk_data_size) < chunk_data_size) {
- return ERROR_IO;
- }
-
- String8 mimeFormat((const char *)(buffer->data()), chunk_data_size);
- mLastTrack->meta->setCString(kKeyMIMEType, mimeFormat.string());
-
- break;
- }
-
- case FOURCC('m', 'p', '4', 'a'):
- case FOURCC('e', 'n', 'c', 'a'):
- case FOURCC('s', 'a', 'm', 'r'):
- case FOURCC('s', 'a', 'w', 'b'):
- {
- if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')
- && depth >= 1 && mPath[depth - 1] == FOURCC('w', 'a', 'v', 'e')) {
- // Ignore mp4a embedded in QT wave atom
- *offset += chunk_size;
- break;
- }
-
- uint8_t buffer[8 + 20];
- if (chunk_data_size < (ssize_t)sizeof(buffer)) {
- // Basic AudioSampleEntry size.
- return ERROR_MALFORMED;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer, sizeof(buffer)) < (ssize_t)sizeof(buffer)) {
- return ERROR_IO;
- }
-
- uint16_t data_ref_index __unused = U16_AT(&buffer[6]);
- uint16_t version = U16_AT(&buffer[8]);
- uint32_t num_channels = U16_AT(&buffer[16]);
-
- uint16_t sample_size = U16_AT(&buffer[18]);
- uint32_t sample_rate = U32_AT(&buffer[24]) >> 16;
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset + sizeof(buffer);
-
- if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')) {
- if (version == 1) {
- if (mDataSource->readAt(*offset, buffer, 16) < 16) {
- return ERROR_IO;
- }
-
-#if 0
- U32_AT(buffer); // samples per packet
- U32_AT(&buffer[4]); // bytes per packet
- U32_AT(&buffer[8]); // bytes per frame
- U32_AT(&buffer[12]); // bytes per sample
-#endif
- *offset += 16;
- } else if (version == 2) {
- uint8_t v2buffer[36];
- if (mDataSource->readAt(*offset, v2buffer, 36) < 36) {
- return ERROR_IO;
- }
-
-#if 0
- U32_AT(v2buffer); // size of struct only
- sample_rate = (uint32_t)U64_AT(&v2buffer[4]); // audio sample rate
- num_channels = U32_AT(&v2buffer[12]); // num audio channels
- U32_AT(&v2buffer[16]); // always 0x7f000000
- sample_size = (uint16_t)U32_AT(&v2buffer[20]); // const bits per channel
-                    U32_AT(&v2buffer[24]); // format specific flags
- U32_AT(&v2buffer[28]); // const bytes per audio packet
- U32_AT(&v2buffer[32]); // const LPCM frames per audio packet
-#endif
- *offset += 36;
- }
- }
-
- if (chunk_type != FOURCC('e', 'n', 'c', 'a')) {
- // if the chunk type is enca, we'll get the type from the sinf/frma box later
- mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
- AdjustChannelsAndRate(chunk_type, &num_channels, &sample_rate);
- }
- ALOGV("*** coding='%s' %d channels, size %d, rate %d\n",
- chunk, num_channels, sample_size, sample_rate);
- mLastTrack->meta->setInt32(kKeyChannelCount, num_channels);
- mLastTrack->meta->setInt32(kKeySampleRate, sample_rate);
-
- while (*offset < stop_offset) {
- status_t err = parseChunk(offset, depth + 1);
- if (err != OK) {
- return err;
- }
- }
-
- if (*offset != stop_offset) {
- return ERROR_MALFORMED;
- }
- break;
- }
-
- case FOURCC('m', 'p', '4', 'v'):
- case FOURCC('e', 'n', 'c', 'v'):
- case FOURCC('s', '2', '6', '3'):
- case FOURCC('H', '2', '6', '3'):
- case FOURCC('h', '2', '6', '3'):
- case FOURCC('a', 'v', 'c', '1'):
- case FOURCC('h', 'v', 'c', '1'):
- case FOURCC('h', 'e', 'v', '1'):
- {
- uint8_t buffer[78];
- if (chunk_data_size < (ssize_t)sizeof(buffer)) {
- // Basic VideoSampleEntry size.
- return ERROR_MALFORMED;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer, sizeof(buffer)) < (ssize_t)sizeof(buffer)) {
- return ERROR_IO;
- }
-
- uint16_t data_ref_index __unused = U16_AT(&buffer[6]);
- uint16_t width = U16_AT(&buffer[6 + 18]);
- uint16_t height = U16_AT(&buffer[6 + 20]);
-
-            // The video sample is not standard-compliant if it has invalid dimensions.
-            // Use default width and height values, and
-            // let the decoder figure out the actual width and height (and thus
-            // be prepared for an INFO_FORMAT_CHANGED event).
- if (width == 0) width = 352;
- if (height == 0) height = 288;
-
- // printf("*** coding='%s' width=%d height=%d\n",
- // chunk, width, height);
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- if (chunk_type != FOURCC('e', 'n', 'c', 'v')) {
- // if the chunk type is encv, we'll get the type from the sinf/frma box later
- mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
- }
- mLastTrack->meta->setInt32(kKeyWidth, width);
- mLastTrack->meta->setInt32(kKeyHeight, height);
-
- off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset + sizeof(buffer);
- while (*offset < stop_offset) {
- status_t err = parseChunk(offset, depth + 1);
- if (err != OK) {
- return err;
- }
- }
-
- if (*offset != stop_offset) {
- return ERROR_MALFORMED;
- }
- break;
- }
-
- case FOURCC('s', 't', 'c', 'o'):
- case FOURCC('c', 'o', '6', '4'):
- {
- if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL)) {
- return ERROR_MALFORMED;
- }
-
- status_t err =
- mLastTrack->sampleTable->setChunkOffsetParams(
- chunk_type, data_offset, chunk_data_size);
-
- *offset += chunk_size;
-
- if (err != OK) {
- return err;
- }
-
- break;
- }
-
- case FOURCC('s', 't', 's', 'c'):
- {
- if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
- return ERROR_MALFORMED;
-
- status_t err =
- mLastTrack->sampleTable->setSampleToChunkParams(
- data_offset, chunk_data_size);
-
- *offset += chunk_size;
-
- if (err != OK) {
- return err;
- }
-
- break;
- }
-
- case FOURCC('s', 't', 's', 'z'):
- case FOURCC('s', 't', 'z', '2'):
- {
- if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL)) {
- return ERROR_MALFORMED;
- }
-
- status_t err =
- mLastTrack->sampleTable->setSampleSizeParams(
- chunk_type, data_offset, chunk_data_size);
-
- *offset += chunk_size;
-
- if (err != OK) {
- return err;
- }
-
- size_t max_size;
- err = mLastTrack->sampleTable->getMaxSampleSize(&max_size);
-
- if (err != OK) {
- return err;
- }
-
- if (max_size != 0) {
-                // Assume that a given buffer contains at most 10 chunks;
-                // each chunk, originally prefixed with a 2-byte length, will
-                // have a 4-byte header (0x00 0x00 0x00 0x01) after conversion,
-                // and thus will grow by 2 bytes per chunk.
- if (max_size > SIZE_MAX - 10 * 2) {
- ALOGE("max sample size too big: %zu", max_size);
- return ERROR_MALFORMED;
- }
- mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size + 10 * 2);
- } else {
- // No size was specified. Pick a conservatively large size.
- uint32_t width, height;
- if (!mLastTrack->meta->findInt32(kKeyWidth, (int32_t*)&width) ||
- !mLastTrack->meta->findInt32(kKeyHeight,(int32_t*) &height)) {
- ALOGE("No width or height, assuming worst case 1080p");
- width = 1920;
- height = 1080;
- } else {
-                    // A resolution was specified; check that it's not too big. The values below
-                    // were chosen so that the calculations below don't cause overflows; they're
-                    // not indicating that resolutions up to 32kx32k are actually supported.
- if (width > 32768 || height > 32768) {
- ALOGE("can't support %u x %u video", width, height);
- return ERROR_MALFORMED;
- }
- }
-
- const char *mime;
- CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
- if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
- || !strcmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
-                    // AVC & HEVC require a compression ratio of at least 2, and use
-                    // macroblocks
- max_size = ((width + 15) / 16) * ((height + 15) / 16) * 192;
- } else {
- // For all other formats there is no minimum compression
- // ratio. Use compression ratio of 1.
- max_size = width * height * 3 / 2;
- }
- // HACK: allow 10% overhead
- // TODO: read sample size from traf atom for fragmented MPEG4.
- max_size += max_size / 10;
- mLastTrack->meta->setInt32(kKeyMaxInputSize, max_size);
- }
-
- // NOTE: setting another piece of metadata invalidates any pointers (such as the
- // mimetype) previously obtained, so don't cache them.
- const char *mime;
- CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
- // Calculate average frame rate.
- if (!strncasecmp("video/", mime, 6)) {
- size_t nSamples = mLastTrack->sampleTable->countSamples();
- if (nSamples == 0) {
- int32_t trackId;
- if (mLastTrack->meta->findInt32(kKeyTrackID, &trackId)) {
- for (size_t i = 0; i < mTrex.size(); i++) {
- Trex *t = &mTrex.editItemAt(i);
- if (t->track_ID == (uint32_t) trackId) {
- if (t->default_sample_duration > 0) {
- int32_t frameRate =
- mLastTrack->timescale / t->default_sample_duration;
- mLastTrack->meta->setInt32(kKeyFrameRate, frameRate);
- }
- break;
- }
- }
- }
- } else {
- int64_t durationUs;
- if (mLastTrack->meta->findInt64(kKeyDuration, &durationUs)) {
- if (durationUs > 0) {
- int32_t frameRate = (nSamples * 1000000LL +
- (durationUs >> 1)) / durationUs;
- mLastTrack->meta->setInt32(kKeyFrameRate, frameRate);
- }
- }
- }
- }
-
- break;
- }
-
- case FOURCC('s', 't', 't', 's'):
- {
- if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
- return ERROR_MALFORMED;
-
- *offset += chunk_size;
-
- status_t err =
- mLastTrack->sampleTable->setTimeToSampleParams(
- data_offset, chunk_data_size);
-
- if (err != OK) {
- return err;
- }
-
- break;
- }
-
- case FOURCC('c', 't', 't', 's'):
- {
- if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
- return ERROR_MALFORMED;
-
- *offset += chunk_size;
-
- status_t err =
- mLastTrack->sampleTable->setCompositionTimeToSampleParams(
- data_offset, chunk_data_size);
-
- if (err != OK) {
- return err;
- }
-
- break;
- }
-
- case FOURCC('s', 't', 's', 's'):
- {
- if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
- return ERROR_MALFORMED;
-
- *offset += chunk_size;
-
- status_t err =
- mLastTrack->sampleTable->setSyncSampleParams(
- data_offset, chunk_data_size);
-
- if (err != OK) {
- return err;
- }
-
- break;
- }
-
- // \xA9xyz
- case FOURCC(0xA9, 'x', 'y', 'z'):
- {
- *offset += chunk_size;
-
-            // In the smallest valid case, the total data length inside the "\xA9xyz" box would
-            // be 9, for instance "\xA9xyz" + "\x00\x05\x15\xc7" + "+0+0/",
-            // where "\x00\x05" is the text string length with value = 5,
-            // "\x15\xc7" is the language code = en, and "+0+0/" is a
-            // location (string) value with longitude = 0 and latitude = 0.
- // Since some devices encountered in the wild omit the trailing
- // slash, we'll allow that.
- if (chunk_data_size < 8) { // 8 instead of 9 to allow for missing /
- return ERROR_MALFORMED;
- }
-
- uint16_t len;
- if (!mDataSource->getUInt16(data_offset, &len)) {
- return ERROR_IO;
- }
-
- // allow "+0+0" without trailing slash
- if (len < 4 || len > chunk_data_size - 4) {
- return ERROR_MALFORMED;
- }
- // The location string following the language code is formatted
- // according to ISO 6709:2008 (https://en.wikipedia.org/wiki/ISO_6709).
- // Allocate 2 extra bytes, in case we need to add a trailing slash,
- // and to add a terminating 0.
- std::unique_ptr<char[]> buffer(new (std::nothrow) char[len+2]());
- if (!buffer) {
- return NO_MEMORY;
- }
-
- if (mDataSource->readAt(
- data_offset + 4, &buffer[0], len) < len) {
- return ERROR_IO;
- }
-
- len = strlen(&buffer[0]);
- if (len < 4) {
- return ERROR_MALFORMED;
- }
- // Add a trailing slash if there wasn't one.
- if (buffer[len - 1] != '/') {
- buffer[len] = '/';
- }
- mFileMetaData->setCString(kKeyLocation, &buffer[0]);
- break;
- }
-
- case FOURCC('e', 's', 'd', 's'):
- {
- *offset += chunk_size;
-
- if (chunk_data_size < 4) {
- return ERROR_MALFORMED;
- }
-
- uint8_t buffer[256];
- if (chunk_data_size > (off64_t)sizeof(buffer)) {
- return ERROR_BUFFER_TOO_SMALL;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer, chunk_data_size) < chunk_data_size) {
- return ERROR_IO;
- }
-
- if (U32_AT(buffer) != 0) {
- // Should be version 0, flags 0.
- return ERROR_MALFORMED;
- }
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- mLastTrack->meta->setData(
- kKeyESDS, kTypeESDS, &buffer[4], chunk_data_size - 4);
-
- if (mPath.size() >= 2
- && mPath[mPath.size() - 2] == FOURCC('m', 'p', '4', 'a')) {
- // Information from the ESDS must be relied on for proper
- // setup of sample rate and channel count for MPEG4 Audio.
- // The generic header appears to only contain generic
- // information...
-
- status_t err = updateAudioTrackInfoFromESDS_MPEG4Audio(
- &buffer[4], chunk_data_size - 4);
-
- if (err != OK) {
- return err;
- }
- }
- if (mPath.size() >= 2
- && mPath[mPath.size() - 2] == FOURCC('m', 'p', '4', 'v')) {
- // Check if the video is MPEG2
- ESDS esds(&buffer[4], chunk_data_size - 4);
-
- uint8_t objectTypeIndication;
- if (esds.getObjectTypeIndication(&objectTypeIndication) == OK) {
- if (objectTypeIndication >= 0x60 && objectTypeIndication <= 0x65) {
- mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
- }
- }
- }
- break;
- }
-
- case FOURCC('b', 't', 'r', 't'):
- {
- *offset += chunk_size;
- if (mLastTrack == NULL) {
- return ERROR_MALFORMED;
- }
-
- uint8_t buffer[12];
- if (chunk_data_size != sizeof(buffer)) {
- return ERROR_MALFORMED;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer, chunk_data_size) < chunk_data_size) {
- return ERROR_IO;
- }
-
- uint32_t maxBitrate = U32_AT(&buffer[4]);
- uint32_t avgBitrate = U32_AT(&buffer[8]);
- if (maxBitrate > 0 && maxBitrate < INT32_MAX) {
- mLastTrack->meta->setInt32(kKeyMaxBitRate, (int32_t)maxBitrate);
- }
- if (avgBitrate > 0 && avgBitrate < INT32_MAX) {
- mLastTrack->meta->setInt32(kKeyBitRate, (int32_t)avgBitrate);
- }
- break;
- }
-
- case FOURCC('a', 'v', 'c', 'C'):
- {
- *offset += chunk_size;
-
- sp<ABuffer> buffer = new ABuffer(chunk_data_size);
-
- if (buffer->data() == NULL) {
- ALOGE("b/28471206");
- return NO_MEMORY;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer->data(), chunk_data_size) < chunk_data_size) {
- return ERROR_IO;
- }
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- mLastTrack->meta->setData(
- kKeyAVCC, kTypeAVCC, buffer->data(), chunk_data_size);
-
- break;
- }
- case FOURCC('h', 'v', 'c', 'C'):
- {
- sp<ABuffer> buffer = new ABuffer(chunk_data_size);
-
- if (buffer->data() == NULL) {
- ALOGE("b/28471206");
- return NO_MEMORY;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer->data(), chunk_data_size) < chunk_data_size) {
- return ERROR_IO;
- }
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- mLastTrack->meta->setData(
- kKeyHVCC, kTypeHVCC, buffer->data(), chunk_data_size);
-
- *offset += chunk_size;
- break;
- }
-
- case FOURCC('d', '2', '6', '3'):
- {
- *offset += chunk_size;
- /*
- * d263 contains a fixed 7 bytes part:
- * vendor - 4 bytes
- * version - 1 byte
- * level - 1 byte
- * profile - 1 byte
- * optionally, "d263" box itself may contain a 16-byte
- * bit rate box (bitr)
- * average bit rate - 4 bytes
- * max bit rate - 4 bytes
- */
- char buffer[23];
- if (chunk_data_size != 7 &&
- chunk_data_size != 23) {
- ALOGE("Incorrect D263 box size %lld", (long long)chunk_data_size);
- return ERROR_MALFORMED;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer, chunk_data_size) < chunk_data_size) {
- return ERROR_IO;
- }
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- mLastTrack->meta->setData(kKeyD263, kTypeD263, buffer, chunk_data_size);
-
- break;
- }
-
- case FOURCC('m', 'e', 't', 'a'):
- {
- off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset;
- bool isParsingMetaKeys = underQTMetaPath(mPath, 2);
- if (!isParsingMetaKeys) {
- uint8_t buffer[4];
- if (chunk_data_size < (off64_t)sizeof(buffer)) {
- *offset = stop_offset;
- return ERROR_MALFORMED;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer, 4) < 4) {
- *offset = stop_offset;
- return ERROR_IO;
- }
-
- if (U32_AT(buffer) != 0) {
- // Should be version 0, flags 0.
-
-                    // If it's not, let's assume this is one of those
-                    // apparently malformed chunks that don't have flags
-                    // and have completely different semantics than what's
-                    // in the MPEG4 specs, and skip it.
- *offset = stop_offset;
- return OK;
- }
- *offset += sizeof(buffer);
- }
-
- while (*offset < stop_offset) {
- status_t err = parseChunk(offset, depth + 1);
- if (err != OK) {
- return err;
- }
- }
-
- if (*offset != stop_offset) {
- return ERROR_MALFORMED;
- }
- break;
- }
-
- case FOURCC('i', 'l', 'o', 'c'):
- case FOURCC('i', 'i', 'n', 'f'):
- case FOURCC('i', 'p', 'r', 'p'):
- case FOURCC('p', 'i', 't', 'm'):
- case FOURCC('i', 'd', 'a', 't'):
- case FOURCC('i', 'r', 'e', 'f'):
- case FOURCC('i', 'p', 'r', 'o'):
- {
- if (mIsHEIF) {
- if (mItemTable == NULL) {
- mItemTable = new ItemTable(mDataSource);
- }
- status_t err = mItemTable->parse(
- chunk_type, data_offset, chunk_data_size);
- if (err != OK) {
- return err;
- }
- }
- *offset += chunk_size;
- break;
- }
-
- case FOURCC('m', 'e', 'a', 'n'):
- case FOURCC('n', 'a', 'm', 'e'):
- case FOURCC('d', 'a', 't', 'a'):
- {
- *offset += chunk_size;
-
- if (mPath.size() == 6 && underMetaDataPath(mPath)) {
- status_t err = parseITunesMetaData(data_offset, chunk_data_size);
-
- if (err != OK) {
- return err;
- }
- }
-
- break;
- }
-
- case FOURCC('m', 'v', 'h', 'd'):
- {
- *offset += chunk_size;
-
- if (depth != 1) {
- ALOGE("mvhd: depth %d", depth);
- return ERROR_MALFORMED;
- }
- if (chunk_data_size < 32) {
- return ERROR_MALFORMED;
- }
-
- uint8_t header[32];
- if (mDataSource->readAt(
- data_offset, header, sizeof(header))
- < (ssize_t)sizeof(header)) {
- return ERROR_IO;
- }
-
- uint64_t creationTime;
- uint64_t duration = 0;
- if (header[0] == 1) {
- creationTime = U64_AT(&header[4]);
- mHeaderTimescale = U32_AT(&header[20]);
- duration = U64_AT(&header[24]);
- if (duration == 0xffffffffffffffff) {
- duration = 0;
- }
- } else if (header[0] != 0) {
- return ERROR_MALFORMED;
- } else {
- creationTime = U32_AT(&header[4]);
- mHeaderTimescale = U32_AT(&header[12]);
- uint32_t d32 = U32_AT(&header[16]);
- if (d32 == 0xffffffff) {
- d32 = 0;
- }
- duration = d32;
- }
- if (duration != 0 && mHeaderTimescale != 0 && duration < UINT64_MAX / 1000000) {
- mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
- }
-
- String8 s;
- if (convertTimeToDate(creationTime, &s)) {
- mFileMetaData->setCString(kKeyDate, s.string());
- }
-
-
- break;
- }
-
- case FOURCC('m', 'e', 'h', 'd'):
- {
- *offset += chunk_size;
-
- if (chunk_data_size < 8) {
- return ERROR_MALFORMED;
- }
-
- uint8_t flags[4];
- if (mDataSource->readAt(
- data_offset, flags, sizeof(flags))
- < (ssize_t)sizeof(flags)) {
- return ERROR_IO;
- }
-
- uint64_t duration = 0;
- if (flags[0] == 1) {
- // 64 bit
- if (chunk_data_size < 12) {
- return ERROR_MALFORMED;
- }
- mDataSource->getUInt64(data_offset + 4, &duration);
- if (duration == 0xffffffffffffffff) {
- duration = 0;
- }
- } else if (flags[0] == 0) {
- // 32 bit
- uint32_t d32;
- mDataSource->getUInt32(data_offset + 4, &d32);
- if (d32 == 0xffffffff) {
- d32 = 0;
- }
- duration = d32;
- } else {
- return ERROR_MALFORMED;
- }
-
- if (duration != 0 && mHeaderTimescale != 0) {
- mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
- }
-
- break;
- }
-
- case FOURCC('m', 'd', 'a', 't'):
- {
- ALOGV("mdat chunk, drm: %d", mIsDrm);
-
- mMdatFound = true;
-
- if (!mIsDrm) {
- *offset += chunk_size;
- break;
- }
-
- if (chunk_size < 8) {
- return ERROR_MALFORMED;
- }
-
- return parseDrmSINF(offset, data_offset);
- }
-
- case FOURCC('h', 'd', 'l', 'r'):
- {
- *offset += chunk_size;
-
- if (underQTMetaPath(mPath, 3)) {
- break;
- }
-
- uint32_t buffer;
- if (mDataSource->readAt(
- data_offset + 8, &buffer, 4) < 4) {
- return ERROR_IO;
- }
-
- uint32_t type = ntohl(buffer);
- // For the 3GPP file format, the handler-type within the 'hdlr' box
-            // shall be 'text'. We also want to support the 'sbtl' handler type
-            // for practical reasons, as various MPEG4 containers use it.
- if (type == FOURCC('t', 'e', 'x', 't') || type == FOURCC('s', 'b', 't', 'l')) {
- if (mLastTrack != NULL) {
- mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_TEXT_3GPP);
- }
- }
-
- break;
- }
-
- case FOURCC('k', 'e', 'y', 's'):
- {
- *offset += chunk_size;
-
- if (underQTMetaPath(mPath, 3)) {
- status_t err = parseQTMetaKey(data_offset, chunk_data_size);
- if (err != OK) {
- return err;
- }
- }
- break;
- }
-
- case FOURCC('t', 'r', 'e', 'x'):
- {
- *offset += chunk_size;
-
- if (chunk_data_size < 24) {
- return ERROR_IO;
- }
- Trex trex;
- if (!mDataSource->getUInt32(data_offset + 4, &trex.track_ID) ||
- !mDataSource->getUInt32(data_offset + 8, &trex.default_sample_description_index) ||
- !mDataSource->getUInt32(data_offset + 12, &trex.default_sample_duration) ||
- !mDataSource->getUInt32(data_offset + 16, &trex.default_sample_size) ||
- !mDataSource->getUInt32(data_offset + 20, &trex.default_sample_flags)) {
- return ERROR_IO;
- }
- mTrex.add(trex);
- break;
- }
-
- case FOURCC('t', 'x', '3', 'g'):
- {
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- uint32_t type;
- const void *data;
- size_t size = 0;
- if (!mLastTrack->meta->findData(
- kKeyTextFormatData, &type, &data, &size)) {
- size = 0;
- }
-
- if ((chunk_size > SIZE_MAX) || (SIZE_MAX - chunk_size <= size)) {
- return ERROR_MALFORMED;
- }
-
- uint8_t *buffer = new (std::nothrow) uint8_t[size + chunk_size];
- if (buffer == NULL) {
- return ERROR_MALFORMED;
- }
-
- if (size > 0) {
- memcpy(buffer, data, size);
- }
-
- if ((size_t)(mDataSource->readAt(*offset, buffer + size, chunk_size))
- < chunk_size) {
- delete[] buffer;
- buffer = NULL;
-
- // advance read pointer so we don't end up reading this again
- *offset += chunk_size;
- return ERROR_IO;
- }
-
- mLastTrack->meta->setData(
- kKeyTextFormatData, 0, buffer, size + chunk_size);
-
- delete[] buffer;
-
- *offset += chunk_size;
- break;
- }
-
- case FOURCC('c', 'o', 'v', 'r'):
- {
- *offset += chunk_size;
-
- if (mFileMetaData != NULL) {
- ALOGV("chunk_data_size = %" PRId64 " and data_offset = %" PRId64,
- chunk_data_size, data_offset);
-
- if (chunk_data_size < 0 || static_cast<uint64_t>(chunk_data_size) >= SIZE_MAX - 1) {
- return ERROR_MALFORMED;
- }
- sp<ABuffer> buffer = new ABuffer(chunk_data_size + 1);
- if (buffer->data() == NULL) {
- ALOGE("b/28471206");
- return NO_MEMORY;
- }
- if (mDataSource->readAt(
- data_offset, buffer->data(), chunk_data_size) != (ssize_t)chunk_data_size) {
- return ERROR_IO;
- }
- const int kSkipBytesOfDataBox = 16;
- if (chunk_data_size <= kSkipBytesOfDataBox) {
- return ERROR_MALFORMED;
- }
-
- mFileMetaData->setData(
- kKeyAlbumArt, MetaData::TYPE_NONE,
- buffer->data() + kSkipBytesOfDataBox, chunk_data_size - kSkipBytesOfDataBox);
- }
-
- break;
- }
-
- case FOURCC('c', 'o', 'l', 'r'):
- {
- *offset += chunk_size;
- // this must be in a VisualSampleEntry box under the Sample Description Box ('stsd')
- // ignore otherwise
- if (depth >= 2 && mPath[depth - 2] == FOURCC('s', 't', 's', 'd')) {
- status_t err = parseColorInfo(data_offset, chunk_data_size);
- if (err != OK) {
- return err;
- }
- }
-
- break;
- }
-
- case FOURCC('t', 'i', 't', 'l'):
- case FOURCC('p', 'e', 'r', 'f'):
- case FOURCC('a', 'u', 't', 'h'):
- case FOURCC('g', 'n', 'r', 'e'):
- case FOURCC('a', 'l', 'b', 'm'):
- case FOURCC('y', 'r', 'r', 'c'):
- {
- *offset += chunk_size;
-
- status_t err = parse3GPPMetaData(data_offset, chunk_data_size, depth);
-
- if (err != OK) {
- return err;
- }
-
- break;
- }
-
- case FOURCC('I', 'D', '3', '2'):
- {
- *offset += chunk_size;
-
- if (chunk_data_size < 6) {
- return ERROR_MALFORMED;
- }
-
- parseID3v2MetaData(data_offset + 6);
-
- break;
- }
-
- case FOURCC('-', '-', '-', '-'):
- {
- mLastCommentMean.clear();
- mLastCommentName.clear();
- mLastCommentData.clear();
- *offset += chunk_size;
- break;
- }
-
- case FOURCC('s', 'i', 'd', 'x'):
- {
- status_t err = parseSegmentIndex(data_offset, chunk_data_size);
- if (err != OK) {
- return err;
- }
- *offset += chunk_size;
- return UNKNOWN_ERROR; // stop parsing after sidx
- }
-
- case FOURCC('a', 'c', '-', '3'):
- {
- *offset += chunk_size;
- return parseAC3SampleEntry(data_offset);
- }
-
- case FOURCC('f', 't', 'y', 'p'):
- {
- if (chunk_data_size < 8 || depth != 0) {
- return ERROR_MALFORMED;
- }
-
- off64_t stop_offset = *offset + chunk_size;
- uint32_t numCompatibleBrands = (chunk_data_size - 8) / 4;
- std::set<uint32_t> brandSet;
- for (size_t i = 0; i < numCompatibleBrands + 2; ++i) {
- if (i == 1) {
- // Skip this index, it refers to the minorVersion,
- // not a brand.
- continue;
- }
-
- uint32_t brand;
- if (mDataSource->readAt(data_offset + 4 * i, &brand, 4) < 4) {
- return ERROR_MALFORMED;
- }
-
- brand = ntohl(brand);
- brandSet.insert(brand);
- }
-
- if (brandSet.count(FOURCC('q', 't', ' ', ' ')) > 0) {
- mIsQT = true;
- } else if (brandSet.count(FOURCC('m', 'i', 'f', '1')) > 0
- && brandSet.count(FOURCC('h', 'e', 'i', 'c')) > 0) {
- mIsHEIF = true;
- ALOGV("identified HEIF image");
- }
-
- *offset = stop_offset;
-
- break;
- }
-
- default:
- {
- // check if we're parsing 'ilst' for meta keys
- // if so, treat type as a number (key-id).
- if (underQTMetaPath(mPath, 3)) {
- status_t err = parseQTMetaVal(chunk_type, data_offset, chunk_data_size);
- if (err != OK) {
- return err;
- }
- }
-
- *offset += chunk_size;
- break;
- }
- }
-
- return OK;
-}
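parseChunk() starts every box the same way: an 8-byte header of big-endian 32-bit size plus fourcc type, where size == 1 means a 64-bit largesize follows and size == 0 (top level only) means the box extends to the end of the file. A minimal sketch of just that header decode over a byte buffer, with hypothetical names (the extractor itself reads through mDataSource instead):

#include <cstdint>
#include <cstdio>

struct BoxHeader {
    uint64_t size;        // full box size, including the header itself
    uint32_t type;        // fourcc
    size_t   headerBytes; // 8, or 16 when a largesize field is present
};

static uint32_t be32(const uint8_t *p) {
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) | ((uint32_t)p[2] << 8) | p[3];
}

static bool parseBoxHeader(const uint8_t *p, size_t len, BoxHeader *out) {
    if (len < 8) return false;
    out->size = be32(p);
    out->type = be32(p + 4);
    out->headerBytes = 8;
    if (out->size == 1) {                 // 64-bit largesize follows the type
        if (len < 16) return false;
        out->size = ((uint64_t)be32(p + 8) << 32) | be32(p + 12);
        out->headerBytes = 16;
    }
    // size == 0 ("box extends to end of file") is left to the caller.
    return true;
}

int main() {
    const uint8_t hdr[] = { 0x00, 0x00, 0x00, 0x18, 'f', 't', 'y', 'p' };
    BoxHeader h;
    if (parseBoxHeader(hdr, sizeof(hdr), &h)) {
        printf("size=%llu type=%c%c%c%c\n", (unsigned long long)h.size,
               (int)((h.type >> 24) & 0xff), (int)((h.type >> 16) & 0xff),
               (int)((h.type >> 8) & 0xff), (int)(h.type & 0xff));
    }
    return 0;
}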
-
-status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
- // skip 16 bytes:
- // + 6-byte reserved,
- // + 2-byte data reference index,
- // + 8-byte reserved
- offset += 16;
- uint16_t channelCount;
- if (!mDataSource->getUInt16(offset, &channelCount)) {
- return ERROR_MALFORMED;
- }
- // skip 8 bytes:
- // + 2-byte channelCount,
- // + 2-byte sample size,
- // + 4-byte reserved
- offset += 8;
- uint16_t sampleRate;
- if (!mDataSource->getUInt16(offset, &sampleRate)) {
- ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read sample rate");
- return ERROR_MALFORMED;
- }
-
- // skip 4 bytes:
- // + 2-byte sampleRate,
- // + 2-byte reserved
- offset += 4;
- return parseAC3SpecificBox(offset, sampleRate);
-}
-
-status_t MPEG4Extractor::parseAC3SpecificBox(
- off64_t offset, uint16_t sampleRate) {
- uint32_t size;
- // + 4-byte size
- // + 4-byte type
- // + 3-byte payload
- const uint32_t kAC3SpecificBoxSize = 11;
- if (!mDataSource->getUInt32(offset, &size) || size < kAC3SpecificBoxSize) {
- ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read specific box size");
- return ERROR_MALFORMED;
- }
-
- offset += 4;
- uint32_t type;
- if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '3')) {
- ALOGE("MPEG4Extractor: error while reading ac-3 specific block: header not dac3");
- return ERROR_MALFORMED;
- }
-
- offset += 4;
- const uint32_t kAC3SpecificBoxPayloadSize = 3;
- uint8_t chunk[kAC3SpecificBoxPayloadSize];
- if (mDataSource->readAt(offset, chunk, sizeof(chunk)) != sizeof(chunk)) {
- ALOGE("MPEG4Extractor: error while reading ac-3 specific block: bitstream fields");
- return ERROR_MALFORMED;
- }
-
- ABitReader br(chunk, sizeof(chunk));
- static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
- static const unsigned sampleRateTable[] = {48000, 44100, 32000};
-
- unsigned fscod = br.getBits(2);
- if (fscod == 3) {
- ALOGE("Incorrect fscod (3) in AC3 header");
- return ERROR_MALFORMED;
- }
- unsigned boxSampleRate = sampleRateTable[fscod];
- if (boxSampleRate != sampleRate) {
- ALOGE("sample rate mismatch: boxSampleRate = %d, sampleRate = %d",
- boxSampleRate, sampleRate);
- return ERROR_MALFORMED;
- }
-
- unsigned bsid = br.getBits(5);
- if (bsid > 8) {
- ALOGW("Incorrect bsid in AC3 header. Possibly E-AC-3?");
- return ERROR_MALFORMED;
- }
-
- // skip
- unsigned bsmod __unused = br.getBits(3);
-
- unsigned acmod = br.getBits(3);
- unsigned lfeon = br.getBits(1);
- unsigned channelCount = channelCountTable[acmod] + lfeon;
-
- if (mLastTrack == NULL) {
- return ERROR_MALFORMED;
- }
- mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
- mLastTrack->meta->setInt32(kKeyChannelCount, channelCount);
- mLastTrack->meta->setInt32(kKeySampleRate, sampleRate);
- return OK;
-}
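parseAC3SpecificBox() reads the 3-byte dac3 payload as packed bit fields: fscod(2), bsid(5), bsmod(3), acmod(3), lfeon(1). A small illustration with hand-packed example bytes (values are made up): fscod=0 selects 48000 Hz, acmod=7 maps to 5 full-range channels, and lfeon adds the LFE channel.

#include <cstdint>
#include <cstdio>

// Minimal MSB-first bit reader, just enough for the 3-byte payload.
struct BitReader {
    const uint8_t *p;
    size_t bitPos;
    uint32_t get(int n) {
        uint32_t v = 0;
        while (n--) {
            v = (v << 1) | ((p[bitPos >> 3] >> (7 - (bitPos & 7))) & 1);
            bitPos++;
        }
        return v;
    }
};

int main() {
    static const unsigned kChannels[] = {2, 1, 2, 3, 3, 4, 4, 5};
    static const unsigned kRates[]    = {48000, 44100, 32000};
    const uint8_t dac3[3] = { 0x10, 0x3C, 0x00 };  // fscod=0 bsid=8 bsmod=0 acmod=7 lfeon=1
    BitReader br{dac3, 0};
    unsigned fscod = br.get(2), bsid = br.get(5), bsmod = br.get(3);
    unsigned acmod = br.get(3), lfeon = br.get(1);
    (void)bsid; (void)bsmod;
    printf("rate=%u channels=%u\n", kRates[fscod], kChannels[acmod] + lfeon);
    return 0;
}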
-
-status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
- ALOGV("MPEG4Extractor::parseSegmentIndex");
-
- if (size < 12) {
- return -EINVAL;
- }
-
- uint32_t flags;
- if (!mDataSource->getUInt32(offset, &flags)) {
- return ERROR_MALFORMED;
- }
-
- uint32_t version = flags >> 24;
- flags &= 0xffffff;
-
- ALOGV("sidx version %d", version);
-
- uint32_t referenceId;
- if (!mDataSource->getUInt32(offset + 4, &referenceId)) {
- return ERROR_MALFORMED;
- }
-
- uint32_t timeScale;
- if (!mDataSource->getUInt32(offset + 8, &timeScale)) {
- return ERROR_MALFORMED;
- }
- ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
- if (timeScale == 0)
- return ERROR_MALFORMED;
-
- uint64_t earliestPresentationTime;
- uint64_t firstOffset;
-
- offset += 12;
- size -= 12;
-
- if (version == 0) {
- if (size < 8) {
- return -EINVAL;
- }
- uint32_t tmp;
- if (!mDataSource->getUInt32(offset, &tmp)) {
- return ERROR_MALFORMED;
- }
- earliestPresentationTime = tmp;
- if (!mDataSource->getUInt32(offset + 4, &tmp)) {
- return ERROR_MALFORMED;
- }
- firstOffset = tmp;
- offset += 8;
- size -= 8;
- } else {
- if (size < 16) {
- return -EINVAL;
- }
- if (!mDataSource->getUInt64(offset, &earliestPresentationTime)) {
- return ERROR_MALFORMED;
- }
- if (!mDataSource->getUInt64(offset + 8, &firstOffset)) {
- return ERROR_MALFORMED;
- }
- offset += 16;
- size -= 16;
- }
- ALOGV("sidx pres/off: %" PRIu64 "/%" PRIu64, earliestPresentationTime, firstOffset);
-
- if (size < 4) {
- return -EINVAL;
- }
-
- uint16_t referenceCount;
- if (!mDataSource->getUInt16(offset + 2, &referenceCount)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- size -= 4;
- ALOGV("refcount: %d", referenceCount);
-
- if (size < referenceCount * 12) {
- return -EINVAL;
- }
-
- uint64_t total_duration = 0;
- for (unsigned int i = 0; i < referenceCount; i++) {
- uint32_t d1, d2, d3;
-
- if (!mDataSource->getUInt32(offset, &d1) || // size
- !mDataSource->getUInt32(offset + 4, &d2) || // duration
- !mDataSource->getUInt32(offset + 8, &d3)) { // flags
- return ERROR_MALFORMED;
- }
-
- if (d1 & 0x80000000) {
- ALOGW("sub-sidx boxes not supported yet");
- }
- bool sap = d3 & 0x80000000;
- uint32_t saptype = (d3 >> 28) & 7;
- if (!sap || (saptype != 1 && saptype != 2)) {
- // type 1 and 2 are sync samples
- ALOGW("not a stream access point, or unsupported type: %08x", d3);
- }
- total_duration += d2;
- offset += 12;
- ALOGV(" item %d, %08x %08x %08x", i, d1, d2, d3);
- SidxEntry se;
- se.mSize = d1 & 0x7fffffff;
- se.mDurationUs = 1000000LL * d2 / timeScale;
- mSidxEntries.add(se);
- }
-
- uint64_t sidxDuration = total_duration * 1000000 / timeScale;
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- int64_t metaDuration;
- if (!mLastTrack->meta->findInt64(kKeyDuration, &metaDuration) || metaDuration == 0) {
- mLastTrack->meta->setInt64(kKeyDuration, sidxDuration);
- }
- return OK;
-}
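Each sidx reference entry read above is three 32-bit words: the top bit of the first word selects sub-sidx vs. media (the remaining 31 bits are the referenced size), the second word is the subsegment duration in timescale ticks, and the third packs starts_with_SAP in bit 31 and SAP_type in bits 28-30. Unpacking one entry with made-up values, as a sketch:

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t d1 = 0x00012345;  // reference_type=0, referenced_size=0x12345
    const uint32_t d2 = 90000;       // duration in timescale ticks
    const uint32_t d3 = 0x90000000;  // starts_with_SAP=1, SAP_type=1
    const uint32_t timeScale = 90000;

    bool     subSidx = (d1 & 0x80000000) != 0;
    uint32_t size    = d1 & 0x7fffffff;
    bool     sap     = (d3 & 0x80000000) != 0;
    uint32_t sapType = (d3 >> 28) & 7;
    uint64_t durUs   = 1000000ULL * d2 / timeScale;

    printf("subSidx=%d size=%u sap=%d type=%u durationUs=%llu\n",
           subSidx, size, sap, sapType, (unsigned long long)durUs);
    return 0;
}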
-
-status_t MPEG4Extractor::parseQTMetaKey(off64_t offset, size_t size) {
- if (size < 8) {
- return ERROR_MALFORMED;
- }
-
- uint32_t count;
- if (!mDataSource->getUInt32(offset + 4, &count)) {
- return ERROR_MALFORMED;
- }
-
- if (mMetaKeyMap.size() > 0) {
- ALOGW("'keys' atom seen again, discarding existing entries");
- mMetaKeyMap.clear();
- }
-
- off64_t keyOffset = offset + 8;
- off64_t stopOffset = offset + size;
- for (size_t i = 1; i <= count; i++) {
- if (keyOffset + 8 > stopOffset) {
- return ERROR_MALFORMED;
- }
-
- uint32_t keySize;
- if (!mDataSource->getUInt32(keyOffset, &keySize)
- || keySize < 8
- || keyOffset + keySize > stopOffset) {
- return ERROR_MALFORMED;
- }
-
- uint32_t type;
- if (!mDataSource->getUInt32(keyOffset + 4, &type)
- || type != FOURCC('m', 'd', 't', 'a')) {
- return ERROR_MALFORMED;
- }
-
- keySize -= 8;
- keyOffset += 8;
-
- sp<ABuffer> keyData = new ABuffer(keySize);
- if (keyData->data() == NULL) {
- return ERROR_MALFORMED;
- }
- if (mDataSource->readAt(
- keyOffset, keyData->data(), keySize) < (ssize_t) keySize) {
- return ERROR_MALFORMED;
- }
-
- AString key((const char *)keyData->data(), keySize);
- mMetaKeyMap.add(i, key);
-
- keyOffset += keySize;
- }
- return OK;
-}
-
-status_t MPEG4Extractor::parseQTMetaVal(
- int32_t keyId, off64_t offset, size_t size) {
- ssize_t index = mMetaKeyMap.indexOfKey(keyId);
- if (index < 0) {
- // corresponding key is not present, ignore
- return ERROR_MALFORMED;
- }
-
- if (size <= 16) {
- return ERROR_MALFORMED;
- }
- uint32_t dataSize;
- if (!mDataSource->getUInt32(offset, &dataSize)
- || dataSize > size || dataSize <= 16) {
- return ERROR_MALFORMED;
- }
- uint32_t atomFourCC;
- if (!mDataSource->getUInt32(offset + 4, &atomFourCC)
- || atomFourCC != FOURCC('d', 'a', 't', 'a')) {
- return ERROR_MALFORMED;
- }
- uint32_t dataType;
- if (!mDataSource->getUInt32(offset + 8, &dataType)
- || ((dataType & 0xff000000) != 0)) {
- // not well-known type
- return ERROR_MALFORMED;
- }
-
- dataSize -= 16;
- offset += 16;
-
- if (dataType == 23 && dataSize >= 4) {
- // BE Float32
- uint32_t val;
- if (!mDataSource->getUInt32(offset, &val)) {
- return ERROR_MALFORMED;
- }
- if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.capture.fps")) {
- mFileMetaData->setFloat(kKeyCaptureFramerate, *(float *)&val);
- }
- } else if (dataType == 67 && dataSize >= 4) {
- // BE signed int32
- uint32_t val;
- if (!mDataSource->getUInt32(offset, &val)) {
- return ERROR_MALFORMED;
- }
- if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.video.temporal_layers_count")) {
- mFileMetaData->setInt32(kKeyTemporalLayerCount, val);
- }
- } else {
- // add more keys if needed
- ALOGV("ignoring key: type %d, size %d", dataType, dataSize);
- }
-
- return OK;
-}
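parseQTMetaVal() reinterprets the big-endian word as a float with `*(float *)&val`; an equivalent step written with memcpy sidesteps the strict-aliasing concern. A tiny sketch, illustrative only:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Reinterpret a 32-bit pattern as an IEEE-754 float without type punning.
static float bitsToFloat(uint32_t bits) {
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

int main() {
    printf("%.2f\n", bitsToFloat(0x41F00000u));  // 30.00, e.g. a capture fps value
    return 0;
}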
-
-status_t MPEG4Extractor::parseTrackHeader(
- off64_t data_offset, off64_t data_size) {
- if (data_size < 4) {
- return ERROR_MALFORMED;
- }
-
- uint8_t version;
- if (mDataSource->readAt(data_offset, &version, 1) < 1) {
- return ERROR_IO;
- }
-
- size_t dynSize = (version == 1) ? 36 : 24;
-
- uint8_t buffer[36 + 60];
-
- if (data_size != (off64_t)dynSize + 60) {
- return ERROR_MALFORMED;
- }
-
- if (mDataSource->readAt(
- data_offset, buffer, data_size) < (ssize_t)data_size) {
- return ERROR_IO;
- }
-
- uint64_t ctime __unused, mtime __unused, duration __unused;
- int32_t id;
-
- if (version == 1) {
- ctime = U64_AT(&buffer[4]);
- mtime = U64_AT(&buffer[12]);
- id = U32_AT(&buffer[20]);
- duration = U64_AT(&buffer[28]);
- } else if (version == 0) {
- ctime = U32_AT(&buffer[4]);
- mtime = U32_AT(&buffer[8]);
- id = U32_AT(&buffer[12]);
- duration = U32_AT(&buffer[20]);
- } else {
- return ERROR_UNSUPPORTED;
- }
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- mLastTrack->meta->setInt32(kKeyTrackID, id);
-
- size_t matrixOffset = dynSize + 16;
- int32_t a00 = U32_AT(&buffer[matrixOffset]);
- int32_t a01 = U32_AT(&buffer[matrixOffset + 4]);
- int32_t a10 = U32_AT(&buffer[matrixOffset + 12]);
- int32_t a11 = U32_AT(&buffer[matrixOffset + 16]);
-
-#if 0
- int32_t dx = U32_AT(&buffer[matrixOffset + 8]);
- int32_t dy = U32_AT(&buffer[matrixOffset + 20]);
-
- ALOGI("x' = %.2f * x + %.2f * y + %.2f",
- a00 / 65536.0f, a01 / 65536.0f, dx / 65536.0f);
- ALOGI("y' = %.2f * x + %.2f * y + %.2f",
- a10 / 65536.0f, a11 / 65536.0f, dy / 65536.0f);
-#endif
-
- uint32_t rotationDegrees;
-
- static const int32_t kFixedOne = 0x10000;
- if (a00 == kFixedOne && a01 == 0 && a10 == 0 && a11 == kFixedOne) {
- // Identity, no rotation
- rotationDegrees = 0;
- } else if (a00 == 0 && a01 == kFixedOne && a10 == -kFixedOne && a11 == 0) {
- rotationDegrees = 90;
- } else if (a00 == 0 && a01 == -kFixedOne && a10 == kFixedOne && a11 == 0) {
- rotationDegrees = 270;
- } else if (a00 == -kFixedOne && a01 == 0 && a10 == 0 && a11 == -kFixedOne) {
- rotationDegrees = 180;
- } else {
- ALOGW("We only support 0,90,180,270 degree rotation matrices");
- rotationDegrees = 0;
- }
-
- if (rotationDegrees != 0) {
- mLastTrack->meta->setInt32(kKeyRotation, rotationDegrees);
- }
-
- // Handle presentation display size, which could be different
- // from the image size indicated by kKeyWidth and kKeyHeight.
- uint32_t width = U32_AT(&buffer[dynSize + 52]);
- uint32_t height = U32_AT(&buffer[dynSize + 56]);
- mLastTrack->meta->setInt32(kKeyDisplayWidth, width >> 16);
- mLastTrack->meta->setInt32(kKeyDisplayHeight, height >> 16);
-
- return OK;
-}
-
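The tkhd parsing above derives rotation by matching the 2x2 part of the display matrix, stored as 16.16 fixed point, against the four axis-aligned cases. A self-contained sketch of that mapping follows; matrixToRotationDegrees is an illustrative name, not an extractor function.

#include <cstdint>
#include <cstdio>

// Map the 2x2 block of a tkhd display matrix (16.16 fixed point) to a
// rotation in degrees. Only the four axis-aligned cases are recognized;
// anything else falls back to 0, mirroring the extractor's behaviour.
static uint32_t matrixToRotationDegrees(int32_t a00, int32_t a01,
                                        int32_t a10, int32_t a11) {
    static const int32_t kFixedOne = 0x10000;  // 1.0 in 16.16 fixed point
    if (a00 == kFixedOne && a01 == 0 && a10 == 0 && a11 == kFixedOne) {
        return 0;    // identity
    } else if (a00 == 0 && a01 == kFixedOne && a10 == -kFixedOne && a11 == 0) {
        return 90;
    } else if (a00 == 0 && a01 == -kFixedOne && a10 == kFixedOne && a11 == 0) {
        return 270;
    } else if (a00 == -kFixedOne && a01 == 0 && a10 == 0 && a11 == -kFixedOne) {
        return 180;
    }
    return 0;        // arbitrary matrices are not supported
}

int main() {
    // A clip recorded in portrait typically carries the 90-degree matrix.
    printf("%u\n", matrixToRotationDegrees(0, 0x10000, -0x10000, 0));  // 90
    return 0;
}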
-status_t MPEG4Extractor::parseITunesMetaData(off64_t offset, size_t size) {
- if (size == 0) {
- return OK;
- }
-
- if (size < 4 || size == SIZE_MAX) {
- return ERROR_MALFORMED;
- }
-
- uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
- if (buffer == NULL) {
- return ERROR_MALFORMED;
- }
- if (mDataSource->readAt(
- offset, buffer, size) != (ssize_t)size) {
- delete[] buffer;
- buffer = NULL;
-
- return ERROR_IO;
- }
-
- uint32_t flags = U32_AT(buffer);
-
- uint32_t metadataKey = 0;
- char chunk[5];
- MakeFourCCString(mPath[4], chunk);
- ALOGV("meta: %s @ %lld", chunk, (long long)offset);
- switch ((int32_t)mPath[4]) {
- case FOURCC(0xa9, 'a', 'l', 'b'):
- {
- metadataKey = kKeyAlbum;
- break;
- }
- case FOURCC(0xa9, 'A', 'R', 'T'):
- {
- metadataKey = kKeyArtist;
- break;
- }
- case FOURCC('a', 'A', 'R', 'T'):
- {
- metadataKey = kKeyAlbumArtist;
- break;
- }
- case FOURCC(0xa9, 'd', 'a', 'y'):
- {
- metadataKey = kKeyYear;
- break;
- }
- case FOURCC(0xa9, 'n', 'a', 'm'):
- {
- metadataKey = kKeyTitle;
- break;
- }
- case FOURCC(0xa9, 'w', 'r', 't'):
- {
- metadataKey = kKeyWriter;
- break;
- }
- case FOURCC('c', 'o', 'v', 'r'):
- {
- metadataKey = kKeyAlbumArt;
- break;
- }
- case FOURCC('g', 'n', 'r', 'e'):
- {
- metadataKey = kKeyGenre;
- break;
- }
- case FOURCC(0xa9, 'g', 'e', 'n'):
- {
- metadataKey = kKeyGenre;
- break;
- }
- case FOURCC('c', 'p', 'i', 'l'):
- {
- if (size == 9 && flags == 21) {
- char tmp[16];
- sprintf(tmp, "%d",
- (int)buffer[size - 1]);
-
- mFileMetaData->setCString(kKeyCompilation, tmp);
- }
- break;
- }
- case FOURCC('t', 'r', 'k', 'n'):
- {
- if (size == 16 && flags == 0) {
- char tmp[16];
- uint16_t* pTrack = (uint16_t*)&buffer[10];
- uint16_t* pTotalTracks = (uint16_t*)&buffer[12];
- sprintf(tmp, "%d/%d", ntohs(*pTrack), ntohs(*pTotalTracks));
-
- mFileMetaData->setCString(kKeyCDTrackNumber, tmp);
- }
- break;
- }
- case FOURCC('d', 'i', 's', 'k'):
- {
- if ((size == 14 || size == 16) && flags == 0) {
- char tmp[16];
- uint16_t* pDisc = (uint16_t*)&buffer[10];
- uint16_t* pTotalDiscs = (uint16_t*)&buffer[12];
- sprintf(tmp, "%d/%d", ntohs(*pDisc), ntohs(*pTotalDiscs));
-
- mFileMetaData->setCString(kKeyDiscNumber, tmp);
- }
- break;
- }
- case FOURCC('-', '-', '-', '-'):
- {
- buffer[size] = '\0';
- switch (mPath[5]) {
- case FOURCC('m', 'e', 'a', 'n'):
- mLastCommentMean.setTo((const char *)buffer + 4);
- break;
- case FOURCC('n', 'a', 'm', 'e'):
- mLastCommentName.setTo((const char *)buffer + 4);
- break;
- case FOURCC('d', 'a', 't', 'a'):
- if (size < 8) {
- delete[] buffer;
- buffer = NULL;
- ALOGE("b/24346430");
- return ERROR_MALFORMED;
- }
- mLastCommentData.setTo((const char *)buffer + 8);
- break;
- }
-
- // Once we have a set of mean/name/data info, go ahead and process
-            // it to see if it's something we are interested in. Whether or not
-            // we are interested in the specific tag, make sure to clear out
- // the set so we can be ready to process another tuple should one
- // show up later in the file.
- if ((mLastCommentMean.length() != 0) &&
- (mLastCommentName.length() != 0) &&
- (mLastCommentData.length() != 0)) {
-
- if (mLastCommentMean == "com.apple.iTunes"
- && mLastCommentName == "iTunSMPB") {
- int32_t delay, padding;
- if (sscanf(mLastCommentData,
- " %*x %x %x %*x", &delay, &padding) == 2) {
- if (mLastTrack == NULL) {
- delete[] buffer;
- return ERROR_MALFORMED;
- }
-
- mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
- mLastTrack->meta->setInt32(kKeyEncoderPadding, padding);
- }
- }
-
- mLastCommentMean.clear();
- mLastCommentName.clear();
- mLastCommentData.clear();
- }
- break;
- }
-
- default:
- break;
- }
-
- if (size >= 8 && metadataKey && !mFileMetaData->hasData(metadataKey)) {
- if (metadataKey == kKeyAlbumArt) {
- mFileMetaData->setData(
- kKeyAlbumArt, MetaData::TYPE_NONE,
- buffer + 8, size - 8);
- } else if (metadataKey == kKeyGenre) {
- if (flags == 0) {
- // uint8_t genre code, iTunes genre codes are
- // the standard id3 codes, except they start
- // at 1 instead of 0 (e.g. Pop is 14, not 13)
- // We use standard id3 numbering, so subtract 1.
- int genrecode = (int)buffer[size - 1];
- genrecode--;
- if (genrecode < 0) {
- genrecode = 255; // reserved for 'unknown genre'
- }
- char genre[10];
- sprintf(genre, "%d", genrecode);
-
- mFileMetaData->setCString(metadataKey, genre);
- } else if (flags == 1) {
- // custom genre string
- buffer[size] = '\0';
-
- mFileMetaData->setCString(
- metadataKey, (const char *)buffer + 8);
- }
- } else {
- buffer[size] = '\0';
-
- mFileMetaData->setCString(
- metadataKey, (const char *)buffer + 8);
- }
- }
-
- delete[] buffer;
- buffer = NULL;
-
- return OK;
-}
-
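The 'gnre' branch above stores a 1-based ID3v1 genre code, so the code subtracts one (and clamps unknown values to 255) before emitting it. A tiny sketch of that conversion; iTunesGenreToId3 is a hypothetical helper.

#include <cstdint>
#include <cstdio>

// Convert a 1-based iTunes genre byte to the 0-based ID3v1 code used by
// the metadata key, clamping out-of-range values to 255 ("unknown genre").
static int iTunesGenreToId3(uint8_t gnre) {
    int code = (int)gnre - 1;
    return (code < 0) ? 255 : code;
}

int main() {
    // iTunes stores Pop as 14; the ID3v1 code for Pop is 13.
    printf("%d\n", iTunesGenreToId3(14));  // 13
    printf("%d\n", iTunesGenreToId3(0));   // 255
    return 0;
}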
-status_t MPEG4Extractor::parseColorInfo(off64_t offset, size_t size) {
- if (size < 4 || size == SIZE_MAX || mLastTrack == NULL) {
- return ERROR_MALFORMED;
- }
-
- uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
- if (buffer == NULL) {
- return ERROR_MALFORMED;
- }
- if (mDataSource->readAt(offset, buffer, size) != (ssize_t)size) {
- delete[] buffer;
- buffer = NULL;
-
- return ERROR_IO;
- }
-
- int32_t type = U32_AT(&buffer[0]);
- if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
- || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
- int32_t primaries = U16_AT(&buffer[4]);
- int32_t transfer = U16_AT(&buffer[6]);
- int32_t coeffs = U16_AT(&buffer[8]);
- bool fullRange = (type == FOURCC('n', 'c', 'l', 'x')) && (buffer[10] & 128);
-
- ColorAspects aspects;
- ColorUtils::convertIsoColorAspectsToCodecAspects(
- primaries, transfer, coeffs, fullRange, aspects);
-
- // only store the first color specification
- if (!mLastTrack->meta->hasData(kKeyColorPrimaries)) {
- mLastTrack->meta->setInt32(kKeyColorPrimaries, aspects.mPrimaries);
- mLastTrack->meta->setInt32(kKeyTransferFunction, aspects.mTransfer);
- mLastTrack->meta->setInt32(kKeyColorMatrix, aspects.mMatrixCoeffs);
- mLastTrack->meta->setInt32(kKeyColorRange, aspects.mRange);
- }
- }
-
- delete[] buffer;
- buffer = NULL;
-
- return OK;
-}
-
-status_t MPEG4Extractor::parse3GPPMetaData(off64_t offset, size_t size, int depth) {
- if (size < 4 || size == SIZE_MAX) {
- return ERROR_MALFORMED;
- }
-
- uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
- if (buffer == NULL) {
- return ERROR_MALFORMED;
- }
- if (mDataSource->readAt(
- offset, buffer, size) != (ssize_t)size) {
- delete[] buffer;
- buffer = NULL;
-
- return ERROR_IO;
- }
-
- uint32_t metadataKey = 0;
- switch (mPath[depth]) {
- case FOURCC('t', 'i', 't', 'l'):
- {
- metadataKey = kKeyTitle;
- break;
- }
- case FOURCC('p', 'e', 'r', 'f'):
- {
- metadataKey = kKeyArtist;
- break;
- }
- case FOURCC('a', 'u', 't', 'h'):
- {
- metadataKey = kKeyWriter;
- break;
- }
- case FOURCC('g', 'n', 'r', 'e'):
- {
- metadataKey = kKeyGenre;
- break;
- }
- case FOURCC('a', 'l', 'b', 'm'):
- {
- if (buffer[size - 1] != '\0') {
- char tmp[4];
- sprintf(tmp, "%u", buffer[size - 1]);
-
- mFileMetaData->setCString(kKeyCDTrackNumber, tmp);
- }
-
- metadataKey = kKeyAlbum;
- break;
- }
- case FOURCC('y', 'r', 'r', 'c'):
- {
- if (size < 6) {
- delete[] buffer;
- buffer = NULL;
- ALOGE("b/62133227");
- android_errorWriteLog(0x534e4554, "62133227");
- return ERROR_MALFORMED;
- }
- char tmp[5];
- uint16_t year = U16_AT(&buffer[4]);
-
- if (year < 10000) {
- sprintf(tmp, "%u", year);
-
- mFileMetaData->setCString(kKeyYear, tmp);
- }
- break;
- }
-
- default:
- break;
- }
-
- if (metadataKey > 0) {
- bool isUTF8 = true; // Common case
- char16_t *framedata = NULL;
- int len16 = 0; // Number of UTF-16 characters
-
- // smallest possible valid UTF-16 string w BOM: 0xfe 0xff 0x00 0x00
- if (size < 6) {
- delete[] buffer;
- buffer = NULL;
- return ERROR_MALFORMED;
- }
-
- if (size - 6 >= 4) {
- len16 = ((size - 6) / 2) - 1; // don't include 0x0000 terminator
- framedata = (char16_t *)(buffer + 6);
- if (0xfffe == *framedata) {
- // endianness marker (BOM) doesn't match host endianness
- for (int i = 0; i < len16; i++) {
- framedata[i] = bswap_16(framedata[i]);
- }
- // BOM is now swapped to 0xfeff, we will execute next block too
- }
-
- if (0xfeff == *framedata) {
- // Remove the BOM
- framedata++;
- len16--;
- isUTF8 = false;
- }
- // else normal non-zero-length UTF-8 string
- // we can't handle UTF-16 without BOM as there is no other
- // indication of encoding.
- }
-
- if (isUTF8) {
- buffer[size] = 0;
- mFileMetaData->setCString(metadataKey, (const char *)buffer + 6);
- } else {
- // Convert from UTF-16 string to UTF-8 string.
- String8 tmpUTF8str(framedata, len16);
- mFileMetaData->setCString(metadataKey, tmpUTF8str.string());
- }
- }
-
- delete[] buffer;
- buffer = NULL;
-
- return OK;
-}
-
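parse3GPPMetaData above only treats a value as UTF-16 when a byte-order mark is present, swapping the payload first when the BOM arrives as 0xFFFE and otherwise falling back to UTF-8. A small sketch of that normalization step; normalizeUtf16 is a hypothetical helper.

#include <cstddef>
#include <string>

// Normalize a possibly byte-swapped UTF-16 payload the way the 3GPP parser
// does: a leading 0xFFFE means the units are in the wrong byte order, so
// each one is swapped; a leading 0xFEFF (the BOM) is then dropped.
// Returns false when no BOM is present, in which case the caller should
// treat the original bytes as UTF-8.
static bool normalizeUtf16(char16_t *data, size_t len, std::u16string *out) {
    if (len == 0) {
        return false;
    }
    if (data[0] == 0xfffe) {
        for (size_t i = 0; i < len; i++) {
            data[i] = (char16_t)(((data[i] & 0x00ff) << 8) |
                                 ((data[i] & 0xff00) >> 8));
        }
    }
    if (data[0] != 0xfeff) {
        return false;                // no BOM: not recognizably UTF-16
    }
    out->assign(data + 1, len - 1);  // drop the BOM
    return true;
}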
-void MPEG4Extractor::parseID3v2MetaData(off64_t offset) {
- ID3 id3(mDataSource, true /* ignorev1 */, offset);
-
- if (id3.isValid()) {
- struct Map {
- int key;
- const char *tag1;
- const char *tag2;
- };
- static const Map kMap[] = {
- { kKeyAlbum, "TALB", "TAL" },
- { kKeyArtist, "TPE1", "TP1" },
- { kKeyAlbumArtist, "TPE2", "TP2" },
- { kKeyComposer, "TCOM", "TCM" },
- { kKeyGenre, "TCON", "TCO" },
- { kKeyTitle, "TIT2", "TT2" },
- { kKeyYear, "TYE", "TYER" },
- { kKeyAuthor, "TXT", "TEXT" },
- { kKeyCDTrackNumber, "TRK", "TRCK" },
- { kKeyDiscNumber, "TPA", "TPOS" },
- { kKeyCompilation, "TCP", "TCMP" },
- };
- static const size_t kNumMapEntries = sizeof(kMap) / sizeof(kMap[0]);
-
- for (size_t i = 0; i < kNumMapEntries; ++i) {
- if (!mFileMetaData->hasData(kMap[i].key)) {
- ID3::Iterator *it = new ID3::Iterator(id3, kMap[i].tag1);
- if (it->done()) {
- delete it;
- it = new ID3::Iterator(id3, kMap[i].tag2);
- }
-
- if (it->done()) {
- delete it;
- continue;
- }
-
- String8 s;
- it->getString(&s);
- delete it;
-
- mFileMetaData->setCString(kMap[i].key, s);
- }
- }
-
- size_t dataSize;
- String8 mime;
- const void *data = id3.getAlbumArt(&dataSize, &mime);
-
- if (data) {
- mFileMetaData->setData(kKeyAlbumArt, MetaData::TYPE_NONE, data, dataSize);
- mFileMetaData->setCString(kKeyAlbumArtMIME, mime.string());
- }
- }
-}
-
-sp<IMediaSource> MPEG4Extractor::getTrack(size_t index) {
- status_t err;
- if ((err = readMetaData()) != OK) {
- return NULL;
- }
-
- Track *track = mFirstTrack;
- while (index > 0) {
- if (track == NULL) {
- return NULL;
- }
-
- track = track->next;
- --index;
- }
-
- if (track == NULL) {
- return NULL;
- }
-
-
- Trex *trex = NULL;
- int32_t trackId;
- if (track->meta->findInt32(kKeyTrackID, &trackId)) {
- for (size_t i = 0; i < mTrex.size(); i++) {
- Trex *t = &mTrex.editItemAt(i);
- if (t->track_ID == (uint32_t) trackId) {
- trex = t;
- break;
- }
- }
- } else {
- ALOGE("b/21657957");
- return NULL;
- }
-
- ALOGV("getTrack called, pssh: %zu", mPssh.size());
-
- const char *mime;
- if (!track->meta->findCString(kKeyMIMEType, &mime)) {
- return NULL;
- }
-
- if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
- uint32_t type;
- const void *data;
- size_t size;
- if (!track->meta->findData(kKeyAVCC, &type, &data, &size)) {
- return NULL;
- }
-
- const uint8_t *ptr = (const uint8_t *)data;
-
- if (size < 7 || ptr[0] != 1) { // configurationVersion == 1
- return NULL;
- }
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
- uint32_t type;
- const void *data;
- size_t size;
- if (!track->meta->findData(kKeyHVCC, &type, &data, &size)) {
- return NULL;
- }
-
- const uint8_t *ptr = (const uint8_t *)data;
-
- if (size < 22 || ptr[0] != 1) { // configurationVersion == 1
- return NULL;
- }
- }
-
- sp<MPEG4Source> source = new MPEG4Source(this,
- track->meta, mDataSource, track->timescale, track->sampleTable,
- mSidxEntries, trex, mMoofOffset, mItemTable);
- if (source->init() != OK) {
- return NULL;
- }
- return source;
-}
-
-// static
-status_t MPEG4Extractor::verifyTrack(Track *track) {
- const char *mime;
- CHECK(track->meta->findCString(kKeyMIMEType, &mime));
-
- uint32_t type;
- const void *data;
- size_t size;
- if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
- if (!track->meta->findData(kKeyAVCC, &type, &data, &size)
- || type != kTypeAVCC) {
- return ERROR_MALFORMED;
- }
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
- if (!track->meta->findData(kKeyHVCC, &type, &data, &size)
- || type != kTypeHVCC) {
- return ERROR_MALFORMED;
- }
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)
- || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)
- || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
- if (!track->meta->findData(kKeyESDS, &type, &data, &size)
- || type != kTypeESDS) {
- return ERROR_MALFORMED;
- }
- }
-
- if (track->sampleTable == NULL || !track->sampleTable->isValid()) {
- // Make sure we have all the metadata we need.
- ALOGE("stbl atom missing/invalid.");
- return ERROR_MALFORMED;
- }
-
- if (track->timescale == 0) {
- ALOGE("timescale invalid.");
- return ERROR_MALFORMED;
- }
-
- return OK;
-}
-
-typedef enum {
- //AOT_NONE = -1,
- //AOT_NULL_OBJECT = 0,
- //AOT_AAC_MAIN = 1, /**< Main profile */
- AOT_AAC_LC = 2, /**< Low Complexity object */
- //AOT_AAC_SSR = 3,
- //AOT_AAC_LTP = 4,
- AOT_SBR = 5,
- //AOT_AAC_SCAL = 6,
- //AOT_TWIN_VQ = 7,
- //AOT_CELP = 8,
- //AOT_HVXC = 9,
- //AOT_RSVD_10 = 10, /**< (reserved) */
- //AOT_RSVD_11 = 11, /**< (reserved) */
- //AOT_TTSI = 12, /**< TTSI Object */
- //AOT_MAIN_SYNTH = 13, /**< Main Synthetic object */
- //AOT_WAV_TAB_SYNTH = 14, /**< Wavetable Synthesis object */
- //AOT_GEN_MIDI = 15, /**< General MIDI object */
- //AOT_ALG_SYNTH_AUD_FX = 16, /**< Algorithmic Synthesis and Audio FX object */
- AOT_ER_AAC_LC = 17, /**< Error Resilient(ER) AAC Low Complexity */
- //AOT_RSVD_18 = 18, /**< (reserved) */
- //AOT_ER_AAC_LTP = 19, /**< Error Resilient(ER) AAC LTP object */
- AOT_ER_AAC_SCAL = 20, /**< Error Resilient(ER) AAC Scalable object */
- //AOT_ER_TWIN_VQ = 21, /**< Error Resilient(ER) TwinVQ object */
- AOT_ER_BSAC = 22, /**< Error Resilient(ER) BSAC object */
- AOT_ER_AAC_LD = 23, /**< Error Resilient(ER) AAC LowDelay object */
- //AOT_ER_CELP = 24, /**< Error Resilient(ER) CELP object */
- //AOT_ER_HVXC = 25, /**< Error Resilient(ER) HVXC object */
- //AOT_ER_HILN = 26, /**< Error Resilient(ER) HILN object */
- //AOT_ER_PARA = 27, /**< Error Resilient(ER) Parametric object */
- //AOT_RSVD_28 = 28, /**< might become SSC */
- AOT_PS = 29, /**< PS, Parametric Stereo (includes SBR) */
- //AOT_MPEGS = 30, /**< MPEG Surround */
-
- AOT_ESCAPE = 31, /**< Signal AOT uses more than 5 bits */
-
- //AOT_MP3ONMP4_L1 = 32, /**< MPEG-Layer1 in mp4 */
- //AOT_MP3ONMP4_L2 = 33, /**< MPEG-Layer2 in mp4 */
- //AOT_MP3ONMP4_L3 = 34, /**< MPEG-Layer3 in mp4 */
- //AOT_RSVD_35 = 35, /**< might become DST */
- //AOT_RSVD_36 = 36, /**< might become ALS */
- //AOT_AAC_SLS = 37, /**< AAC + SLS */
- //AOT_SLS = 38, /**< SLS */
- //AOT_ER_AAC_ELD = 39, /**< AAC Enhanced Low Delay */
-
- //AOT_USAC = 42, /**< USAC */
- //AOT_SAOC = 43, /**< SAOC */
- //AOT_LD_MPEGS = 44, /**< Low Delay MPEG Surround */
-
- //AOT_RSVD50 = 50, /**< Interim AOT for Rsvd50 */
-} AUDIO_OBJECT_TYPE;
-
-status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
- const void *esds_data, size_t esds_size) {
- ESDS esds(esds_data, esds_size);
-
- uint8_t objectTypeIndication;
- if (esds.getObjectTypeIndication(&objectTypeIndication) != OK) {
- return ERROR_MALFORMED;
- }
-
- if (objectTypeIndication == 0xe1) {
- // This isn't MPEG4 audio at all, it's QCELP 14k...
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_QCELP);
- return OK;
- }
-
- if (objectTypeIndication == 0x6b) {
- // The media subtype is MP3 audio
- // Our software MP3 audio decoder may not be able to handle
-        // packetized MP3 audio; for now, let's just return ERROR_UNSUPPORTED
- ALOGE("MP3 track in MP4/3GPP file is not supported");
- return ERROR_UNSUPPORTED;
- }
-
- if (mLastTrack != NULL) {
- uint32_t maxBitrate = 0;
- uint32_t avgBitrate = 0;
- esds.getBitRate(&maxBitrate, &avgBitrate);
- if (maxBitrate > 0 && maxBitrate < INT32_MAX) {
- mLastTrack->meta->setInt32(kKeyMaxBitRate, (int32_t)maxBitrate);
- }
- if (avgBitrate > 0 && avgBitrate < INT32_MAX) {
- mLastTrack->meta->setInt32(kKeyBitRate, (int32_t)avgBitrate);
- }
- }
-
- const uint8_t *csd;
- size_t csd_size;
- if (esds.getCodecSpecificInfo(
- (const void **)&csd, &csd_size) != OK) {
- return ERROR_MALFORMED;
- }
-
- if (kUseHexDump) {
- printf("ESD of size %zu\n", csd_size);
- hexdump(csd, csd_size);
- }
-
- if (csd_size == 0) {
- // There's no further information, i.e. no codec specific data
- // Let's assume that the information provided in the mpeg4 headers
- // is accurate and hope for the best.
-
- return OK;
- }
-
- if (csd_size < 2) {
- return ERROR_MALFORMED;
- }
-
- static uint32_t kSamplingRate[] = {
- 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
- 16000, 12000, 11025, 8000, 7350
- };
-
- ABitReader br(csd, csd_size);
- uint32_t objectType = br.getBits(5);
-
-    if (objectType == 31) {  // AOT_ESCAPE => actual object type in additional 6 bits
- objectType = 32 + br.getBits(6);
- }
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- //keep AOT type
- mLastTrack->meta->setInt32(kKeyAACAOT, objectType);
-
- uint32_t freqIndex = br.getBits(4);
-
- int32_t sampleRate = 0;
- int32_t numChannels = 0;
- if (freqIndex == 15) {
- if (br.numBitsLeft() < 28) return ERROR_MALFORMED;
- sampleRate = br.getBits(24);
- numChannels = br.getBits(4);
- } else {
- if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
- numChannels = br.getBits(4);
-
- if (freqIndex == 13 || freqIndex == 14) {
- return ERROR_MALFORMED;
- }
-
- sampleRate = kSamplingRate[freqIndex];
- }
-
- if (objectType == AOT_SBR || objectType == AOT_PS) {//SBR specific config per 14496-3 table 1.13
- if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
- uint32_t extFreqIndex = br.getBits(4);
- int32_t extSampleRate __unused;
- if (extFreqIndex == 15) {
- if (csd_size < 8) {
- return ERROR_MALFORMED;
- }
- if (br.numBitsLeft() < 24) return ERROR_MALFORMED;
- extSampleRate = br.getBits(24);
- } else {
- if (extFreqIndex == 13 || extFreqIndex == 14) {
- return ERROR_MALFORMED;
- }
- extSampleRate = kSamplingRate[extFreqIndex];
- }
- //TODO: save the extension sampling rate value in meta data =>
- // mLastTrack->meta->setInt32(kKeyExtSampleRate, extSampleRate);
- }
-
- switch (numChannels) {
- // values defined in 14496-3_2009 amendment-4 Table 1.19 - Channel Configuration
- case 0:
- case 1:// FC
- case 2:// FL FR
- case 3:// FC, FL FR
- case 4:// FC, FL FR, RC
- case 5:// FC, FL FR, SL SR
- case 6:// FC, FL FR, SL SR, LFE
- //numChannels already contains the right value
- break;
- case 11:// FC, FL FR, SL SR, RC, LFE
- numChannels = 7;
- break;
- case 7: // FC, FCL FCR, FL FR, SL SR, LFE
- case 12:// FC, FL FR, SL SR, RL RR, LFE
- case 14:// FC, FL FR, SL SR, LFE, FHL FHR
- numChannels = 8;
- break;
- default:
- return ERROR_UNSUPPORTED;
- }
-
- {
- if (objectType == AOT_SBR || objectType == AOT_PS) {
- if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
- objectType = br.getBits(5);
-
- if (objectType == AOT_ESCAPE) {
- if (br.numBitsLeft() < 6) return ERROR_MALFORMED;
- objectType = 32 + br.getBits(6);
- }
- }
- if (objectType == AOT_AAC_LC || objectType == AOT_ER_AAC_LC ||
- objectType == AOT_ER_AAC_LD || objectType == AOT_ER_AAC_SCAL ||
- objectType == AOT_ER_BSAC) {
- if (br.numBitsLeft() < 2) return ERROR_MALFORMED;
- const int32_t frameLengthFlag __unused = br.getBits(1);
-
- const int32_t dependsOnCoreCoder = br.getBits(1);
-
- if (dependsOnCoreCoder ) {
- if (br.numBitsLeft() < 14) return ERROR_MALFORMED;
- const int32_t coreCoderDelay __unused = br.getBits(14);
- }
-
- int32_t extensionFlag = -1;
- if (br.numBitsLeft() > 0) {
- extensionFlag = br.getBits(1);
- } else {
- switch (objectType) {
- // 14496-3 4.5.1.1 extensionFlag
- case AOT_AAC_LC:
- extensionFlag = 0;
- break;
- case AOT_ER_AAC_LC:
- case AOT_ER_AAC_SCAL:
- case AOT_ER_BSAC:
- case AOT_ER_AAC_LD:
- extensionFlag = 1;
- break;
- default:
- return ERROR_MALFORMED;
- break;
- }
- ALOGW("csd missing extension flag; assuming %d for object type %u.",
- extensionFlag, objectType);
- }
-
- if (numChannels == 0) {
- int32_t channelsEffectiveNum = 0;
- int32_t channelsNum = 0;
- if (br.numBitsLeft() < 32) {
- return ERROR_MALFORMED;
- }
- const int32_t ElementInstanceTag __unused = br.getBits(4);
- const int32_t Profile __unused = br.getBits(2);
- const int32_t SamplingFrequencyIndex __unused = br.getBits(4);
- const int32_t NumFrontChannelElements = br.getBits(4);
- const int32_t NumSideChannelElements = br.getBits(4);
- const int32_t NumBackChannelElements = br.getBits(4);
- const int32_t NumLfeChannelElements = br.getBits(2);
- const int32_t NumAssocDataElements __unused = br.getBits(3);
- const int32_t NumValidCcElements __unused = br.getBits(4);
-
- const int32_t MonoMixdownPresent = br.getBits(1);
-
- if (MonoMixdownPresent != 0) {
- if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
- const int32_t MonoMixdownElementNumber __unused = br.getBits(4);
- }
-
- if (br.numBitsLeft() < 1) return ERROR_MALFORMED;
- const int32_t StereoMixdownPresent = br.getBits(1);
- if (StereoMixdownPresent != 0) {
- if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
- const int32_t StereoMixdownElementNumber __unused = br.getBits(4);
- }
-
- if (br.numBitsLeft() < 1) return ERROR_MALFORMED;
- const int32_t MatrixMixdownIndexPresent = br.getBits(1);
- if (MatrixMixdownIndexPresent != 0) {
- if (br.numBitsLeft() < 3) return ERROR_MALFORMED;
- const int32_t MatrixMixdownIndex __unused = br.getBits(2);
- const int32_t PseudoSurroundEnable __unused = br.getBits(1);
- }
-
- int i;
- for (i=0; i < NumFrontChannelElements; i++) {
- if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
- const int32_t FrontElementIsCpe = br.getBits(1);
- const int32_t FrontElementTagSelect __unused = br.getBits(4);
- channelsNum += FrontElementIsCpe ? 2 : 1;
- }
-
- for (i=0; i < NumSideChannelElements; i++) {
- if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
- const int32_t SideElementIsCpe = br.getBits(1);
- const int32_t SideElementTagSelect __unused = br.getBits(4);
- channelsNum += SideElementIsCpe ? 2 : 1;
- }
-
- for (i=0; i < NumBackChannelElements; i++) {
- if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
- const int32_t BackElementIsCpe = br.getBits(1);
- const int32_t BackElementTagSelect __unused = br.getBits(4);
- channelsNum += BackElementIsCpe ? 2 : 1;
- }
- channelsEffectiveNum = channelsNum;
-
- for (i=0; i < NumLfeChannelElements; i++) {
- if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
- const int32_t LfeElementTagSelect __unused = br.getBits(4);
- channelsNum += 1;
- }
- ALOGV("mpeg4 audio channelsNum = %d", channelsNum);
- ALOGV("mpeg4 audio channelsEffectiveNum = %d", channelsEffectiveNum);
- numChannels = channelsNum;
- }
- }
- }
-
- if (numChannels == 0) {
- return ERROR_UNSUPPORTED;
- }
-
- if (mLastTrack == NULL)
- return ERROR_MALFORMED;
-
- int32_t prevSampleRate;
- CHECK(mLastTrack->meta->findInt32(kKeySampleRate, &prevSampleRate));
-
- if (prevSampleRate != sampleRate) {
- ALOGV("mpeg4 audio sample rate different from previous setting. "
- "was: %d, now: %d", prevSampleRate, sampleRate);
- }
-
- mLastTrack->meta->setInt32(kKeySampleRate, sampleRate);
-
- int32_t prevChannelCount;
- CHECK(mLastTrack->meta->findInt32(kKeyChannelCount, &prevChannelCount));
-
- if (prevChannelCount != numChannels) {
- ALOGV("mpeg4 audio channel count different from previous setting. "
- "was: %d, now: %d", prevChannelCount, numChannels);
- }
-
- mLastTrack->meta->setInt32(kKeyChannelCount, numChannels);
-
- return OK;
-}
-
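updateAudioTrackInfoFromESDS_MPEG4Audio above walks the AudioSpecificConfig with a bit reader. For the common two-byte configuration the layout is simply 5 bits of audioObjectType, 4 bits of samplingFrequencyIndex and 4 bits of channelConfiguration. A minimal sketch of that case, deliberately ignoring the escape and SBR/PS extensions handled above; decodeMinimalASC is an illustrative name.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Decode the first 13 bits of an AudioSpecificConfig (ISO/IEC 14496-3):
// 5-bit audioObjectType, 4-bit samplingFrequencyIndex, 4-bit channelConfig.
static bool decodeMinimalASC(const uint8_t *csd, size_t csd_size,
                             uint32_t *objectType, int32_t *sampleRate,
                             int32_t *numChannels) {
    static const int32_t kSamplingRate[] = {
        96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
        16000, 12000, 11025, 8000, 7350
    };
    if (csd_size < 2) {
        return false;
    }
    uint16_t bits = (uint16_t)((csd[0] << 8) | csd[1]);
    *objectType = (bits >> 11) & 0x1f;        // top 5 bits
    uint32_t freqIndex = (bits >> 7) & 0x0f;  // next 4 bits
    *numChannels = (bits >> 3) & 0x0f;        // next 4 bits
    if (freqIndex >= sizeof(kSamplingRate) / sizeof(kSamplingRate[0])) {
        return false;  // 13/14 are reserved, 15 means an explicit 24-bit rate
    }
    *sampleRate = kSamplingRate[freqIndex];
    return true;
}

int main() {
    // 0x12 0x10 is the classic "AAC LC, 44100 Hz, stereo" configuration.
    const uint8_t csd[] = { 0x12, 0x10 };
    uint32_t aot; int32_t rate, channels;
    if (decodeMinimalASC(csd, sizeof(csd), &aot, &rate, &channels)) {
        printf("AOT %u, %d Hz, %d ch\n", aot, rate, channels);
    }
    return 0;
}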
-////////////////////////////////////////////////////////////////////////////////
-
-MPEG4Source::MPEG4Source(
- const sp<MPEG4Extractor> &owner,
- const sp<MetaData> &format,
- const sp<DataSource> &dataSource,
- int32_t timeScale,
- const sp<SampleTable> &sampleTable,
- Vector<SidxEntry> &sidx,
- const Trex *trex,
- off64_t firstMoofOffset,
- const sp<ItemTable> &itemTable)
- : mOwner(owner),
- mFormat(format),
- mDataSource(dataSource),
- mTimescale(timeScale),
- mSampleTable(sampleTable),
- mCurrentSampleIndex(0),
- mCurrentFragmentIndex(0),
- mSegments(sidx),
- mTrex(trex),
- mFirstMoofOffset(firstMoofOffset),
- mCurrentMoofOffset(firstMoofOffset),
- mNextMoofOffset(-1),
- mCurrentTime(0),
- mCurrentSampleInfoAllocSize(0),
- mCurrentSampleInfoSizes(NULL),
- mCurrentSampleInfoOffsetsAllocSize(0),
- mCurrentSampleInfoOffsets(NULL),
- mIsAVC(false),
- mIsHEVC(false),
- mNALLengthSize(0),
- mStarted(false),
- mGroup(NULL),
- mBuffer(NULL),
- mWantsNALFragments(false),
- mSrcBuffer(NULL),
- mIsHEIF(itemTable != NULL),
- mItemTable(itemTable) {
-
- memset(&mTrackFragmentHeaderInfo, 0, sizeof(mTrackFragmentHeaderInfo));
-
- mFormat->findInt32(kKeyCryptoMode, &mCryptoMode);
- mDefaultIVSize = 0;
- mFormat->findInt32(kKeyCryptoDefaultIVSize, &mDefaultIVSize);
- uint32_t keytype;
- const void *key;
- size_t keysize;
- if (mFormat->findData(kKeyCryptoKey, &keytype, &key, &keysize)) {
- CHECK(keysize <= 16);
- memset(mCryptoKey, 0, 16);
- memcpy(mCryptoKey, key, keysize);
- }
-
- const char *mime;
- bool success = mFormat->findCString(kKeyMIMEType, &mime);
- CHECK(success);
-
- mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
- mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
-
- if (mIsAVC) {
- uint32_t type;
- const void *data;
- size_t size;
- CHECK(format->findData(kKeyAVCC, &type, &data, &size));
-
- const uint8_t *ptr = (const uint8_t *)data;
-
- CHECK(size >= 7);
- CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
-
- // The number of bytes used to encode the length of a NAL unit.
- mNALLengthSize = 1 + (ptr[4] & 3);
- } else if (mIsHEVC) {
- uint32_t type;
- const void *data;
- size_t size;
- CHECK(format->findData(kKeyHVCC, &type, &data, &size));
-
- const uint8_t *ptr = (const uint8_t *)data;
-
- CHECK(size >= 22);
- CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
-
- mNALLengthSize = 1 + (ptr[14 + 7] & 3);
- }
-
- CHECK(format->findInt32(kKeyTrackID, &mTrackId));
-
-}
-
-status_t MPEG4Source::init() {
- if (mFirstMoofOffset != 0) {
- off64_t offset = mFirstMoofOffset;
- return parseChunk(&offset);
- }
- return OK;
-}
-
-MPEG4Source::~MPEG4Source() {
- if (mStarted) {
- stop();
- }
- free(mCurrentSampleInfoSizes);
- free(mCurrentSampleInfoOffsets);
-}
-
-status_t MPEG4Source::start(MetaData *params) {
- Mutex::Autolock autoLock(mLock);
-
- CHECK(!mStarted);
-
- int32_t val;
- if (params && params->findInt32(kKeyWantsNALFragments, &val)
- && val != 0) {
- mWantsNALFragments = true;
- } else {
- mWantsNALFragments = false;
- }
-
- int32_t tmp;
- CHECK(mFormat->findInt32(kKeyMaxInputSize, &tmp));
- size_t max_size = tmp;
-
- // A somewhat arbitrary limit that should be sufficient for 8k video frames
- // If you see the message below for a valid input stream: increase the limit
- const size_t kMaxBufferSize = 64 * 1024 * 1024;
- if (max_size > kMaxBufferSize) {
- ALOGE("bogus max input size: %zu > %zu", max_size, kMaxBufferSize);
- return ERROR_MALFORMED;
- }
- if (max_size == 0) {
- ALOGE("zero max input size");
- return ERROR_MALFORMED;
- }
-
- // Allow up to kMaxBuffers, but not if the total exceeds kMaxBufferSize.
- const size_t kMaxBuffers = 8;
- const size_t buffers = min(kMaxBufferSize / max_size, kMaxBuffers);
- mGroup = new MediaBufferGroup(buffers, max_size);
- mSrcBuffer = new (std::nothrow) uint8_t[max_size];
- if (mSrcBuffer == NULL) {
- // file probably specified a bad max size
- delete mGroup;
- mGroup = NULL;
- return ERROR_MALFORMED;
- }
-
- mStarted = true;
-
- return OK;
-}
-
-status_t MPEG4Source::stop() {
- Mutex::Autolock autoLock(mLock);
-
- CHECK(mStarted);
-
- if (mBuffer != NULL) {
- mBuffer->release();
- mBuffer = NULL;
- }
-
- delete[] mSrcBuffer;
- mSrcBuffer = NULL;
-
- delete mGroup;
- mGroup = NULL;
-
- mStarted = false;
- mCurrentSampleIndex = 0;
-
- return OK;
-}
-
-status_t MPEG4Source::parseChunk(off64_t *offset) {
- uint32_t hdr[2];
- if (mDataSource->readAt(*offset, hdr, 8) < 8) {
- return ERROR_IO;
- }
- uint64_t chunk_size = ntohl(hdr[0]);
- uint32_t chunk_type = ntohl(hdr[1]);
- off64_t data_offset = *offset + 8;
-
- if (chunk_size == 1) {
- if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
- return ERROR_IO;
- }
- chunk_size = ntoh64(chunk_size);
- data_offset += 8;
-
- if (chunk_size < 16) {
- // The smallest valid chunk is 16 bytes long in this case.
- return ERROR_MALFORMED;
- }
- } else if (chunk_size < 8) {
- // The smallest valid chunk is 8 bytes long.
- return ERROR_MALFORMED;
- }
-
- char chunk[5];
- MakeFourCCString(chunk_type, chunk);
- ALOGV("MPEG4Source chunk %s @ %#llx", chunk, (long long)*offset);
-
- off64_t chunk_data_size = *offset + chunk_size - data_offset;
-
- switch(chunk_type) {
-
- case FOURCC('t', 'r', 'a', 'f'):
- case FOURCC('m', 'o', 'o', 'f'): {
- off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset;
- while (*offset < stop_offset) {
- status_t err = parseChunk(offset);
- if (err != OK) {
- return err;
- }
- }
- if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
- // *offset points to the box following this moof. Find the next moof from there.
-
- while (true) {
- if (mDataSource->readAt(*offset, hdr, 8) < 8) {
- // no more box to the end of file.
- break;
- }
- chunk_size = ntohl(hdr[0]);
- chunk_type = ntohl(hdr[1]);
- if (chunk_size == 1) {
- // ISO/IEC 14496-12:2012, 8.8.4 Movie Fragment Box, moof is a Box
- // which is defined in 4.2 Object Structure.
-                        // When chunk_size==1, 8 bytes follow as "largesize".
- if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
- return ERROR_IO;
- }
- chunk_size = ntoh64(chunk_size);
- if (chunk_size < 16) {
- // The smallest valid chunk is 16 bytes long in this case.
- return ERROR_MALFORMED;
- }
- } else if (chunk_size == 0) {
- // next box extends to end of file.
- } else if (chunk_size < 8) {
- // The smallest valid chunk is 8 bytes long in this case.
- return ERROR_MALFORMED;
- }
-
- if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
- mNextMoofOffset = *offset;
- break;
- } else if (chunk_size == 0) {
- break;
- }
- *offset += chunk_size;
- }
- }
- break;
- }
-
- case FOURCC('t', 'f', 'h', 'd'): {
- status_t err;
- if ((err = parseTrackFragmentHeader(data_offset, chunk_data_size)) != OK) {
- return err;
- }
- *offset += chunk_size;
- break;
- }
-
- case FOURCC('t', 'r', 'u', 'n'): {
- status_t err;
- if (mLastParsedTrackId == mTrackId) {
- if ((err = parseTrackFragmentRun(data_offset, chunk_data_size)) != OK) {
- return err;
- }
- }
-
- *offset += chunk_size;
- break;
- }
-
- case FOURCC('s', 'a', 'i', 'z'): {
- status_t err;
- if ((err = parseSampleAuxiliaryInformationSizes(data_offset, chunk_data_size)) != OK) {
- return err;
- }
- *offset += chunk_size;
- break;
- }
- case FOURCC('s', 'a', 'i', 'o'): {
- status_t err;
- if ((err = parseSampleAuxiliaryInformationOffsets(data_offset, chunk_data_size)) != OK) {
- return err;
- }
- *offset += chunk_size;
- break;
- }
-
- case FOURCC('m', 'd', 'a', 't'): {
- // parse DRM info if present
- ALOGV("MPEG4Source::parseChunk mdat");
-            // if saiz/saio was previously observed, do something with the sampleinfos
- *offset += chunk_size;
- break;
- }
-
- default: {
- *offset += chunk_size;
- break;
- }
- }
- return OK;
-}
-
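parseChunk above re-decodes each ISO base-media box header while chasing the next moof, including the size==1 escape where a 64-bit "largesize" follows the fourcc. A compact sketch of just that header decode over an in-memory buffer; BoxHeader, parseBoxHeader and beU32 are illustrative names.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct BoxHeader {
    uint64_t size;         // total box size in bytes (0 = extends to EOF)
    uint32_t type;         // fourcc
    uint32_t headerBytes;  // 8, or 16 when the 64-bit "largesize" form is used
};

static uint64_t beU32(const uint8_t *p) {
    return ((uint64_t)p[0] << 24) | ((uint64_t)p[1] << 16) |
           ((uint64_t)p[2] << 8) | (uint64_t)p[3];
}

// Decode an ISO/IEC 14496-12 box header from a buffer of at least 16 bytes,
// rejecting the same undersized boxes the extractor treats as malformed.
static bool parseBoxHeader(const uint8_t *p, BoxHeader *out) {
    uint64_t size = beU32(p);
    out->type = (uint32_t)beU32(p + 4);
    out->headerBytes = 8;
    if (size == 1) {
        // size==1: an 8-byte "largesize" follows the fourcc.
        size = (beU32(p + 8) << 32) | beU32(p + 12);
        out->headerBytes = 16;
        if (size < 16) return false;   // smallest valid box in this form
    } else if (size != 0 && size < 8) {
        return false;                  // smallest valid compact box
    }
    out->size = size;
    return true;
}

int main() {
    // A 16-byte 'free' box: 00 00 00 10 'f' 'r' 'e' 'e' followed by padding.
    const uint8_t buf[16] = { 0, 0, 0, 0x10, 'f', 'r', 'e', 'e' };
    BoxHeader hdr;
    if (parseBoxHeader(buf, &hdr)) {
        printf("size %llu, header %u bytes\n",
               (unsigned long long)hdr.size, hdr.headerBytes);
    }
    return 0;
}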
-status_t MPEG4Source::parseSampleAuxiliaryInformationSizes(
- off64_t offset, off64_t /* size */) {
- ALOGV("parseSampleAuxiliaryInformationSizes");
- // 14496-12 8.7.12
- uint8_t version;
- if (mDataSource->readAt(
- offset, &version, sizeof(version))
- < (ssize_t)sizeof(version)) {
- return ERROR_IO;
- }
-
- if (version != 0) {
- return ERROR_UNSUPPORTED;
- }
- offset++;
-
- uint32_t flags;
- if (!mDataSource->getUInt24(offset, &flags)) {
- return ERROR_IO;
- }
- offset += 3;
-
- if (flags & 1) {
- uint32_t tmp;
- if (!mDataSource->getUInt32(offset, &tmp)) {
- return ERROR_MALFORMED;
- }
- mCurrentAuxInfoType = tmp;
- offset += 4;
- if (!mDataSource->getUInt32(offset, &tmp)) {
- return ERROR_MALFORMED;
- }
- mCurrentAuxInfoTypeParameter = tmp;
- offset += 4;
- }
-
- uint8_t defsize;
- if (mDataSource->readAt(offset, &defsize, 1) != 1) {
- return ERROR_MALFORMED;
- }
- mCurrentDefaultSampleInfoSize = defsize;
- offset++;
-
- uint32_t smplcnt;
- if (!mDataSource->getUInt32(offset, &smplcnt)) {
- return ERROR_MALFORMED;
- }
- mCurrentSampleInfoCount = smplcnt;
- offset += 4;
-
- if (mCurrentDefaultSampleInfoSize != 0) {
- ALOGV("@@@@ using default sample info size of %d", mCurrentDefaultSampleInfoSize);
- return OK;
- }
- if (smplcnt > mCurrentSampleInfoAllocSize) {
- uint8_t * newPtr = (uint8_t*) realloc(mCurrentSampleInfoSizes, smplcnt);
- if (newPtr == NULL) {
- ALOGE("failed to realloc %u -> %u", mCurrentSampleInfoAllocSize, smplcnt);
- return NO_MEMORY;
- }
- mCurrentSampleInfoSizes = newPtr;
- mCurrentSampleInfoAllocSize = smplcnt;
- }
-
- mDataSource->readAt(offset, mCurrentSampleInfoSizes, smplcnt);
- return OK;
-}
-
-status_t MPEG4Source::parseSampleAuxiliaryInformationOffsets(
- off64_t offset, off64_t /* size */) {
- ALOGV("parseSampleAuxiliaryInformationOffsets");
- // 14496-12 8.7.13
- uint8_t version;
- if (mDataSource->readAt(offset, &version, sizeof(version)) != 1) {
- return ERROR_IO;
- }
- offset++;
-
- uint32_t flags;
- if (!mDataSource->getUInt24(offset, &flags)) {
- return ERROR_IO;
- }
- offset += 3;
-
- uint32_t entrycount;
- if (!mDataSource->getUInt32(offset, &entrycount)) {
- return ERROR_IO;
- }
- offset += 4;
- if (entrycount == 0) {
- return OK;
- }
- if (entrycount > UINT32_MAX / 8) {
- return ERROR_MALFORMED;
- }
-
- if (entrycount > mCurrentSampleInfoOffsetsAllocSize) {
- uint64_t *newPtr = (uint64_t *)realloc(mCurrentSampleInfoOffsets, entrycount * 8);
- if (newPtr == NULL) {
- ALOGE("failed to realloc %u -> %u", mCurrentSampleInfoOffsetsAllocSize, entrycount * 8);
- return NO_MEMORY;
- }
- mCurrentSampleInfoOffsets = newPtr;
- mCurrentSampleInfoOffsetsAllocSize = entrycount;
- }
- mCurrentSampleInfoOffsetCount = entrycount;
-
- if (mCurrentSampleInfoOffsets == NULL) {
- return OK;
- }
-
- for (size_t i = 0; i < entrycount; i++) {
- if (version == 0) {
- uint32_t tmp;
- if (!mDataSource->getUInt32(offset, &tmp)) {
- return ERROR_IO;
- }
- mCurrentSampleInfoOffsets[i] = tmp;
- offset += 4;
- } else {
- uint64_t tmp;
- if (!mDataSource->getUInt64(offset, &tmp)) {
- return ERROR_IO;
- }
- mCurrentSampleInfoOffsets[i] = tmp;
- offset += 8;
- }
- }
-
- // parse clear/encrypted data
-
- off64_t drmoffset = mCurrentSampleInfoOffsets[0]; // from moof
-
- drmoffset += mCurrentMoofOffset;
- int ivlength;
- CHECK(mFormat->findInt32(kKeyCryptoDefaultIVSize, &ivlength));
-
- // only 0, 8 and 16 byte initialization vectors are supported
- if (ivlength != 0 && ivlength != 8 && ivlength != 16) {
- ALOGW("unsupported IV length: %d", ivlength);
- return ERROR_MALFORMED;
- }
- // read CencSampleAuxiliaryDataFormats
- for (size_t i = 0; i < mCurrentSampleInfoCount; i++) {
- if (i >= mCurrentSamples.size()) {
- ALOGW("too few samples");
- break;
- }
- Sample *smpl = &mCurrentSamples.editItemAt(i);
-
- memset(smpl->iv, 0, 16);
- if (mDataSource->readAt(drmoffset, smpl->iv, ivlength) != ivlength) {
- return ERROR_IO;
- }
-
- drmoffset += ivlength;
-
- int32_t smplinfosize = mCurrentDefaultSampleInfoSize;
- if (smplinfosize == 0) {
- smplinfosize = mCurrentSampleInfoSizes[i];
- }
- if (smplinfosize > ivlength) {
- uint16_t numsubsamples;
- if (!mDataSource->getUInt16(drmoffset, &numsubsamples)) {
- return ERROR_IO;
- }
- drmoffset += 2;
- for (size_t j = 0; j < numsubsamples; j++) {
- uint16_t numclear;
- uint32_t numencrypted;
- if (!mDataSource->getUInt16(drmoffset, &numclear)) {
- return ERROR_IO;
- }
- drmoffset += 2;
- if (!mDataSource->getUInt32(drmoffset, &numencrypted)) {
- return ERROR_IO;
- }
- drmoffset += 4;
- smpl->clearsizes.add(numclear);
- smpl->encryptedsizes.add(numencrypted);
- }
- } else {
- smpl->clearsizes.add(0);
- smpl->encryptedsizes.add(smpl->size);
- }
- }
-
-
- return OK;
-}
-
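parseSampleAuxiliaryInformationOffsets above then reads one CencSampleAuxiliaryDataFormat entry per sample: a per-sample IV, followed by a count of (clear, encrypted) subsample pairs whenever the per-sample info size is larger than the IV. A sketch of that entry layout parsed from memory; CencSampleInfo, parseCencEntry and the be* helpers are illustrative, not extractor types.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct CencSampleInfo {
    uint8_t iv[16];
    std::vector<uint16_t> clearSizes;
    std::vector<uint32_t> encryptedSizes;
};

static uint16_t beU16(const uint8_t *p) {
    return (uint16_t)((p[0] << 8) | p[1]);
}
static uint32_t beU32(const uint8_t *p) {
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

// Parse one CencSampleAuxiliaryDataFormat entry (ISO/IEC 23001-7). When the
// per-sample info size is just the IV, the whole sample is a single
// encrypted run, matching the extractor's fallback.
static bool parseCencEntry(const uint8_t *p, size_t infoSize, size_t ivLength,
                           size_t sampleSize, CencSampleInfo *out) {
    if (ivLength > 16 || infoSize < ivLength) {
        return false;
    }
    memset(out->iv, 0, sizeof(out->iv));
    memcpy(out->iv, p, ivLength);
    if (infoSize == ivLength) {
        out->clearSizes.push_back(0);
        out->encryptedSizes.push_back((uint32_t)sampleSize);
        return true;
    }
    if (infoSize < ivLength + 2) {
        return false;
    }
    uint16_t numSubsamples = beU16(p + ivLength);
    if (infoSize < ivLength + 2 + (size_t)numSubsamples * 6) {
        return false;
    }
    const uint8_t *q = p + ivLength + 2;
    for (uint16_t i = 0; i < numSubsamples; ++i) {
        out->clearSizes.push_back(beU16(q));
        out->encryptedSizes.push_back(beU32(q + 2));
        q += 6;
    }
    return true;
}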
-status_t MPEG4Source::parseTrackFragmentHeader(off64_t offset, off64_t size) {
-
- if (size < 8) {
- return -EINVAL;
- }
-
- uint32_t flags;
- if (!mDataSource->getUInt32(offset, &flags)) { // actually version + flags
- return ERROR_MALFORMED;
- }
-
- if (flags & 0xff000000) {
- return -EINVAL;
- }
-
- if (!mDataSource->getUInt32(offset + 4, (uint32_t*)&mLastParsedTrackId)) {
- return ERROR_MALFORMED;
- }
-
- if (mLastParsedTrackId != mTrackId) {
- // this is not the right track, skip it
- return OK;
- }
-
- mTrackFragmentHeaderInfo.mFlags = flags;
- mTrackFragmentHeaderInfo.mTrackID = mLastParsedTrackId;
- offset += 8;
- size -= 8;
-
- ALOGV("fragment header: %08x %08x", flags, mTrackFragmentHeaderInfo.mTrackID);
-
- if (flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent) {
- if (size < 8) {
- return -EINVAL;
- }
-
- if (!mDataSource->getUInt64(offset, &mTrackFragmentHeaderInfo.mBaseDataOffset)) {
- return ERROR_MALFORMED;
- }
- offset += 8;
- size -= 8;
- }
-
- if (flags & TrackFragmentHeaderInfo::kSampleDescriptionIndexPresent) {
- if (size < 4) {
- return -EINVAL;
- }
-
- if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mSampleDescriptionIndex)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- size -= 4;
- }
-
- if (flags & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
- if (size < 4) {
- return -EINVAL;
- }
-
- if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleDuration)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- size -= 4;
- }
-
- if (flags & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
- if (size < 4) {
- return -EINVAL;
- }
-
- if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleSize)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- size -= 4;
- }
-
- if (flags & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
- if (size < 4) {
- return -EINVAL;
- }
-
- if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleFlags)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- size -= 4;
- }
-
- if (!(flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent)) {
- mTrackFragmentHeaderInfo.mBaseDataOffset = mCurrentMoofOffset;
- }
-
- mTrackFragmentHeaderInfo.mDataOffset = 0;
- return OK;
-}
-
-status_t MPEG4Source::parseTrackFragmentRun(off64_t offset, off64_t size) {
-
- ALOGV("MPEG4Extractor::parseTrackFragmentRun");
- if (size < 8) {
- return -EINVAL;
- }
-
- enum {
- kDataOffsetPresent = 0x01,
- kFirstSampleFlagsPresent = 0x04,
- kSampleDurationPresent = 0x100,
- kSampleSizePresent = 0x200,
- kSampleFlagsPresent = 0x400,
- kSampleCompositionTimeOffsetPresent = 0x800,
- };
-
- uint32_t flags;
- if (!mDataSource->getUInt32(offset, &flags)) {
- return ERROR_MALFORMED;
- }
- // |version| only affects SampleCompositionTimeOffset field.
- // If version == 0, SampleCompositionTimeOffset is uint32_t;
- // Otherwise, SampleCompositionTimeOffset is int32_t.
- // Sample.compositionOffset is defined as int32_t.
- uint8_t version = flags >> 24;
- flags &= 0xffffff;
- ALOGV("fragment run version: 0x%02x, flags: 0x%06x", version, flags);
-
- if ((flags & kFirstSampleFlagsPresent) && (flags & kSampleFlagsPresent)) {
- // These two shall not be used together.
- return -EINVAL;
- }
-
- uint32_t sampleCount;
- if (!mDataSource->getUInt32(offset + 4, &sampleCount)) {
- return ERROR_MALFORMED;
- }
- offset += 8;
- size -= 8;
-
- uint64_t dataOffset = mTrackFragmentHeaderInfo.mDataOffset;
-
- uint32_t firstSampleFlags = 0;
-
- if (flags & kDataOffsetPresent) {
- if (size < 4) {
- return -EINVAL;
- }
-
- int32_t dataOffsetDelta;
- if (!mDataSource->getUInt32(offset, (uint32_t*)&dataOffsetDelta)) {
- return ERROR_MALFORMED;
- }
-
- dataOffset = mTrackFragmentHeaderInfo.mBaseDataOffset + dataOffsetDelta;
-
- offset += 4;
- size -= 4;
- }
-
- if (flags & kFirstSampleFlagsPresent) {
- if (size < 4) {
- return -EINVAL;
- }
-
- if (!mDataSource->getUInt32(offset, &firstSampleFlags)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- size -= 4;
- }
-
- uint32_t sampleDuration = 0, sampleSize = 0, sampleFlags = 0,
- sampleCtsOffset = 0;
-
- size_t bytesPerSample = 0;
- if (flags & kSampleDurationPresent) {
- bytesPerSample += 4;
- } else if (mTrackFragmentHeaderInfo.mFlags
- & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
- sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
- } else if (mTrex) {
- sampleDuration = mTrex->default_sample_duration;
- }
-
- if (flags & kSampleSizePresent) {
- bytesPerSample += 4;
- } else if (mTrackFragmentHeaderInfo.mFlags
- & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
- sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
- } else {
- sampleSize = mTrackFragmentHeaderInfo.mDefaultSampleSize;
- }
-
- if (flags & kSampleFlagsPresent) {
- bytesPerSample += 4;
- } else if (mTrackFragmentHeaderInfo.mFlags
- & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
- sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
- } else {
- sampleFlags = mTrackFragmentHeaderInfo.mDefaultSampleFlags;
- }
-
- if (flags & kSampleCompositionTimeOffsetPresent) {
- bytesPerSample += 4;
- } else {
- sampleCtsOffset = 0;
- }
-
- if (size < (off64_t)(sampleCount * bytesPerSample)) {
- return -EINVAL;
- }
-
- Sample tmp;
- for (uint32_t i = 0; i < sampleCount; ++i) {
- if (flags & kSampleDurationPresent) {
- if (!mDataSource->getUInt32(offset, &sampleDuration)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- }
-
- if (flags & kSampleSizePresent) {
- if (!mDataSource->getUInt32(offset, &sampleSize)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- }
-
- if (flags & kSampleFlagsPresent) {
- if (!mDataSource->getUInt32(offset, &sampleFlags)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- }
-
- if (flags & kSampleCompositionTimeOffsetPresent) {
- if (!mDataSource->getUInt32(offset, &sampleCtsOffset)) {
- return ERROR_MALFORMED;
- }
- offset += 4;
- }
-
- ALOGV("adding sample %d at offset 0x%08" PRIx64 ", size %u, duration %u, "
- " flags 0x%08x", i + 1,
- dataOffset, sampleSize, sampleDuration,
- (flags & kFirstSampleFlagsPresent) && i == 0
- ? firstSampleFlags : sampleFlags);
- tmp.offset = dataOffset;
- tmp.size = sampleSize;
- tmp.duration = sampleDuration;
- tmp.compositionOffset = sampleCtsOffset;
- mCurrentSamples.add(tmp);
-
- dataOffset += sampleSize;
- }
-
- mTrackFragmentHeaderInfo.mDataOffset = dataOffset;
-
- return OK;
-}
-
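parseTrackFragmentRun above sizes its bounds check from the optional per-sample fields advertised in the trun flags word; every present field adds four bytes per sample. A small sketch of that arithmetic, with constants mirroring the enum above.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Per ISO/IEC 14496-12 8.8.8, each of these trun flag bits adds one 32-bit
// field to every sample entry; the table takes sampleCount * bytesPerSample.
static size_t trunBytesPerSample(uint32_t flags) {
    enum {
        kSampleDurationPresent              = 0x100,
        kSampleSizePresent                  = 0x200,
        kSampleFlagsPresent                 = 0x400,
        kSampleCompositionTimeOffsetPresent = 0x800,
    };
    size_t bytes = 0;
    if (flags & kSampleDurationPresent)              bytes += 4;
    if (flags & kSampleSizePresent)                  bytes += 4;
    if (flags & kSampleFlagsPresent)                 bytes += 4;
    if (flags & kSampleCompositionTimeOffsetPresent) bytes += 4;
    return bytes;
}

int main() {
    // duration + size + composition offset present: 12 bytes per sample
    printf("%zu\n", trunBytesPerSample(0x100 | 0x200 | 0x800));
    return 0;
}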
-sp<MetaData> MPEG4Source::getFormat() {
- Mutex::Autolock autoLock(mLock);
-
- return mFormat;
-}
-
-size_t MPEG4Source::parseNALSize(const uint8_t *data) const {
- switch (mNALLengthSize) {
- case 1:
- return *data;
- case 2:
- return U16_AT(data);
- case 3:
- return ((size_t)data[0] << 16) | U16_AT(&data[1]);
- case 4:
- return U32_AT(data);
- }
-
- // This cannot happen, mNALLengthSize springs to life by adding 1 to
- // a 2-bit integer.
- CHECK(!"Should not be here.");
-
- return 0;
-}
-
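parseNALSize above reads the 1- to 4-byte big-endian length prefix that read() and fragmentedRead() later overwrite with an Annex-B start code. A minimal sketch of that rewrite over an in-memory sample, assuming the common 4-byte prefix; lengthPrefixedToAnnexB is a hypothetical helper.

#include <cstddef>
#include <cstdint>
#include <vector>

// Rewrite a length-prefixed AVC/HEVC sample into Annex-B form: every
// 4-byte big-endian NAL length is replaced by the 00 00 00 01 start code.
// Returns an empty vector when a length runs past the end of the sample.
static std::vector<uint8_t> lengthPrefixedToAnnexB(const uint8_t *src,
                                                   size_t size) {
    std::vector<uint8_t> out;
    size_t srcOffset = 0;
    while (srcOffset < size) {
        if (size - srcOffset < 4) {
            return {};
        }
        size_t nalLength = ((size_t)src[srcOffset] << 24) |
                           ((size_t)src[srcOffset + 1] << 16) |
                           ((size_t)src[srcOffset + 2] << 8) |
                            (size_t)src[srcOffset + 3];
        srcOffset += 4;
        if (nalLength > size - srcOffset) {
            return {};
        }
        static const uint8_t kStartCode[4] = { 0, 0, 0, 1 };
        out.insert(out.end(), kStartCode, kStartCode + 4);
        out.insert(out.end(), src + srcOffset, src + srcOffset + nalLength);
        srcOffset += nalLength;
    }
    return out;
}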
-status_t MPEG4Source::read(
- MediaBuffer **out, const ReadOptions *options) {
- Mutex::Autolock autoLock(mLock);
-
- CHECK(mStarted);
-
- if (options != nullptr && options->getNonBlocking() && !mGroup->has_buffers()) {
- *out = nullptr;
- return WOULD_BLOCK;
- }
-
- if (mFirstMoofOffset > 0) {
- return fragmentedRead(out, options);
- }
-
- *out = NULL;
-
- int64_t targetSampleTimeUs = -1;
-
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- if (options && options->getSeekTo(&seekTimeUs, &mode)) {
- if (mIsHEIF) {
- CHECK(mSampleTable == NULL);
- CHECK(mItemTable != NULL);
-
- status_t err;
- if (seekTimeUs >= 0) {
- err = mItemTable->findPrimaryImage(&mCurrentSampleIndex);
- } else {
- err = mItemTable->findThumbnail(&mCurrentSampleIndex);
- }
- if (err != OK) {
- return err;
- }
- } else {
- uint32_t findFlags = 0;
- switch (mode) {
- case ReadOptions::SEEK_PREVIOUS_SYNC:
- findFlags = SampleTable::kFlagBefore;
- break;
- case ReadOptions::SEEK_NEXT_SYNC:
- findFlags = SampleTable::kFlagAfter;
- break;
- case ReadOptions::SEEK_CLOSEST_SYNC:
- case ReadOptions::SEEK_CLOSEST:
- findFlags = SampleTable::kFlagClosest;
- break;
- default:
- CHECK(!"Should not be here.");
- break;
- }
-
- uint32_t sampleIndex;
- status_t err = mSampleTable->findSampleAtTime(
- seekTimeUs, 1000000, mTimescale,
- &sampleIndex, findFlags);
-
- if (mode == ReadOptions::SEEK_CLOSEST) {
- // We found the closest sample already, now we want the sync
- // sample preceding it (or the sample itself of course), even
- // if the subsequent sync sample is closer.
- findFlags = SampleTable::kFlagBefore;
- }
-
- uint32_t syncSampleIndex;
- if (err == OK) {
- err = mSampleTable->findSyncSampleNear(
- sampleIndex, &syncSampleIndex, findFlags);
- }
-
- uint32_t sampleTime;
- if (err == OK) {
- err = mSampleTable->getMetaDataForSample(
- sampleIndex, NULL, NULL, &sampleTime);
- }
-
- if (err != OK) {
- if (err == ERROR_OUT_OF_RANGE) {
- // An attempt to seek past the end of the stream would
- // normally cause this ERROR_OUT_OF_RANGE error. Propagating
- // this all the way to the MediaPlayer would cause abnormal
- // termination. Legacy behaviour appears to be to behave as if
- // we had seeked to the end of stream, ending normally.
- err = ERROR_END_OF_STREAM;
- }
- ALOGV("end of stream");
- return err;
- }
-
- if (mode == ReadOptions::SEEK_CLOSEST) {
- targetSampleTimeUs = (sampleTime * 1000000ll) / mTimescale;
- }
-
-#if 0
- uint32_t syncSampleTime;
- CHECK_EQ(OK, mSampleTable->getMetaDataForSample(
- syncSampleIndex, NULL, NULL, &syncSampleTime));
-
- ALOGI("seek to time %lld us => sample at time %lld us, "
- "sync sample at time %lld us",
- seekTimeUs,
- sampleTime * 1000000ll / mTimescale,
- syncSampleTime * 1000000ll / mTimescale);
-#endif
-
- mCurrentSampleIndex = syncSampleIndex;
- }
-
- if (mBuffer != NULL) {
- mBuffer->release();
- mBuffer = NULL;
- }
-
- // fall through
- }
-
- off64_t offset = 0;
- size_t size = 0;
- uint32_t cts, stts;
- bool isSyncSample;
- bool newBuffer = false;
- if (mBuffer == NULL) {
- newBuffer = true;
-
- status_t err;
- if (!mIsHEIF) {
- err = mSampleTable->getMetaDataForSample(
- mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample, &stts);
- } else {
- err = mItemTable->getImageOffsetAndSize(
- options && options->getSeekTo(&seekTimeUs, &mode) ?
- &mCurrentSampleIndex : NULL, &offset, &size);
-
- cts = stts = 0;
- isSyncSample = 0;
- ALOGV("image offset %lld, size %zu", (long long)offset, size);
- }
-
- if (err != OK) {
- return err;
- }
-
- err = mGroup->acquire_buffer(&mBuffer);
-
- if (err != OK) {
- CHECK(mBuffer == NULL);
- return err;
- }
- if (size > mBuffer->size()) {
- ALOGE("buffer too small: %zu > %zu", size, mBuffer->size());
- mBuffer->release();
- mBuffer = NULL;
- return ERROR_BUFFER_TOO_SMALL;
- }
- }
-
- if ((!mIsAVC && !mIsHEVC) || mWantsNALFragments) {
- if (newBuffer) {
- ssize_t num_bytes_read =
- mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
-
- if (num_bytes_read < (ssize_t)size) {
- mBuffer->release();
- mBuffer = NULL;
-
- return ERROR_IO;
- }
-
- CHECK(mBuffer != NULL);
- mBuffer->set_range(0, size);
- mBuffer->meta_data()->clear();
- mBuffer->meta_data()->setInt64(
- kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data()->setInt64(
- kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
-
- if (targetSampleTimeUs >= 0) {
- mBuffer->meta_data()->setInt64(
- kKeyTargetTime, targetSampleTimeUs);
- }
-
- if (isSyncSample) {
- mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
- }
-
- ++mCurrentSampleIndex;
- }
-
- if (!mIsAVC && !mIsHEVC) {
- *out = mBuffer;
- mBuffer = NULL;
-
- return OK;
- }
-
- // Each NAL unit is split up into its constituent fragments and
- // each one of them returned in its own buffer.
-
- CHECK(mBuffer->range_length() >= mNALLengthSize);
-
- const uint8_t *src =
- (const uint8_t *)mBuffer->data() + mBuffer->range_offset();
-
- size_t nal_size = parseNALSize(src);
- if (mNALLengthSize > SIZE_MAX - nal_size) {
- ALOGE("b/24441553, b/24445122");
- }
- if (mBuffer->range_length() - mNALLengthSize < nal_size) {
- ALOGE("incomplete NAL unit.");
-
- mBuffer->release();
- mBuffer = NULL;
-
- return ERROR_MALFORMED;
- }
-
- MediaBuffer *clone = mBuffer->clone();
- CHECK(clone != NULL);
- clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);
-
- CHECK(mBuffer != NULL);
- mBuffer->set_range(
- mBuffer->range_offset() + mNALLengthSize + nal_size,
- mBuffer->range_length() - mNALLengthSize - nal_size);
-
- if (mBuffer->range_length() == 0) {
- mBuffer->release();
- mBuffer = NULL;
- }
-
- *out = clone;
-
- return OK;
- } else {
- // Whole NAL units are returned but each fragment is prefixed by
- // the start code (0x00 00 00 01).
- ssize_t num_bytes_read = 0;
- int32_t drm = 0;
- bool usesDRM = (mFormat->findInt32(kKeyIsDRM, &drm) && drm != 0);
- if (usesDRM) {
- num_bytes_read =
- mDataSource->readAt(offset, (uint8_t*)mBuffer->data(), size);
- } else {
- num_bytes_read = mDataSource->readAt(offset, mSrcBuffer, size);
- }
-
- if (num_bytes_read < (ssize_t)size) {
- mBuffer->release();
- mBuffer = NULL;
-
- return ERROR_IO;
- }
-
- if (usesDRM) {
- CHECK(mBuffer != NULL);
- mBuffer->set_range(0, size);
-
- } else {
- uint8_t *dstData = (uint8_t *)mBuffer->data();
- size_t srcOffset = 0;
- size_t dstOffset = 0;
-
- while (srcOffset < size) {
- bool isMalFormed = !isInRange((size_t)0u, size, srcOffset, mNALLengthSize);
- size_t nalLength = 0;
- if (!isMalFormed) {
- nalLength = parseNALSize(&mSrcBuffer[srcOffset]);
- srcOffset += mNALLengthSize;
- isMalFormed = !isInRange((size_t)0u, size, srcOffset, nalLength);
- }
-
- if (isMalFormed) {
- ALOGE("Video is malformed");
- mBuffer->release();
- mBuffer = NULL;
- return ERROR_MALFORMED;
- }
-
- if (nalLength == 0) {
- continue;
- }
-
- if (dstOffset > SIZE_MAX - 4 ||
- dstOffset + 4 > SIZE_MAX - nalLength ||
- dstOffset + 4 + nalLength > mBuffer->size()) {
- ALOGE("b/27208621 : %zu %zu", dstOffset, mBuffer->size());
- android_errorWriteLog(0x534e4554, "27208621");
- mBuffer->release();
- mBuffer = NULL;
- return ERROR_MALFORMED;
- }
-
- dstData[dstOffset++] = 0;
- dstData[dstOffset++] = 0;
- dstData[dstOffset++] = 0;
- dstData[dstOffset++] = 1;
- memcpy(&dstData[dstOffset], &mSrcBuffer[srcOffset], nalLength);
- srcOffset += nalLength;
- dstOffset += nalLength;
- }
- CHECK_EQ(srcOffset, size);
- CHECK(mBuffer != NULL);
- mBuffer->set_range(0, dstOffset);
- }
-
- mBuffer->meta_data()->clear();
- mBuffer->meta_data()->setInt64(
- kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data()->setInt64(
- kKeyDuration, ((int64_t)stts * 1000000) / mTimescale);
-
- if (targetSampleTimeUs >= 0) {
- mBuffer->meta_data()->setInt64(
- kKeyTargetTime, targetSampleTimeUs);
- }
-
- if (mIsAVC) {
- uint32_t layerId = FindAVCLayerId(
- (const uint8_t *)mBuffer->data(), mBuffer->range_length());
- mBuffer->meta_data()->setInt32(kKeyTemporalLayerId, layerId);
- }
-
- if (isSyncSample) {
- mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
- }
-
- ++mCurrentSampleIndex;
-
- *out = mBuffer;
- mBuffer = NULL;
-
- return OK;
- }
-}
-
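Both read paths above convert composition timestamps from media-timescale ticks into the microsecond kKeyTime/kKeyDuration values attached to each buffer. The conversion is a plain 64-bit scale, as in this small sketch (ticksToUs is an illustrative name):

#include <cstdint>

// Convert a timestamp in media-timescale ticks to microseconds, widening to
// 64 bits first so large tick counts do not overflow the multiplication.
static int64_t ticksToUs(uint32_t ticks, uint32_t timescale) {
    return ((int64_t)ticks * 1000000) / timescale;
}
// Example: 90000 ticks at a 90 kHz timescale is exactly 1000000 us.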
-status_t MPEG4Source::fragmentedRead(
- MediaBuffer **out, const ReadOptions *options) {
-
- ALOGV("MPEG4Source::fragmentedRead");
-
- CHECK(mStarted);
-
- *out = NULL;
-
- int64_t targetSampleTimeUs = -1;
-
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- if (options && options->getSeekTo(&seekTimeUs, &mode)) {
-
- int numSidxEntries = mSegments.size();
- if (numSidxEntries != 0) {
- int64_t totalTime = 0;
- off64_t totalOffset = mFirstMoofOffset;
- for (int i = 0; i < numSidxEntries; i++) {
- const SidxEntry *se = &mSegments[i];
- if (totalTime + se->mDurationUs > seekTimeUs) {
- // The requested time is somewhere in this segment
- if ((mode == ReadOptions::SEEK_NEXT_SYNC && seekTimeUs > totalTime) ||
- (mode == ReadOptions::SEEK_CLOSEST_SYNC &&
- (seekTimeUs - totalTime) > (totalTime + se->mDurationUs - seekTimeUs))) {
- // requested next sync, or closest sync and it was closer to the end of
- // this segment
- totalTime += se->mDurationUs;
- totalOffset += se->mSize;
- }
- break;
- }
- totalTime += se->mDurationUs;
- totalOffset += se->mSize;
- }
- mCurrentMoofOffset = totalOffset;
- mNextMoofOffset = -1;
- mCurrentSamples.clear();
- mCurrentSampleIndex = 0;
- status_t err = parseChunk(&totalOffset);
- if (err != OK) {
- return err;
- }
- mCurrentTime = totalTime * mTimescale / 1000000ll;
- } else {
- // without sidx boxes, we can only seek to 0
- mCurrentMoofOffset = mFirstMoofOffset;
- mNextMoofOffset = -1;
- mCurrentSamples.clear();
- mCurrentSampleIndex = 0;
- off64_t tmp = mCurrentMoofOffset;
- status_t err = parseChunk(&tmp);
- if (err != OK) {
- return err;
- }
- mCurrentTime = 0;
- }
-
- if (mBuffer != NULL) {
- mBuffer->release();
- mBuffer = NULL;
- }
-
- // fall through
- }
-
- off64_t offset = 0;
- size_t size = 0;
- uint32_t cts = 0;
- bool isSyncSample = false;
- bool newBuffer = false;
- if (mBuffer == NULL) {
- newBuffer = true;
-
- if (mCurrentSampleIndex >= mCurrentSamples.size()) {
- // move to next fragment if there is one
- if (mNextMoofOffset <= mCurrentMoofOffset) {
- return ERROR_END_OF_STREAM;
- }
- off64_t nextMoof = mNextMoofOffset;
- mCurrentMoofOffset = nextMoof;
- mCurrentSamples.clear();
- mCurrentSampleIndex = 0;
- status_t err = parseChunk(&nextMoof);
- if (err != OK) {
- return err;
- }
- if (mCurrentSampleIndex >= mCurrentSamples.size()) {
- return ERROR_END_OF_STREAM;
- }
- }
-
- const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
- offset = smpl->offset;
- size = smpl->size;
- cts = mCurrentTime + smpl->compositionOffset;
- mCurrentTime += smpl->duration;
- isSyncSample = (mCurrentSampleIndex == 0); // XXX
-
- status_t err = mGroup->acquire_buffer(&mBuffer);
-
- if (err != OK) {
- CHECK(mBuffer == NULL);
- ALOGV("acquire_buffer returned %d", err);
- return err;
- }
- if (size > mBuffer->size()) {
- ALOGE("buffer too small: %zu > %zu", size, mBuffer->size());
- mBuffer->release();
- mBuffer = NULL;
- return ERROR_BUFFER_TOO_SMALL;
- }
- }
-
- const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
- const sp<MetaData> bufmeta = mBuffer->meta_data();
- bufmeta->clear();
- if (smpl->encryptedsizes.size()) {
- // store clear/encrypted lengths in metadata
- bufmeta->setData(kKeyPlainSizes, 0,
- smpl->clearsizes.array(), smpl->clearsizes.size() * 4);
- bufmeta->setData(kKeyEncryptedSizes, 0,
- smpl->encryptedsizes.array(), smpl->encryptedsizes.size() * 4);
- bufmeta->setData(kKeyCryptoIV, 0, smpl->iv, 16); // use 16 or the actual size?
- bufmeta->setInt32(kKeyCryptoDefaultIVSize, mDefaultIVSize);
- bufmeta->setInt32(kKeyCryptoMode, mCryptoMode);
- bufmeta->setData(kKeyCryptoKey, 0, mCryptoKey, 16);
- }
-
- if ((!mIsAVC && !mIsHEVC)|| mWantsNALFragments) {
- if (newBuffer) {
- if (!isInRange((size_t)0u, mBuffer->size(), size)) {
- mBuffer->release();
- mBuffer = NULL;
-
- ALOGE("fragmentedRead ERROR_MALFORMED size %zu", size);
- return ERROR_MALFORMED;
- }
-
- ssize_t num_bytes_read =
- mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
-
- if (num_bytes_read < (ssize_t)size) {
- mBuffer->release();
- mBuffer = NULL;
-
- ALOGE("i/o error");
- return ERROR_IO;
- }
-
- CHECK(mBuffer != NULL);
- mBuffer->set_range(0, size);
- mBuffer->meta_data()->setInt64(
- kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data()->setInt64(
- kKeyDuration, ((int64_t)smpl->duration * 1000000) / mTimescale);
-
- if (targetSampleTimeUs >= 0) {
- mBuffer->meta_data()->setInt64(
- kKeyTargetTime, targetSampleTimeUs);
- }
-
- if (mIsAVC) {
- uint32_t layerId = FindAVCLayerId(
- (const uint8_t *)mBuffer->data(), mBuffer->range_length());
- mBuffer->meta_data()->setInt32(kKeyTemporalLayerId, layerId);
- }
-
- if (isSyncSample) {
- mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
- }
-
- ++mCurrentSampleIndex;
- }
-
- if (!mIsAVC && !mIsHEVC) {
- *out = mBuffer;
- mBuffer = NULL;
-
- return OK;
- }
-
- // Each NAL unit is split up into its constituent fragments and
- // each one of them returned in its own buffer.
-
- CHECK(mBuffer->range_length() >= mNALLengthSize);
-
- const uint8_t *src =
- (const uint8_t *)mBuffer->data() + mBuffer->range_offset();
-
- size_t nal_size = parseNALSize(src);
- if (mNALLengthSize > SIZE_MAX - nal_size) {
- ALOGE("b/24441553, b/24445122");
- }
-
- if (mBuffer->range_length() - mNALLengthSize < nal_size) {
- ALOGE("incomplete NAL unit.");
-
- mBuffer->release();
- mBuffer = NULL;
-
- return ERROR_MALFORMED;
- }
-
- MediaBuffer *clone = mBuffer->clone();
- CHECK(clone != NULL);
- clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);
-
- CHECK(mBuffer != NULL);
- mBuffer->set_range(
- mBuffer->range_offset() + mNALLengthSize + nal_size,
- mBuffer->range_length() - mNALLengthSize - nal_size);
-
- if (mBuffer->range_length() == 0) {
- mBuffer->release();
- mBuffer = NULL;
- }
-
- *out = clone;
-
- return OK;
- } else {
- ALOGV("whole NAL");
- // Whole NAL units are returned but each fragment is prefixed by
- // the start code (0x00 00 00 01).
- ssize_t num_bytes_read = 0;
- int32_t drm = 0;
- bool usesDRM = (mFormat->findInt32(kKeyIsDRM, &drm) && drm != 0);
- void *data = NULL;
- bool isMalFormed = false;
- if (usesDRM) {
- if (mBuffer == NULL || !isInRange((size_t)0u, mBuffer->size(), size)) {
- isMalFormed = true;
- } else {
- data = mBuffer->data();
- }
- } else {
- int32_t max_size;
- if (mFormat == NULL
- || !mFormat->findInt32(kKeyMaxInputSize, &max_size)
- || !isInRange((size_t)0u, (size_t)max_size, size)) {
- isMalFormed = true;
- } else {
- data = mSrcBuffer;
- }
- }
-
- if (isMalFormed || data == NULL) {
- ALOGE("isMalFormed size %zu", size);
- if (mBuffer != NULL) {
- mBuffer->release();
- mBuffer = NULL;
- }
- return ERROR_MALFORMED;
- }
- num_bytes_read = mDataSource->readAt(offset, data, size);
-
- if (num_bytes_read < (ssize_t)size) {
- mBuffer->release();
- mBuffer = NULL;
-
- ALOGE("i/o error");
- return ERROR_IO;
- }
-
- if (usesDRM) {
- CHECK(mBuffer != NULL);
- mBuffer->set_range(0, size);
-
- } else {
- uint8_t *dstData = (uint8_t *)mBuffer->data();
- size_t srcOffset = 0;
- size_t dstOffset = 0;
-
- while (srcOffset < size) {
- isMalFormed = !isInRange((size_t)0u, size, srcOffset, mNALLengthSize);
- size_t nalLength = 0;
- if (!isMalFormed) {
- nalLength = parseNALSize(&mSrcBuffer[srcOffset]);
- srcOffset += mNALLengthSize;
- isMalFormed = !isInRange((size_t)0u, size, srcOffset, nalLength)
- || !isInRange((size_t)0u, mBuffer->size(), dstOffset, (size_t)4u)
- || !isInRange((size_t)0u, mBuffer->size(), dstOffset + 4, nalLength);
- }
-
- if (isMalFormed) {
- ALOGE("Video is malformed; nalLength %zu", nalLength);
- mBuffer->release();
- mBuffer = NULL;
- return ERROR_MALFORMED;
- }
-
- if (nalLength == 0) {
- continue;
- }
-
- if (dstOffset > SIZE_MAX - 4 ||
- dstOffset + 4 > SIZE_MAX - nalLength ||
- dstOffset + 4 + nalLength > mBuffer->size()) {
- ALOGE("b/26365349 : %zu %zu", dstOffset, mBuffer->size());
- android_errorWriteLog(0x534e4554, "26365349");
- mBuffer->release();
- mBuffer = NULL;
- return ERROR_MALFORMED;
- }
-
- dstData[dstOffset++] = 0;
- dstData[dstOffset++] = 0;
- dstData[dstOffset++] = 0;
- dstData[dstOffset++] = 1;
- memcpy(&dstData[dstOffset], &mSrcBuffer[srcOffset], nalLength);
- srcOffset += nalLength;
- dstOffset += nalLength;
- }
- CHECK_EQ(srcOffset, size);
- CHECK(mBuffer != NULL);
- mBuffer->set_range(0, dstOffset);
- }
-
- mBuffer->meta_data()->setInt64(
- kKeyTime, ((int64_t)cts * 1000000) / mTimescale);
- mBuffer->meta_data()->setInt64(
- kKeyDuration, ((int64_t)smpl->duration * 1000000) / mTimescale);
-
- if (targetSampleTimeUs >= 0) {
- mBuffer->meta_data()->setInt64(
- kKeyTargetTime, targetSampleTimeUs);
- }
-
- if (isSyncSample) {
- mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
- }
-
- ++mCurrentSampleIndex;
-
- *out = mBuffer;
- mBuffer = NULL;
-
- return OK;
- }
-}
-
-MPEG4Extractor::Track *MPEG4Extractor::findTrackByMimePrefix(
- const char *mimePrefix) {
- for (Track *track = mFirstTrack; track != NULL; track = track->next) {
- const char *mime;
- if (track->meta != NULL
- && track->meta->findCString(kKeyMIMEType, &mime)
- && !strncasecmp(mime, mimePrefix, strlen(mimePrefix))) {
- return track;
- }
- }
-
- return NULL;
-}
-
-void MPEG4Extractor::populateMetrics() {
- ALOGV("MPEG4Extractor::populateMetrics");
- // write into mAnalyticsItem
-}
-
-static bool LegacySniffMPEG4(
- const sp<DataSource> &source, String8 *mimeType, float *confidence) {
- uint8_t header[8];
-
- ssize_t n = source->readAt(4, header, sizeof(header));
- if (n < (ssize_t)sizeof(header)) {
- return false;
- }
-
- if (!memcmp(header, "ftyp3gp", 7) || !memcmp(header, "ftypmp42", 8)
- || !memcmp(header, "ftyp3gr6", 8) || !memcmp(header, "ftyp3gs6", 8)
- || !memcmp(header, "ftyp3ge6", 8) || !memcmp(header, "ftyp3gg6", 8)
- || !memcmp(header, "ftypisom", 8) || !memcmp(header, "ftypM4V ", 8)
- || !memcmp(header, "ftypM4A ", 8) || !memcmp(header, "ftypf4v ", 8)
- || !memcmp(header, "ftypkddi", 8) || !memcmp(header, "ftypM4VP", 8)
- || !memcmp(header, "ftypmif1", 8) || !memcmp(header, "ftypheic", 8)) {
- *mimeType = MEDIA_MIMETYPE_CONTAINER_MPEG4;
- *confidence = 0.4;
-
- return true;
- }
-
- return false;
-}
-
-static bool isCompatibleBrand(uint32_t fourcc) {
- static const uint32_t kCompatibleBrands[] = {
- FOURCC('i', 's', 'o', 'm'),
- FOURCC('i', 's', 'o', '2'),
- FOURCC('a', 'v', 'c', '1'),
- FOURCC('h', 'v', 'c', '1'),
- FOURCC('h', 'e', 'v', '1'),
- FOURCC('3', 'g', 'p', '4'),
- FOURCC('m', 'p', '4', '1'),
- FOURCC('m', 'p', '4', '2'),
- FOURCC('d', 'a', 's', 'h'),
-
- // Won't promise that the following file types can be played.
- // Just give these file types a chance.
- FOURCC('q', 't', ' ', ' '), // Apple's QuickTime
- FOURCC('M', 'S', 'N', 'V'), // Sony's PSP
-
- FOURCC('3', 'g', '2', 'a'), // 3GPP2
- FOURCC('3', 'g', '2', 'b'),
- FOURCC('m', 'i', 'f', '1'), // HEIF image
- FOURCC('h', 'e', 'i', 'c'), // HEIF image
- };
-
- for (size_t i = 0;
- i < sizeof(kCompatibleBrands) / sizeof(kCompatibleBrands[0]);
- ++i) {
- if (kCompatibleBrands[i] == fourcc) {
- return true;
- }
- }
-
- return false;
-}
-
-// Attempt to actually parse the 'ftyp' atom and determine if a suitable
-// compatible brand is present.
-// Also try to identify where this file's metadata ends
-// (end of the 'moov' atom) and report it to the caller as part of
-// the metadata.
-static bool BetterSniffMPEG4(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *meta) {
- // We scan up to 128 bytes to identify this file as an MP4.
- static const off64_t kMaxScanOffset = 128ll;
-
- off64_t offset = 0ll;
- bool foundGoodFileType = false;
- off64_t moovAtomEndOffset = -1ll;
- bool done = false;
-
- while (!done && offset < kMaxScanOffset) {
- uint32_t hdr[2];
- if (source->readAt(offset, hdr, 8) < 8) {
- return false;
- }
-
- uint64_t chunkSize = ntohl(hdr[0]);
- uint32_t chunkType = ntohl(hdr[1]);
- off64_t chunkDataOffset = offset + 8;
-
- if (chunkSize == 1) {
- if (source->readAt(offset + 8, &chunkSize, 8) < 8) {
- return false;
- }
-
- chunkSize = ntoh64(chunkSize);
- chunkDataOffset += 8;
-
- if (chunkSize < 16) {
- // The smallest valid chunk is 16 bytes long in this case.
- return false;
- }
-
- } else if (chunkSize < 8) {
- // The smallest valid chunk is 8 bytes long.
- return false;
- }
-
- // (data_offset - offset) is either 8 or 16
- off64_t chunkDataSize = chunkSize - (chunkDataOffset - offset);
- if (chunkDataSize < 0) {
- ALOGE("b/23540914");
- return false;
- }
-
- char chunkstring[5];
- MakeFourCCString(chunkType, chunkstring);
- ALOGV("saw chunk type %s, size %" PRIu64 " @ %lld", chunkstring, chunkSize, (long long)offset);
- switch (chunkType) {
- case FOURCC('f', 't', 'y', 'p'):
- {
- if (chunkDataSize < 8) {
- return false;
- }
-
- uint32_t numCompatibleBrands = (chunkDataSize - 8) / 4;
- for (size_t i = 0; i < numCompatibleBrands + 2; ++i) {
- if (i == 1) {
- // Skip this index, it refers to the minorVersion,
- // not a brand.
- continue;
- }
-
- uint32_t brand;
- if (source->readAt(
- chunkDataOffset + 4 * i, &brand, 4) < 4) {
- return false;
- }
-
- brand = ntohl(brand);
-
- if (isCompatibleBrand(brand)) {
- foundGoodFileType = true;
- break;
- }
- }
-
- if (!foundGoodFileType) {
- return false;
- }
-
- break;
- }
-
- case FOURCC('m', 'o', 'o', 'v'):
- {
- moovAtomEndOffset = offset + chunkSize;
-
- done = true;
- break;
- }
-
- default:
- break;
- }
-
- offset += chunkSize;
- }
-
- if (!foundGoodFileType) {
- return false;
- }
-
- *mimeType = MEDIA_MIMETYPE_CONTAINER_MPEG4;
- *confidence = 0.4f;
-
- if (moovAtomEndOffset >= 0) {
- *meta = new AMessage;
- (*meta)->setInt64("meta-data-size", moovAtomEndOffset);
-
- ALOGV("found metadata size: %lld", (long long)moovAtomEndOffset);
- }
-
- return true;
-}
-
-bool SniffMPEG4(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *meta) {
- if (BetterSniffMPEG4(source, mimeType, confidence, meta)) {
- return true;
- }
-
- if (LegacySniffMPEG4(source, mimeType, confidence)) {
- ALOGW("Identified supported mpeg4 through LegacySniffMPEG4.");
- return true;
- }
-
- return false;
-}
-
-} // namespace android
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 7786c4d..6ff3d78 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -32,23 +32,24 @@
#include <functional>
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/Utils.h>
#include <media/mediarecorder.h>
#include <cutils/properties.h>
#include "include/ESDS.h"
#include "include/HevcUtils.h"
-#include "include/avc_utils.h"
#ifndef __predict_false
#define __predict_false(exp) __builtin_expect((exp) != 0, 0)
@@ -82,6 +83,9 @@
static const char kMetaKey_TemporalLayerCount[] = "com.android.video.temporal_layers_count";
static const int kTimestampDebugCount = 10;
+static const int kItemIdBase = 10000;
+static const char kExifHeader[] = {'E', 'x', 'i', 'f', '\0', '\0'};
+static const int32_t kTiffHeaderOffset = htonl(sizeof(kExifHeader));
static const uint8_t kMandatoryHevcNalUnitTypes[3] = {
kHevcNalUnitTypeVps,
@@ -100,7 +104,7 @@
class MPEG4Writer::Track {
public:
- Track(MPEG4Writer *owner, const sp<IMediaSource> &source, size_t trackId);
+ Track(MPEG4Writer *owner, const sp<MediaSource> &source, size_t trackId);
~Track();
@@ -111,14 +115,20 @@
int64_t getDurationUs() const;
int64_t getEstimatedTrackSizeBytes() const;
+ int32_t getMetaSizeIncrease(int32_t angle, int32_t trackCount) const;
void writeTrackHeader(bool use32BitOffset = true);
int64_t getMinCttsOffsetTimeUs();
void bufferChunk(int64_t timestampUs);
bool isAvc() const { return mIsAvc; }
bool isHevc() const { return mIsHevc; }
+ bool isHeic() const { return mIsHeic; }
bool isAudio() const { return mIsAudio; }
bool isMPEG4() const { return mIsMPEG4; }
+ bool usePrefix() const { return mIsAvc || mIsHevc || mIsHeic; }
+ bool isExifData(const MediaBufferBase *buffer) const;
void addChunkOffset(off64_t offset);
+ void addItemOffsetAndSize(off64_t offset, size_t size, bool isExif);
+ void flushItemRefs();
int32_t getTrackId() const { return mTrackId; }
status_t dump(int fd, const Vector<String16>& args) const;
static const char *getFourCCForMime(const char *mime);
@@ -271,7 +281,7 @@
MPEG4Writer *mOwner;
sp<MetaData> mMeta;
- sp<IMediaSource> mSource;
+ sp<MediaSource> mSource;
volatile bool mDone;
volatile bool mPaused;
volatile bool mResumed;
@@ -280,6 +290,7 @@
bool mIsHevc;
bool mIsAudio;
bool mIsVideo;
+ bool mIsHeic;
bool mIsMPEG4;
bool mGotStartKeyFrame;
bool mIsMalformed;
@@ -346,6 +357,18 @@
int64_t mPreviousTrackTimeUs;
int64_t mTrackEveryTimeDurationUs;
+ int32_t mRotation;
+
+ Vector<uint16_t> mProperties;
+ ItemRefs mDimgRefs;
+ ItemRefs mCdscRefs;
+ uint16_t mImageItemId;
+ int32_t mIsPrimary;
+ int32_t mWidth, mHeight;
+ int32_t mTileWidth, mTileHeight;
+ int32_t mGridRows, mGridCols;
+ size_t mNumTiles, mTileIndex;
+
// Update the audio track's drift information.
void updateDriftTime(const sp<MetaData>& meta);
@@ -385,7 +408,6 @@
// Simple validation on the codec specific data
status_t checkCodecSpecificData() const;
- int32_t mRotation;
void updateTrackSizeEstimate();
void addOneStscTableEntry(size_t chunkId, size_t sampleId);
@@ -473,18 +495,24 @@
mUse32BitOffset = true;
mOffset = 0;
mMdatOffset = 0;
- mMoovBoxBuffer = NULL;
- mMoovBoxBufferOffset = 0;
- mWriteMoovBoxToMemory = false;
+ mInMemoryCache = NULL;
+ mInMemoryCacheOffset = 0;
+ mInMemoryCacheSize = 0;
+ mWriteBoxToMemory = false;
mFreeBoxOffset = 0;
mStreamableFile = false;
- mEstimatedMoovBoxSize = 0;
mTimeScale = -1;
+ mHasFileLevelMeta = false;
+ mPrimaryItemId = 0;
+ mAssociationEntryCount = 0;
+ mNumGrids = 0;
+ mHasRefs = false;
// Following variables only need to be set for the first recording session.
// And they will stay the same for all the recording sessions.
if (isFirstSession) {
mMoovExtraSize = 0;
+ mHasMoovBox = false;
mMetaKeys = new AMessage();
addDeviceMeta();
mLatitudex10000 = 0;
@@ -566,13 +594,15 @@
}
} else if (!strncasecmp(mime, "application/", 12)) {
return "mett";
+ } else if (!strcasecmp(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, mime)) {
+ return "heic";
} else {
ALOGE("Track (%s) other than video/audio/metadata is not supported", mime);
}
return NULL;
}
-status_t MPEG4Writer::addSource(const sp<IMediaSource> &source) {
+status_t MPEG4Writer::addSource(const sp<MediaSource> &source) {
Mutex::Autolock l(mLock);
if (mStarted) {
ALOGE("Attempt to add source AFTER recording is started");
@@ -594,6 +624,9 @@
Track *track = new Track(this, source, 1 + mTracks.size());
mTracks.push_back(track);
+ mHasMoovBox |= !track->isHeic();
+ mHasFileLevelMeta |= track->isHeic();
+
return OK;
}
@@ -655,6 +688,37 @@
#endif
}
+int64_t MPEG4Writer::estimateFileLevelMetaSize(MetaData *params) {
+ int32_t rotation;
+ if (!params || !params->findInt32(kKeyRotation, &rotation)) {
+ rotation = 0;
+ }
+
+ // base meta size
+ int64_t metaSize = 12 // meta fullbox header
+ + 33 // hdlr box
+ + 14 // pitm box
+ + 16 // iloc box (fixed size portion)
+ + 14 // iinf box (fixed size portion)
+ + 32 // iprp box (fixed size portion)
+ + 8 // idat box (when empty)
+ + 12 // iref box (when empty)
+ ;
+
+ for (List<Track *>::iterator it = mTracks.begin();
+ it != mTracks.end(); ++it) {
+ if ((*it)->isHeic()) {
+ metaSize += (*it)->getMetaSizeIncrease(rotation, mTracks.size());
+ }
+ }
+
+ ALOGV("estimated meta size: %lld", (long long) metaSize);
+
+ // Need at least 8-byte padding at the end, otherwise the left-over
+ // freebox may become malformed
+ return metaSize + 8;
+}
+
int64_t MPEG4Writer::estimateMoovBoxSize(int32_t bitRate) {
// This implementation is highly experimental/heuristic.
//
@@ -714,7 +778,11 @@
ALOGI("limits: %" PRId64 "/%" PRId64 " bytes/us, bit rate: %d bps and the"
" estimated moov size %" PRId64 " bytes",
mMaxFileSizeLimitBytes, mMaxFileDurationLimitUs, bitRate, size);
- return factor * size;
+
+ int64_t estimatedSize = factor * size;
+ CHECK_GE(estimatedSize, 8);
+
+ return estimatedSize;
}
status_t MPEG4Writer::start(MetaData *param) {
@@ -796,63 +864,70 @@
mMaxFileSizeLimitBytes >= kMinStreamableFileSizeInBytes);
/*
- * mWriteMoovBoxToMemory is true if the amount of data in moov box is
- * smaller than the reserved free space at the beginning of a file, AND
- * when the content of moov box is constructed. Note that video/audio
- * frame data is always written to the file but not in the memory.
+ * mWriteBoxToMemory is true if the amount of data in a file-level meta or
+ * moov box is smaller than the reserved free space at the beginning of a
+ * file, AND when the content of the box is constructed. Note that video/
+ * audio frame data is always written to the file but not in the memory.
*
- * Before stop()/reset() is called, mWriteMoovBoxToMemory is always
+ * Before stop()/reset() is called, mWriteBoxToMemory is always
* false. When reset() is called at the end of a recording session,
- * Moov box needs to be constructed.
+ * file-level meta and/or moov box needs to be constructed.
*
- * 1) Right before a moov box is constructed, mWriteMoovBoxToMemory
- * to set to mStreamableFile so that if
- * the file is intended to be streamable, it is set to true;
- * otherwise, it is set to false. When the value is set to false,
- * all the content of the moov box is written immediately to
+ * 1) Right before the box is constructed, mWriteBoxToMemory is set to
+ * mStreamableFile so that if the file is intended to be streamable, it
+ * is set to true; otherwise, it is set to false. When the value is set
+ * to false, all the content of that box is written immediately to
* the end of the file. When the value is set to true, all the
- * content of the moov box is written to an in-memory cache,
- * mMoovBoxBuffer, util the following condition happens. Note
+ * content of that box is written to an in-memory cache,
+ * mInMemoryCache, until the following condition happens. Note
* that the size of the in-memory cache is the same as the
* reserved free space at the beginning of the file.
*
- * 2) While the data of the moov box is written to an in-memory
+ * 2) While the data of the box is written to an in-memory
* cache, the data size is checked against the reserved space.
- * If the data size surpasses the reserved space, subsequent moov
- * data could no longer be hold in the in-memory cache. This also
+ * If the data size surpasses the reserved space, subsequent box data
+ * could no longer be held in the in-memory cache. This also
* indicates that the reserved space was too small. At this point,
- * _all_ moov data must be written to the end of the file.
- * mWriteMoovBoxToMemory must be set to false to direct the write
+ * _all_ subsequent box data must be written to the end of the file.
+ * mWriteBoxToMemory must be set to false to direct the write
* to the file.
*
- * 3) If the data size in moov box is smaller than the reserved
- * space after moov box is completely constructed, the in-memory
- * cache copy of the moov box is written to the reserved free
- * space. Thus, immediately after the moov is completedly
- * constructed, mWriteMoovBoxToMemory is always set to false.
+ * 3) If the data size in the box is smaller than the reserved
+ * space after the box is completely constructed, the in-memory
+ * cache copy of the box is written to the reserved free space.
+ * mWriteBoxToMemory is always set to false after all boxes that
+ * use the in-memory cache have been constructed.
*/
- mWriteMoovBoxToMemory = false;
- mMoovBoxBuffer = NULL;
- mMoovBoxBufferOffset = 0;
+ mWriteBoxToMemory = false;
+ mInMemoryCache = NULL;
+ mInMemoryCacheOffset = 0;
+
+
+ ALOGV("muxer starting: mHasMoovBox %d, mHasFileLevelMeta %d",
+ mHasMoovBox, mHasFileLevelMeta);
writeFtypBox(param);
mFreeBoxOffset = mOffset;
- if (mEstimatedMoovBoxSize == 0) {
+ if (mInMemoryCacheSize == 0) {
int32_t bitRate = -1;
- if (param) {
- param->findInt32(kKeyBitRate, &bitRate);
+ if (mHasFileLevelMeta) {
+ mInMemoryCacheSize += estimateFileLevelMetaSize(param);
}
- mEstimatedMoovBoxSize = estimateMoovBoxSize(bitRate);
+ if (mHasMoovBox) {
+ if (param) {
+ param->findInt32(kKeyBitRate, &bitRate);
+ }
+ mInMemoryCacheSize += estimateMoovBoxSize(bitRate);
+ }
}
- CHECK_GE(mEstimatedMoovBoxSize, 8);
if (mStreamableFile) {
// Reserve a 'free' box only for streamable file
lseek64(mFd, mFreeBoxOffset, SEEK_SET);
- writeInt32(mEstimatedMoovBoxSize);
+ writeInt32(mInMemoryCacheSize);
write("free", 4);
- mMdatOffset = mFreeBoxOffset + mEstimatedMoovBoxSize;
+ mMdatOffset = mFreeBoxOffset + mInMemoryCacheSize;
} else {
mMdatOffset = mOffset;
}
@@ -964,8 +1039,8 @@
mFd = -1;
mInitCheck = NO_INIT;
mStarted = false;
- free(mMoovBoxBuffer);
- mMoovBoxBuffer = NULL;
+ free(mInMemoryCache);
+ mInMemoryCache = NULL;
}
void MPEG4Writer::finishCurrentSession() {
@@ -1008,13 +1083,18 @@
status_t err = OK;
int64_t maxDurationUs = 0;
int64_t minDurationUs = 0x7fffffffffffffffLL;
+ int32_t nonImageTrackCount = 0;
for (List<Track *>::iterator it = mTracks.begin();
- it != mTracks.end(); ++it) {
+ it != mTracks.end(); ++it) {
status_t status = (*it)->stop(stopSource);
if (err == OK && status != OK) {
err = status;
}
+ // skip image tracks
+ if ((*it)->isHeic()) continue;
+ nonImageTrackCount++;
+
int64_t durationUs = (*it)->getDurationUs();
if (durationUs > maxDurationUs) {
maxDurationUs = durationUs;
@@ -1024,7 +1104,7 @@
}
}
- if (mTracks.size() > 1) {
+ if (nonImageTrackCount > 1) {
ALOGD("Duration from tracks range is [%" PRId64 ", %" PRId64 "] us",
minDurationUs, maxDurationUs);
}
@@ -1050,45 +1130,43 @@
}
lseek64(mFd, mOffset, SEEK_SET);
- // Construct moov box now
- mMoovBoxBufferOffset = 0;
- mWriteMoovBoxToMemory = mStreamableFile;
- if (mWriteMoovBoxToMemory) {
+ // Construct file-level meta and moov box now
+ mInMemoryCacheOffset = 0;
+ mWriteBoxToMemory = mStreamableFile;
+ if (mWriteBoxToMemory) {
// There is no need to allocate in-memory cache
- // for moov box if the file is not streamable.
+ // if the file is not streamable.
- mMoovBoxBuffer = (uint8_t *) malloc(mEstimatedMoovBoxSize);
- CHECK(mMoovBoxBuffer != NULL);
- }
- writeMoovBox(maxDurationUs);
-
- // mWriteMoovBoxToMemory could be set to false in
- // MPEG4Writer::write() method
- if (mWriteMoovBoxToMemory) {
- mWriteMoovBoxToMemory = false;
- // Content of the moov box is saved in the cache, and the in-memory
- // moov box needs to be written to the file in a single shot.
-
- CHECK_LE(mMoovBoxBufferOffset + 8, mEstimatedMoovBoxSize);
-
- // Moov box
- lseek64(mFd, mFreeBoxOffset, SEEK_SET);
- mOffset = mFreeBoxOffset;
- write(mMoovBoxBuffer, 1, mMoovBoxBufferOffset);
-
- // Free box
- lseek64(mFd, mOffset, SEEK_SET);
- writeInt32(mEstimatedMoovBoxSize - mMoovBoxBufferOffset);
- write("free", 4);
- } else {
- ALOGI("The mp4 file will not be streamable.");
+ mInMemoryCache = (uint8_t *) malloc(mInMemoryCacheSize);
+ CHECK(mInMemoryCache != NULL);
}
- // Free in-memory cache for moov box
- if (mMoovBoxBuffer != NULL) {
- free(mMoovBoxBuffer);
- mMoovBoxBuffer = NULL;
- mMoovBoxBufferOffset = 0;
+ if (mHasFileLevelMeta) {
+ writeFileLevelMetaBox();
+ if (mWriteBoxToMemory) {
+ writeCachedBoxToFile("meta");
+ } else {
+ ALOGI("The file meta box is written at the end.");
+ }
+ }
+
+ if (mHasMoovBox) {
+ writeMoovBox(maxDurationUs);
+ // mWriteBoxToMemory could be set to false in
+ // MPEG4Writer::write() method
+ if (mWriteBoxToMemory) {
+ writeCachedBoxToFile("moov");
+ } else {
+ ALOGI("The mp4 file will not be streamable.");
+ }
+ }
+ mWriteBoxToMemory = false;
+
+ // Free in-memory cache for box writing
+ if (mInMemoryCache != NULL) {
+ free(mInMemoryCache);
+ mInMemoryCache = NULL;
+ mInMemoryCacheOffset = 0;
}
CHECK(mBoxes.empty());
@@ -1097,6 +1175,42 @@
return err;
}
+/*
+ * Writes currently cached box into file.
+ *
+ * Must be called while mWriteBoxToMemory is true, and will not modify
+ * mWriteBoxToMemory. After the call, remaining cache size will be
+ * reduced and buffer offset will be set to the beginning of the cache.
+ */
+void MPEG4Writer::writeCachedBoxToFile(const char *type) {
+ CHECK(mWriteBoxToMemory);
+
+ mWriteBoxToMemory = false;
+ // Content of the box is saved in the cache, and the in-memory
+ // box needs to be written to the file in a single shot.
+
+ CHECK_LE(mInMemoryCacheOffset + 8, mInMemoryCacheSize);
+
+ // Cached box
+ lseek64(mFd, mFreeBoxOffset, SEEK_SET);
+ mOffset = mFreeBoxOffset;
+ write(mInMemoryCache, 1, mInMemoryCacheOffset);
+
+ // Free box
+ lseek64(mFd, mOffset, SEEK_SET);
+ mFreeBoxOffset = mOffset;
+ writeInt32(mInMemoryCacheSize - mInMemoryCacheOffset);
+ write("free", 4);
+
+ // Rewind buffering to the beginning, and restore mWriteBoxToMemory flag
+ mInMemoryCacheSize -= mInMemoryCacheOffset;
+ mInMemoryCacheOffset = 0;
+ mWriteBoxToMemory = true;
+
+ ALOGV("dumped out %s box, estimated size remaining %lld",
+ type, (long long)mInMemoryCacheSize);
+}
+
uint32_t MPEG4Writer::getMpeg4Time() {
time_t now = time(NULL);
// MP4 file uses time counting seconds since midnight, Jan. 1, 1904
@@ -1141,14 +1255,16 @@
if (mAreGeoTagsAvailable) {
writeUdtaBox();
}
- writeMetaBox();
+ writeMoovLevelMetaBox();
// Loop through all the tracks to get the global time offset if there is
// any ctts table appears in a video track.
int64_t minCttsOffsetTimeUs = kMaxCttsOffsetTimeUs;
for (List<Track *>::iterator it = mTracks.begin();
it != mTracks.end(); ++it) {
- minCttsOffsetTimeUs =
- std::min(minCttsOffsetTimeUs, (*it)->getMinCttsOffsetTimeUs());
+ if (!(*it)->isHeic()) {
+ minCttsOffsetTimeUs =
+ std::min(minCttsOffsetTimeUs, (*it)->getMinCttsOffsetTimeUs());
+ }
}
ALOGI("Ajust the moov start time from %lld us -> %lld us",
(long long)mStartTimestampUs,
@@ -1158,7 +1274,9 @@
for (List<Track *>::iterator it = mTracks.begin();
it != mTracks.end(); ++it) {
- (*it)->writeTrackHeader(mUse32BitOffset);
+ if (!(*it)->isHeic()) {
+ (*it)->writeTrackHeader(mUse32BitOffset);
+ }
}
endBox(); // moov
}
@@ -1167,17 +1285,31 @@
beginBox("ftyp");
int32_t fileType;
- if (param && param->findInt32(kKeyFileType, &fileType) &&
- fileType != OUTPUT_FORMAT_MPEG_4) {
+ if (!param || !param->findInt32(kKeyFileType, &fileType)) {
+ fileType = OUTPUT_FORMAT_MPEG_4;
+ }
+ if (fileType != OUTPUT_FORMAT_MPEG_4 && fileType != OUTPUT_FORMAT_HEIF) {
writeFourcc("3gp4");
writeInt32(0);
writeFourcc("isom");
writeFourcc("3gp4");
} else {
- writeFourcc("mp42");
+ // Only write "heic" as major brand if the client specified HEIF
+ // AND we indeed receive some image heic tracks.
+ if (fileType == OUTPUT_FORMAT_HEIF && mHasFileLevelMeta) {
+ writeFourcc("heic");
+ } else {
+ writeFourcc("mp42");
+ }
writeInt32(0);
- writeFourcc("isom");
- writeFourcc("mp42");
+ if (mHasFileLevelMeta) {
+ writeFourcc("mif1");
+ writeFourcc("heic");
+ }
+ if (mHasMoovBox) {
+ writeFourcc("isom");
+ writeFourcc("mp42");
+ }
}
endBox();
@@ -1224,15 +1356,26 @@
mLock.unlock();
}
-off64_t MPEG4Writer::addSample_l(MediaBuffer *buffer) {
+off64_t MPEG4Writer::addSample_l(
+ MediaBuffer *buffer, bool usePrefix, bool isExif, size_t *bytesWritten) {
off64_t old_offset = mOffset;
- ::write(mFd,
- (const uint8_t *)buffer->data() + buffer->range_offset(),
- buffer->range_length());
+ if (usePrefix) {
+ addMultipleLengthPrefixedSamples_l(buffer);
+ } else {
+ if (isExif) {
+ ::write(mFd, &kTiffHeaderOffset, 4); // exif_tiff_header_offset field
+ mOffset += 4;
+ }
- mOffset += buffer->range_length();
+ ::write(mFd,
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+ mOffset += buffer->range_length();
+ }
+
+ *bytesWritten = mOffset - old_offset;
return old_offset;
}
@@ -1250,18 +1393,13 @@
}
}
-off64_t MPEG4Writer::addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer) {
- off64_t old_offset = mOffset;
-
- const size_t kExtensionNALSearchRange = 64; // bytes to look for non-VCL NALUs
-
+void MPEG4Writer::addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer) {
const uint8_t *dataStart = (const uint8_t *)buffer->data() + buffer->range_offset();
const uint8_t *currentNalStart = dataStart;
const uint8_t *nextNalStart;
const uint8_t *data = dataStart;
size_t nextNalSize;
- size_t searchSize = buffer->range_length() > kExtensionNALSearchRange ?
- kExtensionNALSearchRange : buffer->range_length();
+ size_t searchSize = buffer->range_length();
while (getNextNALUnit(&data, &searchSize, &nextNalStart,
&nextNalSize, true) == OK) {
@@ -1277,13 +1415,9 @@
buffer->set_range(buffer->range_offset() + currentNalOffset,
buffer->range_length() - currentNalOffset);
addLengthPrefixedSample_l(buffer);
-
- return old_offset;
}
-off64_t MPEG4Writer::addLengthPrefixedSample_l(MediaBuffer *buffer) {
- off64_t old_offset = mOffset;
-
+void MPEG4Writer::addLengthPrefixedSample_l(MediaBuffer *buffer) {
size_t length = buffer->range_length();
if (mUse4ByteNalLength) {
@@ -1311,40 +1445,35 @@
::write(mFd, (const uint8_t *)buffer->data() + buffer->range_offset(), length);
mOffset += length + 2;
}
-
- return old_offset;
}
size_t MPEG4Writer::write(
const void *ptr, size_t size, size_t nmemb) {
const size_t bytes = size * nmemb;
- if (mWriteMoovBoxToMemory) {
+ if (mWriteBoxToMemory) {
- off64_t moovBoxSize = 8 + mMoovBoxBufferOffset + bytes;
- if (moovBoxSize > mEstimatedMoovBoxSize) {
- // The reserved moov box at the beginning of the file
- // is not big enough. Moov box should be written to
- // the end of the file from now on, but not to the
- // in-memory cache.
+ off64_t boxSize = 8 + mInMemoryCacheOffset + bytes;
+ if (boxSize > mInMemoryCacheSize) {
+ // The reserved free space at the beginning of the file is not big
+ // enough. Boxes should be written to the end of the file from now
+ // on, but not to the in-memory cache.
- // We write partial moov box that is in the memory to
- // the file first.
+ // We write partial box that is in the memory to the file first.
for (List<off64_t>::iterator it = mBoxes.begin();
it != mBoxes.end(); ++it) {
(*it) += mOffset;
}
lseek64(mFd, mOffset, SEEK_SET);
- ::write(mFd, mMoovBoxBuffer, mMoovBoxBufferOffset);
+ ::write(mFd, mInMemoryCache, mInMemoryCacheOffset);
::write(mFd, ptr, bytes);
- mOffset += (bytes + mMoovBoxBufferOffset);
+ mOffset += (bytes + mInMemoryCacheOffset);
- // All subsequent moov box content will be written
- // to the end of the file.
- mWriteMoovBoxToMemory = false;
+ // All subsequent boxes will be written to the end of the file.
+ mWriteBoxToMemory = false;
} else {
- memcpy(mMoovBoxBuffer + mMoovBoxBufferOffset, ptr, bytes);
- mMoovBoxBufferOffset += bytes;
+ memcpy(mInMemoryCache + mInMemoryCacheOffset, ptr, bytes);
+ mInMemoryCacheOffset += bytes;
}
} else {
::write(mFd, ptr, size * nmemb);
@@ -1354,8 +1483,8 @@
}
void MPEG4Writer::beginBox(uint32_t id) {
- mBoxes.push_back(mWriteMoovBoxToMemory?
- mMoovBoxBufferOffset: mOffset);
+ mBoxes.push_back(mWriteBoxToMemory?
+ mInMemoryCacheOffset: mOffset);
writeInt32(0);
writeInt32(id);
@@ -1364,8 +1493,8 @@
void MPEG4Writer::beginBox(const char *fourcc) {
CHECK_EQ(strlen(fourcc), 4u);
- mBoxes.push_back(mWriteMoovBoxToMemory?
- mMoovBoxBufferOffset: mOffset);
+ mBoxes.push_back(mWriteBoxToMemory?
+ mInMemoryCacheOffset: mOffset);
writeInt32(0);
writeFourcc(fourcc);
@@ -1377,9 +1506,9 @@
off64_t offset = *--mBoxes.end();
mBoxes.erase(--mBoxes.end());
- if (mWriteMoovBoxToMemory) {
- int32_t x = htonl(mMoovBoxBufferOffset - offset);
- memcpy(mMoovBoxBuffer + offset, &x, 4);
+ if (mWriteBoxToMemory) {
+ int32_t x = htonl(mInMemoryCacheOffset - offset);
+ memcpy(mInMemoryCache + offset, &x, 4);
} else {
lseek64(mFd, offset, SEEK_SET);
writeInt32(mOffset - offset);
@@ -1538,7 +1667,7 @@
if (mMaxFileSizeLimitBytes == 0) {
return false;
}
- int64_t nTotalBytesEstimate = static_cast<int64_t>(mEstimatedMoovBoxSize);
+ int64_t nTotalBytesEstimate = static_cast<int64_t>(mInMemoryCacheSize);
for (List<Track *>::iterator it = mTracks.begin();
it != mTracks.end(); ++it) {
nTotalBytesEstimate += (*it)->getEstimatedTrackSizeBytes();
@@ -1561,7 +1690,7 @@
return false;
}
- int64_t nTotalBytesEstimate = static_cast<int64_t>(mEstimatedMoovBoxSize);
+ int64_t nTotalBytesEstimate = static_cast<int64_t>(mInMemoryCacheSize);
for (List<Track *>::iterator it = mTracks.begin();
it != mTracks.end(); ++it) {
nTotalBytesEstimate += (*it)->getEstimatedTrackSizeBytes();
@@ -1583,7 +1712,7 @@
for (List<Track *>::iterator it = mTracks.begin();
it != mTracks.end(); ++it) {
- if ((*it)->getDurationUs() >= mMaxFileDurationLimitUs) {
+ if (!(*it)->isHeic() && (*it)->getDurationUs() >= mMaxFileDurationLimitUs) {
return true;
}
}
@@ -1626,7 +1755,7 @@
////////////////////////////////////////////////////////////////////////////////
MPEG4Writer::Track::Track(
- MPEG4Writer *owner, const sp<IMediaSource> &source, size_t trackId)
+ MPEG4Writer *owner, const sp<MediaSource> &source, size_t trackId)
: mOwner(owner),
mMeta(source->getFormat()),
mSource(source),
@@ -1655,7 +1784,19 @@
mGotAllCodecSpecificData(false),
mReachedEOS(false),
mStartTimestampUs(-1),
- mRotation(0) {
+ mRotation(0),
+ mDimgRefs("dimg"),
+ mCdscRefs("cdsc"),
+ mImageItemId(0),
+ mIsPrimary(0),
+ mWidth(0),
+ mHeight(0),
+ mTileWidth(0),
+ mTileHeight(0),
+ mGridRows(0),
+ mGridCols(0),
+ mNumTiles(1),
+ mTileIndex(0) {
getCodecSpecificDataFromInputFormatIfPossible();
const char *mime;
@@ -1664,6 +1805,7 @@
mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
mIsAudio = !strncasecmp(mime, "audio/", 6);
mIsVideo = !strncasecmp(mime, "video/", 6);
+ mIsHeic = !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
@@ -1675,7 +1817,27 @@
}
}
- setTimeScale();
+ if (!mIsHeic) {
+ setTimeScale();
+ } else {
+ CHECK(mMeta->findInt32(kKeyWidth, &mWidth) && (mWidth > 0));
+ CHECK(mMeta->findInt32(kKeyHeight, &mHeight) && (mHeight > 0));
+
+ int32_t tileWidth, tileHeight, gridRows, gridCols;
+ if (mMeta->findInt32(kKeyTileWidth, &tileWidth) && (tileWidth > 0) &&
+ mMeta->findInt32(kKeyTileHeight, &tileHeight) && (tileHeight > 0) &&
+ mMeta->findInt32(kKeyGridRows, &gridRows) && (gridRows > 0) &&
+ mMeta->findInt32(kKeyGridCols, &gridCols) && (gridCols > 0)) {
+ mTileWidth = tileWidth;
+ mTileHeight = tileHeight;
+ mGridRows = gridRows;
+ mGridCols = gridCols;
+ mNumTiles = gridRows * gridCols;
+ }
+ if (!mMeta->findInt32(kKeyTrackIsDefault, &mIsPrimary)) {
+ mIsPrimary = false;
+ }
+ }
}
// Clear all the internal states except the CSD data.
@@ -1723,15 +1885,15 @@
}
void MPEG4Writer::Track::updateTrackSizeEstimate() {
-
- uint32_t stcoBoxCount = (mOwner->use32BitFileOffset()
- ? mStcoTableEntries->count()
- : mCo64TableEntries->count());
- int64_t stcoBoxSizeBytes = stcoBoxCount * 4;
- int64_t stszBoxSizeBytes = mSamplesHaveSameSize? 4: (mStszTableEntries->count() * 4);
-
mEstimatedTrackSizeBytes = mMdatSizeBytes; // media data size
- if (!mOwner->isFileStreamable()) {
+
+ if (!isHeic() && !mOwner->isFileStreamable()) {
+ uint32_t stcoBoxCount = (mOwner->use32BitFileOffset()
+ ? mStcoTableEntries->count()
+ : mCo64TableEntries->count());
+ int64_t stcoBoxSizeBytes = stcoBoxCount * 4;
+ int64_t stszBoxSizeBytes = mSamplesHaveSameSize? 4: (mStszTableEntries->count() * 4);
+
// Reserved free space is not large enough to hold
// all meta data and thus wasted.
mEstimatedTrackSizeBytes += mStscTableEntries->count() * 12 + // stsc box size
@@ -1745,10 +1907,9 @@
void MPEG4Writer::Track::addOneStscTableEntry(
size_t chunkId, size_t sampleId) {
-
- mStscTableEntries->add(htonl(chunkId));
- mStscTableEntries->add(htonl(sampleId));
- mStscTableEntries->add(htonl(1));
+ mStscTableEntries->add(htonl(chunkId));
+ mStscTableEntries->add(htonl(sampleId));
+ mStscTableEntries->add(htonl(1));
}
void MPEG4Writer::Track::addOneStssTableEntry(size_t sampleId) {
@@ -1793,7 +1954,15 @@
return OK;
}
+bool MPEG4Writer::Track::isExifData(const MediaBufferBase *buffer) const {
+ return mIsHeic
+ && (buffer->range_length() > sizeof(kExifHeader))
+ && !memcmp((uint8_t *)buffer->data() + buffer->range_offset(),
+ kExifHeader, sizeof(kExifHeader));
+}
+
void MPEG4Writer::Track::addChunkOffset(off64_t offset) {
+ CHECK(!mIsHeic);
if (mOwner->use32BitFileOffset()) {
uint32_t value = offset;
mStcoTableEntries->add(htonl(value));
@@ -1802,6 +1971,126 @@
}
}
+void MPEG4Writer::Track::addItemOffsetAndSize(off64_t offset, size_t size, bool isExif) {
+ CHECK(mIsHeic);
+
+ if (offset > UINT32_MAX || size > UINT32_MAX) {
+ ALOGE("offset or size is out of range: %lld, %lld",
+ (long long) offset, (long long) size);
+ mIsMalformed = true;
+ }
+ if (mIsMalformed) {
+ return;
+ }
+
+ if (isExif) {
+ mCdscRefs.value.push_back(mOwner->addItem_l({
+ .itemType = "Exif",
+ .isPrimary = false,
+ .isHidden = false,
+ .offset = (uint32_t)offset,
+ .size = (uint32_t)size,
+ }));
+ return;
+ }
+
+ if (mTileIndex >= mNumTiles) {
+ ALOGW("Ignoring excess tiles!");
+ return;
+ }
+
+ // Rotation angle in HEIF is CCW, framework angle is CW.
+ int32_t heifRotation = 0;
+ switch(mRotation) {
+ case 90: heifRotation = 3; break;
+ case 180: heifRotation = 2; break;
+ case 270: heifRotation = 1; break;
+ default: break; // don't set if invalid
+ }
+
+ bool hasGrid = (mTileWidth > 0);
+
+ if (mProperties.empty()) {
+ mProperties.push_back(mOwner->addProperty_l({
+ .type = FOURCC('h', 'v', 'c', 'C'),
+ .hvcc = ABuffer::CreateAsCopy(mCodecSpecificData, mCodecSpecificDataSize)
+ }));
+
+ mProperties.push_back(mOwner->addProperty_l({
+ .type = FOURCC('i', 's', 'p', 'e'),
+ .width = hasGrid ? mTileWidth : mWidth,
+ .height = hasGrid ? mTileHeight : mHeight,
+ }));
+
+ if (!hasGrid && heifRotation > 0) {
+ mProperties.push_back(mOwner->addProperty_l({
+ .type = FOURCC('i', 'r', 'o', 't'),
+ .rotation = heifRotation,
+ }));
+ }
+ }
+
+ mTileIndex++;
+ if (hasGrid) {
+ mDimgRefs.value.push_back(mOwner->addItem_l({
+ .itemType = "hvc1",
+ .isPrimary = false,
+ .isHidden = true,
+ .offset = (uint32_t)offset,
+ .size = (uint32_t)size,
+ .properties = mProperties,
+ }));
+
+ if (mTileIndex == mNumTiles) {
+ mProperties.clear();
+ mProperties.push_back(mOwner->addProperty_l({
+ .type = FOURCC('i', 's', 'p', 'e'),
+ .width = mWidth,
+ .height = mHeight,
+ }));
+ if (heifRotation > 0) {
+ mProperties.push_back(mOwner->addProperty_l({
+ .type = FOURCC('i', 'r', 'o', 't'),
+ .rotation = heifRotation,
+ }));
+ }
+ mImageItemId = mOwner->addItem_l({
+ .itemType = "grid",
+ .isPrimary = (mIsPrimary != 0),
+ .isHidden = false,
+ .rows = (uint32_t)mGridRows,
+ .cols = (uint32_t)mGridCols,
+ .width = (uint32_t)mWidth,
+ .height = (uint32_t)mHeight,
+ .properties = mProperties,
+ });
+ }
+ } else {
+ mImageItemId = mOwner->addItem_l({
+ .itemType = "hvc1",
+ .isPrimary = (mIsPrimary != 0),
+ .isHidden = false,
+ .offset = (uint32_t)offset,
+ .size = (uint32_t)size,
+ .properties = mProperties,
+ });
+ }
+}
+
+// Flush out the item refs for this track. Note that it must be called after the
+// writer thread has stopped, because there might be pending items in the last
+// few chunks written by the writer thread (as opposed to the track). In particular,
+// it affects the 'dimg' refs for tiled images, as we only have the refs after the
+// last tile sample is written.
+void MPEG4Writer::Track::flushItemRefs() {
+ CHECK(mIsHeic);
+
+ if (mImageItemId > 0) {
+ mOwner->addRefs_l(mImageItemId, mDimgRefs);
+ mOwner->addRefs_l(mImageItemId, mCdscRefs);
+ }
+}
+
void MPEG4Writer::Track::setTimeScale() {
ALOGV("setTimeScale");
// Default time scale
@@ -1854,7 +2143,8 @@
size_t size = 0;
if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
mMeta->findData(kKeyAVCC, &type, &data, &size);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
+ !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
mMeta->findData(kKeyHVCC, &type, &data, &size);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)
|| !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
@@ -1947,11 +2237,18 @@
while (!chunk->mSamples.empty()) {
List<MediaBuffer *>::iterator it = chunk->mSamples.begin();
- off64_t offset = (chunk->mTrack->isAvc() || chunk->mTrack->isHevc())
- ? addMultipleLengthPrefixedSamples_l(*it)
- : addSample_l(*it);
+ int32_t isExif;
+ if (!(*it)->meta_data().findInt32(kKeyIsExif, &isExif)) {
+ isExif = 0;
+ }
+ bool usePrefix = chunk->mTrack->usePrefix() && !isExif;
- if (isFirstSample) {
+ size_t bytesWritten;
+ off64_t offset = addSample_l(*it, usePrefix, isExif, &bytesWritten);
+
+ if (chunk->mTrack->isHeic()) {
+ chunk->mTrack->addItemOffsetAndSize(offset, bytesWritten, isExif);
+ } else if (isFirstSample) {
chunk->mTrack->addChunkOffset(offset);
isFirstSample = false;
}
@@ -2094,7 +2391,8 @@
mStartTimeRealUs = startTimeUs;
int32_t rotationDegrees;
- if (mIsVideo && params && params->findInt32(kKeyRotation, &rotationDegrees)) {
+ if ((mIsVideo || mIsHeic) && params &&
+ params->findInt32(kKeyRotation, &rotationDegrees)) {
mRotation = rotationDegrees;
}
@@ -2598,7 +2896,7 @@
sp<MetaData> meta_data;
status_t err = OK;
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
const char *trackName = getTrackType();
while (!mDone && (err = mSource->read(&buffer)) == OK) {
if (buffer->range_length() == 0) {
@@ -2620,7 +2918,7 @@
++count;
int32_t isCodecConfig;
- if (buffer->meta_data()->findInt32(kKeyIsCodecConfig, &isCodecConfig)
+ if (buffer->meta_data().findInt32(kKeyIsCodecConfig, &isCodecConfig)
&& isCodecConfig) {
// if config format (at track addition) already had CSD, keep that
// UNLESS we have not received any frames yet.
@@ -2636,7 +2934,7 @@
(const uint8_t *)buffer->data()
+ buffer->range_offset(),
buffer->range_length());
- } else if (mIsHevc) {
+ } else if (mIsHevc || mIsHeic) {
err = makeHEVCCodecSpecificData(
(const uint8_t *)buffer->data()
+ buffer->range_offset(),
@@ -2661,7 +2959,8 @@
}
// Per-frame metadata sample's size must be smaller than max allowed.
- if (!mIsVideo && !mIsAudio && buffer->range_length() >= kMaxMetadataSize) {
+ if (!mIsVideo && !mIsAudio && !mIsHeic &&
+ buffer->range_length() >= kMaxMetadataSize) {
ALOGW("Buffer size is %zu. Maximum metadata buffer size is %lld for %s track",
buffer->range_length(), (long long)kMaxMetadataSize, trackName);
buffer->release();
@@ -2670,6 +2969,19 @@
break;
}
+ bool isExif = false;
+ int32_t isMuxerData;
+ if (buffer->meta_data().findInt32(kKeyIsMuxerData, &isMuxerData) && isMuxerData) {
+ // We only support one type of muxer data, which is Exif data block.
+ isExif = isExifData(buffer);
+ if (!isExif) {
+ ALOGW("Ignoring bad Exif data block");
+ buffer->release();
+ buffer = NULL;
+ continue;
+ }
+ }
+
++nActualFrames;
// Make a deep copy of the MediaBuffer and Metadata and release
@@ -2678,14 +2990,19 @@
memcpy(copy->data(), (uint8_t *)buffer->data() + buffer->range_offset(),
buffer->range_length());
copy->set_range(0, buffer->range_length());
- meta_data = new MetaData(*buffer->meta_data().get());
+ meta_data = new MetaData(buffer->meta_data());
buffer->release();
buffer = NULL;
- if (mIsAvc || mIsHevc) StripStartcode(copy);
+ if (isExif) {
+ copy->meta_data().setInt32(kKeyIsExif, 1);
+ }
+ bool usePrefix = this->usePrefix() && !isExif;
+
+ if (usePrefix) StripStartcode(copy);
size_t sampleSize = copy->range_length();
- if (mIsAvc || mIsHevc) {
+ if (usePrefix) {
if (mOwner->useNalLengthFour()) {
sampleSize += 4;
} else {
@@ -2739,223 +3056,230 @@
mGotStartKeyFrame = true;
}
////////////////////////////////////////////////////////////////////////////////
- if (mStszTableEntries->count() == 0) {
- mFirstSampleTimeRealUs = systemTime() / 1000;
- mStartTimestampUs = timestampUs;
- mOwner->setStartTimestampUs(mStartTimestampUs);
- previousPausedDurationUs = mStartTimestampUs;
- }
- if (mResumed) {
- int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
- if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
- if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- previousPausedDurationUs += pausedDurationUs - lastDurationUs;
- mResumed = false;
- }
- TimestampDebugHelperEntry timestampDebugEntry;
- timestampUs -= previousPausedDurationUs;
- timestampDebugEntry.pts = timestampUs;
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- if (mIsVideo) {
- /*
- * Composition time: timestampUs
- * Decoding time: decodingTimeUs
- * Composition time offset = composition time - decoding time
- */
- int64_t decodingTimeUs;
- CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
- decodingTimeUs -= previousPausedDurationUs;
-
- // ensure non-negative, monotonic decoding time
- if (mLastDecodingTimeUs < 0) {
- decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
- } else {
- // increase decoding time by at least the larger vaule of 1 tick and
- // 0.1 milliseconds. This needs to take into account the possible
- // delta adjustment in DurationTicks in below.
- decodingTimeUs = std::max(mLastDecodingTimeUs +
- std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
- }
-
- mLastDecodingTimeUs = decodingTimeUs;
- timestampDebugEntry.dts = decodingTimeUs;
- timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
- // Insert the timestamp into the mTimestampDebugHelper
- if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
- mTimestampDebugHelper.pop_front();
- }
- mTimestampDebugHelper.push_back(timestampDebugEntry);
-
- cttsOffsetTimeUs =
- timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
- if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- timestampUs = decodingTimeUs;
- ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
- timestampUs, cttsOffsetTimeUs);
-
- // Update ctts box table if necessary
- currCttsOffsetTimeTicks =
- (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
- if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
+ if (!mIsHeic) {
if (mStszTableEntries->count() == 0) {
- // Force the first ctts table entry to have one single entry
- // so that we can do adjustment for the initial track start
- // time offset easily in writeCttsBox().
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
- cttsSampleCount = 0; // No sample in ctts box is pending
- } else {
- if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
- addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
- lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
- cttsSampleCount = 1; // One sample in ctts box is pending
+ mFirstSampleTimeRealUs = systemTime() / 1000;
+ mStartTimestampUs = timestampUs;
+ mOwner->setStartTimestampUs(mStartTimestampUs);
+ previousPausedDurationUs = mStartTimestampUs;
+ }
+
+ if (mResumed) {
+ int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
+ if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
+ if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ previousPausedDurationUs += pausedDurationUs - lastDurationUs;
+ mResumed = false;
+ }
+ TimestampDebugHelperEntry timestampDebugEntry;
+ timestampUs -= previousPausedDurationUs;
+ timestampDebugEntry.pts = timestampUs;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mIsVideo) {
+ /*
+ * Composition time: timestampUs
+ * Decoding time: decodingTimeUs
+ * Composition time offset = composition time - decoding time
+ */
+ int64_t decodingTimeUs;
+ CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
+ decodingTimeUs -= previousPausedDurationUs;
+
+ // ensure non-negative, monotonic decoding time
+ if (mLastDecodingTimeUs < 0) {
+ decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
} else {
- ++cttsSampleCount;
+ // increase decoding time by at least the larger value of 1 tick and
+ // 0.1 milliseconds. This needs to take into account the possible
+ // delta adjustment in DurationTicks below.
+ decodingTimeUs = std::max(mLastDecodingTimeUs +
+ std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
}
- }
- // Update ctts time offset range
- if (mStszTableEntries->count() == 0) {
- mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else {
- if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
- mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
- } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mLastDecodingTimeUs = decodingTimeUs;
+ timestampDebugEntry.dts = decodingTimeUs;
+ timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
+ // Insert the timestamp into the mTimestampDebugHelper
+ if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
+ mTimestampDebugHelper.pop_front();
+ }
+ mTimestampDebugHelper.push_back(timestampDebugEntry);
+
+ cttsOffsetTimeUs =
+ timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
+ if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ timestampUs = decodingTimeUs;
+ ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
+ timestampUs, cttsOffsetTimeUs);
+
+ // Update ctts box table if necessary
+ currCttsOffsetTimeTicks =
+ (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
+ if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ if (mStszTableEntries->count() == 0) {
+ // Force the first ctts table entry to have one single entry
+ // so that we can do adjustment for the initial track start
+ // time offset easily in writeCttsBox().
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
+ cttsSampleCount = 0; // No sample in ctts box is pending
+ } else {
+ if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
+ addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ cttsSampleCount = 1; // One sample in ctts box is pending
+ } else {
+ ++cttsSampleCount;
+ }
+ }
+
+ // Update ctts time offset range
+ if (mStszTableEntries->count() == 0) {
mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
- mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else {
+ if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
+ mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+ } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+ mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
+ mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+ }
}
}
- }
- if (mOwner->isRealTimeRecording()) {
- if (mIsAudio) {
- updateDriftTime(meta_data);
- }
- }
-
- if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
- trackName, timestampUs, previousPausedDurationUs);
- if (timestampUs > mTrackDurationUs) {
- mTrackDurationUs = timestampUs;
- }
-
- // We need to use the time scale based ticks, rather than the
- // timestamp itself to determine whether we have to use a new
- // stts entry, since we may have rounding errors.
- // The calculation is intended to reduce the accumulated
- // rounding errors.
- currDurationTicks =
- ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
- (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
- if (currDurationTicks < 0ll) {
- ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
- (long long)timestampUs, (long long)lastTimestampUs, trackName);
- copy->release();
- mSource->stop();
- mIsMalformed = true;
- break;
- }
-
- // if the duration is different for this sample, see if it is close enough to the previous
- // duration that we can fudge it and use the same value, to avoid filling the stts table
- // with lots of near-identical entries.
- // "close enough" here means that the current duration needs to be adjusted by less
- // than 0.1 milliseconds
- if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
- int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
- + (mTimeScale / 2)) / mTimeScale;
- if (deltaUs > -100 && deltaUs < 100) {
- // use previous ticks, and adjust timestamp as if it was actually that number
- // of ticks
- currDurationTicks = lastDurationTicks;
- timestampUs += deltaUs;
- }
- }
- mStszTableEntries->add(htonl(sampleSize));
- if (mStszTableEntries->count() > 2) {
-
- // Force the first sample to have its own stts entry so that
- // we can adjust its value later to maintain the A/V sync.
- if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
- addOneSttsTableEntry(sampleCount, lastDurationTicks);
- sampleCount = 1;
- } else {
- ++sampleCount;
+ if (mOwner->isRealTimeRecording()) {
+ if (mIsAudio) {
+ updateDriftTime(meta_data);
+ }
}
- }
- if (mSamplesHaveSameSize) {
- if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
- mSamplesHaveSameSize = false;
+ if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
}
- previousSampleSize = sampleSize;
- }
- ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
- trackName, timestampUs, lastTimestampUs);
- lastDurationUs = timestampUs - lastTimestampUs;
- lastDurationTicks = currDurationTicks;
- lastTimestampUs = timestampUs;
- if (isSync != 0) {
- addOneStssTableEntry(mStszTableEntries->count());
- }
-
- if (mTrackingProgressStatus) {
- if (mPreviousTrackTimeUs <= 0) {
- mPreviousTrackTimeUs = mStartTimestampUs;
+ ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
+ trackName, timestampUs, previousPausedDurationUs);
+ if (timestampUs > mTrackDurationUs) {
+ mTrackDurationUs = timestampUs;
}
- trackProgressStatus(timestampUs);
+
+ // We need to use the time scale based ticks, rather than the
+ // timestamp itself to determine whether we have to use a new
+ // stts entry, since we may have rounding errors.
+ // The calculation is intended to reduce the accumulated
+ // rounding errors.
+ currDurationTicks =
+ ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
+ (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
+ if (currDurationTicks < 0ll) {
+ ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
+ (long long)timestampUs, (long long)lastTimestampUs, trackName);
+ copy->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
+ // if the duration is different for this sample, see if it is close enough to the previous
+ // duration that we can fudge it and use the same value, to avoid filling the stts table
+ // with lots of near-identical entries.
+ // "close enough" here means that the current duration needs to be adjusted by less
+ // than 0.1 milliseconds
+ if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
+ int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
+ + (mTimeScale / 2)) / mTimeScale;
+ if (deltaUs > -100 && deltaUs < 100) {
+ // use previous ticks, and adjust timestamp as if it was actually that number
+ // of ticks
+ currDurationTicks = lastDurationTicks;
+ timestampUs += deltaUs;
+ }
+ }
+ mStszTableEntries->add(htonl(sampleSize));
+ if (mStszTableEntries->count() > 2) {
+
+ // Force the first sample to have its own stts entry so that
+ // we can adjust its value later to maintain the A/V sync.
+ if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
+ addOneSttsTableEntry(sampleCount, lastDurationTicks);
+ sampleCount = 1;
+ } else {
+ ++sampleCount;
+ }
+
+ }
+ if (mSamplesHaveSameSize) {
+ if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
+ mSamplesHaveSameSize = false;
+ }
+ previousSampleSize = sampleSize;
+ }
+ ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
+ trackName, timestampUs, lastTimestampUs);
+ lastDurationUs = timestampUs - lastTimestampUs;
+ lastDurationTicks = currDurationTicks;
+ lastTimestampUs = timestampUs;
+
+ if (isSync != 0) {
+ addOneStssTableEntry(mStszTableEntries->count());
+ }
+
+ if (mTrackingProgressStatus) {
+ if (mPreviousTrackTimeUs <= 0) {
+ mPreviousTrackTimeUs = mStartTimestampUs;
+ }
+ trackProgressStatus(timestampUs);
+ }
}
if (!hasMultipleTracks) {
- off64_t offset = (mIsAvc || mIsHevc) ? mOwner->addMultipleLengthPrefixedSamples_l(copy)
- : mOwner->addSample_l(copy);
+ size_t bytesWritten;
+ off64_t offset = mOwner->addSample_l(copy, usePrefix, isExif, &bytesWritten);
- uint32_t count = (mOwner->use32BitFileOffset()
- ? mStcoTableEntries->count()
- : mCo64TableEntries->count());
+ if (mIsHeic) {
+ addItemOffsetAndSize(offset, bytesWritten, isExif);
+ } else {
+ uint32_t count = (mOwner->use32BitFileOffset()
+ ? mStcoTableEntries->count()
+ : mCo64TableEntries->count());
- if (count == 0) {
- addChunkOffset(offset);
+ if (count == 0) {
+ addChunkOffset(offset);
+ }
}
copy->release();
copy = NULL;
@@ -2963,7 +3287,10 @@
}
mChunkSamples.push_back(copy);
- if (interleaveDurationUs == 0) {
+ if (mIsHeic) {
+ bufferChunk(0 /*timestampUs*/);
+ ++nChunks;
+ } else if (interleaveDurationUs == 0) {
addOneStscTableEntry(++nChunks, 1);
bufferChunk(timestampUs);
} else {
@@ -2996,42 +3323,49 @@
mOwner->trackProgressStatus(mTrackId, -1, err);
- // Last chunk
- if (!hasMultipleTracks) {
- addOneStscTableEntry(1, mStszTableEntries->count());
- } else if (!mChunkSamples.empty()) {
- addOneStscTableEntry(++nChunks, mChunkSamples.size());
- bufferChunk(timestampUs);
- }
-
- // We don't really know how long the last frame lasts, since
- // there is no frame time after it, just repeat the previous
- // frame's duration.
- if (mStszTableEntries->count() == 1) {
- lastDurationUs = 0; // A single sample's duration
- lastDurationTicks = 0;
- } else {
- ++sampleCount; // Count for the last sample
- }
-
- if (mStszTableEntries->count() <= 2) {
- addOneSttsTableEntry(1, lastDurationTicks);
- if (sampleCount - 1 > 0) {
- addOneSttsTableEntry(sampleCount - 1, lastDurationTicks);
+ if (mIsHeic) {
+ if (!mChunkSamples.empty()) {
+ bufferChunk(0);
+ ++nChunks;
}
} else {
- addOneSttsTableEntry(sampleCount, lastDurationTicks);
- }
-
- // The last ctts box may not have been written yet, and this
- // is to make sure that we write out the last ctts box.
- if (currCttsOffsetTimeTicks == lastCttsOffsetTimeTicks) {
- if (cttsSampleCount > 0) {
- addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+ // Last chunk
+ if (!hasMultipleTracks) {
+ addOneStscTableEntry(1, mStszTableEntries->count());
+ } else if (!mChunkSamples.empty()) {
+ addOneStscTableEntry(++nChunks, mChunkSamples.size());
+ bufferChunk(timestampUs);
}
- }
- mTrackDurationUs += lastDurationUs;
+ // We don't really know how long the last frame lasts, since
+ // there is no frame time after it, just repeat the previous
+ // frame's duration.
+ if (mStszTableEntries->count() == 1) {
+ lastDurationUs = 0; // A single sample's duration
+ lastDurationTicks = 0;
+ } else {
+ ++sampleCount; // Count for the last sample
+ }
+
+ if (mStszTableEntries->count() <= 2) {
+ addOneSttsTableEntry(1, lastDurationTicks);
+ if (sampleCount - 1 > 0) {
+ addOneSttsTableEntry(sampleCount - 1, lastDurationTicks);
+ }
+ } else {
+ addOneSttsTableEntry(sampleCount, lastDurationTicks);
+ }
+
+ // The last ctts box may not have been written yet, and this
+ // is to make sure that we write out the last ctts box.
+ if (currCttsOffsetTimeTicks == lastCttsOffsetTimeTicks) {
+ if (cttsSampleCount > 0) {
+ addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+ }
+ }
+
+ mTrackDurationUs += lastDurationUs;
+ }
mReachedEOS = true;
sendTrackSummary(hasMultipleTracks);
@@ -3053,7 +3387,7 @@
return true;
}
- if (mStszTableEntries->count() == 0) { // no samples written
+ if (!mIsHeic && mStszTableEntries->count() == 0) { // no samples written
ALOGE("The number of recorded samples is 0");
return true;
}
@@ -3199,13 +3533,59 @@
return mEstimatedTrackSizeBytes;
}
+int32_t MPEG4Writer::Track::getMetaSizeIncrease(
+ int32_t angle, int32_t trackCount) const {
+ CHECK(mIsHeic);
+
+ int32_t grid = (mTileWidth > 0);
+ int32_t rotate = (angle > 0);
+
+ // Note that the rotation angle is stored in the file-level meta, and we
+ // don't have it until start() is called, so the calculation here has to
+ // assume the image is rotated.
+
+ // increase to ipco
+ int32_t increase = 20 * (grid + 1) // 'ispe' property
+ + (8 + mCodecSpecificDataSize) // 'hvcC' property
+ ;
+
+ if (rotate) {
+ increase += 9; // 'irot' property (worst case)
+ }
+
+ // increase to iref and idat
+ if (grid) {
+ increase += (12 + mNumTiles * 2) // 'dimg' in iref
+ + 12; // ImageGrid in 'idat' (worst case)
+ }
+
+ increase += (12 + 2); // 'cdsc' in iref
+
+ // increase to iloc, iinf
+ increase += (16 // increase to 'iloc'
+ + 21) // increase to 'iinf'
+ * (mNumTiles + grid + 1); // "+1" is for 'Exif'
+
+ // When the total # of properties is > 127, the property id becomes 2-byte.
+ // We write 4 properties at most for each image (2x'ispe', 1x'hvcC', 1x'irot').
+ // Set the threshold to be 30.
+ int32_t propBytes = trackCount > 30 ? 2 : 1;
+
+ // increase to ipma
+ increase += (3 + 2 * propBytes) * mNumTiles // 'ispe' + 'hvcC'
+ + grid * (3 + propBytes) // 'ispe' for grid
+ + rotate * propBytes; // 'irot' (either on grid or tile)
+
+ return increase;
+}
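+
+// Rough check on the 2-byte property id threshold above (illustrative numbers):
+// each image contributes at most 4 'ipco' properties, so 32 image tracks would
+// already need 32 * 4 = 128 > 127 property ids; the cutoff of 30 leaves headroom
+// for the grid and rotation properties counted on top of that.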
+
status_t MPEG4Writer::Track::checkCodecSpecificData() const {
const char *mime;
CHECK(mMeta->findCString(kKeyMIMEType, &mime));
if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime) ||
!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime) ||
!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime) ||
- !strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, mime)) {
if (!mCodecSpecificData ||
mCodecSpecificDataSize <= 0) {
ALOGE("Missing codec specific data");
@@ -3222,7 +3602,10 @@
}
const char *MPEG4Writer::Track::getTrackType() const {
- return mIsAudio ? "Audio" : (mIsVideo ? "Video" : "Metadata");
+ return mIsAudio ? "Audio" :
+ mIsVideo ? "Video" :
+ mIsHeic ? "Image" :
+ "Metadata";
}
void MPEG4Writer::Track::writeTrackHeader(bool use32BitOffset) {
@@ -3792,11 +4175,11 @@
endBox();
}
-void MPEG4Writer::writeHdlr() {
+void MPEG4Writer::writeHdlr(const char *handlerType) {
beginBox("hdlr");
writeInt32(0); // Version, Flags
writeInt32(0); // Predefined
- writeFourcc("mdta");
+ writeFourcc(handlerType);
writeInt32(0); // Reserved[0]
writeInt32(0); // Reserved[1]
writeInt32(0); // Reserved[2]
@@ -3876,19 +4259,334 @@
endBox(); // ilst
}
-void MPEG4Writer::writeMetaBox() {
+void MPEG4Writer::writeMoovLevelMetaBox() {
size_t count = mMetaKeys->countEntries();
if (count == 0) {
return;
}
beginBox("meta");
- writeHdlr();
+ writeHdlr("mdta");
writeKeys();
writeIlst();
endBox();
}
+void MPEG4Writer::writeIlocBox() {
+ beginBox("iloc");
+ // Use version 1 to allow construction method 1 that refers to
+ // data in idat box inside meta box.
+ writeInt32(0x01000000); // Version = 1, Flags = 0
+ writeInt16(0x4400); // offset_size = length_size = 4
+ // base_offset_size = index_size = 0
+
+ // 16-bit item_count
+ size_t itemCount = mItems.size();
+ if (itemCount > 65535) {
+ ALOGW("Dropping excess items: itemCount %zu", itemCount);
+ itemCount = 65535;
+ }
+ writeInt16((uint16_t)itemCount);
+
+ for (size_t i = 0; i < itemCount; i++) {
+ writeInt16(mItems[i].itemId);
+ bool isGrid = mItems[i].isGrid();
+
+ writeInt16(isGrid ? 1 : 0); // construction_method
+ writeInt16(0); // data_reference_index = 0
+ writeInt16(1); // extent_count = 1
+
+ if (isGrid) {
+ // offset into the 'idat' box
+ writeInt32(mNumGrids++ * 8);
+ writeInt32(8);
+ } else {
+ writeInt32(mItems[i].offset);
+ writeInt32(mItems[i].size);
+ }
+ }
+ endBox();
+}
+
+void MPEG4Writer::writeInfeBox(
+ uint16_t itemId, const char *itemType, uint32_t flags) {
+ beginBox("infe");
+ writeInt32(0x02000000 | flags); // Version = 2, Flags = flags
+ writeInt16(itemId);
+ writeInt16(0); //item_protection_index = 0
+ writeFourcc(itemType);
+ writeCString(""); // item_name
+ endBox();
+}
+
+void MPEG4Writer::writeIinfBox() {
+ beginBox("iinf");
+ writeInt32(0); // Version = 0, Flags = 0
+
+ // 16-bit item_count
+ size_t itemCount = mItems.size();
+ if (itemCount > 65535) {
+ ALOGW("Dropping excess items: itemCount %zu", itemCount);
+ itemCount = 65535;
+ }
+
+ writeInt16((uint16_t)itemCount);
+ for (size_t i = 0; i < itemCount; i++) {
+ writeInfeBox(mItems[i].itemId, mItems[i].itemType,
+ (mItems[i].isImage() && mItems[i].isHidden) ? 1 : 0);
+ }
+
+ endBox();
+}
+
+void MPEG4Writer::writeIdatBox() {
+ beginBox("idat");
+
+ for (size_t i = 0; i < mItems.size(); i++) {
+ if (mItems[i].isGrid()) {
+ writeInt8(0); // version
+ // flags == 1 means 32-bit width,height
+ int8_t flags = (mItems[i].width > 65535 || mItems[i].height > 65535);
+ writeInt8(flags);
+ writeInt8(mItems[i].rows - 1);
+ writeInt8(mItems[i].cols - 1);
+ if (flags) {
+ writeInt32(mItems[i].width);
+ writeInt32(mItems[i].height);
+ } else {
+ writeInt16((uint16_t)mItems[i].width);
+ writeInt16((uint16_t)mItems[i].height);
+ }
+ }
+ }
+
+ endBox();
+}
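+
+// Layout note (illustrative): with 16-bit dimensions each ImageGrid entry above
+// takes 8 bytes (version + flags + rows-1 + cols-1 + width + height), which is
+// why writeIlocBox() steps the 'idat' offset by 8 per grid item.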
+
+void MPEG4Writer::writeIrefBox() {
+ beginBox("iref");
+ writeInt32(0); // Version = 0, Flags = 0
+ {
+ for (size_t i = 0; i < mItems.size(); i++) {
+ for (size_t r = 0; r < mItems[i].refsList.size(); r++) {
+ const ItemRefs &refs = mItems[i].refsList[r];
+ beginBox(refs.key);
+ writeInt16(mItems[i].itemId);
+ size_t refCount = refs.value.size();
+ if (refCount > 65535) {
+ ALOGW("too many entries in %s", refs.key);
+ refCount = 65535;
+ }
+ writeInt16((uint16_t)refCount);
+ for (size_t refIndex = 0; refIndex < refCount; refIndex++) {
+ writeInt16(refs.value[refIndex]);
+ }
+ endBox();
+ }
+ }
+ }
+ endBox();
+}
+
+void MPEG4Writer::writePitmBox() {
+ beginBox("pitm");
+ writeInt32(0); // Version = 0, Flags = 0
+ writeInt16(mPrimaryItemId);
+ endBox();
+}
+
+void MPEG4Writer::writeIpcoBox() {
+ beginBox("ipco");
+ size_t numProperties = mProperties.size();
+ if (numProperties > 32767) {
+ ALOGW("Dropping excess properties: numProperties %zu", numProperties);
+ numProperties = 32767;
+ }
+ for (size_t propIndex = 0; propIndex < numProperties; propIndex++) {
+ switch (mProperties[propIndex].type) {
+ case FOURCC('h', 'v', 'c', 'C'):
+ {
+ beginBox("hvcC");
+ sp<ABuffer> hvcc = mProperties[propIndex].hvcc;
+ // Patch hvcC's lengthSize field to match the number
+ // of bytes we use to indicate the size of a nal unit.
+ uint8_t *ptr = (uint8_t *)hvcc->data();
+ ptr[21] = (ptr[21] & 0xfc) | (useNalLengthFour() ? 3 : 1);
+ write(hvcc->data(), hvcc->size());
+ endBox();
+ break;
+ }
+ case FOURCC('i', 's', 'p', 'e'):
+ {
+ beginBox("ispe");
+ writeInt32(0); // Version = 0, Flags = 0
+ writeInt32(mProperties[propIndex].width);
+ writeInt32(mProperties[propIndex].height);
+ endBox();
+ break;
+ }
+ case FOURCC('i', 'r', 'o', 't'):
+ {
+ beginBox("irot");
+ writeInt8(mProperties[propIndex].rotation);
+ endBox();
+ break;
+ }
+ default:
+ ALOGW("Skipping unrecognized property: type 0x%08x",
+ mProperties[propIndex].type);
+ }
+ }
+ endBox();
+}
+
+void MPEG4Writer::writeIpmaBox() {
+ beginBox("ipma");
+ uint32_t flags = (mProperties.size() > 127) ? 1 : 0;
+ writeInt32(flags); // Version = 0
+
+ writeInt32(mAssociationEntryCount);
+ for (size_t itemIndex = 0; itemIndex < mItems.size(); itemIndex++) {
+ const Vector<uint16_t> &properties = mItems[itemIndex].properties;
+ if (properties.empty()) {
+ continue;
+ }
+ writeInt16(mItems[itemIndex].itemId);
+
+ size_t entryCount = properties.size();
+ if (entryCount > 255) {
+ ALOGW("Dropping excess associations: entryCount %zu", entryCount);
+ entryCount = 255;
+ }
+ writeInt8((uint8_t)entryCount);
+ for (size_t propIndex = 0; propIndex < entryCount; propIndex++) {
+ if (flags & 1) {
+ writeInt16((1 << 15) | properties[propIndex]);
+ } else {
+ writeInt8((1 << 7) | properties[propIndex]);
+ }
+ }
+ }
+ endBox();
+}
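+
+// Example association encodings for the loop above (hypothetical property
+// index 5, with the essential bit the code always sets): the 1-byte form is
+// (1 << 7) | 5 = 0x85; once more than 127 properties exist (flags & 1),
+// the 2-byte form is (1 << 15) | 5 = 0x8005.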
+
+void MPEG4Writer::writeIprpBox() {
+ beginBox("iprp");
+ writeIpcoBox();
+ writeIpmaBox();
+ endBox();
+}
+
+void MPEG4Writer::writeFileLevelMetaBox() {
+ // patch up the mPrimaryItemId and count items with prop associations
+ uint16_t firstVisibleItemId = 0;
+ uint16_t firstImageItemId = 0;
+ for (size_t index = 0; index < mItems.size(); index++) {
+ if (!mItems[index].isImage()) continue;
+
+ if (mItems[index].isPrimary) {
+ mPrimaryItemId = mItems[index].itemId;
+ }
+ if (!firstImageItemId) {
+ firstImageItemId = mItems[index].itemId;
+ }
+ if (!firstVisibleItemId && !mItems[index].isHidden) {
+ firstVisibleItemId = mItems[index].itemId;
+ }
+ if (!mItems[index].properties.empty()) {
+ mAssociationEntryCount++;
+ }
+ }
+
+ if (!firstImageItemId) {
+ ALOGE("no valid image was found");
+ return;
+ }
+
+ if (mPrimaryItemId == 0) {
+ if (firstVisibleItemId > 0) {
+ ALOGW("didn't find primary, using first visible image");
+ mPrimaryItemId = firstVisibleItemId;
+ } else {
+ ALOGW("no primary and no visible item, using first image");
+ mPrimaryItemId = firstImageItemId;
+ }
+ }
+
+ for (List<Track *>::iterator it = mTracks.begin();
+ it != mTracks.end(); ++it) {
+ if ((*it)->isHeic()) {
+ (*it)->flushItemRefs();
+ }
+ }
+
+ beginBox("meta");
+ writeInt32(0); // Version = 0, Flags = 0
+ writeHdlr("pict");
+ writeIlocBox();
+ writeIinfBox();
+ writePitmBox();
+ writeIprpBox();
+ if (mNumGrids > 0) {
+ writeIdatBox();
+ }
+ if (mHasRefs) {
+ writeIrefBox();
+ }
+ endBox();
+}
+
+uint16_t MPEG4Writer::addProperty_l(const ItemProperty &prop) {
+ char typeStr[5];
+ MakeFourCCString(prop.type, typeStr);
+ ALOGV("addProperty_l: %s", typeStr);
+
+ mProperties.push_back(prop);
+
+ // returning 1-based property index
+ return mProperties.size();
+}
+
+uint16_t MPEG4Writer::addItem_l(const ItemInfo &info) {
+ ALOGV("addItem_l: type %s, offset %u, size %u",
+ info.itemType, info.offset, info.size);
+
+ size_t index = mItems.size();
+ mItems.push_back(info);
+
+ // make the item id start at kItemIdBase
+ mItems.editItemAt(index).itemId = index + kItemIdBase;
+
+#if (LOG_NDEBUG==0)
+ if (!info.properties.empty()) {
+ AString str;
+ for (size_t i = 0; i < info.properties.size(); i++) {
+ if (i > 0) {
+ str.append(", ");
+ }
+ str.append(info.properties[i]);
+ }
+ ALOGV("addItem_l: id %d, properties: %s", mItems[index].itemId, str.c_str());
+ }
+#endif // (LOG_NDEBUG==0)
+
+ return mItems[index].itemId;
+}
+
+void MPEG4Writer::addRefs_l(uint16_t itemId, const ItemRefs &refs) {
+ if (refs.value.empty()) {
+ return;
+ }
+ if (itemId < kItemIdBase) {
+ ALOGW("itemId shouldn't be smaller than kItemIdBase");
+ return;
+ }
+
+ size_t index = itemId - kItemIdBase;
+ mItems.editItemAt(index).refsList.push_back(refs);
+ mHasRefs = true;
+}
+
/*
* Geodata is stored according to ISO-6709 standard.
*/
diff --git a/media/libstagefright/MediaAdapter.cpp b/media/libstagefright/MediaAdapter.cpp
index d680e0c..f1b6e8c 100644
--- a/media/libstagefright/MediaAdapter.cpp
+++ b/media/libstagefright/MediaAdapter.cpp
@@ -45,17 +45,24 @@
}
status_t MediaAdapter::stop() {
- Mutex::Autolock autoLock(mAdapterLock);
- if (mStarted) {
- mStarted = false;
- // If stop() happens immediately after a pushBuffer(), we should
- // clean up the mCurrentMediaBuffer
- if (mCurrentMediaBuffer != NULL) {
- mCurrentMediaBuffer->release();
+ MediaBuffer *currentBuffer = NULL;
+ {
+ Mutex::Autolock autoLock(mAdapterLock);
+ if (mStarted) {
+ mStarted = false;
+ // If stop() happens immediately after a pushBuffer(), we should
+ // clean up the mCurrentMediaBuffer. But need to release without
+ // the lock as signalBufferReturned() will acquire the lock.
+ currentBuffer = mCurrentMediaBuffer;
mCurrentMediaBuffer = NULL;
+
+ // While read() is still waiting, we should signal it to finish.
+ mBufferReadCond.signal();
}
- // While read() is still waiting, we should signal it to finish.
- mBufferReadCond.signal();
+ }
+ if (currentBuffer != NULL) {
+ currentBuffer->release();
+ currentBuffer = NULL;
}
return OK;
}
@@ -65,7 +72,7 @@
return mOutputFormat;
}
-void MediaAdapter::signalBufferReturned(MediaBuffer *buffer) {
+void MediaAdapter::signalBufferReturned(MediaBufferBase *buffer) {
Mutex::Autolock autoLock(mAdapterLock);
CHECK(buffer != NULL);
buffer->setObserver(0);
@@ -75,7 +82,7 @@
}
status_t MediaAdapter::read(
- MediaBuffer **buffer, const ReadOptions * /* options */) {
+ MediaBufferBase **buffer, const ReadOptions * /* options */) {
Mutex::Autolock autoLock(mAdapterLock);
if (!mStarted) {
ALOGV("Read before even started!");
@@ -97,7 +104,6 @@
*buffer = mCurrentMediaBuffer;
mCurrentMediaBuffer = NULL;
- (*buffer)->setObserver(this);
return OK;
}
@@ -114,6 +120,7 @@
return INVALID_OPERATION;
}
mCurrentMediaBuffer = buffer;
+ mCurrentMediaBuffer->setObserver(this);
mBufferReadCond.signal();
ALOGV("wait for the buffer returned @ pushBuffer! %p", buffer);
diff --git a/media/libstagefright/MediaClock.cpp b/media/libstagefright/MediaClock.cpp
index 3aa0061..41dbfd4 100644
--- a/media/libstagefright/MediaClock.cpp
+++ b/media/libstagefright/MediaClock.cpp
@@ -17,11 +17,12 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MediaClock"
#include <utils/Log.h>
+#include <map>
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
namespace android {
@@ -29,15 +30,50 @@
// If larger than this threshold, it's treated as discontinuity.
static const int64_t kAnchorFluctuationAllowedUs = 10000ll;
+MediaClock::Timer::Timer(const sp<AMessage> ¬ify, int64_t mediaTimeUs, int64_t adjustRealUs)
+ : mNotify(notify),
+ mMediaTimeUs(mediaTimeUs),
+ mAdjustRealUs(adjustRealUs) {
+}
+
MediaClock::MediaClock()
: mAnchorTimeMediaUs(-1),
mAnchorTimeRealUs(-1),
mMaxTimeMediaUs(INT64_MAX),
mStartingTimeMediaUs(-1),
- mPlaybackRate(1.0) {
+ mPlaybackRate(1.0),
+ mGeneration(0) {
+ mLooper = new ALooper;
+ mLooper->setName("MediaClock");
+ mLooper->start(false /* runOnCallingThread */,
+ false /* canCallJava */,
+ ANDROID_PRIORITY_AUDIO);
+}
+
+void MediaClock::init() {
+ mLooper->registerHandler(this);
}
MediaClock::~MediaClock() {
+ reset();
+ if (mLooper != NULL) {
+ mLooper->unregisterHandler(id());
+ mLooper->stop();
+ }
+}
+
+void MediaClock::reset() {
+ Mutex::Autolock autoLock(mLock);
+ auto it = mTimers.begin();
+ while (it != mTimers.end()) {
+ it->mNotify->setInt32("reason", TIMER_REASON_RESET);
+ it->mNotify->post();
+ it = mTimers.erase(it);
+ }
+ mMaxTimeMediaUs = INT64_MAX;
+ mStartingTimeMediaUs = -1;
+ updateAnchorTimesAndPlaybackRate_l(-1, -1, 1.0);
+ ++mGeneration;
}
void MediaClock::setStartingTimeMedia(int64_t startingTimeMediaUs) {
@@ -47,8 +83,7 @@
void MediaClock::clearAnchor() {
Mutex::Autolock autoLock(mLock);
- mAnchorTimeMediaUs = -1;
- mAnchorTimeRealUs = -1;
+ updateAnchorTimesAndPlaybackRate_l(-1, -1, mPlaybackRate);
}
void MediaClock::updateAnchor(
@@ -80,8 +115,10 @@
return;
}
}
- mAnchorTimeRealUs = nowUs;
- mAnchorTimeMediaUs = nowMediaUs;
+ updateAnchorTimesAndPlaybackRate_l(nowMediaUs, nowUs, mPlaybackRate);
+
+ ++mGeneration;
+ processTimers_l();
}
void MediaClock::updateMaxTimeMedia(int64_t maxTimeMediaUs) {
@@ -98,13 +135,17 @@
}
int64_t nowUs = ALooper::GetNowUs();
- mAnchorTimeMediaUs += (nowUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
- if (mAnchorTimeMediaUs < 0) {
+ int64_t nowMediaUs = mAnchorTimeMediaUs + (nowUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
+ if (nowMediaUs < 0) {
ALOGW("setRate: anchor time should not be negative, set to 0.");
- mAnchorTimeMediaUs = 0;
+ nowMediaUs = 0;
}
- mAnchorTimeRealUs = nowUs;
- mPlaybackRate = rate;
+ updateAnchorTimesAndPlaybackRate_l(nowMediaUs, nowUs, rate);
+
+ if (rate > 0.0) {
+ ++mGeneration;
+ processTimers_l();
+ }
}
float MediaClock::getPlaybackRate() const {
@@ -165,4 +206,133 @@
return OK;
}
+void MediaClock::addTimer(const sp<AMessage> ¬ify, int64_t mediaTimeUs,
+ int64_t adjustRealUs) {
+ Mutex::Autolock autoLock(mLock);
+
+ bool updateTimer = (mPlaybackRate != 0.0);
+ if (updateTimer) {
+ auto it = mTimers.begin();
+ while (it != mTimers.end()) {
+ if (((it->mAdjustRealUs - (double)adjustRealUs) * (double)mPlaybackRate
+ + (it->mMediaTimeUs - mediaTimeUs)) <= 0) {
+ updateTimer = false;
+ break;
+ }
+ ++it;
+ }
+ }
+
+ mTimers.emplace_back(notify, mediaTimeUs, adjustRealUs);
+
+ if (updateTimer) {
+ ++mGeneration;
+ processTimers_l();
+ }
+}
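+
+// Scheduling note (illustrative): a new timer forces a reschedule only when it
+// would fire before every timer already pending; e.g. at rate 1.0 and equal
+// adjustRealUs, a timer for media time 2,000,000us does not trigger
+// processTimers_l() if one for 1,500,000us is already in the list.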
+
+void MediaClock::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatTimeIsUp:
+ {
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+
+ Mutex::Autolock autoLock(mLock);
+ if (generation != mGeneration) {
+ break;
+ }
+ processTimers_l();
+ break;
+ }
+
+ default:
+ TRESPASS();
+ break;
+ }
+}
+
+void MediaClock::processTimers_l() {
+ int64_t nowMediaTimeUs;
+ status_t status = getMediaTime_l(
+ ALooper::GetNowUs(), &nowMediaTimeUs, false /* allowPastMaxTime */);
+
+ if (status != OK) {
+ return;
+ }
+
+ int64_t nextLapseRealUs = INT64_MAX;
+ std::multimap<int64_t, Timer> notifyList;
+ auto it = mTimers.begin();
+ while (it != mTimers.end()) {
+ double diff = it->mAdjustRealUs * (double)mPlaybackRate
+ + it->mMediaTimeUs - nowMediaTimeUs;
+ int64_t diffMediaUs;
+ if (diff > (double)INT64_MAX) {
+ diffMediaUs = INT64_MAX;
+ } else if (diff < (double)INT64_MIN) {
+ diffMediaUs = INT64_MIN;
+ } else {
+ diffMediaUs = diff;
+ }
+
+ if (diffMediaUs <= 0) {
+ notifyList.emplace(diffMediaUs, *it);
+ it = mTimers.erase(it);
+ } else {
+ if (mPlaybackRate != 0.0
+ && (double)diffMediaUs < INT64_MAX * (double)mPlaybackRate) {
+ int64_t targetRealUs = diffMediaUs / (double)mPlaybackRate;
+ if (targetRealUs < nextLapseRealUs) {
+ nextLapseRealUs = targetRealUs;
+ }
+ }
+ ++it;
+ }
+ }
+
+ auto itNotify = notifyList.begin();
+ while (itNotify != notifyList.end()) {
+ itNotify->second.mNotify->setInt32("reason", TIMER_REASON_REACHED);
+ itNotify->second.mNotify->post();
+ itNotify = notifyList.erase(itNotify);
+ }
+
+ if (mTimers.empty() || mPlaybackRate == 0.0 || mAnchorTimeMediaUs < 0
+ || nextLapseRealUs == INT64_MAX) {
+ return;
+ }
+
+ sp<AMessage> msg = new AMessage(kWhatTimeIsUp, this);
+ msg->setInt32("generation", mGeneration);
+ msg->post(nextLapseRealUs);
+}
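+
+// Timing sketch (hypothetical values): if the nearest pending timer is
+// 90000us of media time away and mPlaybackRate is 1.5, the kWhatTimeIsUp
+// message is posted targetRealUs = 90000 / 1.5 = 60000us from now; at rate 0
+// nothing is posted and timers wait for the next anchor or rate update.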
+
+void MediaClock::updateAnchorTimesAndPlaybackRate_l(int64_t anchorTimeMediaUs,
+ int64_t anchorTimeRealUs, float playbackRate) {
+ if (mAnchorTimeMediaUs != anchorTimeMediaUs
+ || mAnchorTimeRealUs != anchorTimeRealUs
+ || mPlaybackRate != playbackRate) {
+ mAnchorTimeMediaUs = anchorTimeMediaUs;
+ mAnchorTimeRealUs = anchorTimeRealUs;
+ mPlaybackRate = playbackRate;
+ notifyDiscontinuity_l();
+ }
+}
+
+void MediaClock::setNotificationMessage(const sp<AMessage> &msg) {
+ Mutex::Autolock autoLock(mLock);
+ mNotify = msg;
+}
+
+void MediaClock::notifyDiscontinuity_l() {
+ if (mNotify != nullptr) {
+ sp<AMessage> msg = mNotify->dup();
+ msg->setInt64("anchor-media-us", mAnchorTimeMediaUs);
+ msg->setInt64("anchor-real-us", mAnchorTimeRealUs);
+ msg->setFloat("playback-rate", mPlaybackRate);
+ msg->post();
+ }
+}
+
} // namespace android
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 759e42d..72eff94 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -16,12 +16,15 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MediaCodec"
-#include <inttypes.h>
+#include <utils/Log.h>
-#include "include/avc_utils.h"
+#include <inttypes.h>
+#include <stdlib.h>
+
#include "include/SecureBuffer.h"
#include "include/SharedMemoryBuffer.h"
#include "include/SoftwareRenderer.h"
+#include "StagefrightPluginLoader.h"
#include <android/hardware/cas/native/1.0/IDescrambler.h>
@@ -29,6 +32,7 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryDealer.h>
+#include <cutils/properties.h>
#include <gui/BufferQueue.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
@@ -41,6 +45,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/ACodec.h>
#include <media/stagefright/BufferProducerWrapper.h>
@@ -49,13 +54,11 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaFilter.h>
-#include <media/stagefright/MetaData.h>
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/PersistentSurface.h>
#include <media/stagefright/SurfaceUtils.h>
#include <mediautils/BatteryNotifier.h>
#include <private/android_filesystem_config.h>
-#include <utils/Log.h>
#include <utils/Singleton.h>
namespace android {
@@ -63,23 +66,44 @@
// key for media statistics
static const char *kCodecKeyName = "codec";
// attrs for media statistics
+// NB: these are matched with public Java API constants defined
+// in frameworks/base/media/java/android/media/MediaCodec.java
+// These must be kept synchronized with the constants there.
static const char *kCodecCodec = "android.media.mediacodec.codec"; /* e.g. OMX.google.aac.decoder */
static const char *kCodecMime = "android.media.mediacodec.mime"; /* e.g. audio/mime */
static const char *kCodecMode = "android.media.mediacodec.mode"; /* audio, video */
-static const char *kCodecSecure = "android.media.mediacodec.secure"; /* 0, 1 */
-static const char *kCodecHeight = "android.media.mediacodec.height"; /* 0..n */
-static const char *kCodecWidth = "android.media.mediacodec.width"; /* 0..n */
-static const char *kCodecRotation = "android.media.mediacodec.rotation-degrees"; /* 0/90/180/270 */
-static const char *kCodecCrypto = "android.media.mediacodec.crypto"; /* 0,1 */
+static const char *kCodecModeVideo = "video"; /* values returned for kCodecMode */
+static const char *kCodecModeAudio = "audio";
static const char *kCodecEncoder = "android.media.mediacodec.encoder"; /* 0,1 */
+static const char *kCodecSecure = "android.media.mediacodec.secure"; /* 0, 1 */
+static const char *kCodecWidth = "android.media.mediacodec.width"; /* 0..n */
+static const char *kCodecHeight = "android.media.mediacodec.height"; /* 0..n */
+static const char *kCodecRotation = "android.media.mediacodec.rotation-degrees"; /* 0/90/180/270 */
-static const char *kCodecBytesIn = "android.media.mediacodec.bytesin"; /* 0..n */
+// NB: These are not yet exposed as public Java API constants.
+static const char *kCodecCrypto = "android.media.mediacodec.crypto"; /* 0,1 */
static const char *kCodecProfile = "android.media.mediacodec.profile"; /* 0..n */
static const char *kCodecLevel = "android.media.mediacodec.level"; /* 0..n */
static const char *kCodecMaxWidth = "android.media.mediacodec.maxwidth"; /* 0..n */
static const char *kCodecMaxHeight = "android.media.mediacodec.maxheight"; /* 0..n */
static const char *kCodecError = "android.media.mediacodec.errcode";
static const char *kCodecErrorState = "android.media.mediacodec.errstate";
+static const char *kCodecLatencyMax = "android.media.mediacodec.latency.max"; /* in us */
+static const char *kCodecLatencyMin = "android.media.mediacodec.latency.min"; /* in us */
+static const char *kCodecLatencyAvg = "android.media.mediacodec.latency.avg"; /* in us */
+static const char *kCodecLatencyCount = "android.media.mediacodec.latency.n";
+static const char *kCodecLatencyHist = "android.media.mediacodec.latency.hist"; /* in us */
+static const char *kCodecLatencyUnknown = "android.media.mediacodec.latency.unknown";
+
+// the kCodecRecent* fields appear only in getMetrics() results
+static const char *kCodecRecentLatencyMax = "android.media.mediacodec.recent.max"; /* in us */
+static const char *kCodecRecentLatencyMin = "android.media.mediacodec.recent.min"; /* in us */
+static const char *kCodecRecentLatencyAvg = "android.media.mediacodec.recent.avg"; /* in us */
+static const char *kCodecRecentLatencyCount = "android.media.mediacodec.recent.n";
+static const char *kCodecRecentLatencyHist = "android.media.mediacodec.recent.hist"; /* in us */
+
+// XXX suppress until we get our representation right
+static bool kEmitHistogram = false;
static int64_t getId(const sp<IResourceManagerClient> &client) {
@@ -414,13 +438,31 @@
sp<MediaCodec> MediaCodec::CreateByType(
const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid,
uid_t uid) {
- sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
+ Vector<AString> matchingCodecs;
- const status_t ret = codec->init(mime, true /* nameIsType */, encoder);
+ MediaCodecList::findMatchingCodecs(
+ mime.c_str(),
+ encoder,
+ 0,
+ &matchingCodecs);
+
if (err != NULL) {
- *err = ret;
+ *err = NAME_NOT_FOUND;
}
- return ret == OK ? codec : NULL; // NULL deallocates codec.
+ for (size_t i = 0; i < matchingCodecs.size(); ++i) {
+ sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
+ AString componentName = matchingCodecs[i];
+ status_t ret = codec->init(componentName);
+ if (err != NULL) {
+ *err = ret;
+ }
+ if (ret == OK) {
+ return codec;
+ }
+ ALOGD("Allocating component '%s' failed (%d), try next one.",
+ componentName.c_str(), ret);
+ }
+ return NULL;
}
// static
@@ -428,7 +470,7 @@
const sp<ALooper> &looper, const AString &name, status_t *err, pid_t pid, uid_t uid) {
sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
- const status_t ret = codec->init(name, false /* nameIsType */, false /* encoder */);
+ const status_t ret = codec->init(name);
if (err != NULL) {
*err = ret;
}
@@ -437,6 +479,13 @@
// static
sp<PersistentSurface> MediaCodec::CreatePersistentInputSurface() {
+ // allow plugin to create surface
+ sp<PersistentSurface> pluginSurface =
+ StagefrightPluginLoader::GetCCodecInstance()->createInputSurface();
+ if (pluginSurface != nullptr) {
+ return pluginSurface;
+ }
+
OMXClient client;
if (client.connect() != OK) {
ALOGE("Failed to connect to OMX to create persistent input surface.");
@@ -480,12 +529,15 @@
mDequeueOutputTimeoutGeneration(0),
mDequeueOutputReplyID(0),
mHaveInputSurface(false),
- mHavePendingInputBuffers(false) {
+ mHavePendingInputBuffers(false),
+ mCpuBoostRequested(false),
+ mLatencyUnknown(0) {
if (uid == kNoUid) {
mUid = IPCThreadState::self()->getCallingUid();
} else {
mUid = uid;
}
+
initAnalyticsItem();
}
@@ -497,21 +549,93 @@
}
void MediaCodec::initAnalyticsItem() {
- CHECK(mAnalyticsItem == NULL);
- // set up our new record, get a sessionID, put it into the in-progress list
- mAnalyticsItem = new MediaAnalyticsItem(kCodecKeyName);
- if (mAnalyticsItem != NULL) {
- (void) mAnalyticsItem->generateSessionID();
- // don't record it yet; only at the end, when we have decided that we have
- // data worth writing (e.g. .count() > 0)
+ if (mAnalyticsItem == NULL) {
+ mAnalyticsItem = new MediaAnalyticsItem(kCodecKeyName);
+ }
+
+ mLatencyHist.setup(kLatencyHistBuckets, kLatencyHistWidth, kLatencyHistFloor);
+
+ {
+ Mutex::Autolock al(mRecentLock);
+ for (int i = 0; i<kRecentLatencyFrames; i++) {
+ mRecentSamples[i] = kRecentSampleInvalid;
+ }
+ mRecentHead = 0;
+ }
+}
+
+void MediaCodec::updateAnalyticsItem() {
+ ALOGV("MediaCodec::updateAnalyticsItem");
+ if (mAnalyticsItem == NULL) {
+ return;
+ }
+
+ if (mLatencyHist.getCount() != 0 ) {
+ mAnalyticsItem->setInt64(kCodecLatencyMax, mLatencyHist.getMax());
+ mAnalyticsItem->setInt64(kCodecLatencyMin, mLatencyHist.getMin());
+ mAnalyticsItem->setInt64(kCodecLatencyAvg, mLatencyHist.getAvg());
+ mAnalyticsItem->setInt64(kCodecLatencyCount, mLatencyHist.getCount());
+
+ if (kEmitHistogram) {
+ // and the histogram itself
+ std::string hist = mLatencyHist.emit();
+ mAnalyticsItem->setCString(kCodecLatencyHist, hist.c_str());
+ }
+ }
+ if (mLatencyUnknown > 0) {
+ mAnalyticsItem->setInt64(kCodecLatencyUnknown, mLatencyUnknown);
+ }
+
+#if 0
+ // enable for short term, only while debugging
+ updateEphemeralAnalytics(mAnalyticsItem);
+#endif
+}
+
+void MediaCodec::updateEphemeralAnalytics(MediaAnalyticsItem *item) {
+ ALOGD("MediaCodec::updateEphemeralAnalytics()");
+
+ if (item == NULL) {
+ return;
+ }
+
+ Histogram recentHist;
+
+ // build an empty histogram
+ recentHist.setup(kLatencyHistBuckets, kLatencyHistWidth, kLatencyHistFloor);
+
+ // stuff it with the samples in the ring buffer
+ {
+ Mutex::Autolock al(mRecentLock);
+
+ for (int i=0; i<kRecentLatencyFrames; i++) {
+ if (mRecentSamples[i] != kRecentSampleInvalid) {
+ recentHist.insert(mRecentSamples[i]);
+ }
+ }
+ }
+
+
+ // spit the data (if any) into the supplied analytics record
+ if (recentHist.getCount()!= 0 ) {
+ item->setInt64(kCodecRecentLatencyMax, recentHist.getMax());
+ item->setInt64(kCodecRecentLatencyMin, recentHist.getMin());
+ item->setInt64(kCodecRecentLatencyAvg, recentHist.getAvg());
+ item->setInt64(kCodecRecentLatencyCount, recentHist.getCount());
+
+ if (kEmitHistogram) {
+ // and the histogram itself
+ std::string hist = recentHist.emit();
+ item->setCString(kCodecRecentLatencyHist, hist.c_str());
+ }
}
}
void MediaCodec::flushAnalyticsItem() {
+ updateAnalyticsItem();
if (mAnalyticsItem != NULL) {
// don't log empty records
if (mAnalyticsItem->count() > 0) {
- mAnalyticsItem->setFinalized(true);
mAnalyticsItem->selfrecord();
}
delete mAnalyticsItem;
@@ -519,6 +643,190 @@
}
}
+bool MediaCodec::Histogram::setup(int nbuckets, int64_t width, int64_t floor)
+{
+ if (nbuckets <= 0 || width <= 0) {
+ return false;
+ }
+
+ // get histogram buckets
+ if (nbuckets == mBucketCount && mBuckets != NULL) {
+ // reuse our existing buffer
+ memset(mBuckets, 0, sizeof(*mBuckets) * mBucketCount);
+ } else {
+ // get a new pre-zeroed buffer
+ int64_t *newbuckets = (int64_t *)calloc(nbuckets, sizeof (*mBuckets));
+ if (newbuckets == NULL) {
+ goto bad;
+ }
+ if (mBuckets != NULL)
+ free(mBuckets);
+ mBuckets = newbuckets;
+ }
+
+ mWidth = width;
+ mFloor = floor;
+ mCeiling = floor + nbuckets * width;
+ mBucketCount = nbuckets;
+
+ mMin = INT64_MAX;
+ mMax = INT64_MIN;
+ mSum = 0;
+ mCount = 0;
+ mBelow = mAbove = 0;
+
+ return true;
+
+ bad:
+ if (mBuckets != NULL) {
+ free(mBuckets);
+ mBuckets = NULL;
+ }
+
+ return false;
+}
+
+void MediaCodec::Histogram::insert(int64_t sample)
+{
+ // histogram is not set up
+ if (mBuckets == NULL) {
+ return;
+ }
+
+ mCount++;
+ mSum += sample;
+ if (mMin > sample) mMin = sample;
+ if (mMax < sample) mMax = sample;
+
+ if (sample < mFloor) {
+ mBelow++;
+ } else if (sample >= mCeiling) {
+ mAbove++;
+ } else {
+ int64_t slot = (sample - mFloor) / mWidth;
+ CHECK(slot < mBucketCount);
+ mBuckets[slot]++;
+ }
+ return;
+}
+
+std::string MediaCodec::Histogram::emit()
+{
+ std::string value;
+ char buffer[64];
+
+ // emits: floor,width,below{bucket0,bucket1,...,bucketN}above
+ // unconfigured will emit: 0,0,0{}0
+ // XXX: is this best representation?
+ snprintf(buffer, sizeof(buffer), "%" PRId64 ",%" PRId64 ",%" PRId64 "{",
+ mFloor, mWidth, mBelow);
+ value = buffer;
+ for (int i = 0; i < mBucketCount; i++) {
+ if (i != 0) {
+ value = value + ",";
+ }
+ snprintf(buffer, sizeof(buffer), "%" PRId64, mBuckets[i]);
+ value = value + buffer;
+ }
+ snprintf(buffer, sizeof(buffer), "}%" PRId64 , mAbove);
+ value = value + buffer;
+ return value;
+}
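+
+// Example of the emitted string (hypothetical histogram): floor=0, width=5000,
+// 1 sample below the floor, buckets {3,2,0} and 4 samples above the ceiling
+// serialize to "0,5000,1{3,2,0}4".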
+
+// when we send a buffer to the codec;
+void MediaCodec::statsBufferSent(int64_t presentationUs) {
+
+ // only enqueue if we have a legitimate time
+ if (presentationUs <= 0) {
+ ALOGV("presentation time: %" PRId64, presentationUs);
+ return;
+ }
+
+ const int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
+ BufferFlightTiming_t startdata = { presentationUs, nowNs };
+
+ {
+ // mutex access to mBuffersInFlight and other stats
+ Mutex::Autolock al(mLatencyLock);
+
+
+ // XXX: we *could* make sure that the time is later than the end of queue
+ // as part of a consistency check...
+ mBuffersInFlight.push_back(startdata);
+ }
+}
+
+// when we get a buffer back from the codec
+void MediaCodec::statsBufferReceived(int64_t presentationUs) {
+
+ CHECK_NE(mState, UNINITIALIZED);
+
+ // mutex access to mBuffersInFlight and other stats
+ Mutex::Autolock al(mLatencyLock);
+
+ // how long this buffer took for the round trip through the codec
+ // NB: pipelining can/will make these times larger. e.g., if each packet
+ // is always 2 msec and we have 3 in flight at any given time, we're going to
+ // see "6 msec" as an answer.
+
+ // ignore stuff with no presentation time
+ if (presentationUs <= 0) {
+ ALOGV("-- returned buffer timestamp %" PRId64 " <= 0, ignore it", presentationUs);
+ mLatencyUnknown++;
+ return;
+ }
+
+ BufferFlightTiming_t startdata;
+ bool valid = false;
+ while (mBuffersInFlight.size() > 0) {
+ startdata = *mBuffersInFlight.begin();
+ ALOGV("-- Looking at startdata. presentation %" PRId64 ", start %" PRId64,
+ startdata.presentationUs, startdata.startedNs);
+ if (startdata.presentationUs == presentationUs) {
+ // a match
+ ALOGV("-- match entry for %" PRId64 ", hits our frame of %" PRId64,
+ startdata.presentationUs, presentationUs);
+ mBuffersInFlight.pop_front();
+ valid = true;
+ break;
+ } else if (startdata.presentationUs < presentationUs) {
+ // we must have missed the match for this, drop it and keep looking
+ ALOGV("-- drop entry for %" PRId64 ", before our frame of %" PRId64,
+ startdata.presentationUs, presentationUs);
+ mBuffersInFlight.pop_front();
+ continue;
+ } else {
+ // head is after, so we don't have a frame for ourselves
+ ALOGV("-- found entry for %" PRId64 ", AFTER our frame of %" PRId64
+ " we have nothing to pair with",
+ startdata.presentationUs, presentationUs);
+ mLatencyUnknown++;
+ return;
+ }
+ }
+ if (!valid) {
+ ALOGV("-- empty queue, so ignore that.");
+ mLatencyUnknown++;
+ return;
+ }
+
+ // use the current time to finish the round-trip latency calculation
+ const int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
+ int64_t latencyUs = (nowNs - startdata.startedNs + 500) / 1000;
+
+ mLatencyHist.insert(latencyUs);
+
+ // push into the recent samples
+ {
+ Mutex::Autolock al(mRecentLock);
+
+ if (mRecentHead >= kRecentLatencyFrames) {
+ mRecentHead = 0;
+ }
+ mRecentSamples[mRecentHead++] = latencyUs;
+ }
+}
+
// static
status_t MediaCodec::PostAndAwaitResponse(
const sp<AMessage> &msg, sp<AMessage> *response) {
@@ -547,10 +855,16 @@
response->postReply(replyID);
}
+static CodecBase *CreateCCodec() {
+ return StagefrightPluginLoader::GetCCodecInstance()->createCodec();
+}
+
//static
-sp<CodecBase> MediaCodec::GetCodecBase(const AString &name, bool nameIsType) {
- // at this time only ACodec specifies a mime type.
- if (nameIsType || name.startsWithIgnoreCase("omx.")) {
+sp<CodecBase> MediaCodec::GetCodecBase(const AString &name) {
+ if (name.startsWithIgnoreCase("c2.")) {
+ return CreateCCodec();
+ } else if (name.startsWithIgnoreCase("omx.")) {
+ // at this time only ACodec specifies a mime type.
return new ACodec;
} else if (name.startsWithIgnoreCase("android.filter.")) {
return new MediaFilter;
@@ -559,50 +873,53 @@
}
}
-status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) {
+status_t MediaCodec::init(const AString &name) {
mResourceManagerService->init();
// save init parameters for reset
mInitName = name;
- mInitNameIsType = nameIsType;
- mInitIsEncoder = encoder;
// Current video decoders do not return from OMX_FillThisBuffer
// quickly, violating the OpenMAX specs, until that is remedied
// we need to invest in an extra looper to free the main event
// queue.
- mCodec = GetCodecBase(name, nameIsType);
+ mCodec = GetCodecBase(name);
if (mCodec == NULL) {
return NAME_NOT_FOUND;
}
+ mCodecInfo.clear();
+
bool secureCodec = false;
- if (nameIsType && !strncasecmp(name.c_str(), "video/", 6)) {
- mIsVideo = true;
- } else {
- AString tmp = name;
- if (tmp.endsWith(".secure")) {
- secureCodec = true;
- tmp.erase(tmp.size() - 7, 7);
+ AString tmp = name;
+ if (tmp.endsWith(".secure")) {
+ secureCodec = true;
+ tmp.erase(tmp.size() - 7, 7);
+ }
+ const sp<IMediaCodecList> mcl = MediaCodecList::getInstance();
+ if (mcl == NULL) {
+ mCodec = NULL; // remove the codec.
+ return NO_INIT; // if called from Java should raise IOException
+ }
+ for (const AString &codecName : { name, tmp }) {
+ ssize_t codecIdx = mcl->findCodecByName(codecName.c_str());
+ if (codecIdx < 0) {
+ continue;
}
- const sp<IMediaCodecList> mcl = MediaCodecList::getInstance();
- if (mcl == NULL) {
- mCodec = NULL; // remove the codec.
- return NO_INIT; // if called from Java should raise IOException
- }
- ssize_t codecIdx = mcl->findCodecByName(tmp.c_str());
- if (codecIdx >= 0) {
- const sp<MediaCodecInfo> info = mcl->getCodecInfo(codecIdx);
- Vector<AString> mimes;
- info->getSupportedMimes(&mimes);
- for (size_t i = 0; i < mimes.size(); i++) {
- if (mimes[i].startsWith("video/")) {
- mIsVideo = true;
- break;
- }
+ mCodecInfo = mcl->getCodecInfo(codecIdx);
+ Vector<AString> mimes;
+ mCodecInfo->getSupportedMimes(&mimes);
+ for (size_t i = 0; i < mimes.size(); i++) {
+ if (mimes[i].startsWith("video/")) {
+ mIsVideo = true;
+ break;
}
}
+ break;
+ }
+ if (mCodecInfo == nullptr) {
+ return NAME_NOT_FOUND;
}
if (mIsVideo) {
@@ -629,23 +946,14 @@
new BufferCallback(new AMessage(kWhatCodecNotify, this))));
sp<AMessage> msg = new AMessage(kWhatInit, this);
+ msg->setObject("codecInfo", mCodecInfo);
+ // name may be different from mCodecInfo->getCodecName() if we stripped
+ // ".secure"
msg->setString("name", name);
- msg->setInt32("nameIsType", nameIsType);
-
- if (nameIsType) {
- msg->setInt32("encoder", encoder);
- }
if (mAnalyticsItem != NULL) {
- if (nameIsType) {
- // name is the mime type
- mAnalyticsItem->setCString(kCodecMime, name.c_str());
- } else {
- mAnalyticsItem->setCString(kCodecCodec, name.c_str());
- }
- mAnalyticsItem->setCString(kCodecMode, mIsVideo ? "video" : "audio");
- if (nameIsType)
- mAnalyticsItem->setInt32(kCodecEncoder, encoder);
+ mAnalyticsItem->setCString(kCodecCodec, name.c_str());
+ mAnalyticsItem->setCString(kCodecMode, mIsVideo ? kCodecModeVideo : kCodecModeAudio);
}
status_t err;
@@ -711,6 +1019,7 @@
if (format->findInt32("level", &level)) {
mAnalyticsItem->setInt32(kCodecLevel, level);
}
+ mAnalyticsItem->setInt32(kCodecEncoder, (flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
}
if (mIsVideo) {
@@ -735,8 +1044,7 @@
}
// Prevent possible integer overflow in downstream code.
- if (mInitIsEncoder
- && (uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
+ if ((uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
ALOGE("buffer size is too big, width=%d, height=%d", mVideoWidth, mVideoHeight);
return BAD_VALUE;
}
@@ -753,7 +1061,6 @@
msg->setPointer("descrambler", descrambler.get());
}
if (mAnalyticsItem != NULL) {
- // XXX: save indication that it's crypto in some way...
mAnalyticsItem->setInt32(kCodecCrypto, 1);
}
} else if (mFlags & kFlagIsSecure) {
@@ -1015,7 +1322,7 @@
mHaveInputSurface = false;
if (err == OK) {
- err = init(mInitName, mInitNameIsType, mInitIsEncoder);
+ err = init(mInitName);
}
return err;
}
@@ -1195,6 +1502,22 @@
return OK;
}
+status_t MediaCodec::getCodecInfo(sp<MediaCodecInfo> *codecInfo) const {
+ sp<AMessage> msg = new AMessage(kWhatGetCodecInfo, this);
+
+ sp<AMessage> response;
+ status_t err;
+ if ((err = PostAndAwaitResponse(msg, &response)) != OK) {
+ return err;
+ }
+
+ sp<RefBase> obj;
+ CHECK(response->findObject("codecInfo", &obj));
+ *codecInfo = static_cast<MediaCodecInfo *>(obj.get());
+
+ return OK;
+}
+
status_t MediaCodec::getMetrics(MediaAnalyticsItem * &reply) {
reply = NULL;
@@ -1204,11 +1527,14 @@
return UNKNOWN_ERROR;
}
- // XXX: go get current values for whatever in-flight data we want
+ // update any in-flight data that's not carried within the record
+ updateAnalyticsItem();
// send it back to the caller.
reply = mAnalyticsItem->dup();
+ updateEphemeralAnalytics(reply);
+
return OK;
}
@@ -1319,6 +1645,31 @@
msg->post();
}
+void MediaCodec::requestCpuBoostIfNeeded() {
+ if (mCpuBoostRequested) {
+ return;
+ }
+ int32_t colorFormat;
+ if (mSoftRenderer != NULL
+ && mOutputFormat->contains("hdr-static-info")
+ && mOutputFormat->findInt32("color-format", &colorFormat)
+ && (colorFormat == OMX_COLOR_FormatYUV420Planar16)) {
+ int32_t left, top, right, bottom, width, height;
+ int64_t totalPixel = 0;
+ if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
+ totalPixel = (right - left + 1) * (bottom - top + 1);
+ } else if (mOutputFormat->findInt32("width", &width)
+ && mOutputFormat->findInt32("height", &height)) {
+ totalPixel = width * height;
+ }
+ if (totalPixel >= 1920 * 1080) {
+ addResource(MediaResource::kCpuBoost,
+ MediaResource::kUnspecifiedSubType, 1);
+ mCpuBoostRequested = true;
+ }
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
void MediaCodec::cancelPendingDequeueOperations() {
@@ -1394,6 +1745,8 @@
int64_t timeUs;
CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+ statsBufferReceived(timeUs);
+
response->setInt64("timeUs", timeUs);
int32_t flags;
@@ -1440,7 +1793,7 @@
{
if (actionCode == ACTION_CODE_FATAL) {
mAnalyticsItem->setInt32(kCodecError, err);
- mAnalyticsItem->setInt32(kCodecErrorState, mState);
+ mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
flushAnalyticsItem();
initAnalyticsItem();
}
@@ -1453,7 +1806,7 @@
{
if (actionCode == ACTION_CODE_FATAL) {
mAnalyticsItem->setInt32(kCodecError, err);
- mAnalyticsItem->setInt32(kCodecErrorState, mState);
+ mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
flushAnalyticsItem();
initAnalyticsItem();
}
@@ -1494,7 +1847,7 @@
{
if (actionCode == ACTION_CODE_FATAL) {
mAnalyticsItem->setInt32(kCodecError, err);
- mAnalyticsItem->setInt32(kCodecErrorState, mState);
+ mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
flushAnalyticsItem();
initAnalyticsItem();
@@ -1527,7 +1880,7 @@
break;
default:
mAnalyticsItem->setInt32(kCodecError, err);
- mAnalyticsItem->setInt32(kCodecErrorState, mState);
+ mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
flushAnalyticsItem();
initAnalyticsItem();
setState(UNINITIALIZED);
@@ -1827,12 +2180,20 @@
mSurface.get(), (android_dataspace)dataSpace);
ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
}
+ if (mOutputFormat->contains("hdr-static-info")) {
+ HDRStaticInfo info;
+ if (ColorUtils::getHDRStaticInfoFromFormat(mOutputFormat, &info)) {
+ setNativeWindowHdrMetadata(mSurface.get(), &info);
+ }
+ }
if (mime.startsWithIgnoreCase("video/")) {
mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
}
}
+ requestCpuBoostIfNeeded();
+
if (mFlags & kFlagIsEncoder) {
// Before we announce the format change we should
// collect codec specific data and amend the output
@@ -1849,7 +2210,6 @@
}
}
}
-
if (mFlags & kFlagIsAsync) {
onOutputFormatChanged();
} else {
@@ -1957,24 +2317,14 @@
mReplyID = replyID;
setState(INITIALIZING);
+ sp<RefBase> codecInfo;
+ CHECK(msg->findObject("codecInfo", &codecInfo));
AString name;
CHECK(msg->findString("name", &name));
- int32_t nameIsType;
- int32_t encoder = false;
- CHECK(msg->findInt32("nameIsType", &nameIsType));
- if (nameIsType) {
- CHECK(msg->findInt32("encoder", &encoder));
- }
-
sp<AMessage> format = new AMessage;
-
- if (nameIsType) {
- format->setString("mime", name.c_str());
- format->setInt32("encoder", encoder);
- } else {
- format->setString("componentName", name.c_str());
- }
+ format->setObject("codecInfo", codecInfo);
+ format->setString("componentName", name);
mCodec->initiateAllocateComponent(format);
break;
@@ -2593,6 +2943,17 @@
break;
}
+ case kWhatGetCodecInfo:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ sp<AMessage> response = new AMessage;
+ response->setObject("codecInfo", mCodecInfo);
+ response->postReply(replyID);
+ break;
+ }
+
case kWhatSetParameters:
{
sp<AReplyToken> replyID;
@@ -2872,9 +3233,8 @@
Mutex::Autolock al(mBufferLock);
info->mOwnedByClient = false;
info->mData.clear();
- if (mAnalyticsItem != NULL) {
- mAnalyticsItem->addInt64(kCodecBytesIn, size);
- }
+
+ statsBufferSent(timeUs);
}
return err;
@@ -2942,8 +3302,8 @@
if (mSoftRenderer != NULL) {
std::list<FrameRenderTracker::Info> doneFrames = mSoftRenderer->render(
- buffer->data(), buffer->size(),
- mediaTimeUs, renderTimeNs, NULL, buffer->format());
+ buffer->data(), buffer->size(), mediaTimeUs, renderTimeNs,
+ mPortBuffers[kPortIndexOutput].size(), buffer->format());
// if we are running, notify rendered frames
if (!doneFrames.empty() && mState == STARTED && mOnFrameRenderedNotification != NULL) {
@@ -3091,6 +3451,8 @@
msg->setInt64("timeUs", timeUs);
+ statsBufferReceived(timeUs);
+
int32_t flags;
CHECK(buffer->meta()->findInt32("flags", &flags));
@@ -3224,4 +3586,28 @@
}
}
+std::string MediaCodec::stateString(State state) {
+ const char *rval = NULL;
+ char rawbuffer[16]; // room for "%d"
+
+ switch (state) {
+ case UNINITIALIZED: rval = "UNINITIALIZED"; break;
+ case INITIALIZING: rval = "INITIALIZING"; break;
+ case INITIALIZED: rval = "INITIALIZED"; break;
+ case CONFIGURING: rval = "CONFIGURING"; break;
+ case CONFIGURED: rval = "CONFIGURED"; break;
+ case STARTING: rval = "STARTING"; break;
+ case STARTED: rval = "STARTED"; break;
+ case FLUSHING: rval = "FLUSHING"; break;
+ case FLUSHED: rval = "FLUSHED"; break;
+ case STOPPING: rval = "STOPPING"; break;
+ case RELEASING: rval = "RELEASING"; break;
+ default:
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", state);
+ rval = rawbuffer;
+ break;
+ }
+ return rval;
+}
+
} // namespace android
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 4652594..eaff283 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -19,28 +19,27 @@
#include <utils/Log.h>
#include "MediaCodecListOverrides.h"
+#include "StagefrightPluginLoader.h"
#include <binder/IServiceManager.h>
#include <media/IMediaCodecList.h>
#include <media/IMediaPlayerService.h>
-#include <media/IMediaCodecService.h>
#include <media/MediaCodecInfo.h>
-#include <media/MediaDefs.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/MediaDefs.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/OmxInfoBuilder.h>
#include <media/stagefright/omx/OMXUtils.h>
-#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
+#include <xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h>
#include <sys/stat.h>
#include <utils/threads.h>
#include <cutils/properties.h>
-#include <expat.h>
#include <algorithm>
@@ -80,6 +79,31 @@
OmxInfoBuilder sOmxInfoBuilder;
+Mutex sCodec2InfoBuilderMutex;
+std::unique_ptr<MediaCodecListBuilderBase> sCodec2InfoBuilder;
+
+MediaCodecListBuilderBase *GetCodec2InfoBuilder() {
+ Mutex::Autolock _l(sCodec2InfoBuilderMutex);
+ if (!sCodec2InfoBuilder) {
+ sCodec2InfoBuilder.reset(
+ StagefrightPluginLoader::GetCCodecInstance()->createBuilder());
+ }
+ return sCodec2InfoBuilder.get();
+}
+
+std::vector<MediaCodecListBuilderBase *> GetBuilders() {
+ std::vector<MediaCodecListBuilderBase *> builders;
+ // if plugin provides the input surface, we cannot use OMX video encoders.
+ // In this case, rely on plugin to provide list of OMX codecs that are usable.
+ sp<PersistentSurface> surfaceTest =
+ StagefrightPluginLoader::GetCCodecInstance()->createInputSurface();
+ if (surfaceTest == nullptr) {
+ builders.push_back(&sOmxInfoBuilder);
+ }
+ builders.push_back(GetCodec2InfoBuilder());
+ return builders;
+}
+
} // unnamed namespace
// static
@@ -90,7 +114,7 @@
ALOGV("Enter profilerThreadWrapper.");
remove(kProfilingResults); // remove previous result so that it won't be loaded to
// the new MediaCodecList
- sp<MediaCodecList> codecList(new MediaCodecList(&sOmxInfoBuilder));
+ sp<MediaCodecList> codecList(new MediaCodecList(GetBuilders()));
if (codecList->initCheck() != OK) {
ALOGW("Failed to create a new MediaCodecList, skipping codec profiling.");
return nullptr;
@@ -100,7 +124,7 @@
ALOGV("Codec profiling started.");
profileCodecs(infos, kProfilingResults);
ALOGV("Codec profiling completed.");
- codecList = new MediaCodecList(&sOmxInfoBuilder);
+ codecList = new MediaCodecList(GetBuilders());
if (codecList->initCheck() != OK) {
ALOGW("Failed to parse profiling results.");
return nullptr;
@@ -118,7 +142,7 @@
Mutex::Autolock autoLock(sInitMutex);
if (sCodecList == nullptr) {
- MediaCodecList *codecList = new MediaCodecList(&sOmxInfoBuilder);
+ MediaCodecList *codecList = new MediaCodecList(GetBuilders());
if (codecList->initCheck() == OK) {
sCodecList = codecList;
@@ -171,11 +195,34 @@
return sRemoteList;
}
-MediaCodecList::MediaCodecList(MediaCodecListBuilderBase* builder) {
+MediaCodecList::MediaCodecList(std::vector<MediaCodecListBuilderBase*> builders) {
mGlobalSettings = new AMessage();
mCodecInfos.clear();
- MediaCodecListWriter writer(this);
- mInitCheck = builder->buildMediaCodecList(&writer);
+ MediaCodecListWriter writer;
+ for (MediaCodecListBuilderBase *builder : builders) {
+ if (builder == nullptr) {
+ ALOGD("ignored a null builder");
+ continue;
+ }
+ mInitCheck = builder->buildMediaCodecList(&writer);
+ if (mInitCheck != OK) {
+ break;
+ }
+ }
+ writer.writeGlobalSettings(mGlobalSettings);
+ writer.writeCodecInfos(&mCodecInfos);
+ std::stable_sort(
+ mCodecInfos.begin(),
+ mCodecInfos.end(),
+ [](const sp<MediaCodecInfo> &info1, const sp<MediaCodecInfo> &info2) {
+ if (info2 == nullptr) {
+ return false;
+ } else if (info1 == nullptr) {
+ return true;
+ } else {
+ return info1->rank() < info2->rank();
+ }
+ });
}
MediaCodecList::~MediaCodecList() {
@@ -185,23 +232,6 @@
return mInitCheck;
}
-MediaCodecListWriter::MediaCodecListWriter(MediaCodecList* list) :
- mList(list) {
-}
-
-void MediaCodecListWriter::addGlobalSetting(
- const char* key, const char* value) {
- mList->mGlobalSettings->setString(key, value);
-}
-
-std::unique_ptr<MediaCodecInfoWriter>
- MediaCodecListWriter::addMediaCodecInfo() {
- sp<MediaCodecInfo> info = new MediaCodecInfo();
- mList->mCodecInfos.push_back(info);
- return std::unique_ptr<MediaCodecInfoWriter>(
- new MediaCodecInfoWriter(info.get()));
-}
-
// legacy method for non-advanced codecs
ssize_t MediaCodecList::findCodecByType(
const char *type, bool encoder, size_t startIndex) const {
@@ -262,7 +292,9 @@
//static
bool MediaCodecList::isSoftwareCodec(const AString &componentName) {
return componentName.startsWithIgnoreCase("OMX.google.")
- || !componentName.startsWithIgnoreCase("OMX.");
+ || componentName.startsWithIgnoreCase("c2.android.")
+ || (!componentName.startsWithIgnoreCase("OMX.")
+ && !componentName.startsWithIgnoreCase("c2."));
}
static int compareSoftwareCodecsFirst(const AString *name1, const AString *name2) {
@@ -273,7 +305,14 @@
return isSoftwareCodec2 - isSoftwareCodec1;
}
- // sort order 2: OMX codecs are first (lower)
+ // sort order 2: Codec 2.0 codecs are first (lower)
+ bool isC2_1 = name1->startsWithIgnoreCase("c2.");
+ bool isC2_2 = name2->startsWithIgnoreCase("c2.");
+ if (isC2_1 != isC2_2) {
+ return isC2_2 - isC2_1;
+ }
+
+ // sort order 3: OMX codecs are first (lower)
bool isOMX1 = name1->startsWithIgnoreCase("OMX.");
bool isOMX2 = name2->startsWithIgnoreCase("OMX.");
return isOMX2 - isOMX1;
@@ -282,11 +321,8 @@
//static
void MediaCodecList::findMatchingCodecs(
const char *mime, bool encoder, uint32_t flags,
- Vector<AString> *matches, Vector<AString> *owners) {
+ Vector<AString> *matches) {
matches->clear();
- if (owners != nullptr) {
- owners->clear();
- }
const sp<IMediaCodecList> list = getInstance();
if (list == nullptr) {
@@ -312,9 +348,6 @@
ALOGV("skipping SW codec '%s'", componentName.c_str());
} else {
matches->push(componentName);
- if (owners != nullptr) {
- owners->push(AString(info->getOwnerName()));
- }
ALOGV("matching '%s'", componentName.c_str());
}
}
@@ -325,7 +358,4 @@
}
}
-MediaCodecListBuilderBase::~MediaCodecListBuilderBase() {
-}
-
} // namespace android
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index 6920e51..cac53f4 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -222,7 +222,7 @@
AString supportMultipleSecureCodecs = "true";
for (const auto& info : infos) {
AString name = info->getCodecName();
- if (name.startsWith("OMX.google.") ||
+ if (name.startsWith("OMX.google.") || name.startsWith("c2.android.") ||
// TODO: reenable below codecs once fixed
name == "OMX.Intel.VideoDecoder.VP9.hybrid") {
continue;
diff --git a/media/libstagefright/MediaCodecListWriter.cpp b/media/libstagefright/MediaCodecListWriter.cpp
new file mode 100644
index 0000000..b32e470
--- /dev/null
+++ b/media/libstagefright/MediaCodecListWriter.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecListWriter"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodecListWriter.h>
+#include <media/MediaCodecInfo.h>
+
+namespace android {
+
+void MediaCodecListWriter::addGlobalSetting(
+ const char* key, const char* value) {
+ mGlobalSettings.emplace_back(key, value);
+}
+
+std::unique_ptr<MediaCodecInfoWriter>
+ MediaCodecListWriter::addMediaCodecInfo() {
+ sp<MediaCodecInfo> info = new MediaCodecInfo();
+ mCodecInfos.push_back(info);
+ return std::unique_ptr<MediaCodecInfoWriter>(
+ new MediaCodecInfoWriter(info.get()));
+}
+
+void MediaCodecListWriter::writeGlobalSettings(
+ const sp<AMessage> &globalSettings) const {
+ for (const std::pair<std::string, std::string> &kv : mGlobalSettings) {
+ globalSettings->setString(kv.first.c_str(), kv.second.c_str());
+ }
+}
+
+void MediaCodecListWriter::writeCodecInfos(
+ std::vector<sp<MediaCodecInfo>> *codecInfos) const {
+ for (const sp<MediaCodecInfo> &info : mCodecInfos) {
+ codecInfos->push_back(info);
+ }
+}
+
+} // namespace android
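
Illustrative sketch (not part of the patch): the new MediaCodecListWriter no longer writes into a MediaCodecList it points at; it stages global settings and codec infos locally and copies them out through writeGlobalSettings()/writeCodecInfos(). A simplified standalone model of that builder pattern, with illustrative names (ListWriter, CodecInfo) and std::shared_ptr standing in for sp<>:

    // Simplified model of the writer: settings and codec infos are staged
    // inside the writer and copied out on request.
    #include <iostream>
    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    struct CodecInfo {
        std::string name;
    };

    class ListWriter {
    public:
        void addGlobalSetting(const char *key, const char *value) {
            mGlobalSettings.emplace_back(key, value);
        }
        std::shared_ptr<CodecInfo> addCodecInfo() {
            auto info = std::make_shared<CodecInfo>();
            mCodecInfos.push_back(info);
            return info;
        }
        // Counterpart of writeGlobalSettings()/writeCodecInfos(): hand the
        // staged state to caller-provided containers.
        void writeTo(std::vector<std::pair<std::string, std::string>> *settings,
                     std::vector<std::shared_ptr<CodecInfo>> *infos) const {
            *settings = mGlobalSettings;
            *infos = mCodecInfos;
        }

    private:
        std::vector<std::pair<std::string, std::string>> mGlobalSettings;
        std::vector<std::shared_ptr<CodecInfo>> mCodecInfos;
    };

    int main() {
        ListWriter writer;
        writer.addGlobalSetting("supports-multiple-secure-codecs", "true");
        writer.addCodecInfo()->name = "c2.android.avc.decoder";

        std::vector<std::pair<std::string, std::string>> settings;
        std::vector<std::shared_ptr<CodecInfo>> infos;
        writer.writeTo(&settings, &infos);
        std::cout << settings.size() << " setting(s), " << infos.size() << " codec(s)\n";
    }
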
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index d808e5b..20881a4 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -23,7 +23,9 @@
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
+#include <media/MediaBufferHolder.h>
#include <media/MediaCodecBuffer.h>
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -33,7 +35,6 @@
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaCodecSource.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
@@ -58,7 +59,7 @@
void pause();
void resume();
status_t setStopTimeUs(int64_t stopTimeUs);
- bool readBuffer(MediaBuffer **buffer);
+ bool readBuffer(MediaBufferBase **buffer);
protected:
virtual void onMessageReceived(const sp<AMessage> &msg);
@@ -85,14 +86,14 @@
int64_t mReadPendingSince;
bool mPaused;
bool mPulling;
- Vector<MediaBuffer *> mReadBuffers;
+ Vector<MediaBufferBase *> mReadBuffers;
void flush();
// if queue is empty, return false and set *|buffer| to NULL. Otherwise, pop

// buffer from front of the queue, place it into *|buffer| and return true.
- bool readBuffer(MediaBuffer **buffer);
+ bool readBuffer(MediaBufferBase **buffer);
// add a buffer to the back of the queue
- void pushBuffer(MediaBuffer *mbuf);
+ void pushBuffer(MediaBufferBase *mbuf);
};
Mutexed<Queue> mQueue;
@@ -122,11 +123,11 @@
mLooper->stop();
}
-void MediaCodecSource::Puller::Queue::pushBuffer(MediaBuffer *mbuf) {
+void MediaCodecSource::Puller::Queue::pushBuffer(MediaBufferBase *mbuf) {
mReadBuffers.push_back(mbuf);
}
-bool MediaCodecSource::Puller::Queue::readBuffer(MediaBuffer **mbuf) {
+bool MediaCodecSource::Puller::Queue::readBuffer(MediaBufferBase **mbuf) {
if (mReadBuffers.empty()) {
*mbuf = NULL;
return false;
@@ -137,14 +138,14 @@
}
void MediaCodecSource::Puller::Queue::flush() {
- MediaBuffer *mbuf;
+ MediaBufferBase *mbuf;
while (readBuffer(&mbuf)) {
// there are no null buffers in the queue
mbuf->release();
}
}
-bool MediaCodecSource::Puller::readBuffer(MediaBuffer **mbuf) {
+bool MediaCodecSource::Puller::readBuffer(MediaBufferBase **mbuf) {
Mutexed<Queue>::Locked queue(mQueue);
return queue->readBuffer(mbuf);
}
@@ -297,7 +298,7 @@
}
queue.unlock();
- MediaBuffer *mbuf = NULL;
+ MediaBufferBase *mbuf = NULL;
status_t err = mSource->read(&mbuf);
queue.lock();
@@ -412,7 +413,7 @@
}
status_t MediaCodecSource::read(
- MediaBuffer** buffer, const ReadOptions* /* options */) {
+ MediaBufferBase** buffer, const ReadOptions* /* options */) {
Mutexed<Output>::Locked output(mOutput);
*buffer = NULL;
@@ -427,7 +428,7 @@
return output->mErrorCode;
}
-void MediaCodecSource::signalBufferReturned(MediaBuffer *buffer) {
+void MediaCodecSource::signalBufferReturned(MediaBufferBase *buffer) {
buffer->setObserver(0);
buffer->release();
}
@@ -457,13 +458,6 @@
mGeneration(0) {
CHECK(mLooper != NULL);
- AString mime;
- CHECK(mOutputFormat->findString("mime", &mime));
-
- if (!strncasecmp("video/", mime.c_str(), 6)) {
- mIsVideo = true;
- }
-
if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
mPuller = new Puller(source);
}
@@ -487,6 +481,7 @@
}
status_t MediaCodecSource::initEncoder() {
+
mReflector = new AHandlerReflector<MediaCodecSource>(this);
mLooper->registerHandler(mReflector);
@@ -500,23 +495,12 @@
AString outputMIME;
CHECK(mOutputFormat->findString("mime", &outputMIME));
+ mIsVideo = outputMIME.startsWithIgnoreCase("video/");
- Vector<AString> matchingCodecs;
- MediaCodecList::findMatchingCodecs(
- outputMIME.c_str(), true /* encoder */,
- ((mFlags & FLAG_PREFER_SOFTWARE_CODEC) ? MediaCodecList::kPreferSoftwareCodecs : 0),
- &matchingCodecs);
-
+ AString name;
status_t err = NO_INIT;
- for (size_t ix = 0; ix < matchingCodecs.size(); ++ix) {
- mEncoder = MediaCodec::CreateByComponentName(
- mCodecLooper, matchingCodecs[ix]);
-
- if (mEncoder == NULL) {
- continue;
- }
-
- ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
+ if (mOutputFormat->findString("testing-name", &name)) {
+ mEncoder = MediaCodec::CreateByComponentName(mCodecLooper, name);
mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, mReflector);
mEncoder->setCallback(mEncoderActivityNotify);
@@ -526,12 +510,38 @@
NULL /* nativeWindow */,
NULL /* crypto */,
MediaCodec::CONFIGURE_FLAG_ENCODE);
+ } else {
+ Vector<AString> matchingCodecs;
+ MediaCodecList::findMatchingCodecs(
+ outputMIME.c_str(), true /* encoder */,
+ ((mFlags & FLAG_PREFER_SOFTWARE_CODEC) ? MediaCodecList::kPreferSoftwareCodecs : 0),
+ &matchingCodecs);
- if (err == OK) {
- break;
+ for (size_t ix = 0; ix < matchingCodecs.size(); ++ix) {
+ mEncoder = MediaCodec::CreateByComponentName(
+ mCodecLooper, matchingCodecs[ix]);
+
+ if (mEncoder == NULL) {
+ continue;
+ }
+
+ ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
+
+ mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, mReflector);
+ mEncoder->setCallback(mEncoderActivityNotify);
+
+ err = mEncoder->configure(
+ mOutputFormat,
+ NULL /* nativeWindow */,
+ NULL /* crypto */,
+ MediaCodec::CONFIGURE_FLAG_ENCODE);
+
+ if (err == OK) {
+ break;
+ }
+ mEncoder->release();
+ mEncoder = NULL;
}
- mEncoder->release();
- mEncoder = NULL;
}
if (err != OK) {
@@ -626,7 +636,7 @@
if (!reachedEOS) {
ALOGV("encoder (%s) reached EOS", mIsVideo ? "video" : "audio");
// release all unread media buffers
- for (List<MediaBuffer*>::iterator it = output->mBufferQueue.begin();
+ for (List<MediaBufferBase*>::iterator it = output->mBufferQueue.begin();
it != output->mBufferQueue.end(); it++) {
(*it)->release();
}
@@ -643,7 +653,9 @@
if (mStopping && reachedEOS) {
ALOGI("encoder (%s) stopped", mIsVideo ? "video" : "audio");
- mPuller->stopSource();
+ if (mPuller != NULL) {
+ mPuller->stopSource();
+ }
ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
// posting reply to everyone that's waiting
List<sp<AReplyToken>>::iterator it;
@@ -670,7 +682,7 @@
}
status_t MediaCodecSource::feedEncoderInputBuffers() {
- MediaBuffer* mbuf = NULL;
+ MediaBufferBase* mbuf = NULL;
while (!mAvailEncoderInputIndices.empty() && mPuller->readBuffer(&mbuf)) {
size_t bufferIndex = *mAvailEncoderInputIndices.begin();
mAvailEncoderInputIndices.erase(mAvailEncoderInputIndices.begin());
@@ -680,7 +692,7 @@
size_t size = 0;
if (mbuf != NULL) {
- CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(mbuf->meta_data().findInt64(kKeyTime, &timeUs));
if (mFirstSampleSystemTimeUs < 0ll) {
mFirstSampleSystemTimeUs = systemTime() / 1000;
if (mPausePending) {
@@ -703,7 +715,7 @@
mFirstSampleTimeUs = timeUs;
}
int64_t driftTimeUs = 0;
- if (mbuf->meta_data()->findInt64(kKeyDriftTime, &driftTimeUs)
+ if (mbuf->meta_data().findInt64(kKeyDriftTime, &driftTimeUs)
&& driftTimeUs) {
driftTimeUs = timeUs - mFirstSampleTimeUs - driftTimeUs;
}
@@ -728,7 +740,8 @@
if (mIsVideo) {
// video encoder will release MediaBuffer when done
// with underlying data.
- inbuf->setMediaBufferBase(mbuf);
+ inbuf->meta()->setObject("mediaBufferHolder", new MediaBufferHolder(mbuf));
+ mbuf->release();
} else {
mbuf->release();
}
@@ -893,7 +906,7 @@
break;
}
- MediaBuffer *mbuf = new MediaBuffer(outbuf->size());
+ MediaBufferBase *mbuf = new MediaBuffer(outbuf->size());
mbuf->setObserver(this);
mbuf->add_ref();
@@ -924,7 +937,7 @@
decodingTimeUs = *(mDecodingTimeQueue.begin());
mDecodingTimeQueue.erase(mDecodingTimeQueue.begin());
}
- mbuf->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs);
+ mbuf->meta_data().setInt64(kKeyDecodingTime, decodingTimeUs);
ALOGV("[video] time %" PRId64 " us (%.2f secs), dts/pts diff %" PRId64,
timeUs, timeUs / 1E6, decodingTimeUs - timeUs);
@@ -934,18 +947,18 @@
CHECK(!mDriftTimeQueue.empty());
driftTimeUs = *(mDriftTimeQueue.begin());
mDriftTimeQueue.erase(mDriftTimeQueue.begin());
- mbuf->meta_data()->setInt64(kKeyDriftTime, driftTimeUs);
+ mbuf->meta_data().setInt64(kKeyDriftTime, driftTimeUs);
#endif // DEBUG_DRIFT_TIME
ALOGV("[audio] time %" PRId64 " us (%.2f secs), drift %" PRId64,
timeUs, timeUs / 1E6, driftTimeUs);
}
- mbuf->meta_data()->setInt64(kKeyTime, timeUs);
+ mbuf->meta_data().setInt64(kKeyTime, timeUs);
} else {
- mbuf->meta_data()->setInt64(kKeyTime, 0ll);
- mbuf->meta_data()->setInt32(kKeyIsCodecConfig, true);
+ mbuf->meta_data().setInt64(kKeyTime, 0ll);
+ mbuf->meta_data().setInt32(kKeyIsCodecConfig, true);
}
if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) {
- mbuf->meta_data()->setInt32(kKeyIsSyncFrame, true);
+ mbuf->meta_data().setInt32(kKeyIsSyncFrame, true);
}
memcpy(mbuf->data(), outbuf->data(), outbuf->size());
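
Illustrative sketch (not part of the patch): instead of parking the raw MediaBuffer pointer on the codec input buffer via setMediaBufferBase(), the change above attaches a ref-counted MediaBufferHolder to the buffer's metadata and immediately drops the caller's reference. A simplified standalone model of that ownership hand-off, with std::shared_ptr standing in for the framework's reference counting and illustrative type names:

    // The source buffer stays alive only because a holder attached to the
    // codec buffer's metadata keeps a reference; the producer releases its
    // own reference right away.
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    struct SourceBuffer {                 // stands in for MediaBufferBase
        std::vector<uint8_t> data;
        ~SourceBuffer() { std::cout << "source buffer freed\n"; }
    };

    struct BufferHolder {                 // stands in for MediaBufferHolder
        explicit BufferHolder(std::shared_ptr<SourceBuffer> b) : buffer(std::move(b)) {}
        std::shared_ptr<SourceBuffer> buffer;
    };

    struct CodecInputBuffer {             // stands in for MediaCodecBuffer::meta()
        std::map<std::string, std::shared_ptr<BufferHolder>> meta;
    };

    int main() {
        auto mbuf = std::make_shared<SourceBuffer>();
        mbuf->data = {0x00, 0x00, 0x00, 0x01};

        CodecInputBuffer inbuf;
        inbuf.meta["mediaBufferHolder"] = std::make_shared<BufferHolder>(mbuf);
        mbuf.reset();                     // producer's reference released, as in the diff

        std::cout << "still referenced: "
                  << inbuf.meta["mediaBufferHolder"]->buffer->data.size() << " bytes\n";
        inbuf.meta.clear();               // codec done -> holder dropped -> buffer freed
    }
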
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
deleted file mode 100644
index c91c82b..0000000
--- a/media/libstagefright/MediaExtractor.cpp
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaExtractor"
-#include <utils/Log.h>
-#include <inttypes.h>
-#include <pwd.h>
-
-#include "include/AMRExtractor.h"
-#include "include/MP3Extractor.h"
-#include "include/MPEG4Extractor.h"
-#include "include/WAVExtractor.h"
-#include "include/OggExtractor.h"
-#include "include/MPEG2PSExtractor.h"
-#include "include/MPEG2TSExtractor.h"
-#include "include/FLACExtractor.h"
-#include "include/AACExtractor.h"
-#include "include/MidiExtractor.h"
-
-#include "matroska/MatroskaExtractor.h"
-
-#include <binder/IServiceManager.h>
-#include <binder/MemoryDealer.h>
-
-#include <media/MediaAnalyticsItem.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MetaData.h>
-#include <media/IMediaExtractorService.h>
-#include <cutils/properties.h>
-#include <utils/String8.h>
-#include <private/android_filesystem_config.h>
-
-// still doing some on/off toggling here.
-#define MEDIA_LOG 1
-
-
-namespace android {
-
-// key for media statistics
-static const char *kKeyExtractor = "extractor";
-// attrs for media statistics
-static const char *kExtractorMime = "android.media.mediaextractor.mime";
-static const char *kExtractorTracks = "android.media.mediaextractor.ntrk";
-static const char *kExtractorFormat = "android.media.mediaextractor.fmt";
-
-MediaExtractor::MediaExtractor() {
- if (!LOG_NDEBUG) {
- uid_t uid = getuid();
- struct passwd *pw = getpwuid(uid);
- ALOGI("extractor created in uid: %d (%s)", getuid(), pw->pw_name);
- }
-
- mAnalyticsItem = NULL;
- if (MEDIA_LOG) {
- mAnalyticsItem = new MediaAnalyticsItem(kKeyExtractor);
- (void) mAnalyticsItem->generateSessionID();
- }
-}
-
-MediaExtractor::~MediaExtractor() {
-
- // log the current record, provided it has some information worth recording
- if (MEDIA_LOG) {
- if (mAnalyticsItem != NULL) {
- if (mAnalyticsItem->count() > 0) {
- mAnalyticsItem->setFinalized(true);
- mAnalyticsItem->selfrecord();
- }
- }
- }
- if (mAnalyticsItem != NULL) {
- delete mAnalyticsItem;
- mAnalyticsItem = NULL;
- }
-}
-
-sp<MetaData> MediaExtractor::getMetaData() {
- return new MetaData;
-}
-
-status_t MediaExtractor::getMetrics(Parcel *reply) {
-
- if (mAnalyticsItem == NULL || reply == NULL) {
- return UNKNOWN_ERROR;
- }
-
- populateMetrics();
- mAnalyticsItem->writeToParcel(reply);
-
- return OK;
-}
-
-void MediaExtractor::populateMetrics() {
- ALOGV("MediaExtractor::populateMetrics");
- // normally overridden in subclasses
-}
-
-uint32_t MediaExtractor::flags() const {
- return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_PAUSE | CAN_SEEK;
-}
-
-// static
-sp<IMediaExtractor> MediaExtractor::Create(
- const sp<DataSource> &source, const char *mime) {
- ALOGV("MediaExtractor::Create %s", mime);
-
- if (!property_get_bool("media.stagefright.extractremote", true)) {
- // local extractor
- ALOGW("creating media extractor in calling process");
- return CreateFromService(source, mime);
- } else {
- // remote extractor
- ALOGV("get service manager");
- sp<IBinder> binder = defaultServiceManager()->getService(String16("media.extractor"));
-
- if (binder != 0) {
- sp<IMediaExtractorService> mediaExService(interface_cast<IMediaExtractorService>(binder));
- sp<IMediaExtractor> ex = mediaExService->makeExtractor(source->asIDataSource(), mime);
- return ex;
- } else {
- ALOGE("extractor service not running");
- return NULL;
- }
- }
- return NULL;
-}
-
-sp<MediaExtractor> MediaExtractor::CreateFromService(
- const sp<DataSource> &source, const char *mime) {
-
- ALOGV("MediaExtractor::CreateFromService %s", mime);
- RegisterDefaultSniffers();
-
- // initialize source decryption if needed
- source->DrmInitialization(nullptr /* mime */);
-
- sp<AMessage> meta;
-
- String8 tmp;
- if (mime == NULL) {
- float confidence;
- if (!sniff(source, &tmp, &confidence, &meta)) {
- ALOGW("FAILED to autodetect media content.");
-
- return NULL;
- }
-
- mime = tmp.string();
- ALOGV("Autodetected media content as '%s' with confidence %.2f",
- mime, confidence);
- }
-
- MediaExtractor *ret = NULL;
- if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
- || !strcasecmp(mime, "audio/mp4")) {
- ret = new MPEG4Extractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
- ret = new MP3Extractor(source, meta);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)
- || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
- ret = new AMRExtractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
- ret = new FLACExtractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WAV)) {
- ret = new WAVExtractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_OGG)) {
- ret = new OggExtractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MATROSKA)) {
- ret = new MatroskaExtractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
- ret = new MPEG2TSExtractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC_ADTS)) {
- ret = new AACExtractor(source, meta);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) {
- ret = new MPEG2PSExtractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MIDI)) {
- ret = new MidiExtractor(source);
- }
-
- if (ret != NULL) {
- // track the container format (mpeg, aac, wvm, etc)
- if (MEDIA_LOG) {
- if (ret->mAnalyticsItem != NULL) {
- size_t ntracks = ret->countTracks();
- ret->mAnalyticsItem->setCString(kExtractorFormat, ret->name());
- // tracks (size_t)
- ret->mAnalyticsItem->setInt32(kExtractorTracks, ntracks);
- // metadata
- sp<MetaData> pMetaData = ret->getMetaData();
- if (pMetaData != NULL) {
- String8 xx = pMetaData->toString();
- // 'titl' -- but this verges into PII
- // 'mime'
- const char *mime = NULL;
- if (pMetaData->findCString(kKeyMIMEType, &mime)) {
- ret->mAnalyticsItem->setCString(kExtractorMime, mime);
- }
- // what else is interesting and not already available?
- }
- }
- }
- }
-
- return ret;
-}
-
-Mutex MediaExtractor::gSnifferMutex;
-List<MediaExtractor::SnifferFunc> MediaExtractor::gSniffers;
-bool MediaExtractor::gSniffersRegistered = false;
-
-// static
-bool MediaExtractor::sniff(
- const sp<DataSource> &source, String8 *mimeType, float *confidence, sp<AMessage> *meta) {
- *mimeType = "";
- *confidence = 0.0f;
- meta->clear();
-
- {
- Mutex::Autolock autoLock(gSnifferMutex);
- if (!gSniffersRegistered) {
- return false;
- }
- }
-
- for (List<SnifferFunc>::iterator it = gSniffers.begin();
- it != gSniffers.end(); ++it) {
- String8 newMimeType;
- float newConfidence;
- sp<AMessage> newMeta;
- if ((*it)(source, &newMimeType, &newConfidence, &newMeta)) {
- if (newConfidence > *confidence) {
- *mimeType = newMimeType;
- *confidence = newConfidence;
- *meta = newMeta;
- }
- }
- }
-
- return *confidence > 0.0;
-}
-
-// static
-void MediaExtractor::RegisterSniffer_l(SnifferFunc func) {
- for (List<SnifferFunc>::iterator it = gSniffers.begin();
- it != gSniffers.end(); ++it) {
- if (*it == func) {
- return;
- }
- }
-
- gSniffers.push_back(func);
-}
-
-// static
-void MediaExtractor::RegisterDefaultSniffers() {
- Mutex::Autolock autoLock(gSnifferMutex);
- if (gSniffersRegistered) {
- return;
- }
-
- RegisterSniffer_l(SniffMPEG4);
- RegisterSniffer_l(SniffMatroska);
- RegisterSniffer_l(SniffOgg);
- RegisterSniffer_l(SniffWAV);
- RegisterSniffer_l(SniffFLAC);
- RegisterSniffer_l(SniffAMR);
- RegisterSniffer_l(SniffMPEG2TS);
- RegisterSniffer_l(SniffMP3);
- RegisterSniffer_l(SniffAAC);
- RegisterSniffer_l(SniffMPEG2PS);
- RegisterSniffer_l(SniffMidi);
-
- gSniffersRegistered = true;
-}
-
-
-} // namespace android
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
new file mode 100644
index 0000000..f6c61a0
--- /dev/null
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaExtractorFactory"
+#include <utils/Log.h>
+
+#include <binder/IServiceManager.h>
+#include <media/DataSource.h>
+#include <media/MediaAnalyticsItem.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/InterfaceUtils.h>
+#include <media/stagefright/MediaExtractorFactory.h>
+#include <media/IMediaExtractor.h>
+#include <media/IMediaExtractorService.h>
+#include <cutils/properties.h>
+#include <utils/String8.h>
+#include <ziparchive/zip_archive.h>
+
+#include <dirent.h>
+#include <dlfcn.h>
+
+namespace android {
+
+// static
+sp<IMediaExtractor> MediaExtractorFactory::Create(
+ const sp<DataSource> &source, const char *mime) {
+ ALOGV("MediaExtractorFactory::Create %s", mime);
+
+ if (!property_get_bool("media.stagefright.extractremote", true)) {
+ // local extractor
+ ALOGW("creating media extractor in calling process");
+ return CreateFromService(source, mime);
+ } else {
+ // remote extractor
+ ALOGV("get service manager");
+ sp<IBinder> binder = defaultServiceManager()->getService(String16("media.extractor"));
+
+ if (binder != 0) {
+ sp<IMediaExtractorService> mediaExService(interface_cast<IMediaExtractorService>(binder));
+ sp<IMediaExtractor> ex = mediaExService->makeExtractor(
+ CreateIDataSourceFromDataSource(source), mime);
+ return ex;
+ } else {
+ ALOGE("extractor service not running");
+ return NULL;
+ }
+ }
+ return NULL;
+}
+
+sp<IMediaExtractor> MediaExtractorFactory::CreateFromService(
+ const sp<DataSource> &source, const char *mime) {
+
+ ALOGV("MediaExtractorFactory::CreateFromService %s", mime);
+
+ UpdateExtractors(nullptr);
+
+ // initialize source decryption if needed
+ source->DrmInitialization(nullptr /* mime */);
+
+ void *meta = nullptr;
+ MediaExtractor::CreatorFunc creator = NULL;
+ MediaExtractor::FreeMetaFunc freeMeta = nullptr;
+ float confidence;
+ sp<ExtractorPlugin> plugin;
+ creator = sniff(source.get(), &confidence, &meta, &freeMeta, plugin);
+ if (!creator) {
+ ALOGV("FAILED to autodetect media content.");
+ return NULL;
+ }
+
+ MediaExtractor *ret = creator(source.get(), meta);
+ if (meta != nullptr && freeMeta != nullptr) {
+ freeMeta(meta);
+ }
+
+ ALOGV("Created an extractor '%s' with confidence %.2f",
+ ret != nullptr ? ret->name() : "<null>", confidence);
+
+ return CreateIMediaExtractorFromMediaExtractor(ret, source, plugin);
+}
+
+//static
+void MediaExtractorFactory::LoadPlugins(const ::std::string& apkPath) {
+ // TODO: Verify apk path with package manager in extractor process.
+ ALOGV("Load plugins from: %s", apkPath.c_str());
+ UpdateExtractors(apkPath.empty() ? nullptr : apkPath.c_str());
+}
+
+struct ExtractorPlugin : public RefBase {
+ MediaExtractor::ExtractorDef def;
+ void *libHandle;
+ String8 libPath;
+ String8 uuidString;
+
+ ExtractorPlugin(MediaExtractor::ExtractorDef definition, void *handle, String8 &path)
+ : def(definition), libHandle(handle), libPath(path) {
+ for (size_t i = 0; i < sizeof MediaExtractor::ExtractorDef::extractor_uuid; i++) {
+ uuidString.appendFormat("%02x", def.extractor_uuid.b[i]);
+ }
+ }
+ ~ExtractorPlugin() {
+ if (libHandle != nullptr) {
+ ALOGV("closing handle for %s %d", libPath.c_str(), def.extractor_version);
+ dlclose(libHandle);
+ }
+ }
+};
+
+Mutex MediaExtractorFactory::gPluginMutex;
+std::shared_ptr<List<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
+bool MediaExtractorFactory::gPluginsRegistered = false;
+
+// static
+MediaExtractor::CreatorFunc MediaExtractorFactory::sniff(
+ DataSourceBase *source, float *confidence, void **meta,
+ MediaExtractor::FreeMetaFunc *freeMeta, sp<ExtractorPlugin> &plugin) {
+ *confidence = 0.0f;
+ *meta = nullptr;
+
+ std::shared_ptr<List<sp<ExtractorPlugin>>> plugins;
+ {
+ Mutex::Autolock autoLock(gPluginMutex);
+ if (!gPluginsRegistered) {
+ return NULL;
+ }
+ plugins = gPlugins;
+ }
+
+ MediaExtractor::CreatorFunc curCreator = NULL;
+ MediaExtractor::CreatorFunc bestCreator = NULL;
+ for (auto it = plugins->begin(); it != plugins->end(); ++it) {
+ float newConfidence;
+ void *newMeta = nullptr;
+ MediaExtractor::FreeMetaFunc newFreeMeta = nullptr;
+ if ((curCreator = (*it)->def.sniff(source, &newConfidence, &newMeta, &newFreeMeta))) {
+ if (newConfidence > *confidence) {
+ *confidence = newConfidence;
+ if (*meta != nullptr && *freeMeta != nullptr) {
+ (*freeMeta)(*meta);
+ }
+ *meta = newMeta;
+ *freeMeta = newFreeMeta;
+ plugin = *it;
+ bestCreator = curCreator;
+ } else {
+ if (newMeta != nullptr && newFreeMeta != nullptr) {
+ newFreeMeta(newMeta);
+ }
+ }
+ }
+ }
+
+ return bestCreator;
+}
+
+// static
+void MediaExtractorFactory::RegisterExtractor(const sp<ExtractorPlugin> &plugin,
+ List<sp<ExtractorPlugin>> &pluginList) {
+ // sanity check: struct version, uuid, name
+ if (plugin->def.def_version == 0
+ || plugin->def.def_version > MediaExtractor::EXTRACTORDEF_VERSION) {
+ ALOGE("don't understand extractor format %u, ignoring.", plugin->def.def_version);
+ return;
+ }
+ if (memcmp(&plugin->def.extractor_uuid, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 16) == 0) {
+ ALOGE("invalid UUID, ignoring");
+ return;
+ }
+ if (plugin->def.extractor_name == NULL || strlen(plugin->def.extractor_name) == 0) {
+ ALOGE("extractors should have a name, ignoring");
+ return;
+ }
+
+ for (auto it = pluginList.begin(); it != pluginList.end(); ++it) {
+ if (memcmp(&((*it)->def.extractor_uuid), &plugin->def.extractor_uuid, 16) == 0) {
+ // there's already an extractor with the same uuid
+ if ((*it)->def.extractor_version < plugin->def.extractor_version) {
+ // this one is newer, replace the old one
+ ALOGW("replacing extractor '%s' version %u with version %u",
+ plugin->def.extractor_name,
+ (*it)->def.extractor_version,
+ plugin->def.extractor_version);
+ pluginList.erase(it);
+ break;
+ } else {
+ ALOGW("ignoring extractor '%s' version %u in favor of version %u",
+ plugin->def.extractor_name,
+ plugin->def.extractor_version,
+ (*it)->def.extractor_version);
+ return;
+ }
+ }
+ }
+ ALOGV("registering extractor for %s", plugin->def.extractor_name);
+ pluginList.push_back(plugin);
+}
+
+//static
+void MediaExtractorFactory::RegisterExtractorsInApk(
+ const char *apkPath, List<sp<ExtractorPlugin>> &pluginList) {
+ ALOGV("search for plugins at %s", apkPath);
+ ZipArchiveHandle zipHandle;
+ int32_t ret = OpenArchive(apkPath, &zipHandle);
+ if (ret == 0) {
+ char abi[PROPERTY_VALUE_MAX];
+ property_get("ro.product.cpu.abi", abi, "arm64-v8a");
+ String8 prefix8 = String8::format("lib/%s/", abi);
+ ZipString prefix(prefix8.c_str());
+ ZipString suffix("extractor.so");
+ void* cookie;
+ ret = StartIteration(zipHandle, &cookie, &prefix, &suffix);
+ if (ret == 0) {
+ ZipEntry entry;
+ ZipString name;
+ while (Next(cookie, &entry, &name) == 0) {
+ String8 libPath = String8(apkPath) + "!/" +
+ String8(reinterpret_cast<const char*>(name.name), name.name_length);
+ // TODO: Open with a linker namespace so that it can be linked with sub-libraries
+ // within the apk instead of system libraries already loaded.
+ void *libHandle = dlopen(libPath.string(), RTLD_NOW | RTLD_LOCAL);
+ if (libHandle) {
+ MediaExtractor::GetExtractorDef getDef =
+ (MediaExtractor::GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
+ if (getDef) {
+ ALOGV("registering sniffer for %s", libPath.string());
+ RegisterExtractor(
+ new ExtractorPlugin(getDef(), libHandle, libPath), pluginList);
+ } else {
+ ALOGW("%s does not contain sniffer", libPath.string());
+ dlclose(libHandle);
+ }
+ } else {
+ ALOGW("couldn't dlopen(%s) %s", libPath.string(), strerror(errno));
+ }
+ }
+ EndIteration(cookie);
+ } else {
+ ALOGW("couldn't find plugins from %s, %d", apkPath, ret);
+ }
+ CloseArchive(zipHandle);
+ } else {
+ ALOGW("couldn't open(%s) %d", apkPath, ret);
+ }
+}
+
+//static
+void MediaExtractorFactory::RegisterExtractorsInSystem(
+ const char *libDirPath, List<sp<ExtractorPlugin>> &pluginList) {
+ ALOGV("search for plugins at %s", libDirPath);
+ DIR *libDir = opendir(libDirPath);
+ if (libDir) {
+ struct dirent* libEntry;
+ while ((libEntry = readdir(libDir))) {
+ String8 libPath = String8(libDirPath) + "/" + libEntry->d_name;
+ void *libHandle = dlopen(libPath.string(), RTLD_NOW | RTLD_LOCAL);
+ if (libHandle) {
+ MediaExtractor::GetExtractorDef getDef =
+ (MediaExtractor::GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
+ if (getDef) {
+ ALOGV("registering sniffer for %s", libPath.string());
+ RegisterExtractor(
+ new ExtractorPlugin(getDef(), libHandle, libPath), pluginList);
+ } else {
+ ALOGW("%s does not contain sniffer", libPath.string());
+ dlclose(libHandle);
+ }
+ } else {
+ ALOGW("couldn't dlopen(%s) %s", libPath.string(), strerror(errno));
+ }
+ }
+
+ closedir(libDir);
+ } else {
+ ALOGE("couldn't opendir(%s)", libDirPath);
+ }
+}
+
+// static
+void MediaExtractorFactory::UpdateExtractors(const char *newUpdateApkPath) {
+ Mutex::Autolock autoLock(gPluginMutex);
+ if (newUpdateApkPath != nullptr) {
+ gPluginsRegistered = false;
+ }
+ if (gPluginsRegistered) {
+ return;
+ }
+
+ std::shared_ptr<List<sp<ExtractorPlugin>>> newList(new List<sp<ExtractorPlugin>>());
+
+ RegisterExtractorsInSystem("/system/lib"
+#ifdef __LP64__
+ "64"
+#endif
+ "/extractors", *newList);
+
+ RegisterExtractorsInSystem("/vendor/lib"
+#ifdef __LP64__
+ "64"
+#endif
+ "/extractors", *newList);
+
+ if (newUpdateApkPath != nullptr) {
+ RegisterExtractorsInApk(newUpdateApkPath, *newList);
+ }
+
+ gPlugins = newList;
+ gPluginsRegistered = true;
+}
+
+status_t MediaExtractorFactory::dump(int fd, const Vector<String16>&) {
+ Mutex::Autolock autoLock(gPluginMutex);
+ String8 out;
+ out.append("Available extractors:\n");
+ if (gPluginsRegistered) {
+ for (auto it = gPlugins->begin(); it != gPlugins->end(); ++it) {
+ out.appendFormat(" %25s: uuid(%s), version(%u), path(%s)\n",
+ (*it)->def.extractor_name,
+ (*it)->uuidString.c_str(),
+ (*it)->def.extractor_version,
+ (*it)->libPath.c_str());
+ }
+ } else {
+ out.append(" (no plugins registered)\n");
+ }
+ write(fd, out.string(), out.size());
+ return OK;
+}
+
+
+} // namespace android
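
Illustrative sketch (not part of the patch): the factory's sniff() loop above asks every registered plugin for a confidence score on the source and keeps the creator reported with the highest one. A simplified standalone model of that selection, omitting dlopen()/dlsym(), CreatorFunc and the meta/freeMeta plumbing; the plugin names and confidence values are made up for the example:

    // Every plugin reports a confidence for the source; the best one wins.
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Source { std::string bytes; };

    struct Plugin {
        std::string name;
        std::function<bool(const Source &, float *confidence)> sniff;
    };

    static std::string sniffAll(const std::vector<Plugin> &plugins, const Source &src) {
        float best = 0.0f;
        std::string bestName;                       // empty == autodetect failed
        for (const Plugin &p : plugins) {
            float confidence = 0.0f;
            if (p.sniff(src, &confidence) && confidence > best) {
                best = confidence;
                bestName = p.name;
            }
        }
        return bestName;
    }

    int main() {
        std::vector<Plugin> plugins = {
            {"mp3", [](const Source &s, float *c) { *c = 0.2f; return s.bytes.rfind("ID3", 0) == 0; }},
            {"mp4", [](const Source &s, float *c) { *c = 0.4f; return s.bytes.find("ftyp") != std::string::npos; }},
        };
        Source src{"....ftypisom...."};
        std::cout << "picked extractor: " << sniffAll(plugins, src) << "\n";
    }
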
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index c7b8888..98f59b5 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -23,6 +23,8 @@
#include <media/stagefright/MediaMuxer.h>
+#include <media/mediarecorder.h>
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -31,17 +33,22 @@
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/Utils.h>
namespace android {
+static bool isMp4Format(MediaMuxer::OutputFormat format) {
+ return format == MediaMuxer::OUTPUT_FORMAT_MPEG_4 ||
+ format == MediaMuxer::OUTPUT_FORMAT_THREE_GPP ||
+ format == MediaMuxer::OUTPUT_FORMAT_HEIF;
+}
+
MediaMuxer::MediaMuxer(int fd, OutputFormat format)
: mFormat(format),
mState(UNINITIALIZED) {
- if (format == OUTPUT_FORMAT_MPEG_4 || format == OUTPUT_FORMAT_THREE_GPP) {
+ if (isMp4Format(format)) {
mWriter = new MPEG4Writer(fd);
} else if (format == OUTPUT_FORMAT_WEBM) {
mWriter = new WebmWriter(fd);
@@ -49,6 +56,10 @@
if (mWriter != NULL) {
mFileMeta = new MetaData;
+ if (format == OUTPUT_FORMAT_HEIF) {
+ // Note that the key uses recorder file types.
+ mFileMeta->setInt32(kKeyFileType, output_format::OUTPUT_FORMAT_HEIF);
+ }
mState = INITIALIZED;
}
}
@@ -108,8 +119,8 @@
ALOGE("setLocation() must be called before start().");
return INVALID_OPERATION;
}
- if (mFormat != OUTPUT_FORMAT_MPEG_4 && mFormat != OUTPUT_FORMAT_THREE_GPP) {
- ALOGE("setLocation() is only supported for .mp4 pr .3gp output.");
+ if (!isMp4Format(mFormat)) {
+ ALOGE("setLocation() is only supported for .mp4, .3gp or .heic output.");
return INVALID_OPERATION;
}
@@ -170,13 +181,17 @@
mediaBuffer->add_ref(); // Released in MediaAdapter::signalBufferReturned().
mediaBuffer->set_range(buffer->offset(), buffer->size());
- sp<MetaData> sampleMetaData = mediaBuffer->meta_data();
- sampleMetaData->setInt64(kKeyTime, timeUs);
+ MetaDataBase &sampleMetaData = mediaBuffer->meta_data();
+ sampleMetaData.setInt64(kKeyTime, timeUs);
// Just set the kKeyDecodingTime as the presentation time for now.
- sampleMetaData->setInt64(kKeyDecodingTime, timeUs);
+ sampleMetaData.setInt64(kKeyDecodingTime, timeUs);
if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) {
- sampleMetaData->setInt32(kKeyIsSyncFrame, true);
+ sampleMetaData.setInt32(kKeyIsSyncFrame, true);
+ }
+
+ if (flags & MediaCodec::BUFFER_FLAG_MUXER_DATA) {
+ sampleMetaData.setInt32(kKeyIsMuxerData, 1);
}
sp<MediaAdapter> currentTrack = mTrackList[trackIndex];
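
Illustrative sketch (not part of the patch): with isMp4Format(), MP4, 3GPP and the new HEIF output all route to the MPEG-4 writer while WebM keeps its own writer. A minimal standalone model of that dispatch (the enum and return strings are illustrative stand-ins for the framework types):

    // MP4, 3GPP and HEIF share the MPEG-4 writer; WebM keeps its own.
    #include <iostream>
    #include <string>

    enum class OutputFormat { MPEG_4, THREE_GPP, WEBM, HEIF };

    static bool isMp4Format(OutputFormat f) {
        return f == OutputFormat::MPEG_4 || f == OutputFormat::THREE_GPP ||
               f == OutputFormat::HEIF;
    }

    static std::string pickWriter(OutputFormat f) {
        if (isMp4Format(f)) return "MPEG4Writer";
        if (f == OutputFormat::WEBM) return "WebmWriter";
        return "<unsupported>";
    }

    int main() {
        std::cout << pickWriter(OutputFormat::HEIF) << "\n";   // MPEG4Writer
        std::cout << pickWriter(OutputFormat::WEBM) << "\n";   // WebmWriter
    }
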
diff --git a/media/libstagefright/MediaSource.cpp b/media/libstagefright/MediaSource.cpp
deleted file mode 100644
index a17757a..0000000
--- a/media/libstagefright/MediaSource.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/stagefright/MediaSource.h>
-
-namespace android {
-
-MediaSource::MediaSource() {}
-
-MediaSource::~MediaSource() {}
-
-} // namespace android
diff --git a/media/libstagefright/MediaSync.cpp b/media/libstagefright/MediaSync.cpp
index 9278381..ba14e5d 100644
--- a/media/libstagefright/MediaSync.cpp
+++ b/media/libstagefright/MediaSync.cpp
@@ -61,6 +61,7 @@
mNextBufferItemMediaUs(-1),
mPlaybackRate(0.0) {
mMediaClock = new MediaClock;
+ mMediaClock->init();
// initialize settings
mPlaybackSettings = AUDIO_PLAYBACK_RATE_DEFAULT;
diff --git a/media/libstagefright/MetaDataUtils.cpp b/media/libstagefright/MetaDataUtils.cpp
new file mode 100644
index 0000000..04f6ade
--- /dev/null
+++ b/media/libstagefright/MetaDataUtils.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MetaDataUtils"
+
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaDataUtils.h>
+
+namespace android {
+
+bool MakeAVCCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size) {
+ int32_t width;
+ int32_t height;
+ int32_t sarWidth;
+ int32_t sarHeight;
+ sp<ABuffer> accessUnit = new ABuffer((void*)data, size);
+ sp<ABuffer> csd = MakeAVCCodecSpecificData(accessUnit, &width, &height, &sarWidth, &sarHeight);
+ if (csd == nullptr) {
+ return false;
+ }
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+
+ meta.setData(kKeyAVCC, kTypeAVCC, csd->data(), csd->size());
+ meta.setInt32(kKeyWidth, width);
+ meta.setInt32(kKeyHeight, height);
+ if (sarWidth > 0 && sarHeight > 0) {
+ meta.setInt32(kKeySARWidth, sarWidth);
+ meta.setInt32(kKeySARHeight, sarHeight);
+ }
+ return true;
+}
+
+bool MakeAACCodecSpecificData(
+ MetaDataBase &meta,
+ unsigned profile, unsigned sampling_freq_index,
+ unsigned channel_configuration) {
+ if (sampling_freq_index > 11u) {
+ return false;
+ }
+ int32_t sampleRate;
+ int32_t channelCount;
+ static const int32_t kSamplingFreq[] = {
+ 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
+ 16000, 12000, 11025, 8000
+ };
+ sampleRate = kSamplingFreq[sampling_freq_index];
+ channelCount = channel_configuration;
+
+ static const uint8_t kStaticESDS[] = {
+ 0x03, 22,
+ 0x00, 0x00, // ES_ID
+ 0x00, // streamDependenceFlag, URL_Flag, OCRstreamFlag
+
+ 0x04, 17,
+ 0x40, // Audio ISO/IEC 14496-3
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x05, 2,
+ // AudioSpecificInfo follows
+
+ // oooo offf fccc c000
+ // o - audioObjectType
+ // f - samplingFreqIndex
+ // c - channelConfig
+ };
+
+ size_t csdSize = sizeof(kStaticESDS) + 2;
+ uint8_t *csd = new uint8_t[csdSize];
+ memcpy(csd, kStaticESDS, sizeof(kStaticESDS));
+
+ csd[sizeof(kStaticESDS)] =
+ ((profile + 1) << 3) | (sampling_freq_index >> 1);
+
+ csd[sizeof(kStaticESDS) + 1] =
+ ((sampling_freq_index << 7) & 0x80) | (channel_configuration << 3);
+
+ meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
+
+ meta.setInt32(kKeySampleRate, sampleRate);
+ meta.setInt32(kKeyChannelCount, channelCount);
+
+ meta.setData(kKeyESDS, 0, csd, csdSize);
+ delete [] csd;
+ return true;
+}
+
+} // namespace android
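
Illustrative sketch (not part of the patch): the two bytes appended to kStaticESDS above encode the AAC AudioSpecificConfig as 5 bits of audioObjectType, 4 bits of samplingFrequencyIndex and 4 bits of channelConfiguration. A small standalone program that packs and unpacks them with the same shifts (the profile/rate/channel values are illustrative):

    // Packs the two AudioSpecificConfig bytes with the same shifts as the
    // code above, then decodes them to show the 5/4/4-bit layout.
    #include <cstdint>
    #include <cstdio>

    int main() {
        unsigned profile = 1;              // audioObjectType = profile + 1 (2 = AAC LC)
        unsigned samplingFreqIndex = 4;    // 44100 Hz in kSamplingFreq[]
        unsigned channelConfig = 2;        // stereo

        uint8_t b0 = ((profile + 1) << 3) | (samplingFreqIndex >> 1);
        uint8_t b1 = ((samplingFreqIndex << 7) & 0x80) | (channelConfig << 3);

        unsigned objectType = b0 >> 3;
        unsigned freqIndex  = ((b0 & 0x07) << 1) | (b1 >> 7);
        unsigned channels   = (b1 >> 3) & 0x0f;

        std::printf("csd: %02x %02x -> objectType=%u freqIndex=%u channels=%u\n",
                    b0, b1, objectType, freqIndex, channels);
        return 0;
    }
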
diff --git a/media/libstagefright/MidiExtractor.cpp b/media/libstagefright/MidiExtractor.cpp
deleted file mode 100644
index 7930bbb..0000000
--- a/media/libstagefright/MidiExtractor.cpp
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MidiExtractor"
-#include <utils/Log.h>
-
-#include "include/MidiExtractor.h"
-
-#include <media/MidiIoWrapper.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaSource.h>
-#include <libsonivox/eas_reverb.h>
-
-namespace android {
-
-// how many Sonivox output buffers to aggregate into one MediaBuffer
-static const int NUM_COMBINE_BUFFERS = 4;
-
-class MidiSource : public MediaSource {
-
-public:
- MidiSource(
- const sp<MidiEngine> &engine,
- const sp<MetaData> &trackMetadata);
-
- virtual status_t start(MetaData *params);
- virtual status_t stop();
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
- virtual ~MidiSource();
-
-private:
- sp<MidiEngine> mEngine;
- sp<MetaData> mTrackMetadata;
- bool mInitCheck;
- bool mStarted;
-
- status_t init();
-
- // no copy constructor or assignment
- MidiSource(const MidiSource &);
- MidiSource &operator=(const MidiSource &);
-
-};
-
-
-// Midisource
-
-MidiSource::MidiSource(
- const sp<MidiEngine> &engine,
- const sp<MetaData> &trackMetadata)
- : mEngine(engine),
- mTrackMetadata(trackMetadata),
- mInitCheck(false),
- mStarted(false)
-{
- ALOGV("MidiSource ctor");
- mInitCheck = init();
-}
-
-MidiSource::~MidiSource()
-{
- ALOGV("MidiSource dtor");
- if (mStarted) {
- stop();
- }
-}
-
-status_t MidiSource::start(MetaData * /* params */)
-{
- ALOGV("MidiSource::start");
-
- CHECK(!mStarted);
- mStarted = true;
- mEngine->allocateBuffers();
- return OK;
-}
-
-status_t MidiSource::stop()
-{
- ALOGV("MidiSource::stop");
-
- CHECK(mStarted);
- mStarted = false;
- mEngine->releaseBuffers();
-
- return OK;
-}
-
-sp<MetaData> MidiSource::getFormat()
-{
- return mTrackMetadata;
-}
-
-status_t MidiSource::read(
- MediaBuffer **outBuffer, const ReadOptions *options)
-{
- ALOGV("MidiSource::read");
- MediaBuffer *buffer;
- // process an optional seek request
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- if ((NULL != options) && options->getSeekTo(&seekTimeUs, &mode)) {
- if (seekTimeUs <= 0LL) {
- seekTimeUs = 0LL;
- }
- mEngine->seekTo(seekTimeUs);
- }
- buffer = mEngine->readBuffer();
- *outBuffer = buffer;
- ALOGV("MidiSource::read %p done", this);
- return buffer != NULL ? (status_t) OK : (status_t) ERROR_END_OF_STREAM;
-}
-
-status_t MidiSource::init()
-{
- ALOGV("MidiSource::init");
- return OK;
-}
-
-// MidiEngine
-
-MidiEngine::MidiEngine(const sp<DataSource> &dataSource,
- const sp<MetaData> &fileMetadata,
- const sp<MetaData> &trackMetadata) :
- mGroup(NULL),
- mEasData(NULL),
- mEasHandle(NULL),
- mEasConfig(NULL),
- mIsInitialized(false) {
- mIoWrapper = new MidiIoWrapper(dataSource);
- // spin up a new EAS engine
- EAS_I32 temp;
- EAS_RESULT result = EAS_Init(&mEasData);
-
- if (result == EAS_SUCCESS) {
- result = EAS_OpenFile(mEasData, mIoWrapper->getLocator(), &mEasHandle);
- }
- if (result == EAS_SUCCESS) {
- result = EAS_Prepare(mEasData, mEasHandle);
- }
- if (result == EAS_SUCCESS) {
- result = EAS_ParseMetaData(mEasData, mEasHandle, &temp);
- }
-
- if (result != EAS_SUCCESS) {
- return;
- }
-
- if (fileMetadata != NULL) {
- fileMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MIDI);
- }
-
- if (trackMetadata != NULL) {
- trackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
- trackMetadata->setInt64(kKeyDuration, 1000ll * temp); // milli->micro
- mEasConfig = EAS_Config();
- trackMetadata->setInt32(kKeySampleRate, mEasConfig->sampleRate);
- trackMetadata->setInt32(kKeyChannelCount, mEasConfig->numChannels);
- trackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
- }
- mIsInitialized = true;
-}
-
-MidiEngine::~MidiEngine() {
- if (mEasHandle) {
- EAS_CloseFile(mEasData, mEasHandle);
- }
- if (mEasData) {
- EAS_Shutdown(mEasData);
- }
- delete mGroup;
-
-}
-
-status_t MidiEngine::initCheck() {
- return mIsInitialized ? OK : UNKNOWN_ERROR;
-}
-
-status_t MidiEngine::allocateBuffers() {
- // select reverb preset and enable
- EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_PRESET, EAS_PARAM_REVERB_CHAMBER);
- EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_BYPASS, EAS_FALSE);
-
- mGroup = new MediaBufferGroup;
- int bufsize = sizeof(EAS_PCM)
- * mEasConfig->mixBufferSize * mEasConfig->numChannels * NUM_COMBINE_BUFFERS;
- ALOGV("using %d byte buffer", bufsize);
- mGroup->add_buffer(new MediaBuffer(bufsize));
- return OK;
-}
-
-status_t MidiEngine::releaseBuffers() {
- delete mGroup;
- mGroup = NULL;
- return OK;
-}
-
-status_t MidiEngine::seekTo(int64_t positionUs) {
- ALOGV("seekTo %lld", (long long)positionUs);
- EAS_RESULT result = EAS_Locate(mEasData, mEasHandle, positionUs / 1000, false);
- return result == EAS_SUCCESS ? OK : UNKNOWN_ERROR;
-}
-
-MediaBuffer* MidiEngine::readBuffer() {
- EAS_STATE state;
- EAS_State(mEasData, mEasHandle, &state);
- if ((state == EAS_STATE_STOPPED) || (state == EAS_STATE_ERROR)) {
- return NULL;
- }
- MediaBuffer *buffer;
- status_t err = mGroup->acquire_buffer(&buffer);
- if (err != OK) {
- ALOGE("readBuffer: no buffer");
- return NULL;
- }
- EAS_I32 timeMs;
- EAS_GetLocation(mEasData, mEasHandle, &timeMs);
- int64_t timeUs = 1000ll * timeMs;
- buffer->meta_data()->setInt64(kKeyTime, timeUs);
-
- EAS_PCM* p = (EAS_PCM*) buffer->data();
- int numBytesOutput = 0;
- for (int i = 0; i < NUM_COMBINE_BUFFERS; i++) {
- EAS_I32 numRendered;
- EAS_RESULT result = EAS_Render(mEasData, p, mEasConfig->mixBufferSize, &numRendered);
- if (result != EAS_SUCCESS) {
- ALOGE("EAS_Render returned %ld", result);
- break;
- }
- p += numRendered * mEasConfig->numChannels;
- numBytesOutput += numRendered * mEasConfig->numChannels * sizeof(EAS_PCM);
- }
- buffer->set_range(0, numBytesOutput);
- ALOGV("readBuffer: returning %zd in buffer %p", buffer->range_length(), buffer);
- return buffer;
-}
-
-
-// MidiExtractor
-
-MidiExtractor::MidiExtractor(
- const sp<DataSource> &dataSource)
- : mDataSource(dataSource),
- mInitCheck(false)
-{
- ALOGV("MidiExtractor ctor");
- mFileMetadata = new MetaData;
- mTrackMetadata = new MetaData;
- mEngine = new MidiEngine(mDataSource, mFileMetadata, mTrackMetadata);
- mInitCheck = mEngine->initCheck();
-}
-
-MidiExtractor::~MidiExtractor()
-{
- ALOGV("MidiExtractor dtor");
-}
-
-size_t MidiExtractor::countTracks()
-{
- return mInitCheck == OK ? 1 : 0;
-}
-
-sp<IMediaSource> MidiExtractor::getTrack(size_t index)
-{
- if (mInitCheck != OK || index > 0) {
- return NULL;
- }
- return new MidiSource(mEngine, mTrackMetadata);
-}
-
-sp<MetaData> MidiExtractor::getTrackMetaData(
- size_t index, uint32_t /* flags */) {
- ALOGV("MidiExtractor::getTrackMetaData");
- if (mInitCheck != OK || index > 0) {
- return NULL;
- }
- return mTrackMetadata;
-}
-
-sp<MetaData> MidiExtractor::getMetaData()
-{
- ALOGV("MidiExtractor::getMetaData");
- return mFileMetadata;
-}
-
-// Sniffer
-
-bool SniffMidi(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *)
-{
- sp<MidiEngine> p = new MidiEngine(source, NULL, NULL);
- if (p->initCheck() == OK) {
- *mimeType = MEDIA_MIMETYPE_AUDIO_MIDI;
- *confidence = 0.8;
- ALOGV("SniffMidi: yes");
- return true;
- }
- ALOGV("SniffMidi: no");
- return false;
-
-}
-
-} // namespace android
diff --git a/media/libstagefright/NdkUtils.cpp b/media/libstagefright/NdkUtils.cpp
new file mode 100644
index 0000000..904fe72
--- /dev/null
+++ b/media/libstagefright/NdkUtils.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+
+#include <media/stagefright/NdkUtils.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
+sp<MetaData> convertMediaFormatWrapperToMetaData(const sp<AMediaFormatWrapper> &fmt) {
+ sp<AMessage> msg = fmt->toAMessage();
+ sp<MetaData> meta = new MetaData;
+ convertMessageToMetaData(msg, meta);
+ return meta;
+}
+
+} // namespace android
+
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index afd6ffb..18f4b12 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -681,10 +681,6 @@
return mSource->DrmInitialization(mime);
}
-void NuCachedSource2::getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client) {
- mSource->getDrmInfo(handle, client);
-}
-
String8 NuCachedSource2::getUri() {
return mSource->getUri();
}
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 640cb82..4a7d6ca 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
#define LOG_TAG "NuMediaExtractor"
#include <utils/Log.h>
@@ -23,28 +23,40 @@
#include "include/ESDS.h"
#include "include/NuCachedSource2.h"
+#include <media/DataSource.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
+#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
namespace android {
+NuMediaExtractor::Sample::Sample()
+ : mBuffer(NULL),
+ mSampleTimeUs(-1ll) {
+}
+
+NuMediaExtractor::Sample::Sample(MediaBufferBase *buffer, int64_t timeUs)
+ : mBuffer(buffer),
+ mSampleTimeUs(timeUs) {
+}
+
NuMediaExtractor::NuMediaExtractor()
: mTotalBitrate(-1ll),
mDurationUs(-1ll) {
}
NuMediaExtractor::~NuMediaExtractor() {
- releaseTrackSamples();
+ releaseAllTrackSamples();
for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
TrackInfo *info = &mSelectedTracks.editItemAt(i);
@@ -60,7 +72,7 @@
}
status_t NuMediaExtractor::setDataSource(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *path,
const KeyedVector<String8, String8> *headers) {
Mutex::Autolock autoLock(mLock);
@@ -70,13 +82,13 @@
}
sp<DataSource> dataSource =
- DataSource::CreateFromURI(httpService, path, headers);
+ DataSourceFactory::CreateFromURI(httpService, path, headers);
if (dataSource == NULL) {
return -ENOENT;
}
- mImpl = MediaExtractor::Create(dataSource);
+ mImpl = MediaExtractorFactory::Create(dataSource);
if (mImpl == NULL) {
return ERROR_UNSUPPORTED;
@@ -112,7 +124,7 @@
return err;
}
- mImpl = MediaExtractor::Create(fileSource);
+ mImpl = MediaExtractorFactory::Create(fileSource);
if (mImpl == NULL) {
return ERROR_UNSUPPORTED;
@@ -142,7 +154,7 @@
return err;
}
- mImpl = MediaExtractor::Create(source);
+ mImpl = MediaExtractorFactory::Create(source);
if (mImpl == NULL) {
return ERROR_UNSUPPORTED;
@@ -193,6 +205,15 @@
return OK;
}
+void NuMediaExtractor::disconnect() {
+ if (mDataSource != NULL) {
+ // disconnect data source
+ if (mDataSource->flags() & DataSource::kIsCachingDataSource) {
+ static_cast<NuCachedSource2 *>(mDataSource.get())->disconnect();
+ }
+ }
+}
+
status_t NuMediaExtractor::updateDurationAndBitrate() {
if (mImpl->countTracks() > kMaxTrackCount) {
return ERROR_UNSUPPORTED;
@@ -286,7 +307,28 @@
return OK;
}
-status_t NuMediaExtractor::selectTrack(size_t index) {
+status_t NuMediaExtractor::getExifOffsetSize(off64_t *offset, size_t *size) const {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mImpl == NULL) {
+ return -EINVAL;
+ }
+
+ sp<MetaData> meta = mImpl->getMetaData();
+
+ int64_t exifOffset, exifSize;
+ if (meta->findInt64(kKeyExifOffset, &exifOffset)
+ && meta->findInt64(kKeyExifSize, &exifSize)) {
+ *offset = (off64_t) exifOffset;
+ *size = (size_t) exifSize;
+
+ return OK;
+ }
+ return ERROR_UNSUPPORTED;
+}
+
+status_t NuMediaExtractor::selectTrack(size_t index,
+ int64_t startTimeUs, MediaSource::ReadOptions::SeekMode mode) {
Mutex::Autolock autoLock(mLock);
if (mImpl == NULL) {
@@ -309,31 +351,56 @@
sp<IMediaSource> source = mImpl->getTrack(index);
if (source == nullptr) {
+ ALOGE("track %zu is empty", index);
return ERROR_MALFORMED;
}
status_t ret = source->start();
if (ret != OK) {
+ ALOGE("track %zu failed to start", index);
return ret;
}
+ sp<MetaData> meta = source->getFormat();
+ if (meta == NULL) {
+ ALOGE("track %zu has no meta data", index);
+ return ERROR_MALFORMED;
+ }
+
+ const char *mime;
+ if (!meta->findCString(kKeyMIMEType, &mime)) {
+ ALOGE("track %zu has no mime type in meta data", index);
+ return ERROR_MALFORMED;
+ }
+ ALOGV("selectTrack, track[%zu]: %s", index, mime);
+
mSelectedTracks.push();
TrackInfo *info = &mSelectedTracks.editItemAt(mSelectedTracks.size() - 1);
info->mSource = source;
info->mTrackIndex = index;
+ if (!strncasecmp(mime, "audio/", 6)) {
+ info->mTrackType = MEDIA_TRACK_TYPE_AUDIO;
+ info->mMaxFetchCount = 64;
+ } else if (!strncasecmp(mime, "video/", 6)) {
+ info->mTrackType = MEDIA_TRACK_TYPE_VIDEO;
+ info->mMaxFetchCount = 8;
+ } else {
+ info->mTrackType = MEDIA_TRACK_TYPE_UNKNOWN;
+ info->mMaxFetchCount = 1;
+ }
info->mFinalResult = OK;
- info->mSample = NULL;
- info->mSampleTimeUs = -1ll;
+ releaseTrackSamples(info);
info->mTrackFlags = 0;
- const char *mime;
- CHECK(source->getFormat()->findCString(kKeyMIMEType, &mime));
-
if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
info->mTrackFlags |= kIsVorbis;
}
+ if (startTimeUs >= 0) {
+ fetchTrackSamples(info, startTimeUs, mode);
+ }
+
return OK;
}
@@ -364,12 +431,7 @@
TrackInfo *info = &mSelectedTracks.editItemAt(i);
- if (info->mSample != NULL) {
- info->mSample->release();
- info->mSample = NULL;
-
- info->mSampleTimeUs = -1ll;
- }
+ releaseTrackSamples(info);
CHECK_EQ((status_t)OK, info->mSource->stop());
@@ -378,79 +440,141 @@
return OK;
}
-void NuMediaExtractor::releaseTrackSamples() {
- for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
- TrackInfo *info = &mSelectedTracks.editItemAt(i);
+void NuMediaExtractor::releaseOneSample(TrackInfo *info) {
+ if (info == NULL || info->mSamples.empty()) {
+ return;
+ }
- if (info->mSample != NULL) {
- info->mSample->release();
- info->mSample = NULL;
+ auto it = info->mSamples.begin();
+ if (it->mBuffer != NULL) {
+ it->mBuffer->release();
+ }
+ info->mSamples.erase(it);
+}
- info->mSampleTimeUs = -1ll;
+void NuMediaExtractor::releaseTrackSamples(TrackInfo *info) {
+ if (info == NULL) {
+ return;
+ }
+
+ auto it = info->mSamples.begin();
+ while (it != info->mSamples.end()) {
+ if (it->mBuffer != NULL) {
+ it->mBuffer->release();
}
+ it = info->mSamples.erase(it);
}
}
-ssize_t NuMediaExtractor::fetchTrackSamples(
+void NuMediaExtractor::releaseAllTrackSamples() {
+ for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
+ releaseTrackSamples(&mSelectedTracks.editItemAt(i));
+ }
+}
+
+ssize_t NuMediaExtractor::fetchAllTrackSamples(
int64_t seekTimeUs, MediaSource::ReadOptions::SeekMode mode) {
TrackInfo *minInfo = NULL;
- ssize_t minIndex = -1;
+ ssize_t minIndex = ERROR_END_OF_STREAM;
for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
TrackInfo *info = &mSelectedTracks.editItemAt(i);
+ fetchTrackSamples(info, seekTimeUs, mode);
- if (seekTimeUs >= 0ll) {
- info->mFinalResult = OK;
+ status_t err = info->mFinalResult;
+ if (err != OK && err != ERROR_END_OF_STREAM) {
+ return err;
+ }
- if (info->mSample != NULL) {
- info->mSample->release();
- info->mSample = NULL;
- info->mSampleTimeUs = -1ll;
- }
- } else if (info->mFinalResult != OK) {
+ if (info->mSamples.empty()) {
continue;
}
- if (info->mSample == NULL) {
- MediaSource::ReadOptions options;
- if (seekTimeUs >= 0ll) {
- options.setSeekTo(seekTimeUs, mode);
- }
- status_t err = info->mSource->read(&info->mSample, &options);
-
- if (err != OK) {
- CHECK(info->mSample == NULL);
-
- info->mFinalResult = err;
-
- if (info->mFinalResult != ERROR_END_OF_STREAM) {
- ALOGW("read on track %zu failed with error %d",
- info->mTrackIndex, err);
- }
-
- info->mSampleTimeUs = -1ll;
- continue;
- } else {
- CHECK(info->mSample != NULL);
- CHECK(info->mSample->meta_data()->findInt64(
- kKeyTime, &info->mSampleTimeUs));
- }
- }
-
- if (minInfo == NULL || info->mSampleTimeUs < minInfo->mSampleTimeUs) {
+ if (minInfo == NULL) {
minInfo = info;
minIndex = i;
+ } else {
+ auto it = info->mSamples.begin();
+ auto itMin = minInfo->mSamples.begin();
+ if (it->mSampleTimeUs < itMin->mSampleTimeUs) {
+ minInfo = info;
+ minIndex = i;
+ }
}
}
return minIndex;
}
+void NuMediaExtractor::fetchTrackSamples(TrackInfo *info,
+ int64_t seekTimeUs, MediaSource::ReadOptions::SeekMode mode) {
+ if (info == NULL) {
+ return;
+ }
+
+ MediaSource::ReadOptions options;
+ if (seekTimeUs >= 0ll) {
+ options.setSeekTo(seekTimeUs, mode);
+ info->mFinalResult = OK;
+ releaseTrackSamples(info);
+ } else if (info->mFinalResult != OK || !info->mSamples.empty()) {
+ return;
+ }
+
+ status_t err = OK;
+ Vector<MediaBufferBase *> mediaBuffers;
+ if (info->mSource->supportReadMultiple()) {
+ options.setNonBlocking();
+ err = info->mSource->readMultiple(&mediaBuffers, info->mMaxFetchCount, &options);
+ } else {
+ MediaBufferBase *mbuf = NULL;
+ err = info->mSource->read(&mbuf, &options);
+ if (err == OK && mbuf != NULL) {
+ mediaBuffers.push_back(mbuf);
+ }
+ }
+
+ info->mFinalResult = err;
+ if (err != OK && err != ERROR_END_OF_STREAM) {
+ ALOGW("read on track %zu failed with error %d", info->mTrackIndex, err);
+ size_t count = mediaBuffers.size();
+ for (size_t id = 0; id < count; ++id) {
+ MediaBufferBase *mbuf = mediaBuffers[id];
+ if (mbuf != NULL) {
+ mbuf->release();
+ }
+ }
+ return;
+ }
+
+ size_t count = mediaBuffers.size();
+ bool releaseRemaining = false;
+ for (size_t id = 0; id < count; ++id) {
+ int64_t timeUs;
+ MediaBufferBase *mbuf = mediaBuffers[id];
+ if (mbuf == NULL) {
+ continue;
+ }
+ if (releaseRemaining) {
+ mbuf->release();
+ continue;
+ }
+ if (mbuf->meta_data().findInt64(kKeyTime, &timeUs)) {
+ info->mSamples.emplace_back(mbuf, timeUs);
+ } else {
+ mbuf->meta_data().dumpToLog();
+ info->mFinalResult = ERROR_MALFORMED;
+ mbuf->release();
+ releaseRemaining = true;
+ }
+ }
+}
+
status_t NuMediaExtractor::seekTo(
int64_t timeUs, MediaSource::ReadOptions::SeekMode mode) {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples(timeUs, mode);
+ ssize_t minIndex = fetchAllTrackSamples(timeUs, mode);
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
@@ -462,7 +586,7 @@
status_t NuMediaExtractor::advance() {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
@@ -470,28 +594,27 @@
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
- info->mSample->release();
- info->mSample = NULL;
- info->mSampleTimeUs = -1ll;
+ releaseOneSample(info);
return OK;
}
-status_t NuMediaExtractor::appendVorbisNumPageSamples(TrackInfo *info, const sp<ABuffer> &buffer) {
+status_t NuMediaExtractor::appendVorbisNumPageSamples(
+ MediaBufferBase *mbuf, const sp<ABuffer> &buffer) {
int32_t numPageSamples;
- if (!info->mSample->meta_data()->findInt32(
+ if (!mbuf->meta_data().findInt32(
kKeyValidSamples, &numPageSamples)) {
numPageSamples = -1;
}
- memcpy((uint8_t *)buffer->data() + info->mSample->range_length(),
+ memcpy((uint8_t *)buffer->data() + mbuf->range_length(),
&numPageSamples,
sizeof(numPageSamples));
uint32_t type;
const void *data;
size_t size, size2;
- if (info->mSample->meta_data()->findData(kKeyEncryptedSizes, &type, &data, &size)) {
+ if (mbuf->meta_data().findData(kKeyEncryptedSizes, &type, &data, &size)) {
// Signal numPageSamples (a plain int32_t) is appended at the end,
// i.e. sizeof(numPageSamples) plain bytes + 0 encrypted bytes
if (SIZE_MAX - size < sizeof(int32_t)) {
@@ -509,9 +632,9 @@
int32_t zero = 0;
memcpy(adata, data, size);
memcpy(adata + size, &zero, sizeof(zero));
- info->mSample->meta_data()->setData(kKeyEncryptedSizes, type, adata, newSize);
+ mbuf->meta_data().setData(kKeyEncryptedSizes, type, adata, newSize);
- if (info->mSample->meta_data()->findData(kKeyPlainSizes, &type, &data, &size2)) {
+ if (mbuf->meta_data().findData(kKeyPlainSizes, &type, &data, &size2)) {
if (size2 != size) {
return ERROR_MALFORMED;
}
@@ -524,7 +647,7 @@
// append sizeof(numPageSamples) to plain sizes.
int32_t int32Size = sizeof(numPageSamples);
memcpy(adata + size, &int32Size, sizeof(int32Size));
- info->mSample->meta_data()->setData(kKeyPlainSizes, type, adata, newSize);
+ mbuf->meta_data().setData(kKeyPlainSizes, type, adata, newSize);
}
return OK;
@@ -533,7 +656,7 @@
status_t NuMediaExtractor::readSampleData(const sp<ABuffer> &buffer) {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
@@ -541,7 +664,8 @@
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
- size_t sampleSize = info->mSample->range_length();
+ auto it = info->mSamples.begin();
+ size_t sampleSize = it->mBuffer->range_length();
if (info->mTrackFlags & kIsVorbis) {
// Each sample's data is suffixed by the number of page samples
@@ -554,14 +678,14 @@
}
const uint8_t *src =
- (const uint8_t *)info->mSample->data()
- + info->mSample->range_offset();
+ (const uint8_t *)it->mBuffer->data()
+ + it->mBuffer->range_offset();
- memcpy((uint8_t *)buffer->data(), src, info->mSample->range_length());
+ memcpy((uint8_t *)buffer->data(), src, it->mBuffer->range_length());
status_t err = OK;
if (info->mTrackFlags & kIsVorbis) {
- err = appendVorbisNumPageSamples(info, buffer);
+ err = appendVorbisNumPageSamples(it->mBuffer, buffer);
}
if (err == OK) {
@@ -571,10 +695,32 @@
return err;
}
+status_t NuMediaExtractor::getSampleSize(size_t *sampleSize) {
+ Mutex::Autolock autoLock(mLock);
+
+ ssize_t minIndex = fetchAllTrackSamples();
+
+ if (minIndex < 0) {
+ return ERROR_END_OF_STREAM;
+ }
+
+ TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
+ auto it = info->mSamples.begin();
+ *sampleSize = it->mBuffer->range_length();
+
+ if (info->mTrackFlags & kIsVorbis) {
+ // Each sample's data is suffixed by the number of page samples
+ // or -1 if not available.
+ *sampleSize += sizeof(int32_t);
+ }
+
+ return OK;
+}
+
status_t NuMediaExtractor::getSampleTrackIndex(size_t *trackIndex) {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
@@ -589,14 +735,14 @@
status_t NuMediaExtractor::getSampleTime(int64_t *sampleTimeUs) {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
}
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
- *sampleTimeUs = info->mSampleTimeUs;
+ *sampleTimeUs = info->mSamples.begin()->mSampleTimeUs;
return OK;
}
@@ -606,14 +752,15 @@
*sampleMeta = NULL;
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
- return ERROR_END_OF_STREAM;
+ status_t err = minIndex;
+ return err;
}
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
- *sampleMeta = info->mSample->meta_data();
+ *sampleMeta = new MetaData(info->mSamples.begin()->mBuffer->meta_data());
return OK;
}
@@ -624,7 +771,7 @@
}
bool NuMediaExtractor::getTotalBitrate(int64_t *bitrate) const {
- if (mTotalBitrate >= 0) {
+ if (mTotalBitrate > 0) {
*bitrate = mTotalBitrate;
return true;
}
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 5f50e46..9375de1 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -25,7 +25,6 @@
#include <cutils/properties.h>
#include <binder/IServiceManager.h>
-#include <media/IMediaCodecService.h>
#include <media/stagefright/OMXClient.h>
#include <media/IOMX.h>
@@ -38,70 +37,25 @@
}
status_t OMXClient::connect() {
- return connect("default", nullptr);
+ return connect("default");
}
-status_t OMXClient::connect(bool* trebleFlag) {
- if (property_get_bool("persist.media.treble_omx", true)) {
- if (trebleFlag != nullptr) {
- *trebleFlag = true;
- }
- return connectTreble();
- }
- if (trebleFlag != nullptr) {
- *trebleFlag = false;
- }
- return connectLegacy();
-}
-
-status_t OMXClient::connect(const char* name, bool* trebleFlag) {
- if (property_get_bool("persist.media.treble_omx", true)) {
- if (trebleFlag != nullptr) {
- *trebleFlag = true;
- }
- return connectTreble(name);
- }
- if (trebleFlag != nullptr) {
- *trebleFlag = false;
- }
- return connectLegacy();
-}
-
-status_t OMXClient::connectLegacy() {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> codecbinder = sm->getService(String16("media.codec"));
- sp<IMediaCodecService> codecservice = interface_cast<IMediaCodecService>(codecbinder);
-
- if (codecservice.get() == NULL) {
- ALOGE("Cannot obtain IMediaCodecService");
- return NO_INIT;
- }
-
- mOMX = codecservice->getOMX();
- if (mOMX.get() == NULL) {
- ALOGE("Cannot obtain mediacodec IOMX");
- return NO_INIT;
- }
-
- return OK;
-}
-
-status_t OMXClient::connectTreble(const char* name) {
+status_t OMXClient::connect(const char* name) {
using namespace ::android::hardware::media::omx::V1_0;
if (name == nullptr) {
name = "default";
}
sp<IOmx> tOmx = IOmx::getService(name);
if (tOmx.get() == nullptr) {
- ALOGE("Cannot obtain Treble IOmx.");
+ ALOGE("Cannot obtain IOmx service.");
return NO_INIT;
}
if (!tOmx->isRemote()) {
- ALOGE("Treble IOmx is in passthrough mode.");
+ ALOGE("IOmx service running in passthrough mode.");
return NO_INIT;
}
mOMX = new utils::LWOmx(tOmx);
- ALOGI("Treble IOmx obtained");
+ ALOGI("IOmx service obtained");
return OK;
}
@@ -109,4 +63,8 @@
mOMX.clear();
}
+sp<IOMX> OMXClient::interface() {
+ return mOMX;
+}
+
} // namespace android
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
deleted file mode 100644
index 766230a..0000000
--- a/media/libstagefright/OggExtractor.cpp
+++ /dev/null
@@ -1,1383 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "OggExtractor"
-#include <utils/Log.h>
-
-#include "include/OggExtractor.h"
-
-#include <cutils/properties.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/base64.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-#include <utils/String8.h>
-
-extern "C" {
- #include <Tremolo/codec_internal.h>
-
- int _vorbis_unpack_books(vorbis_info *vi,oggpack_buffer *opb);
- int _vorbis_unpack_info(vorbis_info *vi,oggpack_buffer *opb);
- int _vorbis_unpack_comment(vorbis_comment *vc,oggpack_buffer *opb);
- long vorbis_packet_blocksize(vorbis_info *vi,ogg_packet *op);
-}
-
-namespace android {
-
-struct OggSource : public MediaSource {
- explicit OggSource(const sp<OggExtractor> &extractor);
-
- virtual sp<MetaData> getFormat();
-
- virtual status_t start(MetaData *params = NULL);
- virtual status_t stop();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
- virtual ~OggSource();
-
-private:
- sp<OggExtractor> mExtractor;
- bool mStarted;
-
- OggSource(const OggSource &);
- OggSource &operator=(const OggSource &);
-};
-
-struct MyOggExtractor {
- MyOggExtractor(
- const sp<DataSource> &source,
- const char *mimeType,
- size_t numHeaders,
- int64_t seekPreRollUs);
- virtual ~MyOggExtractor();
-
- sp<MetaData> getFormat() const;
-
- // Returns an approximate bitrate in bits per second.
- virtual uint64_t approxBitrate() const = 0;
-
- status_t seekToTime(int64_t timeUs);
- status_t seekToOffset(off64_t offset);
- virtual status_t readNextPacket(MediaBuffer **buffer) = 0;
-
- status_t init();
-
- sp<MetaData> getFileMetaData() { return mFileMeta; }
-
-protected:
- struct Page {
- uint64_t mGranulePosition;
- int32_t mPrevPacketSize;
- uint64_t mPrevPacketPos;
- uint32_t mSerialNo;
- uint32_t mPageNo;
- uint8_t mFlags;
- uint8_t mNumSegments;
- uint8_t mLace[255];
- };
-
- struct TOCEntry {
- off64_t mPageOffset;
- int64_t mTimeUs;
- };
-
- sp<DataSource> mSource;
- off64_t mOffset;
- Page mCurrentPage;
- uint64_t mCurGranulePosition;
- uint64_t mPrevGranulePosition;
- size_t mCurrentPageSize;
- bool mFirstPacketInPage;
- uint64_t mCurrentPageSamples;
- size_t mNextLaceIndex;
-
- const char *mMimeType;
- size_t mNumHeaders;
- int64_t mSeekPreRollUs;
-
- off64_t mFirstDataOffset;
-
- vorbis_info mVi;
- vorbis_comment mVc;
-
- sp<MetaData> mMeta;
- sp<MetaData> mFileMeta;
-
- Vector<TOCEntry> mTableOfContents;
-
- ssize_t readPage(off64_t offset, Page *page);
- status_t findNextPage(off64_t startOffset, off64_t *pageOffset);
-
- virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const = 0;
-
- // Extract codec format, metadata tags, and various codec specific data;
- // the format and CSD's are required to setup the decoders for the enclosed media content.
- //
- // Valid values for `type` are:
- // 1 - bitstream identification header
- // 3 - comment header
- // 5 - codec setup header (Vorbis only)
- virtual status_t verifyHeader(MediaBuffer *buffer, uint8_t type) = 0;
-
- // Read the next ogg packet from the underlying data source; optionally
- // calculate the timestamp for the output packet whilst pretending
- // that we are parsing an Ogg Vorbis stream.
- //
- // *buffer is NULL'ed out immediately upon entry, and if successful a new buffer is allocated;
- // clients are responsible for releasing the original buffer.
- status_t _readNextPacket(MediaBuffer **buffer, bool calcVorbisTimestamp);
-
- int32_t getPacketBlockSize(MediaBuffer *buffer);
-
- void parseFileMetaData();
-
- status_t findPrevGranulePosition(off64_t pageOffset, uint64_t *granulePos);
-
- void buildTableOfContents();
-
- MyOggExtractor(const MyOggExtractor &);
- MyOggExtractor &operator=(const MyOggExtractor &);
-};
-
-struct MyVorbisExtractor : public MyOggExtractor {
- explicit MyVorbisExtractor(const sp<DataSource> &source)
- : MyOggExtractor(source,
- MEDIA_MIMETYPE_AUDIO_VORBIS,
- /* numHeaders */ 3,
- /* seekPreRollUs */ 0) {
- }
-
- virtual uint64_t approxBitrate() const;
-
- virtual status_t readNextPacket(MediaBuffer **buffer) {
- return _readNextPacket(buffer, /* calcVorbisTimestamp = */ true);
- }
-
-protected:
- virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const {
- if (granulePos > INT64_MAX / 1000000ll) {
- return INT64_MAX;
- }
- return granulePos * 1000000ll / mVi.rate;
- }
-
- virtual status_t verifyHeader(MediaBuffer *buffer, uint8_t type);
-};
-
-struct MyOpusExtractor : public MyOggExtractor {
- static const int32_t kOpusSampleRate = 48000;
- static const int64_t kOpusSeekPreRollUs = 80000; // 80 ms
-
- explicit MyOpusExtractor(const sp<DataSource> &source)
- : MyOggExtractor(source, MEDIA_MIMETYPE_AUDIO_OPUS, /*numHeaders*/ 2, kOpusSeekPreRollUs),
- mChannelCount(0),
- mCodecDelay(0),
- mStartGranulePosition(-1) {
- }
-
- virtual uint64_t approxBitrate() const {
- return 0;
- }
-
- virtual status_t readNextPacket(MediaBuffer **buffer);
-
-protected:
- virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const;
- virtual status_t verifyHeader(MediaBuffer *buffer, uint8_t type);
-
-private:
- status_t verifyOpusHeader(MediaBuffer *buffer);
- status_t verifyOpusComments(MediaBuffer *buffer);
- uint32_t getNumSamplesInPacket(MediaBuffer *buffer) const;
-
- uint8_t mChannelCount;
- uint16_t mCodecDelay;
- int64_t mStartGranulePosition;
-};
-
-static void extractAlbumArt(
- const sp<MetaData> &fileMeta, const void *data, size_t size);
-
-////////////////////////////////////////////////////////////////////////////////
-
-OggSource::OggSource(const sp<OggExtractor> &extractor)
- : mExtractor(extractor),
- mStarted(false) {
-}
-
-OggSource::~OggSource() {
- if (mStarted) {
- stop();
- }
-}
-
-sp<MetaData> OggSource::getFormat() {
- return mExtractor->mImpl->getFormat();
-}
-
-status_t OggSource::start(MetaData * /* params */) {
- if (mStarted) {
- return INVALID_OPERATION;
- }
-
- mStarted = true;
-
- return OK;
-}
-
-status_t OggSource::stop() {
- mStarted = false;
-
- return OK;
-}
-
-status_t OggSource::read(
- MediaBuffer **out, const ReadOptions *options) {
- *out = NULL;
-
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- if (options && options->getSeekTo(&seekTimeUs, &mode)) {
- status_t err = mExtractor->mImpl->seekToTime(seekTimeUs);
- if (err != OK) {
- return err;
- }
- }
-
- MediaBuffer *packet;
- status_t err = mExtractor->mImpl->readNextPacket(&packet);
-
- if (err != OK) {
- return err;
- }
-
-#if 0
- int64_t timeUs;
- if (packet->meta_data()->findInt64(kKeyTime, &timeUs)) {
- ALOGI("found time = %lld us", timeUs);
- } else {
- ALOGI("NO time");
- }
-#endif
-
- packet->meta_data()->setInt32(kKeyIsSyncFrame, 1);
-
- *out = packet;
-
- return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-MyOggExtractor::MyOggExtractor(
- const sp<DataSource> &source,
- const char *mimeType,
- size_t numHeaders,
- int64_t seekPreRollUs)
- : mSource(source),
- mOffset(0),
- mCurGranulePosition(0),
- mPrevGranulePosition(0),
- mCurrentPageSize(0),
- mFirstPacketInPage(true),
- mCurrentPageSamples(0),
- mNextLaceIndex(0),
- mMimeType(mimeType),
- mNumHeaders(numHeaders),
- mSeekPreRollUs(seekPreRollUs),
- mFirstDataOffset(-1) {
- mCurrentPage.mNumSegments = 0;
-
- vorbis_info_init(&mVi);
- vorbis_comment_init(&mVc);
-}
-
-MyOggExtractor::~MyOggExtractor() {
- vorbis_comment_clear(&mVc);
- vorbis_info_clear(&mVi);
-}
-
-sp<MetaData> MyOggExtractor::getFormat() const {
- return mMeta;
-}
-
-status_t MyOggExtractor::findNextPage(
- off64_t startOffset, off64_t *pageOffset) {
- *pageOffset = startOffset;
-
- for (;;) {
- char signature[4];
- ssize_t n = mSource->readAt(*pageOffset, &signature, 4);
-
- if (n < 4) {
- *pageOffset = 0;
-
- return (n < 0) ? n : (status_t)ERROR_END_OF_STREAM;
- }
-
- if (!memcmp(signature, "OggS", 4)) {
- if (*pageOffset > startOffset) {
- ALOGV("skipped %lld bytes of junk to reach next frame",
- (long long)(*pageOffset - startOffset));
- }
-
- return OK;
- }
-
- ++*pageOffset;
- }
-}
-
-// Given the offset of the "current" page, find the page immediately preceding
-// it (if any) and return its granule position.
-// To do this we back up from the "current" page's offset until we find any
-// page preceding it and then scan forward to just before the current page.
-status_t MyOggExtractor::findPrevGranulePosition(
- off64_t pageOffset, uint64_t *granulePos) {
- *granulePos = 0;
-
- off64_t prevPageOffset = 0;
- off64_t prevGuess = pageOffset;
- for (;;) {
- if (prevGuess >= 5000) {
- prevGuess -= 5000;
- } else {
- prevGuess = 0;
- }
-
- ALOGV("backing up %lld bytes", (long long)(pageOffset - prevGuess));
-
- status_t err = findNextPage(prevGuess, &prevPageOffset);
- if (err == ERROR_END_OF_STREAM) {
- // We are at the last page and didn't back off enough;
- // back off 5000 bytes more and try again.
- continue;
- } else if (err != OK) {
- return err;
- }
-
- if (prevPageOffset < pageOffset || prevGuess == 0) {
- break;
- }
- }
-
- if (prevPageOffset == pageOffset) {
- // We did not find a page preceding this one.
- return UNKNOWN_ERROR;
- }
-
- ALOGV("prevPageOffset at %lld, pageOffset at %lld",
- (long long)prevPageOffset, (long long)pageOffset);
-
- for (;;) {
- Page prevPage;
- ssize_t n = readPage(prevPageOffset, &prevPage);
-
- if (n <= 0) {
- return (status_t)n;
- }
-
- prevPageOffset += n;
-
- if (prevPageOffset == pageOffset) {
- *granulePos = prevPage.mGranulePosition;
- return OK;
- }
- }
-}
-
-status_t MyOggExtractor::seekToTime(int64_t timeUs) {
- timeUs -= mSeekPreRollUs;
- if (timeUs < 0) {
- timeUs = 0;
- }
-
- if (mTableOfContents.isEmpty()) {
- // Perform approximate seeking based on avg. bitrate.
- uint64_t bps = approxBitrate();
- if (bps <= 0) {
- return INVALID_OPERATION;
- }
-
- off64_t pos = timeUs * bps / 8000000ll;
-
- ALOGV("seeking to offset %lld", (long long)pos);
- return seekToOffset(pos);
- }
-
- size_t left = 0;
- size_t right_plus_one = mTableOfContents.size();
- while (left < right_plus_one) {
- size_t center = left + (right_plus_one - left) / 2;
-
- const TOCEntry &entry = mTableOfContents.itemAt(center);
-
- if (timeUs < entry.mTimeUs) {
- right_plus_one = center;
- } else if (timeUs > entry.mTimeUs) {
- left = center + 1;
- } else {
- left = center;
- break;
- }
- }
-
- if (left == mTableOfContents.size()) {
- --left;
- }
-
- const TOCEntry &entry = mTableOfContents.itemAt(left);
-
- ALOGV("seeking to entry %zu / %zu at offset %lld",
- left, mTableOfContents.size(), (long long)entry.mPageOffset);
-
- return seekToOffset(entry.mPageOffset);
-}
-
-status_t MyOggExtractor::seekToOffset(off64_t offset) {
- if (mFirstDataOffset >= 0 && offset < mFirstDataOffset) {
- // Once we know where the actual audio data starts (past the headers)
- // don't ever seek to anywhere before that.
- offset = mFirstDataOffset;
- }
-
- off64_t pageOffset;
- status_t err = findNextPage(offset, &pageOffset);
-
- if (err != OK) {
- return err;
- }
-
- // We found the page we wanted to seek to, but we'll also need
- // the page preceding it to determine how many valid samples are on
- // this page.
- findPrevGranulePosition(pageOffset, &mPrevGranulePosition);
-
- mOffset = pageOffset;
-
- mCurrentPageSize = 0;
- mFirstPacketInPage = true;
- mCurrentPageSamples = 0;
- mCurrentPage.mNumSegments = 0;
- mCurrentPage.mPrevPacketSize = -1;
- mNextLaceIndex = 0;
-
- // XXX what if new page continues packet from last???
-
- return OK;
-}
-
-ssize_t MyOggExtractor::readPage(off64_t offset, Page *page) {
- uint8_t header[27];
- ssize_t n;
- if ((n = mSource->readAt(offset, header, sizeof(header)))
- < (ssize_t)sizeof(header)) {
- ALOGV("failed to read %zu bytes at offset %#016llx, got %zd bytes",
- sizeof(header), (long long)offset, n);
-
- if (n < 0) {
- return n;
- } else if (n == 0) {
- return ERROR_END_OF_STREAM;
- } else {
- return ERROR_IO;
- }
- }
-
- if (memcmp(header, "OggS", 4)) {
- return ERROR_MALFORMED;
- }
-
- if (header[4] != 0) {
- // Wrong version.
-
- return ERROR_UNSUPPORTED;
- }
-
- page->mFlags = header[5];
-
- if (page->mFlags & ~7) {
- // Only bits 0-2 are defined in version 0.
- return ERROR_MALFORMED;
- }
-
- page->mGranulePosition = U64LE_AT(&header[6]);
-
-#if 0
- printf("granulePosition = %llu (0x%llx)\n",
- page->mGranulePosition, page->mGranulePosition);
-#endif
-
- page->mSerialNo = U32LE_AT(&header[14]);
- page->mPageNo = U32LE_AT(&header[18]);
-
- page->mNumSegments = header[26];
- if (mSource->readAt(
- offset + sizeof(header), page->mLace, page->mNumSegments)
- < (ssize_t)page->mNumSegments) {
- return ERROR_IO;
- }
-
- size_t totalSize = 0;
- for (size_t i = 0; i < page->mNumSegments; ++i) {
- totalSize += page->mLace[i];
- }
-
-#if 0
- String8 tmp;
- for (size_t i = 0; i < page->mNumSegments; ++i) {
- char x[32];
- sprintf(x, "%s%u", i > 0 ? ", " : "", (unsigned)page->mLace[i]);
-
- tmp.append(x);
- }
-
- ALOGV("%c %s", page->mFlags & 1 ? '+' : ' ', tmp.string());
-#endif
-
- return sizeof(header) + page->mNumSegments + totalSize;
-}
-
-status_t MyOpusExtractor::readNextPacket(MediaBuffer **out) {
- if (mOffset <= mFirstDataOffset && mStartGranulePosition < 0) {
- // The first sample might not start at time 0; find out where by subtracting
- // the number of samples on the first page from the granule position
- // (position of last complete sample) of the first page. This happens
- // the first time before we attempt to read a packet from the first page.
- MediaBuffer *mBuf;
- uint32_t numSamples = 0;
- uint64_t curGranulePosition = 0;
- while (true) {
- status_t err = _readNextPacket(&mBuf, /* calcVorbisTimestamp = */false);
- if (err != OK && err != ERROR_END_OF_STREAM) {
- return err;
- }
- // First two pages are header pages.
- if (err == ERROR_END_OF_STREAM || mCurrentPage.mPageNo > 2) {
- if (mBuf != NULL) {
- mBuf->release();
- mBuf = NULL;
- }
- break;
- }
- curGranulePosition = mCurrentPage.mGranulePosition;
- numSamples += getNumSamplesInPacket(mBuf);
- mBuf->release();
- mBuf = NULL;
- }
-
- if (curGranulePosition > numSamples) {
- mStartGranulePosition = curGranulePosition - numSamples;
- } else {
- mStartGranulePosition = 0;
- }
- seekToOffset(0);
- }
-
- status_t err = _readNextPacket(out, /* calcVorbisTimestamp = */false);
- if (err != OK) {
- return err;
- }
-
- int32_t currentPageSamples;
- // Calculate timestamps by accumulating durations starting from the first sample of a page;
- // We assume that we only seek to page boundaries.
- if ((*out)->meta_data()->findInt32(kKeyValidSamples, &currentPageSamples)) {
- // first packet in page
- if (mOffset == mFirstDataOffset) {
- currentPageSamples -= mStartGranulePosition;
- (*out)->meta_data()->setInt32(kKeyValidSamples, currentPageSamples);
- }
- mCurGranulePosition = mCurrentPage.mGranulePosition - currentPageSamples;
- }
-
- int64_t timeUs = getTimeUsOfGranule(mCurGranulePosition);
- (*out)->meta_data()->setInt64(kKeyTime, timeUs);
-
- uint32_t frames = getNumSamplesInPacket(*out);
- mCurGranulePosition += frames;
- return OK;
-}
-
-uint32_t MyOpusExtractor::getNumSamplesInPacket(MediaBuffer *buffer) const {
- if (buffer == NULL || buffer->range_length() < 1) {
- return 0;
- }
-
- uint8_t *data = (uint8_t *)buffer->data() + buffer->range_offset();
- uint8_t toc = data[0];
- uint8_t config = (toc >> 3) & 0x1f;
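- // Frame duration in microseconds for each of the 32 TOC configurations
- // (see RFC 6716, section 3.1).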
- uint32_t frameSizesUs[] = {
- 10000, 20000, 40000, 60000, // 0...3
- 10000, 20000, 40000, 60000, // 4...7
- 10000, 20000, 40000, 60000, // 8...11
- 10000, 20000, // 12...13
- 10000, 20000, // 14...15
- 2500, 5000, 10000, 20000, // 16...19
- 2500, 5000, 10000, 20000, // 20...23
- 2500, 5000, 10000, 20000, // 24...27
- 2500, 5000, 10000, 20000 // 28...31
- };
- uint32_t frameSizeUs = frameSizesUs[config];
-
- uint32_t numFrames;
- uint8_t c = toc & 3;
- switch (c) {
- case 0:
- numFrames = 1;
- break;
- case 1:
- case 2:
- numFrames = 2;
- break;
- case 3:
- if (buffer->range_length() < 3) {
- numFrames = 0;
- } else {
- numFrames = data[2] & 0x3f;
- }
- break;
- default:
- TRESPASS();
- }
-
- uint32_t numSamples = frameSizeUs * numFrames * kOpusSampleRate / 1000000;
- return numSamples;
-}
-
-status_t MyOggExtractor::_readNextPacket(MediaBuffer **out, bool calcVorbisTimestamp) {
- *out = NULL;
-
- MediaBuffer *buffer = NULL;
- int64_t timeUs = -1;
-
- for (;;) {
- size_t i;
- size_t packetSize = 0;
- bool gotFullPacket = false;
- for (i = mNextLaceIndex; i < mCurrentPage.mNumSegments; ++i) {
- uint8_t lace = mCurrentPage.mLace[i];
-
- packetSize += lace;
-
- if (lace < 255) {
- gotFullPacket = true;
- ++i;
- break;
- }
- }
-
- if (mNextLaceIndex < mCurrentPage.mNumSegments) {
- off64_t dataOffset = mOffset + 27 + mCurrentPage.mNumSegments;
- for (size_t j = 0; j < mNextLaceIndex; ++j) {
- dataOffset += mCurrentPage.mLace[j];
- }
-
- size_t fullSize = packetSize;
- if (buffer != NULL) {
- fullSize += buffer->range_length();
- }
- if (fullSize > 16 * 1024 * 1024) { // arbitrary limit of 16 MB packet size
- if (buffer != NULL) {
- buffer->release();
- }
- ALOGE("b/36592202");
- return ERROR_MALFORMED;
- }
- MediaBuffer *tmp = new (std::nothrow) MediaBuffer(fullSize);
- if (tmp == NULL) {
- if (buffer != NULL) {
- buffer->release();
- }
- ALOGE("b/36592202");
- return ERROR_MALFORMED;
- }
- if (buffer != NULL) {
- memcpy(tmp->data(), buffer->data(), buffer->range_length());
- tmp->set_range(0, buffer->range_length());
- buffer->release();
- } else {
- tmp->set_range(0, 0);
- }
- buffer = tmp;
-
- ssize_t n = mSource->readAt(
- dataOffset,
- (uint8_t *)buffer->data() + buffer->range_length(),
- packetSize);
-
- if (n < (ssize_t)packetSize) {
- buffer->release();
- ALOGV("failed to read %zu bytes at %#016llx, got %zd bytes",
- packetSize, (long long)dataOffset, n);
- return ERROR_IO;
- }
-
- buffer->set_range(0, fullSize);
-
- mNextLaceIndex = i;
-
- if (gotFullPacket) {
- // We've just read the entire packet.
-
- if (mFirstPacketInPage) {
- buffer->meta_data()->setInt32(
- kKeyValidSamples, mCurrentPageSamples);
- mFirstPacketInPage = false;
- }
-
- if (calcVorbisTimestamp) {
- int32_t curBlockSize = getPacketBlockSize(buffer);
- if (mCurrentPage.mPrevPacketSize < 0) {
- mCurrentPage.mPrevPacketSize = curBlockSize;
- mCurrentPage.mPrevPacketPos =
- mCurrentPage.mGranulePosition - mCurrentPageSamples;
- timeUs = mCurrentPage.mPrevPacketPos * 1000000ll / mVi.rate;
- } else {
- // The effective block size is the average of the two overlapped blocks
- int32_t actualBlockSize =
- (curBlockSize + mCurrentPage.mPrevPacketSize) / 2;
- timeUs = mCurrentPage.mPrevPacketPos * 1000000ll / mVi.rate;
- // The actual size output by the decoder will be half the effective
- // size, due to the overlap
- mCurrentPage.mPrevPacketPos += actualBlockSize / 2;
- mCurrentPage.mPrevPacketSize = curBlockSize;
- }
- buffer->meta_data()->setInt64(kKeyTime, timeUs);
- }
- *out = buffer;
-
- return OK;
- }
-
- // fall through, the buffer now contains the start of the packet.
- }
-
- CHECK_EQ(mNextLaceIndex, mCurrentPage.mNumSegments);
-
- mOffset += mCurrentPageSize;
- ssize_t n = readPage(mOffset, &mCurrentPage);
-
- if (n <= 0) {
- if (buffer) {
- buffer->release();
- buffer = NULL;
- }
-
- ALOGV("readPage returned %zd", n);
-
- return n < 0 ? n : (status_t)ERROR_END_OF_STREAM;
- }
-
- // Prevent a harmless unsigned integer overflow by clamping to 0
- if (mCurrentPage.mGranulePosition >= mPrevGranulePosition) {
- mCurrentPageSamples =
- mCurrentPage.mGranulePosition - mPrevGranulePosition;
- } else {
- mCurrentPageSamples = 0;
- }
- mFirstPacketInPage = true;
-
- mPrevGranulePosition = mCurrentPage.mGranulePosition;
-
- mCurrentPageSize = n;
- mNextLaceIndex = 0;
-
- if (buffer != NULL) {
- if ((mCurrentPage.mFlags & 1) == 0) {
- // This page does not continue the packet, i.e. the packet
- // is already complete.
-
- if (timeUs >= 0) {
- buffer->meta_data()->setInt64(kKeyTime, timeUs);
- }
-
- buffer->meta_data()->setInt32(
- kKeyValidSamples, mCurrentPageSamples);
- mFirstPacketInPage = false;
-
- *out = buffer;
-
- return OK;
- }
- }
- }
-}
-
-status_t MyOggExtractor::init() {
- mMeta = new MetaData;
- mMeta->setCString(kKeyMIMEType, mMimeType);
-
- status_t err;
- MediaBuffer *packet;
- for (size_t i = 0; i < mNumHeaders; ++i) {
- // ignore timestamp for configuration packets
- if ((err = _readNextPacket(&packet, /* calcVorbisTimestamp = */ false)) != OK) {
- return err;
- }
- ALOGV("read packet of size %zu\n", packet->range_length());
- err = verifyHeader(packet, /* type = */ i * 2 + 1);
- packet->release();
- packet = NULL;
- if (err != OK) {
- return err;
- }
- }
-
- mFirstDataOffset = mOffset + mCurrentPageSize;
-
- off64_t size;
- uint64_t lastGranulePosition;
- if (!(mSource->flags() & DataSource::kIsCachingDataSource)
- && mSource->getSize(&size) == OK
- && findPrevGranulePosition(size, &lastGranulePosition) == OK) {
- // Let's assume it's cheap to seek to the end.
- // The granule position of the final page in the stream will
- // give us the exact duration of the content, something that
- // we can only approximate using avg. bitrate if seeking to
- // the end is too expensive or impossible (live streaming).
-
- int64_t durationUs = getTimeUsOfGranule(lastGranulePosition);
-
- mMeta->setInt64(kKeyDuration, durationUs);
-
- buildTableOfContents();
- }
-
- return OK;
-}
-
-void MyOggExtractor::buildTableOfContents() {
- off64_t offset = mFirstDataOffset;
- Page page;
- ssize_t pageSize;
- while ((pageSize = readPage(offset, &page)) > 0) {
- mTableOfContents.push();
-
- TOCEntry &entry =
- mTableOfContents.editItemAt(mTableOfContents.size() - 1);
-
- entry.mPageOffset = offset;
- entry.mTimeUs = getTimeUsOfGranule(page.mGranulePosition);
-
- offset += (size_t)pageSize;
- }
-
- // Limit the maximum amount of RAM we spend on the table of contents;
- // if necessary, thin the table out evenly to trim it down to the
- // maximum size.
-
- static const size_t kMaxTOCSize = 8192;
- static const size_t kMaxNumTOCEntries = kMaxTOCSize / sizeof(TOCEntry);
-
- size_t numerator = mTableOfContents.size();
-
- if (numerator > kMaxNumTOCEntries) {
- size_t denom = numerator - kMaxNumTOCEntries;
-
- size_t accum = 0;
- for (ssize_t i = mTableOfContents.size() - 1; i >= 0; --i) {
- accum += denom;
- if (accum >= numerator) {
- mTableOfContents.removeAt(i);
- accum -= numerator;
- }
- }
- }
-}
-
-int32_t MyOggExtractor::getPacketBlockSize(MediaBuffer *buffer) {
- const uint8_t *data =
- (const uint8_t *)buffer->data() + buffer->range_offset();
-
- size_t size = buffer->range_length();
-
- ogg_buffer buf;
- buf.data = (uint8_t *)data;
- buf.size = size;
- buf.refcount = 1;
- buf.ptr.owner = NULL;
-
- ogg_reference ref;
- ref.buffer = &buf;
- ref.begin = 0;
- ref.length = size;
- ref.next = NULL;
-
- ogg_packet pack;
- pack.packet = &ref;
- pack.bytes = ref.length;
- pack.b_o_s = 0;
- pack.e_o_s = 0;
- pack.granulepos = 0;
- pack.packetno = 0;
-
- return vorbis_packet_blocksize(&mVi, &pack);
-}
-
-int64_t MyOpusExtractor::getTimeUsOfGranule(uint64_t granulePos) const {
- uint64_t pcmSamplePosition = 0;
- if (granulePos > mCodecDelay) {
- pcmSamplePosition = granulePos - mCodecDelay;
- }
- if (pcmSamplePosition > INT64_MAX / 1000000ll) {
- return INT64_MAX;
- }
- return pcmSamplePosition * 1000000ll / kOpusSampleRate;
-}
-
-status_t MyOpusExtractor::verifyHeader(MediaBuffer *buffer, uint8_t type) {
- switch (type) {
- // there are actually no header types defined in the Opus spec; we choose 1 and 3 to mean
- // header and comments such that we can share code with MyVorbisExtractor.
- case 1:
- return verifyOpusHeader(buffer);
- case 3:
- return verifyOpusComments(buffer);
- default:
- return INVALID_OPERATION;
- }
-}
-
-status_t MyOpusExtractor::verifyOpusHeader(MediaBuffer *buffer) {
- const size_t kOpusHeaderSize = 19;
- const uint8_t *data =
- (const uint8_t *)buffer->data() + buffer->range_offset();
-
- size_t size = buffer->range_length();
-
- if (size < kOpusHeaderSize
- || memcmp(data, "OpusHead", 8)
- || /* version = */ data[8] != 1) {
- return ERROR_MALFORMED;
- }
-
- mChannelCount = data[9];
- mCodecDelay = U16LE_AT(&data[10]);
-
- mMeta->setData(kKeyOpusHeader, 0, data, size);
- mMeta->setInt32(kKeySampleRate, kOpusSampleRate);
- mMeta->setInt32(kKeyChannelCount, mChannelCount);
- mMeta->setInt64(kKeyOpusSeekPreRoll /* ns */, kOpusSeekPreRollUs * 1000 /* = 80 ms*/);
- mMeta->setInt64(kKeyOpusCodecDelay /* ns */,
- mCodecDelay /* sample/s */ * 1000000000ll / kOpusSampleRate);
-
- return OK;
-}
-
-status_t MyOpusExtractor::verifyOpusComments(MediaBuffer *buffer) {
- // add artificial framing bit so we can reuse _vorbis_unpack_comment
- int32_t commentSize = buffer->range_length() + 1;
- sp<ABuffer> aBuf = new ABuffer(commentSize);
- if (aBuf->capacity() <= buffer->range_length()) {
- return ERROR_MALFORMED;
- }
-
- uint8_t* commentData = aBuf->data();
- memcpy(commentData,
- (uint8_t *)buffer->data() + buffer->range_offset(),
- buffer->range_length());
-
- ogg_buffer buf;
- buf.data = commentData;
- buf.size = commentSize;
- buf.refcount = 1;
- buf.ptr.owner = NULL;
-
- ogg_reference ref;
- ref.buffer = &buf;
- ref.begin = 0;
- ref.length = commentSize;
- ref.next = NULL;
-
- oggpack_buffer bits;
- oggpack_readinit(&bits, &ref);
-
- // skip 'OpusTags'
- const char *OpusTags = "OpusTags";
- const int32_t headerLen = strlen(OpusTags);
- int32_t framingBitOffset = headerLen;
- for (int i = 0; i < headerLen; ++i) {
- char chr = oggpack_read(&bits, 8);
- if (chr != OpusTags[i]) {
- return ERROR_MALFORMED;
- }
- }
-
- int32_t vendorLen = oggpack_read(&bits, 32);
- framingBitOffset += 4;
- if (vendorLen < 0 || vendorLen > commentSize - 8) {
- return ERROR_MALFORMED;
- }
- // skip vendor string
- framingBitOffset += vendorLen;
- for (int i = 0; i < vendorLen; ++i) {
- oggpack_read(&bits, 8);
- }
-
- int32_t n = oggpack_read(&bits, 32);
- framingBitOffset += 4;
- if (n < 0 || n > ((commentSize - oggpack_bytes(&bits)) >> 2)) {
- return ERROR_MALFORMED;
- }
- for (int i = 0; i < n; ++i) {
- int32_t len = oggpack_read(&bits, 32);
- framingBitOffset += 4;
- if (len < 0 || len > (commentSize - oggpack_bytes(&bits))) {
- return ERROR_MALFORMED;
- }
- framingBitOffset += len;
- for (int j = 0; j < len; ++j) {
- oggpack_read(&bits, 8);
- }
- }
- if (framingBitOffset < 0 || framingBitOffset >= commentSize) {
- return ERROR_MALFORMED;
- }
- commentData[framingBitOffset] = 1;
-
- buf.data = commentData + headerLen;
- buf.size = commentSize - headerLen;
- buf.refcount = 1;
- buf.ptr.owner = NULL;
-
- ref.buffer = &buf;
- ref.begin = 0;
- ref.length = commentSize - headerLen;
- ref.next = NULL;
-
- oggpack_readinit(&bits, &ref);
- int err = _vorbis_unpack_comment(&mVc, &bits);
- if (0 != err) {
- return ERROR_MALFORMED;
- }
-
- parseFileMetaData();
- return OK;
-}
-
-status_t MyVorbisExtractor::verifyHeader(
- MediaBuffer *buffer, uint8_t type) {
- const uint8_t *data =
- (const uint8_t *)buffer->data() + buffer->range_offset();
-
- size_t size = buffer->range_length();
-
- if (size < 7 || data[0] != type || memcmp(&data[1], "vorbis", 6)) {
- return ERROR_MALFORMED;
- }
-
- ogg_buffer buf;
- buf.data = (uint8_t *)data;
- buf.size = size;
- buf.refcount = 1;
- buf.ptr.owner = NULL;
-
- ogg_reference ref;
- ref.buffer = &buf;
- ref.begin = 0;
- ref.length = size;
- ref.next = NULL;
-
- oggpack_buffer bits;
- oggpack_readinit(&bits, &ref);
-
- if (oggpack_read(&bits, 8) != type) {
- return ERROR_MALFORMED;
- }
- for (size_t i = 0; i < 6; ++i) {
- oggpack_read(&bits, 8); // skip 'vorbis'
- }
-
- switch (type) {
- case 1:
- {
- if (0 != _vorbis_unpack_info(&mVi, &bits)) {
- return ERROR_MALFORMED;
- }
-
- mMeta->setData(kKeyVorbisInfo, 0, data, size);
- mMeta->setInt32(kKeySampleRate, mVi.rate);
- mMeta->setInt32(kKeyChannelCount, mVi.channels);
- mMeta->setInt32(kKeyBitRate, mVi.bitrate_nominal);
-
- ALOGV("lower-bitrate = %ld", mVi.bitrate_lower);
- ALOGV("upper-bitrate = %ld", mVi.bitrate_upper);
- ALOGV("nominal-bitrate = %ld", mVi.bitrate_nominal);
- ALOGV("window-bitrate = %ld", mVi.bitrate_window);
- ALOGV("blocksizes: %d/%d",
- vorbis_info_blocksize(&mVi, 0),
- vorbis_info_blocksize(&mVi, 1)
- );
-
- off64_t size;
- if (mSource->getSize(&size) == OK) {
- uint64_t bps = approxBitrate();
- if (bps != 0) {
- mMeta->setInt64(kKeyDuration, size * 8000000ll / bps);
- }
- }
- break;
- }
-
- case 3:
- {
- if (0 != _vorbis_unpack_comment(&mVc, &bits)) {
- return ERROR_MALFORMED;
- }
-
- parseFileMetaData();
- break;
- }
-
- case 5:
- {
- if (0 != _vorbis_unpack_books(&mVi, &bits)) {
- return ERROR_MALFORMED;
- }
-
- mMeta->setData(kKeyVorbisBooks, 0, data, size);
- break;
- }
- }
-
- return OK;
-}
-
-uint64_t MyVorbisExtractor::approxBitrate() const {
- if (mVi.bitrate_nominal != 0) {
- return mVi.bitrate_nominal;
- }
-
- return (mVi.bitrate_lower + mVi.bitrate_upper) / 2;
-}
-
-void MyOggExtractor::parseFileMetaData() {
- mFileMeta = new MetaData;
- mFileMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_OGG);
-
- for (int i = 0; i < mVc.comments; ++i) {
- const char *comment = mVc.user_comments[i];
- size_t commentLength = mVc.comment_lengths[i];
- parseVorbisComment(mFileMeta, comment, commentLength);
- //ALOGI("comment #%d: '%s'", i + 1, mVc.user_comments[i]);
- }
-}
-
-void parseVorbisComment(
- const sp<MetaData> &fileMeta, const char *comment, size_t commentLength)
-{
- struct {
- const char *const mTag;
- uint32_t mKey;
- } kMap[] = {
- { "TITLE", kKeyTitle },
- { "ARTIST", kKeyArtist },
- { "ALBUMARTIST", kKeyAlbumArtist },
- { "ALBUM ARTIST", kKeyAlbumArtist },
- { "COMPILATION", kKeyCompilation },
- { "ALBUM", kKeyAlbum },
- { "COMPOSER", kKeyComposer },
- { "GENRE", kKeyGenre },
- { "AUTHOR", kKeyAuthor },
- { "TRACKNUMBER", kKeyCDTrackNumber },
- { "DISCNUMBER", kKeyDiscNumber },
- { "DATE", kKeyDate },
- { "YEAR", kKeyYear },
- { "LYRICIST", kKeyWriter },
- { "METADATA_BLOCK_PICTURE", kKeyAlbumArt },
- { "ANDROID_LOOP", kKeyAutoLoop },
- };
-
- for (size_t j = 0; j < sizeof(kMap) / sizeof(kMap[0]); ++j) {
- size_t tagLen = strlen(kMap[j].mTag);
- if (!strncasecmp(kMap[j].mTag, comment, tagLen)
- && comment[tagLen] == '=') {
- if (kMap[j].mKey == kKeyAlbumArt) {
- extractAlbumArt(
- fileMeta,
- &comment[tagLen + 1],
- commentLength - tagLen - 1);
- } else if (kMap[j].mKey == kKeyAutoLoop) {
- if (!strcasecmp(&comment[tagLen + 1], "true")) {
- fileMeta->setInt32(kKeyAutoLoop, true);
- }
- } else {
- fileMeta->setCString(kMap[j].mKey, &comment[tagLen + 1]);
- }
- }
- }
-
-}
-
-static void extractAlbumArt(
- const sp<MetaData> &fileMeta, const void *data, size_t size) {
- ALOGV("extractAlbumArt from '%s'", (const char *)data);
-
- sp<ABuffer> flacBuffer = decodeBase64(AString((const char *)data, size));
- if (flacBuffer == NULL) {
- ALOGE("malformed base64 encoded data.");
- return;
- }
-
- size_t flacSize = flacBuffer->size();
- uint8_t *flac = flacBuffer->data();
- ALOGV("got flac of size %zu", flacSize);
-
- uint32_t picType;
- uint32_t typeLen;
- uint32_t descLen;
- uint32_t dataLen;
- char type[128];
-
- if (flacSize < 8) {
- return;
- }
-
- picType = U32_AT(flac);
-
- if (picType != 3) {
- // This is not a front cover.
- return;
- }
-
- typeLen = U32_AT(&flac[4]);
- if (typeLen > sizeof(type) - 1) {
- return;
- }
-
- // we've already checked above that flacSize >= 8
- if (flacSize - 8 < typeLen) {
- return;
- }
-
- memcpy(type, &flac[8], typeLen);
- type[typeLen] = '\0';
-
- ALOGV("picType = %d, type = '%s'", picType, type);
-
- if (!strcmp(type, "-->")) {
- // This is not inline cover art, but an external url instead.
- return;
- }
-
- if (flacSize < 32 || flacSize - 32 < typeLen) {
- return;
- }
-
- descLen = U32_AT(&flac[8 + typeLen]);
- if (flacSize - 32 - typeLen < descLen) {
- return;
- }
-
- dataLen = U32_AT(&flac[8 + typeLen + 4 + descLen + 16]);
-
- // we've already checked above that (flacSize - 32 - typeLen - descLen) >= 0
- if (flacSize - 32 - typeLen - descLen < dataLen) {
- return;
- }
-
- ALOGV("got image data, %zu trailing bytes",
- flacSize - 32 - typeLen - descLen - dataLen);
-
- fileMeta->setData(
- kKeyAlbumArt, 0, &flac[8 + typeLen + 4 + descLen + 20], dataLen);
-
- fileMeta->setCString(kKeyAlbumArtMIME, type);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-OggExtractor::OggExtractor(const sp<DataSource> &source)
- : mDataSource(source),
- mInitCheck(NO_INIT),
- mImpl(NULL) {
- for (int i = 0; i < 2; ++i) {
- if (mImpl != NULL) {
- delete mImpl;
- }
- if (i == 0) {
- mImpl = new MyVorbisExtractor(mDataSource);
- } else {
- mImpl = new MyOpusExtractor(mDataSource);
- }
- mInitCheck = mImpl->seekToOffset(0);
-
- if (mInitCheck == OK) {
- mInitCheck = mImpl->init();
- if (mInitCheck == OK) {
- break;
- }
- }
- }
-}
-
-OggExtractor::~OggExtractor() {
- delete mImpl;
- mImpl = NULL;
-}
-
-size_t OggExtractor::countTracks() {
- return mInitCheck != OK ? 0 : 1;
-}
-
-sp<IMediaSource> OggExtractor::getTrack(size_t index) {
- if (index >= 1) {
- return NULL;
- }
-
- return new OggSource(this);
-}
-
-sp<MetaData> OggExtractor::getTrackMetaData(
- size_t index, uint32_t /* flags */) {
- if (index >= 1) {
- return NULL;
- }
-
- return mImpl->getFormat();
-}
-
-sp<MetaData> OggExtractor::getMetaData() {
- return mImpl->getFileMetaData();
-}
-
-bool SniffOgg(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *) {
- char tmp[4];
- if (source->readAt(0, tmp, 4) < 4 || memcmp(tmp, "OggS", 4)) {
- return false;
- }
-
- mimeType->setTo(MEDIA_MIMETYPE_CONTAINER_OGG);
- *confidence = 0.2f;
-
- return true;
-}
-
-} // namespace android
diff --git a/media/libstagefright/OmxInfoBuilder.cpp b/media/libstagefright/OmxInfoBuilder.cpp
index 8717a79..96b896b 100644
--- a/media/libstagefright/OmxInfoBuilder.cpp
+++ b/media/libstagefright/OmxInfoBuilder.cpp
@@ -24,8 +24,7 @@
#include <utils/Log.h>
#include <cutils/properties.h>
-#include <binder/IServiceManager.h>
-#include <media/IMediaCodecService.h>
+#include <media/stagefright/foundation/MediaDefs.h>
#include <media/stagefright/OmxInfoBuilder.h>
#include <media/stagefright/ACodec.h>
@@ -34,10 +33,9 @@
#include <android/hardware/media/omx/1.0/IOmxNode.h>
#include <media/stagefright/omx/OMXUtils.h>
-#include <media/IOMXStore.h>
#include <media/IOMX.h>
-#include <media/MediaDefs.h>
#include <media/omx/1.0/WOmx.h>
+#include <media/stagefright/omx/1.0/OmxStore.h>
#include <media/openmax/OMX_Index.h>
#include <media/openmax/OMX_IndexExt.h>
@@ -48,10 +46,18 @@
namespace android {
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using namespace ::android::hardware::media::omx::V1_0;
+
namespace /* unnamed */ {
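+// Returns true if the hidl_string |s| starts with the given prefix.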
+bool hasPrefix(const hidl_string& s, const char* prefix) {
+ return strncmp(s.c_str(), prefix, strlen(prefix)) == 0;
+}
+
status_t queryCapabilities(
- const IOMXStore::NodeInfo& node, const char* mime, bool isEncoder,
+ const IOmxStore::NodeInfo& node, const char* mime, bool isEncoder,
MediaCodecInfo::CapabilitiesWriter* caps) {
sp<ACodec> codec = new ACodec();
status_t err = codec->queryCapabilities(
@@ -62,14 +68,13 @@
for (const auto& attribute : node.attributes) {
// All features have an int32 value except
// "feature-bitrate-modes", which has a string value.
- if ((attribute.key.compare(0, 8, "feature-") == 0) &&
- (attribute.key.compare(8, 15, "bitrate-modes")
- != 0)) {
- // If this attribute.key is a feature that is not a bitrate
- // control, add an int32 value.
+ if (hasPrefix(attribute.key, "feature-") &&
+ !hasPrefix(attribute.key, "feature-bitrate-modes")) {
+ // If this attribute.key is a feature that is not bitrate modes,
+ // add an int32 value.
caps->addDetail(
attribute.key.c_str(),
- attribute.value == "1" ? 1 : 0);
+ hasPrefix(attribute.value, "1") ? 1 : 0);
} else {
// Non-feature attributes
caps->addDetail(
@@ -85,155 +90,69 @@
}
status_t OmxInfoBuilder::buildMediaCodecList(MediaCodecListWriter* writer) {
- bool treble;
- sp<IOMX> omx;
- std::vector<IOMXStore::RoleInfo> roles;
+ // Obtain IOmxStore
+ sp<IOmxStore> omxStore = IOmxStore::getService();
+ if (omxStore == nullptr) {
+ ALOGE("Cannot find an IOmxStore service.");
+ return NO_INIT;
+ }
- treble = property_get_bool("persist.media.treble_omx", true);
- if (treble) {
- using namespace ::android::hardware::media::omx::V1_0;
- using ::android::hardware::hidl_vec;
- using ::android::hardware::hidl_string;
+ // List service attributes (global settings)
+ Status status;
+ hidl_vec<IOmxStore::RoleInfo> roles;
+ auto transStatus = omxStore->listRoles(
+ [&roles] (
+ const hidl_vec<IOmxStore::RoleInfo>& inRoleList) {
+ roles = inRoleList;
+ });
+ if (!transStatus.isOk()) {
+ ALOGE("Fail to obtain codec roles from IOmxStore.");
+ return NO_INIT;
+ }
- // Obtain IOmxStore
- sp<IOmxStore> omxStore = IOmxStore::getService();
- if (omxStore == nullptr) {
- ALOGE("Cannot connect to an IOmxStore instance.");
- return NO_INIT;
- }
-
- // List service attributes (global settings)
- Status status;
- hidl_vec<IOmxStore::ServiceAttribute> serviceAttributes;
- auto transStatus = omxStore->listServiceAttributes(
- [&status, &serviceAttributes]
- (Status inStatus, const hidl_vec<IOmxStore::ServiceAttribute>&
- inAttributes) {
- status = inStatus;
- serviceAttributes = inAttributes;
- });
- if (!transStatus.isOk()) {
- ALOGE("Fail to obtain global settings from IOmxStore.");
- return NO_INIT;
- }
- if (status != Status::OK) {
- ALOGE("IOmxStore reports parsing error.");
- return NO_INIT;
- }
- for (const auto& p : serviceAttributes) {
- writer->addGlobalSetting(
- p.key.c_str(), p.value.c_str());
- }
-
- // List roles and convert to IOMXStore's format
- transStatus = omxStore->listRoles(
- [&roles]
- (const hidl_vec<IOmxStore::RoleInfo>& inRoleList) {
- roles.reserve(inRoleList.size());
- for (const auto& inRole : inRoleList) {
- IOMXStore::RoleInfo role;
- role.role = inRole.role;
- role.type = inRole.type;
- role.isEncoder = inRole.isEncoder;
- role.preferPlatformNodes = inRole.preferPlatformNodes;
- std::vector<IOMXStore::NodeInfo>& nodes =
- role.nodes;
- nodes.reserve(inRole.nodes.size());
- for (const auto& inNode : inRole.nodes) {
- IOMXStore::NodeInfo node;
- node.name = inNode.name;
- node.owner = inNode.owner;
- std::vector<IOMXStore::Attribute>& attributes =
- node.attributes;
- attributes.reserve(inNode.attributes.size());
- for (const auto& inAttr : inNode.attributes) {
- IOMXStore::Attribute attr;
- attr.key = inAttr.key;
- attr.value = inAttr.value;
- attributes.push_back(std::move(attr));
- }
- nodes.push_back(std::move(node));
- }
- roles.push_back(std::move(role));
- }
- });
- if (!transStatus.isOk()) {
- ALOGE("Fail to obtain codec roles from IOmxStore.");
- return NO_INIT;
- }
- } else {
- // Obtain IOMXStore
- sp<IServiceManager> sm = defaultServiceManager();
- if (sm == nullptr) {
- ALOGE("Cannot obtain the default service manager.");
- return NO_INIT;
- }
- sp<IBinder> codecBinder = sm->getService(String16("media.codec"));
- if (codecBinder == nullptr) {
- ALOGE("Cannot obtain the media codec service.");
- return NO_INIT;
- }
- sp<IMediaCodecService> codecService =
- interface_cast<IMediaCodecService>(codecBinder);
- if (codecService == nullptr) {
- ALOGE("Wrong type of media codec service obtained.");
- return NO_INIT;
- }
- omx = codecService->getOMX();
- if (omx == nullptr) {
- ALOGE("Cannot connect to an IOMX instance.");
- }
- sp<IOMXStore> omxStore = codecService->getOMXStore();
- if (omxStore == nullptr) {
- ALOGE("Cannot connect to an IOMXStore instance.");
- return NO_INIT;
- }
-
- // List service attributes (global settings)
- std::vector<IOMXStore::Attribute> serviceAttributes;
- status_t status = omxStore->listServiceAttributes(&serviceAttributes);
- if (status != OK) {
- ALOGE("Fail to obtain global settings from IOMXStore.");
- return NO_INIT;
- }
- for (const auto& p : serviceAttributes) {
- writer->addGlobalSetting(
- p.key.c_str(), p.value.c_str());
- }
-
- // List roles
- status = omxStore->listRoles(&roles);
- if (status != OK) {
- ALOGE("Fail to obtain codec roles from IOMXStore.");
- return NO_INIT;
- }
+ hidl_vec<IOmxStore::ServiceAttribute> serviceAttributes;
+ transStatus = omxStore->listServiceAttributes(
+ [&status, &serviceAttributes] (
+ Status inStatus,
+ const hidl_vec<IOmxStore::ServiceAttribute>& inAttributes) {
+ status = inStatus;
+ serviceAttributes = inAttributes;
+ });
+ if (!transStatus.isOk()) {
+ ALOGE("Fail to obtain global settings from IOmxStore.");
+ return NO_INIT;
+ }
+ if (status != Status::OK) {
+ ALOGE("IOmxStore reports parsing error.");
+ return NO_INIT;
+ }
+ for (const auto& p : serviceAttributes) {
+ writer->addGlobalSetting(
+ p.key.c_str(), p.value.c_str());
}
// Convert roles to lists of codecs
- // codec name -> index into swCodecs
- std::map<std::string, std::unique_ptr<MediaCodecInfoWriter> >
- swCodecName2Info;
- // codec name -> index into hwCodecs
- std::map<std::string, std::unique_ptr<MediaCodecInfoWriter> >
- hwCodecName2Info;
- // owner name -> MediaCodecInfo
- // This map will be used to obtain the correct IOmx service(s) needed for
- // creating IOmxNode instances and querying capabilities.
- std::map<std::string, std::vector<sp<MediaCodecInfo> > >
- owner2CodecInfo;
+ // codec name -> index into swCodecs/hwCodecs
+ std::map<hidl_string, std::unique_ptr<MediaCodecInfoWriter>>
+ swCodecName2Info, hwCodecName2Info;
- for (const auto& role : roles) {
- const auto& typeName = role.type;
+ char rank[PROPERTY_VALUE_MAX];
+ uint32_t defaultRank = 0x100;
+ if (property_get("debug.stagefright.omx_default_rank", rank, nullptr)) {
+ defaultRank = std::strtoul(rank, nullptr, 10);
+ }
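+ // Every OMX codec entry gets the same rank; the 0x100 default can be
+ // overridden via the debug.stagefright.omx_default_rank property.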
+ for (const IOmxStore::RoleInfo& role : roles) {
+ const hidl_string& typeName = role.type;
bool isEncoder = role.isEncoder;
bool preferPlatformNodes = role.preferPlatformNodes;
// If preferPlatformNodes is true, hardware nodes must be added after
// platform (software) nodes. hwCodecs is used to hold hardware nodes
// that need to be added after software nodes for the same role.
- std::vector<const IOMXStore::NodeInfo*> hwCodecs;
- for (const auto& node : role.nodes) {
- const auto& nodeName = node.name;
- bool isSoftware = nodeName.compare(0, 10, "OMX.google") == 0;
+ std::vector<const IOmxStore::NodeInfo*> hwCodecs;
+ for (const IOmxStore::NodeInfo& node : role.nodes) {
+ const hidl_string& nodeName = node.name;
+ bool isSoftware = hasPrefix(nodeName, "OMX.google");
MediaCodecInfoWriter* info;
if (isSoftware) {
auto c2i = swCodecName2Info.find(nodeName);
@@ -245,6 +164,7 @@
info->setName(nodeName.c_str());
info->setOwner(node.owner.c_str());
info->setEncoder(isEncoder);
+ info->setRank(defaultRank);
} else {
// The node has been seen before. Simply retrieve the
// existing MediaCodecInfoWriter.
@@ -261,6 +181,7 @@
info->setName(nodeName.c_str());
info->setOwner(node.owner.c_str());
info->setEncoder(isEncoder);
+ info->setRank(defaultRank);
} else {
// If preferPlatformNodes is true, this node must be
// added after all software nodes.
@@ -287,9 +208,9 @@
// added in the loop above, but rather saved in hwCodecs. They are
// going to be added here.
if (preferPlatformNodes) {
- for (const auto& node : hwCodecs) {
+ for (const IOmxStore::NodeInfo *node : hwCodecs) {
MediaCodecInfoWriter* info;
- const auto& nodeName = node->name;
+ const hidl_string& nodeName = node->name;
auto c2i = hwCodecName2Info.find(nodeName);
if (c2i == hwCodecName2Info.end()) {
// Create a new MediaCodecInfo for a new node.
@@ -299,6 +220,7 @@
info->setName(nodeName.c_str());
info->setOwner(node->owner.c_str());
info->setEncoder(isEncoder);
+ info->setRank(defaultRank);
} else {
// The node has been seen before. Simply retrieve the
// existing MediaCodecInfoWriter.
diff --git a/media/libstagefright/RemoteMediaExtractor.cpp b/media/libstagefright/RemoteMediaExtractor.cpp
new file mode 100644
index 0000000..9d2c42b
--- /dev/null
+++ b/media/libstagefright/RemoteMediaExtractor.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RemoteMediaExtractor"
+#include <utils/Log.h>
+
+#include <media/stagefright/InterfaceUtils.h>
+#include <media/MediaAnalyticsItem.h>
+#include <media/MediaSource.h>
+#include <media/stagefright/RemoteMediaExtractor.h>
+
+// still doing some on/off toggling here.
+#define MEDIA_LOG 1
+
+namespace android {
+
+// key for media statistics
+static const char *kKeyExtractor = "extractor";
+
+// attrs for media statistics
+// NB: these are matched with public Java API constants defined
+// in frameworks/base/media/java/android/media/MediaExtractor.java
+// These must be kept synchronized with the constants there.
+static const char *kExtractorFormat = "android.media.mediaextractor.fmt";
+static const char *kExtractorMime = "android.media.mediaextractor.mime";
+static const char *kExtractorTracks = "android.media.mediaextractor.ntrk";
+
+RemoteMediaExtractor::RemoteMediaExtractor(
+ MediaExtractor *extractor,
+ const sp<DataSource> &source,
+ const sp<RefBase> &plugin)
+ :mExtractor(extractor),
+ mSource(source),
+ mExtractorPlugin(plugin) {
+
+ mAnalyticsItem = nullptr;
+ if (MEDIA_LOG) {
+ mAnalyticsItem = new MediaAnalyticsItem(kKeyExtractor);
+
+ // track the container format (mpeg, aac, wvm, etc)
+ size_t ntracks = extractor->countTracks();
+ mAnalyticsItem->setCString(kExtractorFormat, extractor->name());
+ // tracks (size_t)
+ mAnalyticsItem->setInt32(kExtractorTracks, ntracks);
+ // metadata
+ MetaDataBase pMetaData;
+ if (extractor->getMetaData(pMetaData) == OK) {
+ String8 xx = pMetaData.toString();
+ // 'titl' -- but this verges into PII
+ // 'mime'
+ const char *mime = nullptr;
+ if (pMetaData.findCString(kKeyMIMEType, &mime)) {
+ mAnalyticsItem->setCString(kExtractorMime, mime);
+ }
+ // what else is interesting and not already available?
+ }
+ }
+}
+
+RemoteMediaExtractor::~RemoteMediaExtractor() {
+ delete mExtractor;
+ mSource->close();
+ mSource.clear();
+ mExtractorPlugin = nullptr;
+ // log the current record, provided it has some information worth recording
+ if (MEDIA_LOG) {
+ if (mAnalyticsItem != nullptr) {
+ if (mAnalyticsItem->count() > 0) {
+ mAnalyticsItem->selfrecord();
+ }
+ }
+ }
+ if (mAnalyticsItem != nullptr) {
+ delete mAnalyticsItem;
+ mAnalyticsItem = nullptr;
+ }
+}
+
+size_t RemoteMediaExtractor::countTracks() {
+ return mExtractor->countTracks();
+}
+
+sp<IMediaSource> RemoteMediaExtractor::getTrack(size_t index) {
+ MediaTrack *source = mExtractor->getTrack(index);
+ return (source == nullptr)
+ ? nullptr : CreateIMediaSourceFromMediaSourceBase(this, source, mExtractorPlugin);
+}
+
+sp<MetaData> RemoteMediaExtractor::getTrackMetaData(size_t index, uint32_t flags) {
+ sp<MetaData> meta = new MetaData();
+ if (mExtractor->getTrackMetaData(*meta.get(), index, flags) == OK) {
+ return meta;
+ }
+ return nullptr;
+}
+
+sp<MetaData> RemoteMediaExtractor::getMetaData() {
+ sp<MetaData> meta = new MetaData();
+ if (mExtractor->getMetaData(*meta.get()) == OK) {
+ return meta;
+ }
+ return nullptr;
+}
+
+status_t RemoteMediaExtractor::getMetrics(Parcel *reply) {
+ if (mAnalyticsItem == nullptr || reply == nullptr) {
+ return UNKNOWN_ERROR;
+ }
+
+ mAnalyticsItem->writeToParcel(reply);
+ return OK;
+}
+
+uint32_t RemoteMediaExtractor::flags() const {
+ return mExtractor->flags();
+}
+
+status_t RemoteMediaExtractor::setMediaCas(const HInterfaceToken &casToken) {
+ return mExtractor->setMediaCas((uint8_t*)casToken.data(), casToken.size());
+}
+
+const char * RemoteMediaExtractor::name() {
+ return mExtractor->name();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// static
+sp<IMediaExtractor> RemoteMediaExtractor::wrap(
+ MediaExtractor *extractor,
+ const sp<DataSource> &source,
+ const sp<RefBase> &plugin) {
+ if (extractor == nullptr) {
+ return nullptr;
+ }
+ return new RemoteMediaExtractor(extractor, source, plugin);
+}
+
+} // namespace android
diff --git a/media/libstagefright/RemoteMediaSource.cpp b/media/libstagefright/RemoteMediaSource.cpp
new file mode 100644
index 0000000..d07afec
--- /dev/null
+++ b/media/libstagefright/RemoteMediaSource.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/RemoteMediaExtractor.h>
+#include <media/stagefright/RemoteMediaSource.h>
+#include <media/IMediaSource.h>
+
+namespace android {
+
+RemoteMediaSource::RemoteMediaSource(
+ const sp<RemoteMediaExtractor> &extractor,
+ MediaTrack *source,
+ const sp<RefBase> &plugin)
+ : mExtractor(extractor),
+ mSource(source),
+ mExtractorPlugin(plugin) {}
+
+RemoteMediaSource::~RemoteMediaSource() {
+ delete mSource;
+ mExtractorPlugin = nullptr;
+}
+
+status_t RemoteMediaSource::start(MetaData *params) {
+ return mSource->start(params);
+}
+
+status_t RemoteMediaSource::stop() {
+ return mSource->stop();
+}
+
+sp<MetaData> RemoteMediaSource::getFormat() {
+ sp<MetaData> meta = new MetaData();
+ if (mSource->getFormat(*meta.get()) == OK) {
+ return meta;
+ }
+ return nullptr;
+}
+
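+// Reads are forwarded unchanged to the wrapped MediaTrack.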
+status_t RemoteMediaSource::read(
+ MediaBufferBase **buffer, const MediaSource::ReadOptions *options) {
+ return mSource->read(buffer, reinterpret_cast<const MediaSource::ReadOptions*>(options));
+}
+
+status_t RemoteMediaSource::pause() {
+ return ERROR_UNSUPPORTED;
+}
+
+status_t RemoteMediaSource::setStopTimeUs(int64_t /* stopTimeUs */) {
+ return ERROR_UNSUPPORTED;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// static
+sp<IMediaSource> RemoteMediaSource::wrap(
+ const sp<RemoteMediaExtractor> &extractor,
+ MediaTrack *source, const sp<RefBase> &plugin) {
+ if (source == nullptr) {
+ return nullptr;
+ }
+ return new RemoteMediaSource(extractor, source, plugin);
+}
+
+} // namespace android
diff --git a/media/libstagefright/SampleIterator.cpp b/media/libstagefright/SampleIterator.cpp
deleted file mode 100644
index 7a51027..0000000
--- a/media/libstagefright/SampleIterator.cpp
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "SampleIterator"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include "include/SampleIterator.h"
-
-#include <arpa/inet.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/Utils.h>
-
-#include "include/SampleTable.h"
-
-namespace android {
-
-SampleIterator::SampleIterator(SampleTable *table)
- : mTable(table),
- mInitialized(false),
- mTimeToSampleIndex(0),
- mTTSSampleIndex(0),
- mTTSSampleTime(0),
- mTTSCount(0),
- mTTSDuration(0) {
- reset();
-}
-
-void SampleIterator::reset() {
- mSampleToChunkIndex = 0;
- mFirstChunk = 0;
- mFirstChunkSampleIndex = 0;
- mStopChunk = 0;
- mStopChunkSampleIndex = 0;
- mSamplesPerChunk = 0;
- mChunkDesc = 0;
-}
-
-status_t SampleIterator::seekTo(uint32_t sampleIndex) {
- ALOGV("seekTo(%d)", sampleIndex);
-
- if (sampleIndex >= mTable->mNumSampleSizes) {
- return ERROR_END_OF_STREAM;
- }
-
- if (mTable->mSampleToChunkOffset < 0
- || mTable->mChunkOffsetOffset < 0
- || mTable->mSampleSizeOffset < 0
- || mTable->mTimeToSampleCount == 0) {
-
- return ERROR_MALFORMED;
- }
-
- if (mInitialized && mCurrentSampleIndex == sampleIndex) {
- return OK;
- }
-
- if (!mInitialized || sampleIndex < mFirstChunkSampleIndex) {
- reset();
- }
-
- if (sampleIndex >= mStopChunkSampleIndex) {
- status_t err;
- if ((err = findChunkRange(sampleIndex)) != OK) {
- ALOGE("findChunkRange failed");
- return err;
- }
- }
-
- CHECK(sampleIndex < mStopChunkSampleIndex);
-
- if (mSamplesPerChunk == 0) {
- ALOGE("b/22802344");
- return ERROR_MALFORMED;
- }
-
- uint32_t chunk =
- (sampleIndex - mFirstChunkSampleIndex) / mSamplesPerChunk
- + mFirstChunk;
-
- if (!mInitialized || chunk != mCurrentChunkIndex) {
- status_t err;
- if ((err = getChunkOffset(chunk, &mCurrentChunkOffset)) != OK) {
- ALOGE("getChunkOffset return error");
- return err;
- }
-
- mCurrentChunkSampleSizes.clear();
-
- uint32_t firstChunkSampleIndex =
- mFirstChunkSampleIndex
- + mSamplesPerChunk * (chunk - mFirstChunk);
-
- for (uint32_t i = 0; i < mSamplesPerChunk; ++i) {
- size_t sampleSize;
- if ((err = getSampleSizeDirect(
- firstChunkSampleIndex + i, &sampleSize)) != OK) {
- ALOGE("getSampleSizeDirect return error");
- mCurrentChunkSampleSizes.clear();
- return err;
- }
-
- mCurrentChunkSampleSizes.push(sampleSize);
- }
-
- mCurrentChunkIndex = chunk;
- }
-
- uint32_t chunkRelativeSampleIndex =
- (sampleIndex - mFirstChunkSampleIndex) % mSamplesPerChunk;
-
- mCurrentSampleOffset = mCurrentChunkOffset;
- for (uint32_t i = 0; i < chunkRelativeSampleIndex; ++i) {
- mCurrentSampleOffset += mCurrentChunkSampleSizes[i];
- }
-
- mCurrentSampleSize = mCurrentChunkSampleSizes[chunkRelativeSampleIndex];
- if (sampleIndex < mTTSSampleIndex) {
- mTimeToSampleIndex = 0;
- mTTSSampleIndex = 0;
- mTTSSampleTime = 0;
- mTTSCount = 0;
- mTTSDuration = 0;
- }
-
- status_t err;
- if ((err = findSampleTimeAndDuration(
- sampleIndex, &mCurrentSampleTime, &mCurrentSampleDuration)) != OK) {
- ALOGE("findSampleTime return error");
- return err;
- }
-
- mCurrentSampleIndex = sampleIndex;
-
- mInitialized = true;
-
- return OK;
-}
-
-status_t SampleIterator::findChunkRange(uint32_t sampleIndex) {
- CHECK(sampleIndex >= mFirstChunkSampleIndex);
-
- while (sampleIndex >= mStopChunkSampleIndex) {
- if (mSampleToChunkIndex == mTable->mNumSampleToChunkOffsets) {
- return ERROR_OUT_OF_RANGE;
- }
-
- mFirstChunkSampleIndex = mStopChunkSampleIndex;
-
- const SampleTable::SampleToChunkEntry *entry =
- &mTable->mSampleToChunkEntries[mSampleToChunkIndex];
-
- mFirstChunk = entry->startChunk;
- mSamplesPerChunk = entry->samplesPerChunk;
- mChunkDesc = entry->chunkDesc;
-
- if (mSampleToChunkIndex + 1 < mTable->mNumSampleToChunkOffsets) {
- mStopChunk = entry[1].startChunk;
-
- if (mSamplesPerChunk == 0 || mStopChunk < mFirstChunk ||
- (mStopChunk - mFirstChunk) > UINT32_MAX / mSamplesPerChunk ||
- ((mStopChunk - mFirstChunk) * mSamplesPerChunk >
- UINT32_MAX - mFirstChunkSampleIndex)) {
-
- return ERROR_OUT_OF_RANGE;
- }
- mStopChunkSampleIndex =
- mFirstChunkSampleIndex
- + (mStopChunk - mFirstChunk) * mSamplesPerChunk;
- } else {
- mStopChunk = 0xffffffff;
- mStopChunkSampleIndex = 0xffffffff;
- }
-
- ++mSampleToChunkIndex;
- }
-
- return OK;
-}
-
-status_t SampleIterator::getChunkOffset(uint32_t chunk, off64_t *offset) {
- *offset = 0;
-
- if (chunk >= mTable->mNumChunkOffsets) {
- return ERROR_OUT_OF_RANGE;
- }
-
- if (mTable->mChunkOffsetType == SampleTable::kChunkOffsetType32) {
- uint32_t offset32;
-
- if (mTable->mDataSource->readAt(
- mTable->mChunkOffsetOffset + 8 + 4 * chunk,
- &offset32,
- sizeof(offset32)) < (ssize_t)sizeof(offset32)) {
- return ERROR_IO;
- }
-
- *offset = ntohl(offset32);
- } else {
- CHECK_EQ(mTable->mChunkOffsetType, SampleTable::kChunkOffsetType64);
-
- uint64_t offset64;
- if (mTable->mDataSource->readAt(
- mTable->mChunkOffsetOffset + 8 + 8 * chunk,
- &offset64,
- sizeof(offset64)) < (ssize_t)sizeof(offset64)) {
- return ERROR_IO;
- }
-
- *offset = ntoh64(offset64);
- }
-
- return OK;
-}
-
-status_t SampleIterator::getSampleSizeDirect(
- uint32_t sampleIndex, size_t *size) {
- *size = 0;
-
- if (sampleIndex >= mTable->mNumSampleSizes) {
- return ERROR_OUT_OF_RANGE;
- }
-
- if (mTable->mDefaultSampleSize > 0) {
- *size = mTable->mDefaultSampleSize;
- return OK;
- }
-
- switch (mTable->mSampleSizeFieldSize) {
- case 32:
- {
- uint32_t x;
- if (mTable->mDataSource->readAt(
- mTable->mSampleSizeOffset + 12 + 4 * sampleIndex,
- &x, sizeof(x)) < (ssize_t)sizeof(x)) {
- return ERROR_IO;
- }
-
- *size = ntohl(x);
- break;
- }
-
- case 16:
- {
- uint16_t x;
- if (mTable->mDataSource->readAt(
- mTable->mSampleSizeOffset + 12 + 2 * sampleIndex,
- &x, sizeof(x)) < (ssize_t)sizeof(x)) {
- return ERROR_IO;
- }
-
- *size = ntohs(x);
- break;
- }
-
- case 8:
- {
- uint8_t x;
- if (mTable->mDataSource->readAt(
- mTable->mSampleSizeOffset + 12 + sampleIndex,
- &x, sizeof(x)) < (ssize_t)sizeof(x)) {
- return ERROR_IO;
- }
-
- *size = x;
- break;
- }
-
- default:
- {
- CHECK_EQ(mTable->mSampleSizeFieldSize, 4u);
-
- uint8_t x;
- if (mTable->mDataSource->readAt(
- mTable->mSampleSizeOffset + 12 + sampleIndex / 2,
- &x, sizeof(x)) < (ssize_t)sizeof(x)) {
- return ERROR_IO;
- }
-
- *size = (sampleIndex & 1) ? x & 0x0f : x >> 4;
- break;
- }
- }
-
- return OK;
-}
-
-status_t SampleIterator::findSampleTimeAndDuration(
- uint32_t sampleIndex, uint32_t *time, uint32_t *duration) {
- if (sampleIndex >= mTable->mNumSampleSizes) {
- return ERROR_OUT_OF_RANGE;
- }
-
- while (true) {
- if (mTTSSampleIndex > UINT32_MAX - mTTSCount) {
- return ERROR_OUT_OF_RANGE;
- }
- if(sampleIndex < mTTSSampleIndex + mTTSCount) {
- break;
- }
- if (mTimeToSampleIndex == mTable->mTimeToSampleCount ||
- (mTTSDuration != 0 && mTTSCount > UINT32_MAX / mTTSDuration) ||
- mTTSSampleTime > UINT32_MAX - (mTTSCount * mTTSDuration)) {
- return ERROR_OUT_OF_RANGE;
- }
-
- mTTSSampleIndex += mTTSCount;
- mTTSSampleTime += mTTSCount * mTTSDuration;
-
- mTTSCount = mTable->mTimeToSample[2 * mTimeToSampleIndex];
- mTTSDuration = mTable->mTimeToSample[2 * mTimeToSampleIndex + 1];
-
- ++mTimeToSampleIndex;
- }
-
- *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
-
- int32_t offset = mTable->getCompositionTimeOffset(sampleIndex);
- if ((offset < 0 && *time < (offset == INT32_MIN ?
- INT32_MAX : uint32_t(-offset))) ||
- (offset > 0 && *time > UINT32_MAX - offset)) {
- ALOGE("%u + %d would overflow", *time, offset);
- return ERROR_OUT_OF_RANGE;
- }
- if (offset > 0) {
- *time += offset;
- } else {
- *time -= (offset == INT32_MIN ? INT32_MAX : (-offset));
- }
-
- *duration = mTTSDuration;
-
- return OK;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
deleted file mode 100644
index 1d2a931..0000000
--- a/media/libstagefright/SampleTable.cpp
+++ /dev/null
@@ -1,1000 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "SampleTable"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <limits>
-
-#include "include/SampleTable.h"
-#include "include/SampleIterator.h"
-
-#include <arpa/inet.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/Utils.h>
-
-/* TODO: remove after being merged into other branches */
-#ifndef UINT32_MAX
-#define UINT32_MAX (4294967295U)
-#endif
-
-namespace android {
-
-// static
-const uint32_t SampleTable::kChunkOffsetType32 = FOURCC('s', 't', 'c', 'o');
-// static
-const uint32_t SampleTable::kChunkOffsetType64 = FOURCC('c', 'o', '6', '4');
-// static
-const uint32_t SampleTable::kSampleSizeType32 = FOURCC('s', 't', 's', 'z');
-// static
-const uint32_t SampleTable::kSampleSizeTypeCompact = FOURCC('s', 't', 'z', '2');
-
-////////////////////////////////////////////////////////////////////////////////
-
-const off64_t kMaxOffset = std::numeric_limits<off64_t>::max();
-
-struct SampleTable::CompositionDeltaLookup {
- CompositionDeltaLookup();
-
- void setEntries(
- const int32_t *deltaEntries, size_t numDeltaEntries);
-
- int32_t getCompositionTimeOffset(uint32_t sampleIndex);
-
-private:
- Mutex mLock;
-
- const int32_t *mDeltaEntries;
- size_t mNumDeltaEntries;
-
- size_t mCurrentDeltaEntry;
- size_t mCurrentEntrySampleIndex;
-
- DISALLOW_EVIL_CONSTRUCTORS(CompositionDeltaLookup);
-};
-
-SampleTable::CompositionDeltaLookup::CompositionDeltaLookup()
- : mDeltaEntries(NULL),
- mNumDeltaEntries(0),
- mCurrentDeltaEntry(0),
- mCurrentEntrySampleIndex(0) {
-}
-
-void SampleTable::CompositionDeltaLookup::setEntries(
- const int32_t *deltaEntries, size_t numDeltaEntries) {
- Mutex::Autolock autolock(mLock);
-
- mDeltaEntries = deltaEntries;
- mNumDeltaEntries = numDeltaEntries;
- mCurrentDeltaEntry = 0;
- mCurrentEntrySampleIndex = 0;
-}
-
-int32_t SampleTable::CompositionDeltaLookup::getCompositionTimeOffset(
- uint32_t sampleIndex) {
- Mutex::Autolock autolock(mLock);
-
- if (mDeltaEntries == NULL) {
- return 0;
- }
-
- if (sampleIndex < mCurrentEntrySampleIndex) {
- mCurrentDeltaEntry = 0;
- mCurrentEntrySampleIndex = 0;
- }
-
- while (mCurrentDeltaEntry < mNumDeltaEntries) {
- uint32_t sampleCount = mDeltaEntries[2 * mCurrentDeltaEntry];
- if (sampleIndex < mCurrentEntrySampleIndex + sampleCount) {
- return mDeltaEntries[2 * mCurrentDeltaEntry + 1];
- }
-
- mCurrentEntrySampleIndex += sampleCount;
- ++mCurrentDeltaEntry;
- }
-
- return 0;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-SampleTable::SampleTable(const sp<DataSource> &source)
- : mDataSource(source),
- mChunkOffsetOffset(-1),
- mChunkOffsetType(0),
- mNumChunkOffsets(0),
- mSampleToChunkOffset(-1),
- mNumSampleToChunkOffsets(0),
- mSampleSizeOffset(-1),
- mSampleSizeFieldSize(0),
- mDefaultSampleSize(0),
- mNumSampleSizes(0),
- mHasTimeToSample(false),
- mTimeToSampleCount(0),
- mTimeToSample(NULL),
- mSampleTimeEntries(NULL),
- mCompositionTimeDeltaEntries(NULL),
- mNumCompositionTimeDeltaEntries(0),
- mCompositionDeltaLookup(new CompositionDeltaLookup),
- mSyncSampleOffset(-1),
- mNumSyncSamples(0),
- mSyncSamples(NULL),
- mLastSyncSampleIndex(0),
- mSampleToChunkEntries(NULL),
- mTotalSize(0) {
- mSampleIterator = new SampleIterator(this);
-}
-
-SampleTable::~SampleTable() {
- delete[] mSampleToChunkEntries;
- mSampleToChunkEntries = NULL;
-
- delete[] mSyncSamples;
- mSyncSamples = NULL;
-
- delete[] mTimeToSample;
- mTimeToSample = NULL;
-
- delete mCompositionDeltaLookup;
- mCompositionDeltaLookup = NULL;
-
- delete[] mCompositionTimeDeltaEntries;
- mCompositionTimeDeltaEntries = NULL;
-
- delete[] mSampleTimeEntries;
- mSampleTimeEntries = NULL;
-
- delete mSampleIterator;
- mSampleIterator = NULL;
-}
-
-bool SampleTable::isValid() const {
- return mChunkOffsetOffset >= 0
- && mSampleToChunkOffset >= 0
- && mSampleSizeOffset >= 0
- && mHasTimeToSample;
-}
-
-status_t SampleTable::setChunkOffsetParams(
- uint32_t type, off64_t data_offset, size_t data_size) {
- if (mChunkOffsetOffset >= 0) {
- return ERROR_MALFORMED;
- }
-
- CHECK(type == kChunkOffsetType32 || type == kChunkOffsetType64);
-
- mChunkOffsetOffset = data_offset;
- mChunkOffsetType = type;
-
- if (data_size < 8) {
- return ERROR_MALFORMED;
- }
-
- uint8_t header[8];
- if (mDataSource->readAt(
- data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
- return ERROR_IO;
- }
-
- if (U32_AT(header) != 0) {
- // Expected version = 0, flags = 0.
- return ERROR_MALFORMED;
- }
-
- mNumChunkOffsets = U32_AT(&header[4]);
-
- if (mChunkOffsetType == kChunkOffsetType32) {
- if ((data_size - 8) / 4 < mNumChunkOffsets) {
- return ERROR_MALFORMED;
- }
- } else {
- if ((data_size - 8) / 8 < mNumChunkOffsets) {
- return ERROR_MALFORMED;
- }
- }
-
- return OK;
-}
-
-status_t SampleTable::setSampleToChunkParams(
- off64_t data_offset, size_t data_size) {
- if (mSampleToChunkOffset >= 0) {
- // already set
- return ERROR_MALFORMED;
- }
-
- if (data_offset < 0) {
- return ERROR_MALFORMED;
- }
-
- mSampleToChunkOffset = data_offset;
-
- if (data_size < 8) {
- return ERROR_MALFORMED;
- }
-
- uint8_t header[8];
- if (mDataSource->readAt(
- data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
- return ERROR_IO;
- }
-
- if (U32_AT(header) != 0) {
- // Expected version = 0, flags = 0.
- return ERROR_MALFORMED;
- }
-
- mNumSampleToChunkOffsets = U32_AT(&header[4]);
-
- if ((data_size - 8) / sizeof(SampleToChunkEntry) < mNumSampleToChunkOffsets) {
- return ERROR_MALFORMED;
- }
-
- if ((uint64_t)kMaxTotalSize / sizeof(SampleToChunkEntry) <=
- (uint64_t)mNumSampleToChunkOffsets) {
- ALOGE("Sample-to-chunk table size too large.");
- return ERROR_OUT_OF_RANGE;
- }
-
- mTotalSize += (uint64_t)mNumSampleToChunkOffsets *
- sizeof(SampleToChunkEntry);
- if (mTotalSize > kMaxTotalSize) {
- ALOGE("Sample-to-chunk table size would make sample table too large.\n"
- " Requested sample-to-chunk table size = %llu\n"
- " Eventual sample table size >= %llu\n"
- " Allowed sample table size = %llu\n",
- (unsigned long long)mNumSampleToChunkOffsets *
- sizeof(SampleToChunkEntry),
- (unsigned long long)mTotalSize,
- (unsigned long long)kMaxTotalSize);
- return ERROR_OUT_OF_RANGE;
- }
-
- mSampleToChunkEntries =
- new (std::nothrow) SampleToChunkEntry[mNumSampleToChunkOffsets];
- if (!mSampleToChunkEntries) {
- ALOGE("Cannot allocate sample-to-chunk table with %llu entries.",
- (unsigned long long)mNumSampleToChunkOffsets);
- return ERROR_OUT_OF_RANGE;
- }
-
- if (mNumSampleToChunkOffsets == 0) {
- return OK;
- }
-
- if ((off64_t)(kMaxOffset - 8 -
- ((mNumSampleToChunkOffsets - 1) * sizeof(SampleToChunkEntry)))
- < mSampleToChunkOffset) {
- return ERROR_MALFORMED;
- }
-
- for (uint32_t i = 0; i < mNumSampleToChunkOffsets; ++i) {
- uint8_t buffer[sizeof(SampleToChunkEntry)];
-
- if (mDataSource->readAt(
- mSampleToChunkOffset + 8 + i * sizeof(SampleToChunkEntry),
- buffer,
- sizeof(buffer))
- != (ssize_t)sizeof(buffer)) {
- return ERROR_IO;
- }
- // chunk index is 1 based in the spec.
- if (U32_AT(buffer) < 1) {
- ALOGE("b/23534160");
- return ERROR_OUT_OF_RANGE;
- }
-
- // We want the chunk index to be 0-based.
- mSampleToChunkEntries[i].startChunk = U32_AT(buffer) - 1;
- mSampleToChunkEntries[i].samplesPerChunk = U32_AT(&buffer[4]);
- mSampleToChunkEntries[i].chunkDesc = U32_AT(&buffer[8]);
- }
-
- return OK;
-}
-
-status_t SampleTable::setSampleSizeParams(
- uint32_t type, off64_t data_offset, size_t data_size) {
- if (mSampleSizeOffset >= 0) {
- return ERROR_MALFORMED;
- }
-
- CHECK(type == kSampleSizeType32 || type == kSampleSizeTypeCompact);
-
- mSampleSizeOffset = data_offset;
-
- if (data_size < 12) {
- return ERROR_MALFORMED;
- }
-
- uint8_t header[12];
- if (mDataSource->readAt(
- data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
- return ERROR_IO;
- }
-
- if (U32_AT(header) != 0) {
- // Expected version = 0, flags = 0.
- return ERROR_MALFORMED;
- }
-
- mDefaultSampleSize = U32_AT(&header[4]);
- mNumSampleSizes = U32_AT(&header[8]);
- if (mNumSampleSizes > (UINT32_MAX - 12) / 16) {
- ALOGE("b/23247055, mNumSampleSizes(%u)", mNumSampleSizes);
- return ERROR_MALFORMED;
- }
-
- if (type == kSampleSizeType32) {
- mSampleSizeFieldSize = 32;
-
- if (mDefaultSampleSize != 0) {
- return OK;
- }
-
- if (data_size < 12 + mNumSampleSizes * 4) {
- return ERROR_MALFORMED;
- }
- } else {
- if ((mDefaultSampleSize & 0xffffff00) != 0) {
- // The high 24 bits are reserved and must be 0.
- return ERROR_MALFORMED;
- }
-
- mSampleSizeFieldSize = mDefaultSampleSize & 0xff;
- mDefaultSampleSize = 0;
-
- if (mSampleSizeFieldSize != 4 && mSampleSizeFieldSize != 8
- && mSampleSizeFieldSize != 16) {
- return ERROR_MALFORMED;
- }
-
- if (data_size < 12 + (mNumSampleSizes * mSampleSizeFieldSize + 4) / 8) {
- return ERROR_MALFORMED;
- }
- }
-
- return OK;
-}
-
-status_t SampleTable::setTimeToSampleParams(
- off64_t data_offset, size_t data_size) {
- if (mHasTimeToSample || data_size < 8) {
- return ERROR_MALFORMED;
- }
-
- uint8_t header[8];
- if (mDataSource->readAt(
- data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
- return ERROR_IO;
- }
-
- if (U32_AT(header) != 0) {
- // Expected version = 0, flags = 0.
- return ERROR_MALFORMED;
- }
-
- mTimeToSampleCount = U32_AT(&header[4]);
- if (mTimeToSampleCount > UINT32_MAX / (2 * sizeof(uint32_t))) {
- // Choose this bound because
- // 1) 2 * sizeof(uint32_t) is the amount of memory needed for one
- // time-to-sample entry in the time-to-sample table.
- // 2) mTimeToSampleCount is the number of entries of the time-to-sample
- // table.
- // 3) We hope that the table size does not exceed UINT32_MAX.
- ALOGE("Time-to-sample table size too large.");
- return ERROR_OUT_OF_RANGE;
- }
-
- // Note: At this point, we know that mTimeToSampleCount * 2 will not
- // overflow because of the above condition.
-
- uint64_t allocSize = (uint64_t)mTimeToSampleCount * 2 * sizeof(uint32_t);
- mTotalSize += allocSize;
- if (mTotalSize > kMaxTotalSize) {
- ALOGE("Time-to-sample table size would make sample table too large.\n"
- " Requested time-to-sample table size = %llu\n"
- " Eventual sample table size >= %llu\n"
- " Allowed sample table size = %llu\n",
- (unsigned long long)allocSize,
- (unsigned long long)mTotalSize,
- (unsigned long long)kMaxTotalSize);
- return ERROR_OUT_OF_RANGE;
- }
-
- mTimeToSample = new (std::nothrow) uint32_t[mTimeToSampleCount * 2];
- if (!mTimeToSample) {
- ALOGE("Cannot allocate time-to-sample table with %llu entries.",
- (unsigned long long)mTimeToSampleCount);
- return ERROR_OUT_OF_RANGE;
- }
-
- if (mDataSource->readAt(data_offset + 8, mTimeToSample,
- (size_t)allocSize) < (ssize_t)allocSize) {
- ALOGE("Incomplete data read for time-to-sample table.");
- return ERROR_IO;
- }
-
- for (size_t i = 0; i < mTimeToSampleCount * 2; ++i) {
- mTimeToSample[i] = ntohl(mTimeToSample[i]);
- }
-
- mHasTimeToSample = true;
- return OK;
-}
-
-// NOTE: per 14496-12, version 0 ctts contains unsigned values, while version 1
-// contains signed values, however some software creates version 0 files that
-// contain signed values, so we're always treating the values as signed,
-// regardless of version.
-status_t SampleTable::setCompositionTimeToSampleParams(
- off64_t data_offset, size_t data_size) {
- ALOGI("There are reordered frames present.");
-
- if (mCompositionTimeDeltaEntries != NULL || data_size < 8) {
- return ERROR_MALFORMED;
- }
-
- uint8_t header[8];
- if (mDataSource->readAt(
- data_offset, header, sizeof(header))
- < (ssize_t)sizeof(header)) {
- return ERROR_IO;
- }
-
- uint32_t flags = U32_AT(header);
- uint32_t version = flags >> 24;
- flags &= 0xffffff;
-
- if ((version != 0 && version != 1) || flags != 0) {
- // Expected version = 0 or 1, flags = 0.
- return ERROR_MALFORMED;
- }
-
- size_t numEntries = U32_AT(&header[4]);
-
- if (((SIZE_MAX / 8) - 1 < numEntries) || (data_size != (numEntries + 1) * 8)) {
- return ERROR_MALFORMED;
- }
-
- mNumCompositionTimeDeltaEntries = numEntries;
- uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(int32_t);
- if (allocSize > kMaxTotalSize) {
- ALOGE("Composition-time-to-sample table size too large.");
- return ERROR_OUT_OF_RANGE;
- }
-
- mTotalSize += allocSize;
- if (mTotalSize > kMaxTotalSize) {
- ALOGE("Composition-time-to-sample table would make sample table too large.\n"
- " Requested composition-time-to-sample table size = %llu\n"
- " Eventual sample table size >= %llu\n"
- " Allowed sample table size = %llu\n",
- (unsigned long long)allocSize,
- (unsigned long long)mTotalSize,
- (unsigned long long)kMaxTotalSize);
- return ERROR_OUT_OF_RANGE;
- }
-
- mCompositionTimeDeltaEntries = new (std::nothrow) int32_t[2 * numEntries];
- if (!mCompositionTimeDeltaEntries) {
- ALOGE("Cannot allocate composition-time-to-sample table with %llu "
- "entries.", (unsigned long long)numEntries);
- return ERROR_OUT_OF_RANGE;
- }
-
- if (mDataSource->readAt(data_offset + 8, mCompositionTimeDeltaEntries,
- (size_t)allocSize) < (ssize_t)allocSize) {
- delete[] mCompositionTimeDeltaEntries;
- mCompositionTimeDeltaEntries = NULL;
-
- return ERROR_IO;
- }
-
- for (size_t i = 0; i < 2 * numEntries; ++i) {
- mCompositionTimeDeltaEntries[i] = ntohl(mCompositionTimeDeltaEntries[i]);
- }
-
- mCompositionDeltaLookup->setEntries(
- mCompositionTimeDeltaEntries, mNumCompositionTimeDeltaEntries);
-
- return OK;
-}
-
-status_t SampleTable::setSyncSampleParams(off64_t data_offset, size_t data_size) {
- if (mSyncSampleOffset >= 0 || data_size < 8) {
- return ERROR_MALFORMED;
- }
-
- uint8_t header[8];
- if (mDataSource->readAt(
- data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) {
- return ERROR_IO;
- }
-
- if (U32_AT(header) != 0) {
- // Expected version = 0, flags = 0.
- return ERROR_MALFORMED;
- }
-
- uint32_t numSyncSamples = U32_AT(&header[4]);
-
- if (numSyncSamples < 2) {
- ALOGV("Table of sync samples is empty or has only a single entry!");
- }
-
- uint64_t allocSize = (uint64_t)numSyncSamples * sizeof(uint32_t);
- if (allocSize > kMaxTotalSize) {
- ALOGE("Sync sample table size too large.");
- return ERROR_OUT_OF_RANGE;
- }
-
- mTotalSize += allocSize;
- if (mTotalSize > kMaxTotalSize) {
- ALOGE("Sync sample table size would make sample table too large.\n"
- " Requested sync sample table size = %llu\n"
- " Eventual sample table size >= %llu\n"
- " Allowed sample table size = %llu\n",
- (unsigned long long)allocSize,
- (unsigned long long)mTotalSize,
- (unsigned long long)kMaxTotalSize);
- return ERROR_OUT_OF_RANGE;
- }
-
- mSyncSamples = new (std::nothrow) uint32_t[numSyncSamples];
- if (!mSyncSamples) {
- ALOGE("Cannot allocate sync sample table with %llu entries.",
- (unsigned long long)numSyncSamples);
- return ERROR_OUT_OF_RANGE;
- }
-
- if (mDataSource->readAt(data_offset + 8, mSyncSamples,
- (size_t)allocSize) != (ssize_t)allocSize) {
- delete[] mSyncSamples;
- mSyncSamples = NULL;
- return ERROR_IO;
- }
-
- for (size_t i = 0; i < numSyncSamples; ++i) {
- if (mSyncSamples[i] == 0) {
- ALOGE("b/32423862, unexpected zero value in stss");
- continue;
- }
- mSyncSamples[i] = ntohl(mSyncSamples[i]) - 1;
- }
-
- mSyncSampleOffset = data_offset;
- mNumSyncSamples = numSyncSamples;
-
- return OK;
-}
-
-uint32_t SampleTable::countChunkOffsets() const {
- return mNumChunkOffsets;
-}
-
-uint32_t SampleTable::countSamples() const {
- return mNumSampleSizes;
-}
-
-status_t SampleTable::getMaxSampleSize(size_t *max_size) {
- Mutex::Autolock autoLock(mLock);
-
- *max_size = 0;
-
- for (uint32_t i = 0; i < mNumSampleSizes; ++i) {
- size_t sample_size;
- status_t err = getSampleSize_l(i, &sample_size);
-
- if (err != OK) {
- return err;
- }
-
- if (sample_size > *max_size) {
- *max_size = sample_size;
- }
- }
-
- return OK;
-}
-
-uint32_t abs_difference(uint32_t time1, uint32_t time2) {
- return time1 > time2 ? time1 - time2 : time2 - time1;
-}
-
-// static
-int SampleTable::CompareIncreasingTime(const void *_a, const void *_b) {
- const SampleTimeEntry *a = (const SampleTimeEntry *)_a;
- const SampleTimeEntry *b = (const SampleTimeEntry *)_b;
-
- if (a->mCompositionTime < b->mCompositionTime) {
- return -1;
- } else if (a->mCompositionTime > b->mCompositionTime) {
- return 1;
- }
-
- return 0;
-}
-
-void SampleTable::buildSampleEntriesTable() {
- Mutex::Autolock autoLock(mLock);
-
- if (mSampleTimeEntries != NULL || mNumSampleSizes == 0) {
- if (mNumSampleSizes == 0) {
- ALOGE("b/23247055, mNumSampleSizes(%u)", mNumSampleSizes);
- }
- return;
- }
-
- mTotalSize += (uint64_t)mNumSampleSizes * sizeof(SampleTimeEntry);
- if (mTotalSize > kMaxTotalSize) {
- ALOGE("Sample entry table size would make sample table too large.\n"
- " Requested sample entry table size = %llu\n"
- " Eventual sample table size >= %llu\n"
- " Allowed sample table size = %llu\n",
- (unsigned long long)mNumSampleSizes * sizeof(SampleTimeEntry),
- (unsigned long long)mTotalSize,
- (unsigned long long)kMaxTotalSize);
- return;
- }
-
- mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
- if (!mSampleTimeEntries) {
- ALOGE("Cannot allocate sample entry table with %llu entries.",
- (unsigned long long)mNumSampleSizes);
- return;
- }
-
- uint32_t sampleIndex = 0;
- uint32_t sampleTime = 0;
-
- for (uint32_t i = 0; i < mTimeToSampleCount; ++i) {
- uint32_t n = mTimeToSample[2 * i];
- uint32_t delta = mTimeToSample[2 * i + 1];
-
- for (uint32_t j = 0; j < n; ++j) {
- if (sampleIndex < mNumSampleSizes) {
- // Technically this should always be the case if the file
- // is well-formed, but you know... there's (gasp) malformed
- // content out there.
-
- mSampleTimeEntries[sampleIndex].mSampleIndex = sampleIndex;
-
- int32_t compTimeDelta =
- mCompositionDeltaLookup->getCompositionTimeOffset(
- sampleIndex);
-
- if ((compTimeDelta < 0 && sampleTime <
- (compTimeDelta == INT32_MIN ?
- INT32_MAX : uint32_t(-compTimeDelta)))
- || (compTimeDelta > 0 &&
- sampleTime > UINT32_MAX - compTimeDelta)) {
- ALOGE("%u + %d would overflow, clamping",
- sampleTime, compTimeDelta);
- if (compTimeDelta < 0) {
- sampleTime = 0;
- } else {
- sampleTime = UINT32_MAX;
- }
- compTimeDelta = 0;
- }
-
- mSampleTimeEntries[sampleIndex].mCompositionTime =
- compTimeDelta > 0 ? sampleTime + compTimeDelta:
- sampleTime - (-compTimeDelta);
- }
-
- ++sampleIndex;
- if (sampleTime > UINT32_MAX - delta) {
- ALOGE("%u + %u would overflow, clamping",
- sampleTime, delta);
- sampleTime = UINT32_MAX;
- } else {
- sampleTime += delta;
- }
- }
- }
-
- qsort(mSampleTimeEntries, mNumSampleSizes, sizeof(SampleTimeEntry),
- CompareIncreasingTime);
-}
-
-status_t SampleTable::findSampleAtTime(
- uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
- uint32_t *sample_index, uint32_t flags) {
- buildSampleEntriesTable();
-
- if (mSampleTimeEntries == NULL) {
- return ERROR_OUT_OF_RANGE;
- }
-
- uint32_t left = 0;
- uint32_t right_plus_one = mNumSampleSizes;
- while (left < right_plus_one) {
- uint32_t center = left + (right_plus_one - left) / 2;
- uint64_t centerTime =
- getSampleTime(center, scale_num, scale_den);
-
- if (req_time < centerTime) {
- right_plus_one = center;
- } else if (req_time > centerTime) {
- left = center + 1;
- } else {
- *sample_index = mSampleTimeEntries[center].mSampleIndex;
- return OK;
- }
- }
-
- uint32_t closestIndex = left;
-
- if (closestIndex == mNumSampleSizes) {
- if (flags == kFlagAfter) {
- return ERROR_OUT_OF_RANGE;
- }
- flags = kFlagBefore;
- } else if (closestIndex == 0) {
- if (flags == kFlagBefore) {
- // normally we should return out of range, but that is
- // treated as end-of-stream. instead return first sample
- //
- // return ERROR_OUT_OF_RANGE;
- }
- flags = kFlagAfter;
- }
-
- switch (flags) {
- case kFlagBefore:
- {
- --closestIndex;
- break;
- }
-
- case kFlagAfter:
- {
- // nothing to do
- break;
- }
-
- default:
- {
- CHECK(flags == kFlagClosest);
- // pick closest based on timestamp. use abs_difference for safety
- if (abs_difference(
- getSampleTime(closestIndex, scale_num, scale_den), req_time) >
- abs_difference(
- req_time, getSampleTime(closestIndex - 1, scale_num, scale_den))) {
- --closestIndex;
- }
- break;
- }
- }
-
- *sample_index = mSampleTimeEntries[closestIndex].mSampleIndex;
- return OK;
-}
-
-status_t SampleTable::findSyncSampleNear(
- uint32_t start_sample_index, uint32_t *sample_index, uint32_t flags) {
- Mutex::Autolock autoLock(mLock);
-
- *sample_index = 0;
-
- if (mSyncSampleOffset < 0) {
- // All samples are sync-samples.
- *sample_index = start_sample_index;
- return OK;
- }
-
- if (mNumSyncSamples == 0) {
- *sample_index = 0;
- return OK;
- }
-
- uint32_t left = 0;
- uint32_t right_plus_one = mNumSyncSamples;
- while (left < right_plus_one) {
- uint32_t center = left + (right_plus_one - left) / 2;
- uint32_t x = mSyncSamples[center];
-
- if (start_sample_index < x) {
- right_plus_one = center;
- } else if (start_sample_index > x) {
- left = center + 1;
- } else {
- *sample_index = x;
- return OK;
- }
- }
-
- if (left == mNumSyncSamples) {
- if (flags == kFlagAfter) {
- ALOGE("tried to find a sync frame after the last one: %d", left);
- return ERROR_OUT_OF_RANGE;
- }
- flags = kFlagBefore;
- }
- else if (left == 0) {
- if (flags == kFlagBefore) {
- ALOGE("tried to find a sync frame before the first one: %d", left);
-
- // normally we should return out of range, but that is
- // treated as end-of-stream. instead seek to first sync
- //
- // return ERROR_OUT_OF_RANGE;
- }
- flags = kFlagAfter;
- }
-
- // Now ssi[left - 1] <(=) start_sample_index <= ssi[left]
- switch (flags) {
- case kFlagBefore:
- {
- --left;
- break;
- }
- case kFlagAfter:
- {
- // nothing to do
- break;
- }
- default:
- {
- // this route is not used, but implement it nonetheless
- CHECK(flags == kFlagClosest);
-
- status_t err = mSampleIterator->seekTo(start_sample_index);
- if (err != OK) {
- return err;
- }
- uint32_t sample_time = mSampleIterator->getSampleTime();
-
- err = mSampleIterator->seekTo(mSyncSamples[left]);
- if (err != OK) {
- return err;
- }
- uint32_t upper_time = mSampleIterator->getSampleTime();
-
- err = mSampleIterator->seekTo(mSyncSamples[left - 1]);
- if (err != OK) {
- return err;
- }
- uint32_t lower_time = mSampleIterator->getSampleTime();
-
- // use abs_difference for safety
- if (abs_difference(upper_time, sample_time) >
- abs_difference(sample_time, lower_time)) {
- --left;
- }
- break;
- }
- }
-
- *sample_index = mSyncSamples[left];
- return OK;
-}
-
-status_t SampleTable::findThumbnailSample(uint32_t *sample_index) {
- Mutex::Autolock autoLock(mLock);
-
- if (mSyncSampleOffset < 0) {
- // All samples are sync-samples.
- *sample_index = 0;
- return OK;
- }
-
- uint32_t bestSampleIndex = 0;
- size_t maxSampleSize = 0;
-
- static const size_t kMaxNumSyncSamplesToScan = 20;
-
- // Consider the first kMaxNumSyncSamplesToScan sync samples and
- // pick the one with the largest (compressed) size as the thumbnail.
-
- size_t numSamplesToScan = mNumSyncSamples;
- if (numSamplesToScan > kMaxNumSyncSamplesToScan) {
- numSamplesToScan = kMaxNumSyncSamplesToScan;
- }
-
- for (size_t i = 0; i < numSamplesToScan; ++i) {
- uint32_t x = mSyncSamples[i];
-
- // Now x is a sample index.
- size_t sampleSize;
- status_t err = getSampleSize_l(x, &sampleSize);
- if (err != OK) {
- return err;
- }
-
- if (i == 0 || sampleSize > maxSampleSize) {
- bestSampleIndex = x;
- maxSampleSize = sampleSize;
- }
- }
-
- *sample_index = bestSampleIndex;
-
- return OK;
-}
-
-status_t SampleTable::getSampleSize_l(
- uint32_t sampleIndex, size_t *sampleSize) {
- return mSampleIterator->getSampleSizeDirect(
- sampleIndex, sampleSize);
-}
-
-status_t SampleTable::getMetaDataForSample(
- uint32_t sampleIndex,
- off64_t *offset,
- size_t *size,
- uint32_t *compositionTime,
- bool *isSyncSample,
- uint32_t *sampleDuration) {
- Mutex::Autolock autoLock(mLock);
-
- status_t err;
- if ((err = mSampleIterator->seekTo(sampleIndex)) != OK) {
- return err;
- }
-
- if (offset) {
- *offset = mSampleIterator->getSampleOffset();
- }
-
- if (size) {
- *size = mSampleIterator->getSampleSize();
- }
-
- if (compositionTime) {
- *compositionTime = mSampleIterator->getSampleTime();
- }
-
- if (isSyncSample) {
- *isSyncSample = false;
- if (mSyncSampleOffset < 0) {
- // Every sample is a sync sample.
- *isSyncSample = true;
- } else {
- size_t i = (mLastSyncSampleIndex < mNumSyncSamples)
- && (mSyncSamples[mLastSyncSampleIndex] <= sampleIndex)
- ? mLastSyncSampleIndex : 0;
-
- while (i < mNumSyncSamples && mSyncSamples[i] < sampleIndex) {
- ++i;
- }
-
- if (i < mNumSyncSamples && mSyncSamples[i] == sampleIndex) {
- *isSyncSample = true;
- }
-
- mLastSyncSampleIndex = i;
- }
- }
-
- if (sampleDuration) {
- *sampleDuration = mSampleIterator->getSampleDuration();
- }
-
- return OK;
-}
-
-int32_t SampleTable::getCompositionTimeOffset(uint32_t sampleIndex) {
- return mCompositionDeltaLookup->getCompositionTimeOffset(sampleIndex);
-}
-
-} // namespace android
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index 90b8603..404c537 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SimpleDecodingSource"
+#include <utils/Log.h>
+
#include <gui/Surface.h>
#include <media/ICrypto.h>
@@ -36,14 +40,14 @@
//static
sp<SimpleDecodingSource> SimpleDecodingSource::Create(
- const sp<IMediaSource> &source, uint32_t flags) {
+ const sp<MediaSource> &source, uint32_t flags) {
return SimpleDecodingSource::Create(source, flags, nullptr, nullptr);
}
//static
sp<SimpleDecodingSource> SimpleDecodingSource::Create(
- const sp<IMediaSource> &source, uint32_t flags, const sp<ANativeWindow> &nativeWindow,
- const char *desiredCodec) {
+ const sp<MediaSource> &source, uint32_t flags, const sp<ANativeWindow> &nativeWindow,
+ const char *desiredCodec, bool skipMediaCodecList) {
sp<Surface> surface = static_cast<Surface*>(nativeWindow.get());
const char *mime = NULL;
sp<MetaData> meta = source->getFormat();
@@ -63,6 +67,33 @@
looper->start();
sp<MediaCodec> codec;
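+ // configure-and-wrap helper shared by the explicit-component path and the codec-list loop below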
+ auto configure = [=](const sp<MediaCodec> &codec, const AString &componentName)
+ -> sp<SimpleDecodingSource> {
+ if (codec != NULL) {
+ ALOGI("Successfully allocated codec '%s'", componentName.c_str());
+
+ status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
+ sp<AMessage> outFormat;
+ if (err == OK) {
+ err = codec->getOutputFormat(&outFormat);
+ }
+ if (err == OK) {
+ return new SimpleDecodingSource(codec, source, looper,
+ surface != NULL,
+ strcmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS) == 0,
+ outFormat);
+ }
+
+ ALOGD("Failed to configure codec '%s'", componentName.c_str());
+ codec->release();
+ }
+ return NULL;
+ };
+
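+ // when the caller names the exact component, skip the MediaCodecList search and use it directly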
+ if (skipMediaCodecList) {
+ codec = MediaCodec::CreateByComponentName(looper, desiredCodec);
+ return configure(codec, desiredCodec);
+ }
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
@@ -73,22 +104,10 @@
ALOGV("Attempting to allocate codec '%s'", componentName.c_str());
codec = MediaCodec::CreateByComponentName(looper, componentName);
- if (codec != NULL) {
- ALOGI("Successfully allocated codec '%s'", componentName.c_str());
-
- status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
- if (err == OK) {
- err = codec->getOutputFormat(&format);
- }
- if (err == OK) {
- return new SimpleDecodingSource(codec, source, looper,
- surface != NULL,
- strcmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS) == 0,
- format);
- }
-
- ALOGD("Failed to configure codec '%s'", componentName.c_str());
- codec->release();
+ sp<SimpleDecodingSource> res = configure(codec, componentName);
+ if (res != NULL) {
+ return res;
+ } else {
codec = NULL;
}
}
@@ -99,7 +118,7 @@
}
SimpleDecodingSource::SimpleDecodingSource(
- const sp<MediaCodec> &codec, const sp<IMediaSource> &source, const sp<ALooper> &looper,
+ const sp<MediaCodec> &codec, const sp<MediaSource> &source, const sp<ALooper> &looper,
bool usingSurface, bool isVorbis, const sp<AMessage> &format)
: mCodec(codec),
mSource(source),
@@ -181,7 +200,7 @@
}
status_t SimpleDecodingSource::read(
- MediaBuffer **buffer, const ReadOptions *options) {
+ MediaBufferBase **buffer, const ReadOptions *options) {
*buffer = NULL;
Mutexed<ProtectedState>::Locked me(mProtectedState);
@@ -202,7 +221,7 @@
}
status_t SimpleDecodingSource::doRead(
- Mutexed<ProtectedState>::Locked &me, MediaBuffer **buffer, const ReadOptions *options) {
+ Mutexed<ProtectedState>::Locked &me, MediaBufferBase **buffer, const ReadOptions *options) {
// |me| is always locked on entry, but is allowed to be unlocked on exit
CHECK_EQ(me->mState, STARTED);
@@ -212,7 +231,7 @@
status_t res;
// flush codec on seek
- IMediaSource::ReadOptions::SeekMode mode;
+ MediaSource::ReadOptions::SeekMode mode;
if (options != NULL && options->getSeekTo(&out_pts, &mode)) {
me->mQueuedInputEOS = false;
me->mGotOutputEOS = false;
@@ -248,7 +267,7 @@
return UNKNOWN_ERROR;
}
- MediaBuffer *in_buf;
+ MediaBufferBase *in_buf;
while (true) {
in_buf = NULL;
me.unlock();
@@ -290,7 +309,7 @@
if (in_buf != NULL) {
int64_t timestampUs = 0;
- CHECK(in_buf->meta_data()->findInt64(kKeyTime, &timestampUs));
+ CHECK(in_buf->meta_data().findInt64(kKeyTime, &timestampUs));
if (in_buf->range_length() + (mIsVorbis ? 4 : 0) > in_buffer->capacity()) {
ALOGW("'%s' received %zu input bytes for buffer of size %zu",
mComponentName.c_str(),
@@ -302,7 +321,7 @@
if (mIsVorbis) {
int32_t numPageSamples;
- if (!in_buf->meta_data()->findInt32(kKeyValidSamples, &numPageSamples)) {
+ if (!in_buf->meta_data().findInt32(kKeyValidSamples, &numPageSamples)) {
numPageSamples = -1;
}
memcpy(in_buffer->base() + cpLen, &numPageSamples, sizeof(numPageSamples));
@@ -374,7 +393,7 @@
*buffer = new MediaBuffer(out_size);
CHECK_LE(out_buffer->size(), (*buffer)->size());
memcpy((*buffer)->data(), out_buffer->data(), out_buffer->size());
- (*buffer)->meta_data()->setInt64(kKeyTime, out_pts);
+ (*buffer)->meta_data().setInt64(kKeyTime, out_pts);
mCodec->releaseOutputBuffer(out_ix);
}
return OK;
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index 4ff2bfe..e010b3e 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -40,7 +40,8 @@
".mpeg", ".ogg", ".mid", ".smf", ".imy", ".wma", ".aac",
".wav", ".amr", ".midi", ".xmf", ".rtttl", ".rtx", ".ota",
".mkv", ".mka", ".webm", ".ts", ".fl", ".flac", ".mxmf",
- ".avi", ".mpeg", ".mpg", ".awb", ".mpga", ".mov"
+ ".avi", ".mpeg", ".mpg", ".awb", ".mpga", ".mov",
+ ".m4v", ".oga"
};
static const size_t kNumValidExtensions =
sizeof(kValidExtensions) / sizeof(kValidExtensions[0]);
@@ -62,6 +63,11 @@
client.setLocale(locale());
client.beginFile();
MediaScanResult result = processFileInternal(path, mimeType, client);
+ ALOGV("result: %d", result);
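+ // the scan failed and no mime type was supplied; report a generic one instead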
+ if (mimeType == NULL && result != MEDIA_SCAN_RESULT_OK) {
+ ALOGW("media scan failed for %s", path);
+ client.setMimeType("application/octet-stream");
+ }
client.endFile();
return result;
}
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 103da95..e80ec3b 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -20,52 +20,34 @@
#include <inttypes.h>
#include <utils/Log.h>
-#include <gui/Surface.h>
-#include "include/avc_utils.h"
+#include "include/FrameDecoder.h"
#include "include/StagefrightMetadataRetriever.h"
-#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
-#include <media/MediaCodecBuffer.h>
-
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/ColorConverter.h>
-#include <media/stagefright/DataSource.h>
+#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-
#include <media/CharacterEncodingDetector.h>
namespace android {
-static const int64_t kBufferTimeOutUs = 30000ll; // 30 msec
-static const size_t kRetryCount = 20; // must be >0
-
StagefrightMetadataRetriever::StagefrightMetadataRetriever()
: mParsedMetaData(false),
- mAlbumArt(NULL) {
+ mAlbumArt(NULL),
+ mLastImageIndex(-1) {
ALOGV("StagefrightMetadataRetriever()");
}
StagefrightMetadataRetriever::~StagefrightMetadataRetriever() {
ALOGV("~StagefrightMetadataRetriever()");
clearMetadata();
- // Explicitly release extractor before continuing with the destructor,
- // some extractors might need to callback to close off the DataSource
- // and we need to make sure it's still there.
- if (mExtractor != NULL) {
- mExtractor->release();
- }
if (mSource != NULL) {
mSource->close();
}
@@ -78,14 +60,14 @@
ALOGV("setDataSource(%s)", uri);
clearMetadata();
- mSource = DataSource::CreateFromURI(httpService, uri, headers);
+ mSource = DataSourceFactory::CreateFromURI(httpService, uri, headers);
if (mSource == NULL) {
ALOGE("Unable to create data source for '%s'.", uri);
return UNKNOWN_ERROR;
}
- mExtractor = MediaExtractor::Create(mSource);
+ mExtractor = MediaExtractorFactory::Create(mSource);
if (mExtractor == NULL) {
ALOGE("Unable to instantiate an extractor for '%s'.", uri);
@@ -115,7 +97,7 @@
return err;
}
- mExtractor = MediaExtractor::Create(mSource);
+ mExtractor = MediaExtractorFactory::Create(mSource);
if (mExtractor == NULL) {
mSource.clear();
@@ -132,7 +114,7 @@
clearMetadata();
mSource = source;
- mExtractor = MediaExtractor::Create(mSource, mime);
+ mExtractor = MediaExtractorFactory::Create(mSource, mime);
if (mExtractor == NULL) {
ALOGE("Failed to instantiate a MediaExtractor.");
@@ -143,469 +125,155 @@
return OK;
}
-static VideoFrame *allocVideoFrame(
- const sp<MetaData> &trackMeta, int32_t width, int32_t height, int32_t bpp, bool metaOnly) {
- int32_t rotationAngle;
- if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
- rotationAngle = 0; // By default, no rotation
- }
+sp<IMemory> StagefrightMetadataRetriever::getImageAtIndex(
+ int index, int colorFormat, bool metaOnly, bool thumbnail) {
+ ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d) thumbnail(%d)",
+ index, colorFormat, metaOnly, thumbnail);
- uint32_t type;
- const void *iccData;
- size_t iccSize;
- if (!trackMeta->findData(kKeyIccProfile, &type, &iccData, &iccSize)){
- iccData = NULL;
- iccSize = 0;
- }
-
- int32_t sarWidth, sarHeight;
- int32_t displayWidth, displayHeight;
- if (trackMeta->findInt32(kKeySARWidth, &sarWidth)
- && trackMeta->findInt32(kKeySARHeight, &sarHeight)
- && sarHeight != 0) {
- displayWidth = (width * sarWidth) / sarHeight;
- displayHeight = height;
- } else if (trackMeta->findInt32(kKeyDisplayWidth, &displayWidth)
- && trackMeta->findInt32(kKeyDisplayHeight, &displayHeight)
- && displayWidth > 0 && displayHeight > 0
- && width > 0 && height > 0) {
- ALOGV("found display size %dx%d", displayWidth, displayHeight);
- } else {
- displayWidth = width;
- displayHeight = height;
- }
-
- return new VideoFrame(width, height, displayWidth, displayHeight,
- rotationAngle, bpp, !metaOnly, iccData, iccSize);
+ return getImageInternal(index, colorFormat, metaOnly, thumbnail, NULL);
}
-static bool getDstColorFormat(android_pixel_format_t colorFormat,
- OMX_COLOR_FORMATTYPE *omxColorFormat, int32_t *bpp) {
- switch (colorFormat) {
- case HAL_PIXEL_FORMAT_RGB_565:
- {
- *omxColorFormat = OMX_COLOR_Format16bitRGB565;
- *bpp = 2;
- return true;
- }
- case HAL_PIXEL_FORMAT_RGBA_8888:
- {
- *omxColorFormat = OMX_COLOR_Format32BitRGBA8888;
- *bpp = 4;
- return true;
- }
- case HAL_PIXEL_FORMAT_BGRA_8888:
- {
- *omxColorFormat = OMX_COLOR_Format32bitBGRA8888;
- *bpp = 4;
- return true;
- }
- default:
- {
- ALOGE("Unsupported color format: %d", colorFormat);
- break;
- }
+sp<IMemory> StagefrightMetadataRetriever::getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom) {
+ ALOGV("getImageRectAtIndex: index(%d) colorFormat(%d) rect {%d, %d, %d, %d}",
+ index, colorFormat, left, top, right, bottom);
+
+ FrameRect rect = {left, top, right, bottom};
+
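+ // same image as the last call: reuse the cached decoder to extract just this rect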
+ if (mImageDecoder != NULL && index == mLastImageIndex) {
+ return mImageDecoder->extractFrame(&rect);
}
- return false;
+
+ return getImageInternal(
+ index, colorFormat, false /*metaOnly*/, false /*thumbnail*/, &rect);
}
-static VideoFrame *extractVideoFrame(
- const AString &componentName,
- const sp<MetaData> &trackMeta,
- const sp<IMediaSource> &source,
- int64_t frameTimeUs,
- int seekMode,
- int colorFormat,
- bool metaOnly) {
- sp<MetaData> format = source->getFormat();
+sp<IMemory> StagefrightMetadataRetriever::getImageInternal(
+ int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect) {
- MediaSource::ReadOptions::SeekMode mode =
- static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
- if (seekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
- seekMode > MediaSource::ReadOptions::SEEK_CLOSEST) {
- ALOGE("Unknown seek mode: %d", seekMode);
+ if (mExtractor.get() == NULL) {
+ ALOGE("no extractor.");
return NULL;
}
- int32_t dstBpp;
- OMX_COLOR_FORMATTYPE dstFormat;
- if (!getDstColorFormat(
- (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
+ size_t n = mExtractor->countTracks();
+ size_t i;
+ int imageCount = 0;
+
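+ // choose the primary image track, or the index'th image track when a non-negative index is given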
+ for (i = 0; i < n; ++i) {
+ sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+ ALOGV("getting track %zu of %zu, meta=%s", i, n, meta->toString().c_str());
+
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ if (!strncasecmp(mime, "image/", 6)) {
+ int32_t isPrimary;
+ if ((index < 0 && meta->findInt32(
+ kKeyTrackIsDefault, &isPrimary) && isPrimary)
+ || (index == imageCount++)) {
+ break;
+ }
+ }
+ }
+
+ if (i == n) {
+ ALOGE("image track not found.");
return NULL;
}
+ sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+
if (metaOnly) {
- int32_t width, height;
- CHECK(trackMeta->findInt32(kKeyWidth, &width));
- CHECK(trackMeta->findInt32(kKeyHeight, &height));
- return allocVideoFrame(trackMeta, width, height, dstBpp, true);
+ return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail);
}
- MediaSource::ReadOptions options;
- sp<MetaData> overrideMeta;
- if (frameTimeUs < 0) {
- uint32_t type;
- const void *data;
- size_t size;
- int64_t thumbNailTime;
- int32_t thumbnailWidth, thumbnailHeight;
+ sp<IMediaSource> source = mExtractor->getTrack(i);
- // if we have a stand-alone thumbnail, set up the override meta,
- // and set seekTo time to -1.
- if (trackMeta->findInt32(kKeyThumbnailWidth, &thumbnailWidth)
- && trackMeta->findInt32(kKeyThumbnailHeight, &thumbnailHeight)
- && trackMeta->findData(kKeyThumbnailHVCC, &type, &data, &size)){
- overrideMeta = new MetaData(*trackMeta);
- overrideMeta->remove(kKeyDisplayWidth);
- overrideMeta->remove(kKeyDisplayHeight);
- overrideMeta->setInt32(kKeyWidth, thumbnailWidth);
- overrideMeta->setInt32(kKeyHeight, thumbnailHeight);
- overrideMeta->setData(kKeyHVCC, type, data, size);
- thumbNailTime = -1ll;
- ALOGV("thumbnail: %dx%d", thumbnailWidth, thumbnailHeight);
- } else if (!trackMeta->findInt64(kKeyThumbnailTime, &thumbNailTime)
- || thumbNailTime < 0) {
- thumbNailTime = 0;
- }
-
- options.setSeekTo(thumbNailTime, mode);
- } else {
- options.setSeekTo(frameTimeUs, mode);
- }
-
- int32_t gridRows = 1, gridCols = 1;
- if (overrideMeta == NULL) {
- // check if we're dealing with a tiled heif
- int32_t gridWidth, gridHeight;
- if (trackMeta->findInt32(kKeyGridWidth, &gridWidth) && gridWidth > 0
- && trackMeta->findInt32(kKeyGridHeight, &gridHeight) && gridHeight > 0) {
- int32_t width, height, displayWidth, displayHeight;
- CHECK(trackMeta->findInt32(kKeyWidth, &width));
- CHECK(trackMeta->findInt32(kKeyHeight, &height));
- CHECK(trackMeta->findInt32(kKeyDisplayWidth, &displayWidth));
- CHECK(trackMeta->findInt32(kKeyDisplayHeight, &displayHeight));
-
- if (width >= displayWidth && height >= displayHeight
- && (width % gridWidth == 0) && (height % gridHeight == 0)) {
- ALOGV("grid config: %dx%d, display %dx%d, grid %dx%d",
- width, height, displayWidth, displayHeight, gridWidth, gridHeight);
-
- overrideMeta = new MetaData(*trackMeta);
- overrideMeta->remove(kKeyDisplayWidth);
- overrideMeta->remove(kKeyDisplayHeight);
- overrideMeta->setInt32(kKeyWidth, gridWidth);
- overrideMeta->setInt32(kKeyHeight, gridHeight);
- gridCols = width / gridWidth;
- gridRows = height / gridHeight;
- } else {
- ALOGE("Bad grid config: %dx%d, display %dx%d, grid %dx%d",
- width, height, displayWidth, displayHeight, gridWidth, gridHeight);
- }
- }
- if (overrideMeta == NULL) {
- overrideMeta = trackMeta;
- }
- }
- int32_t numTiles = gridRows * gridCols;
-
- sp<AMessage> videoFormat;
- if (convertMetaDataToMessage(overrideMeta, &videoFormat) != OK) {
- ALOGE("b/23680780");
- ALOGW("Failed to convert meta data to message");
+ if (source.get() == NULL) {
+ ALOGE("unable to instantiate image track.");
return NULL;
}
- // TODO: Use Flexible color instead
- videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
-
- // For the thumbnail extraction case, try to allocate single buffer in both
- // input and output ports, if seeking to a sync frame. NOTE: This request may
- // fail if component requires more than that for decoding.
- bool isSeekingClosest = (seekMode == MediaSource::ReadOptions::SEEK_CLOSEST);
- bool decodeSingleFrame = !isSeekingClosest && (numTiles == 1);
- if (decodeSingleFrame) {
- videoFormat->setInt32("android._num-input-buffers", 1);
- videoFormat->setInt32("android._num-output-buffers", 1);
- }
-
- status_t err;
- sp<ALooper> looper = new ALooper;
- looper->start();
- sp<MediaCodec> decoder = MediaCodec::CreateByComponentName(
- looper, componentName, &err);
-
- if (decoder.get() == NULL || err != OK) {
- ALOGW("Failed to instantiate decoder [%s]", componentName.c_str());
- return NULL;
- }
-
- err = decoder->configure(videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
- if (err != OK) {
- ALOGW("configure returned error %d (%s)", err, asString(err));
- decoder->release();
- return NULL;
- }
-
- err = decoder->start();
- if (err != OK) {
- ALOGW("start returned error %d (%s)", err, asString(err));
- decoder->release();
- return NULL;
- }
-
- err = source->start();
- if (err != OK) {
- ALOGW("source failed to start: %d (%s)", err, asString(err));
- decoder->release();
- return NULL;
- }
-
- Vector<sp<MediaCodecBuffer> > inputBuffers;
- err = decoder->getInputBuffers(&inputBuffers);
- if (err != OK) {
- ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
- decoder->release();
- source->stop();
- return NULL;
- }
-
- Vector<sp<MediaCodecBuffer> > outputBuffers;
- err = decoder->getOutputBuffers(&outputBuffers);
- if (err != OK) {
- ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
- decoder->release();
- source->stop();
- return NULL;
- }
-
- sp<AMessage> outputFormat = NULL;
- bool haveMoreInputs = true;
- size_t index, offset, size;
- int64_t timeUs;
- size_t retriesLeft = kRetryCount;
- bool done = false;
const char *mime;
- bool success = format->findCString(kKeyMIMEType, &mime);
- if (!success) {
- ALOGE("Could not find mime type");
- return NULL;
+ CHECK(trackMeta->findCString(kKeyMIMEType, &mime));
+ ALOGV("extracting from %s track", mime);
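+ // HEIC still images are HEVC-coded, so hand them to an HEVC decoder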
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
+ mime = MEDIA_MIMETYPE_VIDEO_HEVC;
+ trackMeta = new MetaData(*trackMeta);
+ trackMeta->setCString(kKeyMIMEType, mime);
}
- bool isAvcOrHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
- || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+ Vector<AString> matchingCodecs;
+ MediaCodecList::findMatchingCodecs(
+ mime,
+ false, /* encoder */
+ MediaCodecList::kPreferSoftwareCodecs,
+ &matchingCodecs);
- bool firstSample = true;
- int64_t targetTimeUs = -1ll;
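+ // try each matching decoder in turn until one initializes and produces a frame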
+ for (size_t i = 0; i < matchingCodecs.size(); ++i) {
+ const AString &componentName = matchingCodecs[i];
+ sp<ImageDecoder> decoder = new ImageDecoder(componentName, trackMeta, source);
+ int64_t frameTimeUs = thumbnail ? -1 : 0;
+ if (decoder->init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
+ sp<IMemory> frame = decoder->extractFrame(rect);
- VideoFrame *frame = NULL;
- int32_t tilesDecoded = 0;
-
- do {
- size_t inputIndex = -1;
- int64_t ptsUs = 0ll;
- uint32_t flags = 0;
- sp<MediaCodecBuffer> codecBuffer = NULL;
-
- while (haveMoreInputs) {
- err = decoder->dequeueInputBuffer(&inputIndex, kBufferTimeOutUs);
- if (err != OK) {
- ALOGW("Timed out waiting for input");
- if (retriesLeft) {
- err = OK;
+ if (frame != NULL) {
+ if (rect != NULL) {
+ // keep the decoder if slice decoding
+ mImageDecoder = decoder;
+ mLastImageIndex = index;
}
- break;
- }
- codecBuffer = inputBuffers[inputIndex];
-
- MediaBuffer *mediaBuffer = NULL;
-
- err = source->read(&mediaBuffer, &options);
- options.clearSeekTo();
- if (err != OK) {
- ALOGW("Input Error or EOS");
- haveMoreInputs = false;
- if (err == ERROR_END_OF_STREAM) {
- err = OK;
- }
- break;
- }
- if (firstSample && isSeekingClosest) {
- mediaBuffer->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs);
- ALOGV("Seeking closest: targetTimeUs=%lld", (long long)targetTimeUs);
- }
- firstSample = false;
-
- if (mediaBuffer->range_length() > codecBuffer->capacity()) {
- ALOGE("buffer size (%zu) too large for codec input size (%zu)",
- mediaBuffer->range_length(), codecBuffer->capacity());
- haveMoreInputs = false;
- err = BAD_VALUE;
- } else {
- codecBuffer->setRange(0, mediaBuffer->range_length());
-
- CHECK(mediaBuffer->meta_data()->findInt64(kKeyTime, &ptsUs));
- memcpy(codecBuffer->data(),
- (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
- mediaBuffer->range_length());
- }
-
- mediaBuffer->release();
- break;
- }
-
- if (haveMoreInputs && inputIndex < inputBuffers.size()) {
- if (isAvcOrHevc && IsIDR(codecBuffer) && decodeSingleFrame) {
- // Only need to decode one IDR frame, unless we're seeking with CLOSEST
- // option, in which case we need to actually decode to targetTimeUs.
- haveMoreInputs = false;
- flags |= MediaCodec::BUFFER_FLAG_EOS;
- }
-
- ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
- codecBuffer->size(), ptsUs, flags);
- err = decoder->queueInputBuffer(
- inputIndex,
- codecBuffer->offset(),
- codecBuffer->size(),
- ptsUs,
- flags);
-
- // we don't expect an output from codec config buffer
- if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
- continue;
+ return frame;
}
}
-
- while (err == OK) {
- // wait for a decoded buffer
- err = decoder->dequeueOutputBuffer(
- &index,
- &offset,
- &size,
- &timeUs,
- &flags,
- kBufferTimeOutUs);
-
- if (err == INFO_FORMAT_CHANGED) {
- ALOGV("Received format change");
- err = decoder->getOutputFormat(&outputFormat);
- } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
- ALOGV("Output buffers changed");
- err = decoder->getOutputBuffers(&outputBuffers);
- } else {
- if (err == -EAGAIN /* INFO_TRY_AGAIN_LATER */ && --retriesLeft > 0) {
- ALOGV("Timed-out waiting for output.. retries left = %zu", retriesLeft);
- err = OK;
- } else if (err == OK) {
- // If we're seeking with CLOSEST option and obtained a valid targetTimeUs
- // from the extractor, decode to the specified frame. Otherwise we're done.
- ALOGV("Received an output buffer, timeUs=%lld", (long long)timeUs);
- sp<MediaCodecBuffer> videoFrameBuffer = outputBuffers.itemAt(index);
-
- int32_t width, height;
- CHECK(outputFormat != NULL);
- CHECK(outputFormat->findInt32("width", &width));
- CHECK(outputFormat->findInt32("height", &height));
-
- int32_t crop_left, crop_top, crop_right, crop_bottom;
- if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
- crop_left = crop_top = 0;
- crop_right = width - 1;
- crop_bottom = height - 1;
- }
-
- if (frame == NULL) {
- frame = allocVideoFrame(
- trackMeta,
- (crop_right - crop_left + 1) * gridCols,
- (crop_bottom - crop_top + 1) * gridRows,
- dstBpp,
- false /*metaOnly*/);
- }
-
- int32_t srcFormat;
- CHECK(outputFormat->findInt32("color-format", &srcFormat));
-
- ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat);
-
- int32_t dstLeft, dstTop, dstRight, dstBottom;
- if (numTiles == 1) {
- dstLeft = crop_left;
- dstTop = crop_top;
- dstRight = crop_right;
- dstBottom = crop_bottom;
- } else {
- dstLeft = tilesDecoded % gridCols * width;
- dstTop = tilesDecoded / gridCols * height;
- dstRight = dstLeft + width - 1;
- dstBottom = dstTop + height - 1;
- }
-
- if (converter.isValid()) {
- err = converter.convert(
- (const uint8_t *)videoFrameBuffer->data(),
- width, height,
- crop_left, crop_top, crop_right, crop_bottom,
- frame->mData,
- frame->mWidth,
- frame->mHeight,
- dstLeft, dstTop, dstRight, dstBottom);
- } else {
- ALOGE("Unable to convert from format 0x%08x to 0x%08x",
- srcFormat, dstFormat);
-
- err = ERROR_UNSUPPORTED;
- }
-
- done = (targetTimeUs < 0ll) || (timeUs >= targetTimeUs);
- if (numTiles > 1) {
- tilesDecoded++;
- done &= (tilesDecoded >= numTiles);
- }
- err = decoder->releaseOutputBuffer(index);
- } else {
- ALOGW("Received error %d (%s) instead of output", err, asString(err));
- done = true;
- }
- break;
- }
- }
- } while (err == OK && !done);
-
- source->stop();
- decoder->release();
-
- if (err != OK) {
- ALOGE("failed to get video frame (err %d)", err);
- delete frame;
- frame = NULL;
+ ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
}
- return frame;
+ return NULL;
}
-VideoFrame *StagefrightMetadataRetriever::getFrameAtTime(
+sp<IMemory> StagefrightMetadataRetriever::getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly) {
-
ALOGV("getFrameAtTime: %" PRId64 " us option: %d colorFormat: %d, metaOnly: %d",
timeUs, option, colorFormat, metaOnly);
+ sp<IMemory> frame;
+ status_t err = getFrameInternal(
+ timeUs, 1, option, colorFormat, metaOnly, &frame, NULL /*outFrames*/);
+ return (err == OK) ? frame : NULL;
+}
+
+status_t StagefrightMetadataRetriever::getFrameAtIndex(
+ std::vector<sp<IMemory> >* frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtIndex: frameIndex %d, numFrames %d, colorFormat: %d, metaOnly: %d",
+ frameIndex, numFrames, colorFormat, metaOnly);
+
+ return getFrameInternal(
+ frameIndex, numFrames, MediaSource::ReadOptions::SEEK_FRAME_INDEX,
+ colorFormat, metaOnly, NULL /*outFrame*/, frames);
+}
+
+status_t StagefrightMetadataRetriever::getFrameInternal(
+ int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
+ sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames) {
if (mExtractor.get() == NULL) {
- ALOGV("no extractor.");
- return NULL;
+ ALOGE("no extractor.");
+ return NO_INIT;
}
sp<MetaData> fileMeta = mExtractor->getMetaData();
if (fileMeta == NULL) {
- ALOGV("extractor doesn't publish metadata, failed to initialize?");
- return NULL;
+ ALOGE("extractor doesn't publish metadata, failed to initialize?");
+ return NO_INIT;
}
int32_t drm = 0;
if (fileMeta->findInt32(kKeyIsDRM, &drm) && drm != 0) {
ALOGE("frame grab not allowed.");
- return NULL;
+ return ERROR_DRM_UNKNOWN;
}
size_t n = mExtractor->countTracks();
@@ -622,18 +290,28 @@
}
if (i == n) {
- ALOGV("no video track found.");
- return NULL;
+ ALOGE("no video track found.");
+ return INVALID_OPERATION;
}
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
i, MediaExtractor::kIncludeExtensiveMetaData);
+ if (metaOnly) {
+ if (outFrame != NULL) {
+ *outFrame = FrameDecoder::getMetadataOnly(trackMeta, colorFormat);
+ if (*outFrame != NULL) {
+ return OK;
+ }
+ }
+ return UNKNOWN_ERROR;
+ }
+
sp<IMediaSource> source = mExtractor->getTrack(i);
if (source.get() == NULL) {
ALOGV("unable to instantiate video track.");
- return NULL;
+ return UNKNOWN_ERROR;
}
const void *data;
@@ -656,16 +334,25 @@
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
- VideoFrame *frame = extractVideoFrame(
- componentName, trackMeta, source, timeUs, option, colorFormat, metaOnly);
-
- if (frame != NULL) {
- return frame;
+ VideoFrameDecoder decoder(componentName, trackMeta, source);
+ if (decoder.init(timeUs, numFrames, option, colorFormat) == OK) {
+ if (outFrame != NULL) {
+ *outFrame = decoder.extractFrame();
+ if (*outFrame != NULL) {
+ return OK;
+ }
+ } else if (outFrames != NULL) {
+ status_t err = decoder.extractFrames(outFrames);
+ if (err == OK) {
+ return OK;
+ }
+ }
}
- ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
+ ALOGV("%s failed to extract frame, trying next decoder.", componentName.c_str());
}
- return NULL;
+ ALOGE("all codecs failed to extract frame.");
+ return UNKNOWN_ERROR;
}
MediaAlbumArt *StagefrightMetadataRetriever::extractAlbumArt() {
@@ -793,12 +480,27 @@
mMetaData.add(METADATA_KEY_CAPTURE_FRAMERATE, String8(tmp));
}
+ int64_t exifOffset, exifSize;
+ if (meta->findInt64(kKeyExifOffset, &exifOffset)
+ && meta->findInt64(kKeyExifSize, &exifSize)) {
+ sprintf(tmp, "%lld", (long long)exifOffset);
+ mMetaData.add(METADATA_KEY_EXIF_OFFSET, String8(tmp));
+ sprintf(tmp, "%lld", (long long)exifSize);
+ mMetaData.add(METADATA_KEY_EXIF_LENGTH, String8(tmp));
+ }
+
bool hasAudio = false;
bool hasVideo = false;
int32_t videoWidth = -1;
int32_t videoHeight = -1;
+ int32_t videoFrameCount = 0;
int32_t audioBitrate = -1;
int32_t rotationAngle = -1;
+ int32_t imageCount = 0;
+ int32_t imagePrimary = 0;
+ int32_t imageWidth = -1;
+ int32_t imageHeight = -1;
+ int32_t imageRotation = -1;
// The overall duration is the duration of the longest track.
int64_t maxDurationUs = 0;
@@ -829,6 +531,21 @@
if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
rotationAngle = 0;
}
+ if (!trackMeta->findInt32(kKeyFrameCount, &videoFrameCount)) {
+ videoFrameCount = 0;
+ }
+ } else if (!strncasecmp("image/", mime, 6)) {
+ int32_t isPrimary;
+ if (trackMeta->findInt32(
+ kKeyTrackIsDefault, &isPrimary) && isPrimary) {
+ imagePrimary = imageCount;
+ CHECK(trackMeta->findInt32(kKeyWidth, &imageWidth));
+ CHECK(trackMeta->findInt32(kKeyHeight, &imageHeight));
+ if (!trackMeta->findInt32(kKeyRotation, &imageRotation)) {
+ imageRotation = 0;
+ }
+ }
+ imageCount++;
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
const char *lang;
if (trackMeta->findCString(kKeyMediaLanguage, &lang)) {
@@ -867,6 +584,30 @@
sprintf(tmp, "%d", rotationAngle);
mMetaData.add(METADATA_KEY_VIDEO_ROTATION, String8(tmp));
+
+ if (videoFrameCount > 0) {
+ sprintf(tmp, "%d", videoFrameCount);
+ mMetaData.add(METADATA_KEY_VIDEO_FRAME_COUNT, String8(tmp));
+ }
+ }
+
+ if (imageCount > 0) {
+ mMetaData.add(METADATA_KEY_HAS_IMAGE, String8("yes"));
+
+ sprintf(tmp, "%d", imageCount);
+ mMetaData.add(METADATA_KEY_IMAGE_COUNT, String8(tmp));
+
+ sprintf(tmp, "%d", imagePrimary);
+ mMetaData.add(METADATA_KEY_IMAGE_PRIMARY, String8(tmp));
+
+ sprintf(tmp, "%d", imageWidth);
+ mMetaData.add(METADATA_KEY_IMAGE_WIDTH, String8(tmp));
+
+ sprintf(tmp, "%d", imageHeight);
+ mMetaData.add(METADATA_KEY_IMAGE_HEIGHT, String8(tmp));
+
+ sprintf(tmp, "%d", imageRotation);
+ mMetaData.add(METADATA_KEY_IMAGE_ROTATION, String8(tmp));
}
if (numTracks == 1 && hasAudio && audioBitrate >= 0) {
diff --git a/media/libstagefright/StagefrightPluginLoader.cpp b/media/libstagefright/StagefrightPluginLoader.cpp
new file mode 100644
index 0000000..519e870
--- /dev/null
+++ b/media/libstagefright/StagefrightPluginLoader.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "StagefrightPluginLoader"
+#include <utils/Log.h>
+
+#include <dlfcn.h>
+
+#include "StagefrightPluginLoader.h"
+
+namespace android {
+
+/* static */ Mutex StagefrightPluginLoader::sMutex;
+/* static */ std::unique_ptr<StagefrightPluginLoader> StagefrightPluginLoader::sInstance;
+
+StagefrightPluginLoader::StagefrightPluginLoader(const char *libPath)
+ : mCreateCodec(nullptr),
+ mCreateBuilder(nullptr) {
+ mLibHandle = dlopen(libPath, RTLD_NOW | RTLD_NODELETE);
+ if (mLibHandle == nullptr) {
+ ALOGD("Failed to load library: %s (%s)", libPath, dlerror());
+ return;
+ }
+ mCreateCodec = (CodecBase::CreateCodecFunc)dlsym(mLibHandle, "CreateCodec");
+ if (mCreateCodec == nullptr) {
+ ALOGD("Failed to find symbol: CreateCodec (%s)", dlerror());
+ }
+ mCreateBuilder = (MediaCodecListBuilderBase::CreateBuilderFunc)dlsym(
+ mLibHandle, "CreateBuilder");
+ if (mCreateBuilder == nullptr) {
+ ALOGD("Failed to find symbol: CreateBuilder (%s)", dlerror());
+ }
+ mCreateInputSurface = (CodecBase::CreateInputSurfaceFunc)dlsym(
+ mLibHandle, "CreateInputSurface");
+ if (mCreateInputSurface == nullptr) {
+ ALOGD("Failed to find symbol: CreateInputSurface (%s)", dlerror());
+ }
+}
+
+StagefrightPluginLoader::~StagefrightPluginLoader() {
+ if (mLibHandle != nullptr) {
+ ALOGV("Closing handle");
+ dlclose(mLibHandle);
+ }
+}
+
+CodecBase *StagefrightPluginLoader::createCodec() {
+ if (mLibHandle == nullptr || mCreateCodec == nullptr) {
+ ALOGD("Handle or CreateCodec symbol is null");
+ return nullptr;
+ }
+ return mCreateCodec();
+}
+
+MediaCodecListBuilderBase *StagefrightPluginLoader::createBuilder() {
+ if (mLibHandle == nullptr || mCreateBuilder == nullptr) {
+ ALOGD("Handle or CreateBuilder symbol is null");
+ return nullptr;
+ }
+ return mCreateBuilder();
+}
+
+PersistentSurface *StagefrightPluginLoader::createInputSurface() {
+ if (mLibHandle == nullptr || mCreateInputSurface == nullptr) {
+ ALOGD("Handle or CreateInputSurface symbol is null");
+ return nullptr;
+ }
+ return mCreateInputSurface();
+}
+
+//static
+const std::unique_ptr<StagefrightPluginLoader> &StagefrightPluginLoader::GetCCodecInstance() {
+ Mutex::Autolock _l(sMutex);
+ if (!sInstance) {
+ ALOGV("Loading library");
+ sInstance.reset(new StagefrightPluginLoader("libstagefright_ccodec.so"));
+ }
+ return sInstance;
+}
+
+} // namespace android
diff --git a/media/libstagefright/StagefrightPluginLoader.h b/media/libstagefright/StagefrightPluginLoader.h
new file mode 100644
index 0000000..999d30c
--- /dev/null
+++ b/media/libstagefright/StagefrightPluginLoader.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_PLUGIN_LOADER_H_
+
+#define STAGEFRIGHT_PLUGIN_LOADER_H_
+
+#include <media/stagefright/CodecBase.h>
+#include <media/stagefright/MediaCodecListWriter.h>
+#include <media/stagefright/PersistentSurface.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+class StagefrightPluginLoader {
+public:
+ static const std::unique_ptr<StagefrightPluginLoader> &GetCCodecInstance();
+ ~StagefrightPluginLoader();
+
+ CodecBase *createCodec();
+ MediaCodecListBuilderBase *createBuilder();
+ PersistentSurface *createInputSurface();
+
+private:
+ explicit StagefrightPluginLoader(const char *libPath);
+
+ static Mutex sMutex;
+ static std::unique_ptr<StagefrightPluginLoader> sInstance;
+
+ void *mLibHandle;
+ CodecBase::CreateCodecFunc mCreateCodec;
+ MediaCodecListBuilderBase::CreateBuilderFunc mCreateBuilder;
+ CodecBase::CreateInputSurfaceFunc mCreateInputSurface;
+};
+
+} // namespace android
+
+#endif // STAGEFRIGHT_PLUGIN_LOADER_H_
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
deleted file mode 100644
index d14e86b..0000000
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-//#define LOG_NDEBUG 0
-#define LOG_TAG "SurfaceMediaSource"
-
-#include <inttypes.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/SurfaceMediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <OMX_IVCommon.h>
-#include <media/hardware/HardwareAPI.h>
-#include <media/hardware/MetadataBufferType.h>
-
-#include <ui/GraphicBuffer.h>
-#include <gui/BufferItem.h>
-#include <gui/ISurfaceComposer.h>
-#include <OMX_Component.h>
-
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <private/gui/ComposerService.h>
-
-namespace android {
-
-SurfaceMediaSource::SurfaceMediaSource(uint32_t bufferWidth, uint32_t bufferHeight) :
- mWidth(bufferWidth),
- mHeight(bufferHeight),
- mCurrentSlot(BufferQueue::INVALID_BUFFER_SLOT),
- mNumPendingBuffers(0),
- mCurrentTimestamp(0),
- mFrameRate(30),
- mStarted(false),
- mNumFramesReceived(0),
- mNumFramesEncoded(0),
- mFirstFrameTimestamp(0),
- mMaxAcquiredBufferCount(4), // XXX double-check the default
- mUseAbsoluteTimestamps(false) {
- ALOGV("SurfaceMediaSource");
-
- if (bufferWidth == 0 || bufferHeight == 0) {
- ALOGE("Invalid dimensions %dx%d", bufferWidth, bufferHeight);
- }
-
- BufferQueue::createBufferQueue(&mProducer, &mConsumer);
- mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
- mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER |
- GRALLOC_USAGE_HW_TEXTURE);
-
- sp<ISurfaceComposer> composer(ComposerService::getComposerService());
-
- // Note that we can't create an sp<...>(this) in a ctor that will not keep a
- // reference once the ctor ends, as that would cause the refcount of 'this'
- // dropping to 0 at the end of the ctor. Since all we need is a wp<...>
- // that's what we create.
- wp<ConsumerListener> listener = static_cast<ConsumerListener*>(this);
- sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
-
- status_t err = mConsumer->consumerConnect(proxy, false);
- if (err != NO_ERROR) {
- ALOGE("SurfaceMediaSource: error connecting to BufferQueue: %s (%d)",
- strerror(-err), err);
- }
-}
-
-SurfaceMediaSource::~SurfaceMediaSource() {
- ALOGV("~SurfaceMediaSource");
- CHECK(!mStarted);
-}
-
-nsecs_t SurfaceMediaSource::getTimestamp() {
- ALOGV("getTimestamp");
- Mutex::Autolock lock(mMutex);
- return mCurrentTimestamp;
-}
-
-void SurfaceMediaSource::setFrameAvailableListener(
- const sp<FrameAvailableListener>& listener) {
- ALOGV("setFrameAvailableListener");
- Mutex::Autolock lock(mMutex);
- mFrameAvailableListener = listener;
-}
-
-void SurfaceMediaSource::dumpState(String8& result) const
-{
- char buffer[1024];
- dumpState(result, "", buffer, 1024);
-}
-
-void SurfaceMediaSource::dumpState(
- String8& result,
- const char* /* prefix */,
- char* buffer,
- size_t /* SIZE */) const
-{
- Mutex::Autolock lock(mMutex);
-
- result.append(buffer);
- mConsumer->dumpState(result, "");
-}
-
-status_t SurfaceMediaSource::setFrameRate(int32_t fps)
-{
- ALOGV("setFrameRate");
- Mutex::Autolock lock(mMutex);
- const int MAX_FRAME_RATE = 60;
- if (fps < 0 || fps > MAX_FRAME_RATE) {
- return BAD_VALUE;
- }
- mFrameRate = fps;
- return OK;
-}
-
-MetadataBufferType SurfaceMediaSource::metaDataStoredInVideoBuffers() const {
- ALOGV("isMetaDataStoredInVideoBuffers");
- return kMetadataBufferTypeANWBuffer;
-}
-
-int32_t SurfaceMediaSource::getFrameRate( ) const {
- ALOGV("getFrameRate");
- Mutex::Autolock lock(mMutex);
- return mFrameRate;
-}
-
-status_t SurfaceMediaSource::start(MetaData *params)
-{
- ALOGV("start");
-
- Mutex::Autolock lock(mMutex);
-
- CHECK(!mStarted);
-
- mStartTimeNs = 0;
- int64_t startTimeUs;
- int32_t bufferCount = 0;
- if (params) {
- if (params->findInt64(kKeyTime, &startTimeUs)) {
- mStartTimeNs = startTimeUs * 1000;
- }
-
- if (!params->findInt32(kKeyNumBuffers, &bufferCount)) {
- ALOGE("Failed to find the advertised buffer count");
- return UNKNOWN_ERROR;
- }
-
- if (bufferCount <= 1) {
- ALOGE("bufferCount %d is too small", bufferCount);
- return BAD_VALUE;
- }
-
- mMaxAcquiredBufferCount = bufferCount;
- }
-
- CHECK_GT(mMaxAcquiredBufferCount, 1u);
-
- status_t err =
- mConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBufferCount);
-
- if (err != OK) {
- return err;
- }
-
- mNumPendingBuffers = 0;
- mStarted = true;
-
- return OK;
-}
-
-status_t SurfaceMediaSource::setMaxAcquiredBufferCount(size_t count) {
- ALOGV("setMaxAcquiredBufferCount(%zu)", count);
- Mutex::Autolock lock(mMutex);
-
- CHECK_GT(count, 1u);
- mMaxAcquiredBufferCount = count;
-
- return OK;
-}
-
-status_t SurfaceMediaSource::setUseAbsoluteTimestamps() {
- ALOGV("setUseAbsoluteTimestamps");
- Mutex::Autolock lock(mMutex);
- mUseAbsoluteTimestamps = true;
-
- return OK;
-}
-
-status_t SurfaceMediaSource::stop()
-{
- ALOGV("stop");
- Mutex::Autolock lock(mMutex);
-
- if (!mStarted) {
- return OK;
- }
-
- mStarted = false;
- mFrameAvailableCondition.signal();
-
- while (mNumPendingBuffers > 0) {
- ALOGI("Still waiting for %zu buffers to be returned.",
- mNumPendingBuffers);
-
-#if DEBUG_PENDING_BUFFERS
- for (size_t i = 0; i < mPendingBuffers.size(); ++i) {
- ALOGI("%d: %p", i, mPendingBuffers.itemAt(i));
- }
-#endif
-
- mMediaBuffersAvailableCondition.wait(mMutex);
- }
-
- mMediaBuffersAvailableCondition.signal();
-
- return mConsumer->consumerDisconnect();
-}
-
-sp<MetaData> SurfaceMediaSource::getFormat()
-{
- ALOGV("getFormat");
-
- Mutex::Autolock lock(mMutex);
- sp<MetaData> meta = new MetaData;
-
- meta->setInt32(kKeyWidth, mWidth);
- meta->setInt32(kKeyHeight, mHeight);
- // The encoder format is set as an opaque colorformat
- // The encoder will later find out the actual colorformat
- // from the GL Frames itself.
- meta->setInt32(kKeyColorFormat, OMX_COLOR_FormatAndroidOpaque);
- meta->setInt32(kKeyStride, mWidth);
- meta->setInt32(kKeySliceHeight, mHeight);
- meta->setInt32(kKeyFrameRate, mFrameRate);
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
- return meta;
-}
-
-// Pass the data to the MediaBuffer. Pass in only the metadata
-// Note: Call only when you have the lock
-void SurfaceMediaSource::passMetadataBuffer_l(MediaBuffer **buffer,
- ANativeWindowBuffer *bufferHandle) const {
- *buffer = new MediaBuffer(sizeof(VideoNativeMetadata));
- VideoNativeMetadata *data = (VideoNativeMetadata *)(*buffer)->data();
- if (data == NULL) {
- ALOGE("Cannot allocate memory for metadata buffer!");
- return;
- }
- data->eType = metaDataStoredInVideoBuffers();
- data->pBuffer = bufferHandle;
- data->nFenceFd = -1;
- ALOGV("handle = %p, offset = %zu, length = %zu",
- bufferHandle, (*buffer)->range_length(), (*buffer)->range_offset());
-}
-
-status_t SurfaceMediaSource::read(
- MediaBuffer **buffer, const ReadOptions * /* options */) {
- ALOGV("read");
- Mutex::Autolock lock(mMutex);
-
- *buffer = NULL;
-
- while (mStarted && mNumPendingBuffers == mMaxAcquiredBufferCount) {
- mMediaBuffersAvailableCondition.wait(mMutex);
- }
-
- // Update the current buffer info
- // TODO: mCurrentSlot can be made a bufferstate since there
- // can be more than one "current" slots.
-
- BufferItem item;
- // If the recording has started and the queue is empty, then just
- // wait here till the frames come in from the client side
- while (mStarted) {
-
- status_t err = mConsumer->acquireBuffer(&item, 0);
- if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
- // wait for a buffer to be queued
- mFrameAvailableCondition.wait(mMutex);
- } else if (err == OK) {
- err = item.mFence->waitForever("SurfaceMediaSource::read");
- if (err) {
- ALOGW("read: failed to wait for buffer fence: %d", err);
- }
-
- // First time seeing the buffer? Added it to the SMS slot
- if (item.mGraphicBuffer != NULL) {
- mSlots[item.mSlot].mGraphicBuffer = item.mGraphicBuffer;
- }
- mSlots[item.mSlot].mFrameNumber = item.mFrameNumber;
-
- // check for the timing of this buffer
- if (mNumFramesReceived == 0 && !mUseAbsoluteTimestamps) {
- mFirstFrameTimestamp = item.mTimestamp;
- // Initial delay
- if (mStartTimeNs > 0) {
- if (item.mTimestamp < mStartTimeNs) {
- // This frame predates start of record, discard
- mConsumer->releaseBuffer(
- item.mSlot, item.mFrameNumber, EGL_NO_DISPLAY,
- EGL_NO_SYNC_KHR, Fence::NO_FENCE);
- continue;
- }
- mStartTimeNs = item.mTimestamp - mStartTimeNs;
- }
- }
- item.mTimestamp = mStartTimeNs + (item.mTimestamp - mFirstFrameTimestamp);
-
- mNumFramesReceived++;
-
- break;
- } else {
- ALOGE("read: acquire failed with error code %d", err);
- return ERROR_END_OF_STREAM;
- }
-
- }
-
- // If the loop was exited as a result of stopping the recording,
- // it is OK
- if (!mStarted) {
- ALOGV("Read: SurfaceMediaSource is stopped. Returning ERROR_END_OF_STREAM.");
- return ERROR_END_OF_STREAM;
- }
-
- mCurrentSlot = item.mSlot;
-
- // First time seeing the buffer? Added it to the SMS slot
- if (item.mGraphicBuffer != NULL) {
- mSlots[item.mSlot].mGraphicBuffer = item.mGraphicBuffer;
- }
- mSlots[item.mSlot].mFrameNumber = item.mFrameNumber;
-
- mCurrentBuffers.push_back(mSlots[mCurrentSlot].mGraphicBuffer);
- int64_t prevTimeStamp = mCurrentTimestamp;
- mCurrentTimestamp = item.mTimestamp;
-
- mNumFramesEncoded++;
- // Pass the data to the MediaBuffer. Pass in only the metadata
-
- passMetadataBuffer_l(buffer, mSlots[mCurrentSlot].mGraphicBuffer->getNativeBuffer());
-
- (*buffer)->setObserver(this);
- (*buffer)->add_ref();
- (*buffer)->meta_data()->setInt64(kKeyTime, mCurrentTimestamp / 1000);
- ALOGV("Frames encoded = %d, timestamp = %" PRId64 ", time diff = %" PRId64,
- mNumFramesEncoded, mCurrentTimestamp / 1000,
- mCurrentTimestamp / 1000 - prevTimeStamp / 1000);
-
- ++mNumPendingBuffers;
-
-#if DEBUG_PENDING_BUFFERS
- mPendingBuffers.push_back(*buffer);
-#endif
-
- ALOGV("returning mbuf %p", *buffer);
-
- return OK;
-}
-
-static buffer_handle_t getMediaBufferHandle(MediaBuffer *buffer) {
- // need to convert to char* for pointer arithmetic and then
- // copy the byte stream into our handle
- buffer_handle_t bufferHandle;
- memcpy(&bufferHandle, (char*)(buffer->data()) + 4, sizeof(buffer_handle_t));
- return bufferHandle;
-}
-
-void SurfaceMediaSource::signalBufferReturned(MediaBuffer *buffer) {
- ALOGV("signalBufferReturned");
-
- bool foundBuffer = false;
-
- Mutex::Autolock lock(mMutex);
-
- buffer_handle_t bufferHandle = getMediaBufferHandle(buffer);
-
- for (size_t i = 0; i < mCurrentBuffers.size(); i++) {
- if (mCurrentBuffers[i]->handle == bufferHandle) {
- mCurrentBuffers.removeAt(i);
- foundBuffer = true;
- break;
- }
- }
-
- if (!foundBuffer) {
- ALOGW("returned buffer was not found in the current buffer list");
- }
-
- for (int id = 0; id < BufferQueue::NUM_BUFFER_SLOTS; id++) {
- if (mSlots[id].mGraphicBuffer == NULL) {
- continue;
- }
-
- if (bufferHandle == mSlots[id].mGraphicBuffer->handle) {
- ALOGV("Slot %d returned, matches handle = %p", id,
- mSlots[id].mGraphicBuffer->handle);
-
- mConsumer->releaseBuffer(id, mSlots[id].mFrameNumber,
- EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
- Fence::NO_FENCE);
-
- buffer->setObserver(0);
- buffer->release();
-
- foundBuffer = true;
- break;
- }
- }
-
- if (!foundBuffer) {
- CHECK(!"signalBufferReturned: bogus buffer");
- }
-
-#if DEBUG_PENDING_BUFFERS
- for (size_t i = 0; i < mPendingBuffers.size(); ++i) {
- if (mPendingBuffers.itemAt(i) == buffer) {
- mPendingBuffers.removeAt(i);
- break;
- }
- }
-#endif
-
- --mNumPendingBuffers;
- mMediaBuffersAvailableCondition.broadcast();
-}
-
-// Part of the BufferQueue::ConsumerListener
-void SurfaceMediaSource::onFrameAvailable(const BufferItem& /* item */) {
- ALOGV("onFrameAvailable");
-
- sp<FrameAvailableListener> listener;
- { // scope for the lock
- Mutex::Autolock lock(mMutex);
- mFrameAvailableCondition.broadcast();
- listener = mFrameAvailableListener;
- }
-
- if (listener != NULL) {
- ALOGV("actually calling onFrameAvailable");
- listener->onFrameAvailable();
- }
-}
-
-// SurfaceMediaSource hijacks this event to assume
-// the producer is disconnecting from the BufferQueue
-// and that it should stop the recording
-void SurfaceMediaSource::onBuffersReleased() {
- ALOGV("onBuffersReleased");
-
- Mutex::Autolock lock(mMutex);
-
- mFrameAvailableCondition.signal();
-
- for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
- mSlots[i].mGraphicBuffer = 0;
- }
-}
-
-void SurfaceMediaSource::onSidebandStreamChanged() {
- ALOG_ASSERT(false, "SurfaceMediaSource can't consume sideband streams");
-}
-
-} // end of namespace android
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index b7c1598..9e11a94 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -18,8 +18,8 @@
#define LOG_TAG "SurfaceUtils"
#include <utils/Log.h>
+#include <media/hardware/VideoAPI.h>
#include <media/stagefright/SurfaceUtils.h>
-
#include <gui/Surface.h>
namespace android {
@@ -128,6 +128,40 @@
return NO_ERROR;
}
+void setNativeWindowHdrMetadata(ANativeWindow *nativeWindow, HDRStaticInfo *info) {
+ struct android_smpte2086_metadata smpte2086_meta = {
+ .displayPrimaryRed = {
+ info->sType1.mR.x * 0.00002f,
+ info->sType1.mR.y * 0.00002f
+ },
+ .displayPrimaryGreen = {
+ info->sType1.mG.x * 0.00002f,
+ info->sType1.mG.y * 0.00002f
+ },
+ .displayPrimaryBlue = {
+ info->sType1.mB.x * 0.00002f,
+ info->sType1.mB.y * 0.00002f
+ },
+ .whitePoint = {
+ info->sType1.mW.x * 0.00002f,
+ info->sType1.mW.y * 0.00002f
+ },
+ .maxLuminance = (float) info->sType1.mMaxDisplayLuminance,
+ .minLuminance = info->sType1.mMinDisplayLuminance * 0.0001f
+ };
+
+ int err = native_window_set_buffers_smpte2086_metadata(nativeWindow, &smpte2086_meta);
+ ALOGW_IF(err != 0, "failed to set smpte2086 metadata on surface (%d)", err);
+
+ struct android_cta861_3_metadata cta861_meta = {
+ .maxContentLightLevel = (float) info->sType1.mMaxContentLightLevel,
+ .maxFrameAverageLightLevel = (float) info->sType1.mMaxFrameAverageLightLevel
+ };
+
+ err = native_window_set_buffers_cta861_3_metadata(nativeWindow, &cta861_meta);
+ ALOGW_IF(err != 0, "failed to set cta861_3 metadata on surface (%d)", err);
+}
+
status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */) {
status_t err = NO_ERROR;
ANativeWindowBuffer* anb = NULL;
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 3ef8f2a..cf5e91e 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -27,7 +27,6 @@
#include "include/ESDS.h"
#include "include/HevcUtils.h"
-#include <arpa/inet.h>
#include <cutils/properties.h>
#include <media/openmax/OMX_Audio.h>
#include <media/openmax/OMX_Video.h>
@@ -37,6 +36,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALookup.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaDefs.h>
#include <media/AudioSystem.h>
@@ -47,39 +47,6 @@
namespace android {
-uint16_t U16_AT(const uint8_t *ptr) {
- return ptr[0] << 8 | ptr[1];
-}
-
-uint32_t U32_AT(const uint8_t *ptr) {
- return ptr[0] << 24 | ptr[1] << 16 | ptr[2] << 8 | ptr[3];
-}
-
-uint64_t U64_AT(const uint8_t *ptr) {
- return ((uint64_t)U32_AT(ptr)) << 32 | U32_AT(ptr + 4);
-}
-
-uint16_t U16LE_AT(const uint8_t *ptr) {
- return ptr[0] | (ptr[1] << 8);
-}
-
-uint32_t U32LE_AT(const uint8_t *ptr) {
- return ptr[3] << 24 | ptr[2] << 16 | ptr[1] << 8 | ptr[0];
-}
-
-uint64_t U64LE_AT(const uint8_t *ptr) {
- return ((uint64_t)U32LE_AT(ptr + 4)) << 32 | U32LE_AT(ptr);
-}
-
-// XXX warning: these won't work on big-endian host.
-uint64_t ntoh64(uint64_t x) {
- return ((uint64_t)ntohl(x & 0xffffffff) << 32) | ntohl(x >> 32);
-}
-
-uint64_t hton64(uint64_t x) {
- return ((uint64_t)htonl(x & 0xffffffff) << 32) | htonl(x >> 32);
-}
-
static status_t copyNALUToABuffer(sp<ABuffer> *buffer, const uint8_t *ptr, size_t length) {
if (((*buffer)->size() + 4 + length) > ((*buffer)->capacity() - (*buffer)->offset())) {
sp<ABuffer> tmpBuffer = new (std::nothrow) ABuffer((*buffer)->size() + 4 + length + 1024);
@@ -187,6 +154,7 @@
{ 23, OMX_AUDIO_AACObjectLD },
{ 29, OMX_AUDIO_AACObjectHE_PS },
{ 39, OMX_AUDIO_AACObjectELD },
+ { 42, OMX_AUDIO_AACObjectXHE },
};
OMX_AUDIO_AACPROFILETYPE profile;
@@ -336,6 +304,8 @@
const static ALookup<uint8_t, OMX_VIDEO_HEVCPROFILETYPE> profiles {
{ 1, OMX_VIDEO_HEVCProfileMain },
{ 2, OMX_VIDEO_HEVCProfileMain10 },
+ // use Main for Main Still Picture decoding
+ { 3, OMX_VIDEO_HEVCProfileMain },
};
// set profile & level if they are recognized
@@ -343,6 +313,7 @@
OMX_VIDEO_HEVCLEVELTYPE codecLevel;
if (!profiles.map(profile, &codecProfile)) {
if (ptr[2] & 0x40 /* general compatibility flag 1 */) {
+ // Note that this case covers Main Still Picture too
codecProfile = OMX_VIDEO_HEVCProfileMain;
} else if (ptr[2] & 0x20 /* general compatibility flag 2 */) {
codecProfile = OMX_VIDEO_HEVCProfileMain10;
@@ -672,7 +643,8 @@
msg->setString("language", lang);
}
- if (!strncasecmp("video/", mime, 6)) {
+ if (!strncasecmp("video/", mime, 6) ||
+ !strncasecmp("image/", mime, 6)) {
int32_t width, height;
if (!meta->findInt32(kKeyWidth, &width)
|| !meta->findInt32(kKeyHeight, &height)) {
@@ -696,6 +668,23 @@
msg->setInt32("sar-height", sarHeight);
}
+ if (!strncasecmp("image/", mime, 6)) {
+ int32_t tileWidth, tileHeight, gridRows, gridCols;
+ if (meta->findInt32(kKeyTileWidth, &tileWidth)
+ && meta->findInt32(kKeyTileHeight, &tileHeight)
+ && meta->findInt32(kKeyGridRows, &gridRows)
+ && meta->findInt32(kKeyGridCols, &gridCols)) {
+ msg->setInt32("tile-width", tileWidth);
+ msg->setInt32("tile-height", tileHeight);
+ msg->setInt32("grid-rows", gridRows);
+ msg->setInt32("grid-cols", gridCols);
+ }
+ int32_t isPrimary;
+ if (meta->findInt32(kKeyTrackIsDefault, &isPrimary) && isPrimary) {
+ msg->setInt32("is-default", 1);
+ }
+ }
+
int32_t colorFormat;
if (meta->findInt32(kKeyColorFormat, &colorFormat)) {
msg->setInt32("color-format", colorFormat);
@@ -1327,7 +1316,7 @@
meta->setCString(kKeyMediaLanguage, lang.c_str());
}
- if (mime.startsWith("video/")) {
+ if (mime.startsWith("video/") || mime.startsWith("image/")) {
int32_t width;
int32_t height;
if (msg->findInt32("width", &width) && msg->findInt32("height", &height)) {
@@ -1351,6 +1340,26 @@
meta->setInt32(kKeyDisplayHeight, displayHeight);
}
+ if (mime.startsWith("image/")){
+ int32_t isPrimary;
+ if (msg->findInt32("is-default", &isPrimary) && isPrimary) {
+ meta->setInt32(kKeyTrackIsDefault, 1);
+ }
+ int32_t tileWidth, tileHeight, gridRows, gridCols;
+ if (msg->findInt32("tile-width", &tileWidth)) {
+ meta->setInt32(kKeyTileWidth, tileWidth);
+ }
+ if (msg->findInt32("tile-height", &tileHeight)) {
+ meta->setInt32(kKeyTileHeight, tileHeight);
+ }
+ if (msg->findInt32("grid-rows", &gridRows)) {
+ meta->setInt32(kKeyGridRows, gridRows);
+ }
+ if (msg->findInt32("grid-cols", &gridCols)) {
+ meta->setInt32(kKeyGridCols, gridCols);
+ }
+ }
+
int32_t colorFormat;
if (msg->findInt32("color-format", &colorFormat)) {
meta->setInt32(kKeyColorFormat, colorFormat);
@@ -1467,7 +1476,8 @@
// for transporting the CSD to muxers.
reassembleESDS(csd0, esds.data());
meta->setData(kKeyESDS, kKeyESDS, esds.data(), esds.size());
- } else if (mime == MEDIA_MIMETYPE_VIDEO_HEVC) {
+ } else if (mime == MEDIA_MIMETYPE_VIDEO_HEVC ||
+ mime == MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC) {
std::vector<uint8_t> hvcc(csd0size + 1024);
size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
meta->setData(kKeyHVCC, kKeyHVCC, hvcc.data(), outsize);
@@ -1601,6 +1611,7 @@
{ OMX_AUDIO_AACObjectLD, AUDIO_FORMAT_AAC_LD},
{ OMX_AUDIO_AACObjectHE_PS, AUDIO_FORMAT_AAC_HE_V2},
{ OMX_AUDIO_AACObjectELD, AUDIO_FORMAT_AAC_ELD},
+ { OMX_AUDIO_AACObjectXHE, AUDIO_FORMAT_AAC_XHE},
{ OMX_AUDIO_AACObjectNull, AUDIO_FORMAT_AAC},
};
@@ -1809,41 +1820,17 @@
}
void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering) {
- msg->setInt32("init-mode", buffering.mInitialBufferingMode);
- msg->setInt32("rebuffer-mode", buffering.mRebufferingMode);
- msg->setInt32("init-ms", buffering.mInitialWatermarkMs);
- msg->setInt32("init-kb", buffering.mInitialWatermarkKB);
- msg->setInt32("rebuffer-low-ms", buffering.mRebufferingWatermarkLowMs);
- msg->setInt32("rebuffer-high-ms", buffering.mRebufferingWatermarkHighMs);
- msg->setInt32("rebuffer-low-kb", buffering.mRebufferingWatermarkLowKB);
- msg->setInt32("rebuffer-high-kb", buffering.mRebufferingWatermarkHighKB);
+ msg->setInt32("init-ms", buffering.mInitialMarkMs);
+ msg->setInt32("resume-playback-ms", buffering.mResumePlaybackMarkMs);
}
void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */) {
int32_t value;
- if (msg->findInt32("init-mode", &value)) {
- buffering->mInitialBufferingMode = (BufferingMode)value;
- }
- if (msg->findInt32("rebuffer-mode", &value)) {
- buffering->mRebufferingMode = (BufferingMode)value;
- }
if (msg->findInt32("init-ms", &value)) {
- buffering->mInitialWatermarkMs = value;
+ buffering->mInitialMarkMs = value;
}
- if (msg->findInt32("init-kb", &value)) {
- buffering->mInitialWatermarkKB = value;
- }
- if (msg->findInt32("rebuffer-low-ms", &value)) {
- buffering->mRebufferingWatermarkLowMs = value;
- }
- if (msg->findInt32("rebuffer-high-ms", &value)) {
- buffering->mRebufferingWatermarkHighMs = value;
- }
- if (msg->findInt32("rebuffer-low-kb", &value)) {
- buffering->mRebufferingWatermarkLowKB = value;
- }
- if (msg->findInt32("rebuffer-high-kb", &value)) {
- buffering->mRebufferingWatermarkHighKB = value;
+ if (msg->findInt32("resume-playback-ms", &value)) {
+ buffering->mResumePlaybackMarkMs = value;
}
}
@@ -1879,13 +1866,5 @@
return result;
}
-void MakeFourCCString(uint32_t x, char *s) {
- s[0] = x >> 24;
- s[1] = (x >> 16) & 0xff;
- s[2] = (x >> 8) & 0xff;
- s[3] = x & 0xff;
- s[4] = '\0';
-}
-
} // namespace android
diff --git a/media/libstagefright/VBRISeeker.cpp b/media/libstagefright/VBRISeeker.cpp
deleted file mode 100644
index 5b8f23a..0000000
--- a/media/libstagefright/VBRISeeker.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "VBRISeeker"
-
-#include <inttypes.h>
-
-#include <utils/Log.h>
-
-#include "include/VBRISeeker.h"
-
-#include "include/avc_utils.h"
-#include "include/MP3Extractor.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/Utils.h>
-
-namespace android {
-
-static uint32_t U24_AT(const uint8_t *ptr) {
- return ptr[0] << 16 | ptr[1] << 8 | ptr[2];
-}
-
-// static
-sp<VBRISeeker> VBRISeeker::CreateFromSource(
- const sp<DataSource> &source, off64_t post_id3_pos) {
- off64_t pos = post_id3_pos;
-
- uint8_t header[4];
- ssize_t n = source->readAt(pos, header, sizeof(header));
- if (n < (ssize_t)sizeof(header)) {
- return NULL;
- }
-
- uint32_t tmp = U32_AT(&header[0]);
- size_t frameSize;
- int sampleRate;
- if (!GetMPEGAudioFrameSize(tmp, &frameSize, &sampleRate)) {
- return NULL;
- }
-
- // VBRI header follows 32 bytes after the header _ends_.
- pos += sizeof(header) + 32;
-
- uint8_t vbriHeader[26];
- n = source->readAt(pos, vbriHeader, sizeof(vbriHeader));
- if (n < (ssize_t)sizeof(vbriHeader)) {
- return NULL;
- }
-
- if (memcmp(vbriHeader, "VBRI", 4)) {
- return NULL;
- }
-
- size_t numFrames = U32_AT(&vbriHeader[14]);
-
- int64_t durationUs =
- numFrames * 1000000ll * (sampleRate >= 32000 ? 1152 : 576) / sampleRate;
-
- ALOGV("duration = %.2f secs", durationUs / 1E6);
-
- size_t numEntries = U16_AT(&vbriHeader[18]);
- size_t entrySize = U16_AT(&vbriHeader[22]);
- size_t scale = U16_AT(&vbriHeader[20]);
-
- ALOGV("%zu entries, scale=%zu, size_per_entry=%zu",
- numEntries,
- scale,
- entrySize);
-
- if (entrySize > 4) {
- ALOGE("invalid VBRI entry size: %zu", entrySize);
- return NULL;
- }
-
- sp<VBRISeeker> seeker = new (std::nothrow) VBRISeeker;
- if (seeker == NULL) {
- ALOGW("Couldn't allocate VBRISeeker");
- return NULL;
- }
-
- size_t totalEntrySize = numEntries * entrySize;
- uint8_t *buffer = new (std::nothrow) uint8_t[totalEntrySize];
- if (!buffer) {
- ALOGW("Couldn't allocate %zu bytes", totalEntrySize);
- return NULL;
- }
-
- n = source->readAt(pos + sizeof(vbriHeader), buffer, totalEntrySize);
- if (n < (ssize_t)totalEntrySize) {
- delete[] buffer;
- buffer = NULL;
-
- return NULL;
- }
-
- seeker->mBasePos = post_id3_pos + frameSize;
- // only update mDurationUs if the calculated duration is valid (non zero)
- // otherwise, leave duration at -1 so that getDuration() and getOffsetForTime()
- // return false when called, to indicate that this vbri tag does not have the
- // requested information
- if (durationUs) {
- seeker->mDurationUs = durationUs;
- }
-
- off64_t offset = post_id3_pos;
- for (size_t i = 0; i < numEntries; ++i) {
- uint32_t numBytes;
- switch (entrySize) {
- case 1: numBytes = buffer[i]; break;
- case 2: numBytes = U16_AT(buffer + 2 * i); break;
- case 3: numBytes = U24_AT(buffer + 3 * i); break;
- default:
- {
- CHECK_EQ(entrySize, 4u);
- numBytes = U32_AT(buffer + 4 * i); break;
- }
- }
-
- numBytes *= scale;
-
- seeker->mSegments.push(numBytes);
-
- ALOGV("entry #%zu: %u offset %#016llx", i, numBytes, (long long)offset);
- offset += numBytes;
- }
-
- delete[] buffer;
- buffer = NULL;
-
- ALOGI("Found VBRI header.");
-
- return seeker;
-}
-
-VBRISeeker::VBRISeeker()
- : mDurationUs(-1) {
-}
-
-bool VBRISeeker::getDuration(int64_t *durationUs) {
- if (mDurationUs < 0) {
- return false;
- }
-
- *durationUs = mDurationUs;
-
- return true;
-}
-
-bool VBRISeeker::getOffsetForTime(int64_t *timeUs, off64_t *pos) {
- if (mDurationUs < 0 || mSegments.size() == 0) {
- return false;
- }
-
- int64_t segmentDurationUs = mDurationUs / mSegments.size();
-
- int64_t nowUs = 0;
- *pos = mBasePos;
- size_t segmentIndex = 0;
- while (segmentIndex < mSegments.size() && nowUs < *timeUs) {
- nowUs += segmentDurationUs;
- *pos += mSegments.itemAt(segmentIndex++);
- }
-
- ALOGV("getOffsetForTime %lld us => 0x%016llx", (long long)*timeUs, (long long)*pos);
-
- *timeUs = nowUs;
-
- return true;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
deleted file mode 100644
index 780b746..0000000
--- a/media/libstagefright/WAVExtractor.cpp
+++ /dev/null
@@ -1,570 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "WAVExtractor"
-#include <utils/Log.h>
-
-#include "include/WAVExtractor.h"
-
-#include <audio_utils/primitives.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <utils/String8.h>
-#include <cutils/bitops.h>
-
-#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
-
-namespace android {
-
-enum {
- WAVE_FORMAT_PCM = 0x0001,
- WAVE_FORMAT_IEEE_FLOAT = 0x0003,
- WAVE_FORMAT_ALAW = 0x0006,
- WAVE_FORMAT_MULAW = 0x0007,
- WAVE_FORMAT_MSGSM = 0x0031,
- WAVE_FORMAT_EXTENSIBLE = 0xFFFE
-};
-
-static const char* WAVEEXT_SUBFORMAT = "\x00\x00\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71";
-static const char* AMBISONIC_SUBFORMAT = "\x00\x00\x21\x07\xD3\x11\x86\x44\xC8\xC1\xCA\x00\x00\x00";
-
-static uint32_t U32_LE_AT(const uint8_t *ptr) {
- return ptr[3] << 24 | ptr[2] << 16 | ptr[1] << 8 | ptr[0];
-}
-
-static uint16_t U16_LE_AT(const uint8_t *ptr) {
- return ptr[1] << 8 | ptr[0];
-}
-
-struct WAVSource : public MediaSource {
- WAVSource(
- const sp<DataSource> &dataSource,
- const sp<MetaData> &meta,
- uint16_t waveFormat,
- int32_t bitsPerSample,
- off64_t offset, size_t size);
-
- virtual status_t start(MetaData *params = NULL);
- virtual status_t stop();
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
-
- virtual bool supportNonblockingRead() { return true; }
-
-protected:
- virtual ~WAVSource();
-
-private:
- static const size_t kMaxFrameSize;
-
- sp<DataSource> mDataSource;
- sp<MetaData> mMeta;
- uint16_t mWaveFormat;
- int32_t mSampleRate;
- int32_t mNumChannels;
- int32_t mBitsPerSample;
- off64_t mOffset;
- size_t mSize;
- bool mStarted;
- MediaBufferGroup *mGroup;
- off64_t mCurrentPos;
-
- WAVSource(const WAVSource &);
- WAVSource &operator=(const WAVSource &);
-};
-
-WAVExtractor::WAVExtractor(const sp<DataSource> &source)
- : mDataSource(source),
- mValidFormat(false),
- mChannelMask(CHANNEL_MASK_USE_CHANNEL_ORDER) {
- mInitCheck = init();
-}
-
-WAVExtractor::~WAVExtractor() {
-}
-
-sp<MetaData> WAVExtractor::getMetaData() {
- sp<MetaData> meta = new MetaData;
-
- if (mInitCheck != OK) {
- return meta;
- }
-
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_WAV);
-
- return meta;
-}
-
-size_t WAVExtractor::countTracks() {
- return mInitCheck == OK ? 1 : 0;
-}
-
-sp<IMediaSource> WAVExtractor::getTrack(size_t index) {
- if (mInitCheck != OK || index > 0) {
- return NULL;
- }
-
- return new WAVSource(
- mDataSource, mTrackMeta,
- mWaveFormat, mBitsPerSample, mDataOffset, mDataSize);
-}
-
-sp<MetaData> WAVExtractor::getTrackMetaData(
- size_t index, uint32_t /* flags */) {
- if (mInitCheck != OK || index > 0) {
- return NULL;
- }
-
- return mTrackMeta;
-}
-
-status_t WAVExtractor::init() {
- uint8_t header[12];
- if (mDataSource->readAt(
- 0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
- return NO_INIT;
- }
-
- if (memcmp(header, "RIFF", 4) || memcmp(&header[8], "WAVE", 4)) {
- return NO_INIT;
- }
-
- size_t totalSize = U32_LE_AT(&header[4]);
-
- off64_t offset = 12;
- size_t remainingSize = totalSize;
- while (remainingSize >= 8) {
- uint8_t chunkHeader[8];
- if (mDataSource->readAt(offset, chunkHeader, 8) < 8) {
- return NO_INIT;
- }
-
- remainingSize -= 8;
- offset += 8;
-
- uint32_t chunkSize = U32_LE_AT(&chunkHeader[4]);
-
- if (chunkSize > remainingSize) {
- return NO_INIT;
- }
-
- if (!memcmp(chunkHeader, "fmt ", 4)) {
- if (chunkSize < 16) {
- return NO_INIT;
- }
-
- uint8_t formatSpec[40];
- if (mDataSource->readAt(offset, formatSpec, 2) < 2) {
- return NO_INIT;
- }
-
- mWaveFormat = U16_LE_AT(formatSpec);
- if (mWaveFormat != WAVE_FORMAT_PCM
- && mWaveFormat != WAVE_FORMAT_IEEE_FLOAT
- && mWaveFormat != WAVE_FORMAT_ALAW
- && mWaveFormat != WAVE_FORMAT_MULAW
- && mWaveFormat != WAVE_FORMAT_MSGSM
- && mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
- return ERROR_UNSUPPORTED;
- }
-
- uint8_t fmtSize = 16;
- if (mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
- fmtSize = 40;
- }
- if (mDataSource->readAt(offset, formatSpec, fmtSize) < fmtSize) {
- return NO_INIT;
- }
-
- mNumChannels = U16_LE_AT(&formatSpec[2]);
-
- if (mNumChannels < 1 || mNumChannels > 8) {
- ALOGE("Unsupported number of channels (%d)", mNumChannels);
- return ERROR_UNSUPPORTED;
- }
-
- if (mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
- if (mNumChannels != 1 && mNumChannels != 2) {
- ALOGW("More than 2 channels (%d) in non-WAVE_EXT, unknown channel mask",
- mNumChannels);
- }
- }
-
- mSampleRate = U32_LE_AT(&formatSpec[4]);
-
- if (mSampleRate == 0) {
- return ERROR_MALFORMED;
- }
-
- mBitsPerSample = U16_LE_AT(&formatSpec[14]);
-
- if (mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
- uint16_t validBitsPerSample = U16_LE_AT(&formatSpec[18]);
- if (validBitsPerSample != mBitsPerSample) {
- if (validBitsPerSample != 0) {
- ALOGE("validBits(%d) != bitsPerSample(%d) are not supported",
- validBitsPerSample, mBitsPerSample);
- return ERROR_UNSUPPORTED;
- } else {
- // we only support validBitsPerSample == bitsPerSample but some WAV_EXT
- // writers don't correctly set the valid bits value, and leave it at 0.
- ALOGW("WAVE_EXT has 0 valid bits per sample, ignoring");
- }
- }
-
- mChannelMask = U32_LE_AT(&formatSpec[20]);
- ALOGV("numChannels=%d channelMask=0x%x", mNumChannels, mChannelMask);
- if ((mChannelMask >> 18) != 0) {
- ALOGE("invalid channel mask 0x%x", mChannelMask);
- return ERROR_MALFORMED;
- }
-
- if ((mChannelMask != CHANNEL_MASK_USE_CHANNEL_ORDER)
- && (popcount(mChannelMask) != mNumChannels)) {
- ALOGE("invalid number of channels (%d) in channel mask (0x%x)",
- popcount(mChannelMask), mChannelMask);
- return ERROR_MALFORMED;
- }
-
- // In a WAVE_EXT header, the first two bytes of the GUID stored at byte 24 contain
- // the sample format, using the same definitions as a regular WAV header
- mWaveFormat = U16_LE_AT(&formatSpec[24]);
- if (memcmp(&formatSpec[26], WAVEEXT_SUBFORMAT, 14) &&
- memcmp(&formatSpec[26], AMBISONIC_SUBFORMAT, 14)) {
- ALOGE("unsupported GUID");
- return ERROR_UNSUPPORTED;
- }
- }
-
- if (mWaveFormat == WAVE_FORMAT_PCM) {
- if (mBitsPerSample != 8 && mBitsPerSample != 16
- && mBitsPerSample != 24 && mBitsPerSample != 32) {
- return ERROR_UNSUPPORTED;
- }
- } else if (mWaveFormat == WAVE_FORMAT_IEEE_FLOAT) {
- if (mBitsPerSample != 32) { // TODO we don't support double
- return ERROR_UNSUPPORTED;
- }
- }
- else if (mWaveFormat == WAVE_FORMAT_MSGSM) {
- if (mBitsPerSample != 0) {
- return ERROR_UNSUPPORTED;
- }
- } else if (mWaveFormat == WAVE_FORMAT_MULAW || mWaveFormat == WAVE_FORMAT_ALAW) {
- if (mBitsPerSample != 8) {
- return ERROR_UNSUPPORTED;
- }
- } else {
- return ERROR_UNSUPPORTED;
- }
-
- mValidFormat = true;
- } else if (!memcmp(chunkHeader, "data", 4)) {
- if (mValidFormat) {
- mDataOffset = offset;
- mDataSize = chunkSize;
-
- mTrackMeta = new MetaData;
-
- switch (mWaveFormat) {
- case WAVE_FORMAT_PCM:
- case WAVE_FORMAT_IEEE_FLOAT:
- mTrackMeta->setCString(
- kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
- break;
- case WAVE_FORMAT_ALAW:
- mTrackMeta->setCString(
- kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_ALAW);
- break;
- case WAVE_FORMAT_MSGSM:
- mTrackMeta->setCString(
- kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MSGSM);
- break;
- default:
- CHECK_EQ(mWaveFormat, (uint16_t)WAVE_FORMAT_MULAW);
- mTrackMeta->setCString(
- kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_MLAW);
- break;
- }
-
- mTrackMeta->setInt32(kKeyChannelCount, mNumChannels);
- mTrackMeta->setInt32(kKeyChannelMask, mChannelMask);
- mTrackMeta->setInt32(kKeySampleRate, mSampleRate);
- mTrackMeta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
-
- int64_t durationUs = 0;
- if (mWaveFormat == WAVE_FORMAT_MSGSM) {
- // 65 bytes decode to 320 8kHz samples
- durationUs =
- 1000000LL * (mDataSize / 65 * 320) / 8000;
- } else {
- size_t bytesPerSample = mBitsPerSample >> 3;
-
- if (!bytesPerSample || !mNumChannels)
- return ERROR_MALFORMED;
-
- size_t num_samples = mDataSize / (mNumChannels * bytesPerSample);
-
- if (!mSampleRate)
- return ERROR_MALFORMED;
-
- durationUs =
- 1000000LL * num_samples / mSampleRate;
- }
-
- mTrackMeta->setInt64(kKeyDuration, durationUs);
-
- return OK;
- }
- }
-
- offset += chunkSize;
- }
-
- return NO_INIT;
-}
-
-const size_t WAVSource::kMaxFrameSize = 32768;
-
-WAVSource::WAVSource(
- const sp<DataSource> &dataSource,
- const sp<MetaData> &meta,
- uint16_t waveFormat,
- int32_t bitsPerSample,
- off64_t offset, size_t size)
- : mDataSource(dataSource),
- mMeta(meta),
- mWaveFormat(waveFormat),
- mSampleRate(0),
- mNumChannels(0),
- mBitsPerSample(bitsPerSample),
- mOffset(offset),
- mSize(size),
- mStarted(false),
- mGroup(NULL) {
- CHECK(mMeta->findInt32(kKeySampleRate, &mSampleRate));
- CHECK(mMeta->findInt32(kKeyChannelCount, &mNumChannels));
-
- mMeta->setInt32(kKeyMaxInputSize, kMaxFrameSize);
-}
-
-WAVSource::~WAVSource() {
- if (mStarted) {
- stop();
- }
-}
-
-status_t WAVSource::start(MetaData * /* params */) {
- ALOGV("WAVSource::start");
-
- CHECK(!mStarted);
-
- // some WAV files may have large audio buffers that use shared memory transfer.
- mGroup = new MediaBufferGroup(4 /* buffers */, kMaxFrameSize);
-
- if (mBitsPerSample == 8) {
- // As a temporary buffer for 8->16 bit conversion.
- mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
- }
-
- mCurrentPos = mOffset;
-
- mStarted = true;
-
- return OK;
-}
-
-status_t WAVSource::stop() {
- ALOGV("WAVSource::stop");
-
- CHECK(mStarted);
-
- delete mGroup;
- mGroup = NULL;
-
- mStarted = false;
-
- return OK;
-}
-
-sp<MetaData> WAVSource::getFormat() {
- ALOGV("WAVSource::getFormat");
-
- return mMeta;
-}
-
-status_t WAVSource::read(
- MediaBuffer **out, const ReadOptions *options) {
- *out = NULL;
-
- if (options != nullptr && options->getNonBlocking() && !mGroup->has_buffers()) {
- return WOULD_BLOCK;
- }
-
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
- int64_t pos = 0;
-
- if (mWaveFormat == WAVE_FORMAT_MSGSM) {
- // 65 bytes decode to 320 8kHz samples
- int64_t samplenumber = (seekTimeUs * mSampleRate) / 1000000;
- int64_t framenumber = samplenumber / 320;
- pos = framenumber * 65;
- } else {
- pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * (mBitsPerSample >> 3);
- }
- if (pos > (off64_t)mSize) {
- pos = mSize;
- }
- mCurrentPos = pos + mOffset;
- }
-
- MediaBuffer *buffer;
- status_t err = mGroup->acquire_buffer(&buffer);
- if (err != OK) {
- return err;
- }
-
- // make sure that maxBytesToRead is multiple of 3, in 24-bit case
- size_t maxBytesToRead =
- mBitsPerSample == 8 ? kMaxFrameSize / 2 :
- (mBitsPerSample == 24 ? 3*(kMaxFrameSize/3): kMaxFrameSize);
-
- size_t maxBytesAvailable =
- (mCurrentPos - mOffset >= (off64_t)mSize)
- ? 0 : mSize - (mCurrentPos - mOffset);
-
- if (maxBytesToRead > maxBytesAvailable) {
- maxBytesToRead = maxBytesAvailable;
- }
-
- if (mWaveFormat == WAVE_FORMAT_MSGSM) {
- // Microsoft packs 2 frames into 65 bytes, rather than using separate 33-byte frames,
- // so read multiples of 65, and use smaller buffers to account for ~10:1 expansion ratio
- if (maxBytesToRead > 1024) {
- maxBytesToRead = 1024;
- }
- maxBytesToRead = (maxBytesToRead / 65) * 65;
- } else {
- // read only integral amounts of audio unit frames.
- const size_t inputUnitFrameSize = mNumChannels * mBitsPerSample / 8;
- maxBytesToRead -= maxBytesToRead % inputUnitFrameSize;
- }
-
- ssize_t n = mDataSource->readAt(
- mCurrentPos, buffer->data(),
- maxBytesToRead);
-
- if (n <= 0) {
- buffer->release();
- buffer = NULL;
-
- return ERROR_END_OF_STREAM;
- }
-
- buffer->set_range(0, n);
-
- // TODO: add capability to return data as float PCM instead of 16 bit PCM.
- if (mWaveFormat == WAVE_FORMAT_PCM) {
- if (mBitsPerSample == 8) {
- // Convert 8-bit unsigned samples to 16-bit signed.
-
- // Create new buffer with 2 byte wide samples
- MediaBuffer *tmp;
- CHECK_EQ(mGroup->acquire_buffer(&tmp), (status_t)OK);
- tmp->set_range(0, 2 * n);
-
- memcpy_to_i16_from_u8((int16_t *)tmp->data(), (const uint8_t *)buffer->data(), n);
- buffer->release();
- buffer = tmp;
- } else if (mBitsPerSample == 24) {
- // Convert 24-bit signed samples to 16-bit signed in place
- const size_t numSamples = n / 3;
-
- memcpy_to_i16_from_p24((int16_t *)buffer->data(), (const uint8_t *)buffer->data(), numSamples);
- buffer->set_range(0, 2 * numSamples);
- } else if (mBitsPerSample == 32) {
- // Convert 32-bit signed samples to 16-bit signed in place
- const size_t numSamples = n / 4;
-
- memcpy_to_i16_from_i32((int16_t *)buffer->data(), (const int32_t *)buffer->data(), numSamples);
- buffer->set_range(0, 2 * numSamples);
- }
- } else if (mWaveFormat == WAVE_FORMAT_IEEE_FLOAT) {
- if (mBitsPerSample == 32) {
- // Convert 32-bit float samples to 16-bit signed in place
- const size_t numSamples = n / 4;
-
- memcpy_to_i16_from_float((int16_t *)buffer->data(), (const float *)buffer->data(), numSamples);
- buffer->set_range(0, 2 * numSamples);
- }
- }
-
- int64_t timeStampUs = 0;
-
- if (mWaveFormat == WAVE_FORMAT_MSGSM) {
- timeStampUs = 1000000LL * (mCurrentPos - mOffset) * 320 / 65 / mSampleRate;
- } else {
- size_t bytesPerSample = mBitsPerSample >> 3;
- timeStampUs = 1000000LL * (mCurrentPos - mOffset)
- / (mNumChannels * bytesPerSample) / mSampleRate;
- }
-
- buffer->meta_data()->setInt64(kKeyTime, timeStampUs);
-
- buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
- mCurrentPos += n;
-
- *out = buffer;
-
- return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-bool SniffWAV(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *) {
- char header[12];
- if (source->readAt(0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
- return false;
- }
-
- if (memcmp(header, "RIFF", 4) || memcmp(&header[8], "WAVE", 4)) {
- return false;
- }
-
- sp<MediaExtractor> extractor = new WAVExtractor(source);
- if (extractor->countTracks() == 0) {
- return false;
- }
-
- *mimeType = MEDIA_MIMETYPE_CONTAINER_WAV;
- *confidence = 0.3f;
-
- return true;
-}
-
-} // namespace android
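// Illustrative sketch (not part of the patch): the byte-offset-to-timestamp
// math used by WAVSource::read() above, as a standalone helper. The function
// name and main() driver are ours, for illustration only; it assumes plain PCM
// with the sample rate, channel count and bit depth taken from track metadata.

#include <cstdint>
#include <cstdio>

static int64_t pcmOffsetToTimeUs(int64_t byteOffset, int32_t sampleRate,
                                 int32_t numChannels, int32_t bitsPerSample) {
    const int64_t bytesPerFrame = numChannels * (bitsPerSample >> 3);
    return 1000000LL * byteOffset / bytesPerFrame / sampleRate;
}

int main() {
    // 1,764,000 bytes of 16-bit stereo 44.1 kHz PCM: 4 bytes per frame,
    // 441,000 frames, i.e. exactly 10 seconds (prints 10000000).
    printf("%lld\n", (long long)pcmOffsetToTimeUs(1764000, 44100, 2, 16));
    return 0;
}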
diff --git a/media/libstagefright/XINGSeeker.cpp b/media/libstagefright/XINGSeeker.cpp
deleted file mode 100644
index 81ed9c6..0000000
--- a/media/libstagefright/XINGSeeker.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "XINGSEEKER"
-#include <utils/Log.h>
-
-#include "include/XINGSeeker.h"
-#include "include/avc_utils.h"
-
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/Utils.h>
-
-namespace android {
-
-XINGSeeker::XINGSeeker()
- : mDurationUs(-1),
- mSizeBytes(0),
- mEncoderDelay(0),
- mEncoderPadding(0),
- mTOCValid(false) {
-}
-
-bool XINGSeeker::getDuration(int64_t *durationUs) {
- if (mDurationUs < 0) {
- return false;
- }
-
- *durationUs = mDurationUs;
-
- return true;
-}
-
-bool XINGSeeker::getOffsetForTime(int64_t *timeUs, off64_t *pos) {
- if (mSizeBytes == 0 || !mTOCValid || mDurationUs < 0) {
- return false;
- }
-
- float percent = (float)(*timeUs) * 100 / mDurationUs;
- float fx;
- if( percent <= 0.0f ) {
- fx = 0.0f;
- } else if( percent >= 100.0f ) {
- fx = 256.0f;
- } else {
- int a = (int)percent;
- float fa, fb;
- if ( a == 0 ) {
- fa = 0.0f;
- } else {
- fa = (float)mTOC[a-1];
- }
- if ( a < 99 ) {
- fb = (float)mTOC[a];
- } else {
- fb = 256.0f;
- }
- fx = fa + (fb-fa)*(percent-a);
- }
-
- *pos = (int)((1.0f/256.0f)*fx*mSizeBytes) + mFirstFramePos;
-
- return true;
-}
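// Worked example of the TOC interpolation above (illustrative only): for a
// 180 s stream, seeking to 45 s gives percent = 25, so a = 25, fa = mTOC[24],
// fb = mTOC[25], and since percent - a == 0, fx = fa. The target position is
// then (fa / 256) * mSizeBytes + mFirstFramePos, i.e. each TOC entry is a
// fraction of the audio payload expressed in 1/256ths.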
-
-// static
-sp<XINGSeeker> XINGSeeker::CreateFromSource(
- const sp<DataSource> &source, off64_t first_frame_pos) {
- sp<XINGSeeker> seeker = new XINGSeeker;
-
- seeker->mFirstFramePos = first_frame_pos;
-
- uint8_t buffer[4];
- int offset = first_frame_pos;
- if (source->readAt(offset, &buffer, 4) < 4) { // get header
- return NULL;
- }
- offset += 4;
-
- int header = U32_AT(buffer);
- size_t xingframesize = 0;
- int sampling_rate = 0;
- int num_channels;
- int samples_per_frame = 0;
- if (!GetMPEGAudioFrameSize(header, &xingframesize, &sampling_rate, &num_channels,
- NULL, &samples_per_frame)) {
- return NULL;
- }
- seeker->mFirstFramePos += xingframesize;
-
- uint8_t version = (buffer[1] >> 3) & 3;
-
- // determine offset of XING header
- if(version & 1) { // mpeg1
- if (num_channels != 1) offset += 32;
- else offset += 17;
- } else { // mpeg 2 or 2.5
- if (num_channels != 1) offset += 17;
- else offset += 9;
- }
-
- int xingbase = offset;
-
- if (source->readAt(offset, &buffer, 4) < 4) { // XING header ID
- return NULL;
- }
- offset += 4;
- // Check XING ID
- if ((buffer[0] != 'X') || (buffer[1] != 'i')
- || (buffer[2] != 'n') || (buffer[3] != 'g')) {
- if ((buffer[0] != 'I') || (buffer[1] != 'n')
- || (buffer[2] != 'f') || (buffer[3] != 'o')) {
- return NULL;
- }
- }
-
- if (source->readAt(offset, &buffer, 4) < 4) { // flags
- return NULL;
- }
- offset += 4;
- uint32_t flags = U32_AT(buffer);
-
- if (flags & 0x0001) { // Frames field is present
- if (source->readAt(offset, buffer, 4) < 4) {
- return NULL;
- }
- int32_t frames = U32_AT(buffer);
- // only update mDurationUs if the calculated duration is valid (non zero)
- // otherwise, leave duration at -1 so that getDuration() and getOffsetForTime()
- // return false when called, to indicate that this xing tag does not have the
- // requested information
- if (frames) {
- seeker->mDurationUs = (int64_t)frames * samples_per_frame * 1000000LL / sampling_rate;
- }
- offset += 4;
- }
- if (flags & 0x0002) { // Bytes field is present
- if (source->readAt(offset, buffer, 4) < 4) {
- return NULL;
- }
- seeker->mSizeBytes = U32_AT(buffer);
- offset += 4;
- }
- if (flags & 0x0004) { // TOC field is present
- if (source->readAt(offset + 1, seeker->mTOC, 99) < 99) {
- return NULL;
- }
- seeker->mTOCValid = true;
- offset += 100;
- }
-
-#if 0
- if (flags & 0x0008) { // Quality indicator field is present
- if (source->readAt(offset, buffer, 4) < 4) {
- return NULL;
- }
- // do something with the quality indicator
- offset += 4;
- }
-
- if (source->readAt(xingbase + 0xaf - 0x24, &buffer, 1) < 1) { // encoding flags
- return false;
- }
-
- ALOGV("nogap preceding: %s, nogap continued in next: %s",
- (buffer[0] & 0x80) ? "true" : "false",
- (buffer[0] & 0x40) ? "true" : "false");
-#endif
-
- if (source->readAt(xingbase + 0xb1 - 0x24, &buffer, 3) == 3) {
- seeker->mEncoderDelay = (buffer[0] << 4) + (buffer[1] >> 4);
- seeker->mEncoderPadding = ((buffer[1] & 0xf) << 8) + buffer[2];
- }
-
- return seeker;
-}
-
-int32_t XINGSeeker::getEncoderDelay() {
- return mEncoderDelay;
-}
-
-int32_t XINGSeeker::getEncoderPadding() {
- return mEncoderPadding;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
deleted file mode 100644
index b75b468..0000000
--- a/media/libstagefright/avc_utils.cpp
+++ /dev/null
@@ -1,836 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "avc_utils"
-#include <utils/Log.h>
-
-#include "include/avc_utils.h"
-
-#include <media/stagefright/foundation/ABitReader.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-#include <utils/misc.h>
-
-namespace android {
-
-unsigned parseUE(ABitReader *br) {
- unsigned numZeroes = 0;
- while (br->getBits(1) == 0) {
- ++numZeroes;
- }
-
- unsigned x = br->getBits(numZeroes);
-
- return x + (1u << numZeroes) - 1;
-}
-
-unsigned parseUEWithFallback(ABitReader *br, unsigned fallback) {
- unsigned numZeroes = 0;
- while (br->getBitsWithFallback(1, 1) == 0) {
- ++numZeroes;
- }
- uint32_t x;
- if (numZeroes < 32) {
- if (br->getBitsGraceful(numZeroes, &x)) {
- return x + (1u << numZeroes) - 1;
- } else {
- return fallback;
- }
- } else {
- br->skipBits(numZeroes);
- return fallback;
- }
-}
-
-signed parseSE(ABitReader *br) {
- unsigned codeNum = parseUE(br);
-
- return (codeNum & 1) ? (codeNum + 1) / 2 : -signed(codeNum / 2);
-}
-
-signed parseSEWithFallback(ABitReader *br, signed fallback) {
- // NOTE: parseUE cannot normally return ~0 as the max supported value is 0xFFFE
- unsigned codeNum = parseUEWithFallback(br, ~0U);
- if (codeNum == ~0U) {
- return fallback;
- }
- return (codeNum & 1) ? (codeNum + 1) / 2 : -signed(codeNum / 2);
-}
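// Minimal self-contained sketch (illustrative only) of the ue(v) decoding rule
// implemented by parseUE() above: count leading zeros up to the first 1, read
// that many suffix bits, and return suffix + 2^zeros - 1. TinyBitReader and
// tinyParseUE are made-up names for this sketch, not part of the tree.

#include <cassert>
#include <cstdint>

struct TinyBitReader {               // reads a single byte, MSB first
    uint8_t byte;
    int pos;
    unsigned getBits(int n) {
        unsigned v = 0;
        while (n-- > 0) { v = (v << 1) | ((byte >> (7 - pos)) & 1); ++pos; }
        return v;
    }
};

static unsigned tinyParseUE(TinyBitReader *br) {
    unsigned numZeroes = 0;
    while (br->getBits(1) == 0) ++numZeroes;
    return br->getBits(numZeroes) + (1u << numZeroes) - 1;
}

int main() {
    TinyBitReader br{0x30, 0};        // 0011 0000 -> Exp-Golomb codeword "00110"
    assert(tinyParseUE(&br) == 5);    // ue(v) = 5; as se(v) this maps to (5 + 1) / 2 = +3
    return 0;
}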
-
-static void skipScalingList(ABitReader *br, size_t sizeOfScalingList) {
- size_t lastScale = 8;
- size_t nextScale = 8;
- for (size_t j = 0; j < sizeOfScalingList; ++j) {
- if (nextScale != 0) {
- signed delta_scale = parseSE(br);
- // ISO_IEC_14496-10_201402-ITU, 7.4.2.1.1.1, The value of delta_scale
- // shall be in the range of −128 to +127, inclusive.
- if (delta_scale < -128) {
- ALOGW("delta_scale (%d) is below range, capped to -128", delta_scale);
- delta_scale = -128;
- } else if (delta_scale > 127) {
- ALOGW("delta_scale (%d) is above range, capped to 127", delta_scale);
- delta_scale = 127;
- }
- nextScale = (lastScale + (delta_scale + 256)) % 256;
- }
-
- lastScale = (nextScale == 0) ? lastScale : nextScale;
- }
-}
-
-// Determine video dimensions from the sequence parameterset.
-void FindAVCDimensions(
- const sp<ABuffer> &seqParamSet,
- int32_t *width, int32_t *height,
- int32_t *sarWidth, int32_t *sarHeight) {
- ABitReader br(seqParamSet->data() + 1, seqParamSet->size() - 1);
-
- unsigned profile_idc = br.getBits(8);
- br.skipBits(16);
- parseUE(&br); // seq_parameter_set_id
-
- unsigned chroma_format_idc = 1; // 4:2:0 chroma format
-
- if (profile_idc == 100 || profile_idc == 110
- || profile_idc == 122 || profile_idc == 244
- || profile_idc == 44 || profile_idc == 83 || profile_idc == 86) {
- chroma_format_idc = parseUE(&br);
- if (chroma_format_idc == 3) {
- br.skipBits(1); // residual_colour_transform_flag
- }
- parseUE(&br); // bit_depth_luma_minus8
- parseUE(&br); // bit_depth_chroma_minus8
- br.skipBits(1); // qpprime_y_zero_transform_bypass_flag
-
- if (br.getBits(1)) { // seq_scaling_matrix_present_flag
- for (size_t i = 0; i < 8; ++i) {
- if (br.getBits(1)) { // seq_scaling_list_present_flag[i]
-
- // WARNING: the code below has not ever been exercised...
- // need a real-world example.
-
- if (i < 6) {
- // ScalingList4x4[i],16,...
- skipScalingList(&br, 16);
- } else {
- // ScalingList8x8[i-6],64,...
- skipScalingList(&br, 64);
- }
- }
- }
- }
- }
-
- parseUE(&br); // log2_max_frame_num_minus4
- unsigned pic_order_cnt_type = parseUE(&br);
-
- if (pic_order_cnt_type == 0) {
- parseUE(&br); // log2_max_pic_order_cnt_lsb_minus4
- } else if (pic_order_cnt_type == 1) {
- // offset_for_non_ref_pic, offset_for_top_to_bottom_field and
- // offset_for_ref_frame are technically se(v), but since we are
- // just skipping over them the midpoint does not matter.
-
- br.getBits(1); // delta_pic_order_always_zero_flag
- parseUE(&br); // offset_for_non_ref_pic
- parseUE(&br); // offset_for_top_to_bottom_field
-
- unsigned num_ref_frames_in_pic_order_cnt_cycle = parseUE(&br);
- for (unsigned i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; ++i) {
- parseUE(&br); // offset_for_ref_frame
- }
- }
-
- parseUE(&br); // num_ref_frames
- br.getBits(1); // gaps_in_frame_num_value_allowed_flag
-
- unsigned pic_width_in_mbs_minus1 = parseUE(&br);
- unsigned pic_height_in_map_units_minus1 = parseUE(&br);
- unsigned frame_mbs_only_flag = br.getBits(1);
-
- *width = pic_width_in_mbs_minus1 * 16 + 16;
-
- *height = (2 - frame_mbs_only_flag)
- * (pic_height_in_map_units_minus1 * 16 + 16);
-
- if (!frame_mbs_only_flag) {
- br.getBits(1); // mb_adaptive_frame_field_flag
- }
-
- br.getBits(1); // direct_8x8_inference_flag
-
- if (br.getBits(1)) { // frame_cropping_flag
- unsigned frame_crop_left_offset = parseUE(&br);
- unsigned frame_crop_right_offset = parseUE(&br);
- unsigned frame_crop_top_offset = parseUE(&br);
- unsigned frame_crop_bottom_offset = parseUE(&br);
-
- unsigned cropUnitX, cropUnitY;
- if (chroma_format_idc == 0 /* monochrome */) {
- cropUnitX = 1;
- cropUnitY = 2 - frame_mbs_only_flag;
- } else {
- unsigned subWidthC = (chroma_format_idc == 3) ? 1 : 2;
- unsigned subHeightC = (chroma_format_idc == 1) ? 2 : 1;
-
- cropUnitX = subWidthC;
- cropUnitY = subHeightC * (2 - frame_mbs_only_flag);
- }
-
- ALOGV("frame_crop = (%u, %u, %u, %u), cropUnitX = %u, cropUnitY = %u",
- frame_crop_left_offset, frame_crop_right_offset,
- frame_crop_top_offset, frame_crop_bottom_offset,
- cropUnitX, cropUnitY);
-
-
- // *width -= (frame_crop_left_offset + frame_crop_right_offset) * cropUnitX;
- if(__builtin_add_overflow(frame_crop_left_offset, frame_crop_right_offset, &frame_crop_left_offset) ||
- __builtin_mul_overflow(frame_crop_left_offset, cropUnitX, &frame_crop_left_offset) ||
- __builtin_sub_overflow(*width, frame_crop_left_offset, width) ||
- *width < 0) {
- *width = 0;
- }
-
- //*height -= (frame_crop_top_offset + frame_crop_bottom_offset) * cropUnitY;
- if(__builtin_add_overflow(frame_crop_top_offset, frame_crop_bottom_offset, &frame_crop_top_offset) ||
- __builtin_mul_overflow(frame_crop_top_offset, cropUnitY, &frame_crop_top_offset) ||
- __builtin_sub_overflow(*height, frame_crop_top_offset, height) ||
- *height < 0) {
- *height = 0;
- }
- }
-
- if (sarWidth != NULL) {
- *sarWidth = 0;
- }
-
- if (sarHeight != NULL) {
- *sarHeight = 0;
- }
-
- if (br.getBits(1)) { // vui_parameters_present_flag
- unsigned sar_width = 0, sar_height = 0;
-
- if (br.getBits(1)) { // aspect_ratio_info_present_flag
- unsigned aspect_ratio_idc = br.getBits(8);
-
- if (aspect_ratio_idc == 255 /* extendedSAR */) {
- sar_width = br.getBits(16);
- sar_height = br.getBits(16);
- } else {
- static const struct { unsigned width, height; } kFixedSARs[] = {
- { 0, 0 }, // Invalid
- { 1, 1 },
- { 12, 11 },
- { 10, 11 },
- { 16, 11 },
- { 40, 33 },
- { 24, 11 },
- { 20, 11 },
- { 32, 11 },
- { 80, 33 },
- { 18, 11 },
- { 15, 11 },
- { 64, 33 },
- { 160, 99 },
- { 4, 3 },
- { 3, 2 },
- { 2, 1 },
- };
-
- if (aspect_ratio_idc > 0 && aspect_ratio_idc < NELEM(kFixedSARs)) {
- sar_width = kFixedSARs[aspect_ratio_idc].width;
- sar_height = kFixedSARs[aspect_ratio_idc].height;
- }
- }
- }
-
- ALOGV("sample aspect ratio = %u : %u", sar_width, sar_height);
-
- if (sarWidth != NULL) {
- *sarWidth = sar_width;
- }
-
- if (sarHeight != NULL) {
- *sarHeight = sar_height;
- }
- }
-}
-
-status_t getNextNALUnit(
- const uint8_t **_data, size_t *_size,
- const uint8_t **nalStart, size_t *nalSize,
- bool startCodeFollows) {
- const uint8_t *data = *_data;
- size_t size = *_size;
-
- *nalStart = NULL;
- *nalSize = 0;
-
- if (size < 3) {
- return -EAGAIN;
- }
-
- size_t offset = 0;
-
- // A valid startcode consists of at least two 0x00 bytes followed by 0x01.
- for (; offset + 2 < size; ++offset) {
- if (data[offset + 2] == 0x01 && data[offset] == 0x00
- && data[offset + 1] == 0x00) {
- break;
- }
- }
- if (offset + 2 >= size) {
- *_data = &data[offset];
- *_size = 2;
- return -EAGAIN;
- }
- offset += 3;
-
- size_t startOffset = offset;
-
- for (;;) {
- while (offset < size && data[offset] != 0x01) {
- ++offset;
- }
-
- if (offset == size) {
- if (startCodeFollows) {
- offset = size + 2;
- break;
- }
-
- return -EAGAIN;
- }
-
- if (data[offset - 1] == 0x00 && data[offset - 2] == 0x00) {
- break;
- }
-
- ++offset;
- }
-
- size_t endOffset = offset - 2;
- while (endOffset > startOffset + 1 && data[endOffset - 1] == 0x00) {
- --endOffset;
- }
-
- *nalStart = &data[startOffset];
- *nalSize = endOffset - startOffset;
-
- if (offset + 2 < size) {
- *_data = &data[offset - 2];
- *_size = size - offset + 2;
- } else {
- *_data = NULL;
- *_size = 0;
- }
-
- return OK;
-}
-
-static sp<ABuffer> FindNAL(const uint8_t *data, size_t size, unsigned nalType) {
- const uint8_t *nalStart;
- size_t nalSize;
- while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
- if ((nalStart[0] & 0x1f) == nalType) {
- sp<ABuffer> buffer = new ABuffer(nalSize);
- memcpy(buffer->data(), nalStart, nalSize);
- return buffer;
- }
- }
-
- return NULL;
-}
-
-const char *AVCProfileToString(uint8_t profile) {
- switch (profile) {
- case kAVCProfileBaseline:
- return "Baseline";
- case kAVCProfileMain:
- return "Main";
- case kAVCProfileExtended:
- return "Extended";
- case kAVCProfileHigh:
- return "High";
- case kAVCProfileHigh10:
- return "High 10";
- case kAVCProfileHigh422:
- return "High 422";
- case kAVCProfileHigh444:
- return "High 444";
- case kAVCProfileCAVLC444Intra:
- return "CAVLC 444 Intra";
- default: return "Unknown";
- }
-}
-
-sp<MetaData> MakeAVCCodecSpecificData(const sp<ABuffer> &accessUnit) {
- const uint8_t *data = accessUnit->data();
- size_t size = accessUnit->size();
-
- sp<ABuffer> seqParamSet = FindNAL(data, size, 7);
- if (seqParamSet == NULL) {
- return NULL;
- }
-
- int32_t width, height;
- int32_t sarWidth, sarHeight;
- FindAVCDimensions(
- seqParamSet, &width, &height, &sarWidth, &sarHeight);
-
- sp<ABuffer> picParamSet = FindNAL(data, size, 8);
- CHECK(picParamSet != NULL);
-
- size_t csdSize =
- 1 + 3 + 1 + 1
- + 2 * 1 + seqParamSet->size()
- + 1 + 2 * 1 + picParamSet->size();
-
- sp<ABuffer> csd = new ABuffer(csdSize);
- uint8_t *out = csd->data();
-
- *out++ = 0x01; // configurationVersion
- memcpy(out, seqParamSet->data() + 1, 3); // profile/level...
-
- uint8_t profile = out[0];
- uint8_t level = out[2];
-
- out += 3;
- *out++ = (0x3f << 2) | 1; // lengthSize == 2 bytes
- *out++ = 0xe0 | 1;
-
- *out++ = seqParamSet->size() >> 8;
- *out++ = seqParamSet->size() & 0xff;
- memcpy(out, seqParamSet->data(), seqParamSet->size());
- out += seqParamSet->size();
-
- *out++ = 1;
-
- *out++ = picParamSet->size() >> 8;
- *out++ = picParamSet->size() & 0xff;
- memcpy(out, picParamSet->data(), picParamSet->size());
-
-#if 0
- ALOGI("AVC seq param set");
- hexdump(seqParamSet->data(), seqParamSet->size());
-#endif
-
- sp<MetaData> meta = new MetaData;
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
-
- meta->setData(kKeyAVCC, kTypeAVCC, csd->data(), csd->size());
- meta->setInt32(kKeyWidth, width);
- meta->setInt32(kKeyHeight, height);
-
- if ((sarWidth > 0 && sarHeight > 0) && (sarWidth != 1 || sarHeight != 1)) {
- // We treat *:0 and 0:* (unspecified) as 1:1.
-
- meta->setInt32(kKeySARWidth, sarWidth);
- meta->setInt32(kKeySARHeight, sarHeight);
-
- ALOGI("found AVC codec config (%d x %d, %s-profile level %d.%d) "
- "SAR %d : %d",
- width,
- height,
- AVCProfileToString(profile),
- level / 10,
- level % 10,
- sarWidth,
- sarHeight);
- } else {
- ALOGI("found AVC codec config (%d x %d, %s-profile level %d.%d)",
- width,
- height,
- AVCProfileToString(profile),
- level / 10,
- level % 10);
- }
-
- return meta;
-}
-
-template <typename T>
-bool IsIDRInternal(const sp<T> &buffer) {
- const uint8_t *data = buffer->data();
- size_t size = buffer->size();
-
- bool foundIDR = false;
-
- const uint8_t *nalStart;
- size_t nalSize;
- while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
- if (nalSize == 0u) {
- ALOGW("skipping empty nal unit from potentially malformed bitstream");
- continue;
- }
-
- unsigned nalType = nalStart[0] & 0x1f;
-
- if (nalType == 5) {
- foundIDR = true;
- break;
- }
- }
-
- return foundIDR;
-}
-
-bool IsIDR(const sp<ABuffer> &buffer) {
- return IsIDRInternal(buffer);
-}
-
-bool IsIDR(const sp<MediaCodecBuffer> &buffer) {
- return IsIDRInternal(buffer);
-}
-
-bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit) {
- const uint8_t *data = accessUnit->data();
- size_t size = accessUnit->size();
- if (data == NULL) {
- ALOGE("IsAVCReferenceFrame: called on NULL data (%p, %zu)", accessUnit.get(), size);
- return false;
- }
-
- const uint8_t *nalStart;
- size_t nalSize;
- while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
- if (nalSize == 0) {
- ALOGE("IsAVCReferenceFrame: invalid nalSize: 0 (%p, %zu)", accessUnit.get(), size);
- return false;
- }
-
- unsigned nalType = nalStart[0] & 0x1f;
-
- if (nalType == 5) {
- return true;
- } else if (nalType == 1) {
- unsigned nal_ref_idc = (nalStart[0] >> 5) & 3;
- return nal_ref_idc != 0;
- }
- }
-
- return true;
-}
-
-uint32_t FindAVCLayerId(const uint8_t *data, size_t size) {
- CHECK(data != NULL);
-
- const unsigned kSvcNalType = 0xE;
- const unsigned kSvcNalSearchRange = 32;
- // SVC NAL
- // |---0 1110|1--- ----|---- ----|iii- ---|
- // ^ ^
- // NAL-type = 0xE layer-Id
- //
- // layer_id 0 is for base layer, while 1, 2, ... are enhancement layers.
- // Layer n uses reference frames from layer 0, 1, ..., n-1.
-
- uint32_t layerId = 0;
- sp<ABuffer> svcNAL = FindNAL(
- data, size > kSvcNalSearchRange ? kSvcNalSearchRange : size, kSvcNalType);
- if (svcNAL != NULL && svcNAL->size() >= 4) {
- layerId = (*(svcNAL->data() + 3) >> 5) & 0x7;
- }
- return layerId;
-}
-
-sp<MetaData> MakeAACCodecSpecificData(
- unsigned profile, unsigned sampling_freq_index,
- unsigned channel_configuration) {
- sp<MetaData> meta = new MetaData;
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
-
- CHECK_LE(sampling_freq_index, 11u);
- static const int32_t kSamplingFreq[] = {
- 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
- 16000, 12000, 11025, 8000
- };
- meta->setInt32(kKeySampleRate, kSamplingFreq[sampling_freq_index]);
- meta->setInt32(kKeyChannelCount, channel_configuration);
-
- static const uint8_t kStaticESDS[] = {
- 0x03, 22,
- 0x00, 0x00, // ES_ID
- 0x00, // streamDependenceFlag, URL_Flag, OCRstreamFlag
-
- 0x04, 17,
- 0x40, // Audio ISO/IEC 14496-3
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x05, 2,
- // AudioSpecificInfo follows
-
- // oooo offf fccc c000
- // o - audioObjectType
- // f - samplingFreqIndex
- // c - channelConfig
- };
- sp<ABuffer> csd = new ABuffer(sizeof(kStaticESDS) + 2);
- memcpy(csd->data(), kStaticESDS, sizeof(kStaticESDS));
-
- csd->data()[sizeof(kStaticESDS)] =
- ((profile + 1) << 3) | (sampling_freq_index >> 1);
-
- csd->data()[sizeof(kStaticESDS) + 1] =
- ((sampling_freq_index << 7) & 0x80) | (channel_configuration << 3);
-
- meta->setData(kKeyESDS, 0, csd->data(), csd->size());
-
- return meta;
-}
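// Worked example (illustrative only) of the two AudioSpecificConfig bytes
// appended above: for AAC-LC (profile argument 1, i.e. audioObjectType 2),
// sampling_freq_index 4 (44100 Hz) and channel_configuration 2,
//     csd[0] = ((1 + 1) << 3) | (4 >> 1) = 0x12
//     csd[1] = ((4 << 7) & 0x80) | (2 << 3) = 0x10
// which is the familiar "12 10" codec config for 44.1 kHz stereo AAC-LC.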
-
-bool ExtractDimensionsFromVOLHeader(
- const uint8_t *data, size_t size, int32_t *width, int32_t *height) {
- ABitReader br(&data[4], size - 4);
- br.skipBits(1); // random_accessible_vol
- unsigned video_object_type_indication = br.getBits(8);
-
- CHECK_NE(video_object_type_indication,
- 0x21u /* Fine Granularity Scalable */);
-
- unsigned video_object_layer_verid __unused;
- unsigned video_object_layer_priority __unused;
- if (br.getBits(1)) {
- video_object_layer_verid = br.getBits(4);
- video_object_layer_priority = br.getBits(3);
- }
- unsigned aspect_ratio_info = br.getBits(4);
- if (aspect_ratio_info == 0x0f /* extended PAR */) {
- br.skipBits(8); // par_width
- br.skipBits(8); // par_height
- }
- if (br.getBits(1)) { // vol_control_parameters
- br.skipBits(2); // chroma_format
- br.skipBits(1); // low_delay
- if (br.getBits(1)) { // vbv_parameters
- br.skipBits(15); // first_half_bit_rate
- CHECK(br.getBits(1)); // marker_bit
- br.skipBits(15); // latter_half_bit_rate
- CHECK(br.getBits(1)); // marker_bit
- br.skipBits(15); // first_half_vbv_buffer_size
- CHECK(br.getBits(1)); // marker_bit
- br.skipBits(3); // latter_half_vbv_buffer_size
- br.skipBits(11); // first_half_vbv_occupancy
- CHECK(br.getBits(1)); // marker_bit
- br.skipBits(15); // latter_half_vbv_occupancy
- CHECK(br.getBits(1)); // marker_bit
- }
- }
- unsigned video_object_layer_shape = br.getBits(2);
- CHECK_EQ(video_object_layer_shape, 0x00u /* rectangular */);
-
- CHECK(br.getBits(1)); // marker_bit
- unsigned vop_time_increment_resolution = br.getBits(16);
- CHECK(br.getBits(1)); // marker_bit
-
- if (br.getBits(1)) { // fixed_vop_rate
- // range [0..vop_time_increment_resolution)
-
- // vop_time_increment_resolution
- // 2 => 0..1, 1 bit
- // 3 => 0..2, 2 bits
- // 4 => 0..3, 2 bits
- // 5 => 0..4, 3 bits
- // ...
-
- CHECK_GT(vop_time_increment_resolution, 0u);
- --vop_time_increment_resolution;
-
- unsigned numBits = 0;
- while (vop_time_increment_resolution > 0) {
- ++numBits;
- vop_time_increment_resolution >>= 1;
- }
-
- br.skipBits(numBits); // fixed_vop_time_increment
- }
-
- CHECK(br.getBits(1)); // marker_bit
- unsigned video_object_layer_width = br.getBits(13);
- CHECK(br.getBits(1)); // marker_bit
- unsigned video_object_layer_height = br.getBits(13);
- CHECK(br.getBits(1)); // marker_bit
-
- unsigned interlaced __unused = br.getBits(1);
-
- *width = video_object_layer_width;
- *height = video_object_layer_height;
-
- return true;
-}
-
-bool GetMPEGAudioFrameSize(
- uint32_t header, size_t *frame_size,
- int *out_sampling_rate, int *out_channels,
- int *out_bitrate, int *out_num_samples) {
- *frame_size = 0;
-
- if (out_sampling_rate) {
- *out_sampling_rate = 0;
- }
-
- if (out_channels) {
- *out_channels = 0;
- }
-
- if (out_bitrate) {
- *out_bitrate = 0;
- }
-
- if (out_num_samples) {
- *out_num_samples = 1152;
- }
-
- if ((header & 0xffe00000) != 0xffe00000) {
- return false;
- }
-
- unsigned version = (header >> 19) & 3;
-
- if (version == 0x01) {
- return false;
- }
-
- unsigned layer = (header >> 17) & 3;
-
- if (layer == 0x00) {
- return false;
- }
-
- unsigned protection __unused = (header >> 16) & 1;
-
- unsigned bitrate_index = (header >> 12) & 0x0f;
-
- if (bitrate_index == 0 || bitrate_index == 0x0f) {
- // Disallow "free" bitrate.
- return false;
- }
-
- unsigned sampling_rate_index = (header >> 10) & 3;
-
- if (sampling_rate_index == 3) {
- return false;
- }
-
- static const int kSamplingRateV1[] = { 44100, 48000, 32000 };
- int sampling_rate = kSamplingRateV1[sampling_rate_index];
- if (version == 2 /* V2 */) {
- sampling_rate /= 2;
- } else if (version == 0 /* V2.5 */) {
- sampling_rate /= 4;
- }
-
- unsigned padding = (header >> 9) & 1;
-
- if (layer == 3) {
- // layer I
-
- static const int kBitrateV1[] = {
- 32, 64, 96, 128, 160, 192, 224, 256,
- 288, 320, 352, 384, 416, 448
- };
-
- static const int kBitrateV2[] = {
- 32, 48, 56, 64, 80, 96, 112, 128,
- 144, 160, 176, 192, 224, 256
- };
-
- int bitrate =
- (version == 3 /* V1 */)
- ? kBitrateV1[bitrate_index - 1]
- : kBitrateV2[bitrate_index - 1];
-
- if (out_bitrate) {
- *out_bitrate = bitrate;
- }
-
- *frame_size = (12000 * bitrate / sampling_rate + padding) * 4;
-
- if (out_num_samples) {
- *out_num_samples = 384;
- }
- } else {
- // layer II or III
-
- static const int kBitrateV1L2[] = {
- 32, 48, 56, 64, 80, 96, 112, 128,
- 160, 192, 224, 256, 320, 384
- };
-
- static const int kBitrateV1L3[] = {
- 32, 40, 48, 56, 64, 80, 96, 112,
- 128, 160, 192, 224, 256, 320
- };
-
- static const int kBitrateV2[] = {
- 8, 16, 24, 32, 40, 48, 56, 64,
- 80, 96, 112, 128, 144, 160
- };
-
- int bitrate;
- if (version == 3 /* V1 */) {
- bitrate = (layer == 2 /* L2 */)
- ? kBitrateV1L2[bitrate_index - 1]
- : kBitrateV1L3[bitrate_index - 1];
-
- if (out_num_samples) {
- *out_num_samples = 1152;
- }
- } else {
- // V2 (or 2.5)
-
- bitrate = kBitrateV2[bitrate_index - 1];
- if (out_num_samples) {
- *out_num_samples = (layer == 1 /* L3 */) ? 576 : 1152;
- }
- }
-
- if (out_bitrate) {
- *out_bitrate = bitrate;
- }
-
- if (version == 3 /* V1 */) {
- *frame_size = 144000 * bitrate / sampling_rate + padding;
- } else {
- // V2 or V2.5
- size_t tmp = (layer == 1 /* L3 */) ? 72000 : 144000;
- *frame_size = tmp * bitrate / sampling_rate + padding;
- }
- }
-
- if (out_sampling_rate) {
- *out_sampling_rate = sampling_rate;
- }
-
- if (out_channels) {
- int channel_mode = (header >> 6) & 3;
-
- *out_channels = (channel_mode == 3) ? 1 : 2;
- }
-
- return true;
-}
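// Worked example (illustrative only): an MPEG-1 (version == 3) Layer III
// (layer == 1) header at 128 kbps and 44100 Hz with the padding bit clear
// takes the V1 branch above and yields
//     *frame_size = 144000 * 128 / 44100 + 0 = 417 bytes
// with *out_num_samples = 1152 -- the canonical 417/418-byte frame length at
// 128 kbps / 44.1 kHz.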
-
-} // namespace android
-
diff --git a/media/libstagefright/bqhelper/Android.bp b/media/libstagefright/bqhelper/Android.bp
new file mode 100644
index 0000000..4f46be7
--- /dev/null
+++ b/media/libstagefright/bqhelper/Android.bp
@@ -0,0 +1,68 @@
+cc_library_shared {
+ name: "libstagefright_bufferqueue_helper",
+ vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
+
+ srcs: [
+ "Conversion.cpp",
+ "FrameDropper.cpp",
+ "GraphicBufferSource.cpp",
+ "WProducerListener.cpp",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ header_libs: [
+ "media_plugin_headers",
+ ],
+
+ export_header_lib_headers: [
+ "media_plugin_headers",
+ ],
+
+ shared_libs: [
+ "libbase",
+ "libbinder",
+ "libcutils",
+ "libgui",
+ "libhidlbase",
+ "libhidlmemory",
+ "libhidltransport",
+ "liblog",
+ "libstagefright_foundation",
+ "libui",
+ "libutils",
+
+ "android.hardware.graphics.bufferqueue@1.0",
+
+ "libnativewindow", // TODO(b/62923479): use header library
+ ],
+
+ export_shared_lib_headers: [
+ "libgui",
+ "libhidlmemory",
+ "libstagefright_foundation",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-Wno-unused-parameter",
+ "-Wno-documentation",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "signed-integer-overflow",
+ "unsigned-integer-overflow",
+ ],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+}
diff --git a/media/libstagefright/bqhelper/Conversion.cpp b/media/libstagefright/bqhelper/Conversion.cpp
new file mode 100644
index 0000000..ffed005
--- /dev/null
+++ b/media/libstagefright/bqhelper/Conversion.cpp
@@ -0,0 +1,1542 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/bqhelper/Conversion.h>
+
+namespace android {
+namespace conversion {
+
+// native_handle_t helper functions.
+
+/**
+ * \brief Take an fd and create a native handle containing only the given fd.
+ * The created handle will need to be deleted manually with
+ * `native_handle_delete()`.
+ *
+ * \param[in] fd The source file descriptor (of type `int`).
+ * \return The created `native_handle_t*` that contains the given \p fd. If the
+ * supplied \p fd is negative, the created native handle will contain no file
+ * descriptors.
+ *
+ * If the native handle cannot be created, the return value will be
+ * `nullptr`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+native_handle_t* native_handle_create_from_fd(int fd) {
+ if (fd < 2) {
+ return native_handle_create(0, 0);
+ }
+ native_handle_t* nh = native_handle_create(1, 0);
+ if (nh == nullptr) {
+ return nullptr;
+ }
+ nh->data[0] = fd;
+ return nh;
+}
+
+/**
+ * \brief Extract a file descriptor from a native handle.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \param[in] index The index of the file descriptor in \p nh to read from. This
+ * input has the default value of `0`.
+ * \return The `index`-th file descriptor in \p nh. If \p nh does not have
+ * enough file descriptors, the returned value will be `-1`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+int native_handle_read_fd(native_handle_t const* nh, int index) {
+ return ((nh == nullptr) || (nh->numFds == 0) ||
+ (nh->numFds <= index) || (index < 0)) ?
+ -1 : nh->data[index];
+}
+
+/**
+ * Conversion functions
+ * ====================
+ *
+ * There are two main directions of conversion:
+ * - `inTargetType(...)`: Create a wrapper whose lifetime depends on the
+ * input. The wrapper has type `TargetType`.
+ * - `toTargetType(...)`: Create a standalone object of type `TargetType` that
+ * corresponds to the input. The lifetime of the output does not depend on the
+ * lifetime of the input.
+ * - `wrapIn(TargetType*, ...)`: Same as `inTargetType()`, but for `TargetType`
+ * that cannot be copied and/or moved efficiently, or when there are multiple
+ * output arguments.
+ * - `convertTo(TargetType*, ...)`: Same as `toTargetType()`, but for
+ * `TargetType` that cannot be copied and/or moved efficiently, or when there
+ * are multiple output arguments.
+ *
+ * `wrapIn()` and `convertTo()` functions will take output arguments before
+ * input arguments. Some of these functions might return a value to indicate
+ * success or error.
+ *
+ * In converting or wrapping something as a Treble type that contains a
+ * `hidl_handle`, `native_handle_t*` will need to be created and returned as
+ * an additional output argument, hence only `wrapIn()` or `convertTo()` would
+ * be available. The caller must call `native_handle_delete()` to deallocate the
+ * returned native handle when it is no longer needed.
+ *
+ * For types that contain file descriptors, `inTargetType()` and `wrapAs()` do
+ * not perform duplication of file descriptors, while `toTargetType()` and
+ * `convertTo()` do.
+ */
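// Illustrative usage sketch (not part of the patch) contrasting the two
// directions described above; loadCodecConfig() is a hypothetical source of
// bytes used only for this example:
//
//     std::vector<uint8_t> config = loadCodecConfig();
//     hidl_vec<uint8_t> wrapped = inHidlBytes(config.data(), config.size());
//     // `wrapped` only points into `config` and must not outlive it.
//     hidl_vec<uint8_t> copied = toHidlBytes(config.data(), config.size());
//     // `copied` owns its storage and remains valid after `config` is gone.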
+
+/**
+ * \brief Convert `Return<void>` to `binder::Status`.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `binder::Status`.
+ */
+// convert: Return<void> -> ::android::binder::Status
+::android::binder::Status toBinderStatus(
+ Return<void> const& t) {
+ return ::android::binder::Status::fromExceptionCode(
+ t.isOk() ? OK : UNKNOWN_ERROR,
+ t.description().c_str());
+}
+
+/**
+ * \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `status_t`.
+ */
+// convert: Return<void> -> status_t
+status_t toStatusT(Return<void> const& t) {
+ return t.isOk() ? OK : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Wrap `native_handle_t*` in `hidl_handle`.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \return The `hidl_handle` that points to \p nh.
+ */
+// wrap: native_handle_t* -> hidl_handle
+hidl_handle inHidlHandle(native_handle_t const* nh) {
+ return hidl_handle(nh);
+}
+
+/**
+ * \brief Convert `int32_t` to `Dataspace`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \result The corresponding `Dataspace`.
+ */
+// convert: int32_t -> Dataspace
+Dataspace toHardwareDataspace(int32_t l) {
+ return static_cast<Dataspace>(l);
+}
+
+/**
+ * \brief Convert `Dataspace` to `int32_t`.
+ *
+ * \param[in] t The source `Dataspace`.
+ * \result The corresponding `int32_t`.
+ */
+// convert: Dataspace -> int32_t
+int32_t toRawDataspace(Dataspace const& t) {
+ return static_cast<int32_t>(t);
+}
+
+/**
+ * \brief Wrap an opaque buffer inside a `hidl_vec<uint8_t>`.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that points to the buffer.
+ */
+// wrap: void*, size_t -> hidl_vec<uint8_t>
+hidl_vec<uint8_t> inHidlBytes(void const* l, size_t size) {
+ hidl_vec<uint8_t> t;
+ t.setToExternal(static_cast<uint8_t*>(const_cast<void*>(l)), size, false);
+ return t;
+}
+
+/**
+ * \brief Create a `hidl_vec<uint8_t>` that is a copy of an opaque buffer.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that is a copy of the input buffer.
+ */
+// convert: void*, size_t -> hidl_vec<uint8_t>
+hidl_vec<uint8_t> toHidlBytes(void const* l, size_t size) {
+ hidl_vec<uint8_t> t;
+ t.resize(size);
+ uint8_t const* src = static_cast<uint8_t const*>(l);
+ std::copy(src, src + size, t.data());
+ return t;
+}
+
+/**
+ * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
+ *
+ * \param[out] t The wrapper of type `AnwBuffer`.
+ * \param[in] l The source `GraphicBuffer`.
+ */
+// wrap: GraphicBuffer -> AnwBuffer
+void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
+ t->attr.width = l.getWidth();
+ t->attr.height = l.getHeight();
+ t->attr.stride = l.getStride();
+ t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
+ t->attr.layerCount = l.getLayerCount();
+ t->attr.usage = l.getUsage();
+ t->attr.id = l.getId();
+ t->attr.generationNumber = l.getGenerationNumber();
+ t->nativeHandle = hidl_handle(l.handle);
+}
+
+/**
+ * \brief Convert `AnwBuffer` to `GraphicBuffer`.
+ *
+ * \param[out] l The destination `GraphicBuffer`.
+ * \param[in] t The source `AnwBuffer`.
+ *
+ * This function will duplicate all file descriptors in \p t.
+ */
+// convert: AnwBuffer -> GraphicBuffer
+// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
+bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
+ native_handle_t* handle = t.nativeHandle == nullptr ?
+ nullptr : native_handle_clone(t.nativeHandle);
+
+ size_t const numInts = 12 + (handle ? handle->numInts : 0);
+ int32_t* ints = new int32_t[numInts];
+
+ size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
+ int* fds = new int[numFds];
+
+ ints[0] = 'GBFR';
+ ints[1] = static_cast<int32_t>(t.attr.width);
+ ints[2] = static_cast<int32_t>(t.attr.height);
+ ints[3] = static_cast<int32_t>(t.attr.stride);
+ ints[4] = static_cast<int32_t>(t.attr.format);
+ ints[5] = static_cast<int32_t>(t.attr.layerCount);
+ ints[6] = static_cast<int32_t>(t.attr.usage);
+ ints[7] = static_cast<int32_t>(t.attr.id >> 32);
+ ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
+ ints[9] = static_cast<int32_t>(t.attr.generationNumber);
+ ints[10] = 0;
+ ints[11] = 0;
+ if (handle) {
+ ints[10] = static_cast<int32_t>(handle->numFds);
+ ints[11] = static_cast<int32_t>(handle->numInts);
+ int* intsStart = handle->data + handle->numFds;
+ std::copy(handle->data, intsStart, fds);
+ std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
+ }
+
+ void const* constBuffer = static_cast<void const*>(ints);
+ size_t size = numInts * sizeof(int32_t);
+ int const* constFds = static_cast<int const*>(fds);
+ status_t status = l->unflatten(constBuffer, size, constFds, numFds);
+
+ delete [] fds;
+ delete [] ints;
+ native_handle_delete(handle);
+ return status == NO_ERROR;
+}
+
+/**
+ * Conversion functions for types outside media
+ * ============================================
+ *
+ * Some objects in libui and libgui that were made to go through binder calls do
+ * not expose ways to read or write their fields to the public. To pass an
+ * object of this kind through the HIDL boundary, translation functions need to
+ * work around the access restriction by using the publicly available
+ * `flatten()` and `unflatten()` functions.
+ *
+ * All `flatten()` and `unflatten()` overloads follow the same convention as
+ * follows:
+ *
+ * status_t flatten(ObjectType const& object,
+ * [OtherType const& other, ...]
+ * void*& buffer, size_t& size,
+ * int*& fds, size_t& numFds)
+ *
+ * status_t unflatten(ObjectType* object,
+ * [OtherType* other, ...,]
+ * void*& buffer, size_t& size,
+ * int*& fds, size_t& numFds)
+ *
+ * The number of `other` parameters varies depending on the `ObjectType`. For
+ * example, in the process of unflattening an object that contains
+ * `hidl_handle`, `other` is needed to hold `native_handle_t` objects that will
+ * be created.
+ *
+ * The last four parameters always work the same way in all overloads of
+ * `flatten()` and `unflatten()`:
+ * - For `flatten()`, `buffer` is the pointer to the non-fd buffer to be filled,
+ * `size` is the size (in bytes) of the non-fd buffer pointed to by `buffer`,
+ * `fds` is the pointer to the fd buffer to be filled, and `numFds` is the
+ * size (in ints) of the fd buffer pointed to by `fds`.
+ * - For `unflatten()`, `buffer` is the pointer to the non-fd buffer to be read
+ * from, `size` is the size (in bytes) of the non-fd buffer pointed to by
+ * `buffer`, `fds` is the pointer to the fd buffer to be read from, and
+ * `numFds` is the size (in ints) of the fd buffer pointed to by `fds`.
+ * - After a successful call to `flatten()` or `unflatten()`, `buffer` and `fds`
+ * will be advanced, while `size` and `numFds` will be decreased to reflect
+ * how much storage/data of the two buffers (fd and non-fd) have been used.
+ * - After an unsuccessful call, the values of `buffer`, `size`, `fds` and
+ * `numFds` are invalid.
+ *
+ * The return value of a successful `flatten()` or `unflatten()` call will be
+ * `OK` (also aliased as `NO_ERROR`). Any other values indicate a failure.
+ *
+ * For each object type that supports flattening, there will be two accompanying
+ * functions: `getFlattenedSize()` and `getFdCount()`. `getFlattenedSize()` will
+ * return the size of the non-fd buffer that the object will need for
+ * flattening. `getFdCount()` will return the size of the fd buffer that the
+ * object will need for flattening.
+ *
+ * The set of these four functions, `getFlattenedSize()`, `getFdCount()`,
+ * `flatten()` and `unflatten()`, are similar to functions of the same name in
+ * the abstract class `Flattenable`. The only difference is that functions in
+ * this file are not member functions of the object type. For example, we write
+ *
+ * flatten(x, buffer, size, fds, numFds)
+ *
+ * instead of
+ *
+ * x.flatten(buffer, size, fds, numFds)
+ *
+ * because we cannot modify the type of `x`.
+ *
+ * There is one exception to the naming convention: `hidl_handle` that
+ * represents a fence. The four functions for this "Fence" type have the word
+ * "Fence" attched to their names because the object type, which is
+ * `hidl_handle`, does not carry the special meaning that the object itself can
+ * only contain zero or one file descriptor.
+ */
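// Illustrative sketch (not part of the patch) of the calling convention
// described above, shown with the Fence helpers defined later in this file;
// `fence` stands for some already-obtained hidl_handle:
//
//     size_t size = getFenceFlattenedSize(fence);
//     size_t numFds = getFenceFdCount(fence);
//     std::vector<uint8_t> blob(size);
//     std::vector<int> fdBuf(numFds);
//     void* buffer = blob.data();
//     int* fds = fdBuf.data();
//     if (flattenFence(fence, buffer, size, fds, numFds) == NO_ERROR) {
//         // buffer/fds have been advanced; size/numFds hold what is left.
//     }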
+
+// Ref: frameworks/native/libs/ui/Fence.cpp
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return The required size of the flat buffer.
+ *
+ * The current version of this function always returns 4, which is the number of
+ * bytes required to store the number of file descriptors contained in the fd
+ * part of the flat buffer.
+ */
+size_t getFenceFlattenedSize(hidl_handle const& /* fence */) {
+ return 4;
+};
+
+/**
+ * \brief Return the number of file descriptors contained in a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return `0` if \p fence does not contain a valid file descriptor, or `1`
+ * otherwise.
+ */
+size_t getFenceFdCount(hidl_handle const& fence) {
+ return native_handle_read_fd(fence) == -1 ? 0 : 1;
+}
+
+/**
+ * \brief Unflatten `Fence` to `hidl_handle`.
+ *
+ * \param[out] fence The destination `hidl_handle`.
+ * \param[out] nh The underlying native handle.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will point to a newly created
+ * native handle, which needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+status_t unflattenFence(hidl_handle* fence, native_handle_t** nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+ if (size < 4) {
+ return NO_MEMORY;
+ }
+
+ uint32_t numFdsInHandle;
+ FlattenableUtils::read(buffer, size, numFdsInHandle);
+
+ if (numFdsInHandle > 1) {
+ return BAD_VALUE;
+ }
+
+ if (numFds < numFdsInHandle) {
+ return NO_MEMORY;
+ }
+
+ if (numFdsInHandle) {
+ *nh = native_handle_create_from_fd(*fds);
+ if (*nh == nullptr) {
+ return NO_MEMORY;
+ }
+ *fence = *nh;
+ ++fds;
+ --numFds;
+ } else {
+ *nh = nullptr;
+ *fence = hidl_handle();
+ }
+
+ return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `hidl_handle` as `Fence`.
+ *
+ * \param[in] fence The source `hidl_handle`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+status_t flattenFence(hidl_handle const& fence,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+ if (size < getFenceFlattenedSize(fence) ||
+ numFds < getFenceFdCount(fence)) {
+ return NO_MEMORY;
+ }
+ // Cast to uint32_t since the size of a size_t can vary between 32- and
+ // 64-bit processes
+ FlattenableUtils::write(buffer, size,
+ static_cast<uint32_t>(getFenceFdCount(fence)));
+ int fd = native_handle_read_fd(fence);
+ if (fd != -1) {
+ *fds = fd;
+ ++fds;
+ --numFds;
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Wrap `Fence` in `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle pointed to by \p t.
+ * \param[in] l The source `Fence`.
+ *
+ * On success, \p nh will hold a newly created native handle, which must be
+ * deleted manually with `native_handle_delete()` afterwards.
+ */
+// wrap: Fence -> hidl_handle
+bool wrapAs(hidl_handle* t, native_handle_t** nh, Fence const& l) {
+ size_t const baseSize = l.getFlattenedSize();
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ size_t const baseNumFds = l.getFdCount();
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = static_cast<int*>(baseFds.get());
+ size_t numFds = baseNumFds;
+ if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (unflattenFence(t, nh, constBuffer, size, constFds, numFds)
+ != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * \brief Convert `hidl_handle` to `Fence`.
+ *
+ * \param[out] l The destination `Fence`. `l` must not have been used
+ * (`l->isValid()` must return `false`) before this function is called.
+ * \param[in] t The source `hidl_handle`.
+ *
+ * If \p t contains a valid file descriptor, it will be duplicated.
+ */
+// convert: hidl_handle -> Fence
+bool convertTo(Fence* l, hidl_handle const& t) {
+ int fd = native_handle_read_fd(t);
+ if (fd != -1) {
+ fd = dup(fd);
+ if (fd == -1) {
+ return false;
+ }
+ }
+ native_handle_t* nh = native_handle_create_from_fd(fd);
+ if (nh == nullptr) {
+ if (fd != -1) {
+ close(fd);
+ }
+ return false;
+ }
+
+ size_t const baseSize = getFenceFlattenedSize(t);
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ native_handle_delete(nh);
+ return false;
+ }
+
+ size_t const baseNumFds = getFenceFdCount(t);
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ native_handle_delete(nh);
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = static_cast<int*>(baseFds.get());
+ size_t numFds = baseNumFds;
+ if (flattenFence(hidl_handle(nh), buffer, size, fds, numFds) != NO_ERROR) {
+ native_handle_delete(nh);
+ return false;
+ }
+ native_handle_delete(nh);
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+// Ref: frameworks/native/libs/ui/FenceTime.cpp: FenceTime::Snapshot
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(
+ HGraphicBufferProducer::FenceTimeSnapshot const& t) {
+ constexpr size_t min = sizeof(t.state);
+ switch (t.state) {
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY:
+ return min;
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE:
+ return min + getFenceFlattenedSize(t.fence);
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+ return min + sizeof(
+ ::android::FenceTime::Snapshot::signalTime);
+ }
+ return 0;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The number of file descriptors contained in \p t.
+ */
+size_t getFdCount(
+ HGraphicBufferProducer::FenceTimeSnapshot const& t) {
+ return t.state ==
+ HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE ?
+ getFenceFdCount(t.fence) : 0;
+}
+
+/**
+ * \brief Flatten `FenceTimeSnapshot`.
+ *
+ * \param[in] t The source `FenceTimeSnapshot`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate the file descriptor in `t.fence` if `t.state ==
+ * FENCE`.
+ */
+status_t flatten(HGraphicBufferProducer::FenceTimeSnapshot const& t,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+ if (size < getFlattenedSize(t)) {
+ return NO_MEMORY;
+ }
+
+ switch (t.state) {
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY:
+ FlattenableUtils::write(buffer, size,
+ ::android::FenceTime::Snapshot::State::EMPTY);
+ return NO_ERROR;
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE:
+ FlattenableUtils::write(buffer, size,
+ ::android::FenceTime::Snapshot::State::FENCE);
+ return flattenFence(t.fence, buffer, size, fds, numFds);
+ case HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+ FlattenableUtils::write(buffer, size,
+ ::android::FenceTime::Snapshot::State::SIGNAL_TIME);
+ FlattenableUtils::write(buffer, size, t.signalTimeNs);
+ return NO_ERROR;
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Unflatten `FenceTimeSnapshot`.
+ *
+ * \param[out] t The destination `FenceTimeSnapshot`.
+ * \param[out] nh The underlying native handle.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and the constructed snapshot contains a
+ * file descriptor, \p nh will be created to hold that file descriptor. In this
+ * case, \p nh needs to be deleted with `native_handle_delete()` afterwards.
+ */
+status_t unflatten(
+ HGraphicBufferProducer::FenceTimeSnapshot* t, native_handle_t** nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+ if (size < sizeof(t->state)) {
+ return NO_MEMORY;
+ }
+
+ *nh = nullptr;
+ ::android::FenceTime::Snapshot::State state;
+ FlattenableUtils::read(buffer, size, state);
+ switch (state) {
+ case ::android::FenceTime::Snapshot::State::EMPTY:
+ t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY;
+ return NO_ERROR;
+ case ::android::FenceTime::Snapshot::State::FENCE:
+ t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE;
+ return unflattenFence(&t->fence, nh, buffer, size, fds, numFds);
+ case ::android::FenceTime::Snapshot::State::SIGNAL_TIME:
+ t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME;
+ if (size < sizeof(t->signalTimeNs)) {
+ return NO_MEMORY;
+ }
+ FlattenableUtils::read(buffer, size, t->signalTimeNs);
+ return NO_ERROR;
+ }
+ return NO_ERROR;
+}
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventsDelta
+
+/**
+ * \brief Return a lower bound on the size of the non-fd buffer required to
+ * flatten `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return A lower bound on the size of the flat buffer.
+ */
+constexpr size_t minFlattenedSize(
+ HGraphicBufferProducer::FrameEventsDelta const& /* t */) {
+ return sizeof(uint64_t) + // mFrameNumber
+ sizeof(uint8_t) + // mIndex
+ sizeof(uint8_t) + // mAddPostCompositeCalled
+ sizeof(uint8_t) + // mAddRetireCalled
+ sizeof(uint8_t) + // mAddReleaseCalled
+ sizeof(nsecs_t) + // mPostedTime
+ sizeof(nsecs_t) + // mRequestedPresentTime
+ sizeof(nsecs_t) + // mLatchTime
+ sizeof(nsecs_t) + // mFirstRefreshStartTime
+ sizeof(nsecs_t); // mLastRefreshStartTime
+}
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(
+ HGraphicBufferProducer::FrameEventsDelta const& t) {
+ return minFlattenedSize(t) +
+ getFlattenedSize(t.gpuCompositionDoneFence) +
+ getFlattenedSize(t.displayPresentFence) +
+ getFlattenedSize(t.displayRetireFence) +
+ getFlattenedSize(t.releaseFence);
+};
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The number of file descriptors contained in \p t.
+ */
+size_t getFdCount(
+ HGraphicBufferProducer::FrameEventsDelta const& t) {
+ return getFdCount(t.gpuCompositionDoneFence) +
+ getFdCount(t.displayPresentFence) +
+ getFdCount(t.displayRetireFence) +
+ getFdCount(t.releaseFence);
+};
+
+/**
+ * \brief Unflatten `FrameEventsDelta`.
+ *
+ * \param[out] t The destination `FrameEventsDelta`.
+ * \param[out] nh The underlying array of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will have length 4, and it will be
+ * populated with `nullptr` or newly created handles. Each non-null slot in \p
+ * nh will need to be deleted manually with `native_handle_delete()`.
+ */
+status_t unflatten(HGraphicBufferProducer::FrameEventsDelta* t,
+ std::vector<native_handle_t*>* nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+ if (size < minFlattenedSize(*t)) {
+ return NO_MEMORY;
+ }
+ FlattenableUtils::read(buffer, size, t->frameNumber);
+
+ // These were written as uint8_t for alignment.
+ uint8_t temp = 0;
+ FlattenableUtils::read(buffer, size, temp);
+ size_t index = static_cast<size_t>(temp);
+ if (index >= ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+ return BAD_VALUE;
+ }
+ t->index = static_cast<uint32_t>(index);
+
+ FlattenableUtils::read(buffer, size, temp);
+ t->addPostCompositeCalled = static_cast<bool>(temp);
+ FlattenableUtils::read(buffer, size, temp);
+ t->addRetireCalled = static_cast<bool>(temp);
+ FlattenableUtils::read(buffer, size, temp);
+ t->addReleaseCalled = static_cast<bool>(temp);
+
+ FlattenableUtils::read(buffer, size, t->postedTimeNs);
+ FlattenableUtils::read(buffer, size, t->requestedPresentTimeNs);
+ FlattenableUtils::read(buffer, size, t->latchTimeNs);
+ FlattenableUtils::read(buffer, size, t->firstRefreshStartTimeNs);
+ FlattenableUtils::read(buffer, size, t->lastRefreshStartTimeNs);
+ FlattenableUtils::read(buffer, size, t->dequeueReadyTime);
+
+ // Fences
+ HGraphicBufferProducer::FenceTimeSnapshot* tSnapshot[4];
+ tSnapshot[0] = &t->gpuCompositionDoneFence;
+ tSnapshot[1] = &t->displayPresentFence;
+ tSnapshot[2] = &t->displayRetireFence;
+ tSnapshot[3] = &t->releaseFence;
+ nh->resize(4);
+ for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
+ status_t status = unflatten(
+ tSnapshot[snapshotIndex], &((*nh)[snapshotIndex]),
+ buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ while (snapshotIndex > 0) {
+ --snapshotIndex;
+ if ((*nh)[snapshotIndex] != nullptr) {
+ native_handle_delete((*nh)[snapshotIndex]);
+ }
+ }
+ return status;
+ }
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `FrameEventsDelta`.
+ *
+ * \param[in] t The source `FrameEventsDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp:
+// FrameEventsDelta::flatten
+status_t flatten(HGraphicBufferProducer::FrameEventsDelta const& t,
+ void*& buffer, size_t& size, int*& fds, size_t numFds) {
+ // Check that t.index is within a valid range.
+ if (t.index >= static_cast<uint32_t>(FrameEventHistory::MAX_FRAME_HISTORY)
+ || t.index > std::numeric_limits<uint8_t>::max()) {
+ return BAD_VALUE;
+ }
+
+ FlattenableUtils::write(buffer, size, t.frameNumber);
+
+ // These are static_cast to uint8_t for alignment.
+ FlattenableUtils::write(buffer, size, static_cast<uint8_t>(t.index));
+ FlattenableUtils::write(
+ buffer, size, static_cast<uint8_t>(t.addPostCompositeCalled));
+ FlattenableUtils::write(
+ buffer, size, static_cast<uint8_t>(t.addRetireCalled));
+ FlattenableUtils::write(
+ buffer, size, static_cast<uint8_t>(t.addReleaseCalled));
+
+ FlattenableUtils::write(buffer, size, t.postedTimeNs);
+ FlattenableUtils::write(buffer, size, t.requestedPresentTimeNs);
+ FlattenableUtils::write(buffer, size, t.latchTimeNs);
+ FlattenableUtils::write(buffer, size, t.firstRefreshStartTimeNs);
+ FlattenableUtils::write(buffer, size, t.lastRefreshStartTimeNs);
+ FlattenableUtils::write(buffer, size, t.dequeueReadyTime);
+
+ // Fences
+ HGraphicBufferProducer::FenceTimeSnapshot const* tSnapshot[4];
+ tSnapshot[0] = &t.gpuCompositionDoneFence;
+ tSnapshot[1] = &t.displayPresentFence;
+ tSnapshot[2] = &t.displayRetireFence;
+ tSnapshot[3] = &t.releaseFence;
+ for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
+ status_t status = flatten(
+ *(tSnapshot[snapshotIndex]), buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ }
+ return NO_ERROR;
+}
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventHistoryDelta
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
+ size_t size = 4 + // mDeltas.size()
+ sizeof(t.compositorTiming);
+ for (size_t i = 0; i < t.deltas.size(); ++i) {
+ size += getFlattenedSize(t.deltas[i]);
+ }
+ return size;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ * \return The number of file descriptors contained in \p t.
+ */
+size_t getFdCount(
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
+ size_t numFds = 0;
+ for (size_t i = 0; i < t.deltas.size(); ++i) {
+ numFds += getFdCount(t.deltas[i]);
+ }
+ return numFds;
+}
+
+/**
+ * \brief Unflatten `FrameEventHistoryDelta`.
+ *
+ * \param[out] t The destination `FrameEventHistoryDelta`.
+ * \param[out] nh The underlying array of arrays of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will be populated with `nullptr` or
+ * newly created handles. The second dimension of \p nh will be 4. Each non-null
+ * slot in \p nh will need to be deleted manually with `native_handle_delete()`.
+ */
+status_t unflatten(
+ HGraphicBufferProducer::FrameEventHistoryDelta* t,
+ std::vector<std::vector<native_handle_t*> >* nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+ if (size < 4 + sizeof(t->compositorTiming)) {
+ return NO_MEMORY;
+ }
+
+ FlattenableUtils::read(buffer, size, t->compositorTiming);
+
+ uint32_t deltaCount = 0;
+ FlattenableUtils::read(buffer, size, deltaCount);
+ if (static_cast<size_t>(deltaCount) >
+ ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+ return BAD_VALUE;
+ }
+ t->deltas.resize(deltaCount);
+ nh->resize(deltaCount);
+ for (size_t deltaIndex = 0; deltaIndex < deltaCount; ++deltaIndex) {
+ status_t status = unflatten(
+ &(t->deltas[deltaIndex]), &((*nh)[deltaIndex]),
+ buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `FrameEventHistoryDelta`.
+ *
+ * \param[in] t The source `FrameEventHistoryDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+status_t flatten(
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+ if (t.deltas.size() > ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+ return BAD_VALUE;
+ }
+ if (size < getFlattenedSize(t)) {
+ return NO_MEMORY;
+ }
+
+ FlattenableUtils::write(buffer, size, t.compositorTiming);
+
+ FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.deltas.size()));
+ for (size_t deltaIndex = 0; deltaIndex < t.deltas.size(); ++deltaIndex) {
+ status_t status = flatten(t.deltas[deltaIndex], buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Wrap `::android::FrameEventHistoryDelta` in
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[out] t The wrapper of type
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ * \param[out] nh The array of arrays of native handles that are referred to by
+ * members of \p t.
+ * \param[in] l The source `::android::FrameEventHistoryDelta`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+bool wrapAs(HGraphicBufferProducer::FrameEventHistoryDelta* t,
+ std::vector<std::vector<native_handle_t*> >* nh,
+ ::android::FrameEventHistoryDelta const& l) {
+
+ size_t const baseSize = l.getFlattenedSize();
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ size_t const baseNumFds = l.getFdCount();
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = baseFds.get();
+ size_t numFds = baseNumFds;
+ if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * \brief Convert `HGraphicBufferProducer::FrameEventHistoryDelta` to
+ * `::android::FrameEventHistoryDelta`.
+ *
+ * \param[out] l The destination `::android::FrameEventHistoryDelta`.
+ * \param[in] t The source `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+bool convertTo(
+ ::android::FrameEventHistoryDelta* l,
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
+
+ size_t const baseSize = getFlattenedSize(t);
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ size_t const baseNumFds = getFdCount(t);
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = static_cast<int*>(baseFds.get());
+ size_t numFds = baseNumFds;
+ if (flatten(t, buffer, size, fds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+// Ref: frameworks/native/libs/ui/Region.cpp
+
+/**
+ * \brief Return the size of the buffer required to flatten `Region`.
+ *
+ * \param[in] t The input `Region`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(Region const& t) {
+ return sizeof(uint32_t) + t.size() * sizeof(::android::Rect);
+}
+
+/**
+ * \brief Unflatten `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+status_t unflatten(Region* t, void const*& buffer, size_t& size) {
+ if (size < sizeof(uint32_t)) {
+ return NO_MEMORY;
+ }
+
+ uint32_t numRects = 0;
+ FlattenableUtils::read(buffer, size, numRects);
+ if (numRects > (UINT32_MAX / sizeof(Rect))) {
+ return NO_MEMORY;
+ }
+ if (size < numRects * sizeof(Rect)) {
+ return NO_MEMORY;
+ }
+
+ t->resize(numRects);
+ for (size_t r = 0; r < numRects; ++r) {
+ ::android::Rect rect(::android::Rect::EMPTY_RECT);
+ status_t status = rect.unflatten(buffer, size);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ FlattenableUtils::advance(buffer, size, sizeof(rect));
+ (*t)[r] = Rect{
+ static_cast<int32_t>(rect.left),
+ static_cast<int32_t>(rect.top),
+ static_cast<int32_t>(rect.right),
+ static_cast<int32_t>(rect.bottom)};
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `Region`.
+ *
+ * \param[in] t The source `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+status_t flatten(Region const& t, void*& buffer, size_t& size) {
+ if (size < getFlattenedSize(t)) {
+ return NO_MEMORY;
+ }
+
+ FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.size()));
+ for (size_t r = 0; r < t.size(); ++r) {
+ ::android::Rect rect(
+ static_cast<int32_t>(t[r].left),
+ static_cast<int32_t>(t[r].top),
+ static_cast<int32_t>(t[r].right),
+ static_cast<int32_t>(t[r].bottom));
+ status_t status = rect.flatten(buffer, size);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ FlattenableUtils::advance(buffer, size, sizeof(rect));
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Convert `::android::Region` to `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in] l The source `::android::Region`.
+ */
+// convert: ::android::Region -> Region
+bool convertTo(Region* t, ::android::Region const& l) {
+ size_t const baseSize = l.getFlattenedSize();
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ if (l.flatten(buffer, size) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ if (unflatten(t, constBuffer, size) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * \brief Convert `Region` to `::android::Region`.
+ *
+ * \param[out] l The destination `::android::Region`.
+ * \param[in] t The source `Region`.
+ */
+// convert: Region -> ::android::Region
+bool convertTo(::android::Region* l, Region const& t) {
+ size_t const baseSize = getFlattenedSize(t);
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ if (flatten(t, buffer, size) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ if (l->unflatten(constBuffer, size) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
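+
+// Design note: rather than copying rectangles field by field, the two
+// convertTo() overloads above round-trip through the flattened representation
+// (flatten on the source side, unflatten on the destination side). The same
+// pattern is used by the wrapAs()/convertTo() helpers for the larger producer
+// types in this file.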
+
+// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
+// BGraphicBufferProducer::QueueBufferInput
+
+/**
+ * \brief Return a lower bound on the size of the buffer required to flatten
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
+ * \return A lower bound on the size of the flat buffer.
+ */
+constexpr size_t minFlattenedSize(
+ HGraphicBufferProducer::QueueBufferInput const& /* t */) {
+ return sizeof(int64_t) + // timestamp
+ sizeof(int) + // isAutoTimestamp
+ sizeof(android_dataspace) + // dataSpace
+ sizeof(::android::Rect) + // crop
+ sizeof(int) + // scalingMode
+ sizeof(uint32_t) + // transform
+ sizeof(uint32_t) + // stickyTransform
+ sizeof(bool); // getFrameTimestamps
+}
+
+/**
+ * \brief Return the size of the buffer required to flatten
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(HGraphicBufferProducer::QueueBufferInput const& t) {
+ return minFlattenedSize(t) +
+ getFenceFlattenedSize(t.fence) +
+ getFlattenedSize(t.surfaceDamage) +
+ sizeof(HdrMetadata::validTypes);
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
+ * \return The number of file descriptors contained in \p t.
+ */
+size_t getFdCount(
+ HGraphicBufferProducer::QueueBufferInput const& t) {
+ return getFenceFdCount(t.fence);
+}
+
+/**
+ * \brief Flatten `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
+ * \param[out] nh The native handle cloned from `t.fence`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate the file descriptor in `t.fence`.
+ */
+status_t flatten(HGraphicBufferProducer::QueueBufferInput const& t,
+ native_handle_t** nh,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+ if (size < getFlattenedSize(t)) {
+ return NO_MEMORY;
+ }
+
+ FlattenableUtils::write(buffer, size, t.timestamp);
+ FlattenableUtils::write(buffer, size, static_cast<int>(t.isAutoTimestamp));
+ FlattenableUtils::write(buffer, size,
+ static_cast<android_dataspace_t>(t.dataSpace));
+ FlattenableUtils::write(buffer, size, ::android::Rect(
+ static_cast<int32_t>(t.crop.left),
+ static_cast<int32_t>(t.crop.top),
+ static_cast<int32_t>(t.crop.right),
+ static_cast<int32_t>(t.crop.bottom)));
+ FlattenableUtils::write(buffer, size, static_cast<int>(t.scalingMode));
+ FlattenableUtils::write(buffer, size, t.transform);
+ FlattenableUtils::write(buffer, size, t.stickyTransform);
+ FlattenableUtils::write(buffer, size, t.getFrameTimestamps);
+
+ *nh = t.fence.getNativeHandle() == nullptr ?
+ nullptr : native_handle_clone(t.fence);
+ status_t status = flattenFence(hidl_handle(*nh), buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = flatten(t.surfaceDamage, buffer, size);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ FlattenableUtils::write(buffer, size, decltype(HdrMetadata::validTypes)(0));
+ return NO_ERROR;
+}
+
+/**
+ * \brief Unflatten `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The destination `HGraphicBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+status_t unflatten(
+ HGraphicBufferProducer::QueueBufferInput* t, native_handle_t** nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+ if (size < minFlattenedSize(*t)) {
+ return NO_MEMORY;
+ }
+
+ FlattenableUtils::read(buffer, size, t->timestamp);
+ int lIsAutoTimestamp;
+ FlattenableUtils::read(buffer, size, lIsAutoTimestamp);
+ t->isAutoTimestamp = static_cast<int32_t>(lIsAutoTimestamp);
+ android_dataspace_t lDataSpace;
+ FlattenableUtils::read(buffer, size, lDataSpace);
+ t->dataSpace = static_cast<Dataspace>(lDataSpace);
+ Rect lCrop;
+ FlattenableUtils::read(buffer, size, lCrop);
+ t->crop = Rect{
+ static_cast<int32_t>(lCrop.left),
+ static_cast<int32_t>(lCrop.top),
+ static_cast<int32_t>(lCrop.right),
+ static_cast<int32_t>(lCrop.bottom)};
+ int lScalingMode;
+ FlattenableUtils::read(buffer, size, lScalingMode);
+ t->scalingMode = static_cast<int32_t>(lScalingMode);
+ FlattenableUtils::read(buffer, size, t->transform);
+ FlattenableUtils::read(buffer, size, t->stickyTransform);
+ FlattenableUtils::read(buffer, size, t->getFrameTimestamps);
+
+ status_t status = unflattenFence(&(t->fence), nh,
+ buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ // HdrMetadata ignored
+ return unflatten(&(t->surfaceDamage), buffer, size);
+}
+
+/**
+ * \brief Wrap `BGraphicBufferProducer::QueueBufferInput` in
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The wrapper of type
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in] l The source `BGraphicBufferProducer::QueueBufferInput`.
+ *
+ * If the return value is `true` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+bool wrapAs(
+ HGraphicBufferProducer::QueueBufferInput* t,
+ native_handle_t** nh,
+ BGraphicBufferProducer::QueueBufferInput const& l) {
+
+ size_t const baseSize = l.getFlattenedSize();
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ size_t const baseNumFds = l.getFdCount();
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = baseFds.get();
+ size_t numFds = baseNumFds;
+ if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * \brief Convert `HGraphicBufferProducer::QueueBufferInput` to
+ * `BGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] l The destination `BGraphicBufferProducer::QueueBufferInput`.
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * If `t.fence` has a valid file descriptor, it will be duplicated.
+ */
+bool convertTo(
+ BGraphicBufferProducer::QueueBufferInput* l,
+ HGraphicBufferProducer::QueueBufferInput const& t) {
+
+ size_t const baseSize = getFlattenedSize(t);
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ size_t const baseNumFds = getFdCount(t);
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = baseFds.get();
+ size_t numFds = baseNumFds;
+ native_handle_t* nh = nullptr;
+ if (flatten(t, &nh, buffer, size, fds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+ if (nh != nullptr) {
+ native_handle_close(nh);
+ native_handle_delete(nh);
+ }
+ return false;
+ }
+
+ native_handle_delete(nh);
+ return true;
+}
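+
+// A minimal usage sketch for the wrapAs() helper above (hypothetical caller
+// code, not part of this change):
+//
+// BGraphicBufferProducer::QueueBufferInput bInput = ...;
+// HGraphicBufferProducer::QueueBufferInput hInput{};
+// native_handle_t* nh = nullptr;
+// if (wrapAs(&hInput, &nh, bInput)) {
+// // ... send hInput across the HIDL boundary ...
+// if (nh != nullptr) {
+// native_handle_delete(nh); // wrapAs() created nh; the caller deletes it.
+// }
+// }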
+
+// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
+// BGraphicBufferProducer::QueueBufferOutput
+
+/**
+ * \brief Wrap `BGraphicBufferProducer::QueueBufferOutput` in
+ * `HGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] t The wrapper of type
+ * `HGraphicBufferProducer::QueueBufferOutput`.
+ * \param[out] nh The array of arrays of native handles that are referred to by
+ * members of \p t.
+ * \param[in] l The source `BGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+// wrap: BGraphicBufferProducer::QueueBufferOutput ->
+// HGraphicBufferProducer::QueueBufferOutput
+bool wrapAs(HGraphicBufferProducer::QueueBufferOutput* t,
+ std::vector<std::vector<native_handle_t*> >* nh,
+ BGraphicBufferProducer::QueueBufferOutput const& l) {
+ if (!wrapAs(&(t->frameTimestamps), nh, l.frameTimestamps)) {
+ return false;
+ }
+ t->width = l.width;
+ t->height = l.height;
+ t->transformHint = l.transformHint;
+ t->numPendingBuffers = l.numPendingBuffers;
+ t->nextFrameNumber = l.nextFrameNumber;
+ t->bufferReplaced = l.bufferReplaced;
+ return true;
+}
+
+/**
+ * \brief Convert `HGraphicBufferProducer::QueueBufferOutput` to
+ * `BGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] l The destination `BGraphicBufferProducer::QueueBufferOutput`.
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+// convert: HGraphicBufferProducer::QueueBufferOutput ->
+// BGraphicBufferProducer::QueueBufferOutput
+bool convertTo(
+ BGraphicBufferProducer::QueueBufferOutput* l,
+ HGraphicBufferProducer::QueueBufferOutput const& t) {
+ if (!convertTo(&(l->frameTimestamps), t.frameTimestamps)) {
+ return false;
+ }
+ l->width = t.width;
+ l->height = t.height;
+ l->transformHint = t.transformHint;
+ l->numPendingBuffers = t.numPendingBuffers;
+ l->nextFrameNumber = t.nextFrameNumber;
+ l->bufferReplaced = t.bufferReplaced;
+ return true;
+}
+
+/**
+ * \brief Convert `BGraphicBufferProducer::DisconnectMode` to
+ * `HGraphicBufferProducer::DisconnectMode`.
+ *
+ * \param[in] l The source `BGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `HGraphicBufferProducer::DisconnectMode`.
+ */
+HGraphicBufferProducer::DisconnectMode toHidlDisconnectMode(
+ BGraphicBufferProducer::DisconnectMode l) {
+ switch (l) {
+ case BGraphicBufferProducer::DisconnectMode::Api:
+ return HGraphicBufferProducer::DisconnectMode::API;
+ case BGraphicBufferProducer::DisconnectMode::AllLocal:
+ return HGraphicBufferProducer::DisconnectMode::ALL_LOCAL;
+ }
+ return HGraphicBufferProducer::DisconnectMode::API;
+}
+
+/**
+ * \brief Convert `HGraphicBufferProducer::DisconnectMode` to
+ * `BGraphicBufferProducer::DisconnectMode`.
+ *
+ * \param[in] l The source `HGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `BGraphicBufferProducer::DisconnectMode`.
+ */
+BGraphicBufferProducer::DisconnectMode toGuiDisconnectMode(
+ HGraphicBufferProducer::DisconnectMode t) {
+ switch (t) {
+ case HGraphicBufferProducer::DisconnectMode::API:
+ return BGraphicBufferProducer::DisconnectMode::Api;
+ case HGraphicBufferProducer::DisconnectMode::ALL_LOCAL:
+ return BGraphicBufferProducer::DisconnectMode::AllLocal;
+ }
+ return BGraphicBufferProducer::DisconnectMode::Api;
+}
+
+} // namespace conversion
+} // namespace android
+
diff --git a/media/libstagefright/bqhelper/FrameDropper.cpp b/media/libstagefright/bqhelper/FrameDropper.cpp
new file mode 100644
index 0000000..d2a2473
--- /dev/null
+++ b/media/libstagefright/bqhelper/FrameDropper.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameDropper"
+#include <utils/Log.h>
+
+#include <media/stagefright/bqhelper/FrameDropper.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace android {
+
+static const int64_t kMaxJitterUs = 2000;
+
+FrameDropper::FrameDropper()
+ : mDesiredMinTimeUs(-1),
+ mMinIntervalUs(0) {
+}
+
+FrameDropper::~FrameDropper() {
+}
+
+status_t FrameDropper::setMaxFrameRate(float maxFrameRate) {
+ if (maxFrameRate < 0) {
+ mMinIntervalUs = -1ll;
+ return OK;
+ }
+
+ if (maxFrameRate == 0) {
+ ALOGE("framerate should be positive but got %f.", maxFrameRate);
+ return BAD_VALUE;
+ }
+ mMinIntervalUs = (int64_t) (1000000.0f / maxFrameRate);
+ return OK;
+}
+
+bool FrameDropper::shouldDrop(int64_t timeUs) {
+ if (mMinIntervalUs <= 0) {
+ return false;
+ }
+
+ if (mDesiredMinTimeUs < 0) {
+ mDesiredMinTimeUs = timeUs + mMinIntervalUs;
+ ALOGV("first frame %lld, next desired frame %lld",
+ (long long)timeUs, (long long)mDesiredMinTimeUs);
+ return false;
+ }
+
+ if (timeUs < (mDesiredMinTimeUs - kMaxJitterUs)) {
+ ALOGV("drop frame %lld, desired frame %lld, diff %lld",
+ (long long)timeUs, (long long)mDesiredMinTimeUs,
+ (long long)(mDesiredMinTimeUs - timeUs));
+ return true;
+ }
+
+ int64_t n = (timeUs - mDesiredMinTimeUs + kMaxJitterUs) / mMinIntervalUs;
+ mDesiredMinTimeUs += (n + 1) * mMinIntervalUs;
+ ALOGV("keep frame %lld, next desired frame %lld, diff %lld",
+ (long long)timeUs, (long long)mDesiredMinTimeUs,
+ (long long)(mDesiredMinTimeUs - timeUs));
+ return false;
+}
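+
+// A worked example of shouldDrop() above, assuming setMaxFrameRate(30) so that
+// mMinIntervalUs is 33333, with kMaxJitterUs = 2000 (all times in microseconds;
+// the numbers are illustrative only):
+//
+// shouldDrop(0) -> false; first frame, the next desired frame becomes 33333.
+// shouldDrop(10000) -> true; 10000 < 33333 - 2000, earlier than the desired
+// time by more than the jitter allowance.
+// shouldDrop(34000) -> false; within jitter of the desired time, and the next
+// desired frame advances to 66666.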
+
+} // namespace android
diff --git a/media/libstagefright/bqhelper/GraphicBufferSource.cpp b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
new file mode 100644
index 0000000..dd03d38
--- /dev/null
+++ b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
@@ -0,0 +1,1377 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#define LOG_TAG "GraphicBufferSource"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#define STRINGIFY_ENUMS // for asString in HardwareAPI.h/VideoAPI.h
+
+#include <media/stagefright/bqhelper/GraphicBufferSource.h>
+#include <media/stagefright/bqhelper/FrameDropper.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/foundation/FileDescriptor.h>
+
+#include <media/hardware/MetadataBufferType.h>
+#include <ui/GraphicBuffer.h>
+#include <gui/BufferItem.h>
+#include <media/hardware/HardwareAPI.h>
+
+#include <functional>
+#include <memory>
+#include <cmath>
+
+namespace android {
+
+namespace {
+// kTimestampFluctuation is an upper bound of timestamp fluctuation from the
+// source that GraphicBufferSource allows. The unit of kTimestampFluctuation is
+// frames. More specifically, GraphicBufferSource will drop a frame if
+//
+// expectedNewFrameTimestamp - actualNewFrameTimestamp <
+// (0.5 - kTimestampFluctuation) * expectedTimePeriodBetweenFrames
+//
+// where
+// - expectedNewFrameTimestamp is the calculated ideal timestamp of the new
+// incoming frame
+// - actualNewFrameTimestamp is the timestamp received from the source
+// - expectedTimePeriodBetweenFrames is the ideal difference of the timestamps
+// of two adjacent frames
+//
+// See GraphicBufferSource::calculateCodecTimestamp_l() for more detail about
+// how kTimestampFluctuation is used.
+//
+// kTimestampFluctuation should be non-negative. A higher value causes a smaller
+// chance of dropping frames, but at the same time a higher bound on the
+// difference between the source timestamp and the interpreted (snapped)
+// timestamp.
+//
+// The value of 0.05 means that GraphicBufferSource expects the input timestamps
+// to fluctuate no more than 5% from the regular time period.
+//
+// TODO: Justify the choice of this value, or make it configurable.
+constexpr double kTimestampFluctuation = 0.05;
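+//
+// As an illustrative example (the numbers are assumptions, not part of this
+// change): for a nominal 30 fps source the expected time period between frames
+// is about 33333 us, so with kTimestampFluctuation = 0.05 a frame whose
+// timestamp lands less than (0.5 - 0.05) * 33333 us ~= 15000 us after the
+// previously snapped capture point is considered too close and is skipped by
+// calculateCodecTimestamp_l().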
+}
+
+/**
+ * A copyable object managing a buffer in the buffer cache managed by the producer. This object
+ * holds a reference to the buffer, tracks which buffer slot it belongs to (if any), and
+ * whether it is still in a buffer slot. It also tracks whether there are any outstanding acquire
+ * references to it (by buffers acquired from the slot), mainly so that we can keep a debug
+ * count of how many buffers we still need to release back to the producer.
+ */
+struct GraphicBufferSource::CachedBuffer {
+ /**
+ * Token that is used to track acquire counts (as opposed to all references to this object).
+ */
+ struct Acquirable { };
+
+ /**
+ * Create using a buffer cached in a slot.
+ */
+ CachedBuffer(slot_id slot, const sp<GraphicBuffer> &graphicBuffer)
+ : mIsCached(true),
+ mSlot(slot),
+ mGraphicBuffer(graphicBuffer),
+ mAcquirable(std::make_shared<Acquirable>()) {
+ }
+
+ /**
+ * Returns the cache slot that this buffer is cached in, or -1 if it is no longer cached.
+ *
+ * This assumes that a slot id of -1 is invalid; even if it were valid, the collision would be
+ * benign as the value is only used for debugging. This object explicitly tracks whether it is still cached.
+ */
+ slot_id getSlot() const {
+ return mIsCached ? mSlot : -1;
+ }
+
+ /**
+ * Returns the cached buffer.
+ */
+ sp<GraphicBuffer> getGraphicBuffer() const {
+ return mGraphicBuffer;
+ }
+
+ /**
+ * Checks whether this buffer is still in the buffer cache.
+ */
+ bool isCached() const {
+ return mIsCached;
+ }
+
+ /**
+ * Checks whether this buffer has an acquired reference.
+ */
+ bool isAcquired() const {
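+ // mAcquirable itself accounts for one reference, and every live
+ // AcquiredBuffer holds another copy obtained via getAcquirable(), so a
+ // use_count() greater than 1 means an acquired reference is still
+ // outstanding.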
+ return mAcquirable.use_count() > 1;
+ }
+
+ /**
+ * Gets and returns a shared acquired reference.
+ */
+ std::shared_ptr<Acquirable> getAcquirable() {
+ return mAcquirable;
+ }
+
+private:
+ friend void GraphicBufferSource::discardBufferAtSlotIndex_l(ssize_t);
+
+ /**
+ * This method is to be called when the buffer is no longer in the buffer cache.
+ * Called from discardBufferAtSlotIndex_l.
+ */
+ void onDroppedFromCache() {
+ CHECK_DBG(mIsCached);
+ mIsCached = false;
+ }
+
+ bool mIsCached;
+ slot_id mSlot;
+ sp<GraphicBuffer> mGraphicBuffer;
+ std::shared_ptr<Acquirable> mAcquirable;
+};
+
+/**
+ * A copyable object managing a buffer acquired from the producer. This must always be a cached
+ * buffer. This object also manages its acquire fence and any release fences that may be returned
+ * by the encoder for this buffer (this buffer may be queued to the encoder multiple times).
+ * If no release fences are added by the encoder, the acquire fence is returned as the release
+ * fence for this buffer - as it is assumed that no one waited for the acquire fence. Otherwise, it is
+ * assumed that the encoder has waited for the acquire fence (or returned it as the release
+ * fence).
+ */
+struct GraphicBufferSource::AcquiredBuffer {
+ AcquiredBuffer(
+ const std::shared_ptr<CachedBuffer> &buffer,
+ std::function<void(AcquiredBuffer *)> onReleased,
+ const sp<Fence> &acquireFence)
+ : mBuffer(buffer),
+ mAcquirable(buffer->getAcquirable()),
+ mAcquireFence(acquireFence),
+ mGotReleaseFences(false),
+ mOnReleased(onReleased) {
+ }
+
+ /**
+ * Adds a release fence returned by the encoder to this object. If this is called with a
+ * valid file descriptor, it is added to the list of release fences. These are returned to the
+ * producer on release() as a merged fence. Regardless of the validity of the file descriptor,
+ * we note that the encoder attempted to add a release fence, so the acquire fence can now be
+ * assumed to have been waited on.
+ */
+ void addReleaseFenceFd(int fenceFd) {
+ // save all release fences - these will be propagated to the producer if this buffer is
+ // ever released to it
+ if (fenceFd >= 0) {
+ mReleaseFenceFds.push_back(fenceFd);
+ }
+ mGotReleaseFences = true;
+ }
+
+ /**
+ * Returns the acquire fence file descriptor associated with this object.
+ */
+ int getAcquireFenceFd() {
+ if (mAcquireFence == nullptr || !mAcquireFence->isValid()) {
+ return -1;
+ }
+ return mAcquireFence->dup();
+ }
+
+ /**
+ * Returns whether the buffer is still in the buffer cache.
+ */
+ bool isCached() const {
+ return mBuffer->isCached();
+ }
+
+ /**
+ * Returns the acquired buffer.
+ */
+ sp<GraphicBuffer> getGraphicBuffer() const {
+ return mBuffer->getGraphicBuffer();
+ }
+
+ /**
+ * Returns the slot that this buffer is cached at, or -1 otherwise.
+ *
+ * This assumes that a slot id of -1 is invalid; even if it were valid, the collision would be
+ * benign as the value is only used for debugging. This object explicitly tracks whether it is still cached.
+ */
+ slot_id getSlot() const {
+ return mBuffer->getSlot();
+ }
+
+ /**
+ * Creates and returns a release fence object from the acquire fence and/or any release fences
+ * added. If no release fence was ever added (not even an invalid one), returns the acquire fence.
+ * Otherwise, it returns a merged fence built from all the valid release fences added.
+ */
+ sp<Fence> getReleaseFence() {
+ // If we did not receive any release fences, we assume this buffer was not consumed (it was
+ // discarded or dropped). In this case release the acquire fence as the release fence.
+ // We do this here to avoid a dup, close and recreation of the Fence object.
+ if (!mGotReleaseFences) {
+ return mAcquireFence;
+ }
+ sp<Fence> ret = getReleaseFence(0, mReleaseFenceFds.size());
+ // clear fds as fence took ownership of them
+ mReleaseFenceFds.clear();
+ return ret;
+ }
+
+ // this video buffer is no longer referenced by the codec (or kept for later encoding)
+ // it is now safe to release to the producer
+ ~AcquiredBuffer() {
+ //mAcquirable.clear();
+ mOnReleased(this);
+ // mOnReleased should have called getReleaseFence(), which takes ownership of all fds; close any leftovers just in case.
+ ALOGW_IF(!mReleaseFenceFds.empty(), "release fences were not obtained, closing fds");
+ for (int fildes : mReleaseFenceFds) {
+ ::close(fildes);
+ TRESPASS_DBG();
+ }
+ }
+
+private:
+ std::shared_ptr<GraphicBufferSource::CachedBuffer> mBuffer;
+ std::shared_ptr<GraphicBufferSource::CachedBuffer::Acquirable> mAcquirable;
+ sp<Fence> mAcquireFence;
+ Vector<int> mReleaseFenceFds;
+ bool mGotReleaseFences;
+ std::function<void(AcquiredBuffer *)> mOnReleased;
+
+ /**
+ * Creates and returns a release fence from 0 or more release fence file descriptors from
+ * the specified range in the array.
+ *
+ * @param start start index
+ * @param num number of release fds to merge
+ */
+ sp<Fence> getReleaseFence(size_t start, size_t num) const {
+ if (num == 0) {
+ return Fence::NO_FENCE;
+ } else if (num == 1) {
+ return new Fence(mReleaseFenceFds[start]);
+ } else {
+ return Fence::merge("GBS::AB",
+ getReleaseFence(start, num >> 1),
+ getReleaseFence(start + (num >> 1), num - (num >> 1)));
+ }
+ }
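+
+ // Merging pairwise keeps each Fence::merge() call binary; the recursion
+ // above builds a balanced merge tree over the fds in [start, start + num).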
+};
+
+GraphicBufferSource::GraphicBufferSource() :
+ mInitCheck(UNKNOWN_ERROR),
+ mNumAvailableUnacquiredBuffers(0),
+ mNumOutstandingAcquires(0),
+ mEndOfStream(false),
+ mEndOfStreamSent(false),
+ mLastDataspace(HAL_DATASPACE_UNKNOWN),
+ mExecuting(false),
+ mSuspended(false),
+ mLastFrameTimestampUs(-1),
+ mStopTimeUs(-1),
+ mLastActionTimeUs(-1ll),
+ mSkipFramesBeforeNs(-1ll),
+ mFrameRepeatIntervalUs(-1ll),
+ mRepeatLastFrameGeneration(0),
+ mOutstandingFrameRepeatCount(0),
+ mFrameRepeatBlockedOnCodecBuffer(false),
+ mFps(-1.0),
+ mCaptureFps(-1.0),
+ mBaseCaptureUs(-1ll),
+ mBaseFrameUs(-1ll),
+ mFrameCount(0),
+ mPrevCaptureUs(-1ll),
+ mPrevFrameUs(-1ll),
+ mInputBufferTimeOffsetUs(0ll) {
+ ALOGV("GraphicBufferSource");
+
+ String8 name("GraphicBufferSource");
+
+ BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+ mConsumer->setConsumerName(name);
+
+ // Note that we can't create a temporary sp<...>(this) in the ctor (one that
+ // does not outlive the ctor), as destroying it would drop the refcount of
+ // 'this' to 0 at the end of the ctor. Since all we need is a wp<...>,
+ // that's what we create.
+ wp<BufferQueue::ConsumerListener> listener =
+ static_cast<BufferQueue::ConsumerListener*>(this);
+ sp<IConsumerListener> proxy =
+ new BufferQueue::ProxyConsumerListener(listener);
+
+ mInitCheck = mConsumer->consumerConnect(proxy, false);
+ if (mInitCheck != NO_ERROR) {
+ ALOGE("Error connecting to BufferQueue: %s (%d)",
+ strerror(-mInitCheck), mInitCheck);
+ return;
+ }
+
+ memset(&mDefaultColorAspectsPacked, 0, sizeof(mDefaultColorAspectsPacked));
+
+ CHECK(mInitCheck == NO_ERROR);
+}
+
+GraphicBufferSource::~GraphicBufferSource() {
+ ALOGV("~GraphicBufferSource");
+ {
+ // all acquired buffers must be freed with the mutex locked otherwise our debug assertion
+ // may trigger
+ Mutex::Autolock autoLock(mMutex);
+ mAvailableBuffers.clear();
+ mSubmittedCodecBuffers.clear();
+ mLatestBuffer.mBuffer.reset();
+ }
+
+ if (mNumOutstandingAcquires != 0) {
+ ALOGW("potential buffer leak: acquired=%d", mNumOutstandingAcquires);
+ TRESPASS_DBG();
+ }
+ if (mConsumer != NULL) {
+ status_t err = mConsumer->consumerDisconnect();
+ if (err != NO_ERROR) {
+ ALOGW("consumerDisconnect failed: %d", err);
+ }
+ }
+}
+
+Status GraphicBufferSource::start() {
+ Mutex::Autolock autoLock(mMutex);
+ ALOGV("--> start; available=%zu, submittable=%zd",
+ mAvailableBuffers.size(), mFreeCodecBuffers.size());
+ CHECK(!mExecuting);
+ mExecuting = true;
+ mLastDataspace = HAL_DATASPACE_UNKNOWN;
+ ALOGV("clearing last dataSpace");
+
+ // Start by loading up as many buffers as possible. We want to do this,
+ // rather than just submit the first buffer, to avoid a degenerate case:
+ // if all BQ buffers arrive before we start executing, and we only submit
+ // one here, the other BQ buffers will just sit until we get notified
+ // that the codec buffer has been released. We'd then acquire and
+ // submit a single additional buffer, repeatedly, never using more than
+ // one codec buffer simultaneously. (We could instead try to submit
+ // all BQ buffers whenever any codec buffer is freed, but if we get the
+ // initial conditions right that will never be useful.)
+ while (haveAvailableBuffers_l()) {
+ if (!fillCodecBuffer_l()) {
+ ALOGV("stop load with available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ break;
+ }
+ }
+
+ ALOGV("done loading initial frames, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+
+ // If EOS has already been signaled, and there are no more frames to
+ // submit, try to send EOS now as well.
+ if (mStopTimeUs == -1 && mEndOfStream && !haveAvailableBuffers_l()) {
+ submitEndOfInputStream_l();
+ }
+
+ if (mFrameRepeatIntervalUs > 0ll && mLooper == NULL) {
+ mReflector = new AHandlerReflector<GraphicBufferSource>(this);
+
+ mLooper = new ALooper;
+ mLooper->registerHandler(mReflector);
+ mLooper->start();
+
+ if (mLatestBuffer.mBuffer != nullptr) {
+ queueFrameRepeat_l();
+ }
+ }
+
+ return Status::ok();
+}
+
+Status GraphicBufferSource::stop() {
+ ALOGV("stop");
+
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mExecuting) {
+ // We are only interested in the transition from executing->idle,
+ // not loaded->idle.
+ mExecuting = false;
+ }
+ return Status::ok();
+}
+
+Status GraphicBufferSource::release(){
+ sp<ALooper> looper;
+ {
+ Mutex::Autolock autoLock(mMutex);
+ looper = mLooper;
+ if (mLooper != NULL) {
+ mLooper->unregisterHandler(mReflector->id());
+ mReflector.clear();
+
+ mLooper.clear();
+ }
+
+ ALOGV("--> release; available=%zu+%d eos=%d eosSent=%d acquired=%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers,
+ mEndOfStream, mEndOfStreamSent, mNumOutstandingAcquires);
+
+ // Codec is no longer executing. Releasing all buffers to bq.
+ mFreeCodecBuffers.clear();
+ mSubmittedCodecBuffers.clear();
+ mLatestBuffer.mBuffer.reset();
+ mComponent.clear();
+ mExecuting = false;
+ }
+ if (looper != NULL) {
+ looper->stop();
+ }
+ return Status::ok();
+}
+
+Status GraphicBufferSource::onInputBufferAdded(codec_buffer_id bufferId) {
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mExecuting) {
+ // This should never happen -- buffers can only be allocated when
+ // transitioning from "loaded" to "idle".
+ ALOGE("addCodecBuffer: buffer added while executing");
+ return Status::fromServiceSpecificError(INVALID_OPERATION);
+ }
+
+ ALOGV("addCodecBuffer: bufferId=%u", bufferId);
+
+ mFreeCodecBuffers.push_back(bufferId);
+ return Status::ok();
+}
+
+Status GraphicBufferSource::onInputBufferEmptied(codec_buffer_id bufferId, int fenceFd) {
+ Mutex::Autolock autoLock(mMutex);
+ FileDescriptor::Autoclose fence(fenceFd);
+
+ ssize_t cbi = mSubmittedCodecBuffers.indexOfKey(bufferId);
+ if (cbi < 0) {
+ // This should never happen.
+ ALOGE("onInputBufferEmptied: buffer not recognized (bufferId=%u)", bufferId);
+ return Status::fromServiceSpecificError(BAD_VALUE);
+ }
+
+ std::shared_ptr<AcquiredBuffer> buffer = mSubmittedCodecBuffers.valueAt(cbi);
+
+ // Move buffer to available buffers
+ mSubmittedCodecBuffers.removeItemsAt(cbi);
+ mFreeCodecBuffers.push_back(bufferId);
+
+ // header->nFilledLen may not be the original value, so we can't compare
+ // that to zero to see if this was the EOS buffer. Instead we just
+ // see if there is a null AcquiredBuffer, which should only ever happen for EOS.
+ if (buffer == nullptr) {
+ if (!(mEndOfStream && mEndOfStreamSent)) {
+ // This can happen when broken code sends us the same buffer twice in a row.
+ ALOGE("onInputBufferEmptied: non-EOS null buffer (bufferId=%u)", bufferId);
+ } else {
+ ALOGV("onInputBufferEmptied: EOS null buffer (bufferId=%u@%zd)", bufferId, cbi);
+ }
+ // No GraphicBuffer to deal with, no additional input or output is expected, so just return.
+ return Status::fromServiceSpecificError(BAD_VALUE);
+ }
+
+ if (!mExecuting) {
+ // this is fine since this could happen when going from Idle to Loaded
+ ALOGV("onInputBufferEmptied: no longer executing (bufferId=%u@%zd)", bufferId, cbi);
+ return Status::fromServiceSpecificError(OK);
+ }
+
+ ALOGV("onInputBufferEmptied: bufferId=%d@%zd [slot=%d, useCount=%ld, handle=%p] acquired=%d",
+ bufferId, cbi, buffer->getSlot(), buffer.use_count(), buffer->getGraphicBuffer()->handle,
+ mNumOutstandingAcquires);
+
+ buffer->addReleaseFenceFd(fence.release());
+ // release the codec reference for this video buffer just in case the removal above did not do it
+ buffer.reset();
+
+ if (haveAvailableBuffers_l()) {
+ // Fill this codec buffer.
+ CHECK(!mEndOfStreamSent);
+ ALOGV("onInputBufferEmptied: buffer freed, feeding codec (available=%zu+%d, eos=%d)",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers, mEndOfStream);
+ fillCodecBuffer_l();
+ } else if (mEndOfStream && mStopTimeUs == -1) {
+ // No frames available, but EOS is pending and no stop time, so use this buffer to
+ // send that.
+ ALOGV("onInputBufferEmptied: buffer freed, submitting EOS");
+ submitEndOfInputStream_l();
+ } else if (mFrameRepeatBlockedOnCodecBuffer) {
+ bool success = repeatLatestBuffer_l();
+ ALOGV("onInputBufferEmptied: completing deferred repeatLatestBuffer_l %s",
+ success ? "SUCCESS" : "FAILURE");
+ mFrameRepeatBlockedOnCodecBuffer = false;
+ }
+
+ // releaseReleasableBuffers_l();
+ return Status::ok();
+}
+
+void GraphicBufferSource::onDataspaceChanged_l(
+ android_dataspace dataspace, android_pixel_format pixelFormat) {
+ ALOGD("got buffer with new dataSpace #%x", dataspace);
+ mLastDataspace = dataspace;
+
+ if (ColorUtils::convertDataSpaceToV0(dataspace)) {
+ mComponent->dispatchDataSpaceChanged(
+ mLastDataspace, mDefaultColorAspectsPacked, pixelFormat);
+ }
+}
+
+bool GraphicBufferSource::fillCodecBuffer_l() {
+ CHECK(mExecuting && haveAvailableBuffers_l());
+
+ if (mFreeCodecBuffers.empty()) {
+ // No buffers available, bail.
+ ALOGV("fillCodecBuffer_l: no codec buffers, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ return false;
+ }
+
+ VideoBuffer item;
+ if (mAvailableBuffers.empty()) {
+ ALOGV("fillCodecBuffer_l: acquiring available buffer, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ if (acquireBuffer_l(&item) != OK) {
+ ALOGE("fillCodecBuffer_l: failed to acquire available buffer");
+ return false;
+ }
+ } else {
+ ALOGV("fillCodecBuffer_l: getting available buffer, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ item = *mAvailableBuffers.begin();
+ mAvailableBuffers.erase(mAvailableBuffers.begin());
+ }
+
+ int64_t itemTimeUs = item.mTimestampNs / 1000;
+
+ // Process any ActionItems in the queue. If a buffer's timestamp is smaller
+ // than the first action's timestamp, no action needs to be performed.
+ // If the buffer's timestamp is greater than or equal to the last action's
+ // timestamp, only the last action needs to be performed, as all the actions
+ // before it are overridden by the last one. For the other cases, traverse
+ // the queue to find the newest action with a timestamp smaller than or equal
+ // to the buffer's timestamp. For example, given an action queue like
+ // [pause 1us], [resume 2us], [pause 3us], [resume 4us], [pause 5us]..., upon
+ // receiving a buffer with timestamp 3.5us, only the action [pause, 3us] needs
+ // to be handled and [pause, 1us], [resume 2us] will be discarded.
+ bool done = false;
+ bool seeStopAction = false;
+ if (!mActionQueue.empty()) {
+ // First scan to check if bufferTimestamp is smaller than first action's timestamp.
+ ActionItem nextAction = *(mActionQueue.begin());
+ if (itemTimeUs < nextAction.mActionTimeUs) {
+ ALOGV("No action. buffer timestamp %lld us < action timestamp: %lld us",
+ (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+ // All the actions are ahead. No action needs to be performed now.
+ // Release the buffer if we are in the suspended state, or process the
+ // buffer if not.
+ done = true;
+ }
+
+ if (!done) {
+ // Find the newest action with a timestamp smaller than or equal to itemTimeUs,
+ // then remove it and all the actions before it from the queue.
+ List<ActionItem>::iterator it = mActionQueue.begin();
+ while (it != mActionQueue.end() && it->mActionTimeUs <= itemTimeUs
+ && nextAction.mAction != ActionItem::STOP) {
+ nextAction = *it;
+ ++it;
+ }
+ mActionQueue.erase(mActionQueue.begin(), it);
+
+ CHECK(itemTimeUs >= nextAction.mActionTimeUs);
+ switch (nextAction.mAction) {
+ case ActionItem::PAUSE:
+ {
+ mSuspended = true;
+ ALOGV("RUNNING/PAUSE -> PAUSE at buffer %lld us PAUSE Time: %lld us",
+ (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+ break;
+ }
+ case ActionItem::RESUME:
+ {
+ mSuspended = false;
+ ALOGV("PAUSE/RUNNING -> RUNNING at buffer %lld us RESUME Time: %lld us",
+ (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+ break;
+ }
+ case ActionItem::STOP:
+ {
+ ALOGV("RUNNING/PAUSE -> STOP at buffer %lld us STOP Time: %lld us",
+ (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+ // Clear the whole ActionQueue as recording is done
+ mActionQueue.clear();
+ seeStopAction = true;
+ break;
+ }
+ default:
+ TRESPASS_DBG("Unknown action type");
+ // return true here because we did consume an available buffer, so the
+ // loop in start will eventually terminate even if we hit this.
+ return true;
+ }
+ }
+ }
+
+ if (seeStopAction) {
+ // Clear all the buffers before setting mEndOfStream and signal EndOfInputStream.
+ releaseAllAvailableBuffers_l();
+ mEndOfStream = true;
+ submitEndOfInputStream_l();
+ return true;
+ }
+
+ if (mSuspended) {
+ return true;
+ }
+
+ int err = UNKNOWN_ERROR;
+
+ // only submit sample if start time is unspecified, or sample
+ // is queued after the specified start time
+ if (mSkipFramesBeforeNs < 0ll || item.mTimestampNs >= mSkipFramesBeforeNs) {
+ // if start time is set, offset time stamp by start time
+ if (mSkipFramesBeforeNs > 0) {
+ item.mTimestampNs -= mSkipFramesBeforeNs;
+ }
+
+ int64_t timeUs = item.mTimestampNs / 1000;
+ if (mFrameDropper != NULL && mFrameDropper->shouldDrop(timeUs)) {
+ ALOGV("skipping frame (%lld) to meet max framerate", static_cast<long long>(timeUs));
+ // set err to OK so that the skipped frame can still be saved as the latest frame
+ err = OK;
+ } else {
+ err = submitBuffer_l(item); // this takes shared ownership of the acquired buffer on success
+ }
+ }
+
+ if (err != OK) {
+ ALOGV("submitBuffer_l failed, will release bq slot %d", item.mBuffer->getSlot());
+ return true;
+ } else {
+ // Don't set the last buffer id if we're not repeating,
+ // we'll be holding on to the last buffer for nothing.
+ if (mFrameRepeatIntervalUs > 0ll) {
+ setLatestBuffer_l(item);
+ }
+ ALOGV("buffer submitted [slot=%d, useCount=%ld] acquired=%d",
+ item.mBuffer->getSlot(), item.mBuffer.use_count(), mNumOutstandingAcquires);
+ mLastFrameTimestampUs = itemTimeUs;
+ }
+
+ return true;
+}
+
+bool GraphicBufferSource::repeatLatestBuffer_l() {
+ CHECK(mExecuting && !haveAvailableBuffers_l());
+
+ if (mLatestBuffer.mBuffer == nullptr || mSuspended) {
+ return false;
+ }
+
+ if (mFreeCodecBuffers.empty()) {
+ // No buffers available, bail.
+ ALOGV("repeatLatestBuffer_l: no codec buffers.");
+ return false;
+ }
+
+ if (!mLatestBuffer.mBuffer->isCached()) {
+ ALOGV("repeatLatestBuffer_l: slot was discarded, but repeating our own reference");
+ }
+
+ // it is ok to update the timestamp of latest buffer as it is only used for submission
+ status_t err = submitBuffer_l(mLatestBuffer);
+ if (err != OK) {
+ return false;
+ }
+
+ /* repeat last frame up to kRepeatLastFrameCount times.
+ * in case of static scene, a single repeat might not get rid of encoder
+ * ghosting completely, refresh a couple more times to get better quality
+ */
+ if (--mOutstandingFrameRepeatCount > 0) {
+ // set up timestamp for repeat frame
+ mLatestBuffer.mTimestampNs += mFrameRepeatIntervalUs * 1000;
+ queueFrameRepeat_l();
+ }
+
+ return true;
+}
+
+void GraphicBufferSource::setLatestBuffer_l(const VideoBuffer &item) {
+ mLatestBuffer = item;
+
+ ALOGV("setLatestBuffer_l: [slot=%d, useCount=%ld]",
+ mLatestBuffer.mBuffer->getSlot(), mLatestBuffer.mBuffer.use_count());
+
+ mOutstandingFrameRepeatCount = kRepeatLastFrameCount;
+ // set up timestamp for repeat frame
+ mLatestBuffer.mTimestampNs += mFrameRepeatIntervalUs * 1000;
+ queueFrameRepeat_l();
+}
+
+void GraphicBufferSource::queueFrameRepeat_l() {
+ mFrameRepeatBlockedOnCodecBuffer = false;
+
+ if (mReflector != NULL) {
+ sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector);
+ msg->setInt32("generation", ++mRepeatLastFrameGeneration);
+ msg->post(mFrameRepeatIntervalUs);
+ }
+}
+
+bool GraphicBufferSource::calculateCodecTimestamp_l(
+ nsecs_t bufferTimeNs, int64_t *codecTimeUs) {
+ int64_t timeUs = bufferTimeNs / 1000;
+ timeUs += mInputBufferTimeOffsetUs;
+
+ if (mCaptureFps > 0.
+ && (mFps > 2 * mCaptureFps
+ || mCaptureFps > 2 * mFps)) {
+ // Time lapse or slow motion mode
+ if (mPrevCaptureUs < 0ll) {
+ // first capture
+ mPrevCaptureUs = mBaseCaptureUs = timeUs;
+ // adjust the first sample timestamp.
+ mPrevFrameUs = mBaseFrameUs =
+ std::llround((timeUs * mCaptureFps) / mFps);
+ mFrameCount = 0;
+ } else {
+ // snap to nearest capture point
+ double nFrames = (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000;
+ if (nFrames < 0.5 - kTimestampFluctuation) {
+ // skip this frame as it's too close to previous capture
+ ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
+ return false;
+ }
+ if (nFrames <= 1.0) {
+ nFrames = 1.0;
+ }
+ mFrameCount += std::llround(nFrames);
+ mPrevCaptureUs = mBaseCaptureUs + std::llround(
+ mFrameCount * 1000000 / mCaptureFps);
+ mPrevFrameUs = mBaseFrameUs + std::llround(
+ mFrameCount * 1000000 / mFps);
+ }
+
+ ALOGV("timeUs %lld, captureUs %lld, frameUs %lld",
+ static_cast<long long>(timeUs),
+ static_cast<long long>(mPrevCaptureUs),
+ static_cast<long long>(mPrevFrameUs));
+ } else {
+ if (timeUs <= mPrevFrameUs) {
+ if (mFrameDropper != NULL && mFrameDropper->disabled()) {
+ // Warn only, client has disabled frame drop logic possibly for image
+ // encoding cases where camera's ZSL mode could send out of order frames.
+ ALOGW("Received frame that's going backward in time");
+ } else {
+ // Drop the frame if it's going backward in time. Bad timestamp
+ // could disrupt encoder's rate control completely.
+ ALOGW("Dropping frame that's going backward in time");
+ return false;
+ }
+ }
+
+ mPrevFrameUs = timeUs;
+ }
+
+ *codecTimeUs = mPrevFrameUs;
+ return true;
+}
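+
+// Sketch of the time-lapse/slow-motion path above with assumed numbers (not
+// taken from this change): with mCaptureFps = 1 and mFps = 30, frames captured
+// roughly one second apart each advance mFrameCount by 1, so mPrevCaptureUs
+// moves in ~1000000 us steps while mPrevFrameUs (the codec timestamp) moves in
+// ~33333 us steps, i.e. one second of capture time plays back as a single
+// 30 fps frame interval.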
+
+status_t GraphicBufferSource::submitBuffer_l(const VideoBuffer &item) {
+ CHECK(!mFreeCodecBuffers.empty());
+ uint32_t codecBufferId = *mFreeCodecBuffers.begin();
+
+ ALOGV("submitBuffer_l [slot=%d, bufferId=%d]", item.mBuffer->getSlot(), codecBufferId);
+
+ int64_t codecTimeUs;
+ if (!calculateCodecTimestamp_l(item.mTimestampNs, &codecTimeUs)) {
+ return UNKNOWN_ERROR;
+ }
+
+ if ((android_dataspace)item.mDataspace != mLastDataspace) {
+ onDataspaceChanged_l(
+ item.mDataspace,
+ (android_pixel_format)item.mBuffer->getGraphicBuffer()->format);
+ }
+
+ std::shared_ptr<AcquiredBuffer> buffer = item.mBuffer;
+ // Use a GraphicBuffer for now, as the component uses GraphicBuffers to hold references
+ // and requires this exact graphic buffer to hold its reference; otherwise we would
+ // need to create a new GraphicBuffer from an ANWBuffer, separate from the
+ // acquired GraphicBuffer.
+ // TODO: this can be reworked globally to use ANWBuffer references
+ sp<GraphicBuffer> graphicBuffer = buffer->getGraphicBuffer();
+ status_t err = mComponent->submitBuffer(
+ codecBufferId, graphicBuffer, codecTimeUs, buffer->getAcquireFenceFd());
+
+ if (err != OK) {
+ ALOGW("WARNING: emptyGraphicBuffer failed: 0x%x", err);
+ return err;
+ }
+
+ mFreeCodecBuffers.erase(mFreeCodecBuffers.begin());
+
+ ssize_t cbix = mSubmittedCodecBuffers.add(codecBufferId, buffer);
+ ALOGV("emptyGraphicBuffer succeeded, bufferId=%u@%zd bufhandle=%p",
+ codecBufferId, cbix, graphicBuffer->handle);
+ return OK;
+}
+
+void GraphicBufferSource::submitEndOfInputStream_l() {
+ CHECK(mEndOfStream);
+ if (mEndOfStreamSent) {
+ ALOGV("EOS already sent");
+ return;
+ }
+
+ if (mFreeCodecBuffers.empty()) {
+ ALOGV("submitEndOfInputStream_l: no codec buffers available");
+ return;
+ }
+ uint32_t codecBufferId = *mFreeCodecBuffers.begin();
+
+ // We reject any additional incoming graphic buffers. There is no acquired buffer used for EOS
+ status_t err = mComponent->submitEos(codecBufferId);
+ if (err != OK) {
+ ALOGW("emptyDirectBuffer EOS failed: 0x%x", err);
+ } else {
+ mFreeCodecBuffers.erase(mFreeCodecBuffers.begin());
+ ssize_t cbix = mSubmittedCodecBuffers.add(codecBufferId, nullptr);
+ ALOGV("submitEndOfInputStream_l: buffer submitted, bufferId=%u@%zd", codecBufferId, cbix);
+ mEndOfStreamSent = true;
+
+ // no need to hold onto any buffers for frame repeating
+ ++mRepeatLastFrameGeneration;
+ mLatestBuffer.mBuffer.reset();
+ }
+}
+
+status_t GraphicBufferSource::acquireBuffer_l(VideoBuffer *ab) {
+ BufferItem bi;
+ status_t err = mConsumer->acquireBuffer(&bi, 0);
+ if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
+ // shouldn't happen
+ ALOGW("acquireBuffer_l: frame was not available");
+ return err;
+ } else if (err != OK) {
+ ALOGW("acquireBuffer_l: failed with err=%d", err);
+ return err;
+ }
+ --mNumAvailableUnacquiredBuffers;
+
+ // Manage our buffer cache.
+ std::shared_ptr<CachedBuffer> buffer;
+ ssize_t bsi = mBufferSlots.indexOfKey(bi.mSlot);
+ if (bi.mGraphicBuffer != NULL) {
+ // replace/initialize slot with new buffer
+ ALOGV("acquireBuffer_l: %s buffer slot %d", bsi < 0 ? "setting" : "UPDATING", bi.mSlot);
+ if (bsi >= 0) {
+ discardBufferAtSlotIndex_l(bsi);
+ } else {
+ bsi = mBufferSlots.add(bi.mSlot, nullptr);
+ }
+ buffer = std::make_shared<CachedBuffer>(bi.mSlot, bi.mGraphicBuffer);
+ mBufferSlots.replaceValueAt(bsi, buffer);
+ } else {
+ buffer = mBufferSlots.valueAt(bsi);
+ }
+ int64_t frameNum = bi.mFrameNumber;
+
+ std::shared_ptr<AcquiredBuffer> acquiredBuffer =
+ std::make_shared<AcquiredBuffer>(
+ buffer,
+ [frameNum, this](AcquiredBuffer *buffer){
+ // AcquiredBuffer's destructor should always be called when mMutex is locked.
+ // If we had a reentrant mutex, we could just lock it again to ensure this.
+ if (mMutex.tryLock() == 0) {
+ TRESPASS_DBG();
+ mMutex.unlock();
+ }
+
+ // we can release buffers immediately if not using adapters
+ // alternately, we could add them to mSlotsToRelease, but we would
+ // somehow need to propagate frame number to that queue
+ if (buffer->isCached()) {
+ --mNumOutstandingAcquires;
+ mConsumer->releaseBuffer(
+ buffer->getSlot(), frameNum, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
+ buffer->getReleaseFence());
+ }
+ },
+ bi.mFence);
+ VideoBuffer videoBuffer{acquiredBuffer, bi.mTimestamp, bi.mDataSpace};
+ *ab = videoBuffer;
+ ++mNumOutstandingAcquires;
+ return OK;
+}
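+
+// Lifecycle sketch for a buffer acquired here (an assumed flow, pieced together from the
+// calls in this file): the BufferItem is wrapped in an AcquiredBuffer whose release callback
+// runs when the last shared_ptr reference goes away -- typically after the codec has emptied
+// the buffer and its entry is dropped from mSubmittedCodecBuffers -- and, if the slot is
+// still cached, returns the buffer to the BufferQueue via releaseBuffer().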
+
+// BufferQueue::ConsumerListener callback
+void GraphicBufferSource::onFrameAvailable(const BufferItem& item __unused) {
+ Mutex::Autolock autoLock(mMutex);
+
+ ALOGV("onFrameAvailable: executing=%d available=%zu+%d",
+ mExecuting, mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ ++mNumAvailableUnacquiredBuffers;
+
+ // For BufferQueue we cannot acquire a buffer if we cannot immediately feed it to the codec
+ // UNLESS we are discarding this buffer (acquiring and immediately releasing it), which makes
+ // this logic somewhat awkward.
+ // NOTE: We could also rely on our debug counter, but that is meant only for debugging.
+ if (!areWeDiscardingAvailableBuffers_l() && mFreeCodecBuffers.empty()) {
+ // we may not be allowed to acquire a possibly encodable buffer, so just note that
+ // it is available
+ ALOGV("onFrameAvailable: cannot acquire buffer right now, do it later");
+
+ ++mRepeatLastFrameGeneration; // cancel any pending frame repeat
+ return;
+ }
+
+ VideoBuffer buffer;
+ status_t err = acquireBuffer_l(&buffer);
+ if (err != OK) {
+ ALOGE("onFrameAvailable: acquireBuffer returned err=%d", err);
+ } else {
+ onBufferAcquired_l(buffer);
+ }
+}
+
+bool GraphicBufferSource::areWeDiscardingAvailableBuffers_l() {
+ return mEndOfStreamSent // already sent EOS to codec
+ || mComponent == nullptr // there is no codec connected
+ || (mSuspended && mActionQueue.empty()) // we are suspended and not waiting for
+ // any further action
+ || !mExecuting;
+}
+
+void GraphicBufferSource::onBufferAcquired_l(const VideoBuffer &buffer) {
+ if (mEndOfStreamSent) {
+ // This should only be possible if a new buffer was queued after
+ // EOS was signaled, i.e. the app is misbehaving.
+ ALOGW("onFrameAvailable: EOS is sent, ignoring frame");
+ } else if (mComponent == NULL || (mSuspended && mActionQueue.empty())) {
+ // FIXME: if we are suspended but have a resume queued we will stop repeating the last
+ // frame. Is that the desired behavior?
+ ALOGV("onFrameAvailable: suspended, ignoring frame");
+ } else {
+ ++mRepeatLastFrameGeneration; // cancel any pending frame repeat
+ mAvailableBuffers.push_back(buffer);
+ if (mExecuting) {
+ fillCodecBuffer_l();
+ }
+ }
+}
+
+// BufferQueue::ConsumerListener callback
+void GraphicBufferSource::onBuffersReleased() {
+ Mutex::Autolock lock(mMutex);
+
+ uint64_t slotMask;
+ uint64_t releaseMask;
+ if (mConsumer->getReleasedBuffers(&releaseMask) != NO_ERROR) {
+ slotMask = 0xffffffffffffffffULL;
+ ALOGW("onBuffersReleased: unable to get released buffer set");
+ } else {
+ slotMask = releaseMask;
+ ALOGV("onBuffersReleased: 0x%016" PRIx64, slotMask);
+ }
+
+ AString unpopulated;
+ for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
+ if ((slotMask & 0x01) != 0) {
+ if (!discardBufferInSlot_l(i)) {
+ if (!unpopulated.empty()) {
+ unpopulated.append(", ");
+ }
+ unpopulated.append(i);
+ }
+ }
+ slotMask >>= 1;
+ }
+ if (!unpopulated.empty()) {
+ ALOGW("released unpopulated slots: [%s]", unpopulated.c_str());
+ }
+}
+
+bool GraphicBufferSource::discardBufferInSlot_l(GraphicBufferSource::slot_id i) {
+ ssize_t bsi = mBufferSlots.indexOfKey(i);
+ if (bsi < 0) {
+ return false;
+ } else {
+ discardBufferAtSlotIndex_l(bsi);
+ mBufferSlots.removeItemsAt(bsi);
+ return true;
+ }
+}
+
+void GraphicBufferSource::discardBufferAtSlotIndex_l(ssize_t bsi) {
+ const std::shared_ptr<CachedBuffer>& buffer = mBufferSlots.valueAt(bsi);
+ // use -2 if there is no latest buffer, and -1 if it is no longer cached
+ slot_id latestBufferSlot =
+ mLatestBuffer.mBuffer == nullptr ? -2 : mLatestBuffer.mBuffer->getSlot();
+ ALOGV("releasing acquired buffer: [slot=%d, useCount=%ld], latest: [slot=%d]",
+ mBufferSlots.keyAt(bsi), buffer.use_count(), latestBufferSlot);
+ mBufferSlots.valueAt(bsi)->onDroppedFromCache();
+
+ // If the slot of an acquired buffer is discarded, that buffer will not have to be
+ // released to the producer, so account it here. However, it is possible that the
+ // acquired buffer has already been discarded so check if it still is.
+ if (buffer->isAcquired()) {
+ --mNumOutstandingAcquires;
+ }
+
+ // clear the buffer reference (not technically needed as caller either replaces or deletes
+ // it; done here for safety).
+ mBufferSlots.editValueAt(bsi).reset();
+ CHECK_DBG(buffer == nullptr);
+}
+
+void GraphicBufferSource::releaseAllAvailableBuffers_l() {
+ mAvailableBuffers.clear();
+ while (mNumAvailableUnacquiredBuffers > 0) {
+ VideoBuffer item;
+ if (acquireBuffer_l(&item) != OK) {
+ ALOGW("releaseAllAvailableBuffers: failed to acquire available unacquired buffer");
+ break;
+ }
+ }
+}
+
+// BufferQueue::ConsumerListener callback
+void GraphicBufferSource::onSidebandStreamChanged() {
+ ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
+}
+
+status_t GraphicBufferSource::configure(
+ const sp<ComponentWrapper>& component,
+ int32_t dataSpace,
+ int32_t bufferCount,
+ uint32_t frameWidth,
+ uint32_t frameHeight,
+ uint32_t consumerUsage) {
+ if (component == NULL) {
+ return BAD_VALUE;
+ }
+
+ // Call setMaxAcquiredBufferCount without lock.
+ // setMaxAcquiredBufferCount could call back to onBuffersReleased
+ // if the buffer count change results in releasing of existing buffers,
+ // which would lead to deadlock.
+ status_t err = mConsumer->setMaxAcquiredBufferCount(bufferCount);
+ if (err != NO_ERROR) {
+ ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
+ bufferCount, err);
+ return err;
+ }
+
+ {
+ Mutex::Autolock autoLock(mMutex);
+ mComponent = component;
+
+ err = mConsumer->setDefaultBufferSize(frameWidth, frameHeight);
+ if (err != NO_ERROR) {
+ ALOGE("Unable to set BQ default buffer size to %ux%u: %d",
+ frameWidth, frameHeight, err);
+ return err;
+ }
+
+ consumerUsage |= GRALLOC_USAGE_HW_VIDEO_ENCODER;
+ mConsumer->setConsumerUsageBits(consumerUsage);
+
+ // Sets the default buffer data space
+ ALOGD("setting dataspace: %#x, acquired=%d", dataSpace, mNumOutstandingAcquires);
+ mConsumer->setDefaultBufferDataSpace((android_dataspace)dataSpace);
+ mLastDataspace = (android_dataspace)dataSpace;
+
+ mExecuting = false;
+ mSuspended = false;
+ mEndOfStream = false;
+ mEndOfStreamSent = false;
+ mSkipFramesBeforeNs = -1ll;
+ mFrameDropper.clear();
+ mFrameRepeatIntervalUs = -1ll;
+ mRepeatLastFrameGeneration = 0;
+ mOutstandingFrameRepeatCount = 0;
+ mLatestBuffer.mBuffer.reset();
+ mFrameRepeatBlockedOnCodecBuffer = false;
+ mFps = -1.0;
+ mCaptureFps = -1.0;
+ mBaseCaptureUs = -1ll;
+ mBaseFrameUs = -1ll;
+ mPrevCaptureUs = -1ll;
+ mPrevFrameUs = -1ll;
+ mFrameCount = 0;
+ mInputBufferTimeOffsetUs = 0;
+ mStopTimeUs = -1;
+ mActionQueue.clear();
+ }
+
+ return OK;
+}
+
+status_t GraphicBufferSource::setSuspend(bool suspend, int64_t suspendStartTimeUs) {
+ ALOGV("setSuspend=%d at time %lld us", suspend, (long long)suspendStartTimeUs);
+
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mStopTimeUs != -1) {
+ ALOGE("setSuspend failed as STOP action is pending");
+ return INVALID_OPERATION;
+ }
+
+ // Push the action to the queue.
+ if (suspendStartTimeUs != -1) {
+ // suspendStartTimeUs must be less than or equal to the current systemTime.
+ int64_t currentSystemTimeUs = systemTime() / 1000;
+ if (suspendStartTimeUs > currentSystemTimeUs) {
+ ALOGE("setSuspend failed. %lld is larger than current system time %lld us",
+ (long long)suspendStartTimeUs, (long long)currentSystemTimeUs);
+ return INVALID_OPERATION;
+ }
+ if (mLastActionTimeUs != -1 && suspendStartTimeUs < mLastActionTimeUs) {
+ ALOGE("setSuspend failed. %lld is smaller than last action time %lld us",
+ (long long)suspendStartTimeUs, (long long)mLastActionTimeUs);
+ return INVALID_OPERATION;
+ }
+ mLastActionTimeUs = suspendStartTimeUs;
+ ActionItem action;
+ action.mAction = suspend ? ActionItem::PAUSE : ActionItem::RESUME;
+ action.mActionTimeUs = suspendStartTimeUs;
+ ALOGV("Push %s action into actionQueue", suspend ? "PAUSE" : "RESUME");
+ mActionQueue.push_back(action);
+ } else {
+ if (suspend) {
+ mSuspended = true;
+ releaseAllAvailableBuffers_l();
+ return OK;
+ } else {
+ mSuspended = false;
+ if (mExecuting && !haveAvailableBuffers_l()
+ && mFrameRepeatBlockedOnCodecBuffer) {
+ if (repeatLatestBuffer_l()) {
+ ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
+ mFrameRepeatBlockedOnCodecBuffer = false;
+ } else {
+ ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
+ }
+ }
+ }
+ }
+ return OK;
+}
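+
+// Example of the deferred path above (hypothetical timestamps): calling setSuspend(true, t0)
+// and later setSuspend(false, t1), with t0 <= t1 <= now, queues a PAUSE action at t0 followed
+// by a RESUME action at t1. When the action queue is drained against incoming buffer
+// timestamps (elsewhere in this class), frames stamped between t0 and t1 are treated as
+// suspended and discarded, while later frames are encoded again.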
+
+status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) {
+ ALOGV("setRepeatPreviousFrameDelayUs: delayUs=%lld", (long long)repeatAfterUs);
+
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mExecuting || repeatAfterUs <= 0ll) {
+ return INVALID_OPERATION;
+ }
+
+ mFrameRepeatIntervalUs = repeatAfterUs;
+ return OK;
+}
+
+status_t GraphicBufferSource::setTimeOffsetUs(int64_t timeOffsetUs) {
+ Mutex::Autolock autoLock(mMutex);
+
+ // timeOffsetUs must be negative for adjustment.
+ if (timeOffsetUs >= 0ll) {
+ return INVALID_OPERATION;
+ }
+
+ mInputBufferTimeOffsetUs = timeOffsetUs;
+ return OK;
+}
+
+status_t GraphicBufferSource::setMaxFps(float maxFps) {
+ ALOGV("setMaxFps: maxFps=%f", maxFps);
+
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mExecuting) {
+ return INVALID_OPERATION;
+ }
+
+ mFrameDropper = new FrameDropper();
+ status_t err = mFrameDropper->setMaxFrameRate(maxFps);
+ if (err != OK) {
+ mFrameDropper.clear();
+ return err;
+ }
+
+ return OK;
+}
+
+status_t GraphicBufferSource::setStartTimeUs(int64_t skipFramesBeforeUs) {
+ ALOGV("setStartTimeUs: skipFramesBeforeUs=%lld", (long long)skipFramesBeforeUs);
+
+ Mutex::Autolock autoLock(mMutex);
+
+ mSkipFramesBeforeNs =
+ (skipFramesBeforeUs > 0 && skipFramesBeforeUs <= INT64_MAX / 1000) ?
+ (skipFramesBeforeUs * 1000) : -1ll;
+
+ return OK;
+}
+
+status_t GraphicBufferSource::setStopTimeUs(int64_t stopTimeUs) {
+ ALOGV("setStopTimeUs: %lld us", (long long)stopTimeUs);
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mStopTimeUs != -1) {
+ // Ignore if stop time has already been set
+ return OK;
+ }
+
+ // stopTimeUs must be less than or equal to the current systemTime.
+ int64_t currentSystemTimeUs = systemTime() / 1000;
+ if (stopTimeUs > currentSystemTimeUs) {
+ ALOGE("setStopTimeUs failed. %lld is larger than current system time %lld us",
+ (long long)stopTimeUs, (long long)currentSystemTimeUs);
+ return INVALID_OPERATION;
+ }
+ if (mLastActionTimeUs != -1 && stopTimeUs < mLastActionTimeUs) {
+ ALOGE("setStopTimeUs failed. %lld is smaller than last action time %lld us",
+ (long long)stopTimeUs, (long long)mLastActionTimeUs);
+ return INVALID_OPERATION;
+ }
+ mLastActionTimeUs = stopTimeUs;
+ ActionItem action;
+ action.mAction = ActionItem::STOP;
+ action.mActionTimeUs = stopTimeUs;
+ mActionQueue.push_back(action);
+ mStopTimeUs = stopTimeUs;
+ return OK;
+}
+
+status_t GraphicBufferSource::getStopTimeOffsetUs(int64_t *stopTimeOffsetUs) {
+ ALOGV("getStopTimeOffsetUs");
+ Mutex::Autolock autoLock(mMutex);
+ if (mStopTimeUs == -1) {
+ ALOGW("Fail to return stopTimeOffsetUs as stop time is not set");
+ return INVALID_OPERATION;
+ }
+ *stopTimeOffsetUs =
+ mLastFrameTimestampUs == -1 ? 0 : mStopTimeUs - mLastFrameTimestampUs;
+ return OK;
+}
+
+status_t GraphicBufferSource::setTimeLapseConfig(double fps, double captureFps) {
+ ALOGV("setTimeLapseConfig: fps=%lg, captureFps=%lg",
+ fps, captureFps);
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mExecuting || !(fps > 0) || !(captureFps > 0)) {
+ return INVALID_OPERATION;
+ }
+
+ mFps = fps;
+ mCaptureFps = captureFps;
+
+ return OK;
+}
+
+status_t GraphicBufferSource::setColorAspects(int32_t aspectsPacked) {
+ Mutex::Autolock autoLock(mMutex);
+ mDefaultColorAspectsPacked = aspectsPacked;
+ ColorAspects colorAspects = ColorUtils::unpackToColorAspects(aspectsPacked);
+ ALOGD("requesting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s))",
+ colorAspects.mRange, asString(colorAspects.mRange),
+ colorAspects.mPrimaries, asString(colorAspects.mPrimaries),
+ colorAspects.mMatrixCoeffs, asString(colorAspects.mMatrixCoeffs),
+ colorAspects.mTransfer, asString(colorAspects.mTransfer));
+
+ return OK;
+}
+
+status_t GraphicBufferSource::signalEndOfInputStream() {
+ Mutex::Autolock autoLock(mMutex);
+ ALOGV("signalEndOfInputStream: executing=%d available=%zu+%d eos=%d",
+ mExecuting, mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers, mEndOfStream);
+
+ if (mEndOfStream) {
+ ALOGE("EOS was already signaled");
+ return INVALID_OPERATION;
+ }
+
+ // Set the end-of-stream flag. If no frames are pending from the
+ // BufferQueue, and a codec buffer is available, and we're executing,
+ // and there is no stop timestamp, we initiate the EOS from here.
+ // Otherwise, we'll let codecBufferEmptied() (or start) do it.
+ //
+ // Note: if there are no pending frames and all codec buffers are
+ // available, we *must* submit the EOS from here or we'll just
+ // stall since no future events are expected.
+ mEndOfStream = true;
+
+ if (mStopTimeUs == -1 && mExecuting && !haveAvailableBuffers_l()) {
+ submitEndOfInputStream_l();
+ }
+
+ return OK;
+}
+
+void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatRepeatLastFrame:
+ {
+ Mutex::Autolock autoLock(mMutex);
+
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+
+ if (generation != mRepeatLastFrameGeneration) {
+ // stale
+ break;
+ }
+
+ if (!mExecuting || haveAvailableBuffers_l()) {
+ break;
+ }
+
+ bool success = repeatLatestBuffer_l();
+ if (success) {
+ ALOGV("repeatLatestBuffer_l SUCCESS");
+ } else {
+ ALOGV("repeatLatestBuffer_l FAILURE");
+ mFrameRepeatBlockedOnCodecBuffer = true;
+ }
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+} // namespace android
diff --git a/media/libstagefright/bqhelper/WProducerListener.cpp b/media/libstagefright/bqhelper/WProducerListener.cpp
new file mode 100644
index 0000000..2ca13be
--- /dev/null
+++ b/media/libstagefright/bqhelper/WProducerListener.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/bqhelper/WProducerListener.h>
+
+namespace android {
+
+// TWProducerListener
+TWProducerListener::TWProducerListener(
+ sp<BProducerListener> const& base):
+ mBase(base) {
+}
+
+Return<void> TWProducerListener::onBufferReleased() {
+ mBase->onBufferReleased();
+ return Void();
+}
+
+Return<bool> TWProducerListener::needsReleaseNotify() {
+ return mBase->needsReleaseNotify();
+}
+
+// LWProducerListener
+LWProducerListener::LWProducerListener(
+ sp<HProducerListener> const& base):
+ mBase(base) {
+}
+
+void LWProducerListener::onBufferReleased() {
+ mBase->onBufferReleased();
+}
+
+bool LWProducerListener::needsReleaseNotify() {
+ return static_cast<bool>(mBase->needsReleaseNotify());
+}
+
+} // namespace android
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/ComponentWrapper.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/ComponentWrapper.h
new file mode 100644
index 0000000..e27829b
--- /dev/null
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/ComponentWrapper.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COMPONENT_WRAPPER_H_
+#define COMPONENT_WRAPPER_H_
+
+#include <utils/RefBase.h>
+#include <utils/StrongPointer.h>
+#include <ui/GraphicBuffer.h>
+
+#include <stdint.h>
+
+namespace android {
+
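+// Codec-side consumer of GraphicBufferSource's output. A sketch of the expected contract,
+// inferred from how GraphicBufferSource calls it: submitBuffer() hands a filled graphic
+// buffer (with its timestamp and acquire fence) to the codec buffer identified by bufferId,
+// submitEos() submits an empty buffer carrying the end-of-stream flag, and
+// dispatchDataSpaceChanged() notifies the codec of a change in the input dataspace, along
+// with the packed color aspects and pixel format.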
+struct ComponentWrapper : public RefBase {
+ virtual status_t submitBuffer(
+ int32_t bufferId, const sp<GraphicBuffer> &buffer = nullptr,
+ int64_t timestamp = 0, int fenceFd = -1) = 0;
+ virtual status_t submitEos(int32_t bufferId) = 0;
+ virtual void dispatchDataSpaceChanged(
+ int32_t dataSpace, int32_t aspects, int32_t pixelFormat) = 0;
+};
+
+} // namespace android
+
+#endif // COMPONENT_WRAPPER_H_
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/Conversion.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/Conversion.h
new file mode 100644
index 0000000..60d8c1e
--- /dev/null
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/Conversion.h
@@ -0,0 +1,769 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_STAGEFRIGHT_BQHELPER_CONVERSION_H_
+#define MEDIA_STAGEFRIGHT_BQHELPER_CONVERSION_H_
+
+#include <vector>
+#include <list>
+
+#include <unistd.h>
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <hidlmemory/mapping.h>
+
+#include <binder/Binder.h>
+#include <binder/Status.h>
+#include <ui/FenceTime.h>
+#include <cutils/native_handle.h>
+#include <gui/IGraphicBufferProducer.h>
+
+#include <media/hardware/VideoAPI.h>
+
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
+
+namespace android {
+namespace conversion {
+
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+using ::android::status_t;
+
+using ::android::String8;
+
+using ::android::hardware::media::V1_0::Rect;
+using ::android::hardware::media::V1_0::Region;
+
+using ::android::hardware::graphics::common::V1_0::Dataspace;
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::GraphicBuffer;
+
+typedef ::android::hardware::graphics::bufferqueue::V1_0::IGraphicBufferProducer
+ HGraphicBufferProducer;
+typedef ::android::IGraphicBufferProducer
+ BGraphicBufferProducer;
+
+// native_handle_t helper functions.
+
+/**
+ * \brief Take an fd and create a native handle containing only the given fd.
+ * The created handle will need to be deleted manually with
+ * `native_handle_delete()`.
+ *
+ * \param[in] fd The source file descriptor (of type `int`).
+ * \return The created `native_handle_t*` that contains the given \p fd. If the
+ * supplied \p fd is negative, the created native handle will contain no file
+ * descriptors.
+ *
+ * If the native handle cannot be created, the return value will be
+ * `nullptr`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+native_handle_t* native_handle_create_from_fd(int fd);
+
+/**
+ * \brief Extract a file descriptor from a native handle.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \param[in] index The index of the file descriptor in \p nh to read from. This
+ * input has the default value of `0`.
+ * \return The `index`-th file descriptor in \p nh. If \p nh does not have
+ * enough file descriptors, the returned value will be `-1`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+int native_handle_read_fd(native_handle_t const* nh, int index = 0);
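+
+/*
+ * Example (illustrative): wrapping an fd in a native handle and reading it back. The
+ * source fd here is hypothetical.
+ *
+ *   int fd = dup(sourceFd);
+ *   native_handle_t* nh = native_handle_create_from_fd(fd);
+ *   int sameFd = native_handle_read_fd(nh);   // returns fd, or -1 if nh holds no fd
+ *   native_handle_delete(nh);                 // frees the handle; does not close fd
+ */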
+
+/**
+ * Conversion functions
+ * ====================
+ *
+ * There are two main directions of conversion:
+ * - `inTargetType(...)`: Create a wrapper whose lifetime depends on the
+ * input. The wrapper has type `TargetType`.
+ * - `toTargetType(...)`: Create a standalone object of type `TargetType` that
+ * corresponds to the input. The lifetime of the output does not depend on the
+ * lifetime of the input.
+ * - `wrapAs(TargetType*, ...)`: Same as `inTargetType()`, but for `TargetType`
+ * that cannot be copied and/or moved efficiently, or when there are multiple
+ * output arguments.
+ * - `convertTo(TargetType*, ...)`: Same as `toTargetType()`, but for
+ * `TargetType` that cannot be copied and/or moved efficiently, or when there
+ * are multiple output arguments.
+ *
+ * `wrapAs()` and `convertTo()` functions will take output arguments before
+ * input arguments. Some of these functions might return a value to indicate
+ * success or error.
+ *
+ * In converting or wrapping something as a Treble type that contains a
+ * `hidl_handle`, `native_handle_t*` will need to be created and returned as
+ * an additional output argument, hence only `wrapAs()` or `convertTo()` would
+ * be available. The caller must call `native_handle_delete()` to deallocate the
+ * returned native handle when it is no longer needed.
+ *
+ * For types that contain file descriptors, `inTargetType()` and `wrapAs()` do
+ * not perform duplication of file descriptors, while `toTargetType()` and
+ * `convertTo()` do.
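+ *
+ * Example (illustrative only; `opaque` is a hypothetical caller-owned buffer):
+ *
+ *   std::vector<uint8_t> opaque = {1, 2, 3};
+ *   // Wrapper: valid only while `opaque` is alive; no copy is made.
+ *   hidl_vec<uint8_t> view = inHidlBytes(opaque.data(), opaque.size());
+ *   // Standalone copy: stays valid after `opaque` goes away.
+ *   hidl_vec<uint8_t> copy = toHidlBytes(opaque.data(), opaque.size());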
+ */
+
+/**
+ * \brief Convert `Return<void>` to `binder::Status`.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `binder::Status`.
+ */
+// convert: Return<void> -> ::android::binder::Status
+::android::binder::Status toBinderStatus(Return<void> const& t);
+
+/**
+ * \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `status_t`.
+ */
+// convert: Return<void> -> status_t
+status_t toStatusT(Return<void> const& t);
+
+/**
+ * \brief Wrap `native_handle_t*` in `hidl_handle`.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \return The `hidl_handle` that points to \p nh.
+ */
+// wrap: native_handle_t* -> hidl_handle
+hidl_handle inHidlHandle(native_handle_t const* nh);
+
+/**
+ * \brief Convert `int32_t` to `Dataspace`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \result The corresponding `Dataspace`.
+ */
+// convert: int32_t -> Dataspace
+Dataspace toHardwareDataspace(int32_t l);
+
+/**
+ * \brief Convert `Dataspace` to `int32_t`.
+ *
+ * \param[in] t The source `Dataspace`.
+ * \result The corresponding `int32_t`.
+ */
+// convert: Dataspace -> int32_t
+int32_t toRawDataspace(Dataspace const& t);
+
+/**
+ * \brief Wrap an opaque buffer inside a `hidl_vec<uint8_t>`.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that points to the buffer.
+ */
+// wrap: void*, size_t -> hidl_vec<uint8_t>
+hidl_vec<uint8_t> inHidlBytes(void const* l, size_t size);
+
+/**
+ * \brief Create a `hidl_vec<uint8_t>` that is a copy of an opaque buffer.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that is a copy of the input buffer.
+ */
+// convert: void*, size_t -> hidl_vec<uint8_t>
+hidl_vec<uint8_t> toHidlBytes(void const* l, size_t size);
+
+/**
+ * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
+ *
+ * \param[out] t The wrapper of type `AnwBuffer`.
+ * \param[in] l The source `GraphicBuffer`.
+ */
+// wrap: GraphicBuffer -> AnwBuffer
+void wrapAs(AnwBuffer* t, GraphicBuffer const& l);
+
+/**
+ * \brief Convert `AnwBuffer` to `GraphicBuffer`.
+ *
+ * \param[out] l The destination `GraphicBuffer`.
+ * \param[in] t The source `AnwBuffer`.
+ *
+ * This function will duplicate all file descriptors in \p t.
+ */
+// convert: AnwBuffer -> GraphicBuffer
+// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
+bool convertTo(GraphicBuffer* l, AnwBuffer const& t);
+
+/**
+ * Conversion functions for types outside media
+ * ============================================
+ *
+ * Some objects in libui and libgui that were made to go through binder calls do
+ * not expose ways to read or write their fields to the public. To pass an
+ * object of this kind through the HIDL boundary, translation functions need to
+ * work around the access restriction by using the publicly available
+ * `flatten()` and `unflatten()` functions.
+ *
+ * All `flatten()` and `unflatten()` overloads follow the same convention as
+ * follows:
+ *
+ * status_t flatten(ObjectType const& object,
+ * [OtherType const& other, ...]
+ * void*& buffer, size_t& size,
+ * int*& fds, size_t& numFds)
+ *
+ * status_t unflatten(ObjectType* object,
+ * [OtherType* other, ...,]
+ * void*& buffer, size_t& size,
+ * int*& fds, size_t& numFds)
+ *
+ * The number of `other` parameters varies depending on the `ObjectType`. For
+ * example, in the process of unflattening an object that contains
+ * `hidl_handle`, `other` is needed to hold `native_handle_t` objects that will
+ * be created.
+ *
+ * The last four parameters always work the same way in all overloads of
+ * `flatten()` and `unflatten()`:
+ * - For `flatten()`, `buffer` is the pointer to the non-fd buffer to be filled,
+ * `size` is the size (in bytes) of the non-fd buffer pointed to by `buffer`,
+ * `fds` is the pointer to the fd buffer to be filled, and `numFds` is the
+ * size (in ints) of the fd buffer pointed to by `fds`.
+ * - For `unflatten()`, `buffer` is the pointer to the non-fd buffer to be read
+ * from, `size` is the size (in bytes) of the non-fd buffer pointed to by
+ * `buffer`, `fds` is the pointer to the fd buffer to be read from, and
+ * `numFds` is the size (in ints) of the fd buffer pointed to by `fds`.
+ * - After a successful call to `flatten()` or `unflatten()`, `buffer` and `fds`
+ * will be advanced, while `size` and `numFds` will be decreased to reflect
+ * how much storage/data of the two buffers (fd and non-fd) have been used.
+ * - After an unsuccessful call, the values of `buffer`, `size`, `fds` and
+ * `numFds` are invalid.
+ *
+ * The return value of a successful `flatten()` or `unflatten()` call will be
+ * `OK` (also aliased as `NO_ERROR`). Any other values indicate a failure.
+ *
+ * For each object type that supports flattening, there will be two accompanying
+ * functions: `getFlattenedSize()` and `getFdCount()`. `getFlattenedSize()` will
+ * return the size of the non-fd buffer that the object will need for
+ * flattening. `getFdCount()` will return the size of the fd buffer that the
+ * object will need for flattening.
+ *
+ * The set of these four functions, `getFlattenedSize()`, `getFdCount()`,
+ * `flatten()` and `unflatten()`, are similar to functions of the same name in
+ * the abstract class `Flattenable`. The only difference is that functions in
+ * this file are not member functions of the object type. For example, we write
+ *
+ * flatten(x, buffer, size, fds, numFds)
+ *
+ * instead of
+ *
+ * x.flatten(buffer, size, fds, numFds)
+ *
+ * because we cannot modify the type of `x`.
+ *
+ * There is one exception to the naming convention: `hidl_handle` that
+ * represents a fence. The four functions for this "Fence" type have the word
+ * "Fence" attached to their names because the object type, which is
+ * `hidl_handle`, does not carry the special meaning that the object itself can
+ * only contain zero or one file descriptor.
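+ *
+ * Example (illustrative only; the local variables are hypothetical): flattening a fence
+ * into caller-provided storage.
+ *
+ *   hidl_handle fence;  // assumed to wrap a fence fd
+ *   std::vector<uint8_t> data(getFenceFlattenedSize(fence));
+ *   std::vector<int> fdStore(getFenceFdCount(fence));
+ *   void* buffer = data.data();
+ *   size_t size = data.size();
+ *   int* fds = fdStore.data();
+ *   size_t numFds = fdStore.size();
+ *   if (flattenFence(fence, buffer, size, fds, numFds) == NO_ERROR) {
+ *       // `buffer` and `fds` now point past the written data; `size` and `numFds`
+ *       // say how much of the two buffers remains unused.
+ *   }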
+ */
+
+// Ref: frameworks/native/libs/ui/Fence.cpp
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return The required size of the flat buffer.
+ *
+ * The current version of this function always returns 4, which is the number of
+ * bytes required to store the number of file descriptors contained in the fd
+ * part of the flat buffer.
+ */
+size_t getFenceFlattenedSize(hidl_handle const& fence);
+
+/**
+ * \brief Return the number of file descriptors contained in a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return `0` if \p fence does not contain a valid file descriptor, or `1`
+ * otherwise.
+ */
+size_t getFenceFdCount(hidl_handle const& fence);
+
+/**
+ * \brief Unflatten `Fence` to `hidl_handle`.
+ *
+ * \param[out] fence The destination `hidl_handle`.
+ * \param[out] nh The underlying native handle.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will point to a newly created
+ * native handle, which needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+status_t unflattenFence(hidl_handle* fence, native_handle_t** nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds);
+
+/**
+ * \brief Flatten `hidl_handle` as `Fence`.
+ *
+ * \param[in] fence The source `hidl_handle`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+status_t flattenFence(hidl_handle const& fence,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds);
+
+/**
+ * \brief Wrap `Fence` in `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle pointed to by \p t.
+ * \param[in] l The source `Fence`.
+ *
+ * On success, \p nh will hold a newly created native handle, which must be
+ * deleted manually with `native_handle_delete()` afterwards.
+ */
+// wrap: Fence -> hidl_handle
+bool wrapAs(hidl_handle* t, native_handle_t** nh, Fence const& l);
+
+/**
+ * \brief Convert `hidl_handle` to `Fence`.
+ *
+ * \param[out] l The destination `Fence`. `l` must not have been used
+ * (`l->isValid()` must return `false`) before this function is called.
+ * \param[in] t The source `hidl_handle`.
+ *
+ * If \p t contains a valid file descriptor, it will be duplicated.
+ */
+// convert: hidl_handle -> Fence
+bool convertTo(Fence* l, hidl_handle const& t);
+
+// Ref: frameworks/native/libs/ui/FenceTime.cpp: FenceTime::Snapshot
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(HGraphicBufferProducer::FenceTimeSnapshot const& t);
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The number of file descriptors contained in \p t.
+ */
+size_t getFdCount(HGraphicBufferProducer::FenceTimeSnapshot const& t);
+
+/**
+ * \brief Flatten `FenceTimeSnapshot`.
+ *
+ * \param[in] t The source `FenceTimeSnapshot`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate the file descriptor in `t.fence` if `t.state ==
+ * FENCE`.
+ */
+status_t flatten(HGraphicBufferProducer::FenceTimeSnapshot const& t,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds);
+
+/**
+ * \brief Unflatten `FenceTimeSnapshot`.
+ *
+ * \param[out] t The destination `FenceTimeSnapshot`.
+ * \param[out] nh The underlying native handle.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and the constructed snapshot contains a
+ * file descriptor, \p nh will be created to hold that file descriptor. In this
+ * case, \p nh needs to be deleted with `native_handle_delete()` afterwards.
+ */
+status_t unflatten(
+ HGraphicBufferProducer::FenceTimeSnapshot* t, native_handle_t** nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds);
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventsDelta
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(HGraphicBufferProducer::FrameEventsDelta const& t);
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The number of file descriptors contained in \p t.
+ */
+size_t getFdCount(HGraphicBufferProducer::FrameEventsDelta const& t);
+
+/**
+ * \brief Unflatten `FrameEventsDelta`.
+ *
+ * \param[out] t The destination `FrameEventsDelta`.
+ * \param[out] nh The underlying array of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will have length 4, and it will be
+ * populated with `nullptr` or newly created handles. Each non-null slot in \p
+ * nh will need to be deleted manually with `native_handle_delete()`.
+ */
+status_t unflatten(HGraphicBufferProducer::FrameEventsDelta* t,
+ std::vector<native_handle_t*>* nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds);
+
+/**
+ * \brief Flatten `FrameEventsDelta`.
+ *
+ * \param[in] t The source `FrameEventsDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+// Ref: frameworks/native/libs/gui/FrameTimestamp.cpp:
+// FrameEventsDelta::flatten
+status_t flatten(HGraphicBufferProducer::FrameEventsDelta const& t,
+ void*& buffer, size_t& size, int*& fds, size_t numFds);
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventHistoryDelta
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t);
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ * \return The number of file descriptors contained in \p t.
+ */
+size_t getFdCount(
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t);
+
+/**
+ * \brief Unflatten `FrameEventHistoryDelta`.
+ *
+ * \param[out] t The destination `FrameEventHistoryDelta`.
+ * \param[out] nh The underlying array of arrays of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will be populated with `nullptr` or
+ * newly created handles. The second dimension of \p nh will be 4. Each non-null
+ * slot in \p nh will need to be deleted manually with `native_handle_delete()`.
+ */
+status_t unflatten(
+ HGraphicBufferProducer::FrameEventHistoryDelta* t,
+ std::vector<std::vector<native_handle_t*> >* nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds);
+
+/**
+ * \brief Flatten `FrameEventHistoryDelta`.
+ *
+ * \param[in] t The source `FrameEventHistoryDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+status_t flatten(
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds);
+
+/**
+ * \brief Wrap `::android::FrameEventHistoryDelta` in
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[out] t The wrapper of type
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ * \param[out] nh The array of arrays of native handles that are referred to by
+ * members of \p t.
+ * \param[in] l The source `::android::FrameEventHistoryDelta`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+bool wrapAs(HGraphicBufferProducer::FrameEventHistoryDelta* t,
+ std::vector<std::vector<native_handle_t*> >* nh,
+ ::android::FrameEventHistoryDelta const& l);
+
+/**
+ * \brief Convert `HGraphicBufferProducer::FrameEventHistoryDelta` to
+ * `::android::FrameEventHistoryDelta`.
+ *
+ * \param[out] l The destination `::android::FrameEventHistoryDelta`.
+ * \param[in] t The source `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+bool convertTo(
+ ::android::FrameEventHistoryDelta* l,
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t);
+
+// Ref: frameworks/native/libs/ui/Region.cpp
+
+/**
+ * \brief Return the size of the buffer required to flatten `Region`.
+ *
+ * \param[in] t The input `Region`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(Region const& t);
+
+/**
+ * \brief Unflatten `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+status_t unflatten(Region* t, void const*& buffer, size_t& size);
+
+/**
+ * \brief Flatten `Region`.
+ *
+ * \param[in] t The source `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+status_t flatten(Region const& t, void*& buffer, size_t& size);
+
+/**
+ * \brief Convert `::android::Region` to `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in] l The source `::android::Region`.
+ */
+// convert: ::android::Region -> Region
+bool convertTo(Region* t, ::android::Region const& l);
+
+/**
+ * \brief Convert `Region` to `::android::Region`.
+ *
+ * \param[out] l The destination `::android::Region`.
+ * \param[in] t The source `Region`.
+ */
+// convert: Region -> ::android::Region
+bool convertTo(::android::Region* l, Region const& t);
+
+// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
+// BGraphicBufferProducer::QueueBufferInput
+
+/**
+ * \brief Return the size of the buffer required to flatten
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
+ * \return The required size of the flat buffer.
+ */
+size_t getFlattenedSize(HGraphicBufferProducer::QueueBufferInput const& t);
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
+ * \return The number of file descriptors contained in \p t.
+ */
+size_t getFdCount(
+ HGraphicBufferProducer::QueueBufferInput const& t);
+/**
+ * \brief Flatten `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
+ * \param[out] nh The native handle cloned from `t.fence`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate the file descriptor in `t.fence`.
+ */
+status_t flatten(HGraphicBufferProducer::QueueBufferInput const& t,
+ native_handle_t** nh,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds);
+
+/**
+ * \brief Unflatten `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The destination `HGraphicBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+status_t unflatten(
+ HGraphicBufferProducer::QueueBufferInput* t, native_handle_t** nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds);
+
+/**
+ * \brief Wrap `BGraphicBufferProducer::QueueBufferInput` in
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The wrapper of type
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in] l The source `BGraphicBufferProducer::QueueBufferInput`.
+ *
+ * If the return value is `true` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+bool wrapAs(
+ HGraphicBufferProducer::QueueBufferInput* t,
+ native_handle_t** nh,
+ BGraphicBufferProducer::QueueBufferInput const& l);
+
+/**
+ * \brief Convert `HGraphicBufferProducer::QueueBufferInput` to
+ * `BGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] l The destination `BGraphicBufferProducer::QueueBufferInput`.
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * If `t.fence` has a valid file descriptor, it will be duplicated.
+ */
+bool convertTo(
+ BGraphicBufferProducer::QueueBufferInput* l,
+ HGraphicBufferProducer::QueueBufferInput const& t);
+
+// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
+// BGraphicBufferProducer::QueueBufferOutput
+
+/**
+ * \brief Wrap `BGraphicBufferProducer::QueueBufferOutput` in
+ * `HGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] t The wrapper of type
+ * `HGraphicBufferProducer::QueueBufferOutput`.
+ * \param[out] nh The array of arrays of native handles that are referred to by
+ * members of \p t.
+ * \param[in] l The source `BGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+// wrap: BGraphicBufferProducer::QueueBufferOutput ->
+// HGraphicBufferProducer::QueueBufferOutput
+bool wrapAs(HGraphicBufferProducer::QueueBufferOutput* t,
+ std::vector<std::vector<native_handle_t*> >* nh,
+ BGraphicBufferProducer::QueueBufferOutput const& l);
+
+/**
+ * \brief Convert `HGraphicBufferProducer::QueueBufferOutput` to
+ * `BGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] l The destination `BGraphicBufferProducer::QueueBufferOutput`.
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+// convert: HGraphicBufferProducer::QueueBufferOutput ->
+// BGraphicBufferProducer::QueueBufferOutput
+bool convertTo(
+ BGraphicBufferProducer::QueueBufferOutput* l,
+ HGraphicBufferProducer::QueueBufferOutput const& t);
+
+/**
+ * \brief Convert `BGraphicBufferProducer::DisconnectMode` to
+ * `HGraphicBufferProducer::DisconnectMode`.
+ *
+ * \param[in] l The source `BGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `HGraphicBufferProducer::DisconnectMode`.
+ */
+HGraphicBufferProducer::DisconnectMode toHidlDisconnectMode(
+ BGraphicBufferProducer::DisconnectMode l);
+
+/**
+ * \brief Convert `HGraphicBufferProducer::DisconnectMode` to
+ * `BGraphicBufferProducer::DisconnectMode`.
+ *
+ * \param[in] l The source `HGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `BGraphicBufferProducer::DisconnectMode`.
+ */
+BGraphicBufferProducer::DisconnectMode toGuiDisconnectMode(
+ HGraphicBufferProducer::DisconnectMode t);
+
+} // namespace conversion
+} // namespace android
+
+#endif // MEDIA_STAGEFRIGHT_BQHELPER_CONVERSION_H_
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h
new file mode 100644
index 0000000..4e83059
--- /dev/null
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_DROPPER_H_
+
+#define FRAME_DROPPER_H_
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct FrameDropper : public RefBase {
+ // No frames will be dropped until a valid max frame rate is set.
+ FrameDropper();
+
+ // maxFrameRate is required to be positive.
+ status_t setMaxFrameRate(float maxFrameRate);
+
+ // Returns false if max frame rate has not been set via setMaxFrameRate.
+ bool shouldDrop(int64_t timeUs);
+
+ // Returns true if all frame drop logic should be disabled.
+ bool disabled() { return (mMinIntervalUs == -1ll); }
+
+protected:
+ virtual ~FrameDropper();
+
+private:
+ int64_t mDesiredMinTimeUs;
+ int64_t mMinIntervalUs;
+
+ DISALLOW_EVIL_CONSTRUCTORS(FrameDropper);
+};
+
+} // namespace android
+
+#endif // FRAME_DROPPER_H_
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
new file mode 100644
index 0000000..abc8910
--- /dev/null
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
@@ -0,0 +1,492 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GRAPHIC_BUFFER_SOURCE_H_
+
+#define GRAPHIC_BUFFER_SOURCE_H_
+
+#include <binder/Status.h>
+#include <gui/BufferQueue.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <utils/RefBase.h>
+
+#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/bqhelper/ComponentWrapper.h>
+
+namespace android {
+
+using ::android::binder::Status;
+
+struct FrameDropper;
+
+/*
+ * This class is used to feed codecs from a Surface via BufferQueue or
+ * HW producer.
+ *
+ * Instances of the class don't run on a dedicated thread. Instead,
+ * various events trigger data movement:
+ *
+ * - Availability of a new frame of data from the BufferQueue (notified
+ * via the onFrameAvailable callback).
+ * - The return of a codec buffer.
+ * - Application signaling end-of-stream.
+ * - Transition to or from "executing" state.
+ *
+ * Frames of data (and, perhaps, the end-of-stream indication) can arrive
+ * before the codec is in the "executing" state, so we need to queue
+ * things up until we're ready to go.
+ *
+ * The GraphicBufferSource can be configured dynamically to discard frames
+ * from the source:
+ *
+ * - if their timestamp is less than a start time
+ * - if the source is suspended or stopped and the suspend/stop-time is reached
+ * - if EOS was signaled
+ * - if there is no encoder connected to it
+ *
+ * The source, furthermore, may choose not to encode (drop) frames:
+ *
+ * - to throttle the frame rate (keep it under a certain limit)
+ *
+ * Finally, the source may optionally hold onto the last non-discarded frame
+ * (even if it was dropped) to reencode it after an interval if no further
+ * frames are sent by the producer.
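+ *
+ * Typical usage (an illustrative sketch; `wrapper` stands for whatever ComponentWrapper
+ * implementation the codec side provides):
+ *
+ *   sp<GraphicBufferSource> source = new GraphicBufferSource();
+ *   if (source->initCheck() != OK) return;   // construction failed
+ *   source->configure(wrapper, dataSpace, bufferCount, width, height, usage);
+ *   // Hand the producer side to the client as the input Surface, then:
+ *   sp<IGraphicBufferProducer> producer = source->getIGraphicBufferProducer();
+ *   source->start();                         // once the codec is executing
+ *   ...
+ *   source->signalEndOfInputStream();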
+ */
+class GraphicBufferSource : public BufferQueue::ConsumerListener {
+public:
+ GraphicBufferSource();
+
+ virtual ~GraphicBufferSource();
+
+ // We can't throw an exception if the constructor fails, so we just set
+ // this and require that the caller test the value.
+ status_t initCheck() const {
+ return mInitCheck;
+ }
+
+ // Returns the handle to the producer side of the BufferQueue. Buffers
+ // queued on this will be received by GraphicBufferSource.
+ sp<IGraphicBufferProducer> getIGraphicBufferProducer() const {
+ return mProducer;
+ }
+
+ // This is called when component transitions to running state, which means
+ // we can start handing it buffers. If we already have buffers of data
+ // sitting in the BufferQueue, this will send them to the codec.
+ Status start();
+
+ // This is called when component transitions to stopped, indicating that
+ // the codec is meant to return all buffers back to the client for them
+ // to be freed. Do NOT submit any more buffers to the component.
+ Status stop();
+
+ // This is called when component transitions to released, indicating that
+ // we are shutting down.
+ Status release();
+
+ // A "codec buffer", i.e. a buffer that can be used to pass data into
+ // the encoder, has been allocated. (This call does not call back into
+ // component.)
+ Status onInputBufferAdded(int32_t bufferId);
+
+ // Called when encoder is no longer using the buffer. If we have a BQ
+ // buffer available, fill it with a new frame of data; otherwise, just mark
+ // it as available.
+ Status onInputBufferEmptied(int32_t bufferId, int fenceFd);
+
+ // IGraphicBufferSource interface
+ // ------------------------------
+
+ // Configure the buffer source to be used with a component with the default
+ // data space.
+ status_t configure(
+ const sp<ComponentWrapper> &component,
+ int32_t dataSpace,
+ int32_t bufferCount,
+ uint32_t frameWidth,
+ uint32_t frameHeight,
+ uint32_t consumerUsage);
+
+ // This is called after the last input frame has been submitted or the buffer
+ // timestamp is greater than or equal to stopTimeUs. We need to submit an empty
+ // buffer with the EOS flag set. If we don't have a codec buffer ready,
+ // we just set the mEndOfStream flag.
+ status_t signalEndOfInputStream();
+
+ // If suspend is true, all incoming buffers (including those currently
+ // in the BufferQueue) with timestamp larger than timeUs will be discarded
+ // until the suspension is lifted. If suspend is false, all incoming buffers
+ // including those currently in the BufferQueue) with timestamp larger than
+ // timeUs will be processed. timeUs uses SYSTEM_TIME_MONOTONIC time base.
+ status_t setSuspend(bool suspend, int64_t timeUs);
+
+ // Specifies the interval after which we requeue the buffer previously
+ // queued to the encoder. This is useful in the case of surface flinger
+ // providing the input surface if the resulting encoded stream is to
+ // be displayed "live". If we were not to push through the extra frame
+ // the decoder on the remote end would be unable to decode the latest frame.
+ // This API must be called before transitioning the encoder to "executing"
+ // state and once this behaviour is specified it cannot be reset.
+ status_t setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs);
+
+ // Sets the input buffer timestamp offset.
+ // When set, the sample's timestamp will be adjusted with the timeOffsetUs.
+ status_t setTimeOffsetUs(int64_t timeOffsetUs);
+
+ /*
+ * Set the maximum frame rate on the source.
+ *
+ * When maxFps is a positive number, it indicates the maximum rate at which
+ * the buffers from this source will be sent to the encoder. Excessive
+ * frames will be dropped to meet the frame rate requirement.
+ *
+ * When maxFps is a negative number, any frame drop logic will be disabled
+ * and all frames from this source will be sent to the encoder, even when
+ * the timestamp goes backwards. Note that some components may still drop
+ * out-of-order frames silently, so this usually has to be used in
+ * conjunction with OMXNodeInstance::setMaxPtsGapUs() workaround.
+ *
+ * When maxFps is 0, this call will fail with BAD_VALUE.
+ */
+ status_t setMaxFps(float maxFps);
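+
+ // Illustrative sketch only (added for clarity, not part of this API): with a positive
+ // maxFps, the drop decision delegated to the FrameDropper helper is roughly equivalent
+ // to keeping a frame only once a full frame interval has elapsed since the last kept
+ // frame; the local names below are hypothetical:
+ //
+ //   int64_t minIntervalUs = (int64_t)(1e6 / maxFps);
+ //   bool drop = (timeUs - lastKeptTimeUs) < minIntervalUs;
+ //
+ // The actual policy (jitter tolerance, handling of timestamp jumps) lives in FrameDropper.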
+
+ // Sets the time lapse (or slow motion) parameters.
+ // When set, the sample's timestamp will be adjusted to the playback frame rate,
+ // and the capture timestamp will be adjusted to the capture rate.
+ status_t setTimeLapseConfig(double fps, double captureFps);
+
+ // Sets the start time in us (in system time); samples before this time will be
+ // dropped and not submitted to the encoder.
+ status_t setStartTimeUs(int64_t startTimeUs);
+
+ // Sets the stop time in us (in system time); samples after this time will be dropped
+ // and not submitted to the encoder. stopTimeUs uses the SYSTEM_TIME_MONOTONIC time base.
+ status_t setStopTimeUs(int64_t stopTimeUs);
+
+ // Gets the stop time offset in us. This is the offset between the latest buffer
+ // time and stopTimeUs. If the stop time is not set, INVALID_OPERATION will be returned.
+ // If the return value is OK, *stopTimeOffsetUs will contain the valid offset; otherwise,
+ // *stopTimeOffsetUs will not be modified. A positive *stopTimeOffsetUs means the buffer
+ // time is larger than stopTimeUs.
+ status_t getStopTimeOffsetUs(int64_t *stopTimeOffsetUs);
+
+ // Sets the desired color aspects, e.g. to be used when the producer does not specify a dataspace.
+ status_t setColorAspects(int32_t aspectsPacked);
+
+protected:
+ // BQ::ConsumerListener interface
+ // ------------------------------
+
+ // BufferQueue::ConsumerListener interface, called when a new frame of
+ // data is available. If we're executing and a codec buffer is
+ // available, we acquire the buffer, copy the GraphicBuffer reference
+ // into the codec buffer, and call Empty[This]Buffer. If we're not yet
+ // executing or there's no codec buffer available, we just increment
+ // mNumFramesAvailable and return.
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // BufferQueue::ConsumerListener interface, called when the client has
+ // released one or more GraphicBuffers. We clear out the appropriate
+ // set of mBufferSlot entries.
+ void onBuffersReleased() override;
+
+ // BufferQueue::ConsumerListener interface, called when the client has
+ // changed the sideband stream. GraphicBufferSource doesn't handle sideband
+ // streams so this is a no-op (and should never be called).
+ void onSidebandStreamChanged() override;
+
+private:
+ // Lock, covers all member variables.
+ mutable Mutex mMutex;
+
+ // Used to report constructor failure.
+ status_t mInitCheck;
+
+ // Graphic buffer reference objects
+ // --------------------------------
+
+ // These are used to keep a shared reference to GraphicBuffers and gralloc handles owned by the
+ // GraphicBufferSource as well as to manage the cache slots. Separate references are owned by
+ // the buffer cache (controlled by the buffer queue/buffer producer) and the codec.
+
+ // When we get buffers from the producer (BQ), it designates them to be cached into specific
+ // slots. Each slot owns a shared reference to the graphic buffer (we track these using
+ // CachedBuffer) that is in that slot, but the producer controls the slots.
+ struct CachedBuffer;
+
+ // When we acquire a buffer, we must release it back to the producer once we (or the codec)
+ // no longer use it (as long as the buffer is still in the cache slot). We use shared
+ // AcquiredBuffer instances for this purpose - and we call release buffer when the last
+ // reference is relinquished.
+ struct AcquiredBuffer;
+
+ // We also need to keep some extra metadata (other than the buffer reference) for acquired
+ // buffers. These are tracked in VideoBuffer struct.
+ struct VideoBuffer {
+ std::shared_ptr<AcquiredBuffer> mBuffer;
+ nsecs_t mTimestampNs;
+ android_dataspace_t mDataspace;
+ };
+
+ // Cached and acquired buffers
+ // --------------------------------
+
+ typedef int slot_id;
+
+ // Maps a slot to the cached buffer in that slot
+ KeyedVector<slot_id, std::shared_ptr<CachedBuffer>> mBufferSlots;
+
+ // Queue of buffers acquired in chronological order that are not yet submitted to the codec
+ List<VideoBuffer> mAvailableBuffers;
+
+ // Number of buffers that have been signaled by the producer that they are available, but
+ // we've been unable to acquire them due to our max acquire count
+ int32_t mNumAvailableUnacquiredBuffers;
+
+ // Number of frames acquired from the consumer (debug only)
+ // (i.e. acquireBuffer was called, and release still needs to be called)
+ int32_t mNumOutstandingAcquires;
+
+ // Acquire a buffer from the BQ and store it in |item| if successful
+ // \return OK on success, or error on failure.
+ status_t acquireBuffer_l(VideoBuffer *item);
+
+ // Called when a buffer was acquired from the producer
+ void onBufferAcquired_l(const VideoBuffer &buffer);
+
+ // Marks the buffer at the given slot as no longer cached, and accounts for the outstanding
+ // acquire count. Returns true if the slot was populated; otherwise, false.
+ bool discardBufferInSlot_l(slot_id i);
+
+ // Marks the buffer at the given slot index as no longer cached, and accounts for the
+ // outstanding acquire count.
+ void discardBufferAtSlotIndex_l(ssize_t bsi);
+
+ // Releases all acquired and unacquired available buffers.
+ // This method returns early if it fails to acquire an unacquired available buffer, which
+ // leaves mNumAvailableUnacquiredBuffers positive on return.
+ void releaseAllAvailableBuffers_l();
+
+ // returns whether we have any available buffers (acquired or not-yet-acquired)
+ bool haveAvailableBuffers_l() const {
+ return !mAvailableBuffers.empty() || mNumAvailableUnacquiredBuffers > 0;
+ }
+
+ // Codec buffers
+ // -------------
+
+ // When we queue buffers to the encoder, we must hold references to the graphic buffers
+ // backing those codec buffers, as the producer may free the slots.
+
+ typedef int32_t codec_buffer_id;
+
+ // set of codec buffer ID-s of buffers available to fill
+ List<codec_buffer_id> mFreeCodecBuffers;
+
+ // maps codec buffer ID-s to buffer info submitted to the codec. Used to keep a reference for
+ // the graphics buffer.
+ KeyedVector<codec_buffer_id, std::shared_ptr<AcquiredBuffer>> mSubmittedCodecBuffers;
+
+ // Processes the next acquired frame. If there is no available codec buffer, it returns false
+ // without any further action.
+ //
+ // Otherwise, it consumes the next acquired frame and determines if it needs to be discarded or
+ // dropped. If neither are needed, it submits it to the codec. It also saves the latest
+ // non-dropped frame and submits it for repeat encoding (if this is enabled).
+ //
+ // \require there must be an acquired frame (i.e. we're in the onFrameAvailable callback,
+ // or if we're in codecBufferEmptied and mNumFramesAvailable is nonzero).
+ // \require codec must be executing
+ // \returns true if it acquired (and handled) the next frame. Otherwise, false.
+ bool fillCodecBuffer_l();
+
+ // Calculates the media timestamp for |item| and on success it submits the buffer to the codec,
+ // while also keeping a reference for it in mSubmittedCodecBuffers.
+ // Returns UNKNOWN_ERROR if the buffer was not submitted due to buffer timestamp. Otherwise,
+ // it returns any submit success or error value returned by the codec.
+ status_t submitBuffer_l(const VideoBuffer &item);
+
+ // If there is an available codec buffer, submits an empty buffer with the EOS flag set and
+ // sets the mEndOfStreamSent flag. Does nothing if there is no codec buffer available.
+ void submitEndOfInputStream_l();
+
+ // Set to true if we want to send end-of-stream after we run out of available frames from the
+ // producer
+ bool mEndOfStream;
+
+ // Flag that the EOS was submitted to the encoder
+ bool mEndOfStreamSent;
+
+ // Dataspace for the last frame submitted to the codec
+ android_dataspace mLastDataspace;
+
+ // Default color aspects for this source
+ int32_t mDefaultColorAspectsPacked;
+
+ // called when the data space of the input buffer changes
+ void onDataspaceChanged_l(android_dataspace dataspace, android_pixel_format pixelFormat);
+
+ // Pointer back to the component that created us. We send buffers here.
+ sp<ComponentWrapper> mComponent;
+
+ // Set by start() / stop().
+ bool mExecuting;
+
+ bool mSuspended;
+
+ // returns true if this source is currently discarding acquired buffers unconditionally,
+ // regardless of the metadata of those buffers
+ bool areWeDiscardingAvailableBuffers_l();
+
+ int64_t mLastFrameTimestampUs;
+
+ // Our BufferQueue interfaces. mProducer is passed to the producer through
+ // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
+ // the buffers queued by the producer.
+ sp<IGraphicBufferProducer> mProducer;
+ sp<IGraphicBufferConsumer> mConsumer;
+
+ // The time to stop sending buffers.
+ int64_t mStopTimeUs;
+
+ struct ActionItem {
+ typedef enum {
+ PAUSE,
+ RESUME,
+ STOP
+ } ActionType;
+ ActionType mAction;
+ int64_t mActionTimeUs;
+ };
+
+ // Maintain last action timestamp to ensure all the action timestamps are
+ // monotonically increasing.
+ int64_t mLastActionTimeUs;
+
+ // An action queue that queues up all the actions sent to GraphicBufferSource.
+ // A STOP action should only show up at the end of the list, as all the actions
+ // after a STOP action will be discarded. mActionQueue is protected by mMutex.
+ List<ActionItem> mActionQueue;
+
+ ////
+ friend struct AHandlerReflector<GraphicBufferSource>;
+
+ enum {
+ kWhatRepeatLastFrame, ///< queue last frame for reencoding
+ };
+ enum {
+ kRepeatLastFrameCount = 10,
+ };
+
+ int64_t mSkipFramesBeforeNs;
+
+ sp<FrameDropper> mFrameDropper;
+
+ sp<ALooper> mLooper;
+ sp<AHandlerReflector<GraphicBufferSource> > mReflector;
+
+ // Repeat last frame feature
+ // -------------------------
+ // configuration parameter: repeat interval for frame repeating (<0 if repeating is disabled)
+ int64_t mFrameRepeatIntervalUs;
+
+ // current frame repeat generation - used to cancel a pending frame repeat
+ int32_t mRepeatLastFrameGeneration;
+
+ // number of times to repeat latest frame (0 = none)
+ int32_t mOutstandingFrameRepeatCount;
+
+ // The previous buffer should've been repeated but
+ // no codec buffer was available at the time.
+ bool mFrameRepeatBlockedOnCodecBuffer;
+
+ // hold a reference to the last acquired (and not discarded) frame for frame repeating
+ VideoBuffer mLatestBuffer;
+
+ // queue last frame for reencode after the repeat interval.
+ void queueFrameRepeat_l();
+
+ // save |item| as the latest buffer and queue it for reencode (repeat)
+ void setLatestBuffer_l(const VideoBuffer &item);
+
+ // submit last frame to encoder and queue it for reencode
+ // \return true if the buffer was submitted, false if it wasn't (e.g. the source is suspended
+ // or there is no available codec buffer)
+ bool repeatLatestBuffer_l();
+
+ // Time lapse / slow motion configuration
+ // --------------------------------------
+
+ // desired frame rate for encoding - value <= 0 if undefined
+ double mFps;
+
+ // desired frame rate for capture - value <= 0 if undefined
+ double mCaptureFps;
+
+ // Time lapse mode is enabled if the capture frame rate is defined and it is
+ // smaller than half the encoding frame rate (if defined). In this mode,
+ // frames that come in between the capture interval (the reciprocal of the
+ // capture frame rate) are dropped and the encoding timestamp is adjusted to
+ // match the desired encoding frame rate.
+ //
+ // Slow motion mode is enabled if both encoding and capture frame rates are
+ // defined and the encoding frame rate is less than half the capture frame
+ // rate. In this mode, the source is expected to produce frames with an even
+ // timestamp interval (after rounding) with the configured capture fps. The
+ // first source timestamp is used as the source base time. Afterwards, the
+ // timestamp of each source frame is snapped to the nearest expected capture
+ // timestamp and scaled to match the configured encoding frame rate.
+
+ // These modes must be enabled before using this source.
+
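+ // Illustrative sketch only (added for clarity, not part of this API): under the slow
+ // motion assumptions above, the media timestamp of a frame could be derived roughly as
+ // follows; captureUs, nFrames, captureIntervalUs and codecTimeUs are hypothetical locals,
+ // and the real logic lives in calculateCodecTimestamp_l():
+ //
+ //   int64_t captureIntervalUs = (int64_t)(1e6 / mCaptureFps);          // expected spacing
+ //   int64_t nFrames = (captureUs - mBaseCaptureUs + captureIntervalUs / 2)
+ //           / captureIntervalUs;                                       // snap to nearest slot
+ //   int64_t codecTimeUs = mBaseFrameUs + (int64_t)(nFrames * 1e6 / mFps); // encoding rate
+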
+ // adjusted capture timestamp of the base frame
+ int64_t mBaseCaptureUs;
+
+ // adjusted encoding timestamp of the base frame
+ int64_t mBaseFrameUs;
+
+ // number of frames from the base time
+ int64_t mFrameCount;
+
+ // adjusted capture timestamp for the previous frame (negative if there was
+ // none)
+ int64_t mPrevCaptureUs;
+
+ // adjusted media timestamp for the previous frame (negative if there was none)
+ int64_t mPrevFrameUs;
+
+ // desired offset between media time and capture time
+ int64_t mInputBufferTimeOffsetUs;
+
+ // Calculates and outputs the timestamp to use for a buffer with a specific buffer timestamp
+ // |bufferTimeNs|. Returns false on failure (the buffer is too close or its timestamp is moving
+ // backwards). Otherwise, stores the media timestamp in |*codecTimeUs| and returns true.
+ //
+ // This method takes into account the start time offset and any time lapse or slow motion time
+ // adjustment requests.
+ bool calculateCodecTimestamp_l(nsecs_t bufferTimeNs, int64_t *codecTimeUs);
+
+ void onMessageReceived(const sp<AMessage> &msg);
+
+ DISALLOW_EVIL_CONSTRUCTORS(GraphicBufferSource);
+};
+
+} // namespace android
+
+#endif // GRAPHIC_BUFFER_SOURCE_H_
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/WGraphicBufferProducer.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/WGraphicBufferProducer.h
new file mode 100644
index 0000000..8ddf20f
--- /dev/null
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/WGraphicBufferProducer.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_STAGEFRIGHT_WGRAPHICBUFFERPRODUCER_H_
+#define MEDIA_STAGEFRIGHT_WGRAPHICBUFFERPRODUCER_H_
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/Binder.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IProducerListener.h>
+#include <media/stagefright/bqhelper/Conversion.h>
+#include <media/stagefright/bqhelper/WProducerListener.h>
+#include <system/window.h>
+
+#include <android/hardware/graphics/bufferqueue/1.0/IGraphicBufferProducer.h>
+
+namespace android {
+
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+typedef ::android::hardware::graphics::bufferqueue::V1_0::
+ IGraphicBufferProducer HGraphicBufferProducer;
+typedef ::android::hardware::graphics::bufferqueue::V1_0::
+ IProducerListener HProducerListener;
+
+typedef ::android::IGraphicBufferProducer BGraphicBufferProducer;
+typedef ::android::IProducerListener BProducerListener;
+using ::android::BnGraphicBufferProducer;
+
+#ifndef LOG
+struct LOG_dummy {
+ template <typename T>
+ LOG_dummy& operator<< (const T&) { return *this; }
+};
+
+#define LOG(x) LOG_dummy()
+#endif
+
+// Instantiate only if HGraphicBufferProducer is base of BASE.
+template <typename BASE,
+ typename = typename std::enable_if<std::is_base_of<HGraphicBufferProducer, BASE>::value>::type>
+struct TWGraphicBufferProducer : public BASE {
+ TWGraphicBufferProducer(sp<BGraphicBufferProducer> const& base) : mBase(base) {}
+ Return<void> requestBuffer(int32_t slot, HGraphicBufferProducer::requestBuffer_cb _hidl_cb) override {
+ sp<GraphicBuffer> buf;
+ status_t status = mBase->requestBuffer(slot, &buf);
+ AnwBuffer anwBuffer;
+ if (buf != nullptr) {
+ ::android::conversion::wrapAs(&anwBuffer, *buf);
+ }
+ _hidl_cb(static_cast<int32_t>(status), anwBuffer);
+ return Void();
+ }
+
+ Return<int32_t> setMaxDequeuedBufferCount(int32_t maxDequeuedBuffers) override {
+ return static_cast<int32_t>(mBase->setMaxDequeuedBufferCount(
+ static_cast<int>(maxDequeuedBuffers)));
+ }
+
+ Return<int32_t> setAsyncMode(bool async) override {
+ return static_cast<int32_t>(mBase->setAsyncMode(async));
+ }
+
+ Return<void> dequeueBuffer(
+ uint32_t width, uint32_t height,
+ ::android::hardware::graphics::common::V1_0::PixelFormat format, uint32_t usage,
+ bool getFrameTimestamps, HGraphicBufferProducer::dequeueBuffer_cb _hidl_cb) override {
+ int slot;
+ sp<Fence> fence;
+ ::android::FrameEventHistoryDelta outTimestamps;
+ status_t status = mBase->dequeueBuffer(
+ &slot, &fence, width, height,
+ static_cast<::android::PixelFormat>(format), usage, nullptr,
+ getFrameTimestamps ? &outTimestamps : nullptr);
+ hidl_handle tFence;
+ HGraphicBufferProducer::FrameEventHistoryDelta tOutTimestamps;
+
+ native_handle_t* nh = nullptr;
+ if ((fence == nullptr) || !::android::conversion::wrapAs(&tFence, &nh, *fence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::dequeueBuffer - "
+ "Invalid output fence";
+ _hidl_cb(static_cast<int32_t>(status),
+ static_cast<int32_t>(slot),
+ tFence,
+ tOutTimestamps);
+ return Void();
+ }
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (getFrameTimestamps && !::android::conversion::wrapAs(&tOutTimestamps, &nhAA, outTimestamps)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::dequeueBuffer - "
+ "Invalid output timestamps";
+ _hidl_cb(static_cast<int32_t>(status),
+ static_cast<int32_t>(slot),
+ tFence,
+ tOutTimestamps);
+ native_handle_delete(nh);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status),
+ static_cast<int32_t>(slot),
+ tFence,
+ tOutTimestamps);
+ native_handle_delete(nh);
+ if (getFrameTimestamps) {
+ for (auto& nhA : nhAA) {
+ for (auto& handle : nhA) {
+ native_handle_delete(handle);
+ }
+ }
+ }
+ return Void();
+ }
+
+ Return<int32_t> detachBuffer(int32_t slot) override {
+ return static_cast<int32_t>(mBase->detachBuffer(slot));
+ }
+
+ Return<void> detachNextBuffer(HGraphicBufferProducer::detachNextBuffer_cb _hidl_cb) override {
+ sp<GraphicBuffer> outBuffer;
+ sp<Fence> outFence;
+ status_t status = mBase->detachNextBuffer(&outBuffer, &outFence);
+ AnwBuffer tBuffer;
+ hidl_handle tFence;
+
+ if (outBuffer == nullptr) {
+ LOG(ERROR) << "TWGraphicBufferProducer::detachNextBuffer - "
+ "Invalid output buffer";
+ _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
+ return Void();
+ }
+ ::android::conversion::wrapAs(&tBuffer, *outBuffer);
+ native_handle_t* nh = nullptr;
+ if ((outFence != nullptr) && !::android::conversion::wrapAs(&tFence, &nh, *outFence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::detachNextBuffer - "
+ "Invalid output fence";
+ _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
+ native_handle_delete(nh);
+ return Void();
+ }
+
+ Return<void> attachBuffer(const AnwBuffer& buffer, HGraphicBufferProducer::attachBuffer_cb _hidl_cb) override {
+ int outSlot;
+ sp<GraphicBuffer> lBuffer = new GraphicBuffer();
+ if (!::android::conversion::convertTo(lBuffer.get(), buffer)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::attachBuffer - "
+ "Invalid input native window buffer";
+ _hidl_cb(static_cast<int32_t>(BAD_VALUE), -1);
+ return Void();
+ }
+ status_t status = mBase->attachBuffer(&outSlot, lBuffer);
+
+ _hidl_cb(static_cast<int32_t>(status), static_cast<int32_t>(outSlot));
+ return Void();
+ }
+
+ Return<void> queueBuffer(
+ int32_t slot, const HGraphicBufferProducer::QueueBufferInput& input,
+ HGraphicBufferProducer::queueBuffer_cb _hidl_cb) override {
+ HGraphicBufferProducer::QueueBufferOutput tOutput;
+ BGraphicBufferProducer::QueueBufferInput lInput(
+ 0, false, HAL_DATASPACE_UNKNOWN,
+ ::android::Rect(0, 0, 1, 1),
+ NATIVE_WINDOW_SCALING_MODE_FREEZE,
+ 0, ::android::Fence::NO_FENCE);
+ if (!::android::conversion::convertTo(&lInput, input)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::queueBuffer - "
+ "Invalid input";
+ _hidl_cb(static_cast<int32_t>(BAD_VALUE), tOutput);
+ return Void();
+ }
+ BGraphicBufferProducer::QueueBufferOutput lOutput;
+ status_t status = mBase->queueBuffer(
+ static_cast<int>(slot), lInput, &lOutput);
+
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (!::android::conversion::wrapAs(&tOutput, &nhAA, lOutput)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::queueBuffer - "
+ "Invalid output";
+ _hidl_cb(static_cast<int32_t>(BAD_VALUE), tOutput);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status), tOutput);
+ for (auto& nhA : nhAA) {
+ for (auto& nh : nhA) {
+ native_handle_delete(nh);
+ }
+ }
+ return Void();
+ }
+
+ Return<int32_t> cancelBuffer(int32_t slot, const hidl_handle& fence) override {
+ sp<Fence> lFence = new Fence();
+ if (!::android::conversion::convertTo(lFence.get(), fence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::cancelBuffer - "
+ "Invalid input fence";
+ return static_cast<int32_t>(BAD_VALUE);
+ }
+ return static_cast<int32_t>(mBase->cancelBuffer(static_cast<int>(slot), lFence));
+ }
+
+ Return<void> query(int32_t what, HGraphicBufferProducer::query_cb _hidl_cb) override {
+ int lValue;
+ int lReturn = mBase->query(static_cast<int>(what), &lValue);
+ _hidl_cb(static_cast<int32_t>(lReturn), static_cast<int32_t>(lValue));
+ return Void();
+ }
+
+ Return<void> connect(const sp<HProducerListener>& listener,
+ int32_t api, bool producerControlledByApp,
+ HGraphicBufferProducer::connect_cb _hidl_cb) override {
+ sp<BProducerListener> lListener = listener == nullptr ?
+ nullptr : new LWProducerListener(listener);
+ BGraphicBufferProducer::QueueBufferOutput lOutput;
+ status_t status = mBase->connect(lListener,
+ static_cast<int>(api),
+ producerControlledByApp,
+ &lOutput);
+
+ HGraphicBufferProducer::QueueBufferOutput tOutput;
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (!::android::conversion::wrapAs(&tOutput, &nhAA, lOutput)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::connect - "
+ "Invalid output";
+ _hidl_cb(static_cast<int32_t>(status), tOutput);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status), tOutput);
+ for (auto& nhA : nhAA) {
+ for (auto& nh : nhA) {
+ native_handle_delete(nh);
+ }
+ }
+ return Void();
+ }
+
+ Return<int32_t> disconnect(
+ int32_t api,
+ HGraphicBufferProducer::DisconnectMode mode) override {
+ return static_cast<int32_t>(mBase->disconnect(
+ static_cast<int>(api),
+ ::android::conversion::toGuiDisconnectMode(mode)));
+ }
+
+ Return<int32_t> setSidebandStream(const hidl_handle& stream) override {
+ return static_cast<int32_t>(mBase->setSidebandStream(NativeHandle::create(
+ stream ? native_handle_clone(stream) : NULL, true)));
+ }
+
+ Return<void> allocateBuffers(
+ uint32_t width, uint32_t height,
+ ::android::hardware::graphics::common::V1_0::PixelFormat format,
+ uint32_t usage) override {
+ mBase->allocateBuffers(
+ width, height,
+ static_cast<::android::PixelFormat>(format),
+ usage);
+ return Void();
+ }
+
+ Return<int32_t> allowAllocation(bool allow) override {
+ return static_cast<int32_t>(mBase->allowAllocation(allow));
+ }
+
+ Return<int32_t> setGenerationNumber(uint32_t generationNumber) override {
+ return static_cast<int32_t>(mBase->setGenerationNumber(generationNumber));
+ }
+
+ Return<void> getConsumerName(HGraphicBufferProducer::getConsumerName_cb _hidl_cb) override {
+ _hidl_cb(mBase->getConsumerName().string());
+ return Void();
+ }
+
+ Return<int32_t> setSharedBufferMode(bool sharedBufferMode) override {
+ return static_cast<int32_t>(mBase->setSharedBufferMode(sharedBufferMode));
+ }
+
+ Return<int32_t> setAutoRefresh(bool autoRefresh) override {
+ return static_cast<int32_t>(mBase->setAutoRefresh(autoRefresh));
+ }
+
+ Return<int32_t> setDequeueTimeout(int64_t timeoutNs) override {
+ return static_cast<int32_t>(mBase->setDequeueTimeout(timeoutNs));
+ }
+
+ Return<void> getLastQueuedBuffer(HGraphicBufferProducer::getLastQueuedBuffer_cb _hidl_cb) override {
+ sp<GraphicBuffer> lOutBuffer = new GraphicBuffer();
+ sp<Fence> lOutFence = new Fence();
+ float lOutTransformMatrix[16];
+ status_t status = mBase->getLastQueuedBuffer(
+ &lOutBuffer, &lOutFence, lOutTransformMatrix);
+
+ AnwBuffer tOutBuffer;
+ if (lOutBuffer != nullptr) {
+ ::android::conversion::wrapAs(&tOutBuffer, *lOutBuffer);
+ }
+ hidl_handle tOutFence;
+ native_handle_t* nh = nullptr;
+ if ((lOutFence == nullptr) || !::android::conversion::wrapAs(&tOutFence, &nh, *lOutFence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::getLastQueuedBuffer - "
+ "Invalid output fence";
+ _hidl_cb(static_cast<int32_t>(status),
+ tOutBuffer,
+ tOutFence,
+ hidl_array<float, 16>());
+ return Void();
+ }
+ hidl_array<float, 16> tOutTransformMatrix(lOutTransformMatrix);
+
+ _hidl_cb(static_cast<int32_t>(status), tOutBuffer, tOutFence, tOutTransformMatrix);
+ native_handle_delete(nh);
+ return Void();
+ }
+
+ Return<void> getFrameTimestamps(HGraphicBufferProducer::getFrameTimestamps_cb _hidl_cb) override {
+ ::android::FrameEventHistoryDelta lDelta;
+ mBase->getFrameTimestamps(&lDelta);
+
+ HGraphicBufferProducer::FrameEventHistoryDelta tDelta;
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (!::android::conversion::wrapAs(&tDelta, &nhAA, lDelta)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::getFrameTimestamps - "
+ "Invalid output frame timestamps";
+ _hidl_cb(tDelta);
+ return Void();
+ }
+
+ _hidl_cb(tDelta);
+ for (auto& nhA : nhAA) {
+ for (auto& nh : nhA) {
+ native_handle_delete(nh);
+ }
+ }
+ return Void();
+ }
+
+ Return<void> getUniqueId(HGraphicBufferProducer::getUniqueId_cb _hidl_cb) override {
+ uint64_t outId;
+ status_t status = mBase->getUniqueId(&outId);
+ _hidl_cb(static_cast<int32_t>(status), outId);
+ return Void();
+ }
+
+private:
+ sp<BGraphicBufferProducer> mBase;
+};
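+
+// Illustrative usage (added for clarity; the concrete alias used by callers is not part of
+// this header): a binder-side producer can be exposed over HIDL by wrapping it, e.g.
+//
+//   sp<BGraphicBufferProducer> bProducer /* = obtained from a BufferQueue */;
+//   sp<HGraphicBufferProducer> hProducer =
+//           new TWGraphicBufferProducer<HGraphicBufferProducer>(bProducer);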
+
+} // namespace android
+
+#endif // MEDIA_STAGEFRIGHT_WGRAPHICBUFFERPRODUCER_H_
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/WProducerListener.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/WProducerListener.h
new file mode 100644
index 0000000..febba87
--- /dev/null
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/WProducerListener.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_STAGEFRIGHT_WPRODUCERLISTENER_H_
+#define MEDIA_STAGEFRIGHT_WPRODUCERLISTENER_H_
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/IBinder.h>
+#include <gui/IProducerListener.h>
+
+#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
+
+namespace android {
+
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+typedef ::android::hardware::graphics::bufferqueue::V1_0::IProducerListener
+ HProducerListener;
+typedef ::android::IProducerListener
+ BProducerListener;
+using ::android::BnProducerListener;
+
+struct TWProducerListener : public HProducerListener {
+ sp<BProducerListener> mBase;
+ TWProducerListener(sp<BProducerListener> const& base);
+ Return<void> onBufferReleased() override;
+ Return<bool> needsReleaseNotify() override;
+};
+
+class LWProducerListener : public BnProducerListener {
+public:
+ sp<HProducerListener> mBase;
+ LWProducerListener(sp<HProducerListener> const& base);
+ void onBufferReleased() override;
+ bool needsReleaseNotify() override;
+};
+
+} // namespace android
+
+#endif // MEDIA_STAGEFRIGHT_WPRODUCERLISTENER_H_
diff --git a/media/libstagefright/bqhelper/tests/Android.bp b/media/libstagefright/bqhelper/tests/Android.bp
new file mode 100644
index 0000000..2fbc3d6
--- /dev/null
+++ b/media/libstagefright/bqhelper/tests/Android.bp
@@ -0,0 +1,15 @@
+cc_test {
+ name: "FrameDropper_test",
+
+ srcs: ["FrameDropper_test.cpp"],
+
+ shared_libs: [
+ "libstagefright_bufferqueue_helper",
+ "libutils",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
diff --git a/media/libstagefright/bqhelper/tests/FrameDropper_test.cpp b/media/libstagefright/bqhelper/tests/FrameDropper_test.cpp
new file mode 100644
index 0000000..55ae77c
--- /dev/null
+++ b/media/libstagefright/bqhelper/tests/FrameDropper_test.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameDropper_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/bqhelper/FrameDropper.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace android {
+
+struct TestFrame {
+ int64_t timeUs;
+ bool shouldDrop;
+};
+
+static const TestFrame testFrames20Fps[] = {
+ {1000000, false}, {1050000, false}, {1100000, false}, {1150000, false},
+ {1200000, false}, {1250000, false}, {1300000, false}, {1350000, false},
+ {1400000, false}, {1450000, false}, {1500000, false}, {1550000, false},
+ {1600000, false}, {1650000, false}, {1700000, false}, {1750000, false},
+ {1800000, false}, {1850000, false}, {1900000, false}, {1950000, false},
+};
+
+static const TestFrame testFrames30Fps[] = {
+ {1000000, false}, {1033333, false}, {1066667, false}, {1100000, false},
+ {1133333, false}, {1166667, false}, {1200000, false}, {1233333, false},
+ {1266667, false}, {1300000, false}, {1333333, false}, {1366667, false},
+ {1400000, false}, {1433333, false}, {1466667, false}, {1500000, false},
+ {1533333, false}, {1566667, false}, {1600000, false}, {1633333, false},
+};
+
+static const TestFrame testFrames40Fps[] = {
+ {1000000, false}, {1025000, true}, {1050000, false}, {1075000, false},
+ {1100000, false}, {1125000, true}, {1150000, false}, {1175000, false},
+ {1200000, false}, {1225000, true}, {1250000, false}, {1275000, false},
+ {1300000, false}, {1325000, true}, {1350000, false}, {1375000, false},
+ {1400000, false}, {1425000, true}, {1450000, false}, {1475000, false},
+};
+
+static const TestFrame testFrames60Fps[] = {
+ {1000000, false}, {1016667, true}, {1033333, false}, {1050000, true},
+ {1066667, false}, {1083333, true}, {1100000, false}, {1116667, true},
+ {1133333, false}, {1150000, true}, {1166667, false}, {1183333, true},
+ {1200000, false}, {1216667, true}, {1233333, false}, {1250000, true},
+ {1266667, false}, {1283333, true}, {1300000, false}, {1316667, true},
+};
+
+static const TestFrame testFramesVariableFps[] = {
+ // 40fps
+ {1000000, false}, {1025000, true}, {1050000, false}, {1075000, false},
+ {1100000, false}, {1125000, true}, {1150000, false}, {1175000, false},
+ {1200000, false}, {1225000, true}, {1250000, false}, {1275000, false},
+ {1300000, false}, {1325000, true}, {1350000, false}, {1375000, false},
+ {1400000, false}, {1425000, true}, {1450000, false}, {1475000, false},
+ // a timestamp jump plus switch to 20fps
+ {2000000, false}, {2050000, false}, {2100000, false}, {2150000, false},
+ {2200000, false}, {2250000, false}, {2300000, false}, {2350000, false},
+ {2400000, false}, {2450000, false}, {2500000, false}, {2550000, false},
+ {2600000, false}, {2650000, false}, {2700000, false}, {2750000, false},
+ {2800000, false}, {2850000, false}, {2900000, false}, {2950000, false},
+ // 60fps
+ {2966667, false}, {2983333, true}, {3000000, false}, {3016667, true},
+ {3033333, false}, {3050000, true}, {3066667, false}, {3083333, true},
+ {3100000, false}, {3116667, true}, {3133333, false}, {3150000, true},
+ {3166667, false}, {3183333, true}, {3200000, false}, {3216667, true},
+ {3233333, false}, {3250000, true}, {3266667, false}, {3283333, true},
+};
+
+static const int kMaxTestJitterUs = 2000;
+// return one of 1000, 0, -1000 as jitter.
+static int GetJitter(size_t i) {
+ return (1 - (i % 3)) * (kMaxTestJitterUs / 2);
+}
+
+class FrameDropperTest : public ::testing::Test {
+public:
+ FrameDropperTest() : mFrameDropper(new FrameDropper()) {
+ EXPECT_EQ(OK, mFrameDropper->setMaxFrameRate(30.0));
+ }
+
+protected:
+ void RunTest(const TestFrame* frames, size_t size) {
+ for (size_t i = 0; i < size; ++i) {
+ int jitter = GetJitter(i);
+ int64_t testTimeUs = frames[i].timeUs + jitter;
+ printf("time %lld, testTime %lld, jitter %d\n",
+ (long long)frames[i].timeUs, (long long)testTimeUs, jitter);
+ EXPECT_EQ(frames[i].shouldDrop, mFrameDropper->shouldDrop(testTimeUs));
+ }
+ }
+
+ sp<FrameDropper> mFrameDropper;
+};
+
+TEST_F(FrameDropperTest, TestInvalidMaxFrameRate) {
+ EXPECT_NE(OK, mFrameDropper->setMaxFrameRate(-1.0));
+ EXPECT_NE(OK, mFrameDropper->setMaxFrameRate(0));
+}
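+
+// Illustrative sketch only (added for clarity, not part of this change): a test for another
+// positive cap would follow the same pattern, constructing its own dropper instead of the
+// 30 fps fixture; the test and variable names below are hypothetical:
+//
+//   TEST(FrameDropperStandaloneTest, AcceptsPositiveMaxFrameRate) {
+//       sp<FrameDropper> dropper = new FrameDropper();
+//       EXPECT_EQ(OK, dropper->setMaxFrameRate(24.0));
+//   }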
+
+TEST_F(FrameDropperTest, Test20Fps) {
+ RunTest(testFrames20Fps, ARRAY_SIZE(testFrames20Fps));
+}
+
+TEST_F(FrameDropperTest, Test30Fps) {
+ RunTest(testFrames30Fps, ARRAY_SIZE(testFrames30Fps));
+}
+
+TEST_F(FrameDropperTest, Test40Fps) {
+ RunTest(testFrames40Fps, ARRAY_SIZE(testFrames40Fps));
+}
+
+TEST_F(FrameDropperTest, Test60Fps) {
+ RunTest(testFrames60Fps, ARRAY_SIZE(testFrames60Fps));
+}
+
+TEST_F(FrameDropperTest, TestVariableFps) {
+ RunTest(testFramesVariableFps, ARRAY_SIZE(testFramesVariableFps));
+}
+
+} // namespace android
diff --git a/media/libstagefright/codec2/Android.mk b/media/libstagefright/codec2/Android.mk
deleted file mode 100644
index ef06ed7..0000000
--- a/media/libstagefright/codec2/Android.mk
+++ /dev/null
@@ -1,21 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- C2.cpp \
-
-LOCAL_C_INCLUDES += \
- $(TOP)/frameworks/av/media/libstagefright/codec2/include \
- $(TOP)/frameworks/native/include/media/hardware \
-
-LOCAL_MODULE:= libstagefright_codec2
-LOCAL_CFLAGS += -Werror -Wall
-LOCAL_CLANG := true
-LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
-LOCAL_SANITIZE_DIAG := cfi
-
-include $(BUILD_SHARED_LIBRARY)
-
-################################################################################
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libstagefright/codec2/C2.cpp b/media/libstagefright/codec2/C2.cpp
deleted file mode 100644
index a51b073..0000000
--- a/media/libstagefright/codec2/C2.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <C2.h>
-#include <C2Buffer.h>
-#include <C2Component.h>
-#include <C2Config.h>
-#include <C2Param.h>
-#include <C2ParamDef.h>
-#include <C2Work.h>
-
-namespace android {
-
-/**
- * There is nothing here yet. This library is built to see what symbols and methods get
- * defined as part of the API include files.
- *
- * Going forward, the Codec2 library will contain utility methods that are useful for
- * Codec2 clients.
- */
-
-} // namespace android
-
diff --git a/media/libstagefright/codec2/include/C2.h b/media/libstagefright/codec2/include/C2.h
deleted file mode 100644
index 7d00a03..0000000
--- a/media/libstagefright/codec2/include/C2.h
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef C2_H_
-#define C2_H_
-
-#include <string>
-#include <vector>
-#include <list>
-
-#ifdef __ANDROID__
-
-#include <utils/Errors.h> // for status_t
-#include <utils/Timers.h> // for nsecs_t
-
-namespace android {
-
-#else
-
-#include <errno.h>
-typedef int64_t nsecs_t;
-
-enum {
- GRALLOC_USAGE_SW_READ_OFTEN,
- GRALLOC_USAGE_RENDERSCRIPT,
- GRALLOC_USAGE_HW_TEXTURE,
- GRALLOC_USAGE_HW_COMPOSER,
- GRALLOC_USAGE_HW_VIDEO_ENCODER,
- GRALLOC_USAGE_PROTECTED,
- GRALLOC_USAGE_SW_WRITE_OFTEN,
- GRALLOC_USAGE_HW_RENDER,
-};
-
-#endif
-
-/** \mainpage Codec2
- *
- * Codec2 is a frame-based data processing API used by android.
- *
- * The framework accesses components via the \ref API.
- */
-
-/** \ingroup API
- *
- * The Codec2 API defines the operation of data processing components and their interaction with
- * the rest of the system.
- *
- * Coding Conventions
- *
- * Mitigating Binary Compatibility.
- *
- * While full binary compatibility is not a goal of the API (due to our use of STL), we try to
- * mitigate binary breaks by adhering to the following conventions:
- *
- * - at most one vtable with placeholder virtual methods
- * - all optional/placeholder virtual methods returning a status_t, with C2_NOT_IMPLEMENTED not
- * requiring any update to input/output arguments.
- * - limiting symbol export of inline methods
- * - use of pimpl (or shared-pimpl)
- *
- * Naming
- *
- * - all classes and types prefix with C2
- * - classes for internal use prefix with _C2
- * - enum values in global namespace prefix with C2_ all caps
- * - enum values inside classes have no C2_ prefix as class already has it
- * - supporting two kinds of enum naming: all-caps and kCamelCase
- * \todo revisit kCamelCase for param-type
- *
- * Aspects
- *
- * Aspects define certain common behavior across a group of objects.
- * - classes whose name matches _C2.*Aspect
- * - only protected constructors
- * - no desctructor and copiable
- * - all methods are inline or static (this is opposite of the interface paradigm where all methods
- * are virtual, which would not work due to the at most one vtable rule.)
- * - only private variables (this prevents subclasses interfering with the aspects.)
- */
-
-/// \defgroup types Common Types
-/// @{
-
-/**
- * C2String: basic string implementation
- */
-typedef std::string C2String;
-typedef const char *C2StringLiteral;
-
-/**
- * C2Error: status codes used.
- */
-typedef int32_t C2Error;
-enum {
-#ifndef __ANDROID__
- OK = 0,
- BAD_VALUE = -EINVAL,
- BAD_INDEX = -EOVERFLOW,
- UNKNOWN_TRANSACTION = -EBADMSG,
- ALREADY_EXISTS = -EEXIST,
- NAME_NOT_FOUND = -ENOENT,
- INVALID_OPERATION = -ENOSYS,
- NO_MEMORY = -ENOMEM,
- PERMISSION_DENIED = -EPERM,
- TIMED_OUT = -ETIMEDOUT,
- UNKNOWN_ERROR = -EINVAL,
-#endif
-
- C2_OK = OK, ///< operation completed successfully
-
- // bad input
- C2_BAD_VALUE = BAD_VALUE, ///< argument has invalid value (user error)
- C2_BAD_INDEX = BAD_INDEX, ///< argument uses invalid index (user error)
- C2_UNSUPPORTED = UNKNOWN_TRANSACTION, ///< argument/index is value but not supported \todo is this really BAD_INDEX/VALUE?
-
- // bad sequencing of events
- C2_DUPLICATE = ALREADY_EXISTS, ///< object already exists
- C2_NOT_FOUND = NAME_NOT_FOUND, ///< object not found
- C2_BAD_STATE = INVALID_OPERATION, ///< operation is not permitted in the current state
-
- // bad environment
- C2_NO_MEMORY = NO_MEMORY, ///< not enough memory to complete operation
- C2_NO_PERMISSION = PERMISSION_DENIED, ///< missing permission to complete operation
- C2_TIMED_OUT = TIMED_OUT, ///< operation did not complete within timeout
-
- // bad versioning
- C2_NOT_IMPLEMENTED = UNKNOWN_TRANSACTION, ///< operation is not implemented (optional only) \todo for now reuse error code
-
- // unknown fatal
- C2_CORRUPTED = UNKNOWN_ERROR, ///< some unexpected error prevented the operation
-};
-
-/// @}
-
-/// \defgroup utils Utilities
-/// @{
-
-#define C2_DO_NOT_COPY(type, args...) \
- type args& operator=(const type args&) = delete; \
- type(const type args&) = delete; \
-
-#define C2_PURE __attribute__((pure))
-#define C2_CONST __attribute__((const))
-#define C2_HIDE __attribute__((visibility("hidden")))
-#define C2_INTERNAL __attribute__((internal_linkage))
-
-#define DEFINE_OTHER_COMPARISON_OPERATORS(type) \
- inline bool operator!=(const type &other) { return !(*this == other); } \
- inline bool operator<=(const type &other) { return (*this == other) || (*this < other); } \
- inline bool operator>=(const type &other) { return !(*this < other); } \
- inline bool operator>(const type &other) { return !(*this < other) && !(*this == other); }
-
-#define DEFINE_FIELD_BASED_COMPARISON_OPERATORS(type, field) \
- inline bool operator<(const type &other) const { return field < other.field; } \
- inline bool operator==(const type &other) const { return field == other.field; } \
- DEFINE_OTHER_COMPARISON_OPERATORS(type)
-
-/// \cond INTERNAL
-
-/// \defgroup utils_internal
-/// @{
-
-template<typename... T> struct c2_types;
-
-/** specialization for a single type */
-template<typename T>
-struct c2_types<T> {
- typedef typename std::decay<T>::type wide_type;
- typedef wide_type narrow_type;
- typedef wide_type mintype;
-};
-
-/** specialization for two types */
-template<typename T, typename U>
-struct c2_types<T, U> {
- static_assert(std::is_floating_point<T>::value == std::is_floating_point<U>::value,
- "mixing floating point and non-floating point types is disallowed");
- static_assert(std::is_signed<T>::value == std::is_signed<U>::value,
- "mixing signed and unsigned types is disallowed");
-
- typedef typename std::decay<
- decltype(true ? std::declval<T>() : std::declval<U>())>::type wide_type;
- typedef typename std::decay<
- typename std::conditional<sizeof(T) < sizeof(U), T, U>::type>::type narrow_type;
- typedef typename std::conditional<
- std::is_signed<T>::value, wide_type, narrow_type>::type mintype;
-};
-
-/// @}
-
-/// \endcond
-
-/**
- * Type support utility class. Only supports similar classes, such as:
- * - all floating point
- * - all unsigned/all signed
- * - all pointer
- */
-template<typename T, typename U, typename... V>
-struct c2_types<T, U, V...> {
- /** Common type that accommodates all template parameter types. */
- typedef typename c2_types<typename c2_types<T, U>::wide_type, V...>::wide_type wide_type;
- /** Narrowest type of the template parameter types. */
- typedef typename c2_types<typename c2_types<T, U>::narrow_type, V...>::narrow_type narrow_type;
- /** Type that accommodates the minimum value for any input for the template parameter types. */
- typedef typename c2_types<typename c2_types<T, U>::mintype, V...>::mintype mintype;
-};
-
-/**
- * \ingroup utils_internal
- * specialization for two values */
-template<typename T, typename U>
-inline constexpr typename c2_types<T, U>::wide_type c2_max(const T a, const U b) {
- typedef typename c2_types<T, U>::wide_type wide_type;
- return ({ wide_type a_(a), b_(b); a_ > b_ ? a_ : b_; });
-}
-
-/**
- * Finds the maximum value of a list of "similarly typed" values.
- *
- * This is an extension to std::max where the types do not have to be identical, and the smallest
- * resulting type is used that accommodates the argument types.
- *
- * \note Value types must be similar, e.g. all floating point, all pointers, all signed, or all
- * unsigned.
- *
- * @return the largest of the input arguments.
- */
-template<typename T, typename U, typename... V>
-constexpr typename c2_types<T, U, V...>::wide_type c2_max(const T a, const U b, const V ... c) {
- typedef typename c2_types<T, U, V...>::wide_type wide_type;
- return ({ wide_type a_(a), b_(c2_max(b, c...)); a_ > b_ ? a_ : b_; });
-}
-
-/**
- * \ingroup utils_internal
- * specialization for two values */
-template<typename T, typename U>
-inline constexpr typename c2_types<T, U>::mintype c2_min(const T a, const U b) {
- typedef typename c2_types<T, U>::wide_type wide_type;
- return ({
- wide_type a_(a), b_(b);
- static_cast<typename c2_types<T, U>::mintype>(a_ < b_ ? a_ : b_);
- });
-}
-
-/**
- * Finds the minimum value of a list of "similarly typed" values.
- *
- * This is an extension to std::min where the types do not have to be identical, and the smallest
- * resulting type is used that accommodates the argument types.
- *
- * \note Value types must be similar, e.g. all floating point, all pointers, all signed, or all
- * unsigned.
- *
- * @return the smallest of the input arguments.
- */
-template<typename T, typename U, typename... V>
-constexpr typename c2_types<T, U, V...>::mintype c2_min(const T a, const U b, const V ... c) {
- typedef typename c2_types<U, V...>::mintype rest_type;
- typedef typename c2_types<T, rest_type>::wide_type wide_type;
- return ({
- wide_type a_(a), b_(c2_min(b, c...));
- static_cast<typename c2_types<T, rest_type>::mintype>(a_ < b_ ? a_ : b_);
- });
-}
-
-/// @}
-
-#ifdef __ANDROID__
-} // namespace android
-#endif
-
-#endif // C2_H_
diff --git a/media/libstagefright/codec2/include/C2Buffer.h b/media/libstagefright/codec2/include/C2Buffer.h
deleted file mode 100644
index 9f6b487..0000000
--- a/media/libstagefright/codec2/include/C2Buffer.h
+++ /dev/null
@@ -1,1777 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef C2BUFFER_H_
-#define C2BUFFER_H_
-
-#include <C2.h>
-#include <C2Param.h> // for C2Info
-
-#include <list>
-#include <memory>
-
-typedef int C2Fence;
-
-#ifdef __ANDROID__
-
-// #include <system/window.h>
-#include <cutils/native_handle.h>
-#include <hardware/gralloc.h> // TODO: remove
-
-typedef native_handle_t C2Handle;
-
-#else
-
-typedef void* C2Handle;
-
-#endif
-
-namespace android {
-
-/// \defgroup buffer Buffers
-/// @{
-
-/// \defgroup buffer_sync Synchronization
-/// @{
-
-/**
- * Synchronization is accomplished using event and fence objects.
- *
- * These are cross-process extensions of promise/future infrastructure.
- * Events are analogous to std::promise<void>, whereas fences are to std::shared_future<void>.
- *
- * Fences and events are shareable/copyable.
- *
- * Fences are used in two scenarios, and all copied instances refer to the same event.
- * \todo do events need to be copyable or should they be unique?
- *
- * acquire sync fence object: signaled when it is safe for the component or client to access
- * (the contents of) an object.
- *
- * release sync fence object: \todo
- *
- * Fences can be backed by hardware. Hardware fences are guaranteed to signal NO MATTER WHAT within
- * a short (platform specific) amount of time; this guarantee is usually less than 15 msecs.
- */
-
-/**
- * Fence object used by components and the framework.
- *
- * Implements the waiting for an event, analogous to a 'future'.
- *
- * To be implemented by vendors if using HW fences.
- */
-class C2Fence {
-public:
- /**
- * Waits for a fence to be signaled with a timeout.
- *
- * \todo a mechanism to cancel a wait - for now the only way to do this is to abandon the
- * event, but fences are shared so canceling a wait will cancel all waits.
- *
- * \param timeoutNs the maximum time to wait in nsecs
- *
- * \retval C2_OK the fence has been signaled
- * \retval C2_TIMED_OUT the fence has not been signaled within the timeout
- * \retval C2_BAD_STATE the fence has been abandoned without being signaled (it will never
- * be signaled)
- * \retval C2_NO_PERMISSION no permission to wait for the fence (unexpected - system)
- * \retval C2_CORRUPTED some unknown error prevented waiting for the fence (unexpected)
- */
- C2Error wait(nsecs_t timeoutNs);
-
- /**
- * Used to check if this fence is valid (if there is a chance for it to be signaled.)
- * A fence becomes invalid if the controling event is destroyed without it signaling the fence.
- *
- * \return whether this fence is valid
- */
- bool valid() const;
-
- /**
- * Used to check if this fence has been signaled (is ready).
- *
- * \return whether this fence has been signaled
- */
- bool ready() const;
-
- /**
- * Returns a file descriptor that can be used to wait for this fence in a select system call.
- * \note The returned file descriptor, if valid, must be closed by the caller.
- *
- * This can be used in e.g. poll() system calls. This file becomes readable (POLLIN) when the
- * fence is signaled, and bad (POLLERR) if the fence is abandoned.
- *
- * \return a file descriptor representing this fence (with ownership), or -1 if the fence
- * has already been signaled (\todo or abandoned).
- *
- * \todo this must be compatible with fences used by gralloc
- */
- int fd() const;
-
- /**
- * Returns whether this fence is a hardware-backed fence.
- * \return whether this is a hardware fence
- */
- bool isHW() const;
-
-private:
- class Impl;
- std::shared_ptr<Impl> mImpl;
-};
-
-/**
- * Event object used by components and the framework.
- *
- * Implements the signaling of an event, analogous to a 'promise'.
- *
- * Hardware backed events do not go through this object, and must be exposed directly as fences
- * by vendors.
- */
-class C2Event {
-public:
- /**
- * Returns a fence for this event.
- */
- C2Fence fence() const;
-
- /**
- * Signals (all) associated fence(s).
- * This has no effect no effect if the event was already signaled or abandoned.
- *
- * \retval C2_OK the fence(s) were successfully signaled
- * \retval C2_BAD_STATE the fence(s) have already been abandoned or merged (caller error)
- * \retval C2_ALREADY_EXISTS the fence(s) have already been signaled (caller error)
- * \retval C2_NO_PERMISSION no permission to signal the fence (unexpected - system)
- * \retval C2_CORRUPTED some unknown error prevented signaling the fence(s) (unexpected)
- */
- C2Error fire();
-
- /**
- * Trigger this event from the merging of the supplied fences. This means that it will be
- * abandoned if any of these fences have been abandoned, and it will be fired if all of these
- * fences have been signaled.
- *
- * \retval C2_OK the merging was successfully done
- * \retval C2_NO_MEMORY not enough memory to perform the merging
- * \retval C2_ALREADY_EXISTS the fence have already been merged (caller error)
- * \retval C2_BAD_STATE the fence have already been signaled or abandoned (caller error)
- * \retval C2_NO_PERMISSION no permission to merge the fence (unexpected - system)
- * \retval C2_CORRUPTED some unknown error prevented merging the fence(s) (unexpected)
- */
- C2Error merge(std::vector<C2Fence> fences);
-
- /**
- * Abandons the event and any associated fence(s).
- * \note Call this to explicitly abandon an event before it is destructed to avoid a warning.
- *
- * This has no effect no effect if the event was already signaled or abandoned.
- *
- * \retval C2_OK the fence(s) were successfully signaled
- * \retval C2_BAD_STATE the fence(s) have already been signaled or merged (caller error)
- * \retval C2_ALREADY_EXISTS the fence(s) have already been abandoned (caller error)
- * \retval C2_NO_PERMISSION no permission to abandon the fence (unexpected - system)
- * \retval C2_CORRUPTED some unknown error prevented signaling the fence(s) (unexpected)
- */
- C2Error abandon();
-
-private:
- class Impl;
- std::shared_ptr<Impl> mImpl;
-};
-
-/// \addtogroup buf_internal Internal
-/// @{
-
-/**
- * Interface for objects that encapsulate an updatable error value.
- */
-struct _C2InnateError {
- inline C2Error error() const { return mError; }
-
-protected:
- _C2InnateError(C2Error error) : mError(error) { }
-
- C2Error mError; // this error is updatable by the object
-};
-
-/// @}
-
-/**
- * This is a utility template for objects protected by an acquire fence, so that errors during
- * acquiring the object are propagated to the object itself.
- */
-template<typename T>
-class C2Acquirable : public C2Fence {
-public:
- /**
- * Acquires the object protected by an acquire fence. Any errors during the mapping will be
- * passed to the object.
- *
- * \return acquired object potentially invalidated if waiting for the fence failed.
- */
- T get();
-
-protected:
- C2Acquirable(C2Error error, C2Fence fence, T t) : C2Fence(fence), mInitialError(error), mT(t) { }
-
-private:
- C2Error mInitialError;
- T mT; // TODO: move instead of copy
-};
-
-/// @}
-
-/// \defgroup linear Linear Data Blocks
-/// @{
-
-/**************************************************************************************************
- LINEAR ASPECTS, BLOCKS AND VIEWS
-**************************************************************************************************/
-
-/**
- * Common aspect for all objects that have a linear capacity.
- */
-class _C2LinearCapacityAspect {
-/// \name Linear capacity interface
-/// @{
-public:
- inline uint32_t capacity() const { return mCapacity; }
-
-protected:
-
-#if UINTPTR_MAX == 0xffffffff
- static_assert(sizeof(size_t) == sizeof(uint32_t), "size_t is too big");
-#else
- static_assert(sizeof(size_t) > sizeof(uint32_t), "size_t is too small");
- // explicitly disable construction from size_t
- inline explicit _C2LinearCapacityAspect(size_t capacity) = delete;
-#endif
-
- inline explicit _C2LinearCapacityAspect(uint32_t capacity)
- : mCapacity(capacity) { }
-
- inline explicit _C2LinearCapacityAspect(const _C2LinearCapacityAspect *parent)
- : mCapacity(parent == nullptr ? 0 : parent->capacity()) { }
-
-private:
- const uint32_t mCapacity;
-/// @}
-};
-
-/**
- * Aspect for objects that have a linear range.
- *
- * This class is copiable.
- */
-class _C2LinearRangeAspect : public _C2LinearCapacityAspect {
-/// \name Linear range interface
-/// @{
-public:
- inline uint32_t offset() const { return mOffset; }
- inline uint32_t size() const { return mSize; }
-
-protected:
- inline explicit _C2LinearRangeAspect(const _C2LinearCapacityAspect *parent)
- : _C2LinearCapacityAspect(parent),
- mOffset(0),
- mSize(capacity()) { }
-
- inline _C2LinearRangeAspect(const _C2LinearCapacityAspect *parent, size_t offset, size_t size)
- : _C2LinearCapacityAspect(parent),
- mOffset(c2_min(offset, capacity())),
- mSize(c2_min(size, capacity() - mOffset)) { }
-
- // subsection of the two [offset, offset + size] ranges
- inline _C2LinearRangeAspect(const _C2LinearRangeAspect *parent, size_t offset, size_t size)
- : _C2LinearCapacityAspect(parent == nullptr ? 0 : parent->capacity()),
- mOffset(c2_min(c2_max(offset, parent == nullptr ? 0 : parent->offset()), capacity())),
- mSize(c2_min(c2_min(size, parent == nullptr ? 0 : parent->size()), capacity() - mOffset)) { }
-
-private:
- friend class _C2EditableLinearRange;
- // invariants 0 <= mOffset <= mOffset + mSize <= capacity()
- uint32_t mOffset;
- uint32_t mSize;
-/// @}
-};
-
-/**
- * Aspect for objects that have an editable linear range.
- *
- * This class is copiable.
- */
-class _C2EditableLinearRange : public _C2LinearRangeAspect {
-protected:
- inline explicit _C2EditableLinearRange(const _C2LinearCapacityAspect *parent)
- : _C2LinearRangeAspect(parent) { }
-
- inline _C2EditableLinearRange(const _C2LinearCapacityAspect *parent, size_t offset, size_t size)
- : _C2LinearRangeAspect(parent, offset, size) { }
-
- // subsection of the two [offset, offset + size] ranges
- inline _C2EditableLinearRange(const _C2LinearRangeAspect *parent, size_t offset, size_t size)
- : _C2LinearRangeAspect(parent, offset, size) { }
-
-/// \name Editable linear range interface
-/// @{
-
- /**
- * Sets the offset to |offset|, while trying to keep the end of the buffer unchanged (e.g.
- * size will grow if offset is decreased, and may shrink if offset is increased.) Returns
- * true if successful, which is equivalent to 0 <= |offset| <= capacity().
- *
- * Note: setting offset and size will yield different results depending on the order of the
- * operations. Always set offset first to ensure proper size.
- */
- inline bool setOffset(uint32_t offset) {
- if (offset > capacity()) {
- return false;
- }
-
- if (offset > mOffset + mSize) {
- mSize = 0;
- } else {
- mSize = mOffset + mSize - offset;
- }
- mOffset = offset;
- return true;
- }
- /**
- * Sets the size to |size|. Returns true if successful, which is equivalent to
- * 0 <= |size| <= capacity() - offset().
- *
- * Note: setting offset and size will yield different results depending on the order of the
- * operations. Always set offset first to ensure proper size.
- */
- inline bool setSize(uint32_t size) {
- if (size > capacity() - mOffset) {
- return false;
- } else {
- mSize = size;
- return true;
- }
- }
- /**
- * Sets the offset to |offset| with best effort. Same as setOffset() except that offset will
- * be clamped to the buffer capacity.
- *
- * Note: setting offset and size (even using best effort) will yield different results depending
- * on the order of the operations. Always set offset first to ensure proper size.
- */
- inline void setOffset_be(uint32_t offset) {
- if (offset > capacity()) {
- offset = capacity();
- }
- if (offset > mOffset + mSize) {
- mSize = 0;
- } else {
- mSize = mOffset + mSize - offset;
- }
- mOffset = offset;
- }
- /**
- * Sets the size to |size| with best effort. Same as setSize() except that the selected region
- * will be clamped to the buffer capacity (e.g. size is clamped to [0, capacity() - offset()]).
- *
- * Note: setting offset and size (even using best effort) will yield different results depending
- * on the order of the operations. Always set offset first to ensure proper size.
- */
- inline void setSize_be(uint32_t size) {
- mSize = std::min(size, capacity() - mOffset);
- }
-/// @}
-};
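To see why the notes above insist on setting the offset before the size, the following self-contained sketch re-implements the documented clamping rules on a plain struct (it does not use the protected members above, and the numbers are only illustrative):

#include <cassert>
#include <cstdint>

struct Range { uint32_t capacity; uint32_t offset; uint32_t size; };

// documented setOffset rule: keep the end of the range fixed where possible
bool setOffset(Range &r, uint32_t offset) {
    if (offset > r.capacity) return false;
    r.size = (offset > r.offset + r.size) ? 0 : r.offset + r.size - offset;
    r.offset = offset;
    return true;
}

// documented setSize rule: size may not exceed capacity - offset
bool setSize(Range &r, uint32_t size) {
    if (size > r.capacity - r.offset) return false;
    r.size = size;
    return true;
}

int main() {
    Range a{100, 0, 100}, b{100, 0, 100};
    setOffset(a, 40); setSize(a, 60);    // offset first: a ends up {offset 40, size 60}
    setSize(b, 60);   setOffset(b, 40);  // size first:   b ends up {offset 40, size 20}
    assert(a.offset == 40 && a.size == 60);
    assert(b.offset == 40 && b.size == 20);
    return 0;
}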
-
-// ================================================================================================
-// BLOCKS
-// ================================================================================================
-
-/**
- * Blocks are sections of allocations. They can be either 1D or 2D.
- */
-
-class C2LinearAllocation;
-
-class C2Block1D : public _C2LinearRangeAspect {
-public:
- const C2Handle *handle() const;
-
-protected:
- C2Block1D(std::shared_ptr<C2LinearAllocation> alloc);
- C2Block1D(std::shared_ptr<C2LinearAllocation> alloc, size_t offset, size_t size);
-
-private:
- class Impl;
- std::shared_ptr<Impl> mImpl;
-};
-
-/**
- * Read view provides read-only access for a linear memory segment.
- *
- * This class is copiable.
- */
-class C2ReadView : public _C2LinearCapacityAspect {
-public:
- /**
- * \return pointer to the start of the block or nullptr on error.
- */
- const uint8_t *data();
-
- /**
- * Returns a portion of this view.
- *
- * \param offset the start offset of the portion. \note This is clamped to the capacity of this
- * view.
- * \param size the size of the portion. \note This is clamped to the remaining data from offset.
- *
- * \return a read view containing a portion of this view
- */
- C2ReadView subView(size_t offset, size_t size) const;
-
- /**
- * \return error during the creation/mapping of this view.
- */
- C2Error error();
-
-private:
- class Impl;
- std::shared_ptr<Impl> mImpl;
-};
-
-/**
- * Write view provides read/write access for a linear memory segment.
- *
- * This class is copiable. \todo movable only?
- */
-class C2WriteView : public _C2EditableLinearRange {
-public:
- /**
- * Start of the block.
- *
- * \return pointer to the start of the block or nullptr on error.
- */
- uint8_t *base();
-
- /**
- * \return pointer to the block at the current offset or nullptr on error.
- */
- uint8_t *data();
-
- /**
- * \return error during the creation/mapping of this view.
- */
- C2Error error();
-
-private:
- class Impl;
- /// \todo should this be unique_ptr to make this movable only - to avoid inconsistent regions
- /// between copies.
- std::shared_ptr<Impl> mImpl;
-};
-
-/**
- * A constant (read-only) linear block (portion of an allocation) with an acquire fence.
- * Blocks are unmapped when created, and can be mapped into a read view on demand.
- *
- * This class is copiable and contains a reference to the allocation that it is based on.
- */
-class C2ConstLinearBlock : public C2Block1D {
-public:
- /**
- * Maps this block into memory and returns a read view for it.
- *
- * \return a read view for this block.
- */
- C2Acquirable<C2ReadView> map() const;
-
- /**
- * Returns a portion of this block.
- *
- * \param offset the start offset of the portion. \note This is clamped to the capacity of this
- * block.
- * \param size the size of the portion. \note This is clamped to the remaining data from offset.
- *
- * \return a constant linear block containing a portion of this block
- */
- C2ConstLinearBlock subBlock(size_t offset, size_t size) const;
-
- /**
- * Returns the acquire fence for this block.
- *
- * \return a fence that must be waited on before reading the block.
- */
- C2Fence fence() const { return mFence; }
-
-private:
- C2Fence mFence;
-};
-
-/**
- * Linear block is a writeable 1D block. Once written, it can be shared in whole or in parts with
- * consumers/readers as read-only const linear block(s).
- */
-class C2LinearBlock : public C2Block1D {
-public:
- /**
- * Maps this block into memory and returns a write view for it.
- *
- * \return a write view for this block.
- */
- C2Acquirable<C2WriteView> map();
-
- /**
- * Creates a read-only const linear block for a portion of this block; optionally protected
- * by an acquire fence. There are two ways to use this:
- *
- * 1) share a ready block after writing data into the block. In this case no fence shall be
- * supplied, and the block shall not be modified after calling this method.
- * 2) share block metadata before actually (finishing) writing the data into the block. In
- * this case a fence must be supplied that will be triggered when the data is written.
- * The block shall only be modified before the fence is signaled.
- */
- C2ConstLinearBlock share(size_t offset, size_t size, C2Fence fence);
-};
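A minimal sketch of the first usage pattern above (write everything, then share). fillBitstream is a hypothetical producer function, the offset and byte counts are illustrative, and a default-constructed C2Fence is assumed here to act as "no fence":

#include <list>
#include <memory>

// hypothetical producer: writes up to |capacity| bytes into |dst| and returns the byte count
size_t fillBitstream(uint8_t *dst, size_t capacity);

void produceOutput(const std::shared_ptr<C2LinearBlock> &block,
                   std::list<C2ConstLinearBlock> *out) {
    C2WriteView view = block->map().get();   // acquire and map in one step
    if (view.error() != C2_OK) {
        return;                              // mapping failed; the error travels with the view
    }
    size_t written = fillBitstream(view.data(), view.size());
    // the data is fully written, so no real fence is needed (pattern 1 above)
    out->push_back(block->share(0 /* offset */, written, C2Fence()));
}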
-
-/// @}
-
-/**************************************************************************************************
- CIRCULAR BLOCKS AND VIEWS
-**************************************************************************************************/
-
-/// \defgroup circular Circular buffer support
-/// @{
-
-/**
- * Circular blocks can be used to share data between a writer and a reader (and/or other consumers)
- * in a memory-efficient way by reusing a section of memory. Circular blocks are a bit more complex
- * than single reader/single writer schemes to facilitate block-based consuming of data.
- *
- * They can operate in two modes:
- *
- * 1) one writer that creates blocks to be consumed (this model can be used by components)
- *
- * 2) one writer that writes continuously, and one reader that can create blocks to be consumed
- * by further recipients (this model is used by the framework, and cannot be used by components.)
- *
- * Circular blocks have four segments with running pointers:
- * - reserved: data reserved and available for the writer
- * - committed: data committed by the writer and available to the reader (if present)
- * - used: data used by consumers (if present)
- * - available: unused data available to be reserved
- */
-class C2CircularBlock : public C2Block1D {
- // TODO: add methods
-
-private:
- size_t mReserved __unused; // end of reserved section
- size_t mCommitted __unused; // end of committed section
- size_t mUsed __unused; // end of used section
- size_t mFree __unused; // end of free section
-};
-
-class _C2CircularBlockSegment : public _C2LinearCapacityAspect {
-public:
- /**
- * Returns the available size for this segment.
- *
- * \return currently available size for this segment
- */
- size_t available() const;
-
- /**
- * Reserve some space for this segment from its current start.
- *
- * \param size desired space in bytes
- * \param fence a pointer to an acquire fence. If non-null, the reservation is asynchronous and
- * a fence will be stored here that will be signaled when the reservation is
- * complete. If null, the reservation is synchronous.
- *
- * \retval C2_OK the space was successfully reserved
- * \retval C2_NO_MEMORY the space requested cannot be reserved
- * \retval C2_TIMED_OUT the reservation timed out \todo when?
- * \retval C2_CORRUPTED some unknown error prevented reserving space. (unexpected)
- */
- C2Error reserve(size_t size, C2Fence *fence /* nullable */);
-
- /**
- * Abandons a portion of this segment. This will move the beginning of this segment forward.
- *
- * \note This method is only allowed if this segment is producing blocks.
- *
- * \param size number of bytes to abandon
- *
- * \retval C2_OK the data was successfully abandoned
- * \retval C2_TIMED_OUT the operation timed out (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented abandoning the data (unexpected)
- */
- C2Error abandon(size_t size);
-
- /**
- * Share a portion as block(s) with consumers (these are moved to the used section).
- *
- * \note This method is only allowed if this segment is producing blocks.
- * \note Share does not move the beginning of the segment. (\todo add abandon/offset?)
- *
- * \param size number of bytes to share
- * \param fence fence to be used for the section
- * \param blocks list where the blocks of the section are appended to
- *
- * \retval C2_OK the portion was successfully shared
- * \retval C2_NO_MEMORY not enough memory to share the portion
- * \retval C2_TIMED_OUT the operation timed out (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented sharing the data (unexpected)
- */
- C2Error share(size_t size, C2Fence fence, std::list<C2ConstLinearBlock> &blocks);
-
- /**
- * Returns the beginning offset of this segment from the start of this circular block.
- *
- * @return beginning offset
- */
- size_t begin();
-
- /**
- * Returns the end offset of this segment from the start of this circular block.
- *
- * @return end offset
- */
- size_t end();
-};
-
-/**
- * A circular write-view is a dynamic mapped view for a segment of a circular block. Care must be
- * taken when using this view so that only the section owned by the segment is modified.
- */
-class C2CircularWriteView : public _C2LinearCapacityAspect {
-public:
- /**
- * Start of the circular block.
- * \note the segment does not own this pointer.
- *
- * \return pointer to the start of the circular block or nullptr on error.
- */
- uint8_t *base();
-
- /**
- * \return error during the creation/mapping of this view.
- */
- C2Error error();
-};
-
-/**
- * The writer of a circular buffer.
- *
- * Can commit data to a reader (not supported for components) OR share data blocks directly with a
- * consumer.
- *
- * If a component supports outputting data into circular buffers, it must allocate a circular
- * block and use a circular writer.
- */
-class C2CircularWriter : public _C2CircularBlockSegment {
-public:
- /**
- * Commits a portion of this segment to the next segment. This moves the beginning of the
- * segment.
- *
- * \param size number of bytes to commit to the next segment
- * \param fence fence used for the commit (the fence must signal before the data is committed)
- */
- C2Error commit(size_t size, C2Fence fence);
-
- /**
- * Maps this block into memory and returns a write view for it.
- *
- * \return a write view for this block.
- */
- C2Acquirable<C2CircularWriteView> map();
-};
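A sketch of the block-producing path a component might follow with this writer: reserve space synchronously, map and fill it, then share the written bytes as const blocks. The byte count is arbitrary and a default-constructed C2Fence is again assumed to act as "no fence":

#include <list>

void produceFromCircular(C2CircularWriter *writer, std::list<C2ConstLinearBlock> *readyBlocks) {
    // a null fence pointer requests a synchronous reservation, per the documentation above
    if (writer->reserve(4096, nullptr) != C2_OK) {
        return;                               // e.g. C2_NO_MEMORY: not enough free space
    }
    C2CircularWriteView view = writer->map().get();
    if (view.error() != C2_OK) {
        return;
    }
    // view.base() points at the start of the circular block, not of the reserved section;
    // only the region owned by this writer may be written here
    (void)view.base();
    // hand the written bytes to consumers; share() does not move the segment start
    writer->share(4096, C2Fence(), *readyBlocks);
}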
-
-/// @}
-
-/// \defgroup graphic Graphic Data Blocks
-/// @{
-
-/**
- * Interface for objects that have a width and height (planar capacity).
- */
-class _C2PlanarCapacityAspect {
-/// \name Planar capacity interface
-/// @{
-public:
- inline uint32_t width() const { return mWidth; }
- inline uint32_t height() const { return mHeight; }
-
-protected:
- inline _C2PlanarCapacityAspect(uint32_t width, uint32_t height)
- : mWidth(width), mHeight(height) { }
-
- inline _C2PlanarCapacityAspect(const _C2PlanarCapacityAspect *parent)
- : mWidth(parent == nullptr ? 0 : parent->width()),
- mHeight(parent == nullptr ? 0 : parent->height()) { }
-
-private:
- const uint32_t mWidth;
- const uint32_t mHeight;
-/// @}
-};
-
-/**
- * C2Rect: rectangle type with non-negative coordinates.
- *
- * \note This struct has public fields without getters/setters. All methods are inline.
- */
-struct C2Rect {
-// public:
- uint32_t mLeft;
- uint32_t mTop;
- uint32_t mWidth;
- uint32_t mHeight;
-
- inline C2Rect(uint32_t width, uint32_t height)
- : C2Rect(width, height, 0, 0) { }
-
- inline C2Rect(uint32_t width, uint32_t height, uint32_t left, uint32_t top)
- : mLeft(left), mTop(top), mWidth(width), mHeight(height) { }
-
- // utility methods
-
- inline bool isEmpty() const {
- return mWidth == 0 || mHeight == 0;
- }
-
- inline bool isValid() const {
- // valid if left + width and top + height do not overflow uint32_t
- return mLeft <= ~mWidth && mTop <= ~mHeight;
- }
-
- inline operator bool() const {
- return isValid() && !isEmpty();
- }
-
- inline bool operator!() const {
- return !bool(*this);
- }
-
- inline bool contains(const C2Rect &other) const {
- if (!isValid() || !other.isValid()) {
- return false;
- } else if (other.isEmpty()) {
- return true;
- } else {
- return mLeft <= other.mLeft && mTop <= other.mTop
- && mLeft + mWidth >= other.mLeft + other.mWidth
- && mTop + mHeight >= other.mTop + other.mHeight;
- }
- }
-
- inline bool operator==(const C2Rect &other) const {
- if (!isValid()) {
- return !other.isValid();
- } else if (isEmpty()) {
- return other.isEmpty();
- } else {
- return mLeft == other.mLeft && mTop == other.mTop
- && mWidth == other.mWidth && mHeight == other.mHeight;
- }
- }
-
- inline bool operator!=(const C2Rect &other) const {
- return !operator==(other);
- }
-
- inline bool operator>=(const C2Rect &other) const {
- return contains(other);
- }
-
- inline bool operator>(const C2Rect &other) const {
- return contains(other) && !operator==(other);
- }
-
- inline bool operator<=(const C2Rect &other) const {
- return other.contains(*this);
- }
-
- inline bool operator<(const C2Rect &other) const {
- return other.contains(*this) && !operator==(other);
- }
-};
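A short illustration of the containment-based ordering defined above (the coordinates are arbitrary):

#include <cassert>

void rectExample() {
    C2Rect full(1920, 1080);            // width, height at (0, 0)
    C2Rect crop(1280, 720, 320, 180);   // width, height, left, top
    assert(full.contains(crop));        // crop lies entirely inside full
    assert(crop < full);                // strictly contained: <= holds and the rects differ
    assert(C2Rect(0, 0).isEmpty());     // zero width/height
    assert(!C2Rect(0, 0));              // empty rects convert to false
}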
-
-/**
- * C2PlaneInfo: information on the layout of flexible planes.
- *
- * Public fields without getters/setters.
- */
-struct C2PlaneInfo {
-// public:
- enum Channel : uint32_t {
- Y,
- R,
- G,
- B,
- A,
- Cr,
- Cb,
- } mChannel;
-
- int32_t mColInc; // column increment in bytes. may be negative
- int32_t mRowInc; // row increment in bytes. may be negative
- uint32_t mHorizSubsampling; // subsampling compared to width
- uint32_t mVertSubsampling; // subsampling compared to height
-
- uint32_t mBitDepth;
- uint32_t mAllocatedDepth;
-
- // smallest byte offset (relative to the plane's base sample) touched by a width x height plane
- inline ssize_t minOffset(uint32_t width, uint32_t height) {
- ssize_t offs = 0;
- if (width > 0 && mColInc < 0) {
- offs += mColInc * (ssize_t)(width - 1);
- }
- if (height > 0 && mRowInc < 0) {
- offs += mRowInc * (ssize_t)(height - 1);
- }
- return offs;
- }
-
- // one past the largest byte offset touched by a width x height plane at the given depth
- inline ssize_t maxOffset(uint32_t width, uint32_t height, uint32_t allocatedDepth) {
- ssize_t offs = (allocatedDepth + 7) >> 3;
- if (width > 0 && mColInc > 0) {
- offs += mColInc * (ssize_t)(width - 1);
- }
- if (height > 0 && mRowInc > 0) {
- offs += mRowInc * (ssize_t)(height - 1);
- }
- return offs;
- }
-};
-
-struct C2PlaneLayout {
-public:
- enum Type : uint32_t {
- MEDIA_IMAGE_TYPE_UNKNOWN = 0,
- MEDIA_IMAGE_TYPE_YUV = 0x100,
- MEDIA_IMAGE_TYPE_YUVA,
- MEDIA_IMAGE_TYPE_RGB,
- MEDIA_IMAGE_TYPE_RGBA,
- };
-
- Type mType;
- uint32_t mNumPlanes; // number of planes
-
- enum PlaneIndex : uint32_t {
- Y = 0,
- U = 1,
- V = 2,
- R = 0,
- G = 1,
- B = 2,
- A = 3,
- MAX_NUM_PLANES = 4,
- };
-
- C2PlaneInfo mPlanes[MAX_NUM_PLANES];
-};
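As an illustration, a semi-planar 4:2:0 (NV12-like) layout for a 1280x720 image could be described with these structs roughly as below; the strides, channel assignments and plane count are assumptions made for the example, not values produced by any particular allocator.

C2PlaneLayout describeNv12Like() {
    C2PlaneLayout layout;
    layout.mType = C2PlaneLayout::MEDIA_IMAGE_TYPE_YUV;
    layout.mNumPlanes = 3;

    // field order: channel, colInc, rowInc, horizSubsampling, vertSubsampling, bitDepth, allocatedDepth
    layout.mPlanes[C2PlaneLayout::Y] = { C2PlaneInfo::Y,  1, 1280, 1, 1, 8, 8 };
    layout.mPlanes[C2PlaneLayout::U] = { C2PlaneInfo::Cb, 2, 1280, 2, 2, 8, 8 };  // interleaved with Cr
    layout.mPlanes[C2PlaneLayout::V] = { C2PlaneInfo::Cr, 2, 1280, 2, 2, 8, 8 };

    // byte span of the Y plane for 1280x720:
    // ((8 + 7) >> 3) + 1 * (1280 - 1) + 1280 * (720 - 1) = 921600
    ssize_t ySpan = layout.mPlanes[C2PlaneLayout::Y].maxOffset(1280, 720, 8);
    (void)ySpan;
    return layout;
}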
-
-/**
- * Aspect for objects that have a planar section (crop rectangle).
- *
- * This class is copiable.
- */
-class _C2PlanarSection : public _C2PlanarCapacityAspect {
-/// \name Planar section interface
-/// @{
-public:
- // crop can be an empty rect, does not have to line up with subsampling
- // NOTE: we do not support floating-point crop
- inline const C2Rect crop() { return mCrop; }
-
- /**
- * Sets crop to crop intersected with [(0,0) .. (width, height)]
- */
- inline void setCrop_be(const C2Rect &crop);
-
- /**
- * If crop is within the dimensions of this object, it sets crop to it.
- *
- * \return true iff crop is within the dimensions of this object
- */
- inline bool setCrop(const C2Rect &crop);
-
-private:
- C2Rect mCrop;
-/// @}
-};
-
-class C2Block2D : public _C2PlanarSection {
-public:
- const C2Handle *handle() const;
-
-private:
- class Impl;
- std::shared_ptr<Impl> mImpl;
-};
-
-/**
- * Graphic view provides read or read-write access for a graphic block.
- *
- * This class is copiable.
- *
- * \note Due to the subsampling of graphic buffers, a read view must still contain a crop rectangle
- * to ensure subsampling is followed. This results in a nearly identical interface between read and
- * write views, so C2GraphicView can encompass both of them.
- */
-class C2GraphicView : public _C2PlanarSection {
-public:
- /**
- * \return pointer to the start of the block or nullptr on error.
- */
- const uint8_t *data() const;
-
- /**
- * \return pointer to the start of the block or nullptr on error.
- */
- uint8_t *data();
-
- /**
- * Returns a section of this view.
- *
- * \param rect the dimension of the section. \note This is clamped to the crop of this view.
- *
- * \return a read view containing the requested section of this view
- */
- const C2GraphicView subView(const C2Rect &rect) const;
- C2GraphicView subView(const C2Rect &rect);
-
- /**
- * \return error during the creation/mapping of this view.
- */
- C2Error error() const;
-
-private:
- class Impl;
- std::shared_ptr<Impl> mImpl;
-};
-
-/**
- * A constant (read-only) graphic block (portion of an allocation) with an acquire fence.
- * Blocks are unmapped when created, and can be mapped into a read view on demand.
- *
- * This class is copiable and contains a reference to the allocation that it is based on.
- */
-class C2ConstGraphicBlock : public C2Block2D {
-public:
- /**
- * Maps this block into memory and returns a read view for it.
- *
- * \return a read view for this block.
- */
- C2Acquirable<const C2GraphicView> map() const;
-
- /**
- * Returns a section of this block.
- *
- * \param rect the coordinates of the section. \note This is clamped to the crop rectangle of
- * this block.
- *
- * \return a constant graphic block containing a portion of this block
- */
- C2ConstGraphicBlock subBlock(const C2Rect &rect) const;
-
- /**
- * Returns the acquire fence for this block.
- *
- * \return a fence that must be waited on before reading the block.
- */
- C2Fence fence() const { return mFence; }
-
-private:
- C2Fence mFence;
-};
-
-/**
- * Graphic block is a writeable 2D block. Once written, it can be shared in whole or in part with
- * consumers/readers as read-only const graphic block.
- */
-class C2GraphicBlock : public C2Block2D {
-public:
- /**
- * Maps this block into memory and returns a write view for it.
- *
- * \return a write view for this block.
- */
- C2Acquirable<C2GraphicView> map();
-
- /**
- * Creates a read-only const graphic block for a portion of this block; optionally protected
- * by an acquire fence. There are two ways to use this:
- *
- * 1) share a ready block after writing data into the block. In this case no fence shall be
- * supplied, and the block shall not be modified after calling this method.
- * 2) share block metadata before actually (finishing) writing the data into the block. In
- * this case a fence must be supplied that will be triggered when the data is written.
- * The block shall only be modified before the fence is signaled.
- */
- C2ConstGraphicBlock share(const C2Rect &crop, C2Fence fence);
-};
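The 2D path mirrors the linear one: map the block, render into it, then publish the frame. The draft view above exposes only a single data() pointer (no per-plane layout), and a default-constructed C2Fence is assumed to act as "no fence" once the pixels are written. A sketch:

#include <memory>

C2ConstGraphicBlock publishFrame(const std::shared_ptr<C2GraphicBlock> &block) {
    C2GraphicView view = block->map().get();
    if (view.error() == C2_OK) {
        uint8_t *pixels = view.data();
        (void)pixels;                       // render into the buffer here
    }
    // share the whole frame; the crop rectangle names the shared region
    return block->share(C2Rect(block->width(), block->height()), C2Fence());
}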
-
-/// @}
-
-/// \defgroup buffer_obj Buffer objects
-/// @{
-
-// ================================================================================================
-// BUFFERS
-// ================================================================================================
-
-/// \todo: Do we still need this?
-///
-// There are 2 kinds of buffers: linear or graphic. Linear buffers can contain a single block, or
-// a list of blocks (LINEAR_CHUNKS). Support for list of blocks is optional, and can allow consuming
-// data from circular buffers or scattered data sources without extra memcpy. Currently, list of
-// graphic blocks is not supported.
-
-class C2LinearBuffer; // read-write buffer
-class C2GraphicBuffer; // read-write buffer
-class C2LinearChunksBuffer;
-
-/**
- * C2BufferData: the main, non-meta data of a buffer. A buffer can contain either linear blocks
- * or graphic blocks, and can contain either a single block or multiple blocks. This is determined
- * by its type.
- */
-class C2BufferData {
-public:
- /**
- * The type of buffer data.
- */
- enum Type : uint32_t {
- LINEAR, ///< the buffer contains a single linear block
- LINEAR_CHUNKS, ///< the buffer contains one or more linear blocks
- GRAPHIC, ///< the buffer contains a single graphic block
- GRAPHIC_CHUNKS, ///< the buffer contains one or more graphic blocks
- };
-
- /**
- * Gets the type of this buffer (data).
- * \return the type of this buffer data.
- */
- Type type() const;
-
- /**
- * Gets the linear blocks of this buffer.
- * \return a constant list of const linear blocks of this buffer.
- * \retval empty list if this buffer does not contain linear block(s).
- */
- const std::list<C2ConstLinearBlock> linearBlocks() const;
-
- /**
- * Gets the graphic blocks of this buffer.
- * \return a constant list of const graphic blocks of this buffer.
- * \retval empty list if this buffer does not contain graphic block(s).
- */
- const std::list<C2ConstGraphicBlock> graphicBlocks() const;
-
-private:
- class Impl;
- std::shared_ptr<Impl> mImpl;
-
-protected:
- // no public constructor
- // C2BufferData(const std::shared_ptr<const Impl> &impl) : mImpl(impl) {}
-};
-
-/**
- * C2Buffer: buffer base class. These are always used as shared_ptrs. Though the underlying buffer
- * objects (native buffers, ion buffers, or dmabufs) are reference-counted by the system,
- * C2Buffers hold only a single reference.
- *
- * These objects cannot be used on the stack.
- */
-class C2Buffer {
-public:
- /**
- * Gets the buffer's data.
- *
- * \return the buffer's data.
- */
- const C2BufferData data() const;
-
- /**
- * These will still work if used in onDeathNotify.
- */
-#if 0
- inline std::shared_ptr<C2LinearBuffer> asLinearBuffer() const {
- return mType == LINEAR ? std::shared_ptr::reinterpret_cast<C2LinearBuffer>(this) : nullptr;
- }
-
- inline std::shared_ptr<C2GraphicBuffer> asGraphicBuffer() const {
- return mType == GRAPHIC ? std::shared_ptr::reinterpret_cast<C2GraphicBuffer>(this) : nullptr;
- }
-
- inline std::shared_ptr<C2CircularBuffer> asCircularBuffer() const {
- return mType == CIRCULAR ? std::shared_ptr::reinterpret_cast<C2CircularBuffer>(this) : nullptr;
- }
-#endif
-
- ///@name Pre-destroy notification handling
- ///@{
-
- /**
- * Register for notification just prior to the destruction of this object.
- */
- typedef void (*OnDestroyNotify) (const C2Buffer *buf, void *arg);
-
- /**
- * Registers for a pre-destroy notification. This is called just prior to the destruction of
- * this buffer (when this buffer is no longer valid.)
- *
- * \param onDestroyNotify the notification callback
- * \param arg an arbitrary parameter passed to the callback
- *
- * \retval C2_OK the registration was successful.
- * \retval C2_DUPLICATE a notification was already registered for this callback and argument
- * \retval C2_NO_MEMORY not enough memory to register for this callback
- * \retval C2_CORRUPTED an unknown error prevented the registration (unexpected)
- */
- C2Error registerOnDestroyNotify(OnDestroyNotify *onDestroyNotify, void *arg = nullptr);
-
- /**
- * Unregisters a previously registered pre-destroy notification.
- *
- * \param onDestroyNotify the notification callback
- * \param arg an arbitrary parameter passed to the callback
- *
- * \retval C2_OK the unregistration was successful.
- * \retval C2_NOT_FOUND the notification was not found
- * \retval C2_CORRUPTED an unknown error prevented the registration (unexpected)
- */
- C2Error unregisterOnDestroyNotify(OnDestroyNotify *onDestroyNotify, void *arg = nullptr);
-
- ///@}
-
- virtual ~C2Buffer() = default;
-
- ///@name Buffer-specific arbitrary metadata handling
- ///@{
-
- /**
- * Gets the list of metadata associated with this buffer.
- *
- * \return a constant list of info objects associated with this buffer.
- */
- const std::list<std::shared_ptr<const C2Info>> infos() const;
-
- /**
- * Attaches (or updates) an (existing) metadata for this buffer.
- * If the metadata is stream specific, the stream information will be reset.
- *
- * \param info Metadata to update
- *
- * \retval C2_OK the metadata was successfully attached/updated.
- * \retval C2_NO_MEMORY not enough memory to attach the metadata (this return value is not
- * used if the same kind of metadata is already attached to the buffer).
- */
- C2Error setInfo(const std::shared_ptr<C2Info> &info);
-
- /**
- * Checks if there is a certain type of metadata attached to this buffer.
- *
- * \param index the parameter type of the metadata
- *
- * \return true iff there is a metadata with the parameter type attached to this buffer.
- */
- bool hasInfo(C2Param::Type index) const;
- std::shared_ptr<C2Info> removeInfo(C2Param::Type index) const;
- ///@}
-
-protected:
- // no public constructor
- inline C2Buffer() = default;
-
-private:
-// Type _mType;
-};
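A sketch of how a consumer might walk a finished buffer with the accessors above (read-only; error handling kept minimal):

#include <memory>

void inspectBuffer(const std::shared_ptr<C2Buffer> &buffer) {
    const C2BufferData data = buffer->data();
    if (data.type() == C2BufferData::LINEAR || data.type() == C2BufferData::LINEAR_CHUNKS) {
        for (const C2ConstLinearBlock &block : data.linearBlocks()) {
            C2ReadView view = block.map().get();
            if (view.error() == C2_OK) {
                const uint8_t *bytes = view.data();
                (void)bytes;                // parse the payload here
            }
        }
    } else {
        for (const C2ConstGraphicBlock &block : data.graphicBlocks()) {
            (void)block;                    // map() and read the pixels similarly
        }
    }
}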
-
-/**
- * An extension of C2Info objects that can contain arbitrary buffer data.
- *
- * \note This object is not describable and contains opaque data.
- */
-class C2InfoBuffer {
-public:
- /**
- * Gets the index of this info object.
- *
- * \return the parameter index.
- */
- const C2Param::Index index() const;
-
- /**
- * Gets the buffer's data.
- *
- * \return the buffer's data.
- */
- const C2BufferData data() const;
-};
-
-/// @}
-
-/**************************************************************************************************
- ALLOCATIONS
-**************************************************************************************************/
-
-/// \defgroup allocator Allocation and memory placement
-/// @{
-
-/**
- * Buffer/memory usage bits. These are used by the allocators to select optimal memory type/pool and
- * buffer layout.
- *
- * \note This struct has public fields without getters/setters. All methods are inline.
- */
-struct C2MemoryUsage {
-// public:
- // TODO: match these to gralloc1.h
- enum Consumer : uint64_t {
- kSoftwareRead = GRALLOC_USAGE_SW_READ_OFTEN,
- kRenderScriptRead = GRALLOC_USAGE_RENDERSCRIPT,
- kTextureRead = GRALLOC_USAGE_HW_TEXTURE,
- kHardwareComposer = GRALLOC_USAGE_HW_COMPOSER,
- kHardwareEncoder = GRALLOC_USAGE_HW_VIDEO_ENCODER,
- kProtectedRead = GRALLOC_USAGE_PROTECTED,
- };
-
- enum Producer : uint64_t {
- kSoftwareWrite = GRALLOC_USAGE_SW_WRITE_OFTEN,
- kRenderScriptWrite = GRALLOC_USAGE_RENDERSCRIPT,
- kTextureWrite = GRALLOC_USAGE_HW_RENDER,
- kCompositionTarget = GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_RENDER,
- kHardwareDecoder = GRALLOC_USAGE_HW_VIDEO_ENCODER,
- kProtectedWrite = GRALLOC_USAGE_PROTECTED,
- };
-
- uint64_t mConsumer; // e.g. input
- uint64_t mProducer; // e.g. output
-};
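For instance, the input buffers of a hardware video encoder that are filled by software might be described roughly as below; the scenario is assumed for the example, and the actual bit values are whatever the enumerators above map to.

C2MemoryUsage encoderInputUsage() {
    C2MemoryUsage usage;
    usage.mProducer = C2MemoryUsage::kSoftwareWrite;    // the CPU writes the frames
    usage.mConsumer = C2MemoryUsage::kHardwareEncoder   // the encoder reads them...
                    | C2MemoryUsage::kSoftwareRead;     // ...and software may read them back
    return usage;
}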
-
-/**
- * \ingroup linear allocator
- * 1D allocation interface.
- */
-class C2LinearAllocation : public _C2LinearCapacityAspect {
-public:
- /**
- * Maps a portion of an allocation starting from |offset| with |size| into local process memory.
- * Stores the starting address into |addr|, or NULL if the operation was unsuccessful.
- * |fenceFd| is a file descriptor referring to an acquire sync fence object. If it is already
- * safe to access the buffer contents, then -1.
- *
- * \param offset starting position of the portion to be mapped (this does not have to
- * be page aligned)
- * \param size size of the portion to be mapped (this does not have to be page
- * aligned)
- * \param usage the desired usage. \todo this must be kSoftwareRead and/or
- * kSoftwareWrite.
- * \param fenceFd a pointer to a file descriptor if an async mapping is requested. If
- * not-null, and acquire fence FD will be stored here on success, or -1
- * on failure. If null, the mapping will be synchronous.
- * \param addr a pointer to where the starting address of the mapped portion will be
- * stored. On failure, nullptr will be stored here.
- *
- * \todo Only one portion can be mapped at the same time - this is true for gralloc, but there
- * is no need for this for 1D buffers.
- * \todo Do we need to support sync operation as we could just wait for the fence?
- *
- * \retval C2_OK the operation was successful
- * \retval C2_NO_PERMISSION no permission to map the portion
- * \retval C2_TIMED_OUT the operation timed out
- * \retval C2_NO_MEMORY not enough memory to complete the operation
- * \retval C2_BAD_VALUE the parameters (offset/size) are invalid or outside the allocation, or
- * the usage flags are invalid (caller error)
- * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
- */
- virtual C2Error map(
- size_t offset, size_t size, C2MemoryUsage usage, int *fenceFd /* nullable */,
- void **addr /* nonnull */) = 0;
-
- /**
- * Unmaps a portion of an allocation at |addr| with |size|. These must be parameters previously
- * passed to |map|; otherwise, this operation is a no-op.
- *
- * \param addr starting address of the mapped region
- * \param size size of the mapped region
- * \param fenceFd a pointer to a file descriptor if an async unmapping is requested. If
- * not-null, a release fence FD will be stored here on success, or -1
- * on failure. This fence signals when the original allocation contains
- * any changes that happened to the mapped region. If null, the unmapping
- * will be synchronous.
- *
- * \retval C2_OK the operation was successful
- * \retval C2_TIMED_OUT the operation timed out
- * \retval C2_BAD_VALUE the parameters (addr/size) do not correspond to previously mapped
- * regions (caller error)
- * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
- * \retval C2_NO_PERMISSION no permission to unmap the portion (unexpected - system)
- */
- virtual C2Error unmap(void *addr, size_t size, int *fenceFd /* nullable */) = 0;
-
- /**
- * Returns true if this is a valid allocation.
- *
- * \todo remove?
- */
- virtual bool isValid() const = 0;
-
- /**
- * Returns a pointer to the allocation handle.
- */
- virtual const C2Handle *handle() const = 0;
-
- /**
- * Returns true if this is the same allocation as |other|.
- */
- virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const = 0;
-
-protected:
- // \todo should we limit allocation directly?
- C2LinearAllocation(size_t capacity) : _C2LinearCapacityAspect(c2_min(capacity, UINT32_MAX)) {}
- virtual ~C2LinearAllocation() = default;
-};
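A sketch of the synchronous mapping contract above, zeroing the first 4096 bytes of an allocation (the size is arbitrary and alloc is assumed to be valid and large enough):

#include <cstring>
#include <memory>

void zeroFirstPage(const std::shared_ptr<C2LinearAllocation> &alloc) {
    C2MemoryUsage usage;
    usage.mConsumer = C2MemoryUsage::kSoftwareRead;
    usage.mProducer = C2MemoryUsage::kSoftwareWrite;

    void *addr = nullptr;
    // a null fenceFd requests a synchronous mapping, per the documentation above
    if (alloc->map(0 /* offset */, 4096 /* size */, usage, nullptr, &addr) != C2_OK) {
        return;
    }
    std::memset(addr, 0, 4096);
    alloc->unmap(addr, 4096, nullptr /* synchronous unmap */);
}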
-
-/**
- * \ingroup graphic allocator
- * 2D allocation interface.
- */
-class C2GraphicAllocation : public _C2PlanarCapacityAspect {
-public:
- /**
- * Maps a rectangular section (as defined by |rect|) of a 2D allocation into local process
- * memory for flexible access. On success, it fills out |layout| with the plane specifications
- * and fills the |addr| array with pointers to the first byte of the top-left pixel of each
- * plane used. Otherwise, it leaves |layout| and |addr| untouched. |fenceFd| is a file
- * descriptor referring to an acquire sync fence object. If it is already safe to access the
- * buffer contents, then -1.
- *
- * \note Only one portion of the graphic allocation can be mapped at the same time. (This is
- * due to a gralloc1 limitation.)
- *
- * \param rect section to be mapped (this does not have to be aligned)
- * \param usage the desired usage. \todo this must be kSoftwareRead and/or
- * kSoftwareWrite.
- * \param fenceFd a pointer to a file descriptor if an async mapping is requested. If
- * not-null, an acquire fence FD will be stored here on success, or -1
- * on failure. If null, the mapping will be synchronous.
- * \param layout a pointer to where the mapped planes' descriptors will be
- * stored. On failure, the layout is left untouched.
- *
- * \todo Do we need to support sync operation as we could just wait for the fence?
- *
- * \retval C2_OK the operation was successful
- * \retval C2_NO_PERMISSION no permission to map the section
- * \retval C2_ALREADY_EXISTS there is already a mapped region (caller error)
- * \retval C2_TIMED_OUT the operation timed out
- * \retval C2_NO_MEMORY not enough memory to complete the operation
- * \retval C2_BAD_VALUE the parameters (rect) are invalid or outside the allocation, or the
- * usage flags are invalid (caller error)
- * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
-
- */
- virtual C2Error map(
- C2Rect rect, C2MemoryUsage usage, int *fenceFd,
- // TODO: return <addr, size> buffers with plane sizes
- C2PlaneLayout *layout /* nonnull */, uint8_t **addr /* nonnull */) = 0;
-
- /**
- * Unmaps the last mapped rectangular section.
- *
- * \param fenceFd a pointer to a file descriptor if an async unmapping is requested. If
- * not-null, a release fence FD will be stored here on success, or -1
- * on failure. This fence signals when the original allocation contains
- * any changes that happened to the mapped section. If null, the unmapping
- * will be synchronous.
- *
- * \retval C2_OK the operation was successful
- * \retval C2_TIMED_OUT the operation timed out
- * \retval C2_NOT_FOUND there is no mapped region (caller error)
- * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
- * \retval C2_NO_PERMISSION no permission to unmap the section (unexpected - system)
- */
- virtual C2Error unmap(C2Fence *fenceFd /* nullable */) = 0;
-
- /**
- * Returns true if this is a valid allocation.
- *
- * \todo remove?
- */
- virtual bool isValid() const = 0;
-
- /**
- * Returns a pointer to the allocation handle.
- */
- virtual const C2Handle *handle() const = 0;
-
- /**
- * Returns true if this is the same allocation as |other|.
- */
- virtual bool equals(const std::shared_ptr<const C2GraphicAllocation> &other) = 0;
-
-protected:
- virtual ~C2GraphicAllocation();
-};
-
-/**
- * Allocators are used by the framework to allocate memory (allocations) for buffers. They can
- * support either 1D or 2D allocations.
- *
- * \note In theory they could support both, but in practice, we will use only one or the other.
- *
- * Never constructed on stack.
- *
- * Allocators are provided by vendors.
- */
-class C2Allocator {
-public:
- /**
- * Allocates a 1D allocation of given |capacity| and |usage|. If successful, the allocation is
- * stored in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
- *
- * \param capacity the size of requested allocation (the allocation could be slightly
- * larger, e.g. to account for any system-required alignment)
- * \param usage the memory usage info for the requested allocation. \note that the
- * returned allocation may be later used/mapped with different usage.
- * The allocator should lay out the buffer to be optimized for this usage,
- * but must support any usage. One exception: protected buffers can
- * only be used in a protected scenario.
- * \param allocation pointer to where the allocation shall be stored on success. nullptr
- * will be stored here on failure
- *
- * \retval C2_OK the allocation was successful
- * \retval C2_NO_MEMORY not enough memory to complete the allocation
- * \retval C2_TIMED_OUT the allocation timed out
- * \retval C2_NO_PERMISSION no permission to complete the allocation
- * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
- * \retval C2_UNSUPPORTED this allocator does not support 1D allocations
- * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during allocation (unexpected)
- */
- virtual C2Error allocateLinearBuffer(
- uint32_t capacity __unused, C2MemoryUsage usage __unused,
- std::shared_ptr<C2LinearAllocation> *allocation /* nonnull */) {
- *allocation = nullptr;
- return C2_UNSUPPORTED;
- }
-
- /**
- * (Re)creates a 1D allocation from a native |handle|. If successful, the allocation is stored
- * in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
- *
- * \param handle the handle for the existing allocation
- * \param allocation pointer to where the allocation shall be stored on success. nullptr
- * will be stored here on failure
- *
- * \retval C2_OK the allocation was recreated successfully
- * \retval C2_NO_MEMORY not enough memory to recreate the allocation
- * \retval C2_TIMED_OUT the recreation timed out (unexpected)
- * \retval C2_NO_PERMISSION no permission to recreate the allocation
- * \retval C2_BAD_VALUE invalid handle (caller error)
- * \retval C2_UNSUPPORTED this allocator does not support 1D allocations
- * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during allocation (unexpected)
- */
- virtual C2Error recreateLinearBuffer(
- const C2Handle *handle __unused,
- std::shared_ptr<C2LinearAllocation> *allocation /* nonnull */) {
- *allocation = nullptr;
- return C2_UNSUPPORTED;
- }
-
- /**
- * Allocates a 2D allocation of given |width|, |height|, |format| and |usage|. If successful,
- * the allocation is stored in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
- *
- * \param width the width of requested allocation (the allocation could be slightly
- * larger, e.g. to account for any system-required alignment)
- * \param height the height of requested allocation (the allocation could be slightly
- * larger, e.g. to account for any system-required alignment)
- * \param format the pixel format of requested allocation. This could be a vendor
- * specific format.
- * \param usage the memory usage info for the requested allocation. \note that the
- * returned allocation may be later used/mapped with different usage.
- * The allocator should lay out the buffer to be optimized for this usage,
- * but must support any usage. One exception: protected buffers can
- * only be used in a protected scenario.
- * \param allocation pointer to where the allocation shall be stored on success. nullptr
- * will be stored here on failure
- *
- * \retval C2_OK the allocation was successful
- * \retval C2_NO_MEMORY not enough memory to complete the allocation
- * \retval C2_TIMED_OUT the allocation timed out
- * \retval C2_NO_PERMISSION no permission to complete the allocation
- * \retval C2_BAD_VALUE width, height, format or usage are not supported (invalid) (caller error)
- * \retval C2_UNSUPPORTED this allocator does not support 2D allocations
- * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during allocation (unexpected)
- */
- virtual C2Error allocateGraphicBuffer(
- uint32_t width __unused, uint32_t height __unused, uint32_t format __unused,
- C2MemoryUsage usage __unused,
- std::shared_ptr<C2GraphicAllocation> *allocation /* nonnull */) {
- *allocation = nullptr;
- return C2_UNSUPPORTED;
- }
-
- /**
- * (Re)creates a 2D allocation from a native handle. If successful, the allocation is stored
- * in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
- *
- * \param handle the handle for the existing allocation
- * \param allocation pointer to where the allocation shall be stored on success. nullptr
- * will be stored here on failure
- *
- * \retval C2_OK the allocation was recreated successfully
- * \retval C2_NO_MEMORY not enough memory to recreate the allocation
- * \retval C2_TIMED_OUT the recreation timed out (unexpected)
- * \retval C2_NO_PERMISSION no permission to recreate the allocation
- * \retval C2_BAD_VALUE invalid handle (caller error)
- * \retval C2_UNSUPPORTED this allocator does not support 2D allocations
- * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during recreation (unexpected)
- */
- virtual C2Error recreateGraphicBuffer(
- const C2Handle *handle __unused,
- std::shared_ptr<C2GraphicAllocation> *allocation /* nonnull */) {
- *allocation = nullptr;
- return C2_UNSUPPORTED;
- }
-
-protected:
- C2Allocator() = default;
-
- virtual ~C2Allocator() = default;
-};
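Calling into an allocator then looks like the sketch below; the 64 KiB capacity is arbitrary, and an allocator that only supports 2D allocations would return C2_UNSUPPORTED as in the default implementations above.

#include <memory>

std::shared_ptr<C2LinearAllocation> allocateBitstreamBuffer(
        const std::shared_ptr<C2Allocator> &allocator) {
    C2MemoryUsage usage;
    usage.mConsumer = C2MemoryUsage::kSoftwareRead;
    usage.mProducer = C2MemoryUsage::kSoftwareWrite;

    std::shared_ptr<C2LinearAllocation> allocation;
    if (allocator->allocateLinearBuffer(64 * 1024 /* capacity */, usage, &allocation) != C2_OK) {
        return nullptr;    // |allocation| is already nullptr on failure
    }
    return allocation;
}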
-
-/**
- * Block allocators are used by components to allocate memory for output buffers. They can
- * support either linear (1D), circular (1D) or graphic (2D) allocations.
- *
- * Never constructed on stack.
- *
- * Block allocators are provided by the framework.
- */
-class C2BlockAllocator {
-public:
- /**
- * Allocates a linear writeable block of given |capacity| and |usage|. If successful, the
- * block is stored in |block|. Otherwise, |block| is set to 'nullptr'.
- *
- * \param capacity the size of requested block.
- * \param usage the memory usage info for the requested allocation. \note that the
- * returned allocation may be later used/mapped with different usage.
- * The allocator shall lay out the buffer to be optimized for this usage,
- * but must support any usage. One exception: protected buffers can
- * only be used in a protected scenario.
- * \param block pointer to where the allocated block shall be stored on success. nullptr
- * will be stored here on failure
- *
- * \retval C2_OK the allocation was successful
- * \retval C2_NO_MEMORY not enough memory to complete the allocation
- * \retval C2_TIMED_OUT the allocation timed out
- * \retval C2_NO_PERMISSION no permission to complete the allocation
- * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
- * \retval C2_UNSUPPORTED this allocator does not support linear allocations
- * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during allocation (unexpected)
- */
- virtual C2Error allocateLinearBlock(
- uint32_t capacity __unused, C2MemoryUsage usage __unused,
- std::shared_ptr<C2LinearBlock> *block /* nonnull */) {
- *block = nullptr;
- return C2_UNSUPPORTED;
- }
-
- /**
- * Allocates a circular writeable block of given |capacity| and |usage|. If successful, the
- * block is stored in |block|. Otherwise, |block| is set to 'nullptr'.
- *
- * \param capacity the size of requested circular block. (the allocation could be slightly
- * larger, e.g. to account for any system-required alignment)
- * \param usage the memory usage info for the requested allocation. \note that the
- * returned allocation may be later used/mapped with different usage.
- * The allocator shall lay out the buffer to be optimized for this usage,
- * but must support any usage. One exception: protected buffers can
- * only be used in a protected scenario.
- * \param block pointer to where the allocated block shall be stored on success. nullptr
- * will be stored here on failure
- *
- * \retval C2_OK the allocation was successful
- * \retval C2_NO_MEMORY not enough memory to complete the allocation
- * \retval C2_TIMED_OUT the allocation timed out
- * \retval C2_NO_PERMISSION no permission to complete the allocation
- * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
- * \retval C2_UNSUPPORTED this allocator does not support circular allocations
- * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during allocation (unexpected)
- */
- virtual C2Error allocateCircularBlock(
- uint32_t capacity __unused, C2MemoryUsage usage __unused,
- std::shared_ptr<C2CircularBlock> *block /* nonnull */) {
- *block = nullptr;
- return C2_UNSUPPORTED;
- }
-
- /**
- * Allocates a 2D graphic block of given |width|, |height|, |format| and |usage|. If successful,
- * the allocation is stored in |block|. Otherwise, |block| is set to 'nullptr'.
- *
- * \param width the width of requested allocation (the allocation could be slightly
- * larger, e.g. to account for any system-required alignment)
- * \param height the height of requested allocation (the allocation could be slightly
- * larger, e.g. to account for any system-required alignment)
- * \param format the pixel format of requested allocation. This could be a vendor
- * specific format.
- * \param usage the memory usage info for the requested allocation. \note that the
- * returned allocation may be later used/mapped with different usage.
- * The allocator should lay out the buffer to be optimized for this usage,
- * but must support any usage. One exception: protected buffers can
- * only be used in a protected scenario.
- * \param block pointer to where the allocation shall be stored on success. nullptr
- * will be stored here on failure
- *
- * \retval C2_OK the allocation was successful
- * \retval C2_NO_MEMORY not enough memory to complete the allocation
- * \retval C2_TIMED_OUT the allocation timed out
- * \retval C2_NO_PERMISSION no permission to complete the allocation
- * \retval C2_BAD_VALUE width, height, format or usage are not supported (invalid) (caller error)
- * \retval C2_UNSUPPORTED this allocator does not support 2D allocations
- * \retval C2_CORRUPTED some unknown, unrecoverable error occurred during allocation (unexpected)
- */
- virtual C2Error allocateGraphicBlock(
- uint32_t width __unused, uint32_t height __unused, uint32_t format __unused,
- C2MemoryUsage usage __unused,
- std::shared_ptr<C2GraphicBlock> *block /* nonnull */) {
- *block = nullptr;
- return C2_UNSUPPORTED;
- }
-
-protected:
- C2BlockAllocator() = default;
-
- virtual ~C2BlockAllocator() = default;
-};
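A component uses the framework-provided block allocator in much the same way; a sketch for one output frame, where the dimensions, pixel format and usage flags are placeholders supplied by the caller:

#include <memory>

std::shared_ptr<C2GraphicBlock> allocateOutputFrame(
        const std::shared_ptr<C2BlockAllocator> &blockAllocator,
        uint32_t width, uint32_t height, uint32_t pixelFormat) {
    C2MemoryUsage usage;
    usage.mConsumer = C2MemoryUsage::kTextureRead;      // e.g. composited or displayed later
    usage.mProducer = C2MemoryUsage::kSoftwareWrite;    // e.g. a software decoder fills it

    std::shared_ptr<C2GraphicBlock> block;
    if (blockAllocator->allocateGraphicBlock(width, height, pixelFormat, usage, &block) != C2_OK) {
        return nullptr;
    }
    return block;
}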
-
-/// @}
-
-/// \cond INTERNAL
-
-/// \todo These are no longer used
-
-/// \addtogroup linear
-/// @{
-
-/** \deprecated */
-class C2LinearBuffer
- : public C2Buffer, public _C2LinearRangeAspect,
- public std::enable_shared_from_this<C2LinearBuffer> {
-public:
- /** \todo what is this? */
- const C2Handle *handle() const;
-
-protected:
- inline C2LinearBuffer(const C2ConstLinearBlock &block);
-
-private:
- class Impl;
- Impl *mImpl;
-};
-
-class C2ReadCursor;
-
-class C2WriteCursor {
-public:
- uint32_t remaining() const; // remaining data to be read
- void commit(); // commits the current position. discard data before current position
- void reset() const; // resets position to the last committed position
- // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
- // sliced off.
- C2ReadCursor slice(uint32_t size) const;
- // reserves at most |size| bytes for writing, and moves the cursor ahead by the number of
- // bytes reserved.
- C2WriteCursor reserve(uint32_t size);
- // bool read(T&);
- // bool write(T&);
- C2Fence waitForSpace(uint32_t size);
-};
-
-/// @}
-
-/// \addtogroup graphic
-/// @{
-
-struct C2ColorSpace {
-//public:
- enum Standard {
- BT601,
- BT709,
- BT2020,
- // TODO
- };
-
- enum Range {
- LIMITED,
- FULL,
- // TODO
- };
-
- enum TransferFunction {
- BT709Transfer,
- BT2020Transfer,
- HybridLogGamma2,
- HybridLogGamma4,
- // TODO
- };
-};
-
-/** \deprecated */
-class C2GraphicBuffer : public C2Buffer {
-public:
- // constant attributes
- inline uint32_t width() const { return mWidth; }
- inline uint32_t height() const { return mHeight; }
- inline uint32_t format() const { return mFormat; }
- inline const C2MemoryUsage usage() const { return mUsage; }
-
- // modifiable attributes
-
-
- virtual const C2ColorSpace colorSpace() const = 0;
- // best effort
- virtual void setColorSpace_be(const C2ColorSpace &colorSpace) = 0;
- virtual bool setColorSpace(const C2ColorSpace &colorSpace) = 0;
-
- const C2Handle *handle() const;
-
-protected:
- uint32_t mWidth;
- uint32_t mHeight;
- uint32_t mFormat;
- C2MemoryUsage mUsage;
-
- class Impl;
- Impl *mImpl;
-};
-
-/// @}
-
-/// \endcond
-
-/// @}
-
-} // namespace android
-
-#endif // C2BUFFER_H_
diff --git a/media/libstagefright/codec2/include/C2Component.h b/media/libstagefright/codec2/include/C2Component.h
deleted file mode 100644
index 1ee9302..0000000
--- a/media/libstagefright/codec2/include/C2Component.h
+++ /dev/null
@@ -1,685 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef C2COMPONENT_H_
-
-#define C2COMPONENT_H_
-
-#include <stdbool.h>
-#include <stdint.h>
-
-#include <list>
-#include <memory>
-#include <vector>
-#include <functional>
-
-#include <C2Param.h>
-#include <C2Work.h>
-
-namespace android {
-
-/// \defgroup components Components
-/// @{
-
-class C2Component;
-
-class C2ComponentListener {
-public:
- virtual void onWorkDone(std::weak_ptr<C2Component> component,
- std::vector<std::unique_ptr<C2Work>> workItems) = 0;
-
- virtual void onTripped(std::weak_ptr<C2Component> component,
- std::vector<std::shared_ptr<C2SettingResult>> settingResult) = 0;
-
- virtual void onError(std::weak_ptr<C2Component> component,
- uint32_t errorCode) = 0;
-
- // virtual void onTunnelReleased(<from>, <to>) = 0;
-
- // virtual void onComponentReleased(<id>) = 0;
-
-protected:
- virtual ~C2ComponentListener();
-};
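A minimal listener implementation might look like the sketch below; it simply consumes the callbacks (C2Work and C2SettingResult come from the headers included above), and the class name is illustrative.

#include <memory>
#include <vector>

class LoggingListener : public C2ComponentListener {
public:
    void onWorkDone(std::weak_ptr<C2Component> component,
                    std::vector<std::unique_ptr<C2Work>> workItems) override {
        (void)component;
        for (std::unique_ptr<C2Work> &work : workItems) {
            (void)work;        // hand each finished work item back to the client here
        }
    }

    void onTripped(std::weak_ptr<C2Component> component,
                   std::vector<std::shared_ptr<C2SettingResult>> settingResult) override {
        (void)component;
        (void)settingResult;   // a setting became invalid; reconfigure or stop
    }

    void onError(std::weak_ptr<C2Component> component, uint32_t errorCode) override {
        (void)component;
        (void)errorCode;       // fatal component error
    }
};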
-
-/**
- * Component interface object. This object contains all of the configuration of a potential or
- * actual component. It can be created and used independently of an actual C2Component instance to
- * query support and parameters for various component settings and configurations for a potential
- * component. Actual components also expose this interface.
- */
-
-class C2ComponentInterface {
-public:
- // ALWAYS AVAILABLE METHODS
- // =============================================================================================
-
- /**
- * Returns the name of this component or component interface object.
- * This is a unique name for this component or component interface 'class'; however, multiple
- * instances of this component SHALL have the same name.
- *
- * This method MUST be supported in any state. This call does not change the state nor the
- * internal states of the component.
- *
- * This method MUST be "non-blocking" and return within 1ms.
- *
- * \return the name of this component or component interface object.
- * \retval an empty string if there was not enough memory to allocate the actual name.
- */
- virtual C2String getName() const = 0;
-
- /**
- * Returns a unique ID for this component or interface object.
- * This ID is used as work targets, unique work IDs, and when configuring tunneling.
- *
- * This method MUST be supported in any state. This call does not change the state nor the
- * internal states of the component.
- *
- * This method MUST be "non-blocking" and return within 1ms.
- *
- * \return a unique node ID for this component or component interface instance.
- */
- virtual node_id getId() const = 0;
-
- /**
- * Queries a set of parameters from the component or interface object.
- * Querying is performed at best effort: the component SHALL query all supported parameters and
- * skip unsupported ones, or heap allocated parameters that could not be allocated. Any errors
- * are communicated in the return value. Additionally, preallocated (e.g. stack) parameters that
- * could not be queried are invalidated. Parameters to be allocated on the heap are omitted from
- * the result.
- *
- * \note Parameter values do not depend on the order of query.
- *
- * \todo This method cannot be used to query info-buffers. Is that a problem?
- *
- * This method MUST be supported in any state. This call does not change the state nor the
- * internal states of the component.
- *
- * This method MUST be "non-blocking" and return within 1ms.
- *
- * \param[in,out] stackParams a list of params queried. These are initialized specific to each
- * setting; e.g. size and index are set and rest of the members are
- * cleared.
- * \note Flexible settings that are of incorrect size will be invalidated.
- * \param[in] heapParamIndices a vector of param indices for params to be queried and returned on the
- * heap. These parameters will be returned in heapParams. Unsupported param
- * indices will be ignored.
- * \param[out] heapParams a list of params to which the supported heap parameters will be
- * appended in the order they appear in heapParamIndices.
- *
- * \retval C2_OK all parameters could be queried
- * \retval C2_BAD_INDEX all supported parameters could be queried, but some parameters were not
- * supported
- * \retval C2_NO_MEMORY could not allocate memory for a supported parameter
- * \retval C2_CORRUPTED some unknown error prevented the querying of the parameters
- * (unexpected)
- */
- virtual status_t query_nb(
- const std::vector<C2Param* const> &stackParams,
- const std::vector<C2Param::Index> &heapParamIndices,
- std::vector<std::unique_ptr<C2Param>>* const heapParams) const = 0;
-
- /**
- * Sets a set of parameters for the component or interface object.
- * Tuning is performed at best effort: the component SHALL update all supported configuration at
- * best effort (unless configured otherwise) and skip unsupported ones. Any errors are
- * communicated in the return value and in |failures|.
- *
- * \note Parameter tuning DOES depend on the order of the tuning parameters. E.g. some parameter
- * update may allow some subsequent parameter update.
- *
- * This method MUST be supported in any state.
- *
- * This method MUST be "non-blocking" and return within 1ms.
- *
- * \param[in,out] params a list of parameter updates. These will be updated to the actual
- * parameter values after the updates (this is because tuning is performed
- * at best effort).
- * \todo params that could not be updated are not marked here, so are
- * confusing - are they "existing" values or intended to be configured
- * values?
- * \param[out] failures a list of parameter failures
- *
- * \retval C2_OK all parameters could be updated successfully
- * \retval C2_BAD_INDEX all supported parameters could be updated successfully, but some
- * parameters were not supported
- * \retval C2_BAD_VALUE some supported parameters could not be updated successfully because
- * they contained unsupported values. These are returned in |failures|.
- * \retval C2_NO_MEMORY some supported parameters could not be updated successfully because
- * they contained unsupported values, but could not allocate a failure
- * object for them.
- * \retval C2_CORRUPTED some unknown error prevented the update of the parameters
- * (unexpected)
- */
- virtual status_t config_nb(
- const std::vector<C2Param* const> ¶ms,
- std::vector<std::unique_ptr<C2SettingResult>>* const failures) = 0;
-
- /**
- * Atomically sets a set of parameters for the component or interface object.
- *
- * \note This method is used mainly for reserving resources for a component.
- *
- * The component SHALL update all supported configuration at
- * best effort (TBD) (unless configured otherwise) and skip unsupported ones. Any errors are
- * communicated in the return value and in |failures|.
- *
- * \note Parameter tuning DOES depend on the order of the tuning parameters. E.g. some parameter
- * update may allow some subsequent parameter update.
- *
- * This method MUST be supported in any state.
- *
- * This method may be momentarily blocking, but MUST return within 5ms.
- *
- * \param[in,out] params a list of parameter updates. These will be updated to the actual
- * parameter values after the updates (this is because tuning is performed
- * at best effort).
- * \todo params that could not be updated are not marked here, so are
- * confusing - are they "existing" values or intended to be configured
- * values?
- * \param[out] failures a list of parameter failures
- *
- * \retval C2_OK all parameters could be updated successfully
- * \retval C2_BAD_INDEX all supported parameters could be updated successfully, but some
- * parameters were not supported
- * \retval C2_BAD_VALUE some supported parameters could not be updated successfully because
- * they contained unsupported values. These are returned in |failures|.
- * \retval C2_NO_MEMORY some supported parameters could not be updated successfully because
- * they contained unsupported values, but could not allocate a failure
- * object for them.
- * \retval C2_CORRUPTED some unknown error prevented the update of the parameters
- * (unexpected)
- */
- virtual status_t commit_sm(
- const std::vector<C2Param* const> &params,
- std::vector<std::unique_ptr<C2SettingResult>>* const failures) = 0;
-
- // TUNNELING
- // =============================================================================================
-
- /**
- * Creates a tunnel from this component to the target component.
- *
- * If the tunnel is successfully created, subsequent work items queued may include a
- * tunneled path between these components.
- *
- * This method MUST be supported in any state.
- *
- * This method may be momentarily blocking, but MUST return within 5ms.
- *
- * \retval C2_OK the tunnel was successfully created
- * \retval C2_BAD_INDEX the target component does not exist
- * \retval C2_ALREADY_EXIST the tunnel already exists
- * \retval C2_UNSUPPORTED the tunnel is not supported
- *
- * \retval C2_TIMED_OUT could not create the tunnel within the time limit (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented the creation of the tunnel (unexpected)
- */
- virtual status_t createTunnel_sm(node_id targetComponent) = 0;
-
- /**
- * Releases a tunnel from this component to the target component.
- *
- * The release of a tunnel is delayed while there are pending work items for the tunnel.
- * After releasing a tunnel, subsequent work items queued MUST NOT include a tunneled
- * path between these components.
- *
- * This method MUST be supported in any state.
- *
- * This method may be momentarily blocking, but MUST return within 5ms.
- *
- * \retval C2_OK the tunnel was marked for release successfully
- * \retval C2_BAD_INDEX the target component does not exist
- * \retval C2_NOT_FOUND the tunnel does not exist
- *
- * \retval C2_TIMED_OUT could not mark the tunnel for release within the time limit (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented the release of the tunnel (unexpected)
- */
- virtual status_t releaseTunnel_sm(node_id targetComponent) = 0;
-
-
- // REFLECTION MECHANISM (USED FOR EXTENSION)
- // =============================================================================================
-
- /**
- * Returns the parameter reflector.
- *
- * This is used to describe parameter fields.
- *
- * \return a shared parameter reflector object.
- */
- virtual std::shared_ptr<C2ParamReflector> getParamReflector() const = 0;
-
- /**
- * Returns the set of supported parameters.
- *
- * \param[out] params a vector to which the supported parameters will be appended.
- *
- * \retval C2_OK the operation completed successfully.
- * \retval C2_NO_MEMORY not enough memory to complete this method.
- */
- virtual status_t getSupportedParams(
- std::vector<std::shared_ptr<C2ParamDescriptor>> * const params) const = 0;
-
- /**
- *
- * \todo should this take a list considering that setting some fields may further limit other
- * fields in the same list?
- */
- virtual status_t getSupportedValues(
- const std::vector<const C2ParamField> fields,
- std::vector<C2FieldSupportedValues>* const values) const = 0;
-
- virtual ~C2ComponentInterface() = default;
-};
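As a reading aid only (not part of the removed header), here is a minimal sketch of how a client might use the C2ComponentInterface declared above. It assumes C2ComponentDomainInfo (from the C2Config.h removal further below) is default-constructible like other simple value params; the helper name is made up.

// Hypothetical sketch: query one stack parameter through query_nb().
// The vector types mirror the removed signature verbatim.
status_t queryComponentDomain(const std::shared_ptr<C2ComponentInterface> &intf) {
    C2ComponentDomainInfo domainInfo;                  // stack param; index/size are preset
    std::vector<std::unique_ptr<C2Param>> heapParams;  // no heap params requested here
    status_t err = intf->query_nb({ &domainInfo }, {}, &heapParams);
    if (err != C2_OK && err != C2_BAD_INDEX) {
        return err;                                    // e.g. C2_NO_MEMORY or C2_CORRUPTED
    }
    // Unsupported stack params are invalidated, which operator bool() reports.
    return domainInfo ? C2_OK : C2_BAD_INDEX;
}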
-
-class C2Component {
-public:
- // METHODS AVAILABLE WHEN RUNNING
- // =============================================================================================
-
- /**
- * Queues up work for the component.
- *
- * This method MUST be supported in running (including tripped) states.
- *
- * This method MUST be "non-blocking" and return within 1ms
- *
- * It is acceptable for this method to return C2_OK and return an error value using the
- * onWorkDone() callback.
- *
- * \retval C2_OK the work was successfully queued
- * \retval C2_BAD_INDEX some component(s) in the work do(es) not exist
- * \retval C2_UNSUPPORTED the components are not tunneled
- *
- * \retval C2_NO_MEMORY not enough memory to queue the work
- * \retval C2_CORRUPTED some unknown error prevented queuing the work (unexpected)
- */
- virtual status_t queue_nb(std::list<std::unique_ptr<C2Work>>* const items) = 0;
-
- /**
- * Announces a work to be queued later for the component. This reserves a slot for the queue
- * to ensure correct work ordering even if the work is queued later.
- *
- * This method MUST be supported in running (including tripped) states.
- *
- * This method MUST be "non-blocking" and return within 1 ms
- *
- * \retval C2_OK the work announcement has been successfully recorded
- * \retval C2_BAD_INDEX some component(s) in the work outline do(es) not exist
- * \retval C2_UNSUPPORTED the components are not tunneled
- *
- * \retval C2_NO_MEMORY not enough memory to record the work announcement
- * \retval C2_CORRUPTED some unknown error prevented recording the announcement (unexpected)
- *
- * \todo Can this be rolled into queue_nb?
- */
- virtual status_t announce_nb(const std::vector<C2WorkOutline> &items) = 0;
-
- /**
- * Discards and abandons any pending work for the component, and optionally any component
- * downstream.
- *
- * \todo define this: we could flush all work before last item queued for component across all
- * components linked to this; flush only work items that are queued to this
- * component
- * \todo return work # of last flushed item; or all flushed (but not returned items)
- * \todo we could make flush take a work item and flush all work before/after that item to allow
- * TBD (slicing/seek?)
- * \todo we could simply take a list of numbers and flush those... this is bad for decoders
- * also, what would happen to fine-grained references?
- *
- * This method MUST be supported in running (including tripped) states.
- *
- * This method may be momentarily blocking, but MUST return within 5ms.
- *
- * Work that could be immediately abandoned/discarded SHALL be returned in |flushedWork|; this
- * can be done in an arbitrary order.
- *
- * Work that could not be abandoned or discarded immediately SHALL be marked to be
- * discarded at the earliest opportunity, and SHALL be returned via the onWorkDone() callback.
- *
- * \param flushThrough flush work from this component and all components connected downstream
- * from it via tunneling.
- *
- * \retval C2_OK the flush request was successfully processed
- * \retval C2_TIMED_OUT the flush could not be completed within the time limit (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented flushing from completion (unexpected)
- */
- virtual status_t flush_sm(bool flushThrough, std::list<std::unique_ptr<C2Work>>* const flushedWork) = 0;
-
- /**
- * Drains the component, and optionally downstream components
- *
- * \todo define this; we could place EOS to all upstream components, just this component, or
- * all upstream and downstream component.
- * \todo should EOS carry over to downstream components?
- *
- * Marks the last work item as "end-of-stream", so the component is notified not to wait for
- * further work before it processes the work already queued. This method is called to set the
- * end-of-stream flag after work has been queued. The client can continue to queue further work
- * immediately after this method returns.
- *
- * This method MUST be supported in running (including tripped) states.
- *
- * This method MUST be "non-blocking" and return within 1ms.
- *
- * Work that is completed SHALL be returned via the onWorkDone() callback.
- *
- * \param drainThrough marks the last work item with a persistent "end-of-stream" marker that
- * will drain downstream components.
- *
- * \todo this may confuse work-ordering downstream; could be a mode enum
- *
- * \retval C2_OK the drain request was successfully recorded
- * \retval C2_TIMED_OUT the drain could not be initiated within the time limit (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented initiating the drain (unexpected)
- */
- virtual status_t drain_nb(bool drainThrough) = 0;
-
- // STATE CHANGE METHODS
- // =============================================================================================
-
- /**
- * Starts the component.
- *
- * This method MUST be supported in stopped state.
- *
- * \todo This method MUST return within 500ms. Seems this should be able to return quickly, as
- * there are no immediate guarantees. Though there are guarantees for responsiveness immediately
- * after start returns.
- *
- * \todo Could we just start a ComponentInterface to get a Component?
- *
- * \retval C2_OK the component was started successfully
- * \retval C2_NO_MEMORY not enough memory to start the component
- * \retval C2_TIMED_OUT the component could not be started within the time limit (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented starting the component (unexpected)
- */
- virtual status_t start() = 0;
-
- /**
- * Stops the component.
- *
- * This method MUST be supported in running (including tripped) state.
- *
- * This method MUST return within 500ms.
- *
- * Upon this call, all pending work SHALL be abandoned.
- *
- * \todo should this return completed work, since client will just free it? Perhaps just to
- * verify accounting.
- *
- * This does not alter any settings and tunings that may have resulted in a tripped state.
- * (Is this material given the definition? Perhaps in case we want to start again.)
- */
- virtual status_t stop() = 0;
-
- /**
- * Resets the component.
- *
- * This method MUST be supported in running (including tripped) state.
- *
- * This method MUST be supported during any other call (\todo or just blocking ones?)
- *
- * This method MUST return within 500ms.
- *
- * After this call returns, all work is/must be abandoned and all references should be released.
- *
- * \todo should this return completed work, since client will just free it? Also, if it unblocks
- * a stop, where should completed work be returned?
- *
- * This brings settings back to their default - "guaranteeing" no tripped space.
- *
- * \todo reclaim support - it seems that since ownership is passed, this will allow reclaiming stuff.
- */
- virtual void reset() = 0;
-
- /**
- * Releases the component.
- *
- * This method MUST be supported in any state. (\todo Or shall we force reset() first to bring
- * to a known state?)
- *
- * This method MUST return within 500ms.
- *
- * \todo should this return completed work, since client will just free it? Also, if it unblocks
- * a stop, where should completed work be returned?
- *
- * TODO: does it matter if this call has a short time limit? Yes, as upon return all references
- * shall be abandoned.
- */
- virtual void release() = 0;
-
- /**
- * Returns the interface for this component.
- *
- * \return the component interface
- */
- virtual std::shared_ptr<C2ComponentInterface> intf() = 0;
-
-protected:
- virtual ~C2Component() = default;
-};
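A rough lifecycle sketch against the C2Component interface above, assuming the caller already owns a component and a pre-built work list; the helper name and error handling are illustrative only.

// Hypothetical sketch: start, queue work, signal end-of-stream, stop.
status_t runOnce(const std::shared_ptr<C2Component> &comp,
                 std::list<std::unique_ptr<C2Work>> *workItems) {
    status_t err = comp->start();
    if (err != C2_OK) {
        return err;
    }
    err = comp->queue_nb(workItems);   // non-blocking; results arrive via onWorkDone()
    if (err != C2_OK) {
        (void)comp->stop();
        return err;
    }
    (void)comp->drain_nb(false /* drainThrough */);  // mark EOS for already-queued work
    return comp->stop();               // abandons any still-pending work
}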
-
-class C2FrameInfoParser {
-public:
- /**
- * \return the content type supported by this info parser.
- *
- * \todo this may be redundant
- */
- virtual C2StringLiteral getType() const = 0;
-
- /**
- * \return a vector of supported parameter indices parsed by this info parser.
- *
- * \todo sticky vs. non-sticky params? this may be communicated by param-reflector.
- */
- virtual const std::vector<C2Param::Index> getParsedParams() const = 0;
-
- /**
- * Resets this info parser. This brings this parser to its initial state after creation.
- *
- * This method SHALL return within 5ms.
- *
- * \retval C2_OK the info parser was reset
- * \retval C2_TIMED_OUT could not reset the parser within the time limit (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented the resetting of the parser (unexpected)
- */
- virtual status_t reset() { return C2_OK; }
-
- virtual status_t parseFrame(C2BufferPack &frame);
-
- virtual ~C2FrameInfoParser() = default;
-};
-
-struct C2ComponentInfo {
- // TBD
-
-};
-
-class C2AllocatorStore {
-public:
- // TBD
-
- enum Type {
- LINEAR, ///< basic linear allocator type
- GRALLOC, ///< basic gralloc allocator type
- };
-
- /**
- * Creates an allocator.
- *
- * \param type the type of allocator to create
- * \param allocator shared pointer where the created allocator is stored. Cleared on failure
- * and updated on success.
- *
- * \retval C2_OK the allocator was created successfully
- * \retval C2_TIMED_OUT could not create the allocator within the time limit (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented the creation of the allocator (unexpected)
- *
- * \retval C2_NOT_FOUND no such allocator
- * \retval C2_NO_MEMORY not enough memory to create the allocator
- */
- virtual status_t createAllocator(Type type, std::shared_ptr<C2Allocator>* const allocator) = 0;
-
- virtual ~C2AllocatorStore() = default;
-};
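A small usage sketch for the allocator store above; the helper function is hypothetical.

// Hypothetical sketch: obtain a basic linear allocator from the store.
// On failure, |allocator| is cleared per the contract documented above.
status_t getLinearAllocator(C2AllocatorStore *store,
                            std::shared_ptr<C2Allocator> *allocator) {
    return store->createAllocator(C2AllocatorStore::LINEAR, allocator);
}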
-
-class C2ComponentStore {
- /**
- * Creates a component.
- *
- * This method SHALL return within 100ms.
- *
- * \param name name of the component to create
- * \param component shared pointer where the created component is stored. Cleared on
- * failure and updated on success.
- *
- * \retval C2_OK the component was created successfully
- * \retval C2_TIMED_OUT could not create the component within the time limit (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented the creation of the component (unexpected)
- *
- * \retval C2_NOT_FOUND no such component
- * \retval C2_NO_MEMORY not enough memory to create the component
- */
- virtual status_t createComponent(C2String name, std::shared_ptr<C2Component>* const component);
-
- /**
- * Creates a component interface.
- *
- * This method SHALL return within 100ms.
- *
- * \param name name of the component interface to create
- * \param interface shared pointer where the created interface is stored
- *
- * \retval C2_OK the component interface was created successfully
- * \retval C2_TIMED_OUT could not create the component interface within the time limit
- * (unexpected)
- * \retval C2_CORRUPTED some unknown error prevented the creation of the component interface
- * (unexpected)
- *
- * \retval C2_NOT_FOUND no such component interface
- * \retval C2_NO_MEMORY not enough memory to create the component interface
- *
- * \todo Do we need an interface, or could this just be a component that is never started?
- */
- virtual status_t createInterface(C2String name, std::shared_ptr<C2ComponentInterface>* const interface);
-
- /**
- * Returns the list of components supported by this component store.
- *
- * This method SHALL return within 1ms.
- *
- * \return a vector of component information.
- */
- virtual std::vector<std::unique_ptr<const C2ComponentInfo>> getComponents();
-
- // -------------------------------------- UTILITY METHODS --------------------------------------
-
- // on-demand buffer layout conversion (swizzling)
- virtual status_t copyBuffer(std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst);
-
- // status_t selectPreferredColor(formats<A>, formats<B>);
-
- // GLOBAL SETTINGS
- // system-wide stride & slice-height (???)
-
- /**
- * Queries a set of system-wide parameters.
- * Querying is performed at best effort: the store SHALL query all supported parameters and
- * skip unsupported ones, or heap allocated parameters that could not be allocated. Any errors
- * are communicated in the return value. Additionally, preallocated (e.g. stack) parameters that
- * could not be queried are invalidated. Parameters to be allocated on the heap are omitted from
- * the result.
- *
- * \note Parameter values do not depend on the order of query.
- *
- * This method MUST be "non-blocking" and return within 1ms.
- *
- * \param stackParams a list of params queried. These are initialized specific to each
- * setting; e.g. size and index are set and rest of the members are
- * cleared.
- * NOTE: Flexible settings that are of incorrect size will be invalidated.
- * \param heapParamIndices a vector of param indices for params to be queried and returned on the
- * heap. These parameters will be returned in heapParams. Unsupported param
- * indices will be ignored.
- * \param heapParams a list of params to which the supported heap parameters will be
- * appended in the order they appear in heapParamIndices.
- *
- * \retval C2_OK all parameters could be queried
- * \retval C2_BAD_INDEX all supported parameters could be queried, but some parameters were not
- * supported
- * \retval C2_NO_MEMORY could not allocate memory for a supported parameter
- * \retval C2_CORRUPTED some unknown error prevented the querying of the parameters
- * (unexpected)
- */
- virtual status_t query_nb(
- const std::vector<C2Param* const> &stackParams,
- const std::vector<C2Param::Index> &heapParamIndices,
- std::vector<std::unique_ptr<C2Param>>* const heapParams) = 0;
-
- /**
- * Sets a set of system-wide parameters.
- *
- * \note There are no settable system-wide parameters defined thus far, but some may be added in the
- * future.
- *
- * Tuning is performed at best effort: the store SHALL update all supported configuration at
- * best effort (unless configured otherwise) and skip unsupported ones. Any errors are
- * communicated in the return value and in |failures|.
- *
- * \note Parameter tuning DOES depend on the order of the tuning parameters. E.g. some parameter
- * update may allow some subsequent parameter update.
- *
- * This method MUST be "non-blocking" and return within 1ms.
- *
- * \param params a list of parameter updates. These will be updated to the actual
- * parameter values after the updates (this is because tuning is performed
- * at best effort).
- * \todo params that could not be updated are not marked here, so are
- * confusing - are they "existing" values or intended to be configured
- * values?
- * \param failures a list of parameter failures
- *
- * \retval C2_OK all parameters could be updated successfully
- * \retval C2_BAD_INDEX all supported parameters could be updated successfully, but some
- * parameters were not supported
- * \retval C2_BAD_VALUE some supported parameters could not be updated successfully because
- * they contained unsupported values. These are returned in |failures|.
- * \retval C2_NO_MEMORY some supported parameters could not be updated successfully because
- * they contained unsupported values, but could not allocate a failure
- * object for them.
- * \retval C2_CORRUPTED some unknown error prevented the update of the parameters
- * (unexpected)
- */
- virtual status_t config_nb(
- const std::vector<C2Param* const> &params,
- std::list<std::unique_ptr<C2SettingResult>>* const failures) = 0;
-
- virtual ~C2ComponentStore() = default;
-};
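And a sketch of component creation through the store above. Note the removed declaration omits an access specifier (class members default to private), so this assumes the methods are meant to be public; the name argument is a placeholder.

// Hypothetical sketch: instantiate a component by name via the store.
std::shared_ptr<C2Component> createByName(C2ComponentStore *store, C2String name) {
    std::shared_ptr<C2Component> component;
    if (store->createComponent(name, &component) != C2_OK) {
        return nullptr;   // |component| is cleared on failure per the contract above
    }
    return component;
}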
-
-// ================================================================================================
-
-/// @}
-
-} // namespace android
-
-#endif // C2COMPONENT_H_
diff --git a/media/libstagefright/codec2/include/C2Config.h b/media/libstagefright/codec2/include/C2Config.h
deleted file mode 100644
index 30e9193..0000000
--- a/media/libstagefright/codec2/include/C2Config.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef C2CONFIG_H_
-#define C2CONFIG_H_
-
-#include <C2ParamDef.h>
-
-namespace android {
-
-/// \defgroup config Component configuration
-/// @{
-
-#ifndef DEFINE_C2_ENUM_VALUE_AUTO_HELPER
-#define DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, prefix, ...)
-#define DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(name, type, names, ...)
-#endif
-
-#define C2ENUM(name, type, ...) \
-enum name : type { __VA_ARGS__ }; \
-DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, NULL, __VA_ARGS__)
-
-#define C2ENUM_CUSTOM_PREFIX(name, type, prefix, ...) \
-enum name : type { __VA_ARGS__ }; \
-DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, prefix, __VA_ARGS__)
-
-#define C2ENUM_CUSTOM_NAMES(name, type, names, ...) \
-enum name : type { __VA_ARGS__ }; \
-DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(name, type, names, __VA_ARGS__)
-
-enum C2ParamIndexKind : uint32_t {
- /// domain
- kParamIndexDomain,
-
- /// configuration descriptors
- kParamIndexSupportedParams,
- kParamIndexRequiredParams,
- kParamIndexReadOnlyParams,
- kParamIndexRequestedInfos,
-
- /// latency
- kParamIndexLatency,
-
- // generic time behavior
- kParamIndexTemporal,
-
- /// port configuration
- kParamIndexMime,
- kParamIndexStreamCount,
- kParamIndexFormat,
-
- // video info
-
- kParamIndexStructStart = 0x1,
- kParamIndexVideoSize,
- kParamIndexMaxVideoSizeHint,
-
- kParamIndexParamStart = 0x800,
-};
-
-C2ENUM(C2DomainKind, int32_t,
- C2DomainVideo,
- C2DomainAudio,
- C2DomainOther = C2DomainAudio + 1
-);
-
-// read-only
-
-typedef C2GlobalParam<C2Info, C2SimpleValueStruct<C2DomainKind>, kParamIndexDomain> C2ComponentDomainInfo;
-// typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexDomain> C2ComponentDomainInfo;
-//DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<C2DomainKind>, { C2FIELD(mValue, "value") });
-
-// read-only
-typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexSupportedParams> C2SupportedParamsInfo;
-
-/// \todo do we define it as a param?
-// read-only
-typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexRequiredParams> C2RequiredParamsInfo;
-
-// read-only
-typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexReadOnlyParams> C2ReadOnlyParamsInfo;
-
-// read-only
-typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexRequestedInfos> C2RequestedInfosInfo;
-
-// read-only
-//typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexRequestedInfos> C2RequestedInfosInfo;
-
-/// latency
-
-typedef C2PortParam<C2Info, C2Uint32Value, kParamIndexLatency> C2PortLatencyInfo;
-
-typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexLatency> C2ComponentLatencyInfo;
-
-/// \todo
-typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexTemporal> C2ComponentTemporalInfo;
-
-/// port configuration
-
-typedef C2PortParam<C2Tuning, C2StringValue, kParamIndexMime> C2PortMimeConfig;
-
-typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexStreamCount> C2PortStreamCountConfig;
-
-typedef C2StreamParam<C2Tuning, C2StringValue, kParamIndexMime> C2StreamMimeConfig;
-
-C2ENUM(C2FormatKind, uint32_t,
- C2FormatCompressed,
- C2FormatAudio = 1,
- C2FormatVideo = 4,
-)
-
-typedef C2StreamParam<C2Tuning, C2Uint32Value, kParamIndexFormat> C2StreamFormatConfig;
-
-/*
- Component description fields:
-
-// format (video/compressed/audio/other-do we need other?) per stream
-
-// likely some of these are exposed as separate settings:
-
-struct C2BaseTuning {
- // latency characteristics
- uint32_t latency;
- bool temporal; // seems this only makes sense if latency is 1..., so this could be captured as latency = 0
- uint32_t delay;
-
- uint32_t numInputStreams; // RW? - or suggestion only: RO
- uint32_t numOutputStreams; // RW
- //
- // refs characteristics (per stream?)
- uint32_t maxInputRefs; // RO
- uint32_t maxOutputRefs; // RO
- uint32_t maxInputMemory; // RO - max time refs are held for
- uint32_t maxOutputMemory; // RO
-
- // per stream
- bool compressed;
- // format... video/compressed/audio/other?
- // actual "audio/video" format type
- uint32_t width/height? is this needed, or just queue...
- // mime...
-};
-*/
-
-
-
-
-
-
-// overall component
-// => C: domain: audio or video
-// => C: kind: decoder, encoder or filter
-// => "mime" class
-
-// => C: temporal (bool) => does this depend on ordering?
-// => I: latency
-// => I: history max duration...
-// => I: history max frames kept...
-// => I: reordering depth
-// => I: frc (bool) (perhaps ratio?)
-// => I: current frc
-
-// - pause
-// => last frame 'number' processed
-// => current frame 'number' processed
-// => invalid settings =>[]
-
-// video decoder configuration: // audio
-// - encoding // -encoding
-// - hint: max width/height // -hint: sample rate, channels
-// - hint: profile/level // -hint: tools used
-// - hint: framerate (bitrate?) // -hint: bitrate
-// - default: color space (from container)
-// - hint: color format // -hint: pcm-encoding
-// - hint: # of views (e.g. MVC) // -hint?: channel groups
-// - default: HDR static info (from container) // -hint?: channel mappings
-// - hint: rotation (e.g. for allocator)
-
-// => # of streams required and their formats? (setting?)
-// => # of streams produced and their formats? (tuning)
-
-// => output
-// - # of views // -channel groups && channel mappings
-// - width/height/crop/color format/color space/HDR static info (from buffers)
-// (as required by the allocator & framework)
-// - SEI (or equivalent) <= [port]
-// - CC
-// - reference info
-
-// video encoder configurations
-// - encoding // - encoding
-// - hint: width/height // - hint: sample rate, channels
-// - hint: frame rate
-// - hint: max width/height (? does this differ from width/height?)
-// - # of input (e.g. MVC) // - hint: # groups and mappings
-// - # of output (e.g. SVC) => bitrates/width/height/framerates? per stream
-// - hint: profile/level // - hint: profile/level
-// - HDR static info + (info: HDR)
-// - color space
-// - hint: color format? // - hint: pcm encoding
-// - SEI
-// - CC
-// - reference directive
-// - hint: bitrate (or quality) // - hint: bitrate/quality
-// - optional: codec-specific parameters // - optional: csd
-
-// => output // => output
-// - layers per stream? // E-AC3?... DTS?...Dolby-Vision?
-// - reference info
-
-
-// RM:
-// - need SPS for full knowledge => component should return max. (component can use less)
-// - critical parameters? (interlaced? profile? level?)
-
-struct C2VideoSizeStruct {
- int32_t mWidth; ///< video width
- int32_t mHeight; ///< video height
-
- DEFINE_AND_DESCRIBE_C2STRUCT(VideoSize)
- C2FIELD(mWidth, "width")
- C2FIELD(mHeight, "height")
-};
-
-// video size for video decoder [OUT]
-typedef C2StreamParam<C2Info, C2VideoSizeStruct> C2VideoSizeStreamInfo;
-
-// max video size for video decoder [IN]
-typedef C2PortParam<C2Setting, C2VideoSizeStruct, kParamIndexMaxVideoSizeHint> C2MaxVideoSizeHintPortSetting;
-
-// video encoder size [IN]
-typedef C2StreamParam<C2Tuning, C2VideoSizeStruct> C2VideoSizeStreamTuning;
-
-/// @}
-
-} // namespace android
-
-#endif
diff --git a/media/libstagefright/codec2/include/C2Param.h b/media/libstagefright/codec2/include/C2Param.h
deleted file mode 100644
index fd43061..0000000
--- a/media/libstagefright/codec2/include/C2Param.h
+++ /dev/null
@@ -1,1171 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef C2PARAM_H_
-#define C2PARAM_H_
-
-#include <C2.h>
-
-#include <stdbool.h>
-#include <stdint.h>
-
-#include <algorithm>
-#include <list>
-#include <string>
-#include <type_traits>
-
-#define C2_PACK __attribute__((packed))
-
-namespace android {
-
-/// \addtogroup Parameters
-/// @{
-
-/// \defgroup internal Internal helpers.
-
-/*!
- * \file
- * PARAMETERS: SETTINGs, TUNINGs, and INFOs
- * ===
- *
- * These represent miscellaneous control and metadata information and are likely copied into
- * kernel space. Therefore, these are C-like structures designed to carry just a small amount of
- * information. We are using C++ to be able to add constructors, as well as non-virtual and class
- * methods.
- *
- * ==Specification details:
- *
- * Restrictions:
- * - must be POD struct, e.g. no vtable (no virtual destructor)
- * - must have the same size in 64-bit and 32-bit mode (no size_t)
- * - as such, no pointer members
- *
- * Behavior:
- * - Params can be global (not related to input or output), related to input or output,
- * or related to an input/output stream.
- * - All params are queried/set using a unique param index, which incorporates a potential stream
- * index and/or port.
- * - Querying (supported) params MUST never fail.
- * - All params MUST have default values.
- * - If some fields have "unsupported" or "invalid" values during setting, this SHOULD be
- * communicated to the app.
- * a) Ideally, this should be avoided. When setting parameters, in general, the component should do
- * "best effort" to apply all settings. It should change "invalid/unsupported" values to the
- * nearest supported values.
- * - This is communicated to the client by changing the source values in tune()/
- * configure().
- * b) If falling back to a supported value is absolutely impossible, the component SHALL return
- * an error for the specific setting, but should continue to apply other settings.
- * TODO: this currently may lead to unintended results.
- *
- * **NOTE:** unlike OMX, params are not versioned. Instead, a new struct with new base index
- * SHALL be added as new versions are required.
- *
- * The proper subtype (Setting, Info or Param) is incorporated into the class type. Define structs
- * to define multiple subtyped versions of related parameters.
- *
- * ==Implementation details:
- *
- * - Use macros to define parameters
- * - All parameters must have a default constructor
- * - This is only used for instantiating the class in source (e.g. will not be used
- * when building a parameter by the framework from key/value pairs.)
- */
-
-/// \ingroup internal
-struct _C2ParamManipulator;
-
-/**
- * Parameter base class.
- */
-struct C2Param {
- // param index encompasses the following:
- //
- // - type (setting, tuning, info, struct)
- // - vendor extension flag
- // - flexible parameter flag
- // - direction (global, input, output)
- // - stream flag
- // - stream ID (usually 0)
- //
- // layout:
- //
- // +------+-----+---+------+--------+----+------+--------------+
- // | kind | dir | - |stream|streamID|flex|vendor| base index |
- // +------+-----+---+------+--------+----+------+--------------+
- // bit: 31..30 29..28 25 24 .. 17 16 15 14 .. 0
- //
-public:
- /**
- * C2Param kinds, usable as bitmaps.
- */
- enum Kind : uint32_t {
- NONE = 0,
- STRUCT = (1 << 0),
- INFO = (1 << 1),
- SETTING = (1 << 2),
- TUNING = (1 << 3) | SETTING, // tunings are settings
- };
-
- /**
- * base index (including the vendor extension bit) is a global index for
- * C2 parameter structs. (e.g. the same indices cannot be reused for different
- * structs for different components).
- */
- struct BaseIndex {
- protected:
- enum : uint32_t {
- kTypeMask = 0xC0000000,
- kTypeStruct = 0x00000000,
- kTypeTuning = 0x40000000,
- kTypeSetting = 0x80000000,
- kTypeInfo = 0xC0000000,
-
- kDirMask = 0x30000000,
- kDirGlobal = 0x20000000,
- kDirUndefined = 0x30000000, // MUST have all bits set
- kDirInput = 0x00000000,
- kDirOutput = 0x10000000,
-
- kStreamFlag = 0x02000000,
- kStreamIdMask = 0x01FE0000,
- kStreamIdShift = 17,
- kStreamIdMax = kStreamIdMask >> kStreamIdShift,
- kStreamMask = kStreamFlag | kStreamIdMask,
-
- kFlexibleFlag = 0x00010000,
- kVendorFlag = 0x00008000,
- kParamMask = 0x0000FFFF,
- kBaseMask = kParamMask | kFlexibleFlag,
- };
-
- public:
- enum : uint32_t {
- kVendorStart = kVendorFlag, ///< vendor structs SHALL start after this
- _kFlexibleFlag = kFlexibleFlag, // TODO: this is only needed for testing
- };
-
- /// constructor/conversion from uint32_t
- inline BaseIndex(uint32_t index) : mIndex(index) { }
-
- // no conversion from uint64_t
- inline BaseIndex(uint64_t index) = delete;
-
- /// returns true iff this is a vendor extension parameter
- inline bool isVendor() const { return mIndex & kVendorFlag; }
-
- /// returns true iff this is a flexible parameter (with variable size)
- inline bool isFlexible() const { return mIndex & kFlexibleFlag; }
-
- /// returns the base type: the index for the underlying struct
- inline unsigned int baseIndex() const { return mIndex & kBaseMask; }
-
- /// returns the param index for the underlying struct
- inline unsigned int paramIndex() const { return mIndex & kParamMask; }
-
- DEFINE_FIELD_BASED_COMPARISON_OPERATORS(BaseIndex, mIndex)
-
- protected:
- uint32_t mIndex;
- };
-
- /**
- * type encompasses the parameter kind (tuning, setting, info), whether the
- * parameter is global, input or output, and whether it is for a stream.
- */
- struct Type : public BaseIndex {
- /// returns true iff this is a global parameter (not for input nor output)
- inline bool isGlobal() const { return (mIndex & kDirMask) == kDirGlobal; }
- /// returns true iff this is an input or input stream parameter
- inline bool forInput() const { return (mIndex & kDirMask) == kDirInput; }
- /// returns true iff this is an output or output stream parameter
- inline bool forOutput() const { return (mIndex & kDirMask) == kDirOutput; }
-
- /// returns true iff this is a stream parameter
- inline bool forStream() const { return mIndex & kStreamFlag; }
- /// returns true iff this is a port (input or output) parameter
- inline bool forPort() const { return !forStream() && !isGlobal(); }
-
- /// returns the parameter type: the parameter index without the stream ID
- inline uint32_t type() const { return mIndex & (~kStreamIdMask); }
-
- /// return the kind of this param
- inline Kind kind() const {
- switch (mIndex & kTypeMask) {
- case kTypeStruct: return STRUCT;
- case kTypeInfo: return INFO;
- case kTypeSetting: return SETTING;
- case kTypeTuning: return TUNING;
- default: return NONE; // should not happen
- }
- }
-
- /// constructor/conversion from uint32_t
- inline Type(uint32_t index) : BaseIndex(index) { }
-
- // no conversion from uint64_t
- inline Type(uint64_t index) = delete;
-
- private:
- friend struct C2Param; // for setPort()
- friend struct C2Tuning; // for kTypeTuning
- friend struct C2Setting; // for kTypeSetting
- friend struct C2Info; // for kTypeInfo
- // for kDirGlobal
- template<typename T, typename S, int I, class F> friend struct C2GlobalParam;
- template<typename T, typename S, int I, class F> friend struct C2PortParam; // for kDir*
- template<typename T, typename S, int I, class F> friend struct C2StreamParam; // for kDir*
- friend struct _C2ParamInspector; // for testing
-
- /**
- * Sets the port/stream direction.
- * @return true on success, false if could not set direction (e.g. it is global param).
- */
- inline bool setPort(bool output) {
- if (isGlobal()) {
- return false;
- } else {
- mIndex = (mIndex & ~kDirMask) | (output ? kDirOutput : kDirInput);
- return true;
- }
- }
- };
-
- /**
- * index encompasses all remaining information: basically the stream ID.
- */
- struct Index : public Type {
- /// returns the index as uint32_t
- inline operator uint32_t() const { return mIndex; }
-
- /// constructor/conversion from uint32_t
- inline Index(uint32_t index) : Type(index) { }
-
- // no conversion from uint64_t
- inline Index(uint64_t index) = delete;
-
- /// returns the stream ID or ~0 if not a stream
- inline unsigned stream() const {
- return forStream() ? rawStream() : ~0U;
- }
-
- private:
- friend struct C2Param; // for setStream, makeStreamId, isValid
- friend struct _C2ParamInspector; // for testing
-
- /**
- * @return true if the type is valid, e.g. direction is not undefined AND
- * stream is 0 if not a stream param.
- */
- inline bool isValid() const {
- // there is no Type::isValid (even though some of this check could be
- // performed on types) as this is only used on index...
- return (forStream() ? rawStream() < kStreamIdMax : rawStream() == 0)
- && (mIndex & kDirMask) != kDirUndefined;
- }
-
- /// returns the raw stream ID field
- inline unsigned rawStream() const {
- return (mIndex & kStreamIdMask) >> kStreamIdShift;
- }
-
- /// returns the streamId bitfield for a given |stream|. If stream is invalid,
- /// returns an invalid bitfield.
- inline static uint32_t makeStreamId(unsigned stream) {
- // saturate stream ID (max value is invalid)
- if (stream > kStreamIdMax) {
- stream = kStreamIdMax;
- }
- return (stream << kStreamIdShift) & kStreamIdMask;
- }
-
- /**
- * Sets the stream index.
- * \return true on success, false if could not set index (e.g. not a stream param).
- */
- inline bool setStream(unsigned stream) {
- if (forStream()) {
- mIndex = (mIndex & ~kStreamIdMask) | makeStreamId(stream);
- return this->stream() < kStreamIdMax;
- }
- return false;
- }
- };
-
-public:
- // public getters for Index methods
-
- /// returns true iff this is a vendor extension parameter
- inline bool isVendor() const { return _mIndex.isVendor(); }
- /// returns true iff this is a flexible parameter
- inline bool isFlexible() const { return _mIndex.isFlexible(); }
- /// returns true iff this is a global parameter (not for input nor output)
- inline bool isGlobal() const { return _mIndex.isGlobal(); }
- /// returns true iff this is an input or input stream parameter
- inline bool forInput() const { return _mIndex.forInput(); }
- /// returns true iff this is an output or output stream parameter
- inline bool forOutput() const { return _mIndex.forOutput(); }
-
- /// returns true iff this is a stream parameter
- inline bool forStream() const { return _mIndex.forStream(); }
- /// returns true iff this is a port (input or output) parameter
- inline bool forPort() const { return _mIndex.forPort(); }
-
- /// returns the stream ID or ~0 if not a stream
- inline unsigned stream() const { return _mIndex.stream(); }
-
- /// returns the parameter type: the parameter index without the stream ID
- inline uint32_t type() const { return _mIndex.type(); }
-
- /// returns the kind of this parameter
- inline Kind kind() const { return _mIndex.kind(); }
-
- /// returns the size of the parameter or 0 if the parameter is invalid
- inline size_t size() const { return _mSize; }
-
- /// returns true iff the parameter is valid
- inline operator bool() const { return _mIndex.isValid() && _mSize > 0; }
-
- /// returns true iff the parameter is invalid
- inline bool operator!() const { return !operator bool(); }
-
- // equality is done by memcmp (use equals() to prevent any overread)
- inline bool operator==(const C2Param &o) const {
- return equals(o) && memcmp(this, &o, _mSize) == 0;
- }
- inline bool operator!=(const C2Param &o) const { return !operator==(o); }
-
- /// safe(r) type cast from pointer and size
- inline static C2Param* From(void *addr, size_t len) {
- // _mSize must fit into size
- if (len < sizeof(_mSize) + offsetof(C2Param, _mSize)) {
- return nullptr;
- }
- // _mSize must match length
- C2Param *param = (C2Param*)addr;
- if (param->_mSize != len) {
- return nullptr;
- }
- return param;
- }
-
-#if 0
- template<typename P, class=decltype(C2Param(P()))>
- P *As() { return P::From(this); }
- template<typename P>
- const P *As() const { return const_cast<const P*>(P::From(const_cast<C2Param*>(this))); }
-#endif
-
-protected:
- /// sets the stream field. Returns true iff successful.
- inline bool setStream(unsigned stream) {
- return _mIndex.setStream(stream);
- }
-
- /// sets the port (direction). Returns true iff successful.
- inline bool setPort(bool output) {
- return _mIndex.setPort(output);
- }
-
-public:
- /// invalidate this parameter. There is no recovery from this call; e.g. parameter
- /// cannot be 'corrected' to be valid.
- inline void invalidate() { _mSize = 0; }
-
- // if other is the same kind of (valid) param as this, copy it into this and return true.
- // otherwise, do not copy anything, and return false.
- inline bool updateFrom(const C2Param &other) {
- if (other._mSize == _mSize && other._mIndex == _mIndex && _mSize > 0) {
- memcpy(this, &other, _mSize);
- return true;
- }
- return false;
- }
-
-protected:
- // returns |o| if it is a null ptr, or if can suitably be a param of given |type| (e.g. has
- // same type (ignoring stream ID), and size). Otherwise, returns null. If |checkDir| is false,
- // allow undefined or different direction (e.g. as constructed from C2PortParam() vs.
- // C2PortParam::input), but still require equivalent type (stream, port or global); otherwise,
- // return null.
- inline static const C2Param* ifSuitable(
- const C2Param* o, size_t size, Type type, size_t flexSize = 0, bool checkDir = true) {
- if (o == nullptr || o->_mSize < size || (flexSize && ((o->_mSize - size) % flexSize))) {
- return nullptr;
- } else if (checkDir) {
- return o->_mIndex.type() == type.mIndex ? o : nullptr;
- } else if (o->_mIndex.isGlobal()) {
- return nullptr;
- } else {
- return ((o->_mIndex.type() ^ type.mIndex) & ~Type::kDirMask) ? nullptr : o;
- }
- }
-
- /// base constructor
- inline C2Param(uint32_t paramSize, Index paramIndex)
- : _mSize(paramSize),
- _mIndex(paramIndex) {
- if (paramSize > sizeof(C2Param)) {
- memset(this + 1, 0, paramSize - sizeof(C2Param));
- }
- }
-
- /// base constructor with stream set
- inline C2Param(uint32_t paramSize, Index paramIndex, unsigned stream)
- : _mSize(paramSize),
- _mIndex(paramIndex | Index::makeStreamId(stream)) {
- if (paramSize > sizeof(C2Param)) {
- memset(this + 1, 0, paramSize - sizeof(C2Param));
- }
- if (!forStream()) {
- invalidate();
- }
- }
-
-private:
- friend struct _C2ParamInspector; // for testing
-
- /// returns the base type: the index for the underlying struct (for testing
- /// as this can be gotten by the baseIndex enum)
- inline uint32_t _baseIndex() const { return _mIndex.baseIndex(); }
-
- /// returns true iff |o| has the same size and index as this. This performs the
- /// basic check for equality.
- inline bool equals(const C2Param &o) const {
- return _mSize == o._mSize && _mIndex == o._mIndex;
- }
-
- uint32_t _mSize;
- Index _mIndex;
-};
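For illustration, a hedged sketch of the validation helpers C2Param declares above; the blob source and the helper name are hypothetical.

// Hypothetical sketch: reconstitute a serialized parameter via C2Param::From()
// (which checks that the size field matches the buffer length) and copy it into
// an existing parameter of the same type with updateFrom().
bool applySerializedParam(void *blob, size_t blobSize, C2Param *target) {
    C2Param *received = C2Param::From(blob, blobSize);
    if (received == nullptr || !(*received)) {
        return false;                     // malformed or invalidated parameter
    }
    return target->updateFrom(*received); // only succeeds for matching index and size
}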
-
-/// \ingroup internal
-/// allow C2Params access to private methods, e.g. constructors
-#define C2PARAM_MAKE_FRIENDS \
- template<typename U, typename S, int I, class F> friend struct C2GlobalParam; \
- template<typename U, typename S, int I, class F> friend struct C2PortParam; \
- template<typename U, typename S, int I, class F> friend struct C2StreamParam; \
-
-/**
- * Setting base structure for component method signatures. Wrap constructors.
- */
-struct C2Setting : public C2Param {
-protected:
- template<typename ...Args>
- inline C2Setting(const Args(&... args)) : C2Param(args...) { }
-public: // TODO
- enum : uint32_t { indexFlags = Type::kTypeSetting };
-};
-
-/**
- * Tuning base structure for component method signatures. Wrap constructors.
- */
-struct C2Tuning : public C2Setting {
-protected:
- template<typename ...Args>
- inline C2Tuning(const Args(&... args)) : C2Setting(args...) { }
-public: // TODO
- enum : uint32_t { indexFlags = Type::kTypeTuning };
-};
-
-/**
- * Info base structure for component method signatures. Wrap constructors.
- */
-struct C2Info : public C2Param {
-protected:
- template<typename ...Args>
- inline C2Info(const Args(&... args)) : C2Param(args...) { }
-public: // TODO
- enum : uint32_t { indexFlags = Type::kTypeInfo };
-};
-
-/**
- * Structure uniquely specifying a field in an arbitrary structure.
- *
- * \note This structure is used differently in C2FieldDescriptor to
- * identify array fields, such that _mSize is the size of each element. This is
- * because the field descriptor contains the array-length, and we want to keep
- * a relevant element size for variable length arrays.
- */
-struct _C2FieldId {
-//public:
- /**
- * Constructor used for C2FieldDescriptor that removes the array extent.
- *
- * \param[in] offset pointer to the field in an object at address 0.
- */
- template<typename T, class B=typename std::remove_extent<T>::type>
- inline _C2FieldId(T* offset)
- : // offset is from "0" so will fit on 32-bits
- _mOffset((uint32_t)(uintptr_t)(offset)),
- _mSize(sizeof(B)) { }
-
- /**
- * Direct constructor from offset and size.
- *
- * \param[in] offset offset of the field.
- * \param[in] size size of the field.
- */
- inline _C2FieldId(size_t offset, size_t size)
- : _mOffset(offset), _mSize(size) {}
-
- /**
- * Constructor used to identify a field in an object.
- *
- * \tparam U type of the object that contains this field. This is needed in case the
- * field is in an (inherited) base class, in which case T will be that base class.
- * \param[in] pm member pointer to the field
- */
- template<typename R, typename T, typename U, typename B=typename std::remove_extent<R>::type>
- inline _C2FieldId(U *, R T::* pm)
- : _mOffset((uint32_t)(uintptr_t)(&(((U*)256)->*pm)) - 256u),
- _mSize(sizeof(B)) { }
-
- /**
- * Constructor used to identify a field in an object.
- *
- * \tparam U type of the object that contains this field
- * \param[in] pm member pointer to the field
- */
- template<typename R, typename T, typename B=typename std::remove_extent<R>::type>
- inline _C2FieldId(R T::* pm)
- : _mOffset((uint32_t)(uintptr_t)(&(((T*)0)->*pm))),
- _mSize(sizeof(B)) { }
-
- inline bool operator==(const _C2FieldId &other) const {
- return _mOffset == other._mOffset && _mSize == other._mSize;
- }
-
- inline bool operator<(const _C2FieldId &other) const {
- return _mOffset < other._mOffset ||
- // NOTE: order parent structure before sub field
- (_mOffset == other._mOffset && _mSize > other._mSize);
- }
-
- DEFINE_OTHER_COMPARISON_OPERATORS(_C2FieldId)
-
-#if 0
- inline uint32_t offset() const { return _mOffset; }
- inline uint32_t size() const { return _mSize; }
-#endif
-
-#if defined(FRIEND_TEST)
- friend void PrintTo(const _C2FieldId &d, ::std::ostream*);
-#endif
-
-private:
- uint32_t _mOffset; // offset of field
- uint32_t _mSize; // size of field
-};
-
-/**
- * Structure uniquely specifying a field in a configuration
- */
-struct C2ParamField {
-//public:
- // TODO: fix what this is for T[] (for now size becomes T[1])
- template<typename S, typename T>
- inline C2ParamField(S* param, T* offset)
- : _mIndex(param->index()),
- _mFieldId(offset) {}
-
- template<typename R, typename T, typename U>
- inline C2ParamField(U *p, R T::* pm) : _mIndex(p->type()), _mFieldId(p, pm) { }
-
- inline bool operator==(const C2ParamField &other) const {
- return _mIndex == other._mIndex && _mFieldId == other._mFieldId;
- }
-
- inline bool operator<(const C2ParamField &other) const {
- return _mIndex < other._mIndex ||
- (_mIndex == other._mIndex && _mFieldId < other._mFieldId);
- }
-
- DEFINE_OTHER_COMPARISON_OPERATORS(C2ParamField)
-
-private:
- C2Param::Index _mIndex;
- _C2FieldId _mFieldId;
-};
-
-/**
- * A shared (union) representation of numeric values
- */
-class C2Value {
-public:
- /// A union of supported primitive types.
- union Primitive {
- int32_t i32; ///< int32_t value
- uint32_t u32; ///< uint32_t value
- int64_t i64; ///< int64_t value
- uint64_t u64; ///< uint64_t value
- float fp; ///< float value
-
- // constructors - implicit
- Primitive(int32_t value) : i32(value) { }
- Primitive(uint32_t value) : u32(value) { }
- Primitive(int64_t value) : i64(value) { }
- Primitive(uint64_t value) : u64(value) { }
- Primitive(float value) : fp(value) { }
-
- Primitive() : u64(0) { }
-
- private:
- friend class C2Value;
- template<typename T> const T &ref() const;
- };
-
- enum Type {
- NO_INIT,
- INT32,
- UINT32,
- INT64,
- UINT64,
- FLOAT,
- };
-
- template<typename T> static constexpr Type typeFor();
-
- // constructors - implicit
- template<typename T>
- C2Value(T value) : mType(typeFor<T>()), mValue(value) { }
-
- C2Value() : mType(NO_INIT) { }
-
- inline Type type() const { return mType; }
-
- template<typename T>
- inline bool get(T *value) const {
- if (mType == typeFor<T>()) {
- *value = mValue.ref<T>();
- return true;
- }
- return false;
- }
-
-private:
- Type mType;
- Primitive mValue;
-};
-
-template<> const int32_t &C2Value::Primitive::ref<int32_t>() const { return i32; }
-template<> const int64_t &C2Value::Primitive::ref<int64_t>() const { return i64; }
-template<> const uint32_t &C2Value::Primitive::ref<uint32_t>() const { return u32; }
-template<> const uint64_t &C2Value::Primitive::ref<uint64_t>() const { return u64; }
-template<> const float &C2Value::Primitive::ref<float>() const { return fp; }
-
-template<> constexpr C2Value::Type C2Value::typeFor<int32_t>() { return INT32; }
-template<> constexpr C2Value::Type C2Value::typeFor<int64_t>() { return INT64; }
-template<> constexpr C2Value::Type C2Value::typeFor<uint32_t>() { return UINT32; }
-template<> constexpr C2Value::Type C2Value::typeFor<uint64_t>() { return UINT64; }
-template<> constexpr C2Value::Type C2Value::typeFor<float>() { return FLOAT; }
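A brief sketch of the typed accessor pattern C2Value provides above; the values and function name are arbitrary.

// Hypothetical sketch: get() only succeeds when the requested type matches the stored one.
void c2ValueExample() {
    C2Value value(uint32_t(44100));  // stored as UINT32 via typeFor<uint32_t>()
    uint32_t asU32 = 0;
    int32_t asI32 = 0;
    bool okU32 = value.get(&asU32);  // true: types match
    bool okI32 = value.get(&asI32);  // false: INT32 != UINT32; asI32 is left untouched
    (void)okU32; (void)okI32;
}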
-
-/**
- * field descriptor. A field is uniquely defined by an index into a parameter.
- * (Note: Stream-id is not captured as a field.)
- *
- * Ordering of fields is by offset. In case of structures, it is depth first,
- * with a structure taking an index just before and in addition to its members.
- */
-struct C2FieldDescriptor {
-//public:
- /** field types and flags
- * \note: only 32-bit and 64-bit fields are supported (e.g. no boolean, as that
- * is represented using INT32).
- */
- enum Type : uint32_t {
- // primitive types
- INT32 = C2Value::INT32, ///< 32-bit signed integer
- UINT32 = C2Value::UINT32, ///< 32-bit unsigned integer
- INT64 = C2Value::INT64, ///< 64-bit signed integer
- UINT64 = C2Value::UINT64, ///< 64-bit unsigned integer
- FLOAT = C2Value::FLOAT, ///< 32-bit floating point
-
- // array types
- STRING = 0x100, ///< fixed-size string (POD)
- BLOB, ///< blob. Blobs have no sub-elements and can be thought of as byte arrays;
- ///< however, bytes cannot be individually addressed by clients.
-
- // complex types
- STRUCT_FLAG = 0x10000, ///< structs. Marked with this flag in addition to their baseIndex.
- };
-
- typedef std::pair<C2String, C2Value::Primitive> named_value_type;
- typedef std::vector<const named_value_type> named_values_type;
- //typedef std::pair<std::vector<C2String>, std::vector<C2Value::Primitive>> named_values_type;
-
- /**
- * Template specialization that returns the named values for a type.
- *
- * \todo hide from client.
- *
- * \return a vector of name-value pairs.
- */
- template<typename B>
- static named_values_type namedValuesFor(const B &);
-
- inline C2FieldDescriptor(uint32_t type, uint32_t length, C2StringLiteral name, size_t offset, size_t size)
- : _mType((Type)type), _mLength(length), _mName(name), _mFieldId(offset, size) { }
-
- template<typename T, class B=typename std::remove_extent<T>::type>
- inline C2FieldDescriptor(const T* offset, const char *name)
- : _mType(this->getType((B*)nullptr)),
- _mLength(std::is_array<T>::value ? std::extent<T>::value : 1),
- _mName(name),
- _mNamedValues(namedValuesFor(*(B*)0)),
- _mFieldId(offset) {}
-
-/*
- template<typename T, typename B=typename std::remove_extent<T>::type>
- inline C2FieldDescriptor<T, B, false>(T* offset, const char *name)
- : _mType(this->getType((B*)nullptr)),
- _mLength(std::is_array<T>::value ? std::extent<T>::value : 1),
- _mName(name),
- _mFieldId(offset) {}
-*/
-
- /// \deprecated
- template<typename T, typename S, class B=typename std::remove_extent<T>::type>
- constexpr inline C2FieldDescriptor(S*, T S::* field, const char *name)
- : _mType(this->getType((B*)nullptr)),
- _mLength(std::is_array<T>::value ? std::extent<T>::value : 1),
- _mName(name),
- _mFieldId(&(((S*)0)->*field)) {}
-
- /// returns the type of this field
- inline Type type() const { return _mType; }
- /// returns the length of the field in case it is an array. Returns 0 for
- /// T[] arrays, returns 1 for T[1] arrays as well as if the field is not an array.
- inline size_t length() const { return _mLength; }
- /// returns the name of the field
- inline C2StringLiteral name() const { return _mName; }
-
- const named_values_type &namedValues() const { return _mNamedValues; }
-
-#if defined(FRIEND_TEST)
- friend void PrintTo(const C2FieldDescriptor &, ::std::ostream*);
- friend bool operator==(const C2FieldDescriptor &, const C2FieldDescriptor &);
- FRIEND_TEST(C2ParamTest_ParamFieldList, VerifyStruct);
-#endif
-
-private:
- const Type _mType;
- const uint32_t _mLength; // the last member can be arbitrary length if it is T[] array,
- // extending to the end of the parameter (this is marked with
- // 0). T[0]-s are not fields.
- const C2StringLiteral _mName;
- const named_values_type _mNamedValues;
-
- const _C2FieldId _mFieldId; // field identifier (offset and size)
-
- // NOTE: We do not capture default value(s) here as that may depend on the component.
- // NOTE: We also do not capture bestEffort, as 1) this should be true for most fields,
- // 2) this is at parameter granularity.
-
- // type resolution
- inline static Type getType(int32_t*) { return INT32; }
- inline static Type getType(uint32_t*) { return UINT32; }
- inline static Type getType(int64_t*) { return INT64; }
- inline static Type getType(uint64_t*) { return UINT64; }
- inline static Type getType(float*) { return FLOAT; }
- inline static Type getType(char*) { return STRING; }
- inline static Type getType(uint8_t*) { return BLOB; }
-
- template<typename T,
- class=typename std::enable_if<std::is_enum<T>::value>::type>
- inline static Type getType(T*) {
- typename std::underlying_type<T>::type underlying(0);
- return getType(&underlying);
- }
-
- // verify C2Struct by having a fieldList and a baseIndex.
- template<typename T,
- class=decltype(T::baseIndex + 1), class=decltype(T::fieldList)>
- inline static Type getType(T*) {
- static_assert(!std::is_base_of<C2Param, T>::value, "cannot use C2Params as fields");
- return (Type)(T::baseIndex | STRUCT_FLAG);
- }
-};
-
-#define DEFINE_NO_NAMED_VALUES_FOR(type) \
-template<> inline C2FieldDescriptor::named_values_type C2FieldDescriptor::namedValuesFor(const type &) { \
- return named_values_type(); \
-}
-
-// We cannot subtype constructor for enumerated types so insted define no named values for
-// non-enumerated integral types.
-DEFINE_NO_NAMED_VALUES_FOR(int32_t)
-DEFINE_NO_NAMED_VALUES_FOR(uint32_t)
-DEFINE_NO_NAMED_VALUES_FOR(int64_t)
-DEFINE_NO_NAMED_VALUES_FOR(uint64_t)
-DEFINE_NO_NAMED_VALUES_FOR(uint8_t)
-DEFINE_NO_NAMED_VALUES_FOR(char)
-DEFINE_NO_NAMED_VALUES_FOR(float)
-
-/**
- * Describes the fields of a structure.
- */
-struct C2StructDescriptor {
-public:
- /// Returns the parameter type
- inline C2Param::BaseIndex baseIndex() const { return _mType.baseIndex(); }
-
- // Returns the number of fields in this param (not counting any recursive fields).
- // Must be at least 1 for valid params.
- inline size_t numFields() const { return _mFields.size(); }
-
- // Returns the list of immediate fields (not counting any recursive fields).
- typedef std::vector<const C2FieldDescriptor>::const_iterator field_iterator;
- inline field_iterator cbegin() const { return _mFields.cbegin(); }
- inline field_iterator cend() const { return _mFields.cend(); }
-
- // only supplying const iterator - but these are needed for range based loops
- inline field_iterator begin() const { return _mFields.cbegin(); }
- inline field_iterator end() const { return _mFields.cend(); }
-
- template<typename T>
- inline C2StructDescriptor(T*)
- : C2StructDescriptor(T::baseIndex, T::fieldList) { }
-
- inline C2StructDescriptor(
- C2Param::BaseIndex type,
- std::initializer_list<const C2FieldDescriptor> fields)
- : _mType(type), _mFields(fields) { }
-
-private:
- const C2Param::BaseIndex _mType;
- const std::vector<const C2FieldDescriptor> _mFields;
-};
-
-/**
- * Describes parameters for a component.
- */
-struct C2ParamDescriptor {
-public:
- /**
- * Returns whether setting this param is required to configure this component.
- * This can only be true for builtin params for platform-defined components (e.g. video and
- * audio encoders/decoders, video/audio filters).
- * For vendor-defined components, it can be true even for vendor-defined params,
- * but it is not recommended, in case the component becomes platform-defined.
- */
- inline bool isRequired() const { return _mIsRequired; }
-
- /**
- * Returns whether this parameter is persistent. This is always true for C2Tuning and C2Setting,
- * but may be false for C2Info. If true, this parameter persists across frames and applies to
- * the current and subsequent frames. If false, this C2Info parameter only applies to the
- * current frame and is not assumed to have the same value (or even be present) on subsequent
- * frames, unless it is specified for those frames.
- */
- inline bool isPersistent() const { return _mIsPersistent; }
-
- /// Returns the name of this param.
- /// This defaults to the underlying C2Struct's name, but could be altered for a component.
- inline C2String name() const { return _mName; }
-
- /// Returns the parameter type
- /// \todo fix this
- inline C2Param::Type type() const { return _mType; }
-
- template<typename T>
- inline C2ParamDescriptor(bool isRequired, C2StringLiteral name, const T*)
- : _mIsRequired(isRequired),
- _mIsPersistent(true),
- _mName(name),
- _mType(T::typeIndex) { }
-
- inline C2ParamDescriptor(
- bool isRequired, C2StringLiteral name, C2Param::Type type)
- : _mIsRequired(isRequired),
- _mIsPersistent(true),
- _mName(name),
- _mType(type) { }
-
-private:
- const bool _mIsRequired;
- const bool _mIsPersistent;
- const C2String _mName;
- const C2Param::Type _mType;
-};
-
-/// \ingroup internal
-/// Define a structure without baseIndex.
-#define DEFINE_C2STRUCT_NO_BASE(name) \
-public: \
- typedef C2##name##Struct _type; /**< type name shorthand */ \
- const static std::initializer_list<const C2FieldDescriptor> fieldList; /**< structure fields */
-
-/// Define a structure with matching baseIndex.
-#define DEFINE_C2STRUCT(name) \
-public: \
- enum : uint32_t { baseIndex = kParamIndex##name }; \
- DEFINE_C2STRUCT_NO_BASE(name)
-
-/// Define a flexible structure with matching baseIndex.
-#define DEFINE_FLEX_C2STRUCT(name, flexMember) \
-public: \
- FLEX(C2##name##Struct, flexMember) \
- enum : uint32_t { baseIndex = kParamIndex##name | C2Param::BaseIndex::_kFlexibleFlag }; \
- DEFINE_C2STRUCT_NO_BASE(name)
-
-/// \ingroup internal
-/// Describe the fields of a templated structure.
-#define DESCRIBE_TEMPLATED_C2STRUCT(strukt, list) \
- template<> \
- const std::initializer_list<const C2FieldDescriptor> strukt::fieldList = list;
-
-/// \deprecated
-/// Describe the fields of a structure using an initializer list.
-#define DESCRIBE_C2STRUCT(name, list) \
- const std::initializer_list<const C2FieldDescriptor> C2##name##Struct::fieldList = list;
-
-/**
- * Describe a field of a structure.
- * Fields must be described in the order they appear in the structure.
- *
- * There are two ways to use this macro:
- *
- * ~~~~~~~~~~~~~ (.cpp)
- * struct C2VideoWidthStruct {
- * int32_t mWidth;
- * C2VideoWidthStruct() {} // optional default constructor
- * C2VideoWidthStruct(int32_t _width) : mWidth(_width) {}
- *
- * DEFINE_AND_DESCRIBE_C2STRUCT(VideoWidth)
- * C2FIELD(mWidth, "width")
- * };
- * ~~~~~~~~~~~~~
- *
- * ~~~~~~~~~~~~~ (.cpp)
- * struct C2VideoWidthStruct {
- * int32_t mWidth;
- * C2VideoWidthStruct() = default; // optional default constructor
- * C2VideoWidthStruct(int32_t _width) : mWidth(_width) {}
- *
- * DEFINE_C2STRUCT(VideoWidth)
- * } C2_PACK;
- *
- * DESCRIBE_C2STRUCT(VideoWidth, {
- * C2FIELD(mWidth, "width")
- * })
- * ~~~~~~~~~~~~~
- *
- * For flexible structures (those ending in T[]), use the flexible macros:
- *
- * ~~~~~~~~~~~~~ (.cpp)
- * struct C2VideoFlexWidthsStruct {
- * int32_t mWidths[];
- * C2VideoFlexWidthsStruct(); // must have a default constructor
- *
- * private:
- * // may have private constructors taking number of widths as the first argument
- * // This is used by the C2Param factory methods, e.g.
- * // C2VideoFlexWidthsGlobalParam::alloc_unique(size_t, int32_t);
- * C2VideoFlexWidthsStruct(size_t flexCount, int32_t value) {
- * for (size_t i = 0; i < flexCount; ++i) {
- * mWidths[i] = value;
- * }
- * }
- *
- * // If the last argument is T[N] or std::initializer_list<T>, the flexCount will
- * // be automatically calculated and passed by the C2Param factory methods, e.g.
- * // int widths[] = { 1, 2, 3 };
- * // C2VideoFlexWidthsGlobalParam::alloc_unique(widths);
- * template<unsigned N>
- * C2VideoFlexWidthsStruct(size_t flexCount, const int32_t(&init)[N]) {
- * for (size_t i = 0; i < flexCount; ++i) {
- * mWidths[i] = init[i];
- * }
- * }
- *
- * DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(VideoFlexWidths, mWidths)
- * C2FIELD(mWidths, "widths")
- * };
- * ~~~~~~~~~~~~~
- *
- * ~~~~~~~~~~~~~ (.cpp)
- * struct C2VideoFlexWidthsStruct {
- * int32_t mWidths[];
- * C2VideoFlexWidthsStruct(); // must have a default constructor
- *
- * DEFINE_FLEX_C2STRUCT(VideoFlexWidths, mWidths)
- * } C2_PACK;
- *
- * DESCRIBE_C2STRUCT(VideoFlexWidths, {
- * C2FIELD(mWidths, "widths")
- * })
- * ~~~~~~~~~~~~~
- *
- */
-#define C2FIELD(member, name) \
- C2FieldDescriptor(&((_type*)(nullptr))->member, name),
-
-/// \deprecated
-#define C2SOLE_FIELD(member, name) \
- C2FieldDescriptor(&_type::member, name, 0)
-
-/// Define a structure with matching baseIndex and start describing its fields.
-/// This must be at the end of the structure definition.
-#define DEFINE_AND_DESCRIBE_C2STRUCT(name) \
- DEFINE_C2STRUCT(name) } C2_PACK; \
- const std::initializer_list<const C2FieldDescriptor> C2##name##Struct::fieldList = {
-
-/// Define a flexible structure with matching baseIndex and start describing its fields.
-/// This must be at the end of the structure definition.
-#define DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(name, flexMember) \
- DEFINE_FLEX_C2STRUCT(name, flexMember) } C2_PACK; \
- const std::initializer_list<const C2FieldDescriptor> C2##name##Struct::fieldList = {
-
-/**
- * Parameter reflector class.
- *
- * This class centralizes the description of parameter structures. This can be shared
- * by multiple components as describing a parameter does not imply support of that
- * parameter. However, each supported parameter and any dependent structures within
- * must be described by the parameter reflector provided by a component.
- */
-class C2ParamReflector {
-public:
- /**
- * Describes a parameter structure.
- *
- * \param[in] paramIndex the base index of the parameter structure
- *
- * \return the description of the parameter structure
- * \retval nullptr if the parameter is not supported by this reflector
- *
- * This method shall not block and shall return immediately.
- *
- * \note this class does not take a set of indices because we would then prefer
- * to also return any dependent structures, and we don't want this logic to be
- * repeated in each reflector. Alternately, this could just return a map of all
- * descriptions, but we want to conserve memory if the client only wants the description
- * of a few indices.
- */
- virtual std::unique_ptr<C2StructDescriptor> describe(C2Param::BaseIndex paramIndex) = 0;
-
-protected:
- virtual ~C2ParamReflector() = default;
-};
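A minimal usage sketch of a reflector (the class name is hypothetical, the struct is the C2VideoWidthStruct example from the C2FIELD documentation above, and it assumes C2Param::BaseIndex compares against a struct's baseIndex constant):

    class MyReflector : public C2ParamReflector {
    public:
        virtual std::unique_ptr<C2StructDescriptor> describe(C2Param::BaseIndex paramIndex) override {
            if (paramIndex == C2VideoWidthStruct::baseIndex) {
                // built via the templated C2StructDescriptor(T*) constructor declared above
                return std::unique_ptr<C2StructDescriptor>(
                        new C2StructDescriptor((C2VideoWidthStruct*)nullptr));
            }
            return nullptr; // structure not known to this reflector
        }
    };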
-
-/**
- * Usable supported values for a field.
- *
- * This can be either a range or a set of values. The range can be linear or geometric with a
- * clear minimum and maximum value, and can have an optional step size or geometric ratio. Values
- * can optionally represent flags.
- *
- * \note Do not use flags to represent bitfields. Use individual values or separate fields instead.
- */
-template<typename T>
-struct C2TypedFieldSupportedValues {
-//public:
- enum Type {
- RANGE, ///< a numeric range that can be continuous or discrete
- VALUES, ///< a list of values
- FLAGS ///< a list of flags that can be OR-ed
- };
-
- Type type;
-
- struct {
- T min;
- T max;
- T step;
- T nom;
- T denom;
- } range;
- std::vector<T> values;
-
- C2TypedFieldSupportedValues(T min, T max, T step = T(std::is_floating_point<T>::value ? 0 : 1))
- : type(RANGE),
- range{min, max, step, (T)1, (T)1} { }
-
- C2TypedFieldSupportedValues(T min, T max, T nom, T den) :
- type(RANGE),
- range{min, max, (T)0, nom, den} { }
-
- C2TypedFieldSupportedValues(bool flags, std::initializer_list<T> list) :
- type(flags ? FLAGS : VALUES),
- values(list) {}
-};
-
-/**
- * Generic supported values for a field.
- *
- * This can be either a range or a set of values. The range can be linear or geometric with a
- * clear minimum and maximum value, and can have an optional step size or geometric ratio. Values
- * can optionally represent flags.
- *
- * \note Do not use flags to represent bitfields. Use individual values or separate fields instead.
- */
-struct C2FieldSupportedValues {
-//public:
- enum Type {
- RANGE, ///< a numeric range that can be continuous or discrete
- VALUES, ///< a list of values
- FLAGS ///< a list of flags that can be OR-ed
- };
-
- Type type;
-
- typedef C2Value::Primitive Primitive;
-
- struct {
- Primitive min;
- Primitive max;
- Primitive step;
- Primitive nom;
- Primitive denom;
- } range;
- std::vector<Primitive> values;
-
- template<typename T>
- C2FieldSupportedValues(T min, T max, T step = T(std::is_floating_point<T>::value ? 0 : 1))
- : type(RANGE),
- range{min, max, step, (T)1, (T)1} { }
-
- template<typename T>
- C2FieldSupportedValues(T min, T max, T nom, T den) :
- type(RANGE),
- range{min, max, (T)0, nom, den} { }
-
- template<typename T>
- C2FieldSupportedValues(bool flags, std::initializer_list<T> list)
- : type(flags ? FLAGS : VALUES),
- range{(T)0, (T)0, (T)0, (T)0, (T)0} {
- for(T value : list) {
- values.emplace_back(value);
- }
- }
-
- template<typename T, typename E=decltype(C2FieldDescriptor::namedValuesFor(*(T*)0))>
- C2FieldSupportedValues(bool flags, const T*)
- : type(flags ? FLAGS : VALUES),
- range{(T)0, (T)0, (T)0, (T)0, (T)0} {
- C2FieldDescriptor::named_values_type named = C2FieldDescriptor::namedValuesFor(*(T*)0);
- for (const C2FieldDescriptor::named_value_type &item : named) {
- values.emplace_back(item.second);
- }
- }
-};
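A brief construction sketch (the values are illustrative only):

    C2FieldSupportedValues bitrate(4096, 40000000);                    // inclusive range, default step 1
    C2FieldSupportedValues sizes(1, 65536, 2, 1);                      // geometric range with ratio 2/1
    C2FieldSupportedValues levels(false /* flags */, { 1, 2, 4, 8 });  // discrete list of values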
-
-/// @}
-
-} // namespace android
-
-#endif // C2PARAM_H_
diff --git a/media/libstagefright/codec2/include/C2ParamDef.h b/media/libstagefright/codec2/include/C2ParamDef.h
deleted file mode 100644
index f369617..0000000
--- a/media/libstagefright/codec2/include/C2ParamDef.h
+++ /dev/null
@@ -1,901 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** \file
- * Templates used to declare parameters.
- */
-#ifndef C2PARAM_DEF_H_
-#define C2PARAM_DEF_H_
-
-#include <type_traits>
-
-#include <C2Param.h>
-
-namespace android {
-
-/// \addtogroup Parameters
-/// @{
-
-/* ======================== UTILITY TEMPLATES FOR PARAMETER DEFINITIONS ======================== */
-
-/// \addtogroup internal
-/// @{
-
-/// Helper class that checks if a type has equality and inequality operators.
-struct C2_HIDE _C2Comparable_impl
-{
- template<typename S, typename=decltype(S() == S())>
- static std::true_type __testEQ(int);
- template<typename>
- static std::false_type __testEQ(...);
-
- template<typename S, typename=decltype(S() != S())>
- static std::true_type __testNE(int);
- template<typename>
- static std::false_type __testNE(...);
-};
-
-/**
- * Helper template that returns if a type has equality and inequality operators.
- *
- * Use as _C2Comparable<typename S>::value.
- */
-template<typename S>
-struct C2_HIDE _C2Comparable
- : public std::integral_constant<bool, decltype(_C2Comparable_impl::__testEQ<S>(0))::value
- || decltype(_C2Comparable_impl::__testNE<S>(0))::value> {
-};
-
-/// Helper class that checks if a type has a baseIndex constant.
-struct C2_HIDE _C2BaseIndexHelper_impl
-{
- template<typename S, int=S::baseIndex>
- static std::true_type __testBaseIndex(int);
- template<typename>
- static std::false_type __testBaseIndex(...);
-};
-
-/// Helper template that verifies a type's baseIndex and creates it if the type does not have one.
-template<typename S, int BaseIndex,
- bool HasBase=decltype(_C2BaseIndexHelper_impl::__testBaseIndex<S>(0))::value>
-struct C2_HIDE C2BaseIndexOverride {
- // TODO: what if we allow structs without baseIndex?
- static_assert(BaseIndex == S::baseIndex, "baseIndex differs from structure");
-};
-
-/// Specialization for types without a baseIndex.
-template<typename S, int BaseIndex>
-struct C2_HIDE C2BaseIndexOverride<S, BaseIndex, false> {
-public:
- enum : uint32_t {
- baseIndex = BaseIndex, ///< baseIndex override.
- };
-};
-
-/// Helper template that adds a baseIndex to a type if it does not have one.
-template<typename S, int BaseIndex>
-struct C2_HIDE C2AddBaseIndex : public S, public C2BaseIndexOverride<S, BaseIndex> {};
-
-/**
- * \brief Helper class to check struct requirements for parameters.
- *
- * Features:
- * - verify default constructor, no virtual methods, and no equality operators.
- * - expose typeIndex, and non-flex flexSize.
- */
-template<typename S, int BaseIndex, unsigned TypeIndex>
-struct C2_HIDE C2StructCheck {
- static_assert(
- std::is_default_constructible<S>::value, "C2 structure must have default constructor");
- static_assert(!std::is_polymorphic<S>::value, "C2 structure must not have virtual methods");
- static_assert(!_C2Comparable<S>::value, "C2 structure must not have operator== or !=");
-
-public:
- enum : uint32_t {
- typeIndex = BaseIndex | TypeIndex
- };
-
-protected:
- enum : uint32_t {
- flexSize = 0, // TODO: is this still needed? this may be confusing.
- };
-};
-
-/// Helper class that checks if a type has an integer flexSize member.
-struct C2_HIDE _C2Flexible_impl {
- /// specialization for types that have a flexSize member
- template<typename S, unsigned=S::flexSize>
- static std::true_type __testFlexSize(int);
- template<typename>
- static std::false_type __testFlexSize(...);
-};
-
-/// Helper template that returns if a type has an integer flexSize member.
-template<typename S>
-struct C2_HIDE _C2Flexible
- : public std::integral_constant<bool, decltype(_C2Flexible_impl::__testFlexSize<S>(0))::value> {
-};
-
-/// Macro to test if a type is flexible (has a flexSize member).
-#define IF_FLEXIBLE(S) ENABLE_IF(_C2Flexible<S>::value)
-/// Shorthand for std::enable_if
-#define ENABLE_IF(cond) typename std::enable_if<cond>::type
-
-/// Helper template that exposes the flexible subtype of a struct.
-template<typename S, typename E=void>
-struct C2_HIDE _C2FlexHelper {
- typedef void flexType;
- enum : uint32_t { flexSize = 0 };
-};
-
-/// Specialization for flexible types.
-template<typename S>
-struct C2_HIDE _C2FlexHelper<S,
- typename std::enable_if<!std::is_void<typename S::flexMemberType>::value>::type> {
- typedef typename _C2FlexHelper<typename S::flexMemberType>::flexType flexType;
- enum : uint32_t { flexSize = _C2FlexHelper<typename S::flexMemberType>::flexSize };
-};
-
-/// Specialization for flex arrays.
-template<typename S>
-struct C2_HIDE _C2FlexHelper<S[],
- typename std::enable_if<std::is_void<typename _C2FlexHelper<S>::flexType>::value>::type> {
- typedef S flexType;
- enum : uint32_t { flexSize = sizeof(S) };
-};
-
-/**
- * \brief Helper class to check flexible struct requirements and add common operations.
- *
- * Features:
- * - expose baseIndex and fieldList (these are normally inherited from the struct, but flexible
- *   structs cannot be used as base classes and thus cannot be inherited from)
- * - disable copy assignment and construction (TODO: this is already done in the FLEX macro for the
- * flexible struct, so may not be needed here)
- */
-template<typename S, int BaseIndex, unsigned TypeIndex>
-struct C2_HIDE C2FlexStructCheck : public C2StructCheck<S, BaseIndex, TypeIndex> {
-public:
- enum : uint32_t {
- /// \hideinitializer
- baseIndex = BaseIndex | C2Param::BaseIndex::_kFlexibleFlag, ///< flexible struct base-index
- };
-
- const static std::initializer_list<const C2FieldDescriptor> fieldList; // TODO assign here
-
- // default constructor needed because of the disabled copy constructor
- inline C2FlexStructCheck() = default;
-
-protected:
- // cannot copy flexible params
- C2FlexStructCheck(const C2FlexStructCheck<S, BaseIndex, TypeIndex> &) = delete;
- C2FlexStructCheck& operator= (const C2FlexStructCheck<S, BaseIndex, TypeIndex> &) = delete;
-
- // constants used for helper methods
- enum : uint32_t {
- /// \hideinitializer
- flexSize = _C2FlexHelper<S>::flexSize, ///< size of flexible type
- /// \hideinitializer
- maxSize = (uint32_t)std::min((size_t)UINT32_MAX, SIZE_MAX), // TODO: is this always u32 max?
- /// \hideinitializer
- baseSize = sizeof(S) + sizeof(C2Param), ///< size of the base param
- };
-
- /// returns the allocated size of this param with flexCount, or 0 if it would overflow.
- inline static size_t calcSize(size_t flexCount, size_t size = baseSize) {
- if (flexCount <= (maxSize - size) / S::flexSize) {
- return size + S::flexSize * flexCount;
- }
- return 0;
- }
-
- /// dynamic new operator usable for params of type S
- inline void* operator new(size_t size, size_t flexCount) noexcept {
- // TODO: assert(size == baseSize);
- size = calcSize(flexCount, size);
- if (size > 0) {
- return ::operator new(size);
- }
- return nullptr;
- }
-};
-
-// TODO: this probably does not work.
-/// Expose fieldList from the subclass.
-template<typename S, int BaseIndex, unsigned TypeIndex>
-const std::initializer_list<const C2FieldDescriptor> C2FlexStructCheck<S, BaseIndex, TypeIndex>::fieldList = S::fieldList;
-
-/// Define From() cast operators for params.
-#define DEFINE_CAST_OPERATORS(_type) \
- inline static _type* From(C2Param *other) { \
- return (_type*)C2Param::ifSuitable( \
- other, sizeof(_type),_type::typeIndex, _type::flexSize, \
- (_type::typeIndex & T::Index::kDirUndefined) != T::Index::kDirUndefined); \
- } \
- inline static const _type* From(const C2Param *other) { \
- return const_cast<const _type*>(From(const_cast<C2Param *>(other))); \
- } \
- inline static _type* From(std::nullptr_t) { return nullptr; } \
-
-/**
- * Define flexible allocators (alloc_shared or alloc_unique) for flexible params.
- * - P::alloc_xyz(flexCount, args...): allocate for given flex-count.
- * - P::alloc_xyz(args..., T[]): allocate for size of (and with) init array.
- * - P::alloc_xyz(T[]): allocate for size of (and with) init array with no other args.
- * - P::alloc_xyz(args..., std::initializer_list<T>): allocate for size of (and with) initializer
- * list.
- */
-#define DEFINE_FLEXIBLE_ALLOC(_type, S, ptr) \
- template<typename ...Args> \
- inline static std::ptr##_ptr<_type> alloc_##ptr(size_t flexCount, const Args(&... args)) { \
- return std::ptr##_ptr<_type>(new(flexCount) _type(flexCount, args...)); \
- } \
- /* NOTE: unfortunately this is not supported by clang yet */ \
- template<typename ...Args, typename U=typename S::flexType, unsigned N> \
- inline static std::ptr##_ptr<_type> alloc_##ptr(const Args(&... args), const U(&init)[N]) { \
- return std::ptr##_ptr<_type>(new(N) _type(N, args..., init)); \
- } \
- /* so for now, specialize for no args */ \
- template<typename U=typename S::flexType, unsigned N> \
- inline static std::ptr##_ptr<_type> alloc_##ptr(const U(&init)[N]) { \
- return std::ptr##_ptr<_type>(new(N) _type(N, init)); \
- } \
- template<typename ...Args, typename U=typename S::flexType> \
- inline static std::ptr##_ptr<_type> alloc_##ptr( \
- const Args(&... args), const std::initializer_list<U> &init) { \
- return std::ptr##_ptr<_type>(new(init.size()) _type(init.size(), args..., init)); \
- } \
-
-/**
- * Define flexible methods alloc_shared, alloc_unique and flexCount.
- */
-#define DEFINE_FLEXIBLE_METHODS(_type, S) \
- DEFINE_FLEXIBLE_ALLOC(_type, S, shared) \
- DEFINE_FLEXIBLE_ALLOC(_type, S, unique) \
- inline size_t flexCount() const { \
- static_assert(sizeof(_type) == _type::baseSize, "incorrect baseSize"); \
- size_t sz = this->size(); \
- if (sz >= sizeof(_type)) { \
- return (sz - sizeof(_type)) / _type::flexSize; \
- } \
- return 0; \
- } \
-
-/// Mark flexible member variable and make structure flexible.
-#define FLEX(cls, m) \
- C2_DO_NOT_COPY(cls) \
-private: \
- C2PARAM_MAKE_FRIENDS \
- /* default constructor with flexCount */ \
- inline cls(size_t) : cls() {} \
- /** \if 0 */ \
- template<typename, typename> friend struct _C2FlexHelper; \
- typedef decltype(m) flexMemberType; \
-public: \
- /* constexpr static flexMemberType cls::* flexMember = &cls::m; */ \
- typedef typename _C2FlexHelper<flexMemberType>::flexType flexType; \
- static_assert(\
- !std::is_void<flexType>::value, \
- "member is not flexible, or a flexible array of a flexible type"); \
- enum : uint32_t { flexSize = _C2FlexHelper<flexMemberType>::flexSize }; \
- /** \endif */ \
-
-/// @}
-
-/**
- * Global-parameter template.
- *
- * Base template to define a global setting/tuning or info based on a structure and
- * an optional BaseIndex. Global parameters are not tied to a port (input or output).
- *
- * Parameters wrap structures by prepending a (parameter) header. The fields of the wrapped
- * structure can be accessed directly, and constructors and potential public methods are also
- * wrapped.
- *
- * \tparam T param type C2Setting, C2Tuning or C2Info
- * \tparam S wrapped structure
- * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
- */
-template<typename T, typename S, int BaseIndex=S::baseIndex, class Flex=void>
-struct C2_HIDE C2GlobalParam : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
- public C2StructCheck<S, BaseIndex, T::indexFlags | T::Type::kDirGlobal> {
-private:
- typedef C2GlobalParam<T, S, BaseIndex> _type;
-
-public:
- /// Wrapper around base structure's constructor.
- template<typename ...Args>
- inline C2GlobalParam(const Args(&... args)) : T(sizeof(_type), _type::typeIndex), S(args...) { }
-
- DEFINE_CAST_OPERATORS(_type)
-};
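A usage sketch (the typedef name is hypothetical; C2VideoWidthStruct is the example struct from the C2FIELD documentation above):

    typedef C2GlobalParam<C2Tuning, C2VideoWidthStruct> C2VideoWidthGlobalTuning;

    C2VideoWidthGlobalTuning width(1920);  // forwards to C2VideoWidthStruct(int32_t)
    width.mWidth = 1280;                   // wrapped fields are accessed directly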
-
-/**
- * Global-parameter template for flexible structures.
- *
- * Base template to define a global setting/tuning or info based on a flexible structure and
- * an optional BaseIndex. Global parameters are not tied to a port (input or output).
- *
- * \tparam T param type C2Setting, C2Tuning or C2Info
- * \tparam S wrapped flexible structure
- * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
- *
- * Parameters wrap structures by prepending a (parameter) header. The fields and methods of flexible
- * structures can be accessed via the m member variable; however, the constructors of the structure
- * are wrapped directly. (This is because flexible types cannot be subclassed.)
- */
-template<typename T, typename S, int BaseIndex>
-struct C2_HIDE C2GlobalParam<T, S, BaseIndex, IF_FLEXIBLE(S)>
- : public T, public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Type::kDirGlobal> {
-private:
- typedef C2GlobalParam<T, S, BaseIndex> _type;
-
- /// Wrapper around base structure's constructor.
- template<typename ...Args>
- inline C2GlobalParam(size_t flexCount, const Args(&... args))
- : T(_type::calcSize(flexCount), _type::typeIndex), m(flexCount, args...) { }
-
-public:
- S m; ///< wrapped flexible structure
-
- DEFINE_FLEXIBLE_METHODS(_type, S)
- DEFINE_CAST_OPERATORS(_type)
-};
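A usage sketch of the flexible flavor, following the C2VideoFlexWidthsStruct example in the C2FIELD documentation above (which also names C2VideoFlexWidthsGlobalParam):

    typedef C2GlobalParam<C2Tuning, C2VideoFlexWidthsStruct> C2VideoFlexWidthsGlobalParam;

    const int32_t widths[] = { 320, 640, 1280 };
    std::unique_ptr<C2VideoFlexWidthsGlobalParam> param =
            C2VideoFlexWidthsGlobalParam::alloc_unique(widths);
    size_t count = param->flexCount();    // 3, derived from the allocated size
    int32_t first = param->m.mWidths[0];  // flexible members are reached through m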
-
-/**
- * Port-parameter template.
- *
- * Base template to define a port setting/tuning or info based on a structure and
- * an optional BaseIndex. Port parameters are tied to a port (input or output), but not to a
- * specific stream.
- *
- * \tparam T param type C2Setting, C2Tuning or C2Info
- * \tparam S wrapped structure
- * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
- *
- * Parameters wrap structures by prepending a (parameter) header. The fields of the wrapped
- * structure can be accessed directly, and constructors and potential public methods are also
- * wrapped.
- *
- * There are 3 flavors of port parameters: unspecified, input and output. Parameters with
- * unspecified port expose a setPort method, and add an initial port parameter to the constructor.
- */
-template<typename T, typename S, int BaseIndex=S::baseIndex, class Flex=void>
-struct C2_HIDE C2PortParam : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
- private C2StructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirUndefined> {
-private:
- typedef C2PortParam<T, S, BaseIndex> _type;
-
-public:
- /// Default constructor.
- inline C2PortParam() : T(sizeof(_type), _type::typeIndex) { }
- template<typename ...Args>
- /// Wrapper around base structure's constructor while specifying port/direction.
- inline C2PortParam(bool _output, const Args(&... args))
- : T(sizeof(_type), _output ? output::typeIndex : input::typeIndex), S(args...) { }
- /// Set port/direction.
- inline void setPort(bool output) { C2Param::setPort(output); }
-
- DEFINE_CAST_OPERATORS(_type)
-
- /// Specialization for an input port parameter.
- struct input : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
- public C2StructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirInput> {
- /// Wrapper around base structure's constructor.
- template<typename ...Args>
- inline input(const Args(&... args)) : T(sizeof(_type), input::typeIndex), S(args...) { }
-
- DEFINE_CAST_OPERATORS(input)
-
- };
-
- /// Specialization for an output port parameter.
- struct output : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
- public C2StructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirOutput> {
- /// Wrapper around base structure's constructor.
- template<typename ...Args>
- inline output(const Args(&... args)) : T(sizeof(_type), output::typeIndex), S(args...) { }
-
- DEFINE_CAST_OPERATORS(output)
- };
-};
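A usage sketch (typedef name hypothetical; C2VideoWidthStruct is the example struct from the C2FIELD documentation above):

    typedef C2PortParam<C2Setting, C2VideoWidthStruct> C2VideoWidthPortSetting;

    C2VideoWidthPortSetting::input  inWidth(320);               // bound to the input port
    C2VideoWidthPortSetting::output outWidth(640);              // bound to the output port
    C2VideoWidthPortSetting anyWidth(true /* output */, 640);   // direction chosen at runtime
    anyWidth.setPort(false);                                    // retarget to the input port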
-
-/**
- * Port-parameter template for flexible structures.
- *
- * Base template to define a port setting/tuning or info based on a flexible structure and
- * an optional BaseIndex. Port parameters are tied to a port (input or output), but not to a
- * specific stream.
- *
- * \tparam T param type C2Setting, C2Tuning or C2Info
- * \tparam S wrapped flexible structure
- * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
- *
- * Parameters wrap structures by prepending a (parameter) header. The fields and methods of flexible
- * structures can be accessed via the m member variable; however, the constructors of the structure
- * are wrapped directly. (This is because flexible types cannot be subclassed.)
- *
- * There are 3 flavors of port parameters: unspecified, input and output. Parameters with
- * unspecified port expose a setPort method, and add an initial port parameter to the constructor.
- */
-template<typename T, typename S, int BaseIndex>
-struct C2_HIDE C2PortParam<T, S, BaseIndex, IF_FLEXIBLE(S)>
- : public T, public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Type::kDirUndefined> {
-private:
- typedef C2PortParam<T, S, BaseIndex> _type;
-
- /// Default constructor for basic allocation: new(flexCount) P.
- inline C2PortParam(size_t flexCount) : T(_type::calcSize(flexCount), _type::typeIndex) { }
- template<typename ...Args>
- /// Wrapper around base structure's constructor while also specifying port/direction.
- inline C2PortParam(size_t flexCount, bool _output, const Args(&... args))
- : T(_type::calcSize(flexCount), _output ? output::typeIndex : input::typeIndex),
- m(flexCount, args...) { }
-
-public:
- /// Set port/direction.
- inline void setPort(bool output) { C2Param::setPort(output); }
-
- S m; ///< wrapped flexible structure
-
- DEFINE_FLEXIBLE_METHODS(_type, S)
- DEFINE_CAST_OPERATORS(_type)
-
- /// Specialization for an input port parameter.
- struct input : public T, public C2BaseIndexOverride<S, BaseIndex>,
- public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirInput> {
- private:
- /// Wrapper around base structure's constructor while also specifying port/direction.
- template<typename ...Args>
- inline input(size_t flexCount, const Args(&... args))
- : T(_type::calcSize(flexCount), input::typeIndex), m(flexCount, args...) { }
-
- public:
- S m; ///< wrapped flexible structure
-
- DEFINE_FLEXIBLE_METHODS(input, S)
- DEFINE_CAST_OPERATORS(input)
- };
-
- /// Specialization for an output port parameter.
- struct output : public T, public C2BaseIndexOverride<S, BaseIndex>,
- public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirOutput> {
- private:
- /// Wrapper around base structure's constructor while also specifying port/direction.
- template<typename ...Args>
- inline output(size_t flexCount, const Args(&... args))
- : T(_type::calcSize(flexCount), output::typeIndex), m(flexCount, args...) { }
-
- public:
- S m; ///< wrapped flexible structure
-
- DEFINE_FLEXIBLE_METHODS(output, S)
- DEFINE_CAST_OPERATORS(output)
- };
-};
-
-/**
- * Stream-parameter template.
- *
- * Base template to define a stream setting/tuning or info based on a structure and
- * an optional BaseIndex. Stream parameters are tied to a specific stream on a port (input or
- * output).
- *
- * \tparam T param type C2Setting, C2Tuning or C2Info
- * \tparam S wrapped structure
- * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
- *
- * Parameters wrap structures by prepending a (parameter) header. The fields of the wrapped
- * structure can be accessed directly, and constructors and potential public methods are also
- * wrapped.
- *
- * There are 3 flavors of stream parameters: unspecified port, input and output. All of these expose
- * a setStream method and an extra initial streamID parameter for the constructor. Moreover,
- * parameters with unspecified port expose a setPort method, and add an additional initial port
- * parameter to the constructor.
- */
-template<typename T, typename S, int BaseIndex=S::baseIndex, class Flex=void>
-struct C2_HIDE C2StreamParam : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
- private C2StructCheck<S, BaseIndex,
- T::indexFlags | T::Index::kStreamFlag | T::Index::kDirUndefined> {
-private:
- typedef C2StreamParam<T, S, BaseIndex> _type;
-
-public:
- /// Default constructor. Port/direction and stream-ID is undefined.
- inline C2StreamParam() : T(sizeof(_type), _type::typeIndex) { }
- /// Wrapper around base structure's constructor while also specifying port/direction and
- /// stream-ID.
- template<typename ...Args>
- inline C2StreamParam(bool _output, unsigned stream, const Args(&... args))
- : T(sizeof(_type), _output ? output::typeIndex : input::typeIndex, stream),
- S(args...) { }
- /// Set port/direction.
- inline void setPort(bool output) { C2Param::setPort(output); }
- /// Set stream-id. \retval true if the stream-id was successfully set.
- inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
-
- DEFINE_CAST_OPERATORS(_type)
-
- /// Specialization for an input stream parameter.
- struct input : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
- public C2StructCheck<S, BaseIndex,
- T::indexFlags | T::Index::kStreamFlag | T::Type::kDirInput> {
- /// Default constructor. Stream-ID is undefined.
- inline input() : T(sizeof(_type), input::typeIndex) { }
- /// Wrapper around base structure's constructor while also specifying stream-ID.
- template<typename ...Args>
- inline input(unsigned stream, const Args(&... args))
- : T(sizeof(_type), input::typeIndex, stream), S(args...) { }
- /// Set stream-id. \retval true if the stream-id was successfully set.
- inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
-
- DEFINE_CAST_OPERATORS(input)
- };
-
- /// Specialization for an output stream parameter.
- struct output : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
- public C2StructCheck<S, BaseIndex,
- T::indexFlags | T::Index::kStreamFlag | T::Type::kDirOutput> {
- /// Default constructor. Stream-ID is undefined.
- inline output() : T(sizeof(_type), output::typeIndex) { }
- /// Wrapper around base structure's constructor while also specifying stream-ID.
- template<typename ...Args>
- inline output(unsigned stream, const Args(&... args))
- : T(sizeof(_type), output::typeIndex, stream), S(args...) { }
- /// Set stream-id. \retval true if the stream-id was successfully set.
- inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
-
- DEFINE_CAST_OPERATORS(output)
- };
-};
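A usage sketch (typedef name hypothetical; C2VideoWidthStruct is the example struct from the C2FIELD documentation above):

    typedef C2StreamParam<C2Info, C2VideoWidthStruct> C2VideoWidthStreamInfo;

    C2VideoWidthStreamInfo::output width(0u /* stream */, 1920);
    bool ok = width.setStream(1u);  // retarget to stream 1; false if the id cannot be encoded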
-
-/**
- * Stream-parameter template for flexible structures.
- *
- * Base template to define a stream setting/tuning or info based on a flexible structure and
- * an optional BaseIndex. Stream parameters are tied to a specific stream on a port (input or
- * output).
- *
- * \tparam T param type C2Setting, C2Tuning or C2Info
- * \tparam S wrapped flexible structure
- * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
- *
- * Parameters wrap structures by prepending a (parameter) header. The fields and methods of flexible
- * structures can be accessed via the m member variable; however, the constructors of the structure
- * are wrapped directly. (This is because flexible types cannot be subclassed.)
- *
- * There are 3 flavors of stream parameters: unspecified port, input and output. All of these expose
- * a setStream method and an extra initial streamID parameter for the constructor. Moreover,
- * parameters with unspecified port expose a setPort method, and add an additional initial port
- * parameter to the constructor.
- */
-template<typename T, typename S, int BaseIndex>
-struct C2_HIDE C2StreamParam<T, S, BaseIndex, IF_FLEXIBLE(S)>
- : public T, public C2BaseIndexOverride<S, BaseIndex>,
- private C2FlexStructCheck<S, BaseIndex,
- T::indexFlags | T::Index::kStreamFlag | T::Index::kDirUndefined> {
-private:
- typedef C2StreamParam<T, S> _type;
- /// Default constructor. Port/direction and stream-ID is undefined.
- inline C2StreamParam(size_t flexCount) : T(_type::calcSize(flexCount), _type::typeIndex, 0u) { }
- /// Wrapper around base structure's constructor while also specifying port/direction and
- /// stream-ID.
- template<typename ...Args>
- inline C2StreamParam(size_t flexCount, bool _output, unsigned stream, const Args(&... args))
- : T(_type::calcSize(flexCount), _output ? output::typeIndex : input::typeIndex, stream),
- m(flexCount, args...) { }
-
-public:
- S m; ///< wrapped flexible structure
-
- /// Set port/direction.
- inline void setPort(bool output) { C2Param::setPort(output); }
- /// Set stream-id. \retval true if the stream-id was successfully set.
- inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
-
- DEFINE_FLEXIBLE_METHODS(_type, S)
- DEFINE_CAST_OPERATORS(_type)
-
- /// Specialization for an input stream parameter.
- struct input : public T, public C2BaseIndexOverride<S, BaseIndex>,
- public C2FlexStructCheck<S, BaseIndex,
- T::indexFlags | T::Index::kStreamFlag | T::Type::kDirInput> {
- private:
- /// Default constructor. Stream-ID is undefined.
- inline input(size_t flexCount) : T(_type::calcSize(flexCount), input::typeIndex) { }
- /// Wrapper around base structure's constructor while also specifying stream-ID.
- template<typename ...Args>
- inline input(size_t flexCount, unsigned stream, const Args(&... args))
- : T(_type::calcSize(flexCount), input::typeIndex, stream), m(flexCount, args...) { }
-
- public:
- S m; ///< wrapped flexible structure
-
- /// Set stream-id. \retval true if the stream-id was successfully set.
- inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
-
- DEFINE_FLEXIBLE_METHODS(input, S)
- DEFINE_CAST_OPERATORS(input)
- };
-
- /// Specialization for an output stream parameter.
- struct output : public T, public C2BaseIndexOverride<S, BaseIndex>,
- public C2FlexStructCheck<S, BaseIndex,
- T::indexFlags | T::Index::kStreamFlag | T::Type::kDirOutput> {
- private:
- /// Default constructor. Stream-ID is undefined.
- inline output(size_t flexCount) : T(_type::calcSize(flexCount), output::typeIndex) { }
- /// Wrapper around base structure's constructor while also specifying stream-ID.
- template<typename ...Args>
- inline output(size_t flexCount, unsigned stream, const Args(&... args))
- : T(_type::calcSize(flexCount), output::typeIndex, stream), m(flexCount, args...) { }
-
- public:
- S m; ///< wrapped flexible structure
-
- /// Set stream-id. \retval true if the stream-id was successfully set.
- inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
-
- DEFINE_FLEXIBLE_METHODS(output, S)
- DEFINE_CAST_OPERATORS(output)
- };
-};
-
-/* ======================== SIMPLE VALUE PARAMETERS ======================== */
-
-/**
- * \ingroup internal
- * A structure template encapsulating a single element with default constructors and no base-index.
- */
-template<typename T>
-struct C2SimpleValueStruct {
- T mValue; ///< simple value of the structure
- // Default constructor.
- inline C2SimpleValueStruct() = default;
- // Constructor with an initial value.
- inline C2SimpleValueStruct(T value) : mValue(value) {}
- DEFINE_C2STRUCT_NO_BASE(SimpleValue)
-};
-
-// TODO: move this and next to some generic place
-/**
- * Interface to a block of (mapped) memory containing an array of some type (T).
- */
-template<typename T>
-struct C2MemoryBlock {
- /// \returns the number of elements in this block.
- virtual size_t size() const = 0;
- /// \returns a const pointer to the start of this block. Care must be taken to not read outside
- /// the block.
- virtual const T *data() const = 0; // TODO: should this be friend access only in some C2Memory module?
- /// \returns a pointer to the start of this block. Care must be taken to not read or write
- /// outside the block.
- inline T *data() { return const_cast<T*>(data()); }
-protected:
- // TODO: for now it should never be deleted as C2MemoryBlock
- virtual ~C2MemoryBlock() = default;
-};
-
-/**
- * Interface to a block of memory containing a constant (constexpr) array of some type (T).
- */
-template<typename T>
-struct C2ConstMemoryBlock : public C2MemoryBlock<T> {
- virtual const T * data() const { return mData; }
- virtual size_t size() const { return mSize; }
-
- /// Constructor.
- template<unsigned N>
- inline constexpr C2ConstMemoryBlock(const T(&init)[N]) : mData(init), mSize(N) {}
-
-private:
- const T *mData;
- const size_t mSize;
-};
-
-/// \addtogroup internal
-/// @{
-
-/// Helper class to initialize flexible arrays with various initializers.
-struct _C2ValueArrayHelper {
-    // char[] members are used as null-terminated strings, so the last element is never initialized.
-
- /// Initialize a flexible array using a constexpr memory block.
- template<typename T>
- static void init(T(&array)[], size_t arrayLen, const C2MemoryBlock<T> &block) {
- // reserve last element for terminal 0 for strings
- if (arrayLen && std::is_same<T, char>::value) {
- --arrayLen;
- }
- if (block.data()) {
- memcpy(array, block.data(), std::min(arrayLen, block.size()) * sizeof(T));
- }
- }
-
- /// Initialize a flexible array using an initializer list.
- template<typename T>
- static void init(T(&array)[], size_t arrayLen, const std::initializer_list<T> &init) {
- size_t ix = 0;
- // reserve last element for terminal 0 for strings
- if (arrayLen && std::is_same<T, char>::value) {
- --arrayLen;
- }
- for (const T &item : init) {
- if (ix == arrayLen) {
- break;
- }
- array[ix++] = item;
- }
- }
-
- /// Initialize a flexible array using another flexible array.
- template<typename T, unsigned N>
- static void init(T(&array)[], size_t arrayLen, const T(&str)[N]) {
- // reserve last element for terminal 0 for strings
- if (arrayLen && std::is_same<T, char>::value) {
- --arrayLen;
- }
- if (arrayLen) {
- strncpy(array, str, std::min(arrayLen, (size_t)N));
- }
- }
-};
-
-/**
- * Specialization for flexible blob and string arrays. A structure template encapsulating a single
- * flexible array member with default flexible constructors and no base-index. This type cannot be
- * constructed on its own as its size is 0.
- *
- * \internal This is different from C2SimpleArrayStruct<T[]> simply because its member is named
- * mValue to reflect that this is a single value.
- */
-template<typename T>
-struct C2SimpleValueStruct<T[]> {
- static_assert(std::is_same<T, char>::value || std::is_same<T, uint8_t>::value,
- "C2SimpleValueStruct<T[]> is only for BLOB or STRING");
- T mValue[];
-
- inline C2SimpleValueStruct() = default;
- DEFINE_C2STRUCT_NO_BASE(SimpleValue)
- FLEX(C2SimpleValueStruct, mValue)
-
-private:
- inline C2SimpleValueStruct(size_t flexCount, const C2MemoryBlock<T> &block) {
- _C2ValueArrayHelper::init(mValue, flexCount, block);
- }
-
- inline C2SimpleValueStruct(size_t flexCount, const std::initializer_list<T> &init) {
- _C2ValueArrayHelper::init(mValue, flexCount, init);
- }
-
- template<unsigned N>
- inline C2SimpleValueStruct(size_t flexCount, const T(&init)[N]) {
- _C2ValueArrayHelper::init(mValue, flexCount, init);
- }
-};
-
-/// @}
-
-/**
- * A structure template encapsulating a single flexible array element of a specific type (T) with
- * default constructors and no base-index. This type cannot be constructed on its own as its size
- * is 0. Instead, it is meant to be used as a parameter, e.g.
- *
- * typedef C2StreamParam<C2Info, C2SimpleArrayStruct<C2MyFancyStruct>,
- * kParamIndexMyFancyArrayStreamParam> C2MyFancyArrayStreamInfo;
- */
-template<typename T>
-struct C2SimpleArrayStruct {
-    static_assert(!std::is_same<T, char>::value && !std::is_same<T, uint8_t>::value,
-                  "use C2SimpleValueStruct<T[]> for BLOB or STRING");
-
- T mValues[]; ///< array member
- /// Default constructor
- inline C2SimpleArrayStruct() = default;
- DEFINE_C2STRUCT_NO_BASE(SimpleArray)
- FLEX(C2SimpleArrayStruct, mValues)
-
-private:
- /// Construct from a C2MemoryBlock.
- /// Used only by the flexible parameter allocators (alloc_unique & alloc_shared).
- inline C2SimpleArrayStruct(size_t flexCount, const C2MemoryBlock<T> &block) {
- _C2ValueArrayHelper::init(mValues, flexCount, block);
- }
-
- /// Construct from an initializer list.
- /// Used only by the flexible parameter allocators (alloc_unique & alloc_shared).
- inline C2SimpleArrayStruct(size_t flexCount, const std::initializer_list<T> &init) {
- _C2ValueArrayHelper::init(mValues, flexCount, init);
- }
-
- /// Construct from another flexible array.
- /// Used only by the flexible parameter allocators (alloc_unique & alloc_shared).
- template<unsigned N>
- inline C2SimpleArrayStruct(size_t flexCount, const T(&init)[N]) {
- _C2ValueArrayHelper::init(mValues, flexCount, init);
- }
-};
-
-/**
- * \addtogroup simplevalue Simple value and array structures.
- * @{
- *
- * Simple value structures.
- *
- * Structures containing a single simple value. These can be reused to easily define simple
- * parameters of various types:
- *
- * typedef C2PortParam<C2Tuning, C2Int32Value, kParamIndexMyIntegerPortParam>
- * C2MyIntegerPortParamTuning;
- *
- * They contain a single member (mValue or mValues) that is described as "value" or "values".
- */
-/// A 32-bit signed integer parameter in mValue, described as "value"
-typedef C2SimpleValueStruct<int32_t> C2Int32Value;
-/// A 32-bit signed integer array parameter in mValues, described as "values"
-typedef C2SimpleArrayStruct<int32_t> C2Int32Array;
-/// A 32-bit unsigned integer parameter in mValue, described as "value"
-typedef C2SimpleValueStruct<uint32_t> C2Uint32Value;
-/// A 32-bit unsigned integer array parameter in mValues, described as "values"
-typedef C2SimpleArrayStruct<uint32_t> C2Uint32Array;
-/// A 64-bit signed integer parameter in mValue, described as "value"
-typedef C2SimpleValueStruct<int64_t> C2Int64Value;
-/// A 64-bit signed integer array parameter in mValues, described as "values"
-typedef C2SimpleArrayStruct<int64_t> C2Int64Array;
-/// A 64-bit unsigned integer parameter in mValue, described as "value"
-typedef C2SimpleValueStruct<uint64_t> C2Uint64Value;
-/// A 64-bit unsigned integer array parameter in mValues, described as "values"
-typedef C2SimpleArrayStruct<uint64_t> C2Uint64Array;
-/// A float parameter in mValue, described as "value"
-typedef C2SimpleValueStruct<float> C2FloatValue;
-/// A float array parameter in mValues, described as "values"
-typedef C2SimpleArrayStruct<float> C2FloatArray;
-/// A blob flexible parameter in mValue, described as "value"
-typedef C2SimpleValueStruct<uint8_t[]> C2BlobValue;
-/// A string flexible parameter in mValue, described as "value"
-typedef C2SimpleValueStruct<char[]> C2StringValue;
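Following the pattern shown in the group documentation above, a sketch of a concrete simple-value parameter (the index and typedef name are hypothetical):

    enum { kParamIndexMyIntegerPortParam };  // normally allocated centrally
    typedef C2PortParam<C2Tuning, C2Int32Value, kParamIndexMyIntegerPortParam>
            C2MyIntegerPortParamTuning;

    C2MyIntegerPortParamTuning::input gain(42);  // gain.mValue == 42, described as "value"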
-
-#if 1
-template<typename T>
-const std::initializer_list<const C2FieldDescriptor> C2SimpleValueStruct<T>::fieldList = { C2FIELD(mValue, "value") };
-template<typename T>
-const std::initializer_list<const C2FieldDescriptor> C2SimpleValueStruct<T[]>::fieldList = { C2FIELD(mValue, "value") };
-template<typename T>
-const std::initializer_list<const C2FieldDescriptor> C2SimpleArrayStruct<T>::fieldList = { C2FIELD(mValues, "values") };
-#else
-// This seems to be handled by the template above
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<int32_t>, { C2FIELD(mValue, "value") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<uint32_t>, { C2FIELD(mValue, "value") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<int64_t>, { C2FIELD(mValue, "value") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<uint64_t>, { C2FIELD(mValue, "value") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<float>, { C2FIELD(mValue, "value") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<uint8_t[]>, { C2FIELD(mValue, "value") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<char[]>, { C2FIELD(mValue, "value") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<int32_t>, { C2FIELD(mValues, "values") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<uint32_t>, { C2FIELD(mValues, "values") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<int64_t>, { C2FIELD(mValues, "values") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<uint64_t>, { C2FIELD(mValues, "values") });
-DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<float>, { C2FIELD(mValues, "values") });
-#endif
-
-/// @}
-
-/// @}
-
-} // namespace android
-
-#endif // C2PARAM_DEF_H_
diff --git a/media/libstagefright/codec2/include/C2Work.h b/media/libstagefright/codec2/include/C2Work.h
deleted file mode 100644
index a42d11a..0000000
--- a/media/libstagefright/codec2/include/C2Work.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef C2WORK_H_
-
-#define C2WORK_H_
-
-#include <stdint.h>
-#include <stdbool.h>
-#include <C2Param.h>
-#include <C2Buffer.h>
-#include <C2Config.h>
-
-#include <memory>
-#include <list>
-#include <vector>
-
-typedef int status_t;
-
-namespace android {
-
-/// \defgroup work Work and data processing
-/// @{
-
-struct C2SettingResult {
- enum Failure {
- READ_ONLY, ///< parameter is read-only and cannot be set
- MISMATCH, ///< parameter mismatches input data
- BAD_VALUE, ///< parameter does not accept value
- BAD_TYPE, ///< parameter is not supported
- BAD_PORT, ///< parameter is not supported on the specific port
- BAD_INDEX, ///< parameter is not supported on the specific stream
- CONFLICT, ///< parameter is in conflict with another setting
- };
-
- C2ParamField field;
- Failure failure;
-    std::unique_ptr<C2FieldSupportedValues> supportedValues; //< if different from normal (e.g. in conflict with another param or input data)
- std::list<C2ParamField> conflictingFields;
-};
-
-// ================================================================================================
-// WORK
-// ================================================================================================
-
-// node_id-s
-typedef uint32_t node_id;
-
-enum flags_t : uint32_t {
- BUFFERFLAG_CODEC_CONFIG,
- BUFFERFLAG_DROP_FRAME,
- BUFFERFLAG_END_OF_STREAM,
-};
-
-enum {
- kParamIndexWorkOrdinal,
-};
-
-struct C2WorkOrdinalStruct {
- uint64_t timestamp;
- uint64_t frame_index; // submission ordinal on the initial component
- uint64_t custom_ordinal; // can be given by the component, e.g. decode order
-
- DEFINE_AND_DESCRIBE_C2STRUCT(WorkOrdinal)
- C2FIELD(timestamp, "timestamp")
- C2FIELD(frame_index, "frame-index")
- C2FIELD(custom_ordinal, "custom-ordinal")
-};
-
-struct C2BufferPack {
-//public:
- flags_t flags;
- C2WorkOrdinalStruct ordinal;
- std::vector<std::shared_ptr<C2Buffer>> buffers;
- //< for initial work item, these may also come from the parser - if provided
- //< for output buffers, these are the responses to requestedInfos
- std::list<std::unique_ptr<C2Info>> infos;
- std::list<std::shared_ptr<C2InfoBuffer>> infoBuffers;
-};
-
-struct C2Worklet {
-//public:
- // IN
- node_id component;
-
- std::list<std::unique_ptr<C2Param>> tunings; //< tunings to be applied before processing this
- // worklet
- std::list<C2Param::Type> requestedInfos;
- std::vector<std::shared_ptr<C2BlockAllocator>> allocators; //< This vector shall be the same size as
- //< output.buffers.
-
- // OUT
- C2BufferPack output;
- std::list<std::unique_ptr<C2SettingResult>> failures;
-};
-
-/**
- * This structure holds information about a single work item.
- *
- * This structure shall be passed by the client to the component for the first worklet. As such,
- * worklets must not be empty. The ownership of this object is passed.
- *
- * input:
- * The input data to be processed. This is provided by the client with ownership. When the work
- * is returned, the input buffer-pack's buffer vector shall contain nullptrs.
- *
- * worklets:
- * The chain of components and associated allocators, tunings and info requests that the data
- * must pass through. If this has more than a single element, the tunnels between successive
- * components of the worklet chain must have been (successfully) pre-registered at the time
- * the work is submitted. Allocating the output buffers in the worklets is the responsibility
- * of each component. Upon work submission, each output buffer-pack shall be an appropriately
- * sized vector containing nullptrs. When the work is completed/returned to the client,
- *
- * worklets_processed:
- * It shall be initialized to 0 by the client when the work is submitted.
- * It shall contain the number of worklets that were successfully processed when the work is
- *   returned. If this is less than the number of worklets, result must not be success.
- * It must be in the range of [0, worklets.size()].
- *
- * result:
- * The final outcome of the work. If 0 when work is returned, it is assumed that all worklets
- * have been processed.
- */
-struct C2Work {
-//public:
-    // pre-chain infos (for portions of a tunneling chain that happened before this work-chain for
-    // this work item - due to framework-facilitated (non-tunneled) work-chaining)
- std::list<std::pair<std::unique_ptr<C2PortMimeConfig>, std::unique_ptr<C2Info>>> preChainInfos;
- std::list<std::pair<std::unique_ptr<C2PortMimeConfig>, std::unique_ptr<C2Buffer>>> preChainInfoBlobs;
-
- C2BufferPack input;
- std::list<std::unique_ptr<C2Worklet>> worklets;
-
- uint32_t worklets_processed;
- status_t result;
-};
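A minimal sketch of preparing a one-worklet work item for submission, per the description above (inBuffer and outAllocator are assumed to already exist as a std::shared_ptr<C2Buffer> and a std::shared_ptr<C2BlockAllocator> respectively):

    std::unique_ptr<C2Work> work(new C2Work);
    work->input.flags = flags_t(0);
    work->input.ordinal.timestamp = 0;
    work->input.ordinal.frame_index = 0;
    work->input.buffers.push_back(inBuffer);

    std::unique_ptr<C2Worklet> worklet(new C2Worklet);
    worklet->allocators.push_back(outAllocator);
    worklet->output.buffers.resize(1);            // appropriately sized vector of nullptrs
    work->worklets.push_back(std::move(worklet));

    work->worklets_processed = 0;                 // client initializes this to 0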
-
-struct C2WorkOutline {
-//public:
- C2WorkOrdinalStruct ordinal;
- std::list<node_id> chain;
-};
-
-/// @}
-
-} // namespace android
-
-#endif // C2WORK_H_
diff --git a/media/libstagefright/codec2/tests/Android.mk b/media/libstagefright/codec2/tests/Android.mk
deleted file mode 100644
index 49c4253..0000000
--- a/media/libstagefright/codec2/tests/Android.mk
+++ /dev/null
@@ -1,37 +0,0 @@
-# Build the unit tests.
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
-
-LOCAL_MODULE := codec2_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
- vndk/C2UtilTest.cpp \
- C2_test.cpp \
- C2Param_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libstagefright_codec2 \
- liblog
-
-LOCAL_C_INCLUDES := \
- frameworks/av/media/libstagefright/codec2/include \
- frameworks/av/media/libstagefright/codec2/vndk/include \
- $(TOP)/frameworks/native/include/media/openmax \
-
-LOCAL_CFLAGS += -Werror -Wall -std=c++14
-LOCAL_CLANG := true
-
-include $(BUILD_NATIVE_TEST)
-
-# Include subdirectory makefiles
-# ============================================================
-
-# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework
-# team really wants is to build the stuff defined by this makefile.
-ifeq (,$(ONE_SHOT_MAKEFILE))
-include $(call first-makefiles-under,$(LOCAL_PATH))
-endif
diff --git a/media/libstagefright/codec2/tests/C2Param_test.cpp b/media/libstagefright/codec2/tests/C2Param_test.cpp
deleted file mode 100644
index ec82c84..0000000
--- a/media/libstagefright/codec2/tests/C2Param_test.cpp
+++ /dev/null
@@ -1,2687 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "C2Param_test"
-
-#include <gtest/gtest.h>
-
-#include <util/C2ParamUtils.h>
-#include <C2ParamDef.h>
-
-namespace android {
-
-void PrintTo(const _C2FieldId &id, ::std::ostream* os) {
- *os << "@" << id._mOffset << "+" << id._mSize;
-}
-
-void PrintTo(const C2FieldDescriptor &fd, ::std::ostream *os) {
- using FD=C2FieldDescriptor;
- switch (fd.type()) {
- case FD::INT32: *os << "i32"; break;
- case FD::INT64: *os << "i64"; break;
- case FD::UINT32: *os << "u32"; break;
- case FD::UINT64: *os << "u64"; break;
- case FD::FLOAT: *os << "float"; break;
- case FD::STRING: *os << "char"; break;
- case FD::BLOB: *os << "u8"; break;
- default:
- if (fd.type() & FD::STRUCT_FLAG) {
- *os << "struct-" << (fd.type() & ~FD::STRUCT_FLAG);
- } else {
- *os << "type-" << fd.type();
- }
- }
- *os << " " << fd.name();
- if (fd.length() > 1) {
- *os << "[" << fd.length() << "]";
- } else if (fd.length() == 0) {
- *os << "[]";
- }
- *os << " (";
- PrintTo(fd._mFieldId, os);
- *os << "*" << fd.length() << ")";
-}
-
-enum C2ParamIndexType {
- kParamIndexNumber,
- kParamIndexNumbers,
- kParamIndexNumber2,
- kParamIndexVendorStart = C2Param::BaseIndex::kVendorStart,
- kParamIndexVendorNumbers,
-};
-
-void ffff(int(*)(int)) {}
-
-/* ============================= STRUCT DECLARATION AND DESCRIPTION ============================= */
-
-typedef C2FieldDescriptor FD;
-
-class C2ParamTest : public ::testing::Test {
-};
-
-class C2ParamTest_ParamFieldList
- : public ::testing::TestWithParam<std::initializer_list<const C2FieldDescriptor>> {
-};
-
-enum {
- kParamIndexSize,
- kParamIndexTestA,
- kParamIndexTestB,
- kParamIndexTestFlexS32,
- kParamIndexTestFlexEndS32,
- kParamIndexTestFlexS64,
- kParamIndexTestFlexEndS64,
- kParamIndexTestFlexSize,
- kParamIndexTestFlexEndSize,
-};
-
-struct C2SizeStruct {
- int32_t mNumber;
- int32_t mHeight;
- enum : uint32_t { baseIndex = kParamIndexSize }; // <= needed for C2FieldDescriptor
- const static std::initializer_list<const C2FieldDescriptor> fieldList; // <= needed for C2FieldDescriptor
- const static FD::Type TYPE = (FD::Type)(baseIndex | FD::STRUCT_FLAG);
-};
-
-DEFINE_NO_NAMED_VALUES_FOR(C2SizeStruct)
-
-// Test 1. define a structure without any helper methods
-
-bool operator==(const C2FieldDescriptor &a, const C2FieldDescriptor &b) {
- return a.type() == b.type()
- && a.length() == b.length()
- && strcmp(a.name(), b.name()) == 0
- && a._mFieldId == b._mFieldId;
-}
-
-struct C2TestStruct_A {
- int32_t mSigned32;
- int64_t mSigned64[2];
- uint32_t mUnsigned32[1];
- uint64_t mUnsigned64;
- float mFloat;
- C2SizeStruct mSize[3];
- uint8_t mBlob[100];
- char mString[100];
- bool mYesNo[100];
-
- const static std::initializer_list<const C2FieldDescriptor> fieldList;
- // enum : uint32_t { baseIndex = kParamIndexTest };
- // typedef C2TestStruct_A _type;
-} __attribute__((packed));
-
-const std::initializer_list<const C2FieldDescriptor> C2TestStruct_A::fieldList =
- { { FD::INT32, 1, "s32", 0, 4 },
- { FD::INT64, 2, "s64", 4, 8 },
- { FD::UINT32, 1, "u32", 20, 4 },
- { FD::UINT64, 1, "u64", 24, 8 },
- { FD::FLOAT, 1, "fp", 32, 4 },
- { C2SizeStruct::TYPE, 3, "size", 36, 8 },
- { FD::BLOB, 100, "blob", 60, 1 },
- { FD::STRING, 100, "str", 160, 1 },
- { FD::BLOB, 100, "y-n", 260, 1 } };
-
-TEST_P(C2ParamTest_ParamFieldList, VerifyStruct) {
- std::vector<const C2FieldDescriptor> fields = GetParam(), expected = C2TestStruct_A::fieldList;
-
- // verify first field descriptor
- EXPECT_EQ(FD::INT32, fields[0].type());
- EXPECT_STREQ("s32", fields[0].name());
- EXPECT_EQ(1u, fields[0].length());
- EXPECT_EQ(_C2FieldId(0, 4), fields[0]._mFieldId);
-
- EXPECT_EQ(expected[0], fields[0]);
- EXPECT_EQ(expected[1], fields[1]);
- EXPECT_EQ(expected[2], fields[2]);
- EXPECT_EQ(expected[3], fields[3]);
- EXPECT_EQ(expected[4], fields[4]);
- EXPECT_EQ(expected[5], fields[5]);
- EXPECT_EQ(expected[6], fields[6]);
- EXPECT_EQ(expected[7], fields[7]);
- for (size_t i = 8; i < fields.size() && i < expected.size(); ++i) {
- EXPECT_EQ(expected[i], fields[i]);
- }
-}
-
-INSTANTIATE_TEST_CASE_P(InitializerList, C2ParamTest_ParamFieldList, ::testing::Values(C2TestStruct_A::fieldList));
-
-// define fields using C2FieldDescriptor pointer constructor
-const std::initializer_list<const C2FieldDescriptor> C2TestStruct_A_FD_PTR_fieldList =
- { C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mSigned32, "s32"),
- C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mSigned64, "s64"),
- C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mUnsigned32, "u32"),
- C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mUnsigned64, "u64"),
- C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mFloat, "fp"),
- C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mSize, "size"),
- C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mBlob, "blob"),
- C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mString, "str"),
- // C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mYesNo, "y-n")
- };
-
-INSTANTIATE_TEST_CASE_P(PointerConstructor, C2ParamTest_ParamFieldList, ::testing::Values(C2TestStruct_A_FD_PTR_fieldList));
-
-// define fields using C2FieldDescriptor member-pointer constructor
-const std::initializer_list<const C2FieldDescriptor> C2TestStruct_A_FD_MEM_PTR_fieldList =
- { C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mSigned32, "s32"),
- C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mSigned64, "s64"),
- C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned32, "u32"),
- C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned64, "u64"),
- C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mFloat, "fp"),
- C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mSize, "size"),
- C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mBlob, "blob"),
- C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mString, "str"),
- // C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mYesNo, "y-n")
- };
-
-INSTANTIATE_TEST_CASE_P(MemberPointerConstructor, C2ParamTest_ParamFieldList, ::testing::Values(C2TestStruct_A_FD_MEM_PTR_fieldList));
-
-// Test 2. define a structure with two-step helper methods
-
-struct C2TestAStruct {
- int32_t mSigned32;
- int64_t mSigned64[2];
- uint32_t mUnsigned32[1];
- uint64_t mUnsigned64;
- float mFloat;
- C2SizeStruct mSize[3];
- uint8_t mBlob[100];
- char mString[100];
- bool mYesNo[100];
-
-private: // test access level
- DEFINE_C2STRUCT(TestA)
-} C2_PACK;
-
-DESCRIBE_C2STRUCT(TestA, {
- C2FIELD(mSigned32, "s32")
- C2FIELD(mSigned64, "s64")
- C2FIELD(mUnsigned32, "u32")
- C2FIELD(mUnsigned64, "u64")
- C2FIELD(mFloat, "fp")
- C2FIELD(mSize, "size")
- C2FIELD(mBlob, "blob")
- C2FIELD(mString, "str")
- // C2FIELD(mYesNo, "y-n")
-}) // ; optional
-
-INSTANTIATE_TEST_CASE_P(DescribeStruct2Step, C2ParamTest_ParamFieldList, ::testing::Values(C2TestAStruct::fieldList));
-
-// Test 3. define a structure with one-step helper method
-
-struct C2TestBStruct {
- int32_t mSigned32;
- int64_t mSigned64[2];
- uint32_t mUnsigned32[1];
- uint64_t mUnsigned64;
- float mFloat;
- C2SizeStruct mSize[3];
- uint8_t mBlob[100];
- char mString[100];
- bool mYesNo[100];
-
-private: // test access level
- DEFINE_AND_DESCRIBE_C2STRUCT(TestB)
-
- C2FIELD(mSigned32, "s32")
- C2FIELD(mSigned64, "s64")
- C2FIELD(mUnsigned32, "u32")
- C2FIELD(mUnsigned64, "u64")
- C2FIELD(mFloat, "fp")
- C2FIELD(mSize, "size")
- C2FIELD(mBlob, "blob")
- C2FIELD(mString, "str")
- // C2FIELD(mYesNo, "y-n")
-};
-
-INSTANTIATE_TEST_CASE_P(DescribeStruct1Step, C2ParamTest_ParamFieldList, ::testing::Values(C2TestBStruct::fieldList));
-
-// Test 4. flexible members
-
-template<typename T>
-class C2ParamTest_FlexParamFieldList : public ::testing::Test {
-protected:
- using Type=FD::Type;
-
- // static std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
- static std::vector<std::vector<const C2FieldDescriptor>>
- GetLists();
-
- constexpr static Type flexType =
- std::is_same<T, int32_t>::value ? FD::INT32 :
- std::is_same<T, int64_t>::value ? FD::INT64 :
- std::is_same<T, uint32_t>::value ? FD::UINT32 :
- std::is_same<T, uint64_t>::value ? FD::UINT64 :
- std::is_same<T, float>::value ? FD::FLOAT :
- std::is_same<T, uint8_t>::value ? FD::BLOB :
- std::is_same<T, char>::value ? FD::STRING :
- std::is_same<T, C2SizeStruct>::value ? C2SizeStruct::TYPE : (Type)0;
- constexpr static size_t flexSize = sizeof(T);
-};
-
-typedef ::testing::Types<int32_t, int64_t, C2SizeStruct> FlexTypes;
-TYPED_TEST_CASE(C2ParamTest_FlexParamFieldList, FlexTypes);
-
-TYPED_TEST(C2ParamTest_FlexParamFieldList, VerifyStruct) {
- for (auto a : this->GetLists()) {
- std::vector<const C2FieldDescriptor> fields = a;
- if (fields.size() > 1) {
- EXPECT_EQ(2u, fields.size());
- EXPECT_EQ(C2FieldDescriptor(FD::INT32, 1, "s32", 0, 4), fields[0]);
- EXPECT_EQ(C2FieldDescriptor(this->flexType, 0, "flex", 4, this->flexSize),
- fields[1]);
- } else {
- EXPECT_EQ(1u, fields.size());
- EXPECT_EQ(C2FieldDescriptor(this->flexType, 0, "flex", 0, this->flexSize),
- fields[0]);
- }
- }
-}
-
-struct C2TestStruct_FlexS32 {
- int32_t mFlex[];
-
- const static std::initializer_list<const C2FieldDescriptor> fieldList;
- // enum : uint32_t { baseIndex = kParamIndexTestFlex, flexSize = 4 };
- // typedef C2TestStruct_FlexS32 _type;
- // typedef int32_t flexType;
-};
-
-const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexS32::fieldList = {
- { FD::INT32, 0, "flex", 0, 4 }
-};
-
-struct C2TestStruct_FlexEndS32 {
- int32_t mSigned32;
- int32_t mFlex[];
-
- const static std::initializer_list<const C2FieldDescriptor> fieldList;
- // enum : uint32_t { baseIndex = kParamIndexTestFlexEnd, flexSize = 4 };
- // typedef C2TestStruct_FlexEnd _type;
- // typedef int32_t flexType;
-};
-
-const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndS32::fieldList = {
- { FD::INT32, 1, "s32", 0, 4 },
- { FD::INT32, 0, "flex", 4, 4 },
-};
-
-const static std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndS32_ptr_fieldList = {
- C2FieldDescriptor(&((C2TestStruct_FlexEndS32*)0)->mSigned32, "s32"),
- C2FieldDescriptor(&((C2TestStruct_FlexEndS32*)0)->mFlex, "flex"),
-};
-
-struct C2TestFlexS32Struct {
- int32_t mFlexSigned32[];
-private: // test access level
- C2TestFlexS32Struct() {}
-
- DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(TestFlexS32, mFlexSigned32)
- C2FIELD(mFlexSigned32, "flex")
-};
-
-struct C2TestFlexEndS32Struct {
- int32_t mSigned32;
- int32_t mFlexSigned32[];
-private: // test access level
- C2TestFlexEndS32Struct() {}
-
- DEFINE_FLEX_C2STRUCT(TestFlexEndS32, mFlexSigned32)
-} C2_PACK;
-
-DESCRIBE_C2STRUCT(TestFlexEndS32, {
- C2FIELD(mSigned32, "s32")
- C2FIELD(mFlexSigned32, "flex")
-}) // ; optional
-
-template<>
-std::vector<std::vector<const C2FieldDescriptor>>
-//std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
-C2ParamTest_FlexParamFieldList<int32_t>::GetLists() {
- return {
- C2TestStruct_FlexS32::fieldList,
- C2TestStruct_FlexEndS32::fieldList,
- C2TestStruct_FlexEndS32_ptr_fieldList,
- C2TestFlexS32Struct::fieldList,
- C2TestFlexEndS32Struct::fieldList,
- };
-}
-
-struct C2TestStruct_FlexS64 {
- int64_t mFlexSigned64[];
-
- const static std::initializer_list<const C2FieldDescriptor> fieldList;
- // enum : uint32_t { baseIndex = kParamIndexTestFlexS64, flexSize = 8 };
- // typedef C2TestStruct_FlexS64 _type;
- // typedef int64_t flexType;
-};
-
-const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexS64::fieldList = {
- { FD::INT64, 0, "flex", 0, 8 }
-};
-
-struct C2TestStruct_FlexEndS64 {
- int32_t mSigned32;
- int64_t mSigned64Flex[];
-
- const static std::initializer_list<const C2FieldDescriptor> fieldList;
- // enum : uint32_t { baseIndex = kParamIndexTestFlexEndS64, flexSize = 8 };
- // typedef C2TestStruct_FlexEndS64 _type;
- // typedef int64_t flexType;
-};
-
-const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndS64::fieldList = {
- { FD::INT32, 1, "s32", 0, 4 },
- { FD::INT64, 0, "flex", 4, 8 },
-};
-
-struct C2TestFlexS64Struct {
- int64_t mFlexSigned64[];
- C2TestFlexS64Struct() {}
-
- DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(TestFlexS64, mFlexSigned64)
- C2FIELD(mFlexSigned64, "flex")
-};
-
-struct C2TestFlexEndS64Struct {
- int32_t mSigned32;
- int64_t mFlexSigned64[];
- C2TestFlexEndS64Struct() {}
-
- DEFINE_FLEX_C2STRUCT(TestFlexEndS64, mFlexSigned64)
-} C2_PACK;
-
-DESCRIBE_C2STRUCT(TestFlexEndS64, {
- C2FIELD(mSigned32, "s32")
- C2FIELD(mFlexSigned64, "flex")
-}) // ; optional
-
-template<>
-std::vector<std::vector<const C2FieldDescriptor>>
-//std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
-C2ParamTest_FlexParamFieldList<int64_t>::GetLists() {
- return {
- C2TestStruct_FlexS64::fieldList,
- C2TestStruct_FlexEndS64::fieldList,
- C2TestFlexS64Struct::fieldList,
- C2TestFlexEndS64Struct::fieldList,
- };
-}
-
-struct C2TestStruct_FlexSize {
- C2SizeStruct mFlexSize[];
-
- const static std::initializer_list<const C2FieldDescriptor> fieldList;
- // enum : uint32_t { baseIndex = kParamIndexTestFlexSize, flexSize = 8 };
- // typedef C2TestStruct_FlexSize _type;
- // typedef C2SizeStruct flexType;
-};
-
-const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexSize::fieldList = {
- { C2SizeStruct::TYPE, 0, "flex", 0, sizeof(C2SizeStruct) }
-};
-
-struct C2TestStruct_FlexEndSize {
- int32_t mSigned32;
- C2SizeStruct mSizeFlex[];
-
- const static std::initializer_list<const C2FieldDescriptor> fieldList;
- // enum : uint32_t { baseIndex = kParamIndexTestFlexEndSize, flexSize = 8 };
- // typedef C2TestStruct_FlexEndSize _type;
- // typedef C2SizeStruct flexType;
-};
-
-const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndSize::fieldList = {
- { FD::INT32, 1, "s32", 0, 4 },
- { C2SizeStruct::TYPE, 0, "flex", 4, sizeof(C2SizeStruct) },
-};
-
-struct C2TestFlexSizeStruct {
- C2SizeStruct mFlexSize[];
- C2TestFlexSizeStruct() {}
-
- DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(TestFlexSize, mFlexSize)
- C2FIELD(mFlexSize, "flex")
-};
-
-struct C2TestFlexEndSizeStruct {
- int32_t mSigned32;
- C2SizeStruct mFlexSize[];
- C2TestFlexEndSizeStruct() {}
-
- DEFINE_FLEX_C2STRUCT(TestFlexEndSize, mFlexSize)
-} C2_PACK;
-
-DESCRIBE_C2STRUCT(TestFlexEndSize, {
- C2FIELD(mSigned32, "s32")
- C2FIELD(mFlexSize, "flex")
-}) // ; optional
-
-template<>
-std::vector<std::vector<const C2FieldDescriptor>>
-//std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
-C2ParamTest_FlexParamFieldList<C2SizeStruct>::GetLists() {
- return {
- C2TestStruct_FlexSize::fieldList,
- C2TestStruct_FlexEndSize::fieldList,
- C2TestFlexSizeStruct::fieldList,
- C2TestFlexEndSizeStruct::fieldList,
- };
-}
-
-TEST_F(C2ParamTest, FieldId) {
- // pointer constructor
- EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&((C2TestStruct_A*)0)->mSigned32));
- EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&((C2TestStruct_A*)0)->mSigned64));
- EXPECT_EQ(_C2FieldId(20, 4), _C2FieldId(&((C2TestStruct_A*)0)->mUnsigned32));
- EXPECT_EQ(_C2FieldId(24, 8), _C2FieldId(&((C2TestStruct_A*)0)->mUnsigned64));
- EXPECT_EQ(_C2FieldId(32, 4), _C2FieldId(&((C2TestStruct_A*)0)->mFloat));
- EXPECT_EQ(_C2FieldId(36, 8), _C2FieldId(&((C2TestStruct_A*)0)->mSize));
- EXPECT_EQ(_C2FieldId(60, 1), _C2FieldId(&((C2TestStruct_A*)0)->mBlob));
- EXPECT_EQ(_C2FieldId(160, 1), _C2FieldId(&((C2TestStruct_A*)0)->mString));
- EXPECT_EQ(_C2FieldId(260, 1), _C2FieldId(&((C2TestStruct_A*)0)->mYesNo));
-
- EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&((C2TestFlexEndSizeStruct*)0)->mSigned32));
- EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&((C2TestFlexEndSizeStruct*)0)->mFlexSize));
-
- // member pointer constructor
- EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mSigned32));
- EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mSigned64));
- EXPECT_EQ(_C2FieldId(20, 4), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned32));
- EXPECT_EQ(_C2FieldId(24, 8), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned64));
- EXPECT_EQ(_C2FieldId(32, 4), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mFloat));
- EXPECT_EQ(_C2FieldId(36, 8), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mSize));
- EXPECT_EQ(_C2FieldId(60, 1), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mBlob));
- EXPECT_EQ(_C2FieldId(160, 1), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mString));
- EXPECT_EQ(_C2FieldId(260, 1), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mYesNo));
-
- EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId((C2TestFlexEndSizeStruct*)0, &C2TestFlexEndSizeStruct::mSigned32));
- EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId((C2TestFlexEndSizeStruct*)0, &C2TestFlexEndSizeStruct::mFlexSize));
-
- // member pointer sans type pointer
- EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&C2TestStruct_A::mSigned32));
- EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&C2TestStruct_A::mSigned64));
- EXPECT_EQ(_C2FieldId(20, 4), _C2FieldId(&C2TestStruct_A::mUnsigned32));
- EXPECT_EQ(_C2FieldId(24, 8), _C2FieldId(&C2TestStruct_A::mUnsigned64));
- EXPECT_EQ(_C2FieldId(32, 4), _C2FieldId(&C2TestStruct_A::mFloat));
- EXPECT_EQ(_C2FieldId(36, 8), _C2FieldId(&C2TestStruct_A::mSize));
- EXPECT_EQ(_C2FieldId(60, 1), _C2FieldId(&C2TestStruct_A::mBlob));
- EXPECT_EQ(_C2FieldId(160, 1), _C2FieldId(&C2TestStruct_A::mString));
- EXPECT_EQ(_C2FieldId(260, 1), _C2FieldId(&C2TestStruct_A::mYesNo));
-
- EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&C2TestFlexEndSizeStruct::mSigned32));
- EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&C2TestFlexEndSizeStruct::mFlexSize));
-
- typedef C2GlobalParam<C2Info, C2TestAStruct> C2TestAInfo;
- typedef C2GlobalParam<C2Info, C2TestFlexEndSizeStruct> C2TestFlexEndSizeInfo;
-
- // pointer constructor in C2Param
- EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId(&((C2TestAInfo*)0)->mSigned32));
- EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId(&((C2TestAInfo*)0)->mSigned64));
- EXPECT_EQ(_C2FieldId(28, 4), _C2FieldId(&((C2TestAInfo*)0)->mUnsigned32));
- EXPECT_EQ(_C2FieldId(32, 8), _C2FieldId(&((C2TestAInfo*)0)->mUnsigned64));
- EXPECT_EQ(_C2FieldId(40, 4), _C2FieldId(&((C2TestAInfo*)0)->mFloat));
- EXPECT_EQ(_C2FieldId(44, 8), _C2FieldId(&((C2TestAInfo*)0)->mSize));
- EXPECT_EQ(_C2FieldId(68, 1), _C2FieldId(&((C2TestAInfo*)0)->mBlob));
- EXPECT_EQ(_C2FieldId(168, 1), _C2FieldId(&((C2TestAInfo*)0)->mString));
- EXPECT_EQ(_C2FieldId(268, 1), _C2FieldId(&((C2TestAInfo*)0)->mYesNo));
-
- EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId(&((C2TestFlexEndSizeInfo*)0)->m.mSigned32));
- EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId(&((C2TestFlexEndSizeInfo*)0)->m.mFlexSize));
-
- // member pointer in C2Param
- EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mSigned32));
- EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mSigned64));
- EXPECT_EQ(_C2FieldId(28, 4), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mUnsigned32));
- EXPECT_EQ(_C2FieldId(32, 8), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mUnsigned64));
- EXPECT_EQ(_C2FieldId(40, 4), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mFloat));
- EXPECT_EQ(_C2FieldId(44, 8), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mSize));
- EXPECT_EQ(_C2FieldId(68, 1), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mBlob));
- EXPECT_EQ(_C2FieldId(168, 1), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mString));
- EXPECT_EQ(_C2FieldId(268, 1), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mYesNo));
-
- // NOTE: cannot use a member pointer for flex params due to introduction of 'm'
- // EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId(&C2TestFlexEndSizeInfo::m.mSigned32));
- // EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId(&C2TestFlexEndSizeInfo::m.mFlexSize));
-
-}
-
-struct S32 {
- template<typename T, class B=typename std::remove_extent<T>::type>
- inline S32(const T*) {
- static_assert(!std::is_array<T>::value, "should not be an array");
- static_assert(std::is_same<B, int32_t>::value, "should be int32_t");
- }
-};
-
-struct FLX {
- template<typename U, typename T, class B=typename std::remove_extent<T>::type>
- inline FLX(const T*, const U*) {
- static_assert(std::is_array<T>::value, "should be an array");
- static_assert(std::extent<T>::value == 0, "should be an array of 0 extent");
- static_assert(std::is_same<B, U>::value, "should be type U");
- }
-};
-
-struct MP {
- template<typename U, typename T, typename ExpectedU, typename UnexpectedU>
- inline MP(T U::*, const ExpectedU*, const UnexpectedU*) {
- static_assert(!std::is_same<U, UnexpectedU>::value, "should not be member pointer of the base type");
- static_assert(std::is_same<U, ExpectedU>::value, "should be member pointer of the derived type");
- }
-
- template<typename U, typename T, typename B, typename D>
- inline MP(T D::*, const D*) { }
-};
-
-void compiledStatic_arrayTypePropagationTest() {
- (void)S32(&((C2TestFlexEndS32Struct *)0)->mSigned32);
- (void)FLX(&((C2TestFlexEndS32Struct *)0)->mFlexSigned32, (int32_t*)0);
- (void)FLX(&((C2TestFlexS32Struct *)0)->mFlexSigned32, (int32_t*)0);
-
- typedef C2GlobalParam<C2Info, C2TestAStruct> C2TestAInfo;
-
- // TRICKY: &derivedClass::baseMember has type of baseClass::*
- static_assert(std::is_same<decltype(&C2TestAInfo::mSigned32), int32_t C2TestAStruct::*>::value,
- "base member pointer should have base class in type");
-
- // therefore, member pointer expands to baseClass::* in templates
- (void)MP(&C2TestAInfo::mSigned32,
- (C2TestAStruct*)0 /* expected */, (C2TestAInfo*)0 /* unexpected */);
- // but can be cast to derivedClass::*
- (void)MP((int32_t C2TestAInfo::*)&C2TestAInfo::mSigned32,
- (C2TestAInfo*)0 /* expected */, (C2TestAStruct*)0 /* unexpected */);
-
- // TRICKY: baseClass::* does not autoconvert to derivedClass::* even in templates
- // (void)MP(&C2TestAInfo::mSigned32, (C2TestAInfo*)0);
-}
-
-TEST_F(C2ParamTest, MemberPointerCast) {
- typedef C2GlobalParam<C2Info, C2TestAStruct> C2TestAInfo;
-
- static_assert(offsetof(C2TestAInfo, mSigned32) == 8, "offset should be 8");
- constexpr int32_t C2TestAStruct::* s32ptr = &C2TestAInfo::mSigned32;
- constexpr int32_t C2TestAInfo::* s32ptr_derived = (int32_t C2TestAStruct::*)&C2TestAInfo::mSigned32;
- constexpr int32_t C2TestAInfo::* s32ptr_cast2derived = (int32_t C2TestAInfo::*)s32ptr;
- C2TestAInfo *info = (C2TestAInfo *)256;
- C2TestAStruct *strukt = (C2TestAStruct *)info;
- int32_t *info_s32_derived = &(info->*s32ptr_derived);
- int32_t *info_s32_cast2derived = &(info->*s32ptr_cast2derived);
- int32_t *info_s32 = &(info->*s32ptr);
- int32_t *strukt_s32 = &(strukt->*s32ptr);
-
- EXPECT_EQ(256u, (uintptr_t)info);
- EXPECT_EQ(264u, (uintptr_t)strukt);
- EXPECT_EQ(264u, (uintptr_t)info_s32_derived);
- EXPECT_EQ(264u, (uintptr_t)info_s32_cast2derived);
- EXPECT_EQ(264u, (uintptr_t)info_s32);
- EXPECT_EQ(264u, (uintptr_t)strukt_s32);
-
- typedef C2GlobalParam<C2Info, C2TestFlexEndSizeStruct> C2TestFlexEndSizeInfo;
- static_assert(offsetof(C2TestFlexEndSizeInfo, m.mSigned32) == 8, "offset should be 8");
- static_assert(offsetof(C2TestFlexEndSizeInfo, m.mFlexSize) == 12, "offset should be 12");
-}
-
-/* ===================================== PARAM USAGE TESTS ===================================== */
-
-struct C2NumberStruct {
- int32_t mNumber;
- C2NumberStruct() {}
- C2NumberStruct(int32_t _number) : mNumber(_number) {}
-
- DEFINE_AND_DESCRIBE_C2STRUCT(Number)
- C2FIELD(mNumber, "number")
-};
-
-struct C2NumbersStruct {
- int32_t mNumbers[];
- C2NumbersStruct() {}
-
- DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(Numbers, mNumbers)
- C2FIELD(mNumbers, "numbers")
-};
-static_assert(sizeof(C2NumbersStruct) == 0, "C2NumbersStruct has incorrect size");
-
-typedef C2GlobalParam<C2Tuning, C2NumberStruct> C2NumberTuning;
-typedef C2PortParam<C2Tuning, C2NumberStruct> C2NumberPortTuning;
-typedef C2StreamParam<C2Tuning, C2NumberStruct> C2NumberStreamTuning;
-
-typedef C2GlobalParam<C2Tuning, C2NumbersStruct> C2NumbersTuning;
-typedef C2PortParam<C2Tuning, C2NumbersStruct> C2NumbersPortTuning;
-typedef C2StreamParam<C2Tuning, C2NumbersStruct> C2NumbersStreamTuning;
-
-//
-#if 0
-
-void test() {
- C2NumberStruct s(10);
- (void)C2NumberStruct::fieldList;
-};
-
-typedef C2StreamParam<C2Tuning, C2Int64Value, kParamIndexNumberB> C2NumberConfig4;
-typedef C2PortParam<C2Tuning, C2Int32Value, kParamIndexNumber> C2NumberConfig3;
-typedef C2GlobalParam<C2Tuning, C2StringValue, kParamIndexNumber> C2VideoNameConfig;
-
-void test3() {
- C2NumberConfig3 s(10);
- s.mValue = 11;
- s = 12;
- (void)C2NumberConfig3::fieldList;
- std::shared_ptr<C2VideoNameConfig> n = C2VideoNameConfig::alloc_shared(25);
- strcpy(n->m.mValue, "lajos");
- C2NumberConfig4 t(false, 0, 11);
- t.mValue = 15;
-};
-
-struct C2NumbersStruct {
- int32_t mNumbers[];
- enum { baseIndex = kParamIndexNumber };
- const static std::initializer_list<const C2FieldDescriptor> fieldList;
- C2NumbersStruct() {}
-
- FLEX(C2NumbersStruct, mNumbers);
-};
-
-static_assert(sizeof(C2NumbersStruct) == 0, "yes");
-
-
-typedef C2GlobalParam<C2Info, C2NumbersStruct> C2NumbersInfo;
-
-const std::initializer_list<const C2FieldDescriptor> C2NumbersStruct::fieldList =
-// { { FD::INT32, 0, "widths" } };
- { C2FieldDescriptor(&((C2NumbersStruct*)(nullptr))->mNumbers, "number") };
-
-typedef C2PortParam<C2Tuning, C2NumberStruct> C2NumberConfig;
-
-std::list<const C2FieldDescriptor> myList = C2NumberConfig::fieldList;
-
- std::unique_ptr<android::C2ParamDescriptor> __test_describe(uint32_t paramType) {
- std::list<const C2FieldDescriptor> fields = describeC2Params<C2NumberConfig>();
-
- auto widths = C2NumbersInfo::alloc_shared(5);
- widths->flexCount();
- widths->m.mNumbers[4] = 1;
-
- test();
- test3();
-
- C2NumberConfig outputWidth(false, 123);
-
- C2Param::Index index(paramType);
- switch (paramType) {
- case C2NumberConfig::baseIndex:
- return std::unique_ptr<C2ParamDescriptor>(new C2ParamDescriptor{
- true /* isRequired */,
- "number",
- index,
- });
- }
- return nullptr;
- }
-
-
-} // namespace android
-
-#endif
-//
-
-template<typename T>
-bool canSetPort(T &o, bool output) { return o.setPort(output); }
-bool canSetPort(...) { return false; }
-
-template<typename S, typename=decltype(((S*)0)->setPort(true))>
-static std::true_type _canCallSetPort(int);
-template<typename>
-static std::false_type _canCallSetPort(...);
-#define canCallSetPort(x) decltype(_canCallSetPort<std::remove_reference<decltype(x)>::type>(0))::value
-
-/* ======================================= STATIC TESTS ======================================= */
-
-static_assert(_C2Comparable<int>::value, "int is not comparable");
-static_assert(!_C2Comparable<void>::value, "void is comparable");
-
-struct C2_HIDE _test0 {
- bool operator==(const _test0&);
- bool operator!=(const _test0&);
-};
-struct C2_HIDE _test1 {
- bool operator==(const _test1&);
-};
-struct C2_HIDE _test2 {
- bool operator!=(const _test2&);
-};
-static_assert(_C2Comparable<_test0>::value, "class with == and != is not comparable");
-static_assert(_C2Comparable<_test1>::value, "class with == is not comparable");
-static_assert(_C2Comparable<_test2>::value, "class with != is not comparable");
-
-/* ======================================= C2PARAM TESTS ======================================= */
-
-struct _C2ParamInspector {
- static void StaticTest();
- static void StaticFlexTest();
-};
-
-// TEST_F(_C2ParamInspector, StaticTest) {
-void _C2ParamInspector::StaticTest() {
- typedef C2Param::Index I;
-
- // C2NumberStruct: baseIndex = kIndex (args)
- static_assert(C2NumberStruct::baseIndex == kParamIndexNumber, "bad index");
- static_assert(sizeof(C2NumberStruct) == 4, "bad size");
-
- // C2NumberTuning: kIndex | tun | global (args)
- static_assert(C2NumberTuning::baseIndex == kParamIndexNumber, "bad index");
- static_assert(C2NumberTuning::typeIndex == (kParamIndexNumber | I::kTypeTuning | I::kDirGlobal), "bad index");
- static_assert(sizeof(C2NumberTuning) == 12, "bad size");
-
- static_assert(offsetof(C2NumberTuning, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumberTuning, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumberTuning, mNumber) == 8, "bad offset");
-
- // C2NumberPortTuning: kIndex | tun | port (bool, args)
- static_assert(sizeof(C2NumberPortTuning) == 12, "bad size");
- // C2NumberPortTuning::input: kIndex | tun | port | input (args)
- // C2NumberPortTuning::output: kIndex | tun | port | output (args)
- static_assert(C2NumberPortTuning::input::baseIndex ==
- kParamIndexNumber, "bad index");
- static_assert(C2NumberPortTuning::input::typeIndex ==
- (kParamIndexNumber | I::kTypeTuning | I::kDirInput), "bad index");
- static_assert(C2NumberPortTuning::output::baseIndex ==
- kParamIndexNumber, "bad index");
- static_assert(C2NumberPortTuning::output::typeIndex ==
- (kParamIndexNumber | I::kTypeTuning | I::kDirOutput), "bad index");
- static_assert(sizeof(C2NumberPortTuning::input) == 12, "bad size");
- static_assert(sizeof(C2NumberPortTuning::output) == 12, "bad size");
- static_assert(offsetof(C2NumberPortTuning::input, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumberPortTuning::input, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumberPortTuning::input, mNumber) == 8, "bad offset");
- static_assert(offsetof(C2NumberPortTuning::output, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumberPortTuning::output, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumberPortTuning::output, mNumber) == 8, "bad offset");
-
- // C2NumberStreamTuning: kIndex | tun | str (bool, uint, args)
- static_assert(sizeof(C2NumberStreamTuning) == 12u, "bad size");
- // C2NumberStreamTuning::input kIndex | tun | str | input (int, args)
- // C2NumberStreamTuning::output kIx | tun | str | output (int, args)
- static_assert(C2NumberStreamTuning::input::baseIndex ==
- kParamIndexNumber, "bad index");
- static_assert(C2NumberStreamTuning::input::typeIndex ==
- (kParamIndexNumber | I::kTypeTuning | I::kDirInput | I::kStreamFlag), "bad index");
- static_assert(C2NumberStreamTuning::output::baseIndex ==
- kParamIndexNumber, "bad index");
- static_assert(C2NumberStreamTuning::output::typeIndex ==
- (kParamIndexNumber | I::kTypeTuning | I::kDirOutput | I::kStreamFlag), "bad index");
- static_assert(sizeof(C2NumberStreamTuning::input) == 12u, "bad size");
- static_assert(sizeof(C2NumberStreamTuning::output) == 12u, "bad size");
- static_assert(offsetof(C2NumberStreamTuning::input, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumberStreamTuning::input, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumberStreamTuning::input, mNumber) == 8, "bad offset");
- static_assert(offsetof(C2NumberStreamTuning::output, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumberStreamTuning::output, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumberStreamTuning::output, mNumber) == 8, "bad offset");
-}
-
-void _C2ParamInspector::StaticFlexTest() {
- typedef C2Param::Index I;
-
- // C2NumbersStruct: baseIndex = kIndex (args)
- static_assert(C2NumbersStruct::baseIndex == (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
- static_assert(sizeof(C2NumbersStruct) == 0, "bad size");
-
- // C2NumbersTuning: kIndex | tun | global (args)
- static_assert(C2NumbersTuning::baseIndex == (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
- static_assert(C2NumbersTuning::typeIndex == (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirGlobal), "bad index");
- static_assert(sizeof(C2NumbersTuning) == 8, "bad size");
-
- static_assert(offsetof(C2NumbersTuning, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumbersTuning, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumbersTuning, m.mNumbers) == 8, "bad offset");
-
- // C2NumbersPortTuning: kIndex | tun | port (bool, args)
- static_assert(sizeof(C2NumbersPortTuning) == 8, "bad size");
- // C2NumbersPortTuning::input: kIndex | tun | port | input (args)
- // C2NumbersPortTuning::output: kIndex | tun | port | output (args)
- static_assert(C2NumbersPortTuning::input::baseIndex ==
- (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
- static_assert(C2NumbersPortTuning::input::typeIndex ==
- (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirInput), "bad index");
- static_assert(C2NumbersPortTuning::output::baseIndex ==
- (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
- static_assert(C2NumbersPortTuning::output::typeIndex ==
- (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirOutput), "bad index");
- static_assert(sizeof(C2NumbersPortTuning::input) == 8, "bad size");
- static_assert(sizeof(C2NumbersPortTuning::output) == 8, "bad size");
- static_assert(offsetof(C2NumbersPortTuning::input, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumbersPortTuning::input, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumbersPortTuning::input, m.mNumbers) == 8, "bad offset");
- static_assert(offsetof(C2NumbersPortTuning::output, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumbersPortTuning::output, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumbersPortTuning::output, m.mNumbers) == 8, "bad offset");
-
- // C2NumbersStreamTuning: kIndex | tun | str (bool, uint, args)
- static_assert(sizeof(C2NumbersStreamTuning) == 8, "bad size");
- // C2NumbersStreamTuning::input kIndex | tun | str | input (int, args)
- // C2NumbersStreamTuning::output kIx | tun | str | output (int, args)
- static_assert(C2NumbersStreamTuning::input::baseIndex ==
- (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
- static_assert(C2NumbersStreamTuning::input::typeIndex ==
- (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirInput | I::kStreamFlag), "bad index");
- static_assert(C2NumbersStreamTuning::output::baseIndex ==
- (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
- static_assert(C2NumbersStreamTuning::output::typeIndex ==
- (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirOutput | I::kStreamFlag), "bad index");
- static_assert(sizeof(C2NumbersStreamTuning::input) == 8, "bad size");
- static_assert(sizeof(C2NumbersStreamTuning::output) == 8, "bad size");
- static_assert(offsetof(C2NumbersStreamTuning::input, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumbersStreamTuning::input, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumbersStreamTuning::input, m.mNumbers) == 8, "bad offset");
- static_assert(offsetof(C2NumbersStreamTuning::output, _mSize) == 0, "bad size");
- static_assert(offsetof(C2NumbersStreamTuning::output, _mIndex) == 4, "bad offset");
- static_assert(offsetof(C2NumbersStreamTuning::output, m.mNumbers) == 8, "bad offset");
-}
-
-TEST_F(C2ParamTest, ParamOpsTest) {
- const C2NumberStruct str(100);
- C2NumberStruct bstr;
-
- {
- EXPECT_EQ(100, str.mNumber);
- bstr.mNumber = 100;
-
- C2Param::BaseIndex index = C2NumberStruct::baseIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_FALSE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
- }
-
- const C2NumberTuning tun(100);
- C2NumberTuning btun;
-
- {
- // flags & invariables
- for (const auto &p : { tun, btun }) {
- EXPECT_TRUE((bool)p);
- EXPECT_FALSE(!p);
- EXPECT_EQ(12u, p.size());
-
- EXPECT_FALSE(p.isVendor());
- EXPECT_FALSE(p.isFlexible());
- EXPECT_TRUE(p.isGlobal());
- EXPECT_FALSE(p.forInput());
- EXPECT_FALSE(p.forOutput());
- EXPECT_FALSE(p.forStream());
- EXPECT_FALSE(p.forPort());
- }
-
- // value
- EXPECT_EQ(100, tun.mNumber);
- EXPECT_EQ(0, btun.mNumber);
- EXPECT_FALSE(tun == btun);
- EXPECT_FALSE(tun.operator==(btun));
- EXPECT_TRUE(tun != btun);
- EXPECT_TRUE(tun.operator!=(btun));
- btun.mNumber = 100;
- EXPECT_EQ(tun, btun);
-
- // index
- EXPECT_EQ(C2Param::Type(tun.type()).baseIndex(), C2NumberStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(tun.type()).paramIndex(), kParamIndexNumber);
- EXPECT_EQ(tun.type(), C2NumberTuning::typeIndex);
- EXPECT_EQ(tun.stream(), ~0u);
-
- C2Param::BaseIndex index = C2NumberTuning::baseIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_FALSE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
-
- C2Param::Type type = C2NumberTuning::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_FALSE(type.isFlexible());
- EXPECT_TRUE(type.isGlobal());
- EXPECT_FALSE(type.forInput());
- EXPECT_FALSE(type.forOutput());
- EXPECT_FALSE(type.forStream());
- EXPECT_FALSE(type.forPort());
-
- EXPECT_EQ(C2NumberTuning::From(nullptr), nullptr);
- EXPECT_EQ(C2NumberTuning::From(&tun), &tun);
- EXPECT_EQ(C2NumberPortTuning::From(&tun), nullptr);
- EXPECT_EQ(C2NumberPortTuning::input::From(&tun), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(&tun), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::From(&tun), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::input::From(&tun), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::output::From(&tun), nullptr);
- }
-
- const C2NumberPortTuning outp1(true, 100), inp1(false, 100);
- C2NumberPortTuning boutp1, binp1, binp3(false, 100);
- const C2NumberPortTuning::input inp2(100);
- C2NumberPortTuning::input binp2;
- const C2NumberPortTuning::output outp2(100);
- C2NumberPortTuning::output boutp2;
-
- {
- static_assert(canCallSetPort(binp3), "should be able to");
- static_assert(canCallSetPort(binp1), "should be able to");
- static_assert(!canCallSetPort(inp1), "should not be able to (const)");
- static_assert(!canCallSetPort(inp2), "should not be able to (const & type)");
- static_assert(!canCallSetPort(binp2), "should not be able to (type)");
-
- // flags & invariables
- for (const auto &p : { outp1, inp1, boutp1 }) {
- EXPECT_EQ(12u, p.size());
- EXPECT_FALSE(p.isVendor());
- EXPECT_FALSE(p.isFlexible());
- EXPECT_FALSE(p.isGlobal());
- EXPECT_FALSE(p.forStream());
- EXPECT_TRUE(p.forPort());
- }
- for (const auto &p : { inp2, binp2 }) {
- EXPECT_EQ(12u, p.size());
- EXPECT_FALSE(p.isVendor());
- EXPECT_FALSE(p.isFlexible());
- EXPECT_FALSE(p.isGlobal());
- EXPECT_FALSE(p.forStream());
- EXPECT_TRUE(p.forPort());
- }
- for (const auto &p : { outp2, boutp2 }) {
- EXPECT_EQ(12u, p.size());
- EXPECT_FALSE(p.isVendor());
- EXPECT_FALSE(p.isFlexible());
- EXPECT_FALSE(p.isGlobal());
- EXPECT_FALSE(p.forStream());
- EXPECT_TRUE(p.forPort());
- }
-
- // port specific flags & invariables
- EXPECT_FALSE(outp1.forInput());
- EXPECT_TRUE(outp1.forOutput());
-
- EXPECT_TRUE(inp1.forInput());
- EXPECT_FALSE(inp1.forOutput());
-
- for (const auto &p : { outp1, inp1 }) {
- EXPECT_TRUE((bool)p);
- EXPECT_FALSE(!p);
- EXPECT_EQ(100, p.mNumber);
- }
- for (const auto &p : { outp2, boutp2 }) {
- EXPECT_TRUE((bool)p);
- EXPECT_FALSE(!p);
-
- EXPECT_FALSE(p.forInput());
- EXPECT_TRUE(p.forOutput());
- }
- for (const auto &p : { inp2, binp2 }) {
- EXPECT_TRUE((bool)p);
- EXPECT_FALSE(!p);
-
- EXPECT_TRUE(p.forInput());
- EXPECT_FALSE(p.forOutput());
- }
- for (const auto &p : { boutp1 } ) {
- EXPECT_FALSE((bool)p);
- EXPECT_TRUE(!p);
-
- EXPECT_FALSE(p.forInput());
- EXPECT_FALSE(p.forOutput());
- EXPECT_EQ(0, p.mNumber);
- }
-
- // values
- EXPECT_EQ(100, inp2.mNumber);
- EXPECT_EQ(100, outp2.mNumber);
- EXPECT_EQ(0, binp1.mNumber);
- EXPECT_EQ(0, binp2.mNumber);
- EXPECT_EQ(0, boutp1.mNumber);
- EXPECT_EQ(0, boutp2.mNumber);
-
- EXPECT_TRUE(inp1 != outp1);
- EXPECT_TRUE(inp1 == inp2);
- EXPECT_TRUE(outp1 == outp2);
- EXPECT_TRUE(binp1 == boutp1);
- EXPECT_TRUE(binp2 != boutp2);
-
- EXPECT_TRUE(inp1 != binp1);
- binp1.mNumber = 100;
- EXPECT_TRUE(inp1 != binp1);
- binp1.setPort(false /* output */);
- EXPECT_TRUE((bool)binp1);
- EXPECT_FALSE(!binp1);
- EXPECT_TRUE(inp1 == binp1);
-
- EXPECT_TRUE(inp2 != binp2);
- binp2.mNumber = 100;
- EXPECT_TRUE(inp2 == binp2);
-
- binp1.setPort(true /* output */);
- EXPECT_TRUE(outp1 == binp1);
-
- EXPECT_TRUE(outp1 != boutp1);
- boutp1.mNumber = 100;
- EXPECT_TRUE(outp1 != boutp1);
- boutp1.setPort(true /* output */);
- EXPECT_TRUE((bool)boutp1);
- EXPECT_FALSE(!boutp1);
- EXPECT_TRUE(outp1 == boutp1);
-
- EXPECT_TRUE(outp2 != boutp2);
- boutp2.mNumber = 100;
- EXPECT_TRUE(outp2 == boutp2);
-
- boutp1.setPort(false /* output */);
- EXPECT_TRUE(inp1 == boutp1);
-
- // index
- EXPECT_EQ(C2Param::Type(inp1.type()).baseIndex(), C2NumberStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(inp1.type()).paramIndex(), kParamIndexNumber);
- EXPECT_EQ(inp1.type(), C2NumberPortTuning::input::typeIndex);
- EXPECT_EQ(inp1.stream(), ~0u);
-
- EXPECT_EQ(C2Param::Type(inp2.type()).baseIndex(), C2NumberStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(inp2.type()).paramIndex(), kParamIndexNumber);
- EXPECT_EQ(inp2.type(), C2NumberPortTuning::input::typeIndex);
- EXPECT_EQ(inp2.stream(), ~0u);
-
- EXPECT_EQ(C2Param::Type(outp1.type()).baseIndex(), C2NumberStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(outp1.type()).paramIndex(), kParamIndexNumber);
- EXPECT_EQ(outp1.type(), C2NumberPortTuning::output::typeIndex);
- EXPECT_EQ(outp1.stream(), ~0u);
-
- EXPECT_EQ(C2Param::Type(outp2.type()).baseIndex(), C2NumberStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(outp2.type()).paramIndex(), kParamIndexNumber);
- EXPECT_EQ(outp2.type(), C2NumberPortTuning::output::typeIndex);
- EXPECT_EQ(outp2.stream(), ~0u);
-
- C2Param::BaseIndex index = C2NumberPortTuning::input::typeIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_FALSE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
-
- index = C2NumberPortTuning::output::typeIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_FALSE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
-
- C2Param::Type type = C2NumberPortTuning::input::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_FALSE(type.isFlexible());
- EXPECT_FALSE(type.isGlobal());
- EXPECT_TRUE(type.forInput());
- EXPECT_FALSE(type.forOutput());
- EXPECT_FALSE(type.forStream());
- EXPECT_TRUE(type.forPort());
-
- type = C2NumberPortTuning::output::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_FALSE(type.isFlexible());
- EXPECT_FALSE(type.isGlobal());
- EXPECT_FALSE(type.forInput());
- EXPECT_TRUE(type.forOutput());
- EXPECT_FALSE(type.forStream());
- EXPECT_TRUE(type.forPort());
-
- EXPECT_EQ(C2NumberPortTuning::From(nullptr), nullptr);
- EXPECT_EQ(C2NumberPortTuning::input::From(nullptr), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(nullptr), nullptr);
- EXPECT_EQ(C2NumberTuning::From(&inp1), nullptr);
- EXPECT_EQ(C2NumberTuning::From(&inp2), nullptr);
- EXPECT_EQ(C2NumberTuning::From(&outp1), nullptr);
- EXPECT_EQ(C2NumberTuning::From(&outp2), nullptr);
- EXPECT_EQ(C2NumberPortTuning::From(&inp1), &inp1);
- EXPECT_EQ(C2NumberPortTuning::From(&inp2), (C2NumberPortTuning*)&inp2);
- EXPECT_EQ(C2NumberPortTuning::From(&outp1), &outp1);
- EXPECT_EQ(C2NumberPortTuning::From(&outp2), (C2NumberPortTuning*)&outp2);
- EXPECT_EQ(C2NumberPortTuning::input::From(&inp1), (C2NumberPortTuning::input*)&inp1);
- EXPECT_EQ(C2NumberPortTuning::input::From(&inp2), &inp2);
- EXPECT_EQ(C2NumberPortTuning::input::From(&outp1), nullptr);
- EXPECT_EQ(C2NumberPortTuning::input::From(&outp2), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(&inp1), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(&inp2), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(&outp1), (C2NumberPortTuning::output*)&outp1);
- EXPECT_EQ(C2NumberPortTuning::output::From(&outp2), &outp2);
- EXPECT_EQ(C2NumberStreamTuning::From(&inp1), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::From(&inp2), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::From(&outp1), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::From(&outp2), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::input::From(&inp1), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::input::From(&inp2), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::input::From(&outp1), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::input::From(&outp2), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::output::From(&inp1), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::output::From(&inp2), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::output::From(&outp1), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::output::From(&outp2), nullptr);
- }
-
- const C2NumberStreamTuning outs1(true, 1u, 100), ins1(false, 1u, 100);
- C2NumberStreamTuning bouts1, bins1, bins3(false, 1u, 100);
- const C2NumberStreamTuning::input ins2(1u, 100);
- C2NumberStreamTuning::input bins2;
- const C2NumberStreamTuning::output outs2(1u, 100);
- C2NumberStreamTuning::output bouts2;
-
- {
- static_assert(canCallSetPort(bins3), "should be able to");
- static_assert(canCallSetPort(bins1), "should be able to");
- static_assert(!canCallSetPort(ins1), "should not be able to (const)");
- static_assert(!canCallSetPort(ins2), "should not be able to (const & type)");
- static_assert(!canCallSetPort(bins2), "should not be able to (type)");
-
- // flags & invariables
- for (const auto &p : { outs1, ins1, bouts1 }) {
- EXPECT_EQ(12u, p.size());
- EXPECT_FALSE(p.isVendor());
- EXPECT_FALSE(p.isFlexible());
- EXPECT_FALSE(p.isGlobal());
- EXPECT_TRUE(p.forStream());
- EXPECT_FALSE(p.forPort());
- }
- for (const auto &p : { ins2, bins2 }) {
- EXPECT_EQ(12u, p.size());
- EXPECT_FALSE(p.isVendor());
- EXPECT_FALSE(p.isFlexible());
- EXPECT_FALSE(p.isGlobal());
- EXPECT_TRUE(p.forStream());
- EXPECT_FALSE(p.forPort());
- }
- for (const auto &p : { outs2, bouts2 }) {
- EXPECT_EQ(12u, p.size());
- EXPECT_FALSE(p.isVendor());
- EXPECT_FALSE(p.isFlexible());
- EXPECT_FALSE(p.isGlobal());
- EXPECT_TRUE(p.forStream());
- EXPECT_FALSE(p.forPort());
- }
-
- // port specific flags & invariables
- EXPECT_FALSE(outs1.forInput());
- EXPECT_TRUE(outs1.forOutput());
-
- EXPECT_TRUE(ins1.forInput());
- EXPECT_FALSE(ins1.forOutput());
-
- for (const auto &p : { outs1, ins1 }) {
- EXPECT_TRUE((bool)p);
- EXPECT_FALSE(!p);
- EXPECT_EQ(100, p.mNumber);
- EXPECT_EQ(1u, p.stream());
- }
- for (const auto &p : { outs2, bouts2 }) {
- EXPECT_TRUE((bool)p);
- EXPECT_FALSE(!p);
-
- EXPECT_FALSE(p.forInput());
- EXPECT_TRUE(p.forOutput());
- }
- for (const auto &p : { ins2, bins2 }) {
- EXPECT_TRUE((bool)p);
- EXPECT_FALSE(!p);
-
- EXPECT_TRUE(p.forInput());
- EXPECT_FALSE(p.forOutput());
- }
- for (const auto &p : { bouts1 } ) {
- EXPECT_FALSE((bool)p);
- EXPECT_TRUE(!p);
-
- EXPECT_FALSE(p.forInput());
- EXPECT_FALSE(p.forOutput());
- EXPECT_EQ(0, p.mNumber);
- }
-
- // values
- EXPECT_EQ(100, ins2.mNumber);
- EXPECT_EQ(100, outs2.mNumber);
- EXPECT_EQ(0, bins1.mNumber);
- EXPECT_EQ(0, bins2.mNumber);
- EXPECT_EQ(0, bouts1.mNumber);
- EXPECT_EQ(0, bouts2.mNumber);
-
- EXPECT_EQ(1u, ins2.stream());
- EXPECT_EQ(1u, outs2.stream());
- EXPECT_EQ(0u, bins1.stream());
- EXPECT_EQ(0u, bins2.stream());
- EXPECT_EQ(0u, bouts1.stream());
- EXPECT_EQ(0u, bouts2.stream());
-
- EXPECT_TRUE(ins1 != outs1);
- EXPECT_TRUE(ins1 == ins2);
- EXPECT_TRUE(outs1 == outs2);
- EXPECT_TRUE(bins1 == bouts1);
- EXPECT_TRUE(bins2 != bouts2);
-
- EXPECT_TRUE(ins1 != bins1);
- bins1.mNumber = 100;
- EXPECT_TRUE(ins1 != bins1);
- bins1.setPort(false /* output */);
- EXPECT_TRUE(ins1 != bins1);
- bins1.setStream(1u);
- EXPECT_TRUE(ins1 == bins1);
-
- EXPECT_TRUE(ins2 != bins2);
- bins2.mNumber = 100;
- EXPECT_TRUE(ins2 != bins2);
- bins2.setStream(1u);
- EXPECT_TRUE(ins2 == bins2);
-
- bins1.setPort(true /* output */);
- EXPECT_TRUE(outs1 == bins1);
-
- EXPECT_TRUE(outs1 != bouts1);
- bouts1.mNumber = 100;
- EXPECT_TRUE(outs1 != bouts1);
- bouts1.setPort(true /* output */);
- EXPECT_TRUE(outs1 != bouts1);
- bouts1.setStream(1u);
- EXPECT_TRUE(outs1 == bouts1);
-
- EXPECT_TRUE(outs2 != bouts2);
- bouts2.mNumber = 100;
- EXPECT_TRUE(outs2 != bouts2);
- bouts2.setStream(1u);
- EXPECT_TRUE(outs2 == bouts2);
-
- bouts1.setPort(false /* output */);
- EXPECT_TRUE(ins1 == bouts1);
-
- // index
- EXPECT_EQ(C2Param::Type(ins1.type()).baseIndex(), C2NumberStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(ins1.type()).paramIndex(), kParamIndexNumber);
- EXPECT_EQ(ins1.type(), C2NumberStreamTuning::input::typeIndex);
-
- EXPECT_EQ(C2Param::Type(ins2.type()).baseIndex(), C2NumberStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(ins2.type()).paramIndex(), kParamIndexNumber);
- EXPECT_EQ(ins2.type(), C2NumberStreamTuning::input::typeIndex);
-
- EXPECT_EQ(C2Param::Type(outs1.type()).baseIndex(), C2NumberStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(outs1.type()).paramIndex(), kParamIndexNumber);
- EXPECT_EQ(outs1.type(), C2NumberStreamTuning::output::typeIndex);
-
- EXPECT_EQ(C2Param::Type(outs2.type()).baseIndex(), C2NumberStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(outs2.type()).paramIndex(), kParamIndexNumber);
- EXPECT_EQ(outs2.type(), C2NumberStreamTuning::output::typeIndex);
-
- C2Param::BaseIndex index = C2NumberStreamTuning::input::typeIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_FALSE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
-
- index = C2NumberStreamTuning::output::typeIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_FALSE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
-
- C2Param::Type type = C2NumberStreamTuning::input::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_FALSE(type.isFlexible());
- EXPECT_FALSE(type.isGlobal());
- EXPECT_TRUE(type.forInput());
- EXPECT_FALSE(type.forOutput());
- EXPECT_TRUE(type.forStream());
- EXPECT_FALSE(type.forPort());
-
- type = C2NumberStreamTuning::output::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_FALSE(type.isFlexible());
- EXPECT_FALSE(type.isGlobal());
- EXPECT_FALSE(type.forInput());
- EXPECT_TRUE(type.forOutput());
- EXPECT_TRUE(type.forStream());
- EXPECT_FALSE(type.forPort());
-
- EXPECT_EQ(C2NumberPortTuning::From(nullptr), nullptr);
- EXPECT_EQ(C2NumberPortTuning::input::From(nullptr), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(nullptr), nullptr);
- EXPECT_EQ(C2NumberTuning::From(&ins1), nullptr);
- EXPECT_EQ(C2NumberTuning::From(&ins2), nullptr);
- EXPECT_EQ(C2NumberTuning::From(&outs1), nullptr);
- EXPECT_EQ(C2NumberTuning::From(&outs2), nullptr);
- EXPECT_EQ(C2NumberPortTuning::From(&ins1), nullptr);
- EXPECT_EQ(C2NumberPortTuning::From(&ins2), nullptr);
- EXPECT_EQ(C2NumberPortTuning::From(&outs1), nullptr);
- EXPECT_EQ(C2NumberPortTuning::From(&outs2), nullptr);
- EXPECT_EQ(C2NumberPortTuning::input::From(&ins1), nullptr);
- EXPECT_EQ(C2NumberPortTuning::input::From(&ins2), nullptr);
- EXPECT_EQ(C2NumberPortTuning::input::From(&outs1), nullptr);
- EXPECT_EQ(C2NumberPortTuning::input::From(&outs2), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(&ins1), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(&ins2), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(&outs1), nullptr);
- EXPECT_EQ(C2NumberPortTuning::output::From(&outs2), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::From(&ins1), &ins1);
- EXPECT_EQ(C2NumberStreamTuning::From(&ins2), (C2NumberStreamTuning*)&ins2);
- EXPECT_EQ(C2NumberStreamTuning::From(&outs1), &outs1);
- EXPECT_EQ(C2NumberStreamTuning::From(&outs2), (C2NumberStreamTuning*)&outs2);
- EXPECT_EQ(C2NumberStreamTuning::input::From(&ins1), (C2NumberStreamTuning::input*)&ins1);
- EXPECT_EQ(C2NumberStreamTuning::input::From(&ins2), &ins2);
- EXPECT_EQ(C2NumberStreamTuning::input::From(&outs1), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::input::From(&outs2), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::output::From(&ins1), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::output::From(&ins2), nullptr);
- EXPECT_EQ(C2NumberStreamTuning::output::From(&outs1), (C2NumberStreamTuning::output*)&outs1);
- EXPECT_EQ(C2NumberStreamTuning::output::From(&outs2), &outs2);
-
- }
-
- {
- uint32_t videoWidth[] = { 12u, C2NumberStreamTuning::output::typeIndex, 100 };
- C2Param *p1 = C2Param::From(videoWidth, sizeof(videoWidth));
- EXPECT_NE(p1, nullptr);
- EXPECT_EQ(12u, p1->size());
- EXPECT_EQ(p1->type(), C2NumberStreamTuning::output::typeIndex);
-
- p1 = C2Param::From(videoWidth, sizeof(videoWidth) + 2);
- EXPECT_EQ(p1, nullptr);
-
- p1 = C2Param::From(videoWidth, sizeof(videoWidth) - 2);
- EXPECT_EQ(p1, nullptr);
-
- p1 = C2Param::From(videoWidth, 3);
- EXPECT_EQ(p1, nullptr);
-
- p1 = C2Param::From(videoWidth, 0);
- EXPECT_EQ(p1, nullptr);
- }
-}
-
-void StaticTestAddBaseIndex() {
- struct nobase {};
- struct base { enum : uint32_t { baseIndex = 1 }; };
- static_assert(C2AddBaseIndex<nobase, 2>::baseIndex == 2, "should be 2");
- static_assert(C2AddBaseIndex<base, 1>::baseIndex == 1, "should be 1");
-}
-
-class TestFlexHelper {
- struct _Flex {
- int32_t a;
- char b[];
- _Flex() {}
- FLEX(_Flex, b);
- };
-
- struct _BoFlex {
- _Flex a;
- _BoFlex() {}
- FLEX(_BoFlex, a);
- };
-
- struct _NonFlex {
- };
-
-
- static void StaticTest() {
- static_assert(std::is_same<_C2FlexHelper<char>::flexType, void>::value, "should be void");
- static_assert(std::is_same<_C2FlexHelper<char[]>::flexType, char>::value, "should be char");
- static_assert(std::is_same<_C2FlexHelper<_Flex>::flexType, char>::value, "should be char");
-
- static_assert(std::is_same<_C2FlexHelper<_BoFlex>::flexType, char>::value, "should be char");
-
- static_assert(_C2Flexible<_Flex>::value, "should be flexible");
- static_assert(!_C2Flexible<_NonFlex>::value, "should not be flexible");
- }
-};
-
-TEST_F(C2ParamTest, FlexParamOpsTest) {
-// const C2NumbersStruct str{100};
- C2NumbersStruct bstr;
- {
-// EXPECT_EQ(100, str->m.mNumbers[0]);
- (void)&bstr.mNumbers[0];
-
- C2Param::BaseIndex index = C2NumbersStruct::baseIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_TRUE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
- }
-
- std::unique_ptr<C2NumbersTuning> tun_ = C2NumbersTuning::alloc_unique(1);
- tun_->m.mNumbers[0] = 100;
- std::unique_ptr<const C2NumbersTuning> tun = std::move(tun_);
- std::shared_ptr<C2NumbersTuning> btun = C2NumbersTuning::alloc_shared(1);
-
- {
- // flags & invariables
- const C2NumbersTuning *T[] = { tun.get(), btun.get() };
- for (const auto p : T) {
- EXPECT_TRUE((bool)(*p));
- EXPECT_FALSE(!(*p));
- EXPECT_EQ(12u, p->size());
-
- EXPECT_FALSE(p->isVendor());
- EXPECT_TRUE(p->isFlexible());
- EXPECT_TRUE(p->isGlobal());
- EXPECT_FALSE(p->forInput());
- EXPECT_FALSE(p->forOutput());
- EXPECT_FALSE(p->forStream());
- EXPECT_FALSE(p->forPort());
- }
-
- // value
- EXPECT_EQ(100, tun->m.mNumbers[0]);
- EXPECT_EQ(0, btun->m.mNumbers[0]);
- EXPECT_FALSE(*tun == *btun);
- EXPECT_FALSE(tun->operator==(*btun));
- EXPECT_TRUE(*tun != *btun);
- EXPECT_TRUE(tun->operator!=(*btun));
- btun->m.mNumbers[0] = 100;
- EXPECT_EQ(*tun, *btun);
-
- // index
- EXPECT_EQ(C2Param::Type(tun->type()).baseIndex(), C2NumbersStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(tun->type()).paramIndex(), kParamIndexNumbers);
- EXPECT_EQ(tun->type(), C2NumbersTuning::typeIndex);
- EXPECT_EQ(tun->stream(), ~0u);
-
- C2Param::BaseIndex index = C2NumbersTuning::baseIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_TRUE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
-
- C2Param::Type type = C2NumbersTuning::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_TRUE(type.isFlexible());
- EXPECT_TRUE(type.isGlobal());
- EXPECT_FALSE(type.forInput());
- EXPECT_FALSE(type.forOutput());
- EXPECT_FALSE(type.forStream());
- EXPECT_FALSE(type.forPort());
-
- EXPECT_EQ(C2NumbersTuning::From(nullptr), nullptr);
- EXPECT_EQ(C2NumbersTuning::From(tun.get()), tun.get());
- EXPECT_EQ(C2NumbersPortTuning::From(tun.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::input::From(tun.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(tun.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::From(tun.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::input::From(tun.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::output::From(tun.get()), nullptr);
- }
-
- std::unique_ptr<C2NumbersPortTuning> outp1_(C2NumbersPortTuning::alloc_unique(1, true)),
- inp1_ = C2NumbersPortTuning::alloc_unique(1, false);
- outp1_->m.mNumbers[0] = 100;
- inp1_->m.mNumbers[0] = 100;
- std::unique_ptr<const C2NumbersPortTuning> outp1 = std::move(outp1_);
- std::unique_ptr<const C2NumbersPortTuning> inp1 = std::move(inp1_);
- std::shared_ptr<C2NumbersPortTuning> boutp1(C2NumbersPortTuning::alloc_shared(1)),
- binp1 = C2NumbersPortTuning::alloc_shared(1),
- binp3 = C2NumbersPortTuning::alloc_shared(1, false);
- binp3->m.mNumbers[0] = 100;
- std::unique_ptr<C2NumbersPortTuning::input> inp2_(C2NumbersPortTuning::input::alloc_unique(1));
- inp2_->m.mNumbers[0] = 100;
- std::unique_ptr<const C2NumbersPortTuning::input> inp2 = std::move(inp2_);
- std::shared_ptr<C2NumbersPortTuning::input> binp2(C2NumbersPortTuning::input::alloc_shared(1));
- std::unique_ptr<C2NumbersPortTuning::output> outp2_(C2NumbersPortTuning::output::alloc_unique(1));
- outp2_->m.mNumbers[0] = 100;
- std::unique_ptr<const C2NumbersPortTuning::output> outp2 = std::move(outp2_);
- std::shared_ptr<C2NumbersPortTuning::output> boutp2(C2NumbersPortTuning::output::alloc_shared(1));
-
- {
- static_assert(canCallSetPort(*binp3), "should be able to");
- static_assert(canCallSetPort(*binp1), "should be able to");
- static_assert(!canCallSetPort(*inp1), "should not be able to (const)");
- static_assert(!canCallSetPort(*inp2), "should not be able to (const & type)");
- static_assert(!canCallSetPort(*binp2), "should not be able to (type)");
-
- // flags & invariables
- const C2NumbersPortTuning *P[] = { outp1.get(), inp1.get(), boutp1.get() };
- for (const auto p : P) {
- EXPECT_EQ(12u, p->size());
- EXPECT_FALSE(p->isVendor());
- EXPECT_TRUE(p->isFlexible());
- EXPECT_FALSE(p->isGlobal());
- EXPECT_FALSE(p->forStream());
- EXPECT_TRUE(p->forPort());
- }
- const C2NumbersPortTuning::input *PI[] = { inp2.get(), binp2.get() };
- for (const auto p : PI) {
- EXPECT_EQ(12u, p->size());
- EXPECT_FALSE(p->isVendor());
- EXPECT_TRUE(p->isFlexible());
- EXPECT_FALSE(p->isGlobal());
- EXPECT_FALSE(p->forStream());
- EXPECT_TRUE(p->forPort());
- }
- const C2NumbersPortTuning::output *PO[] = { outp2.get(), boutp2.get() };
- for (const auto p : PO) {
- EXPECT_EQ(12u, p->size());
- EXPECT_FALSE(p->isVendor());
- EXPECT_TRUE(p->isFlexible());
- EXPECT_FALSE(p->isGlobal());
- EXPECT_FALSE(p->forStream());
- EXPECT_TRUE(p->forPort());
- }
-
- // port specific flags & invariables
- EXPECT_FALSE(outp1->forInput());
- EXPECT_TRUE(outp1->forOutput());
-
- EXPECT_TRUE(inp1->forInput());
- EXPECT_FALSE(inp1->forOutput());
-
- const C2NumbersPortTuning *P2[] = { outp1.get(), inp1.get() };
- for (const auto p : P2) {
- EXPECT_TRUE((bool)(*p));
- EXPECT_FALSE(!(*p));
- EXPECT_EQ(100, p->m.mNumbers[0]);
- }
- for (const auto p : PO) {
- EXPECT_TRUE((bool)(*p));
- EXPECT_FALSE(!(*p));
-
- EXPECT_FALSE(p->forInput());
- EXPECT_TRUE(p->forOutput());
- }
- for (const auto p : PI) {
- EXPECT_TRUE((bool)(*p));
- EXPECT_FALSE(!(*p));
-
- EXPECT_TRUE(p->forInput());
- EXPECT_FALSE(p->forOutput());
- }
- const C2NumbersPortTuning *P3[] = { boutp1.get() };
- for (const auto p : P3) {
- EXPECT_FALSE((bool)(*p));
- EXPECT_TRUE(!(*p));
-
- EXPECT_FALSE(p->forInput());
- EXPECT_FALSE(p->forOutput());
- EXPECT_EQ(0, p->m.mNumbers[0]);
- }
-
- // values
- EXPECT_EQ(100, inp2->m.mNumbers[0]);
- EXPECT_EQ(100, outp2->m.mNumbers[0]);
- EXPECT_EQ(0, binp1->m.mNumbers[0]);
- EXPECT_EQ(0, binp2->m.mNumbers[0]);
- EXPECT_EQ(0, boutp1->m.mNumbers[0]);
- EXPECT_EQ(0, boutp2->m.mNumbers[0]);
-
- EXPECT_TRUE(*inp1 != *outp1);
- EXPECT_TRUE(*inp1 == *inp2);
- EXPECT_TRUE(*outp1 == *outp2);
- EXPECT_TRUE(*binp1 == *boutp1);
- EXPECT_TRUE(*binp2 != *boutp2);
-
- EXPECT_TRUE(*inp1 != *binp1);
- binp1->m.mNumbers[0] = 100;
- EXPECT_TRUE(*inp1 != *binp1);
- binp1->setPort(false /* output */);
- EXPECT_TRUE((bool)*binp1);
- EXPECT_FALSE(!*binp1);
- EXPECT_TRUE(*inp1 == *binp1);
-
- EXPECT_TRUE(*inp2 != *binp2);
- binp2->m.mNumbers[0] = 100;
- EXPECT_TRUE(*inp2 == *binp2);
-
- binp1->setPort(true /* output */);
- EXPECT_TRUE(*outp1 == *binp1);
-
- EXPECT_TRUE(*outp1 != *boutp1);
- boutp1->m.mNumbers[0] = 100;
- EXPECT_TRUE(*outp1 != *boutp1);
- boutp1->setPort(true /* output */);
- EXPECT_TRUE((bool)*boutp1);
- EXPECT_FALSE(!*boutp1);
- EXPECT_TRUE(*outp1 == *boutp1);
-
- EXPECT_TRUE(*outp2 != *boutp2);
- boutp2->m.mNumbers[0] = 100;
- EXPECT_TRUE(*outp2 == *boutp2);
-
- boutp1->setPort(false /* output */);
- EXPECT_TRUE(*inp1 == *boutp1);
-
- // index
- EXPECT_EQ(C2Param::Type(inp1->type()).baseIndex(), C2NumbersStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(inp1->type()).paramIndex(), kParamIndexNumbers);
- EXPECT_EQ(inp1->type(), C2NumbersPortTuning::input::typeIndex);
- EXPECT_EQ(inp1->stream(), ~0u);
-
- EXPECT_EQ(C2Param::Type(inp2->type()).baseIndex(), C2NumbersStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(inp2->type()).paramIndex(), kParamIndexNumbers);
- EXPECT_EQ(inp2->type(), C2NumbersPortTuning::input::typeIndex);
- EXPECT_EQ(inp2->stream(), ~0u);
-
- EXPECT_EQ(C2Param::Type(outp1->type()).baseIndex(), C2NumbersStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(outp1->type()).paramIndex(), kParamIndexNumbers);
- EXPECT_EQ(outp1->type(), C2NumbersPortTuning::output::typeIndex);
- EXPECT_EQ(outp1->stream(), ~0u);
-
- EXPECT_EQ(C2Param::Type(outp2->type()).baseIndex(), C2NumbersStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(outp2->type()).paramIndex(), kParamIndexNumbers);
- EXPECT_EQ(outp2->type(), C2NumbersPortTuning::output::typeIndex);
- EXPECT_EQ(outp2->stream(), ~0u);
-
- C2Param::BaseIndex index = C2NumbersPortTuning::input::typeIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_TRUE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
-
- index = C2NumbersPortTuning::output::typeIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_TRUE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
-
- C2Param::Type type = C2NumbersPortTuning::input::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_TRUE(type.isFlexible());
- EXPECT_FALSE(type.isGlobal());
- EXPECT_TRUE(type.forInput());
- EXPECT_FALSE(type.forOutput());
- EXPECT_FALSE(type.forStream());
- EXPECT_TRUE(type.forPort());
-
- type = C2NumbersPortTuning::output::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_TRUE(type.isFlexible());
- EXPECT_FALSE(type.isGlobal());
- EXPECT_FALSE(type.forInput());
- EXPECT_TRUE(type.forOutput());
- EXPECT_FALSE(type.forStream());
- EXPECT_TRUE(type.forPort());
-
- EXPECT_EQ(C2NumbersPortTuning::From(nullptr), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::input::From(nullptr), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(nullptr), nullptr);
- EXPECT_EQ(C2NumbersTuning::From(inp1.get()), nullptr);
- EXPECT_EQ(C2NumbersTuning::From(inp2.get()), nullptr);
- EXPECT_EQ(C2NumbersTuning::From(outp1.get()), nullptr);
- EXPECT_EQ(C2NumbersTuning::From(outp2.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::From(inp1.get()), inp1.get());
- EXPECT_EQ(C2NumbersPortTuning::From(inp2.get()), (C2NumbersPortTuning*)inp2.get());
- EXPECT_EQ(C2NumbersPortTuning::From(outp1.get()), outp1.get());
- EXPECT_EQ(C2NumbersPortTuning::From(outp2.get()), (C2NumbersPortTuning*)outp2.get());
- EXPECT_EQ(C2NumbersPortTuning::input::From(inp1.get()), (C2NumbersPortTuning::input*)inp1.get());
- EXPECT_EQ(C2NumbersPortTuning::input::From(inp2.get()), inp2.get());
- EXPECT_EQ(C2NumbersPortTuning::input::From(outp1.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::input::From(outp2.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(inp1.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(inp2.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(outp1.get()), (C2NumbersPortTuning::output*)outp1.get());
- EXPECT_EQ(C2NumbersPortTuning::output::From(outp2.get()), outp2.get());
- EXPECT_EQ(C2NumbersStreamTuning::From(inp1.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::From(inp2.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::From(outp1.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::From(outp2.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::input::From(inp1.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::input::From(inp2.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::input::From(outp1.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::input::From(outp2.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::output::From(inp1.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::output::From(inp2.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::output::From(outp1.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::output::From(outp2.get()), nullptr);
-
- }
-
- std::unique_ptr<C2NumbersStreamTuning> outs1_(C2NumbersStreamTuning::alloc_unique(1, true, 1u));
- outs1_->m.mNumbers[0] = 100;
- std::unique_ptr<const C2NumbersStreamTuning> outs1 = std::move(outs1_);
- std::unique_ptr<C2NumbersStreamTuning> ins1_(C2NumbersStreamTuning::alloc_unique(1, false, 1u));
- ins1_->m.mNumbers[0] = 100;
- std::unique_ptr<const C2NumbersStreamTuning> ins1 = std::move(ins1_);
- std::shared_ptr<C2NumbersStreamTuning> bouts1(C2NumbersStreamTuning::alloc_shared(1));
- std::shared_ptr<C2NumbersStreamTuning> bins1(C2NumbersStreamTuning::alloc_shared(1));
- std::shared_ptr<C2NumbersStreamTuning> bins3(C2NumbersStreamTuning::alloc_shared(1, false, 1u));
- bins3->m.mNumbers[0] = 100;
- std::unique_ptr<C2NumbersStreamTuning::input> ins2_(C2NumbersStreamTuning::input::alloc_unique(1, 1u));
- ins2_->m.mNumbers[0] = 100;
- std::unique_ptr<const C2NumbersStreamTuning::input> ins2 = std::move(ins2_);
- std::shared_ptr<C2NumbersStreamTuning::input> bins2(C2NumbersStreamTuning::input::alloc_shared(1));
- std::unique_ptr<C2NumbersStreamTuning::output> outs2_(C2NumbersStreamTuning::output::alloc_unique(1, 1u));
- outs2_->m.mNumbers[0] = 100;
- std::unique_ptr<const C2NumbersStreamTuning::output> outs2 = std::move(outs2_);
- std::shared_ptr<C2NumbersStreamTuning::output> bouts2(C2NumbersStreamTuning::output::alloc_shared(1));
-
- {
- static_assert(canCallSetPort(*bins3), "should be able to");
- static_assert(canCallSetPort(*bins1), "should be able to");
- static_assert(!canCallSetPort(*ins1), "should not be able to (const)");
- static_assert(!canCallSetPort(*ins2), "should not be able to (const & type)");
- static_assert(!canCallSetPort(*bins2), "should not be able to (type)");
-
- // flags & invariables
- const C2NumbersStreamTuning *S[] = { outs1.get(), ins1.get(), bouts1.get() };
- for (const auto p : S) {
- EXPECT_EQ(12u, p->size());
- EXPECT_FALSE(p->isVendor());
- EXPECT_TRUE(p->isFlexible());
- EXPECT_FALSE(p->isGlobal());
- EXPECT_TRUE(p->forStream());
- EXPECT_FALSE(p->forPort());
- }
- const C2NumbersStreamTuning::input *SI[] = { ins2.get(), bins2.get() };
- for (const auto p : SI) {
- EXPECT_EQ(12u, p->size());
- EXPECT_FALSE(p->isVendor());
- EXPECT_TRUE(p->isFlexible());
- EXPECT_FALSE(p->isGlobal());
- EXPECT_TRUE(p->forStream());
- EXPECT_FALSE(p->forPort());
- }
- const C2NumbersStreamTuning::output *SO[] = { outs2.get(), bouts2.get() };
- for (const auto p : SO) {
- EXPECT_EQ(12u, p->size());
- EXPECT_FALSE(p->isVendor());
- EXPECT_TRUE(p->isFlexible());
- EXPECT_FALSE(p->isGlobal());
- EXPECT_TRUE(p->forStream());
- EXPECT_FALSE(p->forPort());
- }
-
- // port specific flags & invariables
- EXPECT_FALSE(outs1->forInput());
- EXPECT_TRUE(outs1->forOutput());
-
- EXPECT_TRUE(ins1->forInput());
- EXPECT_FALSE(ins1->forOutput());
-
- const C2NumbersStreamTuning *S2[] = { outs1.get(), ins1.get() };
- for (const auto p : S2) {
- EXPECT_TRUE((bool)(*p));
- EXPECT_FALSE(!(*p));
- EXPECT_EQ(100, p->m.mNumbers[0]);
- EXPECT_EQ(1u, p->stream());
- }
- for (const auto p : SO) {
- EXPECT_TRUE((bool)(*p));
- EXPECT_FALSE(!(*p));
-
- EXPECT_FALSE(p->forInput());
- EXPECT_TRUE(p->forOutput());
- }
- for (const auto p : SI) {
- EXPECT_TRUE((bool)(*p));
- EXPECT_FALSE(!(*p));
-
- EXPECT_TRUE(p->forInput());
- EXPECT_FALSE(p->forOutput());
- }
- const C2NumbersStreamTuning *S3[] = { bouts1.get() };
- for (const auto p : S3) {
- EXPECT_FALSE((bool)(*p));
- EXPECT_TRUE(!(*p));
-
- EXPECT_FALSE(p->forInput());
- EXPECT_FALSE(p->forOutput());
- EXPECT_EQ(0, p->m.mNumbers[0]);
- }
-
- // values
- EXPECT_EQ(100, ins2->m.mNumbers[0]);
- EXPECT_EQ(100, outs2->m.mNumbers[0]);
- EXPECT_EQ(0, bins1->m.mNumbers[0]);
- EXPECT_EQ(0, bins2->m.mNumbers[0]);
- EXPECT_EQ(0, bouts1->m.mNumbers[0]);
- EXPECT_EQ(0, bouts2->m.mNumbers[0]);
-
- EXPECT_EQ(1u, ins2->stream());
- EXPECT_EQ(1u, outs2->stream());
- EXPECT_EQ(0u, bins1->stream());
- EXPECT_EQ(0u, bins2->stream());
- EXPECT_EQ(0u, bouts1->stream());
- EXPECT_EQ(0u, bouts2->stream());
-
- EXPECT_TRUE(*ins1 != *outs1);
- EXPECT_TRUE(*ins1 == *ins2);
- EXPECT_TRUE(*outs1 == *outs2);
- EXPECT_TRUE(*bins1 == *bouts1);
- EXPECT_TRUE(*bins2 != *bouts2);
-
- EXPECT_TRUE(*ins1 != *bins1);
- bins1->m.mNumbers[0] = 100;
- EXPECT_TRUE(*ins1 != *bins1);
- bins1->setPort(false /* output */);
- EXPECT_TRUE(*ins1 != *bins1);
- bins1->setStream(1u);
- EXPECT_TRUE(*ins1 == *bins1);
-
- EXPECT_TRUE(*ins2 != *bins2);
- bins2->m.mNumbers[0] = 100;
- EXPECT_TRUE(*ins2 != *bins2);
- bins2->setStream(1u);
- EXPECT_TRUE(*ins2 == *bins2);
-
- bins1->setPort(true /* output */);
- EXPECT_TRUE(*outs1 == *bins1);
-
- EXPECT_TRUE(*outs1 != *bouts1);
- bouts1->m.mNumbers[0] = 100;
- EXPECT_TRUE(*outs1 != *bouts1);
- bouts1->setPort(true /* output */);
- EXPECT_TRUE(*outs1 != *bouts1);
- bouts1->setStream(1u);
- EXPECT_TRUE(*outs1 == *bouts1);
-
- EXPECT_TRUE(*outs2 != *bouts2);
- bouts2->m.mNumbers[0] = 100;
- EXPECT_TRUE(*outs2 != *bouts2);
- bouts2->setStream(1u);
- EXPECT_TRUE(*outs2 == *bouts2);
-
- bouts1->setPort(false /* output */);
- EXPECT_TRUE(*ins1 == *bouts1);
-
- // index
- EXPECT_EQ(C2Param::Type(ins1->type()).baseIndex(), C2NumbersStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(ins1->type()).paramIndex(), kParamIndexNumbers);
- EXPECT_EQ(ins1->type(), C2NumbersStreamTuning::input::typeIndex);
-
- EXPECT_EQ(C2Param::Type(ins2->type()).baseIndex(), C2NumbersStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(ins2->type()).paramIndex(), kParamIndexNumbers);
- EXPECT_EQ(ins2->type(), C2NumbersStreamTuning::input::typeIndex);
-
- EXPECT_EQ(C2Param::Type(outs1->type()).baseIndex(), C2NumbersStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(outs1->type()).paramIndex(), kParamIndexNumbers);
- EXPECT_EQ(outs1->type(), C2NumbersStreamTuning::output::typeIndex);
-
- EXPECT_EQ(C2Param::Type(outs2->type()).baseIndex(), C2NumbersStruct::baseIndex);
- EXPECT_EQ(C2Param::Type(outs2->type()).paramIndex(), kParamIndexNumbers);
- EXPECT_EQ(outs2->type(), C2NumbersStreamTuning::output::typeIndex);
-
- C2Param::BaseIndex index = C2NumbersStreamTuning::input::typeIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_TRUE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
-
- index = C2NumbersStreamTuning::output::typeIndex;
- EXPECT_FALSE(index.isVendor());
- EXPECT_TRUE(index.isFlexible());
- EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
- EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
-
- C2Param::Type type = C2NumbersStreamTuning::input::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_TRUE(type.isFlexible());
- EXPECT_FALSE(type.isGlobal());
- EXPECT_TRUE(type.forInput());
- EXPECT_FALSE(type.forOutput());
- EXPECT_TRUE(type.forStream());
- EXPECT_FALSE(type.forPort());
-
- type = C2NumbersStreamTuning::output::typeIndex;
- EXPECT_FALSE(type.isVendor());
- EXPECT_TRUE(type.isFlexible());
- EXPECT_FALSE(type.isGlobal());
- EXPECT_FALSE(type.forInput());
- EXPECT_TRUE(type.forOutput());
- EXPECT_TRUE(type.forStream());
- EXPECT_FALSE(type.forPort());
-
- EXPECT_EQ(C2NumbersPortTuning::From(nullptr), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::input::From(nullptr), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(nullptr), nullptr);
- EXPECT_EQ(C2NumbersTuning::From(ins1.get()), nullptr);
- EXPECT_EQ(C2NumbersTuning::From(ins2.get()), nullptr);
- EXPECT_EQ(C2NumbersTuning::From(outs1.get()), nullptr);
- EXPECT_EQ(C2NumbersTuning::From(outs2.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::From(ins1.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::From(ins2.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::From(outs1.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::From(outs2.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::input::From(ins1.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::input::From(ins2.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::input::From(outs1.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::input::From(outs2.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(ins1.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(ins2.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(outs1.get()), nullptr);
- EXPECT_EQ(C2NumbersPortTuning::output::From(outs2.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::From(ins1.get()), ins1.get());
- EXPECT_EQ(C2NumbersStreamTuning::From(ins2.get()), (C2NumbersStreamTuning*)ins2.get());
- EXPECT_EQ(C2NumbersStreamTuning::From(outs1.get()), outs1.get());
- EXPECT_EQ(C2NumbersStreamTuning::From(outs2.get()), (C2NumbersStreamTuning*)outs2.get());
- EXPECT_EQ(C2NumbersStreamTuning::input::From(ins1.get()), (C2NumbersStreamTuning::input*)ins1.get());
- EXPECT_EQ(C2NumbersStreamTuning::input::From(ins2.get()), ins2.get());
- EXPECT_EQ(C2NumbersStreamTuning::input::From(outs1.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::input::From(outs2.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::output::From(ins1.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::output::From(ins2.get()), nullptr);
- EXPECT_EQ(C2NumbersStreamTuning::output::From(outs1.get()), (C2NumbersStreamTuning::output*)outs1.get());
- EXPECT_EQ(C2NumbersStreamTuning::output::From(outs2.get()), outs2.get());
-
- }
-
- {
- C2Int32Value int32Value(INT32_MIN);
- static_assert(std::is_same<decltype(int32Value.mValue), int32_t>::value, "should be int32_t");
- EXPECT_EQ(INT32_MIN, int32Value.mValue);
- std::list<const C2FieldDescriptor> fields = int32Value.fieldList;
- EXPECT_EQ(1u, fields.size());
- EXPECT_EQ(FD::INT32, fields.cbegin()->type());
- EXPECT_EQ(1u, fields.cbegin()->length());
- EXPECT_EQ(C2String("value"), fields.cbegin()->name());
- }
-
- {
- C2Uint32Value uint32Value(UINT32_MAX);
- static_assert(std::is_same<decltype(uint32Value.mValue), uint32_t>::value, "should be uint32_t");
- EXPECT_EQ(UINT32_MAX, uint32Value.mValue);
- std::list<const C2FieldDescriptor> fields = uint32Value.fieldList;
- EXPECT_EQ(1u, fields.size());
- EXPECT_EQ(FD::UINT32, fields.cbegin()->type());
- EXPECT_EQ(1u, fields.cbegin()->length());
- EXPECT_EQ(C2String("value"), fields.cbegin()->name());
- }
-
- {
- C2Int64Value int64Value(INT64_MIN);
- static_assert(std::is_same<decltype(int64Value.mValue), int64_t>::value, "should be int64_t");
- EXPECT_EQ(INT64_MIN, int64Value.mValue);
- std::list<const C2FieldDescriptor> fields = int64Value.fieldList;
- EXPECT_EQ(1u, fields.size());
- EXPECT_EQ(FD::INT64, fields.cbegin()->type());
- EXPECT_EQ(1u, fields.cbegin()->length());
- EXPECT_EQ(C2String("value"), fields.cbegin()->name());
- }
-
- {
- C2Uint64Value uint64Value(UINT64_MAX);
- static_assert(std::is_same<decltype(uint64Value.mValue), uint64_t>::value, "should be uint64_t");
- EXPECT_EQ(UINT64_MAX, uint64Value.mValue);
- std::list<const C2FieldDescriptor> fields = uint64Value.fieldList;
- EXPECT_EQ(1u, fields.size());
- EXPECT_EQ(FD::UINT64, fields.cbegin()->type());
- EXPECT_EQ(1u, fields.cbegin()->length());
- EXPECT_EQ(C2String("value"), fields.cbegin()->name());
- }
-
- {
- C2FloatValue floatValue(123.4f);
- static_assert(std::is_same<decltype(floatValue.mValue), float>::value, "should be float");
- EXPECT_EQ(123.4f, floatValue.mValue);
- std::list<const C2FieldDescriptor> fields = floatValue.fieldList;
- EXPECT_EQ(1u, fields.size());
- EXPECT_EQ(FD::FLOAT, fields.cbegin()->type());
- EXPECT_EQ(1u, fields.cbegin()->length());
- EXPECT_EQ(C2String("value"), fields.cbegin()->name());
- }
-
- {
- uint8_t initValue[] = "ABCD";
- typedef C2GlobalParam<C2Setting, C2BlobValue, 0> BlobSetting;
- std::unique_ptr<BlobSetting> blobValue = BlobSetting::alloc_unique(6, C2ConstMemoryBlock<uint8_t>(initValue));
- static_assert(std::is_same<decltype(blobValue->m.mValue), uint8_t[]>::value, "should be uint8_t[]");
- EXPECT_EQ(0, memcmp(blobValue->m.mValue, "ABCD\0", 6));
- EXPECT_EQ(6u, blobValue->flexCount());
- std::list<const C2FieldDescriptor> fields = blobValue->fieldList;
- EXPECT_EQ(1u, fields.size());
- EXPECT_EQ(FD::BLOB, fields.cbegin()->type());
- EXPECT_EQ(0u, fields.cbegin()->length());
- EXPECT_EQ(C2String("value"), fields.cbegin()->name());
-
- blobValue = BlobSetting::alloc_unique(3, C2ConstMemoryBlock<uint8_t>(initValue));
- EXPECT_EQ(0, memcmp(blobValue->m.mValue, "ABC", 3));
- EXPECT_EQ(3u, blobValue->flexCount());
- }
-
- {
- constexpr char initValue[] = "ABCD";
- typedef C2GlobalParam<C2Setting, C2StringValue, 0> StringSetting;
- std::unique_ptr<StringSetting> stringValue = StringSetting::alloc_unique(6, C2ConstMemoryBlock<char>(initValue));
- stringValue = StringSetting::alloc_unique(6, initValue);
- static_assert(std::is_same<decltype(stringValue->m.mValue), char[]>::value, "should be char[]");
- EXPECT_EQ(0, memcmp(stringValue->m.mValue, "ABCD\0", 6));
- EXPECT_EQ(6u, stringValue->flexCount());
- std::list<const C2FieldDescriptor> fields = stringValue->fieldList;
- EXPECT_EQ(1u, fields.size());
- EXPECT_EQ(FD::STRING, fields.cbegin()->type());
- EXPECT_EQ(0u, fields.cbegin()->length());
- EXPECT_EQ(C2String("value"), fields.cbegin()->name());
-
- stringValue = StringSetting::alloc_unique(3, C2ConstMemoryBlock<char>(initValue));
- EXPECT_EQ(0, memcmp(stringValue->m.mValue, "AB", 3));
- EXPECT_EQ(3u, stringValue->flexCount());
-
- stringValue = StringSetting::alloc_unique(11, "initValue");
- EXPECT_EQ(0, memcmp(stringValue->m.mValue, "initValue\0", 11));
- EXPECT_EQ(11u, stringValue->flexCount());
-
- stringValue = StringSetting::alloc_unique(initValue);
- EXPECT_EQ(0, memcmp(stringValue->m.mValue, "ABCD", 5));
- EXPECT_EQ(5u, stringValue->flexCount());
-
- stringValue = StringSetting::alloc_unique({ 'A', 'B', 'C', 'D' });
- EXPECT_EQ(0, memcmp(stringValue->m.mValue, "ABC", 4));
- EXPECT_EQ(4u, stringValue->flexCount());
- }
-
- {
- uint32_t videoWidth[] = { 12u, C2NumbersStreamTuning::output::typeIndex, 100 };
- C2Param *p1 = C2Param::From(videoWidth, sizeof(videoWidth));
- EXPECT_NE(nullptr, p1);
- EXPECT_EQ(12u, p1->size());
- EXPECT_EQ(C2NumbersStreamTuning::output::typeIndex, p1->type());
-
- C2NumbersStreamTuning::output *vst = C2NumbersStreamTuning::output::From(p1);
- EXPECT_NE(nullptr, vst);
- if (vst) {
- EXPECT_EQ(1u, vst->flexCount());
- EXPECT_EQ(100, vst->m.mNumbers[0]);
- }
-
- p1 = C2Param::From(videoWidth, sizeof(videoWidth) + 2);
- EXPECT_EQ(nullptr, p1);
-
- p1 = C2Param::From(videoWidth, sizeof(videoWidth) - 2);
- EXPECT_EQ(nullptr, p1);
-
- p1 = C2Param::From(videoWidth, 3);
- EXPECT_EQ(nullptr, p1);
-
- p1 = C2Param::From(videoWidth, 0);
- EXPECT_EQ(nullptr, p1);
- }
-
- {
- uint32_t videoWidth[] = { 16u, C2NumbersPortTuning::input::typeIndex, 101, 102 };
-
- C2Param *p1 = C2Param::From(videoWidth, sizeof(videoWidth));
- EXPECT_NE(nullptr, p1);
- EXPECT_EQ(16u, p1->size());
- EXPECT_EQ(C2NumbersPortTuning::input::typeIndex, p1->type());
-
- C2NumbersPortTuning::input *vpt = C2NumbersPortTuning::input::From(p1);
- EXPECT_NE(nullptr, vpt);
- if (vpt) {
- EXPECT_EQ(2u, vpt->flexCount());
- EXPECT_EQ(101, vpt->m.mNumbers[0]);
- EXPECT_EQ(102, vpt->m.mNumbers[1]);
- }
-
- p1 = C2Param::From(videoWidth, sizeof(videoWidth) + 2);
- EXPECT_EQ(nullptr, p1);
-
- p1 = C2Param::From(videoWidth, sizeof(videoWidth) - 2);
- EXPECT_EQ(nullptr, p1);
-
- p1 = C2Param::From(videoWidth, 3);
- EXPECT_EQ(nullptr, p1);
-
- p1 = C2Param::From(videoWidth, 0);
- EXPECT_EQ(nullptr, p1);
- }
-}
-
-// ***********************
-
-}
-
-#include <util/C2ParamUtils.h>
-#include <C2Config.h>
-#include <C2Component.h>
-#include <unordered_map>
-
-namespace android {
-
-C2ENUM(
- MetadataType, int32_t,
- kInvalid = -1,
- kNone = 0,
- kGralloc,
- kNativeHandle,
- kANativeWindow,
- kCamera,
-)
-
-enum {
- kParamIndexVideoConfig = 0x1234,
-};
-
-struct C2VideoConfigStruct {
- int32_t mWidth;
- uint32_t mHeight;
- MetadataType mMetadataType;
- int32_t mSupportedFormats[];
-
- C2VideoConfigStruct() {}
-
- DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(VideoConfig, mSupportedFormats)
- C2FIELD(mWidth, "width")
- C2FIELD(mHeight, "height")
- C2FIELD(mMetadataType, "metadata-type")
- C2FIELD(mSupportedFormats, "formats")
-};
-
-typedef C2PortParam<C2Tuning, C2VideoConfigStruct> C2VideoConfigPortTuning;
-
-class MyReflector : public C2ParamReflector {
-private:
- std::unique_ptr<C2VideoConfigPortTuning::input> inputVideoConfigTuning;
- std::unique_ptr<C2VideoConfigPortTuning::output> outputVideoConfigTuning;
-
-public:
- void describeSupportedValues() {
- C2TypedFieldSupportedValues<int32_t> supportedWidths(16, 1920, 8);
- C2FieldSupportedValues supportedWidths2(16, 1920, 8);
-
-
- std::list<C2FieldSupportedValues> supported;
- //supported.emplace_push(inputVideoConfigTuning->mNumber, range(16, 1920, 8));
- //supported.emplace_push(inputVideoConfigTuning->mHeight, range(16, 1088, 8));
- //supported.emplace_push(inputVideoConfigTuning->mMetadataType, all_enums);
- //supported.emplace_push(inputVideoConfigTuning->mSupportedFormats, { 0, 1, 5, 7 });
- }
-
- virtual std::unique_ptr<android::C2StructDescriptor> describe(C2Param::BaseIndex paramType) {
- switch (paramType.baseIndex()) {
- case C2VideoConfigPortTuning::baseIndex:
- return std::unique_ptr<C2StructDescriptor>(new C2StructDescriptor{
- paramType.baseIndex(),
- C2VideoConfigPortTuning::fieldList,
- });
- }
- return nullptr;
- }
-};
-
-class MyComponentInstance : public C2ComponentInterface {
-public:
- virtual C2String getName() const {
- /// \todo this seems too specific
- return "sample.interface";
- };
-
- virtual node_id getId() const {
- /// \todo how are these shared?
- return 0;
- }
-
- virtual status_t commit_sm(
- const std::vector<C2Param* const> ¶ms,
- std::vector<std::unique_ptr<C2SettingResult>>* const failures) {
- (void)params;
- (void)failures;
- return C2_UNSUPPORTED;
- }
-
- virtual status_t config_nb(
- const std::vector<C2Param* const> ¶ms,
- std::vector<std::unique_ptr<C2SettingResult>>* const failures) {
- (void)params;
- (void)failures;
- return C2_UNSUPPORTED;
- }
-
- virtual status_t createTunnel_sm(node_id targetComponent) {
- (void)targetComponent;
- return C2_UNSUPPORTED;
- }
-
- virtual status_t query_nb(
- const std::vector<C2Param* const> &stackParams,
- const std::vector<C2Param::Index> &heapParamIndices,
- std::vector<std::unique_ptr<C2Param>>* const heapParams) const {
- for (C2Param* const param : stackParams) {
- if (!*param) { // param is already invalid - remember it
- continue;
- }
-
- // note: this does not handle stream params (should use index...)
- if (!mMyParams.count(param->type())) {
- continue; // not my param
- }
-
- C2Param & myParam = mMyParams.find(param->type())->second;
- if (myParam.size() != param->size()) { // incorrect size
- param->invalidate();
- continue;
- }
-
- param->updateFrom(myParam);
- }
-
- for (const C2Param::Index index : heapParamIndices) {
- if (mMyParams.count(index)) {
- C2Param & myParam = mMyParams.find(index)->second;
- std::unique_ptr<C2Param> paramCopy(C2Param::From(&myParam, myParam.size()));
- heapParams->push_back(std::move(paramCopy));
- }
- }
-
- return C2_OK;
- }
-
- std::unordered_map<uint32_t, C2Param &> mMyParams;
-
- C2ComponentDomainInfo mDomainInfo;
-
- MyComponentInstance() {
- mMyParams.insert({mDomainInfo.type(), mDomainInfo});
- }
-
- virtual status_t releaseTunnel_sm(node_id targetComponent) {
- (void)targetComponent;
- return C2_UNSUPPORTED;
- }
-
- class MyParamReflector : public C2ParamReflector {
- const MyComponentInstance *instance;
-
- public:
- MyParamReflector(const MyComponentInstance *i) : instance(i) { }
-
- virtual std::unique_ptr<C2StructDescriptor> describe(C2Param::BaseIndex paramIndex) {
- switch (paramIndex.baseIndex()) {
- case decltype(instance->mDomainInfo)::baseIndex:
- default:
- return std::unique_ptr<C2StructDescriptor>(new C2StructDescriptor{
- instance->mDomainInfo.type(),
- decltype(instance->mDomainInfo)::fieldList,
- });
- }
- return nullptr;
- }
- };
-
- virtual status_t getSupportedValues(
- const std::vector<const C2ParamField> fields,
- std::vector<C2FieldSupportedValues>* const values) const {
- for (const C2ParamField &field : fields) {
- if (field == C2ParamField(&mDomainInfo, &C2ComponentDomainInfo::mValue)) {
- values->push_back(C2FieldSupportedValues(
- false /* flag */,
- &mDomainInfo.mValue
- //,
- //{(int32_t)C2DomainVideo}
- ));
- }
- }
- return C2_OK;
- }
-
- virtual std::shared_ptr<C2ParamReflector> getParamReflector() const {
- return std::shared_ptr<C2ParamReflector>(new MyParamReflector(this));
- }
-
- virtual status_t getSupportedParams(std::vector<std::shared_ptr<C2ParamDescriptor>> * const params) const {
- params->push_back(std::make_shared<C2ParamDescriptor>(
- true /* required */, "_domain", &mDomainInfo));
- return C2_OK;
- }
-
- status_t getSupportedParams2(std::vector<std::shared_ptr<C2ParamDescriptor>> * const params) {
- params->push_back(std::shared_ptr<C2ParamDescriptor>(
- new C2ParamDescriptor(true /* required */, "_domain", &mDomainInfo)));
- return C2_OK;
- }
-
-};
-
-template<typename E, bool S=std::is_enum<E>::value>
-struct getter {
- int32_t get(const C2FieldSupportedValues::Primitive &p, int32_t*) {
- return p.i32;
- }
- int64_t get(const C2FieldSupportedValues::Primitive &p, int64_t*) {
- return p.i64;
- }
- uint32_t get(const C2FieldSupportedValues::Primitive &p, uint32_t*) {
- return p.u32;
- }
- uint64_t get(const C2FieldSupportedValues::Primitive &p, uint64_t*) {
- return p.u64;
- }
- float get(const C2FieldSupportedValues::Primitive &p, float*) {
- return p.fp;
- }
-};
-
-template<typename E>
-struct getter<E, true> {
- typename std::underlying_type<E>::type get(const C2FieldSupportedValues::Primitive &p, E*) {
- using u=typename std::underlying_type<E>::type;
- return getter<u>().get(p, (u*)0);
- }
-};
-
-template<typename T, bool E=std::is_enum<T>::value>
-struct lax_underlying_type {
- typedef typename std::underlying_type<T>::type type;
-};
-
-template<typename T>
-struct lax_underlying_type<T, false> {
- typedef T type;
-};
-
-template<typename E>
-typename lax_underlying_type<E>::type get(
- const C2FieldSupportedValues::Primitive &p, E*) {
- return getter<E>().get(p, (E*)0);
-}
-
-template<typename T>
-void dumpFSV(const C2FieldSupportedValues &sv, T*t) {
- using namespace std;
- cout << (std::is_enum<T>::value ? (std::is_signed<typename std::underlying_type<T>::type>::value ? "i" : "u")
- : std::is_integral<T>::value ? std::is_signed<T>::value ? "i" : "u" : "f")
- << (8 * sizeof(T));
- if (sv.type == sv.RANGE) {
- cout << ".range(" << get(sv.range.min, t);
- if (get(sv.range.step, t) != std::is_integral<T>::value) {
- cout << ":" << get(sv.range.step, t);
- }
- if (get(sv.range.nom, t) != 1 || get(sv.range.denom, t) != 1) {
- cout << ":" << get(sv.range.nom, t) << "/" << get(sv.range.denom, t);
- }
- cout << get(sv.range.max, t) << ")";
- }
- if (sv.values.size()) {
- cout << (sv.type == sv.FLAGS ? ".flags(" : ".list(");
- const char *sep = "";
- for (const C2FieldSupportedValues::Primitive &p : sv.values) {
- cout << sep << get(p, t);
- sep = ",";
- }
- cout << ")";
- }
- cout << endl;
-}
-
-void dumpType(C2Param::Type type) {
- using namespace std;
- cout << (type.isVendor() ? "Vendor" : "C2");
- if (type.forInput()) {
- cout << "Input";
- } else if (type.forOutput()) {
- cout << "Output";
- } else if (type.forPort() && !type.forStream()) {
- cout << "Port";
- }
- if (type.forStream()) {
- cout << "Stream";
- }
-
- if (type.isFlexible()) {
- cout << "Flex";
- }
-
- cout << type.paramIndex();
-
- switch (type.kind()) {
- case C2Param::INFO: cout << "Info"; break;
- case C2Param::SETTING: cout << "Setting"; break;
- case C2Param::TUNING: cout << "Tuning"; break;
- case C2Param::STRUCT: cout << "Struct"; break;
- default: cout << "Kind" << (int32_t)type.kind(); break;
- }
-}
-
-void dumpType(C2Param::BaseIndex type) {
- using namespace std;
- cout << (type.isVendor() ? "Vendor" : "C2");
- if (type.isFlexible()) {
- cout << "Flex";
- }
-
- cout << type.paramIndex() << "Struct";
-}
-
-void dumpType(FD::Type type) {
- using namespace std;
- switch (type) {
- case FD::BLOB: cout << "blob "; break;
- case FD::FLOAT: cout << "float "; break;
- case FD::INT32: cout << "int32_t "; break;
- case FD::INT64: cout << "int64_t "; break;
- case FD::UINT32: cout << "uint32_t "; break;
- case FD::UINT64: cout << "uint64_t "; break;
- case FD::STRING: cout << "char "; break;
- default:
- cout << "struct ";
- dumpType((C2Param::Type)type);
- break;
- }
-}
-
-void dumpStruct(const C2StructDescriptor &sd) {
- using namespace std;
- cout << "struct ";
- dumpType(sd.baseIndex());
- cout << " {" << endl;
- //C2FieldDescriptor &f;
- for (const C2FieldDescriptor &f : sd) {
- PrintTo(f, &cout);
- cout << endl;
-
- if (f.namedValues().size()) {
- cout << ".named(";
- const char *sep = "";
- for (const FD::named_value_type &p : f.namedValues()) {
- cout << sep << p.first << "=";
- switch (f.type()) {
- case C2Value::INT32: cout << get(p.second, (int32_t *)0); break;
- case C2Value::INT64: cout << get(p.second, (int64_t *)0); break;
- case C2Value::UINT32: cout << get(p.second, (uint32_t *)0); break;
- case C2Value::UINT64: cout << get(p.second, (uint64_t *)0); break;
- case C2Value::FLOAT: cout << get(p.second, (float *)0); break;
- default: cout << "???"; break;
- }
- sep = ",";
- }
- cout << ")";
- }
- }
-
- cout << "};" << endl;
-}
-
-void dumpDesc(const C2ParamDescriptor &pd) {
- using namespace std;
- if (pd.isRequired()) {
- cout << "required ";
- }
- if (pd.isPersistent()) {
- cout << "persistent ";
- }
- cout << "struct ";
- dumpType(pd.type());
- cout << " " << pd.name() << ";" << endl;
-}
-
-TEST_F(C2ParamTest, ReflectorTest) {
- C2ComponentDomainInfo domainInfo;
- std::shared_ptr<C2ComponentInterface> comp(new MyComponentInstance);
- std::vector<C2FieldSupportedValues> values;
-
- std::unique_ptr<C2StructDescriptor> desc{
- comp->getParamReflector()->describe(C2ComponentDomainInfo::indexFlags)};
- dumpStruct(*desc);
-
- EXPECT_EQ(
- C2_OK,
- comp->getSupportedValues(
- { C2ParamField(&domainInfo, &C2ComponentDomainInfo::mValue) },
- &values)
- );
-
- for (const C2FieldSupportedValues &sv : values) {
- dumpFSV(sv, &domainInfo.mValue);
- }
-}
-
-C2ENUM(Enum1, uint32_t,
- Enum1Value1,
- Enum1Value2,
- Enum1Value4 = Enum1Value2 + 2,
-);
-
-C2ENUM_CUSTOM_PREFIX(Enum2, uint32_t, "Enum",
- Enum2Value1,
- Enum2Value2,
- Enum2Value4 = Enum1Value2 + 2,
-);
-
-C2ENUM_CUSTOM_NAMES(Enum3, uint8_t,
- ({ { "value1", Enum3Value1 },
- { "value2", Enum3Value2 },
- { "value4", Enum3Value4 },
- { "invalid", Invalid } }),
- Enum3Value1,
- Enum3Value2,
- Enum3Value4 = Enum3Value2 + 2,
- Invalid,
-);
-
-TEST_F(C2ParamTest, EnumUtilsTest) {
- std::vector<std::pair<C2String, Enum3>> pairs ( { { "value1", Enum3Value1 },
- { "value2", Enum3Value2 },
- { "value4", Enum3Value4 },
- { "invalid", Invalid } });
- Enum3 e3;
- FD::namedValuesFor(e3);
-}
-
-TEST_F(C2ParamTest, ParamUtilsTest) {
- // upper case
- EXPECT_EQ("yes", C2ParamUtils::camelCaseToDashed("YES"));
- EXPECT_EQ("no", C2ParamUtils::camelCaseToDashed("NO"));
- EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("YES_NO"));
- EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("YES__NO"));
- EXPECT_EQ("a2dp", C2ParamUtils::camelCaseToDashed("A2DP"));
- EXPECT_EQ("mp2-ts", C2ParamUtils::camelCaseToDashed("MP2_TS"));
- EXPECT_EQ("block-2d", C2ParamUtils::camelCaseToDashed("BLOCK_2D"));
- EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("MPEG_2_TS"));
- EXPECT_EQ("_hidden-value", C2ParamUtils::camelCaseToDashed("_HIDDEN_VALUE"));
- EXPECT_EQ("__hidden-value2", C2ParamUtils::camelCaseToDashed("__HIDDEN_VALUE2"));
- EXPECT_EQ("__hidden-value-2", C2ParamUtils::camelCaseToDashed("__HIDDEN_VALUE_2"));
-
- // camel case
- EXPECT_EQ("yes", C2ParamUtils::camelCaseToDashed("Yes"));
- EXPECT_EQ("no", C2ParamUtils::camelCaseToDashed("No"));
- EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("YesNo"));
- EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("Yes_No"));
- EXPECT_EQ("mp2-ts", C2ParamUtils::camelCaseToDashed("MP2Ts"));
- EXPECT_EQ("block-2d", C2ParamUtils::camelCaseToDashed("Block2D"));
- EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("Mpeg2ts"));
- EXPECT_EQ("_hidden-value", C2ParamUtils::camelCaseToDashed("_HiddenValue"));
- EXPECT_EQ("__hidden-value-2", C2ParamUtils::camelCaseToDashed("__HiddenValue2"));
-
- // mixed case
- EXPECT_EQ("mp2t-s", C2ParamUtils::camelCaseToDashed("MP2T_s"));
- EXPECT_EQ("block-2d", C2ParamUtils::camelCaseToDashed("Block_2D"));
- EXPECT_EQ("block-2-d", C2ParamUtils::camelCaseToDashed("Block2_D"));
- EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("Mpeg_2ts"));
- EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("Mpeg_2_TS"));
- EXPECT_EQ("_hidden-value", C2ParamUtils::camelCaseToDashed("_Hidden__VALUE"));
- EXPECT_EQ("__hidden-value-2", C2ParamUtils::camelCaseToDashed("__HiddenValue_2"));
- EXPECT_EQ("_2", C2ParamUtils::camelCaseToDashed("_2"));
- EXPECT_EQ("__23", C2ParamUtils::camelCaseToDashed("__23"));
-}
-
-TEST_F(C2ParamTest, C2ValueTest) {
- C2Value val;
- int32_t i32 = -32;
- int64_t i64 = -64;
- uint32_t u32 = 32;
- uint64_t u64 = 64;
- float fp = 1.5f;
-
- EXPECT_EQ(C2Value::NO_INIT, val.type());
- EXPECT_EQ(false, val.get(&i32));
- EXPECT_EQ(-32, i32);
- EXPECT_EQ(false, val.get(&i64));
- EXPECT_EQ(-64, i64);
- EXPECT_EQ(false, val.get(&u32));
- EXPECT_EQ(32u, u32);
- EXPECT_EQ(false, val.get(&u64));
- EXPECT_EQ(64u, u64);
- EXPECT_EQ(false, val.get(&fp));
- EXPECT_EQ(1.5f, fp);
-
- val = int32_t(-3216);
- EXPECT_EQ(C2Value::INT32, val.type());
- EXPECT_EQ(true, val.get(&i32));
- EXPECT_EQ(-3216, i32);
- EXPECT_EQ(false, val.get(&i64));
- EXPECT_EQ(-64, i64);
- EXPECT_EQ(false, val.get(&u32));
- EXPECT_EQ(32u, u32);
- EXPECT_EQ(false, val.get(&u64));
- EXPECT_EQ(64u, u64);
- EXPECT_EQ(false, val.get(&fp));
- EXPECT_EQ(1.5f, fp);
-
- val = uint32_t(3216);
- EXPECT_EQ(C2Value::UINT32, val.type());
- EXPECT_EQ(false, val.get(&i32));
- EXPECT_EQ(-3216, i32);
- EXPECT_EQ(false, val.get(&i64));
- EXPECT_EQ(-64, i64);
- EXPECT_EQ(true, val.get(&u32));
- EXPECT_EQ(3216u, u32);
- EXPECT_EQ(false, val.get(&u64));
- EXPECT_EQ(64u, u64);
- EXPECT_EQ(false, val.get(&fp));
- EXPECT_EQ(1.5f, fp);
-
- val = int64_t(-6432);
- EXPECT_EQ(C2Value::INT64, val.type());
- EXPECT_EQ(false, val.get(&i32));
- EXPECT_EQ(-3216, i32);
- EXPECT_EQ(true, val.get(&i64));
- EXPECT_EQ(-6432, i64);
- EXPECT_EQ(false, val.get(&u32));
- EXPECT_EQ(3216u, u32);
- EXPECT_EQ(false, val.get(&u64));
- EXPECT_EQ(64u, u64);
- EXPECT_EQ(false, val.get(&fp));
- EXPECT_EQ(1.5f, fp);
-
- val = uint64_t(6432);
- EXPECT_EQ(C2Value::UINT64, val.type());
- EXPECT_EQ(false, val.get(&i32));
- EXPECT_EQ(-3216, i32);
- EXPECT_EQ(false, val.get(&i64));
- EXPECT_EQ(-6432, i64);
- EXPECT_EQ(false, val.get(&u32));
- EXPECT_EQ(3216u, u32);
- EXPECT_EQ(true, val.get(&u64));
- EXPECT_EQ(6432u, u64);
- EXPECT_EQ(false, val.get(&fp));
- EXPECT_EQ(1.5f, fp);
-
- val = 15.25f;
- EXPECT_EQ(C2Value::FLOAT, val.type());
- EXPECT_EQ(false, val.get(&i32));
- EXPECT_EQ(-3216, i32);
- EXPECT_EQ(false, val.get(&i64));
- EXPECT_EQ(-6432, i64);
- EXPECT_EQ(false, val.get(&u32));
- EXPECT_EQ(3216u, u32);
- EXPECT_EQ(false, val.get(&u64));
- EXPECT_EQ(6432u, u64);
- EXPECT_EQ(true, val.get(&fp));
- EXPECT_EQ(15.25f, fp);
-}
-
-} // namespace android
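The C2ValueTest removed above exercises a tagged value whose typed get() succeeds only for the exact stored type and leaves the output variable untouched otherwise. A minimal sketch of that pattern, based only on what the test shows (this is not the deleted C2Value implementation; SimpleValue and its members are made-up names):

#include <cstdint>

class SimpleValue {
public:
    enum Type { NO_INIT, INT32, FLOAT };

    SimpleValue() : mType(NO_INIT) {}
    SimpleValue(int32_t v) : mType(INT32) { mValue.i32 = v; }
    SimpleValue(float v) : mType(FLOAT) { mValue.fp = v; }

    Type type() const { return mType; }

    // get() returns false and leaves *out unchanged on a type mismatch,
    // mirroring the EXPECT_EQ(false, val.get(&i32)) checks in the test.
    bool get(int32_t *out) const {
        if (mType != INT32) return false;
        *out = mValue.i32;
        return true;
    }
    bool get(float *out) const {
        if (mType != FLOAT) return false;
        *out = mValue.fp;
        return true;
    }

private:
    Type mType;
    union { int32_t i32; float fp; } mValue;
};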
diff --git a/media/libstagefright/codec2/tests/C2_test.cpp b/media/libstagefright/codec2/tests/C2_test.cpp
deleted file mode 100644
index 92a3d91..0000000
--- a/media/libstagefright/codec2/tests/C2_test.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "C2_test"
-
-#include <gtest/gtest.h>
-
-#include <C2.h>
-
-namespace android {
-
-/* ======================================= STATIC TESTS ======================================= */
-
-template<int N>
-struct c2_const_checker
-{
- inline constexpr static int num() { return N; }
-};
-
-constexpr auto min_i32_i32 = c2_min(int32_t(1), int32_t(2));
-static_assert(std::is_same<decltype(min_i32_i32), const int32_t>::value, "should be int32_t");
-constexpr auto min_i32_i64 = c2_min(int32_t(3), int64_t(2));
-static_assert(std::is_same<decltype(min_i32_i64), const int64_t>::value, "should be int64_t");
-constexpr auto min_i8_i32 = c2_min(int8_t(0xff), int32_t(0xffffffff));
-static_assert(std::is_same<decltype(min_i8_i32), const int32_t>::value, "should be int32_t");
-
-static_assert(c2_const_checker<min_i32_i32>::num() == 1, "should be 1");
-static_assert(c2_const_checker<min_i32_i64>::num() == 2, "should be 2");
-static_assert(c2_const_checker<min_i8_i32>::num() == 0xffffffff, "should be 0xffffffff");
-
-constexpr auto min_u32_u32 = c2_min(uint32_t(1), uint32_t(2));
-static_assert(std::is_same<decltype(min_u32_u32), const uint32_t>::value, "should be uint32_t");
-constexpr auto min_u32_u64 = c2_min(uint32_t(3), uint64_t(2));
-static_assert(std::is_same<decltype(min_u32_u64), const uint32_t>::value, "should be uint32_t");
-constexpr auto min_u32_u8 = c2_min(uint32_t(0xffffffff), uint8_t(0xff));
-static_assert(std::is_same<decltype(min_u32_u8), const uint8_t>::value, "should be uint8_t");
-
-static_assert(c2_const_checker<min_u32_u32>::num() == 1, "should be 1");
-static_assert(c2_const_checker<min_u32_u64>::num() == 2, "should be 2");
-static_assert(c2_const_checker<min_u32_u8>::num() == 0xff, "should be 0xff");
-
-constexpr auto max_i32_i32 = c2_max(int32_t(1), int32_t(2));
-static_assert(std::is_same<decltype(max_i32_i32), const int32_t>::value, "should be int32_t");
-constexpr auto max_i32_i64 = c2_max(int32_t(3), int64_t(2));
-static_assert(std::is_same<decltype(max_i32_i64), const int64_t>::value, "should be int64_t");
-constexpr auto max_i8_i32 = c2_max(int8_t(0xff), int32_t(0xffffffff));
-static_assert(std::is_same<decltype(max_i8_i32), const int32_t>::value, "should be int32_t");
-
-static_assert(c2_const_checker<max_i32_i32>::num() == 2, "should be 2");
-static_assert(c2_const_checker<max_i32_i64>::num() == 3, "should be 3");
-static_assert(c2_const_checker<max_i8_i32>::num() == 0xffffffff, "should be 0xffffffff");
-
-constexpr auto max_u32_u32 = c2_max(uint32_t(1), uint32_t(2));
-static_assert(std::is_same<decltype(max_u32_u32), const uint32_t>::value, "should be uint32_t");
-constexpr auto max_u32_u64 = c2_max(uint32_t(3), uint64_t(2));
-static_assert(std::is_same<decltype(max_u32_u64), const uint64_t>::value, "should be uint64_t");
-constexpr auto max_u32_u8 = c2_max(uint32_t(0x7fffffff), uint8_t(0xff));
-static_assert(std::is_same<decltype(max_u32_u8), const uint32_t>::value, "should be uint32_t");
-
-static_assert(c2_const_checker<max_u32_u32>::num() == 2, "should be 2");
-static_assert(c2_const_checker<max_u32_u64>::num() == 3, "should be 3");
-static_assert(c2_const_checker<max_u32_u8>::num() == 0x7fffffff, "should be 0x7fffffff");
-
-} // namespace android
diff --git a/media/libstagefright/codec2/tests/vndk/C2UtilTest.cpp b/media/libstagefright/codec2/tests/vndk/C2UtilTest.cpp
deleted file mode 100644
index 7a1374b..0000000
--- a/media/libstagefright/codec2/tests/vndk/C2UtilTest.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <util/_C2MacroUtils.h>
-
-/** \file
- * Tests for vndk/util.
- */
-
-/* --------------------------------------- _C2MacroUtils --------------------------------------- */
-
-static_assert(0 == _C2_ARGC(), "should be 0");
-static_assert(1 == _C2_ARGC(1), "should be 1");
-static_assert(2 == _C2_ARGC(1, 2), "should be 2");
-static_assert(64 == _C2_ARGC(
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
- 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64), "should be 64");
-
-static_assert(0 == _C2_ARGC(,), "should be 0");
-static_assert(1 == _C2_ARGC(1,), "should be 1");
-static_assert(2 == _C2_ARGC(1, 2,), "should be 2");
-static_assert(64 == _C2_ARGC(
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
- 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,), "should be 64");
-
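The _C2_ARGC assertions in the removed test above rely on the standard preprocessor trick for counting variadic macro arguments: append a descending sequence and read off whichever value lands in a fixed slot. A reduced sketch of the same idea, capped at five arguments for brevity (COUNT_ARGS and COUNT_ARGS_IMPL are illustrative names; the removed _C2_ARGC additionally handles the zero-argument and trailing-comma cases tested above):

// Append a descending sequence; the argument that lands in the N slot is the count.
#define COUNT_ARGS_IMPL(_1, _2, _3, _4, _5, N, ...) N
#define COUNT_ARGS(...) COUNT_ARGS_IMPL(__VA_ARGS__, 5, 4, 3, 2, 1, 0)

static_assert(COUNT_ARGS(a) == 1, "one argument");
static_assert(COUNT_ARGS(a, b, c) == 3, "three arguments");
static_assert(COUNT_ARGS(a, b, c, d, e) == 5, "five arguments");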
diff --git a/media/libstagefright/codec2/vndk/include/util/C2ParamUtils.h b/media/libstagefright/codec2/vndk/include/util/C2ParamUtils.h
deleted file mode 100644
index edae303..0000000
--- a/media/libstagefright/codec2/vndk/include/util/C2ParamUtils.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef C2UTILS_PARAM_UTILS_H_
-#define C2UTILS_PARAM_UTILS_H_
-
-#include <C2Param.h>
-#include <util/_C2MacroUtils.h>
-
-#include <iostream>
-
-/** \file
- * Utilities for parameter handling to be used by Codec2 implementations.
- */
-
-namespace android {
-
-/// \cond INTERNAL
-
-/* ---------------------------- UTILITIES FOR ENUMERATION REFLECTION ---------------------------- */
-
-/**
- * Utility class that allows ignoring enum value assignment (e.g. both '(_C2EnumConst)kValue = x'
- * and '(_C2EnumConst)kValue' will evaluate to kValue).
- */
-template<typename T>
-class _C2EnumConst {
-public:
- // implicit conversion from T
- inline _C2EnumConst(T value) : _mValue(value) {}
- // implicit conversion to T
- inline operator T() { return _mValue; }
- // implicit conversion to C2Value::Primitive
- inline operator C2Value::Primitive() { return (T)_mValue; }
- // ignore assignment and return T here to avoid implicit conversion to T later
- inline T &operator =(T value __unused) { return _mValue; }
-private:
- T _mValue;
-};
-
-/// mapper to get name of enum
-/// \note this will contain any initialization, which we will remove when converting to lower-case
-#define _C2_GET_ENUM_NAME(x, y) #x
-/// mapper to get value of enum
-#define _C2_GET_ENUM_VALUE(x, type) (_C2EnumConst<type>)x
-
-/// \endcond
-
-#define DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, prefix, ...) \
-template<> C2FieldDescriptor::named_values_type C2FieldDescriptor::namedValuesFor(const name &r __unused) { \
- return C2ParamUtils::sanitizeEnumValues( \
- std::vector<C2Value::Primitive> { _C2_MAP(_C2_GET_ENUM_VALUE, type, __VA_ARGS__) }, \
- { _C2_MAP(_C2_GET_ENUM_NAME, type, __VA_ARGS__) }, \
- prefix); \
-}
-
-#define DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(name, type, names, ...) \
-template<> C2FieldDescriptor::named_values_type C2FieldDescriptor::namedValuesFor(const name &r __unused) { \
- return C2ParamUtils::customEnumValues( \
- std::vector<std::pair<C2StringLiteral, name>> names); \
-}
-
-
-class C2ParamUtils {
-private:
- static size_t countLeadingUnderscores(C2StringLiteral a) {
- size_t i = 0;
- while (a[i] == '_') {
- ++i;
- }
- return i;
- }
-
- static size_t countMatching(C2StringLiteral a, const C2String &b) {
- for (size_t i = 0; i < b.size(); ++i) {
- if (!a[i] || a[i] != b[i]) {
- return i;
- }
- }
- return b.size();
- }
-
- // ABCDef => abc-def
- // ABCD2ef => abcd2-ef // 0
- // ABCD2Ef => ancd2-ef // -1
-    // ABCD2Ef => abcd2-ef // -1
- // Abc2Def => abc-2def
- // Abc2def => abc-2-def
- // _Yo => _yo
- // _yo => _yo
- // C2_yo => c2-yo
- // C2__yo => c2-yo
-
- static C2String camelCaseToDashed(C2String name) {
- enum {
- kNone = '.',
- kLower = 'a',
- kUpper = 'A',
- kDigit = '1',
- kDash = '-',
- kUnderscore = '_',
- } type = kNone;
- size_t word_start = 0;
- for (size_t ix = 0; ix < name.size(); ++ix) {
- /* std::cout << name.substr(0, word_start) << "|"
- << name.substr(word_start, ix - word_start) << "["
- << name.substr(ix, 1) << "]" << name.substr(ix + 1)
- << ": " << (char)type << std::endl; */
- if (isupper(name[ix])) {
- if (type == kLower) {
- name.insert(ix++, 1, '-');
- word_start = ix;
- }
- name[ix] = tolower(name[ix]);
- type = kUpper;
- } else if (islower(name[ix])) {
- if (type == kDigit && ix > 0) {
- name.insert(ix++, 1, '-');
- word_start = ix;
- } else if (type == kUpper && ix > word_start + 1) {
- name.insert(ix++ - 1, 1, '-');
- word_start = ix - 1;
- }
- type = kLower;
- } else if (isdigit(name[ix])) {
- if (type == kLower) {
- name.insert(ix++, 1, '-');
- word_start = ix;
- }
- type = kDigit;
- } else if (name[ix] == '_') {
- if (type == kDash) {
- name.erase(ix--, 1);
- } else if (type != kNone && type != kUnderscore) {
- name[ix] = '-';
- type = kDash;
- word_start = ix + 1;
- } else {
- type = kUnderscore;
- word_start = ix + 1;
- }
- } else {
- name.resize(ix);
- }
- }
- // std::cout << "=> " << name << std::endl;
- return name;
- }
-
- static std::vector<C2String> sanitizeEnumValueNames(
- const std::vector<C2StringLiteral> names,
- C2StringLiteral _prefix = NULL) {
- std::vector<C2String> sanitizedNames;
- C2String prefix;
- size_t extraUnderscores = 0;
- bool first = true;
- if (_prefix) {
- extraUnderscores = countLeadingUnderscores(_prefix);
- prefix = _prefix + extraUnderscores;
- first = false;
- // std::cout << "prefix:" << prefix << ", underscores:" << extraUnderscores << std::endl;
- }
-
- // calculate prefix and minimum leading underscores
- for (C2StringLiteral s : names) {
- // std::cout << s << std::endl;
- size_t underscores = countLeadingUnderscores(s);
- if (first) {
- extraUnderscores = underscores;
- prefix = s + underscores;
- first = false;
- } else {
- size_t matching = countMatching(
- s + underscores,
- prefix);
- prefix.resize(matching);
- extraUnderscores = std::min(underscores, extraUnderscores);
- }
- // std::cout << "prefix:" << prefix << ", underscores:" << extraUnderscores << std::endl;
- if (prefix.size() == 0 && extraUnderscores == 0) {
- break;
- }
- }
-
- // we swallow the first underscore after upper case prefixes
- bool upperCasePrefix = true;
- for (size_t i = 0; i < prefix.size(); ++i) {
- if (islower(prefix[i])) {
- upperCasePrefix = false;
- break;
- }
- }
-
- for (C2StringLiteral s : names) {
- size_t underscores = countLeadingUnderscores(s);
- C2String sanitized = C2String(s, underscores - extraUnderscores);
- sanitized.append(s + prefix.size() + underscores +
- (upperCasePrefix && s[prefix.size() + underscores] == '_'));
- sanitizedNames.push_back(camelCaseToDashed(sanitized));
- }
-
- for (C2String s : sanitizedNames) {
- std::cout << s << std::endl;
- }
-
- return sanitizedNames;
- }
-
- friend class C2ParamTest_ParamUtilsTest_Test;
-
-public:
- static std::vector<C2String> getEnumValuesFromString(C2StringLiteral value) {
- std::vector<C2String> foundNames;
- size_t pos = 0, len = strlen(value);
- do {
- size_t endPos = strcspn(value + pos, " ,=") + pos;
- if (endPos > pos) {
- foundNames.emplace_back(value + pos, endPos - pos);
- }
- if (value[endPos] && value[endPos] != ',') {
- endPos += strcspn(value + endPos, ",");
- }
- pos = strspn(value + endPos, " ,") + endPos;
- } while (pos < len);
- return foundNames;
- }
-
- template<typename T>
- static C2FieldDescriptor::named_values_type sanitizeEnumValues(
- std::vector<T> values,
- std::vector<C2StringLiteral> names,
- C2StringLiteral prefix = NULL) {
- C2FieldDescriptor::named_values_type namedValues;
- std::vector<C2String> sanitizedNames = sanitizeEnumValueNames(names, prefix);
- for (size_t i = 0; i < values.size() && i < sanitizedNames.size(); ++i) {
- namedValues.emplace_back(sanitizedNames[i], values[i]);
- }
- return namedValues;
- }
-
- template<typename E>
- static C2FieldDescriptor::named_values_type customEnumValues(
- std::vector<std::pair<C2StringLiteral, E>> items) {
- C2FieldDescriptor::named_values_type namedValues;
- for (auto &item : items) {
- namedValues.emplace_back(item.first, item.second);
- }
- return namedValues;
- }
-};
-
-/* ---------------------------- UTILITIES FOR PARAMETER REFLECTION ---------------------------- */
-
-/* ======================== UTILITY TEMPLATES FOR PARAMETER REFLECTION ======================== */
-
-#if 1
-template<typename... Params>
-class C2_HIDE _C2Tuple { };
-
-C2_HIDE
-void addC2Params(std::list<const C2FieldDescriptor> &, _C2Tuple<> *) {
-}
-
-template<typename T, typename... Params>
-C2_HIDE
-void addC2Params(std::list<const C2FieldDescriptor> &fields, _C2Tuple<T, Params...> *)
-{
- //C2Param::index_t index = T::baseIndex;
- //(void)index;
- fields.insert(fields.end(), T::fieldList);
- addC2Params(fields, (_C2Tuple<Params...> *)nullptr);
-}
-
-template<typename... Params>
-C2_HIDE
-std::list<const C2FieldDescriptor> describeC2Params() {
- std::list<const C2FieldDescriptor> fields;
- addC2Params(fields, (_C2Tuple<Params...> *)nullptr);
- return fields;
-}
-
-#endif
-
-/* ---------------------------- UTILITIES FOR ENUMERATION REFLECTION ---------------------------- */
-
-} // namespace android
-
-#endif // C2UTILS_PARAM_UTILS_H_
-
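getEnumValuesFromString in the removed header splits a stringified enumerator list such as "kValue1, kValue2 = 5, kValue3" into bare names, dropping any "= value" initializers, for use by the C2ENUM reflection macros. A rough standalone equivalent, assuming only that behaviour (enumNamesFromString is a hypothetical name, not the deleted API):

#include <string>
#include <vector>

static std::vector<std::string> enumNamesFromString(const std::string &s) {
    std::vector<std::string> names;
    size_t pos = 0;
    while (pos < s.size()) {
        size_t end = s.find_first_of(" ,=", pos);     // a name ends at space, ',' or '='
        if (end == std::string::npos) end = s.size();
        if (end > pos) names.emplace_back(s, pos, end - pos);
        size_t comma = s.find(',', end);              // skip over any "= value" initializer
        pos = (comma == std::string::npos) ? s.size() : comma + 1;
        while (pos < s.size() && s[pos] == ' ') ++pos;
    }
    return names;
}

// enumNamesFromString("kValue1, kValue2 = 5, kValue3")
//   -> { "kValue1", "kValue2", "kValue3" }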
diff --git a/media/libstagefright/codec2/vndk/include/util/_C2MacroUtils.h b/media/libstagefright/codec2/vndk/include/util/_C2MacroUtils.h
deleted file mode 100644
index 04e9ba5..0000000
--- a/media/libstagefright/codec2/vndk/include/util/_C2MacroUtils.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef C2UTILS_MACRO_UTILS_H_
-#define C2UTILS_MACRO_UTILS_H_
-
-/** \file
- * Macro utilities for the utils library used by Codec2 implementations.
- */
-
-/// \if 0
-
-/* --------------------------------- VARIABLE ARGUMENT COUNTING --------------------------------- */
-
-// remove empty arguments - _C2_ARG() expands to '', while _C2_ARG(x) expands to ', x'
-// _C2_ARGn(...) does the same for n arguments
-#define _C2_ARG(...) , ##__VA_ARGS__
-#define _C2_ARG2(_1, _2) _C2_ARG(_1) _C2_ARG(_2)
-#define _C2_ARG4(_1, _2, _3, _4) _C2_ARG2(_1, _2) _C2_ARG2(_3, _4)
-#define _C2_ARG8(_1, _2, _3, _4, _5, _6, _7, _8) _C2_ARG4(_1, _2, _3, _4) _C2_ARG4(_5, _6, _7, _8)
-#define _C2_ARG16(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
- _C2_ARG8(_1, _2, _3, _4, _5, _6, _7, _8) _C2_ARG8(_9, _10, _11, _12, _13, _14, _15, _16)
-
-// return the 65th argument
-#define _C2_ARGC_3(_, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \
- _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, \
- _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, \
- _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, ...) _64
-
-/// \endif
-
-/**
- * Returns the number of arguments.
- */
-// We do this by prepending 1 and appending 65 designed values such that the 65th element
-// will be the number of arguments.
-#define _C2_ARGC(...) _C2_ARGC_1(0, ##__VA_ARGS__, \
- 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, \
- 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, \
- 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-
-/// \if 0
-
-// step 1. remove empty arguments - this is needed to allow trailing comma in enum definitions
-// (NOTE: we don't know which argument will have this trailing comma so we have to try all)
-#define _C2_ARGC_1(_, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \
- _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, \
- _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, \
- _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, ...) \
- _C2_ARGC_2(_ _C2_ARG(_0) \
- _C2_ARG16(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
- _C2_ARG16(_17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32) \
- _C2_ARG16(_33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48) \
- _C2_ARG16(_49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64), \
- ##__VA_ARGS__)
-
-// step 2. this is needed as removed arguments cannot be passed directly as empty into a macro
-#define _C2_ARGC_2(...) _C2_ARGC_3(__VA_ARGS__)
-
-/// \endif
-
-/* -------------------------------- VARIABLE ARGUMENT CONVERSION -------------------------------- */
-
-/// \if 0
-
-// macros that convert _1, _2, _3, ... to fn(_1, arg), fn(_2, arg), fn(_3, arg), ...
-#define _C2_MAP_64(fn, arg, head, ...) fn(head, arg), _C2_MAP_63(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_63(fn, arg, head, ...) fn(head, arg), _C2_MAP_62(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_62(fn, arg, head, ...) fn(head, arg), _C2_MAP_61(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_61(fn, arg, head, ...) fn(head, arg), _C2_MAP_60(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_60(fn, arg, head, ...) fn(head, arg), _C2_MAP_59(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_59(fn, arg, head, ...) fn(head, arg), _C2_MAP_58(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_58(fn, arg, head, ...) fn(head, arg), _C2_MAP_57(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_57(fn, arg, head, ...) fn(head, arg), _C2_MAP_56(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_56(fn, arg, head, ...) fn(head, arg), _C2_MAP_55(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_55(fn, arg, head, ...) fn(head, arg), _C2_MAP_54(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_54(fn, arg, head, ...) fn(head, arg), _C2_MAP_53(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_53(fn, arg, head, ...) fn(head, arg), _C2_MAP_52(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_52(fn, arg, head, ...) fn(head, arg), _C2_MAP_51(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_51(fn, arg, head, ...) fn(head, arg), _C2_MAP_50(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_50(fn, arg, head, ...) fn(head, arg), _C2_MAP_49(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_49(fn, arg, head, ...) fn(head, arg), _C2_MAP_48(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_48(fn, arg, head, ...) fn(head, arg), _C2_MAP_47(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_47(fn, arg, head, ...) fn(head, arg), _C2_MAP_46(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_46(fn, arg, head, ...) fn(head, arg), _C2_MAP_45(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_45(fn, arg, head, ...) fn(head, arg), _C2_MAP_44(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_44(fn, arg, head, ...) fn(head, arg), _C2_MAP_43(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_43(fn, arg, head, ...) fn(head, arg), _C2_MAP_42(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_42(fn, arg, head, ...) fn(head, arg), _C2_MAP_41(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_41(fn, arg, head, ...) fn(head, arg), _C2_MAP_40(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_40(fn, arg, head, ...) fn(head, arg), _C2_MAP_39(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_39(fn, arg, head, ...) fn(head, arg), _C2_MAP_38(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_38(fn, arg, head, ...) fn(head, arg), _C2_MAP_37(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_37(fn, arg, head, ...) fn(head, arg), _C2_MAP_36(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_36(fn, arg, head, ...) fn(head, arg), _C2_MAP_35(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_35(fn, arg, head, ...) fn(head, arg), _C2_MAP_34(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_34(fn, arg, head, ...) fn(head, arg), _C2_MAP_33(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_33(fn, arg, head, ...) fn(head, arg), _C2_MAP_32(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_32(fn, arg, head, ...) fn(head, arg), _C2_MAP_31(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_31(fn, arg, head, ...) fn(head, arg), _C2_MAP_30(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_30(fn, arg, head, ...) fn(head, arg), _C2_MAP_29(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_29(fn, arg, head, ...) fn(head, arg), _C2_MAP_28(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_28(fn, arg, head, ...) fn(head, arg), _C2_MAP_27(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_27(fn, arg, head, ...) fn(head, arg), _C2_MAP_26(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_26(fn, arg, head, ...) fn(head, arg), _C2_MAP_25(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_25(fn, arg, head, ...) fn(head, arg), _C2_MAP_24(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_24(fn, arg, head, ...) fn(head, arg), _C2_MAP_23(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_23(fn, arg, head, ...) fn(head, arg), _C2_MAP_22(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_22(fn, arg, head, ...) fn(head, arg), _C2_MAP_21(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_21(fn, arg, head, ...) fn(head, arg), _C2_MAP_20(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_20(fn, arg, head, ...) fn(head, arg), _C2_MAP_19(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_19(fn, arg, head, ...) fn(head, arg), _C2_MAP_18(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_18(fn, arg, head, ...) fn(head, arg), _C2_MAP_17(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_17(fn, arg, head, ...) fn(head, arg), _C2_MAP_16(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_16(fn, arg, head, ...) fn(head, arg), _C2_MAP_15(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_15(fn, arg, head, ...) fn(head, arg), _C2_MAP_14(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_14(fn, arg, head, ...) fn(head, arg), _C2_MAP_13(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_13(fn, arg, head, ...) fn(head, arg), _C2_MAP_12(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_12(fn, arg, head, ...) fn(head, arg), _C2_MAP_11(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_11(fn, arg, head, ...) fn(head, arg), _C2_MAP_10(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_10(fn, arg, head, ...) fn(head, arg), _C2_MAP_9(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_9(fn, arg, head, ...) fn(head, arg), _C2_MAP_8(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_8(fn, arg, head, ...) fn(head, arg), _C2_MAP_7(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_7(fn, arg, head, ...) fn(head, arg), _C2_MAP_6(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_6(fn, arg, head, ...) fn(head, arg), _C2_MAP_5(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_5(fn, arg, head, ...) fn(head, arg), _C2_MAP_4(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_4(fn, arg, head, ...) fn(head, arg), _C2_MAP_3(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_3(fn, arg, head, ...) fn(head, arg), _C2_MAP_2(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_2(fn, arg, head, ...) fn(head, arg), _C2_MAP_1(fn, arg, ##__VA_ARGS__)
-#define _C2_MAP_1(fn, arg, head, ...) fn(head, arg)
-
-/// \endif
-
-/**
- * Maps each argument using another macro x -> fn(x, arg)
- */
-// use wrapper to call the proper mapper based on the number of arguments
-#define _C2_MAP(fn, arg, ...) _C2_MAP__(_C2_ARGC(__VA_ARGS__), fn, arg, ##__VA_ARGS__)
-
-/// \if 0
-
-// evaluate _n so it becomes a number
-#define _C2_MAP__(_n, fn, arg, ...) _C2_MAP_(_n, fn, arg, __VA_ARGS__)
-// call the proper mapper
-#define _C2_MAP_(_n, fn, arg, ...) _C2_MAP_##_n (fn, arg, __VA_ARGS__)
-
-/// \endif
-
-#endif // C2UTILS_MACRO_UTILS_H_
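The argument-counting and mapping macros removed above rely on a standard preprocessor trick: append a descending list of numbers after the arguments so that a fixed parameter position lands on the argument count, then paste that count into a per-arity mapper name. A minimal sketch of the same trick, cut down to four arguments (the removed macros go up to 64) and using the same GNU ##__VA_ARGS__ extension:

#include <cstdio>

// Prepend a dummy, append 4..0; the parameter in the "N" slot is the count.
#define ARGC_(_0, _1, _2, _3, _4, N, ...) N
#define ARGC(...) ARGC_(0, ##__VA_ARGS__, 4, 3, 2, 1, 0)

// Map fn over up to four arguments: MAP(fn, arg, a, b) -> fn(a, arg), fn(b, arg)
#define MAP_1(fn, arg, head)      fn(head, arg)
#define MAP_2(fn, arg, head, ...) fn(head, arg), MAP_1(fn, arg, __VA_ARGS__)
#define MAP_3(fn, arg, head, ...) fn(head, arg), MAP_2(fn, arg, __VA_ARGS__)
#define MAP_4(fn, arg, head, ...) fn(head, arg), MAP_3(fn, arg, __VA_ARGS__)
#define MAP__(n, fn, arg, ...)    MAP_##n(fn, arg, __VA_ARGS__)   // paste the arity
#define MAP_(n, fn, arg, ...)     MAP__(n, fn, arg, __VA_ARGS__)  // force n to expand first
#define MAP(fn, arg, ...)         MAP_(ARGC(__VA_ARGS__), fn, arg, __VA_ARGS__)

#define SCALE(x, k) ((x) * (k))

int main() {
    printf("argc = %d\n", ARGC(a, b, c, d));        // prints "argc = 4"
    printf("%d %d %d\n", MAP(SCALE, 10, 1, 2, 3));  // prints "10 20 30"
    return 0;
}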
diff --git a/media/libstagefright/codecs/aacdec/Android.bp b/media/libstagefright/codecs/aacdec/Android.bp
index 21c00a1..7352854 100644
--- a/media/libstagefright/codecs/aacdec/Android.bp
+++ b/media/libstagefright/codecs/aacdec/Android.bp
@@ -17,6 +17,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
index 129ad65..95d3724 100644
--- a/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
+++ b/media/libstagefright/codecs/aacdec/DrcPresModeWrap.cpp
@@ -24,7 +24,7 @@
//#define DRC_PRES_MODE_WRAP_DEBUG
#define GPM_ENCODER_TARGET_LEVEL 64
-#define MAX_TARGET_LEVEL 64
+#define MAX_TARGET_LEVEL 40
CDrcPresModeWrapper::CDrcPresModeWrapper()
{
@@ -164,7 +164,7 @@
if (mDataUpdate) {
// sanity check
if (mDesTarget < MAX_TARGET_LEVEL){
- mDesTarget = MAX_TARGET_LEVEL; // limit target level to -16 dB or below
+ mDesTarget = MAX_TARGET_LEVEL; // limit target level to -10 dB or below
newTarget = MAX_TARGET_LEVEL;
}
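The wrapper's target and reference levels are quarter-dB step counts, which is what the comment update reflects: 64 steps corresponds to -16 dB, while the new MAX_TARGET_LEVEL of 40 corresponds to -10 dB. A quick standalone check of that arithmetic:

#include <cstdio>

// DRC levels here are counts of -0.25 dB steps (per the DRC_DEFAULT_MOBILE_* comments).
static float levelToDb(int quarterDbSteps) {
    return -0.25f * quarterDbSteps;
}

int main() {
    printf("64 steps -> %.1f dB\n", levelToDb(64));   // -16.0 dB (encoder default)
    printf("40 steps -> %.1f dB\n", levelToDb(40));   // -10.0 dB (new MAX_TARGET_LEVEL)
    return 0;
}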
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index e0c0c32..bc0a69f 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -36,6 +36,7 @@
#define DRC_DEFAULT_MOBILE_DRC_CUT 127 /* maximum compression of dynamic range for mobile conf */
#define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */
#define DRC_DEFAULT_MOBILE_DRC_HEAVY 1 /* switch for heavy compression for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_EFFECT 3 /* MPEG-D DRC effect type; 3 => Limited playback range */
#define DRC_DEFAULT_MOBILE_ENC_LEVEL (-1) /* encoder target level; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
#define MAX_CHANNEL_COUNT 8 /* maximum number of audio channels that can be decoded */
// names of properties that can be used to override the default DRC settings
@@ -44,6 +45,7 @@
#define PROP_DRC_OVERRIDE_BOOST "aac_drc_boost"
#define PROP_DRC_OVERRIDE_HEAVY "aac_drc_heavy"
#define PROP_DRC_OVERRIDE_ENC_LEVEL "aac_drc_enc_target_level"
+#define PROP_DRC_OVERRIDE_EFFECT "ro.aac_drc_effect_type"
namespace android {
@@ -63,6 +65,7 @@
OMX_AUDIO_AACObjectLD,
OMX_AUDIO_AACObjectELD,
OMX_AUDIO_AACObjectER_Scalable,
+ OMX_AUDIO_AACObjectXHE,
};
SoftAAC2::SoftAAC2(
@@ -207,6 +210,15 @@
} else {
mDrcWrap.setParam(DRC_PRES_MODE_WRAP_ENCODER_TARGET, DRC_DEFAULT_MOBILE_ENC_LEVEL);
}
+ // AAC_UNIDRC_SET_EFFECT
+ int32_t effectType =
+ property_get_int32(PROP_DRC_OVERRIDE_EFFECT, DRC_DEFAULT_MOBILE_DRC_EFFECT);
+ if (effectType < -1 || effectType > 8) {
+ effectType = DRC_DEFAULT_MOBILE_DRC_EFFECT;
+ }
+ ALOGV("AAC decoder using MPEG-D DRC effect type %d (default=%d)",
+ effectType, DRC_DEFAULT_MOBILE_DRC_EFFECT);
+ aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_SET_EFFECT, effectType);
// By default, the decoder creates a 5.1 channel downmix signal.
// For seven and eight channel input streams, enable 6.1 and 7.1 channel output
@@ -414,10 +426,10 @@
return OMX_ErrorNone;
}
- case OMX_IndexParamAudioAndroidAacPresentation:
+ case OMX_IndexParamAudioAndroidAacDrcPresentation:
{
- const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *aacPresParams =
- (const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *)params;
+ const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *aacPresParams =
+ (const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *)params;
if (!isValidOMXParam(aacPresParams)) {
return OMX_ErrorBadParameter;
@@ -443,6 +455,10 @@
ALOGV("set nMaxOutputChannels=%d", max);
aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, max);
}
+ if (aacPresParams->nDrcEffectType >= -1) {
+ ALOGV("set nDrcEffectType=%d", aacPresParams->nDrcEffectType);
+ aacDecoder_SetParam(mAACDecoder, AAC_UNIDRC_SET_EFFECT, aacPresParams->nDrcEffectType);
+ }
bool updateDrcWrapper = false;
if (aacPresParams->nDrcBoost >= 0) {
ALOGV("set nDrcBoost=%d", aacPresParams->nDrcBoost);
diff --git a/media/libstagefright/codecs/aacdec/exports.lds b/media/libstagefright/codecs/aacdec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/aacdec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
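This exports.lds (and the identical copies added for the other soft codecs below) is a linker version script: it keeps exactly one global symbol, the OMX component factory, and makes every other symbol local, which is why each Android.bp gains a version_script entry. The mangled name demangles to createSoftOMXComponent(char const*, OMX_CALLBACKTYPE const*, void*, OMX_COMPONENTTYPE**), the entry point the OMX plugin resolves when it loads the library. A sketch of the matching declaration, with the types forward-declared only to keep it self-contained:

// Forward declarations stand in for the real OMX headers in this sketch.
struct OMX_CALLBACKTYPE;
struct OMX_COMPONENTTYPE;
namespace android { struct SoftOMXComponent; }

// The only symbol left global by exports.lds (parameter types match the
// mangled name; OMX_PTR is a void* typedef in the real headers).
android::SoftOMXComponent *createSoftOMXComponent(
        const char *name, const OMX_CALLBACKTYPE *callbacks,
        void *appData, OMX_COMPONENTTYPE **component);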
diff --git a/media/libstagefright/codecs/aacenc/Android.bp b/media/libstagefright/codecs/aacenc/Android.bp
index d734b9c..9342351 100644
--- a/media/libstagefright/codecs/aacenc/Android.bp
+++ b/media/libstagefright/codecs/aacenc/Android.bp
@@ -14,6 +14,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
deleted file mode 100644
index e64c1b7..0000000
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef SOFT_AAC_ENCODER_H_
-
-#define SOFT_AAC_ENCODER_H_
-
-#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
-
-struct VO_AUDIO_CODECAPI;
-struct VO_MEM_OPERATOR;
-
-namespace android {
-
-struct SoftAACEncoder : public SimpleSoftOMXComponent {
- SoftAACEncoder(
- const char *name,
- const OMX_CALLBACKTYPE *callbacks,
- OMX_PTR appData,
- OMX_COMPONENTTYPE **component);
-
-protected:
- virtual ~SoftAACEncoder();
-
- virtual OMX_ERRORTYPE internalGetParameter(
- OMX_INDEXTYPE index, OMX_PTR params);
-
- virtual OMX_ERRORTYPE internalSetParameter(
- OMX_INDEXTYPE index, const OMX_PTR params);
-
- virtual void onQueueFilled(OMX_U32 portIndex);
-
- virtual void onReset();
-
-private:
- enum {
- kNumBuffers = 4,
- kNumSamplesPerFrame = 1024,
- };
-
- void *mEncoderHandle;
- VO_AUDIO_CODECAPI *mApiHandle;
- VO_MEM_OPERATOR *mMemOperator;
-
- OMX_U32 mNumChannels;
- OMX_U32 mSampleRate;
- OMX_U32 mBitRate;
-
- bool mSentCodecSpecificData;
- size_t mInputSize;
- int16_t *mInputFrame;
- int64_t mInputTimeUs;
-
- bool mSawInputEOS;
-
- uint8_t mAudioSpecificConfigData[2];
-
- bool mSignalledError;
-
- void initPorts();
- status_t initEncoder();
-
- status_t setAudioSpecificConfigData();
- status_t setAudioParams();
-
- DISALLOW_EVIL_CONSTRUCTORS(SoftAACEncoder);
-};
-
-} // namespace android
-
-#endif // SOFT_AAC_ENCODER_H_
diff --git a/media/libstagefright/codecs/aacenc/exports.lds b/media/libstagefright/codecs/aacenc/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/aacenc/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.bp b/media/libstagefright/codecs/amrnb/dec/Android.bp
index b493e21..880f161 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.bp
+++ b/media/libstagefright/codecs/amrnb/dec/Android.bp
@@ -41,7 +41,7 @@
],
include_dirs: ["frameworks/av/media/libstagefright/include"],
- local_include_dirs: ["src"],
+ export_include_dirs: ["src"],
cflags: [
"-DOSCL_UNUSED_ARG(x)=(void)(x)",
@@ -50,6 +50,8 @@
"-Werror",
],
+ version_script: "exports.lds",
+
//sanitize: {
// misc_undefined: [
// "signed-integer-overflow",
@@ -85,6 +87,8 @@
"-Werror",
],
+ version_script: "exports.lds",
+
//sanitize: {
// misc_undefined: [
// "signed-integer-overflow",
@@ -106,7 +110,6 @@
],
compile_multilib: "32",
}
-
//###############################################################################
cc_test {
name: "libstagefright_amrnbdec_test",
diff --git a/media/libstagefright/codecs/amrnb/dec/exports.lds b/media/libstagefright/codecs/amrnb/dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.bp b/media/libstagefright/codecs/amrnb/enc/Android.bp
index 1e8fd31..19fd4a8 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.bp
+++ b/media/libstagefright/codecs/amrnb/enc/Android.bp
@@ -63,13 +63,15 @@
],
include_dirs: ["frameworks/av/media/libstagefright/include"],
- local_include_dirs: ["src"],
+ export_include_dirs: ["src"],
cflags: [
"-DOSCL_UNUSED_ARG(x)=(void)(x)",
"-Werror",
],
+ version_script: "exports.lds",
+
//addressing b/25409744
//sanitize: {
// misc_undefined: [
diff --git a/media/libstagefright/codecs/amrnb/enc/exports.lds b/media/libstagefright/codecs/amrnb/enc/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/enc/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/amrwb/Android.bp b/media/libstagefright/codecs/amrwb/Android.bp
index 14a73d6..9fefd81 100644
--- a/media/libstagefright/codecs/amrwb/Android.bp
+++ b/media/libstagefright/codecs/amrwb/Android.bp
@@ -45,8 +45,11 @@
],
include_dirs: ["frameworks/av/media/libstagefright/include"],
- local_include_dirs: ["src"],
- export_include_dirs: ["include"],
+
+ export_include_dirs: [
+ "src",
+ "include",
+ ],
cflags: [
"-DOSCL_UNUSED_ARG(x)=(void)(x)",
diff --git a/media/libstagefright/codecs/amrwbenc/Android.bp b/media/libstagefright/codecs/amrwbenc/Android.bp
index b6f637f..ebe08c6 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/Android.bp
@@ -159,6 +159,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/codecs/amrwbenc/exports.lds b/media/libstagefright/codecs/amrwbenc/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/amrwbenc/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/avcdec/Android.bp b/media/libstagefright/codecs/avcdec/Android.bp
index 34db19b..cf50a04 100644
--- a/media/libstagefright/codecs/avcdec/Android.bp
+++ b/media/libstagefright/codecs/avcdec/Android.bp
@@ -11,9 +11,10 @@
cflags: [
"-Wall",
"-Werror",
- "-Wno-unused-variable",
],
+ version_script: "exports.lds",
+
include_dirs: [
"external/libavc/decoder",
"external/libavc/common",
@@ -22,7 +23,6 @@
],
shared_libs: [
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index c342b6c..3924fc2 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -171,7 +171,7 @@
status_t SoftAVC::resetPlugin() {
mIsInFlush = false;
mReceivedEOS = false;
- mInputOffset = 0;
+
memset(mTimeStamps, 0, sizeof(mTimeStamps));
memset(mTimeStampsValid, 0, sizeof(mTimeStampsValid));
@@ -274,10 +274,6 @@
status = ivdec_api_function(mCodecCtx, (void *)&s_create_ip, (void *)&s_create_op);
- mCodecCtx = (iv_obj_t*)s_create_op.s_ivd_create_op_t.pv_handle;
- mCodecCtx->pv_fxns = dec_fxns;
- mCodecCtx->u4_size = sizeof(iv_obj_t);
-
if (status != IV_SUCCESS) {
ALOGE("Error in create: 0x%x",
s_create_op.s_ivd_create_op_t.u4_error_code);
@@ -285,6 +281,10 @@
mCodecCtx = NULL;
return UNKNOWN_ERROR;
}
+
+ mCodecCtx = (iv_obj_t*)s_create_op.s_ivd_create_op_t.pv_handle;
+ mCodecCtx->pv_fxns = dec_fxns;
+ mCodecCtx->u4_size = sizeof(iv_obj_t);
}
/* Reset the plugin state */
@@ -304,7 +304,6 @@
}
status_t SoftAVC::deInitDecoder() {
- size_t i;
IV_API_CALL_STATUS_T status;
if (mCodecCtx) {
@@ -334,6 +333,7 @@
SoftVideoDecoderOMXComponent::onReset();
mSignalledError = false;
+ mInputOffset = 0;
resetDecoder();
resetPlugin();
}
@@ -450,7 +450,6 @@
ivd_video_decode_ip_t s_dec_ip;
ivd_video_decode_op_t s_dec_op;
IV_API_CALL_STATUS_T status;
- size_t sizeY, sizeUV;
setDecodeArgs(&s_dec_ip, &s_dec_op, NULL, NULL, 0);
@@ -465,7 +464,8 @@
free(mFlushOutBuffer);
mFlushOutBuffer = NULL;
}
-
+ } else {
+ mInputOffset = 0;
}
}
@@ -530,7 +530,7 @@
notifyEmptyBufferDone(inHeader);
if (!(inHeader->nFlags & OMX_BUFFERFLAG_EOS)) {
- continue;
+ return;
}
mReceivedEOS = true;
@@ -561,7 +561,6 @@
ivd_video_decode_ip_t s_dec_ip;
ivd_video_decode_op_t s_dec_op;
nsecs_t timeDelay, timeTaken;
- size_t sizeY, sizeUV;
if (!setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
ALOGE("Decoder arg setup failed");
diff --git a/media/libstagefright/codecs/avcdec/exports.lds b/media/libstagefright/codecs/avcdec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/avcdec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/avcenc/Android.bp b/media/libstagefright/codecs/avcenc/Android.bp
index 5203126..cefe77c 100644
--- a/media/libstagefright/codecs/avcenc/Android.bp
+++ b/media/libstagefright/codecs/avcenc/Android.bp
@@ -17,7 +17,7 @@
],
shared_libs: [
- "libmedia_omx",
+ "libstagefright_foundation",
"libstagefright_omx",
"libutils",
"liblog",
@@ -39,5 +39,8 @@
"-Wno-unused-variable",
],
ldflags: ["-Wl,-Bsymbolic"],
+
+ version_script: "exports.lds",
+
compile_multilib: "32",
}
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 358c743..379d41e 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -26,7 +26,6 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
#include <OMX_IndexExt.h>
#include <OMX_VideoExt.h>
@@ -1170,6 +1169,12 @@
ps_inp_raw_buf->e_color_fmt = mIvVideoColorFormat;
source = NULL;
if ((inputBufferHeader != NULL) && inputBufferHeader->nFilledLen) {
+ OMX_ERRORTYPE error = validateInputBuffer(inputBufferHeader);
+ if (error != OMX_ErrorNone) {
+ ALOGE("b/69065651");
+ android_errorWriteLog(0x534e4554, "69065651");
+ return error;
+ }
source = inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
if (mInputDataIsMeta) {
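The validateInputBuffer call added here (and the matching ones in the MPEG4 and VPX encoders below) rejects undersized or inconsistent input buffers before the encoder reads raw frame data from them; 0x534e4554 is ASCII for "SNET", the tag used for security-relevant log events. As a rough illustration of the kind of check involved (the names and exact rules below are assumptions, not the AOSP implementation):

#include <cstddef>

struct BufferHeader {        // stand-in for OMX_BUFFERHEADERTYPE
    size_t nAllocLen;
    size_t nOffset;
    size_t nFilledLen;
};

// Reject buffers whose claimed payload overflows the allocation or is too
// small to hold one raw input frame, instead of reading past the end later.
static bool inputBufferLooksValid(const BufferHeader &h, size_t minFrameBytes) {
    if (h.nOffset > h.nAllocLen) return false;
    if (h.nFilledLen > h.nAllocLen - h.nOffset) return false;
    return h.nFilledLen >= minFrameBytes;
}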
diff --git a/media/libstagefright/codecs/avcenc/exports.lds b/media/libstagefright/codecs/avcenc/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/avcenc/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/flac/dec/Android.bp b/media/libstagefright/codecs/flac/dec/Android.bp
index 595cfdb..9af086b 100644
--- a/media/libstagefright/codecs/flac/dec/Android.bp
+++ b/media/libstagefright/codecs/flac/dec/Android.bp
@@ -18,6 +18,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp
index 4ab1ab2..2c0f224 100644
--- a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp
+++ b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.cpp
@@ -45,9 +45,11 @@
OMX_COMPONENTTYPE **component)
: SimpleSoftOMXComponent(name, callbacks, appData, component),
mFLACDecoder(NULL),
- mHasStreamInfo(false),
mInputBufferCount(0),
+ mHasStreamInfo(false),
mSignalledError(false),
+ mSawInputEOS(false),
+ mFinishedDecoder(false),
mOutputPortSettingsChange(NONE) {
ALOGV("ctor:");
memset(&mStreamInfo, 0, sizeof(mStreamInfo));
@@ -57,6 +59,7 @@
SoftFlacDecoder::~SoftFlacDecoder() {
ALOGV("dtor:");
+ delete mFLACDecoder;
}
void SoftFlacDecoder::initPorts() {
@@ -291,7 +294,6 @@
}
void SoftFlacDecoder::onQueueFilled(OMX_U32 /* portIndex */) {
- ALOGV("onQueueFilled:");
if (mSignalledError || mOutputPortSettingsChange != NONE) {
return;
}
@@ -299,96 +301,114 @@
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
- while (!inQueue.empty() && !outQueue.empty()) {
- BufferInfo *inInfo = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ ALOGV("onQueueFilled %d/%d:", inQueue.empty(), outQueue.empty());
+ while ((!inQueue.empty() || mSawInputEOS) && !outQueue.empty() && !mFinishedDecoder) {
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- uint8_t* inBuffer = inHeader->pBuffer + inHeader->nOffset;
- uint32_t inBufferLength = inHeader->nFilledLen;
- bool endOfInput = (inHeader->nFlags & OMX_BUFFERFLAG_EOS) != 0;
+ short *outBuffer = reinterpret_cast<short *>(outHeader->pBuffer + outHeader->nOffset);
+ size_t outBufferSize = outHeader->nAllocLen - outHeader->nOffset;
+ int64_t timeStamp = 0;
- if (inHeader->nFilledLen == 0) {
- if (endOfInput) {
- outHeader->nFilledLen = 0;
- outHeader->nFlags = OMX_BUFFERFLAG_EOS;
- outInfo->mOwnedByUs = false;
- outQueue.erase(outQueue.begin());
- notifyFillBufferDone(outHeader);
- } else {
- ALOGE("onQueueFilled: emptyInputBuffer received");
+ if (!inQueue.empty()) {
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ uint8_t* inBuffer = inHeader->pBuffer + inHeader->nOffset;
+ uint32_t inBufferLength = inHeader->nFilledLen;
+ ALOGV("input: %u bytes", inBufferLength);
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ ALOGV("saw EOS");
+ mSawInputEOS = true;
+ if (mInputBufferCount == 0 && inHeader->nFilledLen == 0) {
+ // first buffer was empty and EOS: signal EOS on output and return
+ ALOGV("empty first EOS");
+ outHeader->nFilledLen = 0;
+ outHeader->nTimeStamp = inHeader->nTimeStamp;
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ notifyFillBufferDone(outHeader);
+ mFinishedDecoder = true;
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ notifyEmptyBufferDone(inHeader);
+ return;
+ }
}
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- notifyEmptyBufferDone(inHeader);
- return;
- }
- if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
- ALOGE("onQueueFilled: first buffer should have OMX_BUFFERFLAG_CODECCONFIG set");
- inHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG;
- }
- if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) {
- status_t decoderErr = mFLACDecoder->parseMetadata(inBuffer, inBufferLength);
- mInputBufferCount++;
- if (decoderErr != OK && decoderErr != WOULD_BLOCK) {
- ALOGE("onQueueFilled: FLACDecoder parseMetaData returns error %d", decoderErr);
+ if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
+ ALOGE("onQueueFilled: first buffer should have OMX_BUFFERFLAG_CODECCONFIG set");
+ inHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG;
+ }
+ if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) {
+ ALOGV("received config buffer of size %u", inBufferLength);
+ status_t decoderErr = mFLACDecoder->parseMetadata(inBuffer, inBufferLength);
+ mInputBufferCount++;
+
+ if (decoderErr != OK && decoderErr != WOULD_BLOCK) {
+ ALOGE("onQueueFilled: FLACDecoder parseMetaData returns error %d", decoderErr);
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, decoderErr, NULL);
+ return;
+ }
+
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ notifyEmptyBufferDone(inHeader);
+
+ if (decoderErr == WOULD_BLOCK) {
+ continue;
+ }
+ mStreamInfo = mFLACDecoder->getStreamInfo();
+ mHasStreamInfo = true;
+
+ // Only send out port settings changed event if both sample rate
+ // and numChannels are valid.
+ if (mStreamInfo.sample_rate && mStreamInfo.channels) {
+ ALOGD("onQueueFilled: initially configuring decoder: %d Hz, %d channels",
+ mStreamInfo.sample_rate, mStreamInfo.channels);
+
+ notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+ }
+ return;
+ }
+
+ status_t decoderErr = mFLACDecoder->decodeOneFrame(
+ inBuffer, inBufferLength, outBuffer, &outBufferSize);
+ if (decoderErr != OK) {
+ ALOGE("onQueueFilled: FLACDecoder decodeOneFrame returns error %d", decoderErr);
mSignalledError = true;
notify(OMX_EventError, OMX_ErrorStreamCorrupt, decoderErr, NULL);
return;
}
+ mInputBufferCount++;
+ timeStamp = inHeader->nTimeStamp;
inInfo->mOwnedByUs = false;
inQueue.erase(inQueue.begin());
notifyEmptyBufferDone(inHeader);
- if (decoderErr == WOULD_BLOCK) {
+ if (outBufferSize == 0) {
+ ALOGV("no output, trying again");
continue;
}
- mStreamInfo = mFLACDecoder->getStreamInfo();
- mHasStreamInfo = true;
-
- // Only send out port settings changed event if both sample rate
- // and numChannels are valid.
- if (mStreamInfo.sample_rate && mStreamInfo.channels) {
- ALOGD("onQueueFilled: initially configuring decoder: %d Hz, %d channels",
- mStreamInfo.sample_rate, mStreamInfo.channels);
-
- notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
- mOutputPortSettingsChange = AWAITING_DISABLED;
+ } else if (mSawInputEOS) {
+ status_t decoderErr = mFLACDecoder->decodeOneFrame(NULL, 0, outBuffer, &outBufferSize);
+ mFinishedDecoder = true;
+ if (decoderErr != OK) {
+ ALOGE("onQueueFilled: FLACDecoder finish returns error %d", decoderErr);
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, decoderErr, NULL);
+ return;
}
- return;
- }
-
- short *outBuffer =
- reinterpret_cast<short *>(outHeader->pBuffer + outHeader->nOffset);
- size_t outBufferSize = outHeader->nAllocLen - outHeader->nOffset;
-
- status_t decoderErr = mFLACDecoder->decodeOneFrame(
- inBuffer, inBufferLength, outBuffer, &outBufferSize);
- if (decoderErr != OK) {
- ALOGE("onQueueFilled: FLACDecoder decodeOneFrame returns error %d", decoderErr);
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorStreamCorrupt, decoderErr, NULL);
- return;
- }
-
- mInputBufferCount++;
- int64_t ts = inHeader->nTimeStamp;
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- notifyEmptyBufferDone(inHeader);
-
- if (endOfInput) {
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
- } else if (outBufferSize == 0) {
- continue;
} else {
- outHeader->nFlags = 0;
+ // no more input buffers at this time, loop and see if there is more output
+ continue;
}
outHeader->nFilledLen = outBufferSize;
- outHeader->nTimeStamp = ts;
+ outHeader->nTimeStamp = timeStamp;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
@@ -405,9 +425,12 @@
void SoftFlacDecoder::drainDecoder() {
mFLACDecoder->flush();
+ mSawInputEOS = false;
+ mFinishedDecoder = false;
}
void SoftFlacDecoder::onReset() {
+ ALOGV("onReset");
drainDecoder();
memset(&mStreamInfo, 0, sizeof(mStreamInfo));
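The reworked loop above keeps running while there is queued input or input EOS has been seen; once the queue drains after EOS, one final decodeOneFrame(NULL, 0, ...) call flushes the decoder and the buffer carrying its output is tagged with OMX_BUFFERFLAG_EOS. A condensed sketch of that drain step (assumed shape, not the component code):

#include <cstddef>
#include <cstdint>

struct FlacDecoderStub {                         // stand-in for FLACDecoder
    int decodeOneFrame(const uint8_t *, size_t, int16_t *, size_t *) { return 0; }
};

struct EosState {
    bool sawInputEos = false;      // set when an input buffer carried OMX_BUFFERFLAG_EOS
    bool finishedDecoder = false;  // set once the final flush has run
};

// Returns true when this call produced the final output, which the caller
// then sends with OMX_BUFFERFLAG_EOS set.
static bool drainAfterEos(FlacDecoderStub &dec, EosState &st,
                          int16_t *out, size_t *outSize) {
    if (!st.sawInputEos || st.finishedDecoder) {
        return false;
    }
    dec.decodeOneFrame(nullptr, 0, out, outSize);  // NULL/0 input flushes remaining samples
    st.finishedDecoder = true;
    return true;
}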
diff --git a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h
index 4a21c34..b63f7ad 100644
--- a/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h
+++ b/media/libstagefright/codecs/flac/dec/SoftFlacDecoder.h
@@ -50,11 +50,13 @@
kNumOutputBuffers = 4,
};
- sp<FLACDecoder> mFLACDecoder;
+ FLACDecoder *mFLACDecoder;
FLAC__StreamMetadata_StreamInfo mStreamInfo;
- bool mHasStreamInfo;
size_t mInputBufferCount;
+ bool mHasStreamInfo;
bool mSignalledError;
+ bool mSawInputEOS;
+ bool mFinishedDecoder;
enum {
NONE,
diff --git a/media/libstagefright/codecs/flac/dec/exports.lds b/media/libstagefright/codecs/flac/dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/flac/dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/flac/enc/Android.bp b/media/libstagefright/codecs/flac/enc/Android.bp
index 066917b..46b974d 100644
--- a/media/libstagefright/codecs/flac/enc/Android.bp
+++ b/media/libstagefright/codecs/flac/enc/Android.bp
@@ -10,6 +10,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
@@ -22,7 +24,6 @@
},
shared_libs: [
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
index 56d2d69..fdc8975 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
@@ -56,12 +56,13 @@
mCompressionLevel(FLAC_COMPRESSION_LEVEL_DEFAULT),
mEncoderWriteData(false),
mEncoderReturnedEncodedData(false),
+ mSawInputEOS(false),
+ mSentOutputEOS(false),
mEncoderReturnedNbBytes(0),
- mInputBufferPcm32(NULL)
-#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
- , mHeaderOffset(0)
- , mWroteHeader(false)
-#endif
+ mInputBufferPcm32(NULL),
+ mHeaderOffset(0),
+ mHeaderComplete(false),
+ mWroteHeader(false)
{
ALOGV("SoftFlacEncoder::SoftFlacEncoder(name=%s)", name);
initPorts();
@@ -354,55 +355,55 @@
List<BufferInfo *> &inQueue = getPortQueue(0);
List<BufferInfo *> &outQueue = getPortQueue(1);
- while (!inQueue.empty() && !outQueue.empty()) {
- BufferInfo *inInfo = *inQueue.begin();
- OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+ FLAC__bool ok = true;
+
+ while ((!inQueue.empty() || mSawInputEOS) && !outQueue.empty() && !mSentOutputEOS) {
+ if (!inQueue.empty()) {
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ ALOGV("saw EOS on buffer of size %u", inHeader->nFilledLen);
+ mSawInputEOS = true;
+ }
+
+ if (inHeader->nFilledLen > kMaxInputBufferSize) {
+ ALOGE("input buffer too large (%d).", inHeader->nFilledLen);
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+
+ assert(mNumChannels != 0);
+ mEncoderWriteData = true;
+ mEncoderReturnedEncodedData = false;
+ mEncoderReturnedNbBytes = 0;
+ mCurrentInputTimeStamp = inHeader->nTimeStamp;
+
+ const unsigned nbInputFrames = inHeader->nFilledLen / (2 * mNumChannels);
+ const unsigned nbInputSamples = inHeader->nFilledLen / 2;
+ const OMX_S16 * const pcm16 = reinterpret_cast<OMX_S16 *>(inHeader->pBuffer);
+
+ CHECK_LE(nbInputSamples, 2 * kMaxNumSamplesPerFrame);
+ for (unsigned i=0 ; i < nbInputSamples ; i++) {
+ mInputBufferPcm32[i] = (FLAC__int32) pcm16[i];
+ }
+ ALOGV(" about to encode %u samples per channel", nbInputFrames);
+ ok = FLAC__stream_encoder_process_interleaved(
+ mFlacStreamEncoder,
+ mInputBufferPcm32,
+ nbInputFrames /*samples per channel*/ );
+
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
- inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
-
- outHeader->nFilledLen = 0;
- outHeader->nFlags = OMX_BUFFERFLAG_EOS;
-
- outQueue.erase(outQueue.begin());
- outInfo->mOwnedByUs = false;
- notifyFillBufferDone(outHeader);
-
- return;
- }
-
- if (inHeader->nFilledLen > kMaxInputBufferSize) {
- ALOGE("input buffer too large (%d).", inHeader->nFilledLen);
- mSignalledError = true;
- notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
- return;
- }
-
- assert(mNumChannels != 0);
- mEncoderWriteData = true;
- mEncoderReturnedEncodedData = false;
- mEncoderReturnedNbBytes = 0;
- mCurrentInputTimeStamp = inHeader->nTimeStamp;
-
- const unsigned nbInputFrames = inHeader->nFilledLen / (2 * mNumChannels);
- const unsigned nbInputSamples = inHeader->nFilledLen / 2;
- const OMX_S16 * const pcm16 = reinterpret_cast<OMX_S16 *>(inHeader->pBuffer);
-
- CHECK_LE(nbInputSamples, 2 * kMaxNumSamplesPerFrame);
- for (unsigned i=0 ; i < nbInputSamples ; i++) {
- mInputBufferPcm32[i] = (FLAC__int32) pcm16[i];
- }
- ALOGV(" about to encode %u samples per channel", nbInputFrames);
- FLAC__bool ok = FLAC__stream_encoder_process_interleaved(
- mFlacStreamEncoder,
- mInputBufferPcm32,
- nbInputFrames /*samples per channel*/ );
-
if (ok) {
if (mEncoderReturnedEncodedData && (mEncoderReturnedNbBytes != 0)) {
ALOGV(" dequeueing buffer on output port after writing data");
@@ -414,6 +415,21 @@
mEncoderReturnedEncodedData = false;
} else {
ALOGV(" encoder process_interleaved returned without data to write");
+ if (mSawInputEOS) {
+ ALOGV("finishing encoder");
+ mSentOutputEOS = true;
+ FLAC__stream_encoder_finish(mFlacStreamEncoder);
+ if (mEncoderReturnedEncodedData && (mEncoderReturnedNbBytes != 0)) {
+ ALOGV(" dequeueing residual buffer on output port after writing data");
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+ mEncoderReturnedEncodedData = false;
+ }
+ }
}
} else {
ALOGE(" error encountered during encoding");
@@ -422,11 +438,6 @@
return;
}
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
}
}
@@ -438,16 +449,22 @@
ALOGV("SoftFlacEncoder::onEncodedFlacAvailable(bytes=%zu, samples=%u, curr_frame=%u)",
bytes, samples, current_frame);
-#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
if (samples == 0) {
- ALOGI(" saving %zu bytes of header", bytes);
- memcpy(mHeader + mHeaderOffset, buffer, bytes);
- mHeaderOffset += bytes;// will contain header size when finished receiving header
+ ALOGV("saving %zu bytes of header", bytes);
+ if (mHeaderOffset + bytes > sizeof(mHeader) || mHeaderComplete) {
+ ALOGW("header is too big, or header already received");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ } else {
+ memcpy(mHeader + mHeaderOffset, buffer, bytes);
+ mHeaderOffset += bytes;// will contain header size when finished receiving header
+ if (buffer[0] & 0x80) {
+ mHeaderComplete = true;
+ }
+ }
return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
}
-#endif
-
if ((samples == 0) || !mEncoderWriteData) {
// called by the encoder because there's header data to save, but it's not the role
// of this component (unless WRITE_FLAC_HEADER_IN_FIRST_BUFFER is defined)
@@ -460,16 +477,23 @@
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
-#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
- if (!mWroteHeader) {
- ALOGI(" writing %d bytes of header on output port", mHeaderOffset);
+ if (mHeaderComplete && !mWroteHeader) {
+ ALOGV(" writing %d bytes of header on output port", mHeaderOffset);
memcpy(outHeader->pBuffer + outHeader->nOffset + outHeader->nFilledLen,
mHeader, mHeaderOffset);
outHeader->nFilledLen += mHeaderOffset;
- outHeader->nOffset += mHeaderOffset;
mWroteHeader = true;
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;
+ notifyFillBufferDone(outHeader);
+ outInfo = NULL;
+ outHeader = NULL;
+ // get the next buffer for the rest of the data
+ CHECK(!outQueue.empty());
+ outInfo = *outQueue.begin();
+ outHeader = outInfo->mHeader;
}
-#endif
// write encoded data
ALOGV(" writing %zu bytes of encoded data on output port", bytes);
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h
index f4f0655..64a6b1e 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.h
@@ -22,10 +22,6 @@
#include "FLAC/stream_encoder.h"
-// use this symbol to have the first output buffer start with FLAC frame header so a dump of
-// all the output buffers can be opened as a .flac file
-//#define WRITE_FLAC_HEADER_IN_FIRST_BUFFER
-
namespace android {
struct SoftFlacEncoder : public SimpleSoftOMXComponent {
@@ -62,6 +58,8 @@
// should the data received by the callback be written to the output port
bool mEncoderWriteData;
bool mEncoderReturnedEncodedData;
+ bool mSawInputEOS;
+ bool mSentOutputEOS;
size_t mEncoderReturnedNbBytes;
OMX_TICKS mCurrentInputTimeStamp;
@@ -85,11 +83,10 @@
// before passing the input data to the encoder
FLAC__int32* mInputBufferPcm32;
-#ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
unsigned mHeaderOffset;
+ bool mHeaderComplete;
bool mWroteHeader;
char mHeader[128];
-#endif
DISALLOW_EVIL_CONSTRUCTORS(SoftFlacEncoder);
};
diff --git a/media/libstagefright/codecs/flac/enc/exports.lds b/media/libstagefright/codecs/flac/enc/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/flac/enc/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/g711/dec/Android.bp b/media/libstagefright/codecs/g711/dec/Android.bp
index fff72a8..3d97d8c 100644
--- a/media/libstagefright/codecs/g711/dec/Android.bp
+++ b/media/libstagefright/codecs/g711/dec/Android.bp
@@ -13,7 +13,7 @@
],
shared_libs: [
- "libmedia_omx",
+ "libstagefright_foundation",
"libstagefright_omx",
"libutils",
"liblog",
@@ -21,6 +21,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/codecs/g711/dec/exports.lds b/media/libstagefright/codecs/g711/dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/g711/dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/gsm/dec/Android.bp b/media/libstagefright/codecs/gsm/dec/Android.bp
index 753eeef..1c3208b 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.bp
+++ b/media/libstagefright/codecs/gsm/dec/Android.bp
@@ -15,6 +15,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
@@ -27,7 +29,7 @@
},
shared_libs: [
- "libmedia_omx",
+ "libstagefright_foundation",
"libstagefright_omx",
"libutils",
"liblog",
diff --git a/media/libstagefright/codecs/gsm/dec/exports.lds b/media/libstagefright/codecs/gsm/dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/gsm/dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/hevcdec/Android.bp b/media/libstagefright/codecs/hevcdec/Android.bp
index d9a5ee3..45920e6 100644
--- a/media/libstagefright/codecs/hevcdec/Android.bp
+++ b/media/libstagefright/codecs/hevcdec/Android.bp
@@ -14,6 +14,8 @@
"-Wno-unused-variable",
],
+ version_script: "exports.lds",
+
include_dirs: [
"external/libhevc/decoder",
"external/libhevc/common",
@@ -32,7 +34,6 @@
},
shared_libs: [
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index 2745087..bb7d361 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -48,7 +48,8 @@
(IVD_CONTROL_API_COMMAND_TYPE_T)IHEVCD_CXA_CMD_CTL_SET_NUM_CORES
static const CodecProfileLevel kProfileLevels[] = {
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel51 },
+ { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel51 },
+ { OMX_VIDEO_HEVCProfileMainStill, OMX_VIDEO_HEVCMainTierLevel51 },
};
SoftHEVC::SoftHEVC(
@@ -312,10 +313,6 @@
status = ivdec_api_function(mCodecCtx, (void *)&s_create_ip, (void *)&s_create_op);
- mCodecCtx = (iv_obj_t*)s_create_op.s_ivd_create_op_t.pv_handle;
- mCodecCtx->pv_fxns = dec_fxns;
- mCodecCtx->u4_size = sizeof(iv_obj_t);
-
if (status != IV_SUCCESS) {
ALOGE("Error in create: 0x%x",
s_create_op.s_ivd_create_op_t.u4_error_code);
@@ -323,6 +320,10 @@
mCodecCtx = NULL;
return UNKNOWN_ERROR;
}
+
+ mCodecCtx = (iv_obj_t*)s_create_op.s_ivd_create_op_t.pv_handle;
+ mCodecCtx->pv_fxns = dec_fxns;
+ mCodecCtx->u4_size = sizeof(iv_obj_t);
}
/* Reset the plugin state */
diff --git a/media/libstagefright/codecs/hevcdec/exports.lds b/media/libstagefright/codecs/hevcdec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/hevcdec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.bp b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
index 1216ae5..ca70cc2 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
@@ -53,6 +53,8 @@
"-Werror",
],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
@@ -93,7 +95,6 @@
static_libs: ["libstagefright_m4vh263dec"],
shared_libs: [
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index 39b67ab..fda7028 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -353,7 +353,8 @@
bool portWillReset = false;
const bool fakeStride = true;
SoftVideoDecoderOMXComponent::handlePortSettingsChange(
- &portWillReset, buf_width, buf_height, cropSettingsMode, fakeStride);
+ &portWillReset, buf_width, buf_height,
+ OMX_COLOR_FormatYUV420Planar, cropSettingsMode, fakeStride);
if (portWillReset) {
if (mMode == MODE_H263) {
PVCleanUpVideoDecoder(mHandle);
diff --git a/media/libstagefright/codecs/m4v_h263/dec/exports.lds b/media/libstagefright/codecs/m4v_h263/dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/m4v_h263/dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.bp b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
index a95b807..6be4036 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
@@ -4,8 +4,7 @@
srcs: [
"src/bitstream_io.cpp",
- "src/combined_encode.cpp",
- "src/datapart_encode.cpp",
+ "src/combined_encode.cpp", "src/datapart_encode.cpp",
"src/dct.cpp",
"src/findhalfpel.cpp",
"src/fastcodemb.cpp",
@@ -31,6 +30,8 @@
"-Werror",
],
+ version_script: "exports.lds",
+
include_dirs: [
"frameworks/av/media/libstagefright/include",
"frameworks/native/include/media/openmax",
@@ -80,7 +81,7 @@
static_libs: ["libstagefright_m4vh263enc"],
shared_libs: [
- "libmedia_omx",
+ "libstagefright_foundation",
"libstagefright_omx",
"libutils",
"liblog",
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index 7b90a01..f6a7b0e 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -434,6 +434,14 @@
}
if (inHeader->nFilledLen > 0) {
+ OMX_ERRORTYPE error = validateInputBuffer(inHeader);
+ if (error != OMX_ErrorNone) {
+ ALOGE("b/69065651");
+ android_errorWriteLog(0x534e4554, "69065651");
+ mSignalledError = true;
+ notify(OMX_EventError, error, 0, 0);
+ return;
+ }
const uint8_t *inputData = NULL;
if (mInputDataIsMeta) {
inputData =
diff --git a/media/libstagefright/codecs/m4v_h263/enc/exports.lds b/media/libstagefright/codecs/m4v_h263/enc/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/m4v_h263/enc/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/mp3dec/Android.bp b/media/libstagefright/codecs/mp3dec/Android.bp
index c554a99..9fa9a4c 100644
--- a/media/libstagefright/codecs/mp3dec/Android.bp
+++ b/media/libstagefright/codecs/mp3dec/Android.bp
@@ -65,8 +65,11 @@
},
include_dirs: ["frameworks/av/media/libstagefright/include"],
- local_include_dirs: ["src"],
- export_include_dirs: ["include"],
+
+ export_include_dirs: [
+ "include",
+ "src",
+ ],
cflags: [
"-DOSCL_UNUSED_ARG(x)=(void)(x)",
@@ -96,6 +99,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
@@ -107,7 +112,6 @@
},
shared_libs: [
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
diff --git a/media/libstagefright/codecs/mp3dec/exports.lds b/media/libstagefright/codecs/mp3dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/mp3dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/mp3dec/src/pvmp3_decode_header.cpp b/media/libstagefright/codecs/mp3dec/src/pvmp3_decode_header.cpp
index d443b7c..bc5fd79 100644
--- a/media/libstagefright/codecs/mp3dec/src/pvmp3_decode_header.cpp
+++ b/media/libstagefright/codecs/mp3dec/src/pvmp3_decode_header.cpp
@@ -184,7 +184,7 @@
info->emphasis = (temp << 30) >> 30; /* 2 */
- if (!info->bitrate_index || info->sampling_frequency == 3)
+ if (!info->bitrate_index || info->bitrate_index == 15 || info->sampling_frequency == 3)
{
err = UNSUPPORTED_FREE_BITRATE;
}
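Background for the added check: in an MPEG audio frame header, bitrate index 0 selects free-format, index 15 is the forbidden value, and sampling-frequency code 3 is reserved, so all three must be rejected as unsupported here. A minimal sketch of the predicate:

#include <cstdint>

// Accept only headers whose bitrate index and sampling-frequency code are
// usable: 0 = free format (unsupported here), 15 = forbidden, freq code 3 = reserved.
static bool isUsableMp3Header(uint32_t bitrateIndex, uint32_t samplingFrequency) {
    return bitrateIndex != 0 && bitrateIndex != 15 && samplingFrequency != 3;
}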
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.bp b/media/libstagefright/codecs/mpeg2dec/Android.bp
index 9590e9f..fb0db8f 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.bp
+++ b/media/libstagefright/codecs/mpeg2dec/Android.bp
@@ -14,6 +14,8 @@
"-Wno-unused-variable",
],
+ version_script: "exports.lds",
+
include_dirs: [
"external/libmpeg2/decoder",
"external/libmpeg2/common",
@@ -22,7 +24,6 @@
],
shared_libs: [
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
diff --git a/media/libstagefright/codecs/mpeg2dec/exports.lds b/media/libstagefright/codecs/mpeg2dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/mpeg2dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/on2/dec/Android.bp b/media/libstagefright/codecs/on2/dec/Android.bp
index 59c1f5d..8a9399a 100644
--- a/media/libstagefright/codecs/on2/dec/Android.bp
+++ b/media/libstagefright/codecs/on2/dec/Android.bp
@@ -15,7 +15,6 @@
static_libs: ["libvpx"],
shared_libs: [
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
@@ -24,6 +23,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 3490008..8d5f3e7 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -30,7 +30,9 @@
// Only need to declare the highest supported profile and level here.
static const CodecProfileLevel kVP9ProfileLevels[] = {
- { OMX_VIDEO_VP9Profile0, OMX_VIDEO_VP9Level5 },
+ { OMX_VIDEO_VP9Profile0, OMX_VIDEO_VP9Level5 },
+ { OMX_VIDEO_VP9Profile2, OMX_VIDEO_VP9Level5 },
+ { OMX_VIDEO_VP9Profile2HDR, OMX_VIDEO_VP9Level5 },
};
SoftVPX::SoftVPX(
@@ -78,6 +80,10 @@
return cpuCoreCount;
}
+bool SoftVPX::supportDescribeHdrStaticInfo() {
+ return true;
+}
+
status_t SoftVPX::initDecoder() {
mCtx = new vpx_codec_ctx_t;
vpx_codec_err_t vpx_err;
@@ -146,15 +152,21 @@
uint32_t height = mImg->d_h;
outInfo = *outQueue.begin();
outHeader = outInfo->mHeader;
- CHECK_EQ(mImg->fmt, VPX_IMG_FMT_I420);
- handlePortSettingsChange(portWillReset, width, height);
+ CHECK(mImg->fmt == VPX_IMG_FMT_I420 || mImg->fmt == VPX_IMG_FMT_I42016);
+ OMX_COLOR_FORMATTYPE outputColorFormat = OMX_COLOR_FormatYUV420Planar;
+ int32_t bpp = 1;
+ if (mImg->fmt == VPX_IMG_FMT_I42016) {
+ outputColorFormat = OMX_COLOR_FormatYUV420Planar16;
+ bpp = 2;
+ }
+ handlePortSettingsChange(portWillReset, width, height, outputColorFormat);
if (*portWillReset) {
return true;
}
outHeader->nOffset = 0;
outHeader->nFlags = 0;
- outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
+ outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * bpp * 3) / 2;
outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
if (outputBufferSafe(outHeader)) {
uint8_t *dst = outHeader->pBuffer;
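The bpp factor added to nFilledLen covers VP9 Profile 2 output: VPX_IMG_FMT_I42016 stores each sample in 16 bits (OMX_COLOR_FormatYUV420Planar16), so a 10-bit 4:2:0 frame takes twice the bytes of the 8-bit case, at 1.5 samples per pixel either way. A quick size check:

#include <cstdio>
#include <cstddef>

// YUV 4:2:0 carries 1.5 samples per pixel; bytesPerSample is 1 for 8-bit
// output and 2 for the 16-bit container used by 10-bit Profile 2 output.
static size_t yuv420FrameBytes(size_t width, size_t height, size_t bytesPerSample) {
    return width * height * bytesPerSample * 3 / 2;
}

int main() {
    printf("1920x1080  8-bit: %zu bytes\n", yuv420FrameBytes(1920, 1080, 1));  // 3110400
    printf("1920x1080 16-bit: %zu bytes\n", yuv420FrameBytes(1920, 1080, 2));  // 6220800
    return 0;
}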
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index d6bb902..b62b526 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -40,10 +40,11 @@
virtual void onQueueFilled(OMX_U32 portIndex);
virtual void onPortFlushCompleted(OMX_U32 portIndex);
virtual void onReset();
+ virtual bool supportDescribeHdrStaticInfo();
private:
enum {
- kNumBuffers = 4
+ kNumBuffers = 10
};
enum {
diff --git a/media/libstagefright/codecs/on2/dec/exports.lds b/media/libstagefright/codecs/on2/dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/on2/dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/on2/enc/Android.bp b/media/libstagefright/codecs/on2/enc/Android.bp
index 5a52225..3d9feeb 100644
--- a/media/libstagefright/codecs/on2/enc/Android.bp
+++ b/media/libstagefright/codecs/on2/enc/Android.bp
@@ -13,6 +13,8 @@
cflags: ["-Wall", "-Werror"],
+ version_script: "exports.lds",
+
include_dirs: [
"frameworks/av/media/libstagefright/include",
"frameworks/native/include/media/openmax",
@@ -32,7 +34,6 @@
static_libs: ["libvpx"],
shared_libs: [
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
index 4c7290d..1ea1c85 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
@@ -69,6 +69,13 @@
codecReturn);
return codecReturn;
}
+ codecReturn = vpx_codec_control(mCodecContext, VP9E_SET_ROW_MT, 1);
+ if (codecReturn != VPX_CODEC_OK) {
+ ALOGE("Error setting VP9E_SET_ROW_MT to 1. vpx_codec_control() "
+ "returned %d", codecReturn);
+ return codecReturn;
+ }
+
// For VP9, we always set CPU_USED to 8 (because the realtime default is 0
// which is too slow).
codecReturn = vpx_codec_control(mCodecContext, VP8E_SET_CPUUSED, 8);
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index a5666da..f6257b1 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -653,6 +653,13 @@
return;
}
+ OMX_ERRORTYPE error = validateInputBuffer(inputBufferHeader);
+ if (error != OMX_ErrorNone) {
+ ALOGE("b/27569635");
+ android_errorWriteLog(0x534e4554, "27569635");
+ notify(OMX_EventError, error, 0, 0);
+ return;
+ }
const uint8_t *source =
inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
@@ -668,14 +675,6 @@
return;
}
} else {
- if (inputBufferHeader->nFilledLen < frameSize) {
- android_errorWriteLog(0x534e4554, "27569635");
- notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
- return;
- } else if (inputBufferHeader->nFilledLen > frameSize) {
- ALOGW("Input buffer contains too many pixels");
- }
-
if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
ConvertYUV420SemiPlanarToYUV420Planar(
source, mConversionBuffer, mWidth, mHeight);
diff --git a/media/libstagefright/codecs/on2/enc/exports.lds b/media/libstagefright/codecs/on2/enc/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/opus/dec/Android.bp b/media/libstagefright/codecs/opus/dec/Android.bp
index 88d6ec4..43318f2 100644
--- a/media/libstagefright/codecs/opus/dec/Android.bp
+++ b/media/libstagefright/codecs/opus/dec/Android.bp
@@ -14,7 +14,6 @@
shared_libs: [
"libopus",
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
@@ -23,6 +22,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/codecs/opus/dec/exports.lds b/media/libstagefright/codecs/opus/dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/opus/dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/raw/Android.bp b/media/libstagefright/codecs/raw/Android.bp
index f21d46f..c8d7d00 100644
--- a/media/libstagefright/codecs/raw/Android.bp
+++ b/media/libstagefright/codecs/raw/Android.bp
@@ -14,6 +14,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/codecs/raw/exports.lds b/media/libstagefright/codecs/raw/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/raw/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.bp b/media/libstagefright/codecs/vorbis/dec/Android.bp
index 628b36c..a9265cb 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.bp
+++ b/media/libstagefright/codecs/vorbis/dec/Android.bp
@@ -14,7 +14,6 @@
shared_libs: [
"libvorbisidec",
- "libmedia_omx",
"libstagefright_omx",
"libstagefright_foundation",
"libutils",
@@ -23,6 +22,8 @@
cflags: ["-Werror"],
+ version_script: "exports.lds",
+
sanitize: {
misc_undefined: [
"signed-integer-overflow",
diff --git a/media/libstagefright/codecs/vorbis/dec/exports.lds b/media/libstagefright/codecs/vorbis/dec/exports.lds
new file mode 100644
index 0000000..e24f3fa
--- /dev/null
+++ b/media/libstagefright/codecs/vorbis/dec/exports.lds
@@ -0,0 +1,5 @@
+{
+ global:
+ _Z22createSoftOMXComponentPKcPK16OMX_CALLBACKTYPEPvPP17OMX_COMPONENTTYPE;
+ local: *;
+};
diff --git a/media/libstagefright/codecs/xaacdec/Android.bp b/media/libstagefright/codecs/xaacdec/Android.bp
new file mode 100644
index 0000000..7392f1e
--- /dev/null
+++ b/media/libstagefright/codecs/xaacdec/Android.bp
@@ -0,0 +1,36 @@
+cc_library_shared {
+ name: "libstagefright_soft_xaacdec",
+ vendor_available: true,
+
+ srcs: [
+ "SoftXAAC.cpp",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/include",
+ "frameworks/native/include/media/openmax",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-DENABLE_MPEG_D_DRC"
+ ],
+
+ sanitize: {
+ // integer_overflow: true,
+ misc_undefined: [ "signed-integer-overflow", "unsigned-integer-overflow", ],
+ cfi: true,
+ },
+
+ static_libs: ["libxaacdec"],
+
+ shared_libs: [
+ "libstagefright_omx",
+ "libstagefright_foundation",
+ "libutils",
+ "libcutils",
+ "liblog",
+ ],
+
+ compile_multilib: "32",
+}
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
new file mode 100644
index 0000000..e0d9662
--- /dev/null
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -0,0 +1,1928 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftXAAC"
+#include <utils/Log.h>
+
+#include "SoftXAAC.h"
+
+#include <OMX_AudioExt.h>
+#include <OMX_IndexExt.h>
+#include <cutils/properties.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/misc.h>
+#include <math.h>
+
+#define DRC_DEFAULT_MOBILE_REF_LEVEL 64 /* 64*-0.25dB = -16 dB below full scale for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_CUT 127 /* maximum compression of dynamic range for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_BOOST 127 /* maximum compression of dynamic range for mobile conf */
+#define DRC_DEFAULT_MOBILE_DRC_HEAVY 1 /* switch for heavy compression for mobile conf */
+#define DRC_DEFAULT_MOBILE_ENC_LEVEL (-1) /* encoder target level; -1 => the value is unknown, otherwise dB step value (e.g. 64 for -16 dB) */
+#define DRC_KEY_AAC_DRC_EFFECT_TYPE (3) /* Default Effect type is "Limited playback" */
+/* REF_LEVEL of 64 pairs well with EFFECT_TYPE of 3. */
+#define DRC_DEFAULT_MOBILE_LOUDNESS_LEVEL (64) /* Default loudness value for MPEG-D DRC */
+
+#define PROP_DRC_OVERRIDE_REF_LEVEL "aac_drc_reference_level"
+#define PROP_DRC_OVERRIDE_CUT "aac_drc_cut"
+#define PROP_DRC_OVERRIDE_BOOST "aac_drc_boost"
+#define PROP_DRC_OVERRIDE_HEAVY "aac_drc_heavy"
+#define PROP_DRC_OVERRIDE_ENC_LEVEL "aac_drc_enc_target_level"
+#define PROP_DRC_OVERRIDE_EFFECT_TYPE "ro.aac_drc_effect_type"
+
+#define MAX_CHANNEL_COUNT 8 /* maximum number of audio channels that can be decoded */
+
+
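+// RETURN_IF_FATAL: on a fatal library error, log it and return the error code from
+// the calling function; non-fatal errors are only logged and execution continues.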
+#define RETURN_IF_FATAL(retval, str) \
+ if (retval & IA_FATAL_ERROR) { \
+ ALOGE("Error in %s: Returned: %d", str, retval); \
+ return retval; \
+ } else if (retval != IA_NO_ERROR) { \
+ ALOGW("Warning in %s: Returned: %d", str, retval); \
+ }
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
+static const OMX_U32 kSupportedProfiles[] = {
+ OMX_AUDIO_AACObjectLC,
+ OMX_AUDIO_AACObjectHE,
+ OMX_AUDIO_AACObjectHE_PS,
+ OMX_AUDIO_AACObjectLD,
+ OMX_AUDIO_AACObjectELD,
+};
+
+SoftXAAC::SoftXAAC(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ mIsADTS(false),
+ mInputBufferCount(0),
+ mOutputBufferCount(0),
+ mSignalledError(false),
+ mLastInHeader(NULL),
+ mPrevTimestamp(0),
+ mCurrentTimestamp(0),
+ mOutputPortSettingsChange(NONE),
+ mXheaacCodecHandle(NULL),
+ mMpegDDrcHandle(NULL),
+ mInputBufferSize(0),
+ mOutputFrameLength(1024),
+ mInputBuffer(NULL),
+ mOutputBuffer(NULL),
+ mSampFreq(0),
+ mNumChannels(0),
+ mPcmWdSz(0),
+ mChannelMask(0),
+ mIsCodecInitialized(false),
+ mIsCodecConfigFlushRequired(false),
+ mMpegDDRCPresent(0),
+ mDRCFlag(0)
+
+{
+ initPorts();
+ CHECK_EQ(initDecoder(), (status_t)OK);
+}
+
+SoftXAAC::~SoftXAAC() {
+ int errCode = deInitXAACDecoder();
+ if (0 != errCode) {
+ ALOGE("deInitXAACDecoder() failed %d",errCode);
+ }
+
+ mIsCodecInitialized = false;
+ mIsCodecConfigFlushRequired = false;
+}
+
+void SoftXAAC::initPorts() {
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+
+ def.nPortIndex = 0;
+ def.eDir = OMX_DirInput;
+ def.nBufferCountMin = kNumInputBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = 8192;
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 1;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/aac");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;
+
+ addPort(def);
+
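+ /* Output port: decoded raw PCM; buffer sized for up to MAX_CHANNEL_COUNT channels. */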
+ def.nPortIndex = 1;
+ def.eDir = OMX_DirOutput;
+ def.nBufferCountMin = kNumOutputBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = 4096 * MAX_CHANNEL_COUNT;
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 2;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+ addPort(def);
+}
+
+status_t SoftXAAC::initDecoder() {
+ status_t status = UNKNOWN_ERROR;
+
+ int ui_drc_val;
+ IA_ERRORCODE err_code = IA_NO_ERROR;
+ int loop = 0;
+
+ err_code = initXAACDecoder();
+ if(err_code != IA_NO_ERROR) {
+ if (NULL == mXheaacCodecHandle) {
+ ALOGE("AAC decoder handle is null");
+ }
+ if (NULL == mMpegDDrcHandle) {
+ ALOGE("MPEG-D DRC decoder handle is null");
+ }
+ for(loop= 1; loop < mMallocCount; loop++) {
+ if (mMemoryArray[loop] == NULL) {
+ ALOGE(" memory allocation error %d\n",loop);
+ break;
+ }
+ }
+ ALOGE("initXAACDecoder Failed");
+
+ for(loop = 0; loop < mMallocCount; loop++) {
+ if(mMemoryArray[loop])
+ free(mMemoryArray[loop]);
+ }
+ mMallocCount = 0;
+ return status;
+ } else {
+ status = OK;
+ }
+
+ mEndOfInput = false;
+ mEndOfOutput = false;
+
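+ /* The DRC parameters below may be overridden at run time via system properties;
+ otherwise the DRC_DEFAULT_MOBILE_* / DRC_KEY_* defaults defined above apply. */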
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get(PROP_DRC_OVERRIDE_REF_LEVEL, value, NULL))
+ {
+ ui_drc_val = atoi(value);
+ ALOGV("AAC decoder using desired DRC target reference level of %d instead of %d",ui_drc_val,
+ DRC_DEFAULT_MOBILE_REF_LEVEL);
+ }
+ else
+ {
+ ui_drc_val= DRC_DEFAULT_MOBILE_REF_LEVEL;
+ }
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL,
+ &ui_drc_val);
+
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL");
+#ifdef ENABLE_MPEG_D_DRC
+
+ /* Use ui_drc_val from PROP_DRC_OVERRIDE_REF_LEVEL or DRC_DEFAULT_MOBILE_REF_LEVEL
+ * for IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS too */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS,
+ &ui_drc_val);
+
+
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS");
+#endif
+
+
+ if (property_get(PROP_DRC_OVERRIDE_CUT, value, NULL))
+ {
+ ui_drc_val = atoi(value);
+ ALOGV("AAC decoder using desired DRC attenuation factor of %d instead of %d", ui_drc_val,
+ DRC_DEFAULT_MOBILE_DRC_CUT);
+ }
+ else
+ {
+ ui_drc_val=DRC_DEFAULT_MOBILE_DRC_CUT;
+ }
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT,
+ &ui_drc_val);
+
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT");
+
+ if (property_get(PROP_DRC_OVERRIDE_BOOST, value, NULL))
+ {
+ ui_drc_val = atoi(value);
+ ALOGV("AAC decoder using desired DRC boost factor of %d instead of %d", ui_drc_val,
+ DRC_DEFAULT_MOBILE_DRC_BOOST);
+ }
+ else
+ {
+ ui_drc_val = DRC_DEFAULT_MOBILE_DRC_BOOST;
+ }
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST,
+ &ui_drc_val);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST");
+
+ if (property_get(PROP_DRC_OVERRIDE_HEAVY, value, NULL))
+ {
+ ui_drc_val = atoi(value);
+ ALOGV("AAC decoder using desired Heavy compression factor of %d instead of %d", ui_drc_val,
+ DRC_DEFAULT_MOBILE_DRC_HEAVY);
+ }
+ else
+ {
+ ui_drc_val = DRC_DEFAULT_MOBILE_DRC_HEAVY;
+ }
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP,
+ &ui_drc_val);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP");
+
+#ifdef ENABLE_MPEG_D_DRC
+ if (property_get(PROP_DRC_OVERRIDE_EFFECT_TYPE, value, NULL))
+ {
+ ui_drc_val = atoi(value);
+ ALOGV("AAC decoder using desired DRC effect type of %d instead of %d", ui_drc_val,
+ DRC_KEY_AAC_DRC_EFFECT_TYPE);
+ }
+ else
+ {
+ ui_drc_val = DRC_KEY_AAC_DRC_EFFECT_TYPE;
+ }
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE,
+ &ui_drc_val);
+
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE");
+
+#endif
+ return status;
+}
+
+OMX_ERRORTYPE SoftXAAC::internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params) {
+
+ switch ((OMX_U32) index) {
+
+ case OMX_IndexParamAudioPortFormat:
+ {
+ OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+ (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (formatParams->nPortIndex > 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (formatParams->nIndex > 0) {
+ return OMX_ErrorNoMore;
+ }
+
+ formatParams->eEncoding =
+ (formatParams->nPortIndex == 0)
+ ? OMX_AUDIO_CodingAAC : OMX_AUDIO_CodingPCM;
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioAac:
+ {
+ OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
+ (OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+
+ if (!isValidOMXParam(aacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (aacParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ aacParams->nBitRate = 0;
+ aacParams->nAudioBandWidth = 0;
+ aacParams->nAACtools = 0;
+ aacParams->nAACERtools = 0;
+ aacParams->eAACProfile = OMX_AUDIO_AACObjectMain;
+
+ aacParams->eAACStreamFormat =
+ mIsADTS
+ ? OMX_AUDIO_AACStreamFormatMP4ADTS
+ : OMX_AUDIO_AACStreamFormatMP4FF;
+
+ aacParams->eChannelMode = OMX_AUDIO_ChannelModeStereo;
+
+ if (!isConfigured()) {
+ aacParams->nChannels = 1;
+ aacParams->nSampleRate = 44100;
+ aacParams->nFrameLength = 0;
+ } else {
+ aacParams->nChannels = mNumChannels;
+ aacParams->nSampleRate = mSampFreq;
+ aacParams->nFrameLength = mOutputFrameLength;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPcm:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (pcmParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ pcmParams->eNumData = OMX_NumericalDataSigned;
+ pcmParams->eEndian = OMX_EndianBig;
+ pcmParams->bInterleaved = OMX_TRUE;
+ pcmParams->nBitPerSample = 16;
+ pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+ pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
+ pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
+ pcmParams->eChannelMapping[2] = OMX_AUDIO_ChannelCF;
+ pcmParams->eChannelMapping[3] = OMX_AUDIO_ChannelLFE;
+ pcmParams->eChannelMapping[4] = OMX_AUDIO_ChannelLS;
+ pcmParams->eChannelMapping[5] = OMX_AUDIO_ChannelRS;
+
+ if (!isConfigured()) {
+ pcmParams->nChannels = 1;
+ pcmParams->nSamplingRate = 44100;
+ } else {
+ pcmParams->nChannels = mNumChannels;
+ pcmParams->nSamplingRate = mSampFreq;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioProfileQuerySupported:
+ {
+ OMX_AUDIO_PARAM_ANDROID_PROFILETYPE *profileParams =
+ (OMX_AUDIO_PARAM_ANDROID_PROFILETYPE *)params;
+
+ if (!isValidOMXParam(profileParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (profileParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (profileParams->nProfileIndex >= NELEM(kSupportedProfiles)) {
+ return OMX_ErrorNoMore;
+ }
+
+ profileParams->eProfile =
+ kSupportedProfiles[profileParams->nProfileIndex];
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalGetParameter(index, params);
+ }
+}
+
+OMX_ERRORTYPE SoftXAAC::internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params) {
+
+ switch ((int)index) {
+ case OMX_IndexParamStandardComponentRole:
+ {
+ const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+ (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (strncmp((const char *)roleParams->cRole,
+ "audio_decoder.aac",
+ OMX_MAX_STRINGNAME_SIZE - 1)) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPortFormat:
+ {
+ const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+ (const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (formatParams->nPortIndex > 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ if ((formatParams->nPortIndex == 0
+ && formatParams->eEncoding != OMX_AUDIO_CodingAAC)
+ || (formatParams->nPortIndex == 1
+ && formatParams->eEncoding != OMX_AUDIO_CodingPCM)) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioAac:
+ {
+ const OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
+ (const OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+
+ if (!isValidOMXParam(aacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (aacParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ if (aacParams->eAACStreamFormat == OMX_AUDIO_AACStreamFormatMP4FF) {
+ mIsADTS = false;
+ } else if (aacParams->eAACStreamFormat
+ == OMX_AUDIO_AACStreamFormatMP4ADTS) {
+ mIsADTS = true;
+ } else {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioAndroidAacDrcPresentation:
+ {
+ const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *aacPresParams =
+ (const OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE *)params;
+
+ if (!isValidOMXParam(aacPresParams)) {
+ ALOGE("set OMX_ErrorBadParameter");
+ return OMX_ErrorBadParameter;
+ }
+
+ // for the following parameters of the OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE structure,
+ // a value of -1 implies the parameter is not set by the application:
+ // nMaxOutputChannels -1 by default
+ // nDrcCut uses default platform properties, see initDecoder()
+ // nDrcBoost idem
+ // nHeavyCompression idem
+ // nTargetReferenceLevel idem
+ // nEncodedTargetLevel idem
+ if (aacPresParams->nMaxOutputChannels >= 0) {
+ int max;
+ if (aacPresParams->nMaxOutputChannels >= 8) { max = 8; }
+ else if (aacPresParams->nMaxOutputChannels >= 6) { max = 6; }
+ else if (aacPresParams->nMaxOutputChannels >= 2) { max = 2; }
+ else {
+ // -1 or 0: disable downmix, 1: mono
+ max = aacPresParams->nMaxOutputChannels;
+ }
+ }
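+ /* Note: the channel cap computed in 'max' above is not forwarded to the decoder here. */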
+ /* Apply DRC Changes */
+ IA_ERRORCODE err_code = setXAACDRCInfo(aacPresParams->nDrcCut,
+ aacPresParams->nDrcBoost,
+ aacPresParams->nTargetReferenceLevel,
+ aacPresParams->nHeavyCompression
+#ifdef ENABLE_MPEG_D_DRC
+ ,aacPresParams->nDrcEffectType
+#endif
+ ); // TODO: Revert this change
+ if (err_code != IA_NO_ERROR) {
+ ALOGE("Error in OMX_IndexParamAudioAndroidAacDrcPresentation");
+ return OMX_ErrorBadParameter;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPcm:
+ {
+ const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (pcmParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalSetParameter(index, params);
+ }
+}
+
+bool SoftXAAC::isConfigured() const {
+ return mInputBufferCount > 0;
+}
+
+void SoftXAAC::onQueueFilled(OMX_U32 /* portIndex */) {
+ if (mSignalledError || mOutputPortSettingsChange != NONE) {
+ ALOGE("onQueueFilled do not process %d %d",mSignalledError,mOutputPortSettingsChange);
+ return;
+ }
+
+ uint8_t* inBuffer = NULL;
+ uint32_t inBufferLength = 0;
+
+ List<BufferInfo *> &inQueue = getPortQueue(0);
+ List<BufferInfo *> &outQueue = getPortQueue(1);
+
+ signed int numOutBytes = 0;
+
+ /* If decoder call fails in between, then mOutputFrameLength is used */
+ /* Decoded output for AAC is 1024/2048 samples / channel */
+ /* TODO: For USAC mOutputFrameLength can go up to 4096 */
+ /* Note: entire buffer logic to save and retrieve assumes 2 bytes per*/
+ /* sample currently */
+ if (mIsCodecInitialized) {
+ numOutBytes = mOutputFrameLength * (mPcmWdSz/8) * mNumChannels;
+ if ((mPcmWdSz/8) != 2) {
+ ALOGE("XAAC assumes 2 bytes per sample! mPcmWdSz %d",mPcmWdSz);
+ }
+ }
+
+ while ((!inQueue.empty() || mEndOfInput) && !outQueue.empty()) {
+ if (!inQueue.empty()) {
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ /* No need to check inHeader != NULL, as inQueue is not empty */
+ mEndOfInput = (inHeader->nFlags & OMX_BUFFERFLAG_EOS) != 0;
+
+ if (mInputBufferCount == 0 && !(inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
+ ALOGW("first buffer should have OMX_BUFFERFLAG_CODECCONFIG set");
+ inHeader->nFlags |= OMX_BUFFERFLAG_CODECCONFIG;
+ }
+ if ((inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) != 0) {
+ inBuffer = inHeader->pBuffer + inHeader->nOffset;
+ inBufferLength = inHeader->nFilledLen;
+
+ /* GA header configuration sent to Decoder! */
+ int err_code = configXAACDecoder(inBuffer,inBufferLength);
+ if (0 != err_code) {
+ ALOGW("configXAACDecoder err_code = %d", err_code);
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, err_code, NULL);
+ return;
+ }
+ mInputBufferCount++;
+ mOutputBufferCount++; // fake increase of outputBufferCount to keep the counters aligned
+
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ mLastInHeader = NULL;
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+
+ // Only send out port settings changed event if both sample rate
+ // and mNumChannels are valid.
+ if (mSampFreq && mNumChannels && !mIsCodecConfigFlushRequired) {
+ ALOGV("Configuring decoder: %d Hz, %d channels", mSampFreq, mNumChannels);
+ notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+ }
+
+ return;
+ }
+
+ if (inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ mLastInHeader = NULL;
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ continue;
+ }
+
+ // Restore Offset and Length for Port reconfig case
+ size_t tempOffset = inHeader->nOffset;
+ size_t tempFilledLen = inHeader->nFilledLen;
+ if (mIsADTS) {
+ size_t adtsHeaderSize = 0;
+ // skip 30 bits, aac_frame_length follows.
+ // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????
+
+ const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;
+
+ bool signalError = false;
+ if (inHeader->nFilledLen < 7) {
+ ALOGE("Audio data too short to contain even the ADTS header. "
+ "Got %d bytes.", inHeader->nFilledLen);
+ hexdump(adtsHeader, inHeader->nFilledLen);
+ signalError = true;
+ } else {
+ bool protectionAbsent = (adtsHeader[1] & 1);
+
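+ /* aac_frame_length is a 13-bit field and counts the whole ADTS frame,
+ including the header bytes themselves. */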
+ unsigned aac_frame_length =
+ ((adtsHeader[3] & 3) << 11)
+ | (adtsHeader[4] << 3)
+ | (adtsHeader[5] >> 5);
+
+ if (inHeader->nFilledLen < aac_frame_length) {
+ ALOGE("Not enough audio data for the complete frame. "
+ "Got %d bytes, frame size according to the ADTS "
+ "header is %u bytes.",
+ inHeader->nFilledLen, aac_frame_length);
+ hexdump(adtsHeader, inHeader->nFilledLen);
+ signalError = true;
+ } else {
+ adtsHeaderSize = (protectionAbsent ? 7 : 9);
+ if (aac_frame_length < adtsHeaderSize) {
+ signalError = true;
+ } else {
+ inBuffer = (uint8_t *)adtsHeader + adtsHeaderSize;
+ inBufferLength = aac_frame_length - adtsHeaderSize;
+
+ inHeader->nOffset += adtsHeaderSize;
+ inHeader->nFilledLen -= adtsHeaderSize;
+ }
+ }
+ }
+
+ if (signalError) {
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, ERROR_MALFORMED, NULL);
+ return;
+ }
+
+ // insert buffer size and time stamp
+ if (mLastInHeader != inHeader) {
+ mCurrentTimestamp = inHeader->nTimeStamp;
+ mLastInHeader = inHeader;
+ } else {
+ mCurrentTimestamp = mPrevTimestamp +
+ mOutputFrameLength * 1000000ll / mSampFreq;
+ }
+ } else {
+ inBuffer = inHeader->pBuffer + inHeader->nOffset;
+ inBufferLength = inHeader->nFilledLen;
+ mLastInHeader = inHeader;
+ mCurrentTimestamp = inHeader->nTimeStamp;
+ }
+
+ int numLoops = 0;
+ signed int prevSampleRate = mSampFreq;
+ signed int prevNumChannels = mNumChannels;
+
+ /* XAAC decoder expects first frame to be fed via configXAACDecoder API */
+ /* which should initialize the codec. Once this state is reached, call the */
+ /* decodeXAACStream API with same frame to decode! */
+ if (!mIsCodecInitialized) {
+ int err_code = configXAACDecoder(inBuffer,inBufferLength);
+ if (0 != err_code) {
+ ALOGW("configXAACDecoder Failed 2 err_code = %d", err_code);
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, err_code, NULL);
+ return;
+ }
+ mIsCodecConfigFlushRequired = true;
+ }
+
+ if (!mSampFreq || !mNumChannels) {
+ if ((mInputBufferCount > 2) && (mOutputBufferCount <= 1)) {
+ ALOGW("Invalid AAC stream");
+ ALOGW("mSampFreq %d mNumChannels %d ",mSampFreq,mNumChannels);
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ } else if ((mSampFreq != prevSampleRate) ||
+ (mNumChannels != prevNumChannels)) {
+ ALOGV("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
+ prevSampleRate, mSampFreq, prevNumChannels, mNumChannels);
+ inHeader->nOffset = tempOffset;
+ inHeader->nFilledLen = tempFilledLen;
+ notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
+ mOutputPortSettingsChange = AWAITING_DISABLED;
+ return;
+ }
+
+ signed int bytesConsumed = 0;
+ int errorCode = 0;
+ if (mIsCodecInitialized) {
+ errorCode = decodeXAACStream(inBuffer,inBufferLength, &bytesConsumed, &numOutBytes);
+ } else {
+ ALOGW("Assumption that first frame after header initializes decoder failed!");
+ }
+ inHeader->nFilledLen -= bytesConsumed;
+ inHeader->nOffset += bytesConsumed;
+
+ if (inHeader->nFilledLen != 0) {
+ ALOGE("All data not consumed");
+ }
+
+ /* In case of error, decoder would have given out empty buffer */
+ if ((0 != errorCode) && (0 == numOutBytes) && mIsCodecInitialized) {
+ numOutBytes = mOutputFrameLength * (mPcmWdSz/8) * mNumChannels;
+ }
+ numLoops++;
+
+ if (0 == bytesConsumed) {
+ ALOGW("bytesConsumed is zero");
+ }
+
+ if (errorCode) {
+ /* Clear buffer for output buffer is done inside XAAC codec */
+ /* TODO - Check if below memset is on top of reset inside codec */
+ memset(mOutputBuffer, 0, numOutBytes); // TODO: check for overflow, ASAN
+ // Discard input buffer.
+ inHeader->nFilledLen = 0;
+ // fall through
+ }
+
+ if (inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ mInputBufferCount++;
+ inQueue.erase(inQueue.begin());
+ mLastInHeader = NULL;
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ } else {
+ ALOGV("inHeader->nFilledLen = %d", inHeader->nFilledLen);
+ }
+
+ if (!outQueue.empty() && numOutBytes) {
+ BufferInfo *outInfo = *outQueue.begin();
+ OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ if (outHeader->nOffset != 0) {
+ ALOGE("outHeader->nOffset != 0 is not handled");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+
+ signed short *outBuffer =
+ reinterpret_cast<signed short *>(outHeader->pBuffer + outHeader->nOffset);
+ int samplesize = mNumChannels * sizeof(int16_t);
+ if (outHeader->nOffset
+ + mOutputFrameLength * samplesize
+ > outHeader->nAllocLen) {
+ ALOGE("buffer overflow");
+ mSignalledError = true;
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ return;
+ }
+ memcpy(outBuffer, mOutputBuffer, numOutBytes);
+ outHeader->nFilledLen = numOutBytes;
+
+ if (mEndOfInput && !outQueue.empty()) {
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ mEndOfOutput = true;
+ } else {
+ outHeader->nFlags = 0;
+ }
+ outHeader->nTimeStamp = mCurrentTimestamp;
+ mPrevTimestamp = mCurrentTimestamp;
+
+ mOutputBufferCount++;
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+ }
+ }
+
+ if (mEndOfInput) {
+ if (!outQueue.empty()) {
+ if (!mEndOfOutput) {
+ ALOGV(" empty block signaling EOS");
+ // send partial or empty block signaling EOS
+ mEndOfOutput = true;
+ BufferInfo *outInfo = *outQueue.begin();
+ OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ outHeader->nFilledLen = 0;
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+ outHeader->nTimeStamp = mPrevTimestamp ;
+
+ mOutputBufferCount++;
+ outInfo->mOwnedByUs = false;
+ outQueue.erase(outQueue.begin());
+ outInfo = NULL;
+ notifyFillBufferDone(outHeader);
+ outHeader = NULL;
+ }
+ break; // if outQueue not empty but no more output
+ }
+ }
+ }
+}
+
+void SoftXAAC::onPortFlushCompleted(OMX_U32 portIndex) {
+ if (portIndex == 0) {
+ // Make sure that the next buffer output does not still
+ // depend on fragments from the last one decoded.
+ // drain all existing data
+ if (mIsCodecInitialized) {
+ IA_ERRORCODE err_code = configflushDecode();
+ if (err_code != IA_NO_ERROR) {
+ ALOGE("Error in configflushDecode: Error %d", err_code);
+ }
+ }
+ drainDecoder();
+ mLastInHeader = NULL;
+ mEndOfInput = false;
+ } else {
+ mEndOfOutput = false;
+ }
+}
+
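+/* Flush the decoder's internal memory after an input-port flush; once the decoder
+ reports init done, re-query the stream info and update the configured state. */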
+int SoftXAAC::configflushDecode() {
+ IA_ERRORCODE err_code;
+ UWORD32 ui_init_done;
+ uint32_t inBufferLength=8203;
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_FLUSH_MEM,
+ NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_FLUSH_MEM");
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_INPUT_BYTES,
+ 0,
+ &inBufferLength);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_FLUSH_MEM,
+ NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_FLUSH_MEM");
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_DONE_QUERY,
+ &ui_init_done);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_DONE_QUERY");
+
+
+ if (ui_init_done) {
+ err_code = getXAACStreamInfo();
+ RETURN_IF_FATAL(err_code, "getXAACStreamInfo");
+
+ ALOGV("Found Codec with below config---\nsampFreq %d\nnumChannels %d\npcmWdSz %d\nchannelMask %d\noutputFrameLength %d",
+ mSampFreq,mNumChannels,mPcmWdSz,mChannelMask,mOutputFrameLength);
+ if(mNumChannels > MAX_CHANNEL_COUNT) {
+ ALOGE(" No of channels are more than max channels\n");
+ mIsCodecInitialized = false;
+ }
+ else
+ mIsCodecInitialized = true;
+ }
+ return err_code;
+
+}
+int SoftXAAC::drainDecoder() {
+ return 0;
+}
+
+void SoftXAAC::onReset() {
+ drainDecoder();
+
+ // reset the "configured" state
+ mInputBufferCount = 0;
+ mOutputBufferCount = 0;
+ mEndOfInput = false;
+ mEndOfOutput = false;
+ mLastInHeader = NULL;
+
+ mSignalledError = false;
+ mOutputPortSettingsChange = NONE;
+}
+
+void SoftXAAC::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
+ if (portIndex != 1) {
+ return;
+ }
+
+ switch (mOutputPortSettingsChange) {
+ case NONE:
+ break;
+
+ case AWAITING_DISABLED:
+ {
+ CHECK(!enabled);
+ mOutputPortSettingsChange = AWAITING_ENABLED;
+ break;
+ }
+
+ default:
+ {
+ CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
+ CHECK(enabled);
+ mOutputPortSettingsChange = NONE;
+ break;
+ }
+ }
+}
+
+int SoftXAAC::initXAACDecoder() {
+ LOOPIDX i;
+
+ /* Error code */
+ IA_ERRORCODE err_code = IA_NO_ERROR;
+
+ /* First part */
+ /* Error Handler Init */
+ /* Get Library Name, Library Version and API Version */
+ /* Initialize API structure + Default config set */
+ /* Set config params from user */
+ /* Initialize memory tables */
+ /* Get memory information and allocate memory */
+
+ /* Memory variables */
+ UWORD32 ui_proc_mem_tabs_size;
+ /* API size */
+ UWORD32 pui_api_size;
+
+ mInputBufferSize = 0;
+ mInputBuffer = 0;
+ mOutputBuffer = 0;
+ mMallocCount = 0;
+
+ /* Process struct initing end */
+ /* ******************************************************************/
+ /* Initialize API structure and set config params to default */
+ /* ******************************************************************/
+
+ /* Get the API size */
+ err_code = ixheaacd_dec_api(NULL,
+ IA_API_CMD_GET_API_SIZE,
+ 0,
+ &pui_api_size);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_API_SIZE");
+
+ if (mMallocCount == MAX_MEM_ALLOCS) {
+ ALOGE("mMemoryArray is full");
+ return IA_FATAL_ERROR;
+ }
+
+ /* Allocate memory for API */
+ mMemoryArray[mMallocCount] = memalign(4, pui_api_size);
+ if (mMemoryArray[mMallocCount] == NULL) {
+ ALOGE("malloc for pui_api_size + 4 >> %d Failed",pui_api_size + 4);
+ return IA_FATAL_ERROR;
+ }
+ /* Set API object with the memory allocated */
+ mXheaacCodecHandle =
+ (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
+ mMallocCount++;
+
+ /* Set the config params to default values */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS,
+ NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS");
+#ifdef ENABLE_MPEG_D_DRC
+ /* Get the API size */
+ err_code = ia_drc_dec_api(NULL, IA_API_CMD_GET_API_SIZE, 0, &pui_api_size);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_API_SIZE");
+
+ if (mMallocCount == MAX_MEM_ALLOCS) {
+ ALOGE("mMemoryArray is full");
+ return IA_FATAL_ERROR;
+ }
+
+ /* Allocate memory for API */
+ mMemoryArray[mMallocCount] = memalign(4, pui_api_size);
+
+ if(mMemoryArray[mMallocCount] == NULL)
+ {
+ ALOGE("malloc for drc api structure Failed");
+ return IA_FATAL_ERROR;
+ }
+ memset(mMemoryArray[mMallocCount],0,pui_api_size);
+
+ /* Set API object with the memory allocated */
+ mMpegDDrcHandle =
+ (pVOID)((WORD8*)mMemoryArray[mMallocCount]);
+ mMallocCount++;
+
+
+ /* Set the config params to default values */
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS,
+ NULL);
+
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_API_PRE_CONFIG_PARAMS");
+#endif
+
+ /* ******************************************************************/
+ /* Set config parameters */
+ /* ******************************************************************/
+ UWORD32 ui_mp4_flag = 1;
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_ISMP4,
+ &ui_mp4_flag);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_ISMP4");
+
+ /* ******************************************************************/
+ /* Initialize Memory info tables */
+ /* ******************************************************************/
+
+ /* Get memory info tables size */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_MEMTABS_SIZE,
+ 0,
+ &ui_proc_mem_tabs_size);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
+
+ if (mMallocCount == MAX_MEM_ALLOCS) {
+ ALOGE("mMemoryArray is full");
+ return IA_FATAL_ERROR;
+ }
+
+ mMemoryArray[mMallocCount] = memalign(4, ui_proc_mem_tabs_size);
+ if (mMemoryArray[mMallocCount] == NULL) {
+ ALOGE("Malloc for size (ui_proc_mem_tabs_size + 4) = %d failed!",ui_proc_mem_tabs_size + 4);
+ return IA_FATAL_ERROR;
+ }
+ mMallocCount++;
+ /* Set pointer for process memory tables */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_MEMTABS_PTR,
+ 0,
+ (pVOID)((WORD8*)mMemoryArray[mMallocCount - 1]));
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
+
+
+ /* initialize the API, post config, fill memory tables */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS,
+ NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS");
+
+ /* ******************************************************************/
+ /* Allocate Memory with info from library */
+ /* ******************************************************************/
+ /* There are four different types of memories, that needs to be allocated */
+ /* persistent,scratch,input and output */
+ for(i = 0; i < 4; i++) {
+ int ui_size = 0, ui_alignment = 0, ui_type = 0;
+ pVOID pv_alloc_ptr;
+
+ /* Get memory size */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_MEM_INFO_SIZE,
+ i,
+ &ui_size);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_SIZE");
+
+ /* Get memory alignment */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_MEM_INFO_ALIGNMENT,
+ i,
+ &ui_alignment);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_ALIGNMENT");
+
+ /* Get memory type */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_MEM_INFO_TYPE,
+ i,
+ &ui_type);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_TYPE");
+
+ if (mMallocCount == MAX_MEM_ALLOCS) {
+ ALOGE("mMemoryArray is full");
+ return IA_FATAL_ERROR;
+ }
+
+ mMemoryArray[mMallocCount] =
+ memalign(ui_alignment , ui_size);
+ if (mMemoryArray[mMallocCount] == NULL) {
+ ALOGE("Malloc for size (ui_size + ui_alignment) = %d failed!",ui_size + ui_alignment);
+ return IA_FATAL_ERROR;
+ }
+ pv_alloc_ptr =
+ (pVOID )((WORD8*)mMemoryArray[mMallocCount]);
+ mMallocCount++;
+
+ /* Set the buffer pointer */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_MEM_PTR,
+ i,
+ pv_alloc_ptr);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+ if (ui_type == IA_MEMTYPE_INPUT) {
+ mInputBuffer = (pWORD8)pv_alloc_ptr;
+ mInputBufferSize = ui_size;
+
+ }
+
+ if (ui_type == IA_MEMTYPE_OUTPUT) {
+ mOutputBuffer = (pWORD8)pv_alloc_ptr;
+ }
+
+ }
+ /* End first part */
+
+ return IA_NO_ERROR;
+}
+
+int SoftXAAC::configXAACDecoder(uint8_t* inBuffer, uint32_t inBufferLength) {
+
+ UWORD32 ui_init_done;
+ int32_t i_bytes_consumed;
+
+ if (mInputBufferSize < inBufferLength) {
+ ALOGE("Cannot config AAC, input buffer size %d < inBufferLength %d",mInputBufferSize,inBufferLength);
+ return false;
+ }
+
+ /* Copy the buffer passed by Android plugin to codec input buffer */
+ memcpy(mInputBuffer, inBuffer, inBufferLength);
+
+ /* Set number of bytes to be processed */
+ IA_ERRORCODE err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_INPUT_BYTES,
+ 0,
+ &inBufferLength);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+ if (mIsCodecConfigFlushRequired) {
+ /* If codec is already initialized, then GA header is passed again */
+ /* Need to call the Flush API instead of INIT_PROCESS */
+ mIsCodecInitialized = false; /* Codec needs to be Reinitialized after flush */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_GA_HDR,
+ NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_GA_HDR");
+ }
+ else {
+ /* Initialize the process */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_PROCESS,
+ NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_PROCESS");
+ }
+
+ /* Checking for end of initialization */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_DONE_QUERY,
+ &ui_init_done);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_DONE_QUERY");
+
+ /* How much buffer is used in input buffers */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_CURIDX_INPUT_BUF,
+ 0,
+ &i_bytes_consumed);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+
+ if(ui_init_done){
+ err_code = getXAACStreamInfo();
+ RETURN_IF_FATAL(err_code, "getXAACStreamInfo");
+
+ ALOGI("Found Codec with below config---\nsampFreq %d\nnumChannels %d\npcmWdSz %d\nchannelMask %d\noutputFrameLength %d",
+ mSampFreq,mNumChannels,mPcmWdSz,mChannelMask,mOutputFrameLength);
+ mIsCodecInitialized = true;
+
+#ifdef ENABLE_MPEG_D_DRC
+ err_code = configMPEGDDrc();
+ RETURN_IF_FATAL(err_code, "configMPEGDDrc");
+#endif
+ }
+
+ return IA_NO_ERROR;
+}
+int SoftXAAC::configMPEGDDrc()
+{
+ IA_ERRORCODE err_code = IA_NO_ERROR;
+ int i_effect_type;
+ int i_loud_norm;
+ int i_target_loudness;
+ unsigned int i_sbr_mode;
+ int n_mems;
+ int i;
+
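+ /* Mirror the AAC decoder's sample rate, channel count, PCM word size and DRC
+ settings into the MPEG-D DRC decoder, then allocate and wire up its buffers. */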
+#ifdef ENABLE_MPEG_D_DRC
+ {
+
+ /* Sampling Frequency */
+ {
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_SAMP_FREQ, &mSampFreq);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_SAMP_FREQ");
+ }
+ /* Total Number of Channels */
+ {
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS, &mNumChannels);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS");
+ }
+
+ /* PCM word size */
+ {
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_PCM_WDSZ, &mPcmWdSz);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_PCM_WDSZ");
+ }
+
+ /*Set Effect Type*/
+
+ {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE, &i_effect_type);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE");
+
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE, &i_effect_type);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE");
+
+ }
+
+/*Set target loudness */
+
+ {
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS, &i_target_loudness);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS");
+
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS, &i_target_loudness);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS");
+
+ }
+
+ /*Set loud_norm_flag*/
+ {
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM, &i_loud_norm);
+ RETURN_IF_FATAL(err_code,"IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM");
+
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_LOUD_NORM, &i_loud_norm);
+ RETURN_IF_FATAL(err_code,"IA_DRC_DEC_CONFIG_DRC_LOUD_NORM");
+
+ }
+
+
+
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &i_sbr_mode);
+ RETURN_IF_FATAL(err_code,"IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, NULL);
+
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS");
+
+
+
+ for (i = 0; i < (WORD32)2; i++) {
+ WORD32 ui_size, ui_alignment, ui_type;
+ pVOID pv_alloc_ptr;
+
+ /* Get memory size */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_GET_MEM_INFO_SIZE, i, &ui_size);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_SIZE");
+
+ /* Get memory alignment */
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_GET_MEM_INFO_ALIGNMENT, i, &ui_alignment);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_ALIGNMENT");
+
+ /* Get memory type */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_GET_MEM_INFO_TYPE, i, &ui_type);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEM_INFO_TYPE");
+
+ if (mMallocCount == MAX_MEM_ALLOCS) {
+ ALOGE("mMemoryArray is full");
+ return IA_FATAL_ERROR;
+ }
+ mMemoryArray[mMallocCount] = memalign(4, ui_size);
+ if (mMemoryArray[mMallocCount] == NULL) {
+ ALOGE(" Cannot create requested memory %d",ui_size);
+ return IA_FATAL_ERROR;
+ }
+ pv_alloc_ptr =
+ (pVOID )((WORD8*)mMemoryArray[mMallocCount]);
+ mMallocCount++;
+
+ /* Set the buffer pointer */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_MEM_PTR, i, pv_alloc_ptr);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+ }
+ {
+ WORD32 ui_size;
+ ui_size=8192*2;
+ if (mMallocCount == MAX_MEM_ALLOCS) {
+ ALOGE("mMemoryArray is full");
+ return IA_FATAL_ERROR;
+ }
+
+ mMemoryArray[mMallocCount]=memalign(4, ui_size);
+ if (mMemoryArray[mMallocCount] == NULL) {
+ ALOGE(" Cannot create requested memory %d",ui_size);
+ return IA_FATAL_ERROR;
+ }
+
+ mDrcInBuf=(int8_t *)mMemoryArray[mMallocCount];
+ mMallocCount++;
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR,
+ 2, /*mOutputBuffer*/ mDrcInBuf);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+
+ if (mMallocCount == MAX_MEM_ALLOCS) {
+ ALOGE("mMemoryArray is full");
+ return IA_FATAL_ERROR;
+ }
+
+ mMemoryArray[mMallocCount]=memalign(4, ui_size);
+ if (mMemoryArray[mMallocCount] == NULL) {
+ ALOGE(" Cannot create requested memory %d",ui_size);
+ return IA_FATAL_ERROR;
+ }
+
+ mDrcOutBuf=(int8_t *)mMemoryArray[mMallocCount];
+ mMallocCount++;
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEM_PTR,
+ 3, /*mOutputBuffer*/ mDrcOutBuf);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEM_PTR");
+ }
+ /* ITTIAM: DRC buffers
+ buf[0] - contains the loudness-related extension element payload
+ buf[1] - contains the extension element payload */
+ {
+ VOID *p_array[2][16];
+ WORD32 ii;
+ WORD32 buf_sizes[2][16];
+ WORD32 num_elements;
+ WORD32 num_config_ext;
+ WORD32 bit_str_fmt = 1;
+
+
+
+ WORD32 uo_num_chan;
+
+ memset(buf_sizes, 0, 32 * sizeof(WORD32));
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_BUF_SIZES, &buf_sizes[0][0]);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_BUF_SIZES");
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_PTR, &p_array);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_EXT_ELE_PTR");
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_SET_BUFF_PTR, 0);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_SET_BUFF_PTR");
+
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_NUM_ELE, &num_elements);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_NUM_ELE");
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_NUM_CONFIG_EXT, &num_config_ext);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_NUM_CONFIG_EXT");
+
+ for (ii = 0; ii < num_config_ext; ii++) {
+ /*copy loudness bitstream*/
+ if (buf_sizes[0][ii] > 0) {
+ memcpy(mDrcInBuf, p_array[0][ii], buf_sizes[0][ii]);
+
+ /*Set bitstream_split_format */
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+ /* Set number of bytes to be processed */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_INPUT_BYTES_IL_BS, 0,
+ &buf_sizes[0][ii]);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES_IL_BS");
+
+
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_IL_BSF_BUFF, NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_CPY_IL_BSF_BUFF");
+
+
+ mDRCFlag = 1;
+ }
+ }
+
+ for (ii = 0; ii < num_elements; ii++) {
+ /*copy config bitstream*/
+ if (buf_sizes[1][ii] > 0) {
+ memcpy(mDrcInBuf, p_array[1][ii], buf_sizes[1][ii]);
+ /* Set number of bytes to be processed */
+
+ /*Set bitstream_split_format */
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_INPUT_BYTES_IC_BS, 0,
+ &buf_sizes[1][ii]);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES_IC_BS");
+
+
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_IC_BSF_BUFF, NULL);
+
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_CPY_IC_BSF_BUFF");
+
+ mDRCFlag = 1;
+ }
+ }
+
+ if (mDRCFlag == 1) {
+ mMpegDDRCPresent = 1;
+ } else {
+ mMpegDDRCPresent = 0;
+ }
+
+
+ /*Read interface buffer config file bitstream*/
+ if(mMpegDDRCPresent==1){
+
+ WORD32 interface_is_present = 1;
+ WORD32 frame_length;
+
+ if(i_sbr_mode != 0)
+ {
+ if (i_sbr_mode == 1)
+ {
+ frame_length = 2048;
+ }
+ else if(i_sbr_mode == 3)
+ {
+ frame_length = 4096;
+ }
+ else
+ {
+ frame_length = 1024;
+ }
+ }
+ else
+ {
+ frame_length = 4096;
+ }
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_FRAME_SIZE, &frame_length);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_FRAME_SIZE");
+
+
+
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_INT_PRESENT, &interface_is_present);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_INT_PRESENT");
+
+
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_IN_BSF_BUFF, NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_CPY_IN_BSF_BUFF");
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_PROCESS, NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_INIT_PROCESS");
+
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS, &uo_num_chan);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_NUM_CHANNELS");
+ }
+ }
+ }
+#endif
+
+return err_code;
+
+}
+int SoftXAAC::decodeXAACStream(uint8_t* inBuffer,
+ uint32_t inBufferLength,
+ int32_t *bytesConsumed,
+ int32_t *outBytes) {
+ if (mInputBufferSize < inBufferLength) {
+ ALOGE("Cannot config AAC, input buffer size %d < inBufferLength %d",mInputBufferSize,inBufferLength);
+ return -1;
+ }
+
+ /* Copy the buffer passed by Android plugin to codec input buffer */
+ memcpy(mInputBuffer,inBuffer,inBufferLength);
+
+ /* Set number of bytes to be processed */
+ IA_ERRORCODE err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_INPUT_BYTES,
+ 0,
+ &inBufferLength);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+ /* Execute process */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_EXECUTE,
+ IA_CMD_TYPE_DO_EXECUTE,
+ NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+
+ UWORD32 ui_exec_done;
+ /* Checking for end of processing */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_EXECUTE,
+ IA_CMD_TYPE_DONE_QUERY,
+ &ui_exec_done);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DONE_QUERY");
+
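+ /* If decoding is not yet complete (ui_exec_done != 1), fetch any MPEG-D DRC gain
+ payload produced by the AAC decoder and copy it into the DRC decoder. */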
+#ifdef ENABLE_MPEG_D_DRC
+ {
+ if (ui_exec_done != 1) {
+ VOID *p_array; // ITTIAM:buffer to handle gain payload
+ WORD32 buf_size = 0; // ITTIAM:gain payload length
+ WORD32 bit_str_fmt = 1;
+ WORD32 gain_stream_flag = 1;
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
+
+
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
+
+
+ if (buf_size > 0) {
+ /*Set bitstream_split_format */
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+ memcpy(mDrcInBuf, p_array, buf_size);
+ /* Set number of bytes to be processed */
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+ err_code = ia_drc_dec_api(
+ mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+
+ /* Execute process */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+ IA_CMD_TYPE_INIT_CPY_BSF_BUFF, NULL);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+
+ mMpegDDRCPresent = 1;
+ }
+ }
+ }
+#endif
+ /* How much buffer is used in input buffers */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_CURIDX_INPUT_BUF,
+ 0,
+ bytesConsumed);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+
+ /* Get the output bytes */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_OUTPUT_BYTES,
+ 0,
+ outBytes);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
+#ifdef ENABLE_MPEG_D_DRC
+
+ if (mMpegDDRCPresent == 1) {
+ memcpy(mDrcInBuf, mOutputBuffer, *outBytes);
+ err_code = ia_drc_dec_api(mMpegDDrcHandle,
+ IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE,
+ IA_CMD_TYPE_DO_EXECUTE, NULL);
+ RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+
+ memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+ }
+#endif
+ return err_code;
+}
+
+int SoftXAAC::deInitXAACDecoder() {
+ ALOGI("deInitXAACDecoder");
+
+ /* Tell that the input is over in this buffer */
+ IA_ERRORCODE err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_INPUT_OVER,
+ 0,
+ NULL);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_INPUT_OVER");
+
+ for(int i = 0; i < mMallocCount; i++)
+ {
+ if(mMemoryArray[i])
+ free(mMemoryArray[i]);
+ }
+ mMallocCount = 0;
+
+ return err_code;
+}
+
+IA_ERRORCODE SoftXAAC::getXAACStreamInfo() {
+ IA_ERRORCODE err_code = IA_NO_ERROR;
+
+ /* Sampling frequency */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_SAMP_FREQ,
+ &mSampFreq);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SAMP_FREQ");
+
+ /* Total Number of Channels */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_NUM_CHANNELS,
+ &mNumChannels);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_NUM_CHANNELS");
+
+ /* PCM word size */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_PCM_WDSZ,
+ &mPcmWdSz);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_PCM_WDSZ");
+
+ /* channel mask to tell the arrangement of channels in bit stream */
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MASK,
+ &mChannelMask);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MASK");
+
+ /* Channel mode to tell MONO/STEREO/DUAL-MONO/NONE_OF_THESE */
+ UWORD32 ui_channel_mode;
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MODE,
+ &ui_channel_mode);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_CHANNEL_MODE");
+ if(ui_channel_mode == 0)
+ ALOGV("Channel Mode: MONO_OR_PS\n");
+ else if(ui_channel_mode == 1)
+ ALOGV("Channel Mode: STEREO\n");
+ else if(ui_channel_mode == 2)
+ ALOGV("Channel Mode: DUAL-MONO\n");
+ else
+ ALOGV("Channel Mode: NONE_OF_THESE or MULTICHANNEL\n");
+
+ /* Channel mode to tell SBR PRESENT/NOT_PRESENT */
+ UWORD32 ui_sbr_mode;
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE,
+ &ui_sbr_mode);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+ if(ui_sbr_mode == 0)
+ ALOGV("SBR Mode: NOT_PRESENT\n");
+ else if(ui_sbr_mode == 1)
+ ALOGV("SBR Mode: PRESENT\n");
+ else
+ ALOGV("SBR Mode: ILLEGAL\n");
+
+ /* mOutputFrameLength = 1024 * (1 + SBR_MODE) for AAC */
+ /* For USAC it could be 1024 * 3 , support to query */
+ /* not yet added in codec */
+ mOutputFrameLength = 1024 * (1 + ui_sbr_mode);
+
+ ALOGI("mOutputFrameLength %d ui_sbr_mode %d",mOutputFrameLength,ui_sbr_mode);
+
+ return IA_NO_ERROR;
+}
+
+IA_ERRORCODE SoftXAAC::setXAACDRCInfo(int32_t drcCut,
+ int32_t drcBoost,
+ int32_t drcRefLevel,
+ int32_t drcHeavyCompression
+ #ifdef ENABLE_MPEG_D_DRC
+ ,int32_t drEffectType
+ #endif
+ ) {
+ IA_ERRORCODE err_code = IA_NO_ERROR;
+
+ int32_t ui_drc_enable = 1;
+ int32_t i_effect_type, i_target_loudness, i_loud_norm;
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_ENABLE,
+ &ui_drc_enable);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_ENABLE");
+ if (drcCut !=-1) {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT,
+ &drcCut);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_CUT");
+ }
+
+ if (drcBoost !=-1) {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST,
+ &drcBoost);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_BOOST");
+ }
+
+ if (drcRefLevel != -1) {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL,
+ &drcRefLevel);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LEVEL");
+ }
+#ifdef ENABLE_MPEG_D_DRC
+ if (drcRefLevel != -1) {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS,
+ &drcRefLevel);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_DRC_TARGET_LOUDNESS");
+ }
+#endif
+ if (drcHeavyCompression != -1) {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP,
+ &drcHeavyCompression);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_HEAVY_COMP");
+ }
+
+#ifdef ENABLE_MPEG_D_DRC
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+ IA_API_CMD_SET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE,
+ &drEffectType);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_DRC_EFFECT_TYPE");
+#endif
+
+#ifdef ENABLE_MPEG_D_DRC
+ /*Set Effect Type*/
+
+ {
+ err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE, &i_effect_type);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_EFFECT_TYPE");
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE, &i_effect_type);
+
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_EFFECT_TYPE");
+
+ }
+
+/*Set target loudness */
+
+ {
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS, &i_target_loudness);
+ RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_TARGET_LOUDNESS");
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS, &i_target_loudness);
+ RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_DRC_TARGET_LOUDNESS");
+
+ }
+ /*Set loud_norm_flag*/
+ {
+ err_code = ixheaacd_dec_api(
+ mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+ IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM, &i_loud_norm);
+ RETURN_IF_FATAL(err_code,"IA_ENHAACPLUS_DEC_CONFIG_PARAM_DRC_LOUD_NORM");
+
+ err_code =
+ ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+ IA_DRC_DEC_CONFIG_DRC_LOUD_NORM, &i_loud_norm);
+
+ RETURN_IF_FATAL(err_code,"IA_DRC_DEC_CONFIG_DRC_LOUD_NORM");
+
+ }
+
+#endif
+
+
+ return IA_NO_ERROR;
+}
+
+} // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+ const char *name, const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+ ALOGI("createSoftOMXComponent for SoftXAACDEC");
+ return new android::SoftXAAC(name, callbacks, appData, component);
+}
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.h b/media/libstagefright/codecs/xaacdec/SoftXAAC.h
new file mode 100644
index 0000000..0b3a612
--- /dev/null
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFTXAAC_H_
+#define SOFTXAAC_H_
+
+#include <media/stagefright/omx/SimpleSoftOMXComponent.h>
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "ixheaacd_type_def.h"
+#include "ixheaacd_error_standards.h"
+#include "ixheaacd_error_handler.h"
+#include "ixheaacd_apicmd_standards.h"
+#include "ixheaacd_memory_standards.h"
+#include "ixheaacd_aac_config.h"
+
+#include "impd_apicmd_standards.h"
+#include "impd_drc_config_params.h"
+
+#define MAX_MEM_ALLOCS 100
+
+extern "C" IA_ERRORCODE ixheaacd_dec_api(pVOID p_ia_module_obj,
+ WORD32 i_cmd, WORD32 i_idx, pVOID pv_value);
+extern "C" IA_ERRORCODE ia_drc_dec_api(pVOID p_ia_module_obj,
+ WORD32 i_cmd, WORD32 i_idx, pVOID pv_value);
+extern "C" IA_ERRORCODE ixheaacd_get_config_param(pVOID p_ia_process_api_obj,
+ pWORD32 pi_samp_freq,
+ pWORD32 pi_num_chan,
+ pWORD32 pi_pcm_wd_sz,
+ pWORD32 pi_channel_mask);
+
+namespace android {
+
+struct SoftXAAC : public SimpleSoftOMXComponent {
+ SoftXAAC(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ virtual ~SoftXAAC();
+
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params);
+
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params);
+
+ virtual void onQueueFilled(OMX_U32 portIndex);
+ virtual void onPortFlushCompleted(OMX_U32 portIndex);
+ virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+ virtual void onReset();
+
+private:
+ enum {
+ kNumInputBuffers = 4,
+ kNumOutputBuffers = 4,
+ kNumDelayBlocksMax = 8,
+ };
+
+ bool mIsADTS;
+ size_t mInputBufferCount;
+ size_t mOutputBufferCount;
+ bool mSignalledError;
+ OMX_BUFFERHEADERTYPE *mLastInHeader;
+ int64_t mPrevTimestamp;
+ int64_t mCurrentTimestamp;
+ uint32_t mBufSize;
+
+ enum {
+ NONE,
+ AWAITING_DISABLED,
+ AWAITING_ENABLED
+ } mOutputPortSettingsChange;
+
+ void initPorts();
+ status_t initDecoder();
+ bool isConfigured() const;
+ int drainDecoder();
+ int initXAACDecoder();
+ int deInitXAACDecoder();
+
+ int configXAACDecoder(uint8_t* inBuffer, uint32_t inBufferLength);
+ int configMPEGDDrc();
+ int decodeXAACStream(uint8_t* inBuffer,
+ uint32_t inBufferLength,
+ int32_t *bytesConsumed,
+ int32_t *outBytes);
+
+ int configflushDecode();
+ IA_ERRORCODE getXAACStreamInfo();
+ IA_ERRORCODE setXAACDRCInfo(int32_t drcCut,
+ int32_t drcBoost,
+ int32_t drcRefLevel,
+ int32_t drcHeavyCompression
+#ifdef ENABLE_MPEG_D_DRC
+ ,int32_t drEffectType
+#endif
+ );
+
+ bool mEndOfInput;
+ bool mEndOfOutput;
+
+ void* mXheaacCodecHandle;
+ void* mMpegDDrcHandle;
+ uint32_t mInputBufferSize;
+ uint32_t mOutputFrameLength;
+ int8_t* mInputBuffer;
+ int8_t* mOutputBuffer;
+ int32_t mSampFreq;
+ int32_t mNumChannels;
+ int32_t mPcmWdSz;
+ int32_t mChannelMask;
+ bool mIsCodecInitialized;
+ bool mIsCodecConfigFlushRequired;
+ int8_t *mDrcInBuf;
+ int8_t *mDrcOutBuf;
+ int32_t mMpegDDRCPresent;
+ int32_t mDRCFlag;
+
+
+ void* mMemoryArray[MAX_MEM_ALLOCS];
+ int32_t mMallocCount;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftXAAC);
+
+};
+
+} // namespace android
+
+#endif // SOFTXAAC_H_
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index a0654c7..05f4104 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -19,13 +19,28 @@
#include <utils/Log.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/ColorConverter.h>
#include <media/stagefright/MediaErrors.h>
#include "libyuv/convert_from.h"
#include "libyuv/video_common.h"
+#include <functional>
+#include <sys/time.h>
#define USE_LIBYUV
+#define PERF_PROFILING 0
+
+
+#if defined(__aarch64__) || defined(__ARM_NEON__)
+#define USE_NEON_Y410 1
+#else
+#define USE_NEON_Y410 0
+#endif
+
+#if USE_NEON_Y410
+#include <arm_neon.h>
+#endif
namespace android {
@@ -43,6 +58,11 @@
bool ColorConverter::isValid() const {
switch (mSrcFormat) {
+ case OMX_COLOR_FormatYUV420Planar16:
+ if (mDstFormat == OMX_COLOR_FormatYUV444Y410) {
+ return true;
+ }
+ // fall-thru
case OMX_COLOR_FormatYUV420Planar:
return mDstFormat == OMX_COLOR_Format16bitRGB565
|| mDstFormat == OMX_COLOR_Format32BitRGBA8888
@@ -59,6 +79,12 @@
}
}
+bool ColorConverter::isDstRGB() const {
+ return mDstFormat == OMX_COLOR_Format16bitRGB565
+ || mDstFormat == OMX_COLOR_Format32BitRGBA8888
+ || mDstFormat == OMX_COLOR_Format32bitBGRA8888;
+}
+
ColorConverter::BitmapParams::BitmapParams(
void *bits,
size_t width, size_t height,
@@ -81,10 +107,16 @@
case OMX_COLOR_Format32bitBGRA8888:
case OMX_COLOR_Format32BitRGBA8888:
+ case OMX_COLOR_FormatYUV444Y410:
mBpp = 4;
mStride = 4 * mWidth;
break;
+ case OMX_COLOR_FormatYUV420Planar16:
+ mBpp = 2;
+ mStride = 2 * mWidth;
+ break;
+
case OMX_COLOR_FormatYUV420Planar:
case OMX_COLOR_FormatCbYCrY:
case OMX_QCOM_COLOR_FormatYVU420SemiPlanar:
@@ -129,6 +161,12 @@
dstWidth, dstHeight,
dstCropLeft, dstCropTop, dstCropRight, dstCropBottom, mDstFormat);
+ if (!((src.mCropLeft & 1) == 0
+ && src.cropWidth() == dst.cropWidth()
+ && src.cropHeight() == dst.cropHeight())) {
+ return ERROR_UNSUPPORTED;
+ }
+
status_t err;
switch (mSrcFormat) {
@@ -140,6 +178,19 @@
#endif
break;
+ case OMX_COLOR_FormatYUV420Planar16:
+ {
+#if PERF_PROFILING
+ int64_t startTimeUs = ALooper::GetNowUs();
+#endif
+ err = convertYUV420Planar16(src, dst);
+#if PERF_PROFILING
+ int64_t endTimeUs = ALooper::GetNowUs();
+ ALOGD("convertYUV420Planar16 took %lld us", (long long) (endTimeUs - startTimeUs));
+#endif
+ break;
+ }
+
case OMX_COLOR_FormatCbYCrY:
err = convertCbYCrY(src, dst);
break;
@@ -172,12 +223,6 @@
uint8_t *kAdjustedClip = initClip();
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ dst.mCropTop * dst.mWidth + dst.mCropLeft;
@@ -232,12 +277,6 @@
status_t ColorConverter::convertYUV420PlanarUseLibYUV(
const BitmapParams &src, const BitmapParams &dst) {
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint8_t *dst_ptr = (uint8_t *)dst.mBits
+ dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
@@ -274,90 +313,120 @@
return OK;
}
-void ColorConverter::writeToDst(
- void *dst_ptr, uint8_t *kAdjustedClip, bool uncropped,
- signed r1, signed g1, signed b1,
- signed r2, signed g2, signed b2) {
- switch (mDstFormat) {
+std::function<void (void *, void *, void *, size_t,
+ signed *, signed *, signed *, signed *)>
+getReadFromSrc(OMX_COLOR_FORMATTYPE srcFormat) {
+ switch(srcFormat) {
+ case OMX_COLOR_FormatYUV420Planar:
+ return [](void *src_y, void *src_u, void *src_v, size_t x,
+ signed *y1, signed *y2, signed *u, signed *v) {
+ *y1 = ((uint8_t*)src_y)[x] - 16;
+ *y2 = ((uint8_t*)src_y)[x + 1] - 16;
+ *u = ((uint8_t*)src_u)[x / 2] - 128;
+ *v = ((uint8_t*)src_v)[x / 2] - 128;
+ };
+ case OMX_COLOR_FormatYUV420Planar16:
+ return [](void *src_y, void *src_u, void *src_v, size_t x,
+ signed *y1, signed *y2, signed *u, signed *v) {
+ *y1 = (signed)(((uint16_t*)src_y)[x] >> 2) - 16;
+ *y2 = (signed)(((uint16_t*)src_y)[x + 1] >> 2) - 16;
+ *u = (signed)(((uint16_t*)src_u)[x / 2] >> 2) - 128;
+ *v = (signed)(((uint16_t*)src_v)[x / 2] >> 2) - 128;
+ };
+ default:
+ TRESPASS();
+ }
+ return nullptr;
+}
+
+std::function<void (void *, bool, signed, signed, signed, signed, signed, signed)>
+getWriteToDst(OMX_COLOR_FORMATTYPE dstFormat, uint8_t *kAdjustedClip) {
+ switch (dstFormat) {
case OMX_COLOR_Format16bitRGB565:
{
- uint32_t rgb1 =
- ((kAdjustedClip[r1] >> 3) << 11)
- | ((kAdjustedClip[g1] >> 2) << 5)
- | (kAdjustedClip[b1] >> 3);
+ return [kAdjustedClip](void *dst_ptr, bool uncropped,
+ signed r1, signed g1, signed b1,
+ signed r2, signed g2, signed b2) {
+ uint32_t rgb1 =
+ ((kAdjustedClip[r1] >> 3) << 11)
+ | ((kAdjustedClip[g1] >> 2) << 5)
+ | (kAdjustedClip[b1] >> 3);
- if (uncropped) {
- uint32_t rgb2 =
- ((kAdjustedClip[r2] >> 3) << 11)
- | ((kAdjustedClip[g2] >> 2) << 5)
- | (kAdjustedClip[b2] >> 3);
+ if (uncropped) {
+ uint32_t rgb2 =
+ ((kAdjustedClip[r2] >> 3) << 11)
+ | ((kAdjustedClip[g2] >> 2) << 5)
+ | (kAdjustedClip[b2] >> 3);
- *(uint32_t *)dst_ptr = (rgb2 << 16) | rgb1;
- } else {
- *(uint16_t *)dst_ptr = rgb1;
- }
- break;
+ *(uint32_t *)dst_ptr = (rgb2 << 16) | rgb1;
+ } else {
+ *(uint16_t *)dst_ptr = rgb1;
+ }
+ };
}
case OMX_COLOR_Format32BitRGBA8888:
{
- ((uint32_t *)dst_ptr)[0] =
- (kAdjustedClip[r1])
- | (kAdjustedClip[g1] << 8)
- | (kAdjustedClip[b1] << 16)
- | (0xFF << 24);
-
- if (uncropped) {
- ((uint32_t *)dst_ptr)[1] =
- (kAdjustedClip[r2])
- | (kAdjustedClip[g2] << 8)
- | (kAdjustedClip[b2] << 16)
+ return [kAdjustedClip](void *dst_ptr, bool uncropped,
+ signed r1, signed g1, signed b1,
+ signed r2, signed g2, signed b2) {
+ ((uint32_t *)dst_ptr)[0] =
+ (kAdjustedClip[r1])
+ | (kAdjustedClip[g1] << 8)
+ | (kAdjustedClip[b1] << 16)
| (0xFF << 24);
- }
- break;
+
+ if (uncropped) {
+ ((uint32_t *)dst_ptr)[1] =
+ (kAdjustedClip[r2])
+ | (kAdjustedClip[g2] << 8)
+ | (kAdjustedClip[b2] << 16)
+ | (0xFF << 24);
+ }
+ };
}
case OMX_COLOR_Format32bitBGRA8888:
{
- ((uint32_t *)dst_ptr)[0] =
- (kAdjustedClip[b1])
- | (kAdjustedClip[g1] << 8)
- | (kAdjustedClip[r1] << 16)
- | (0xFF << 24);
-
- if (uncropped) {
- ((uint32_t *)dst_ptr)[1] =
- (kAdjustedClip[b2])
- | (kAdjustedClip[g2] << 8)
- | (kAdjustedClip[r2] << 16)
+ return [kAdjustedClip](void *dst_ptr, bool uncropped,
+ signed r1, signed g1, signed b1,
+ signed r2, signed g2, signed b2) {
+ ((uint32_t *)dst_ptr)[0] =
+ (kAdjustedClip[b1])
+ | (kAdjustedClip[g1] << 8)
+ | (kAdjustedClip[r1] << 16)
| (0xFF << 24);
- }
- break;
+
+ if (uncropped) {
+ ((uint32_t *)dst_ptr)[1] =
+ (kAdjustedClip[b2])
+ | (kAdjustedClip[g2] << 8)
+ | (kAdjustedClip[r2] << 16)
+ | (0xFF << 24);
+ }
+ };
}
default:
- break;
+ TRESPASS();
}
+ return nullptr;
}
+
status_t ColorConverter::convertYUV420Planar(
const BitmapParams &src, const BitmapParams &dst) {
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint8_t *kAdjustedClip = initClip();
+ auto readFromSrc = getReadFromSrc(mSrcFormat);
+ auto writeToDst = getWriteToDst(mDstFormat, kAdjustedClip);
+
uint8_t *dst_ptr = (uint8_t *)dst.mBits
- + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
+ + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
- const uint8_t *src_y =
- (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft;
+ uint8_t *src_y = (uint8_t *)src.mBits
+ + src.mCropTop * src.mStride + src.mCropLeft * src.mBpp;
- const uint8_t *src_u =
- (const uint8_t *)src.mBits + src.mStride * src.mHeight
- + (src.mCropTop / 2) * (src.mStride / 2) + src.mCropLeft / 2;
+ uint8_t *src_u = (uint8_t *)src.mBits + src.mStride * src.mHeight
+ + (src.mCropTop / 2) * (src.mStride / 2) + src.mCropLeft / 2 * src.mBpp;
- const uint8_t *src_v =
- src_u + (src.mStride / 2) * (src.mHeight / 2);
+ uint8_t *src_v = src_u + (src.mStride / 2) * (src.mHeight / 2);
for (size_t y = 0; y < src.cropHeight(); ++y) {
for (size_t x = 0; x < src.cropWidth(); x += 2) {
@@ -379,11 +448,8 @@
// clip range -278 .. 535
- signed y1 = (signed)src_y[x] - 16;
- signed y2 = (signed)src_y[x + 1] - 16;
-
- signed u = (signed)src_u[x / 2] - 128;
- signed v = (signed)src_v[x / 2] - 128;
+ signed y1, y2, u, v;
+ readFromSrc(src_y, src_u, src_v, x, &y1, &y2, &u, &v);
signed u_b = u * 517;
signed u_g = -u * 100;
@@ -401,8 +467,7 @@
signed r2 = (tmp2 + v_r) / 256;
bool uncropped = x + 1 < src.cropWidth();
- (void)writeToDst(dst_ptr + x * dst.mBpp,
- kAdjustedClip, uncropped, r1, g1, b1, r2, g2, b2);
+ writeToDst(dst_ptr + x * dst.mBpp, uncropped, r1, g1, b1, r2, g2, b2);
}
src_y += src.mStride;
@@ -418,16 +483,247 @@
return OK;
}
+status_t ColorConverter::convertYUV420Planar16(
+ const BitmapParams &src, const BitmapParams &dst) {
+ if (mDstFormat == OMX_COLOR_FormatYUV444Y410) {
+ return convertYUV420Planar16ToY410(src, dst);
+ }
+
+ return convertYUV420Planar(src, dst);
+}
+
+/*
+ * Pack 10-bit YUV into RGBA_1010102.
+ *
+ * Media sends 10-bit YUV in an RGBA_1010102 format buffer. SurfaceFlinger
+ * handles the conversion to RGB using the RenderEngine fallback.
+ *
+ * We do not perform a YUV->RGB conversion here; however, the BT.2020 YUV
+ * to full-range RGB conversion is given below for reference:
+ *
+ * B = 1.168 *(Y - 64) + 2.148 *(U - 512)
+ * G = 1.168 *(Y - 64) - 0.652 *(V - 512) - 0.188 *(U - 512)
+ * R = 1.168 *(Y - 64) + 1.683 *(V - 512)
+ *
+ * B = 1196/1024 *(Y - 64) + 2200/1024 *(U - 512)
+ * G = .................... - 668/1024 *(V - 512) - 192/1024 *(U - 512)
+ * R = .................... + 1723/1024 *(V - 512)
+ *
+ * min_B = (1196 *(- 64) + 2200 *(- 512)) / 1024 = -1175
+ * min_G = (1196 *(- 64) - 668 *(1023 - 512) - 192 *(1023 - 512)) / 1024 = -504
+ * min_R = (1196 *(- 64) + 1723 *(- 512)) / 1024 = -937
+ *
+ * max_B = (1196 *(1023 - 64) + 2200 *(1023 - 512)) / 1024 = 2218
+ * max_G = (1196 *(1023 - 64) - 668 *(- 512) - 192 *(- 512)) / 1024 = 1551
+ * max_R = (1196 *(1023 - 64) + 1723 *(1023 - 512)) / 1024 = 1980
+ *
+ * clip range -1175 .. 2218
+ *
+ */
+
+#if !USE_NEON_Y410
+
+status_t ColorConverter::convertYUV420Planar16ToY410(
+ const BitmapParams &src, const BitmapParams &dst) {
+ uint8_t *dst_ptr = (uint8_t *)dst.mBits
+ + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
+
+ const uint8_t *src_y =
+ (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft * src.mBpp;
+
+ const uint8_t *src_u =
+ (const uint8_t *)src.mBits + src.mStride * src.mHeight
+ + (src.mCropTop / 2) * (src.mStride / 2) + (src.mCropLeft / 2) * src.mBpp;
+
+ const uint8_t *src_v =
+ src_u + (src.mStride / 2) * (src.mHeight / 2);
+
+ // Converting two lines at a time, slightly faster
+ for (size_t y = 0; y < src.cropHeight(); y += 2) {
+ uint32_t *dst_top = (uint32_t *) dst_ptr;
+ uint32_t *dst_bot = (uint32_t *) (dst_ptr + dst.mStride);
+ uint16_t *ptr_ytop = (uint16_t*) src_y;
+ uint16_t *ptr_ybot = (uint16_t*) (src_y + src.mStride);
+ uint16_t *ptr_u = (uint16_t*) src_u;
+ uint16_t *ptr_v = (uint16_t*) src_v;
+
+ uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
+ size_t x = 0;
+ for (; x < src.cropWidth() - 3; x += 4) {
+ u01 = *((uint32_t*)ptr_u); ptr_u += 2;
+ v01 = *((uint32_t*)ptr_v); ptr_v += 2;
+
+ y01 = *((uint32_t*)ptr_ytop); ptr_ytop += 2;
+ y23 = *((uint32_t*)ptr_ytop); ptr_ytop += 2;
+ y45 = *((uint32_t*)ptr_ybot); ptr_ybot += 2;
+ y67 = *((uint32_t*)ptr_ybot); ptr_ybot += 2;
+
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
+
+ *dst_top++ = ((y01 & 0x3FF) << 10) | uv0;
+ *dst_top++ = ((y01 >> 16) << 10) | uv0;
+ *dst_top++ = ((y23 & 0x3FF) << 10) | uv1;
+ *dst_top++ = ((y23 >> 16) << 10) | uv1;
+
+ *dst_bot++ = ((y45 & 0x3FF) << 10) | uv0;
+ *dst_bot++ = ((y45 >> 16) << 10) | uv0;
+ *dst_bot++ = ((y67 & 0x3FF) << 10) | uv1;
+ *dst_bot++ = ((y67 >> 16) << 10) | uv1;
+ }
+
+ // There should be at most 2 more pixels to process. Note that we don't
+ // need to consider the odd case, as the buffer is always aligned to an even width.
+ if (x < src.cropWidth()) {
+ u01 = *ptr_u;
+ v01 = *ptr_v;
+ y01 = *((uint32_t*)ptr_ytop);
+ y45 = *((uint32_t*)ptr_ybot);
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ *dst_top++ = ((y01 & 0x3FF) << 10) | uv0;
+ *dst_top++ = ((y01 >> 16) << 10) | uv0;
+ *dst_bot++ = ((y45 & 0x3FF) << 10) | uv0;
+ *dst_bot++ = ((y45 >> 16) << 10) | uv0;
+ }
+
+ src_y += src.mStride * 2;
+ src_u += src.mStride / 2;
+ src_v += src.mStride / 2;
+ dst_ptr += dst.mStride * 2;
+ }
+
+ return OK;
+}
+
+#else
+
+status_t ColorConverter::convertYUV420Planar16ToY410(
+ const BitmapParams &src, const BitmapParams &dst) {
+ uint8_t *out = (uint8_t *)dst.mBits
+ + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
+
+ const uint8_t *src_y =
+ (const uint8_t *)src.mBits + src.mCropTop * src.mStride + src.mCropLeft * src.mBpp;
+
+ const uint8_t *src_u =
+ (const uint8_t *)src.mBits + src.mStride * src.mHeight
+ + (src.mCropTop / 2) * (src.mStride / 2) + (src.mCropLeft / 2) * src.mBpp;
+
+ const uint8_t *src_v =
+ src_u + (src.mStride / 2) * (src.mHeight / 2);
+
+ for (size_t y = 0; y < src.cropHeight(); y++) {
+ uint16_t *ptr_y = (uint16_t*) src_y;
+ uint16_t *ptr_u = (uint16_t*) src_u;
+ uint16_t *ptr_v = (uint16_t*) src_v;
+ uint32_t *ptr_out = (uint32_t *) out;
+
+ // Process 16 pixels at a time.
+ uint32_t *ptr_limit = ptr_out + (src.cropWidth() & ~15);
+ while (ptr_out < ptr_limit) {
+ uint16x4_t u0123 = vld1_u16(ptr_u); ptr_u += 4;
+ uint16x4_t u4567 = vld1_u16(ptr_u); ptr_u += 4;
+ uint16x4_t v0123 = vld1_u16(ptr_v); ptr_v += 4;
+ uint16x4_t v4567 = vld1_u16(ptr_v); ptr_v += 4;
+ uint16x4_t y0123 = vld1_u16(ptr_y); ptr_y += 4;
+ uint16x4_t y4567 = vld1_u16(ptr_y); ptr_y += 4;
+ uint16x4_t y89ab = vld1_u16(ptr_y); ptr_y += 4;
+ uint16x4_t ycdef = vld1_u16(ptr_y); ptr_y += 4;
+
+ uint32x2_t uvtempl;
+ uint32x4_t uvtempq;
+
+ uvtempq = vaddw_u16(vshll_n_u16(v0123, 20), u0123);
+
+ uvtempl = vget_low_u32(uvtempq);
+ uint32x4_t uv0011 = vreinterpretq_u32_u64(
+ vaddw_u32(vshll_n_u32(uvtempl, 32), uvtempl));
+
+ uvtempl = vget_high_u32(uvtempq);
+ uint32x4_t uv2233 = vreinterpretq_u32_u64(
+ vaddw_u32(vshll_n_u32(uvtempl, 32), uvtempl));
+
+ uvtempq = vaddw_u16(vshll_n_u16(v4567, 20), u4567);
+
+ uvtempl = vget_low_u32(uvtempq);
+ uint32x4_t uv4455 = vreinterpretq_u32_u64(
+ vaddw_u32(vshll_n_u32(uvtempl, 32), uvtempl));
+
+ uvtempl = vget_high_u32(uvtempq);
+ uint32x4_t uv6677 = vreinterpretq_u32_u64(
+ vaddw_u32(vshll_n_u32(uvtempl, 32), uvtempl));
+
+ uint32x4_t dsttemp;
+
+ dsttemp = vorrq_u32(uv0011, vshll_n_u16(y0123, 10));
+ vst1q_u32(ptr_out, dsttemp); ptr_out += 4;
+
+ dsttemp = vorrq_u32(uv2233, vshll_n_u16(y4567, 10));
+ vst1q_u32(ptr_out, dsttemp); ptr_out += 4;
+
+ dsttemp = vorrq_u32(uv4455, vshll_n_u16(y89ab, 10));
+ vst1q_u32(ptr_out, dsttemp); ptr_out += 4;
+
+ dsttemp = vorrq_u32(uv6677, vshll_n_u16(ycdef, 10));
+ vst1q_u32(ptr_out, dsttemp); ptr_out += 4;
+ }
+
+ src_y += src.mStride;
+ if (y & 1) {
+ src_u += src.mStride / 2;
+ src_v += src.mStride / 2;
+ }
+ out += dst.mStride;
+ }
+
+ // Process the leftovers outside the loop, 2 pixels at a time. Note that we don't
+ // need to consider the odd case, as the buffer is always aligned to an even width.
+ if (src.cropWidth() & 15) {
+ size_t xstart = (src.cropWidth() & ~15);
+
+ uint8_t *out = (uint8_t *)dst.mBits + dst.mCropTop * dst.mStride
+ + (dst.mCropLeft + xstart) * dst.mBpp;
+
+ const uint8_t *src_y = (const uint8_t *)src.mBits + src.mCropTop * src.mStride
+ + (src.mCropLeft + xstart) * src.mBpp;
+
+ const uint8_t *src_u = (const uint8_t *)src.mBits + src.mStride * src.mHeight
+ + (src.mCropTop / 2) * (src.mStride / 2)
+ + ((src.mCropLeft + xstart) / 2) * src.mBpp;
+
+ const uint8_t *src_v = src_u + (src.mStride / 2) * (src.mHeight / 2);
+
+ for (size_t y = 0; y < src.cropHeight(); y++) {
+ uint16_t *ptr_y = (uint16_t*) src_y;
+ uint16_t *ptr_u = (uint16_t*) src_u;
+ uint16_t *ptr_v = (uint16_t*) src_v;
+ uint32_t *ptr_out = (uint32_t *) out;
+ for (size_t x = xstart; x < src.cropWidth(); x += 2) {
+ uint16_t u = *ptr_u++;
+ uint16_t v = *ptr_v++;
+ uint32_t y01 = *((uint32_t*)ptr_y); ptr_y += 2;
+ uint32_t uv = u | (((uint32_t)v) << 20);
+ *ptr_out++ = ((y01 & 0x3FF) << 10) | uv;
+ *ptr_out++ = ((y01 >> 16) << 10) | uv;
+ }
+ src_y += src.mStride;
+ if (y & 1) {
+ src_u += src.mStride / 2;
+ src_v += src.mStride / 2;
+ }
+ out += dst.mStride;
+ }
+ }
+
+ return OK;
+}
+
+#endif // USE_NEON_Y410
+
status_t ColorConverter::convertQCOMYUV420SemiPlanar(
const BitmapParams &src, const BitmapParams &dst) {
uint8_t *kAdjustedClip = initClip();
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ dst.mCropTop * dst.mWidth + dst.mCropLeft;
@@ -496,12 +792,6 @@
uint8_t *kAdjustedClip = initClip();
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ dst.mCropTop * dst.mWidth + dst.mCropLeft;
@@ -568,12 +858,6 @@
const BitmapParams &src, const BitmapParams &dst) {
uint8_t *kAdjustedClip = initClip();
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ dst.mCropTop * dst.mWidth + dst.mCropLeft;
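For reference, a minimal standalone sketch (not part of the patch; the helper name is illustrative) of the 32-bit word layout that convertYUV420Planar16ToY410 produces: U occupies bits [9:0], Y bits [19:10], V bits [29:20], and the two alpha bits [31:30] are left at zero, matching the packing expressions above.

#include <cstdint>
#include <cstdio>

// Pack one 10-bit Y/U/V triple into the Y410-style word used above.
static inline uint32_t packY410(uint16_t y, uint16_t u, uint16_t v) {
    return (uint32_t)(u & 0x3FF)
         | ((uint32_t)(y & 0x3FF) << 10)
         | ((uint32_t)(v & 0x3FF) << 20);
}

int main() {
    // Mid-grey luma with neutral chroma (all components 512) packs to 0x20080200.
    printf("0x%08X\n", (unsigned)packY410(512, 512, 512));
    return 0;
}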
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index a07787a..657a05b 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -18,10 +18,11 @@
#include <utils/Log.h>
#include "../include/SoftwareRenderer.h"
-
#include <cutils/properties.h> // for property_get
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/SurfaceUtils.h>
#include <system/window.h>
#include <ui/Fence.h>
#include <ui/GraphicBufferMapper.h>
@@ -30,7 +31,6 @@
namespace android {
-
static int ALIGN(int x, int y) {
// y must be a power of 2.
return (x + y - 1) & ~(y - 1);
@@ -50,7 +50,9 @@
mCropBottom(0),
mCropWidth(0),
mCropHeight(0),
- mRotationDegrees(rotation) {
+ mRotationDegrees(rotation),
+ mDataSpace(HAL_DATASPACE_UNKNOWN) {
+ memset(&mHDRStaticInfo, 0, sizeof(mHDRStaticInfo));
}
SoftwareRenderer::~SoftwareRenderer() {
@@ -58,7 +60,8 @@
mConverter = NULL;
}
-void SoftwareRenderer::resetFormatIfChanged(const sp<AMessage> &format) {
+void SoftwareRenderer::resetFormatIfChanged(
+ const sp<AMessage> &format, size_t numOutputBuffers) {
CHECK(format != NULL);
int32_t colorFormatNew;
@@ -76,13 +79,26 @@
cropBottomNew = heightNew - 1;
}
+ // The native window buffer format for high-bitdepth content may
+ // also depend on the dataspace.
+ android_dataspace dataSpace;
+ bool dataSpaceChangedForPlanar16 = false;
+ if (colorFormatNew == OMX_COLOR_FormatYUV420Planar16
+ && format->findInt32("android._dataspace", (int32_t *)&dataSpace)
+ && dataSpace != mDataSpace) {
+ // Do not modify mDataSpace here; it is only updated later,
+ // when we call native_window_set_buffers_data_space().
+ dataSpaceChangedForPlanar16 = true;
+ }
+
if (static_cast<int32_t>(mColorFormat) == colorFormatNew &&
mWidth == widthNew &&
mHeight == heightNew &&
mCropLeft == cropLeftNew &&
mCropTop == cropTopNew &&
mCropRight == cropRightNew &&
- mCropBottom == cropBottomNew) {
+ mCropBottom == cropBottomNew &&
+ !dataSpaceChangedForPlanar16) {
// Nothing changed, no need to reset renderer.
return;
}
@@ -130,6 +146,22 @@
bufHeight = (mCropHeight + 1) & ~1;
break;
}
+ case OMX_COLOR_FormatYUV420Planar16:
+ {
+ if (((dataSpace & HAL_DATASPACE_STANDARD_MASK) == HAL_DATASPACE_STANDARD_BT2020)
+ && ((dataSpace & HAL_DATASPACE_TRANSFER_MASK) == HAL_DATASPACE_TRANSFER_ST2084)) {
+ // Here we would convert OMX_COLOR_FormatYUV420Planar16 into
+ // OMX_COLOR_FormatYUV444Y410, and put it inside a buffer with
+ // format HAL_PIXEL_FORMAT_RGBA_1010102. SurfaceFlinger will
+ // use RenderEngine to convert it to RGB if needed.
+ halFormat = HAL_PIXEL_FORMAT_RGBA_1010102;
+ } else {
+ halFormat = HAL_PIXEL_FORMAT_YV12;
+ }
+ bufWidth = (mCropWidth + 1) & ~1;
+ bufHeight = (mCropHeight + 1) & ~1;
+ break;
+ }
default:
{
break;
@@ -141,6 +173,10 @@
mConverter = new ColorConverter(
mColorFormat, OMX_COLOR_Format16bitRGB565);
CHECK(mConverter->isValid());
+ } else if (halFormat == HAL_PIXEL_FORMAT_RGBA_1010102) {
+ mConverter = new ColorConverter(
+ mColorFormat, OMX_COLOR_FormatYUV444Y410);
+ CHECK(mConverter->isValid());
}
CHECK(mNativeWindow != NULL);
@@ -151,7 +187,7 @@
CHECK_EQ(0,
native_window_set_usage(
mNativeWindow.get(),
- GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN
+ GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_RARELY
| GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP));
CHECK_EQ(0,
@@ -167,6 +203,11 @@
CHECK_EQ(0, native_window_set_buffers_format(
mNativeWindow.get(),
halFormat));
+ if (OK != native_window_set_buffer_count(
+ mNativeWindow.get(), numOutputBuffers + 4)) {
+ ALOGE("Failed to set native window buffer count to (%zu + 4)",
+ numOutputBuffers);
+ }
// NOTE: native window uses extended right-bottom coordinate
android_native_rect_t crop;
@@ -202,8 +243,8 @@
std::list<FrameRenderTracker::Info> SoftwareRenderer::render(
const void *data, size_t , int64_t mediaTimeUs, nsecs_t renderTimeNs,
- void* /*platformPrivate*/, const sp<AMessage>& format) {
- resetFormatIfChanged(format);
+ size_t numOutputBuffers, const sp<AMessage>& format) {
+ resetFormatIfChanged(format, numOutputBuffers);
FrameRenderTracker::Info *info = NULL;
ANativeWindowBuffer *buf;
@@ -226,8 +267,9 @@
Rect bounds(mCropWidth, mCropHeight);
void *dst;
- CHECK_EQ(0, mapper.lock(
- buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN, bounds, &dst));
+ CHECK_EQ(0, mapper.lock(buf->handle,
+ GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_RARELY,
+ bounds, &dst));
// TODO move the other conversions also into ColorConverter, and
// fix cropping issues (when mCropLeft/Top != 0 or mWidth != mCropWidth)
@@ -276,6 +318,46 @@
dst_u += dst_c_stride;
dst_v += dst_c_stride;
}
+ } else if (mColorFormat == OMX_COLOR_FormatYUV420Planar16) {
+ const uint16_t *src_y = (const uint16_t *)data;
+ const uint16_t *src_u = (const uint16_t *)data + mWidth * mHeight;
+ const uint16_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
+
+ src_y += mCropLeft + mCropTop * mWidth;
+ src_u += (mCropLeft + mCropTop * mWidth / 2) / 2;
+ src_v += (mCropLeft + mCropTop * mWidth / 2) / 2;
+
+ uint8_t *dst_y = (uint8_t *)dst;
+ size_t dst_y_size = buf->stride * buf->height;
+ size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
+ size_t dst_c_size = dst_c_stride * buf->height / 2;
+ uint8_t *dst_v = dst_y + dst_y_size;
+ uint8_t *dst_u = dst_v + dst_c_size;
+
+ dst_y += mCropTop * buf->stride + mCropLeft;
+ dst_v += (mCropTop / 2) * dst_c_stride + mCropLeft / 2;
+ dst_u += (mCropTop / 2) * dst_c_stride + mCropLeft / 2;
+
+ for (int y = 0; y < mCropHeight; ++y) {
+ for (int x = 0; x < mCropWidth; ++x) {
+ dst_y[x] = (uint8_t)(src_y[x] >> 2);
+ }
+
+ src_y += mWidth;
+ dst_y += buf->stride;
+ }
+
+ for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
+ for (int x = 0; x < (mCropWidth + 1) / 2; ++x) {
+ dst_u[x] = (uint8_t)(src_u[x] >> 2);
+ dst_v[x] = (uint8_t)(src_v[x] >> 2);
+ }
+
+ src_u += mWidth / 2;
+ src_v += mWidth / 2;
+ dst_u += dst_c_stride;
+ dst_v += dst_c_stride;
+ }
} else if (mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
|| mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
const uint8_t *src_y = (const uint8_t *)data;
@@ -365,12 +447,29 @@
// color conversion to RGB. For now, just mark dataspace for YUV rendering.
android_dataspace dataSpace;
if (format->findInt32("android._dataspace", (int32_t *)&dataSpace) && dataSpace != mDataSpace) {
+ mDataSpace = dataSpace;
+
+ if (mConverter != NULL && mConverter->isDstRGB()) {
+ // graphics only supports full range RGB. ColorConverter should have
+ // converted any YUV to full range.
+ dataSpace = (android_dataspace)
+ ((dataSpace & ~HAL_DATASPACE_RANGE_MASK) | HAL_DATASPACE_RANGE_FULL);
+ }
+
ALOGD("setting dataspace on output surface to #%x", dataSpace);
if ((err = native_window_set_buffers_data_space(mNativeWindow.get(), dataSpace))) {
ALOGW("failed to set dataspace on surface (%d)", err);
}
- mDataSpace = dataSpace;
}
+ if (format->contains("hdr-static-info")) {
+ HDRStaticInfo info;
+ if (ColorUtils::getHDRStaticInfoFromFormat(format, &info)
+ && memcmp(&mHDRStaticInfo, &info, sizeof(info))) {
+ setNativeWindowHdrMetadata(mNativeWindow.get(), &info);
+ mHDRStaticInfo = info;
+ }
+ }
+
if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf, -1)) != 0) {
ALOGW("Surface::queueBuffer returned error %d", err);
} else {
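A hedged sketch of the format selection the renderer now performs for OMX_COLOR_FormatYUV420Planar16: only the BT.2020 plus SMPTE ST 2084 (PQ) dataspace takes the RGBA_1010102 path, and everything else falls back to YV12 with a 10-bit to 8-bit downshift. The constants and function names below are assumptions restated from the platform dataspace conventions, not taken from the patch.

#include <cstdint>

// Assumed android_dataspace field encodings (standard in bits 16-21, transfer in bits 22-26).
enum : uint32_t {
    kStandardMask   = 0x3Fu << 16,  // HAL_DATASPACE_STANDARD_MASK
    kStandardBt2020 = 6u    << 16,  // HAL_DATASPACE_STANDARD_BT2020
    kTransferMask   = 0x1Fu << 22,  // HAL_DATASPACE_TRANSFER_MASK
    kTransferSt2084 = 7u    << 22,  // HAL_DATASPACE_TRANSFER_ST2084
};

// True when the high-bitdepth RGBA_1010102 path should be used.
static bool useRgba1010102(uint32_t dataSpace) {
    return (dataSpace & kStandardMask) == kStandardBt2020
        && (dataSpace & kTransferMask) == kTransferSt2084;
}

// The YV12 fallback simply truncates each 10-bit sample to 8 bits.
static uint8_t downshift10to8(uint16_t sample) {
    return (uint8_t)(sample >> 2);
}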
diff --git a/media/libstagefright/data/media_codecs_google_c2.xml b/media/libstagefright/data/media_codecs_google_c2.xml
new file mode 100644
index 0000000..bb78013
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_c2.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<MediaCodecs>
+ <Include href="media_codecs_google_c2_audio.xml" />
+ <Include href="media_codecs_google_c2_video.xml" />
+</MediaCodecs>
diff --git a/media/libstagefright/data/media_codecs_google_c2_audio.xml b/media/libstagefright/data/media_codecs_google_c2_audio.xml
new file mode 100644
index 0000000..0b554a2
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_c2_audio.xml
@@ -0,0 +1,97 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<Included>
+ <Decoders>
+ <MediaCodec name="c2.android.mp3.decoder" type="audio/mpeg">
+ <Limit name="channel-count" max="2" />
+ <Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
+ <Limit name="bitrate" range="8000-320000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.amrnb.decoder" type="audio/3gpp">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000" />
+ <Limit name="bitrate" range="4750-12200" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.amrwb.decoder" type="audio/amr-wb">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="16000" />
+ <Limit name="bitrate" range="6600-23850" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.aac.decoder" type="audio/mp4a-latm">
+ <Limit name="channel-count" max="8" />
+ <Limit name="sample-rate" ranges="7350,8000,11025,12000,16000,22050,24000,32000,44100,48000" />
+ <Limit name="bitrate" range="8000-960000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.g711.alaw.decoder" type="audio/g711-alaw">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000-48000" />
+ <Limit name="bitrate" range="64000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.g711.mlaw.decoder" type="audio/g711-mlaw">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000-48000" />
+ <Limit name="bitrate" range="64000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.vorbis.decoder" type="audio/vorbis">
+ <Limit name="channel-count" max="8" />
+ <Limit name="sample-rate" ranges="8000-96000" />
+ <Limit name="bitrate" range="32000-500000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.opus.decoder" type="audio/opus">
+ <Limit name="channel-count" max="8" />
+ <Limit name="sample-rate" ranges="48000" />
+ <Limit name="bitrate" range="6000-510000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.raw.decoder" type="audio/raw">
+ <Limit name="channel-count" max="8" />
+ <Limit name="sample-rate" ranges="8000-96000" />
+ <Limit name="bitrate" range="1-10000000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.flac.decoder" type="audio/flac">
+ <Limit name="channel-count" max="8" />
+ <Limit name="sample-rate" ranges="1-655350" />
+ <Limit name="bitrate" range="1-21000000" />
+ </MediaCodec>
+ </Decoders>
+ <Encoders>
+ <MediaCodec name="c2.android.aac.encoder" type="audio/mp4a-latm">
+ <Limit name="channel-count" max="6" />
+ <Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
+ <!-- also may support 64000, 88200 and 96000 Hz -->
+ <Limit name="bitrate" range="8000-960000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.amrnb.encoder" type="audio/3gpp">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000" />
+ <Limit name="bitrate" range="4750-12200" />
+ <Feature name="bitrate-modes" value="CBR" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.amrwb.encoder" type="audio/amr-wb">
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="16000" />
+ <Limit name="bitrate" range="6600-23850" />
+ <Feature name="bitrate-modes" value="CBR" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.flac.encoder" type="audio/flac">
+ <Limit name="channel-count" max="2" />
+ <Limit name="sample-rate" ranges="1-655350" />
+ <Limit name="bitrate" range="1-21000000" />
+ <Limit name="complexity" range="0-8" default="5" />
+ <Feature name="bitrate-modes" value="CQ" />
+ </MediaCodec>
+ </Encoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
new file mode 100644
index 0000000..adb45b3
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -0,0 +1,122 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<Included>
+ <Decoders>
+ <MediaCodec name="c2.android.mpeg4.decoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileSimple : Level3 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-11880" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.h263.decoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level30, ProfileBaseline : Level45
+ ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.avc.decoder" type="video/avc">
+ <!-- profiles and levels: ProfileHigh : Level52 -->
+ <Limit name="size" min="2x2" max="4080x4080" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="block-count" range="1-32768" /> <!-- max 4096x2048 equivalent -->
+ <Limit name="blocks-per-second" range="1-1966080" />
+ <Limit name="bitrate" range="1-48000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.hevc.decoder" type="video/hevc">
+ <!-- profiles and levels: ProfileMain : MainTierLevel51 -->
+ <Limit name="size" min="2x2" max="4096x4096" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="8x8" />
+ <Limit name="block-count" range="1-196608" /> <!-- max 4096x3072 -->
+ <Limit name="blocks-per-second" range="1-2000000" />
+ <Limit name="bitrate" range="1-10000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.vp8.decoder" type="video/x-vnd.on2.vp8">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="block-count" range="1-16384" />
+ <Limit name="blocks-per-second" range="1-1000000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.vp9.decoder" type="video/x-vnd.on2.vp9">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="block-count" range="1-16384" />
+ <Limit name="blocks-per-second" range="1-500000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ </Decoders>
+
+ <Encoders>
+ <MediaCodec name="c2.android.h263.encoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level45 -->
+ <Limit name="size" min="176x144" max="176x144" />
+ <Limit name="alignment" value="16x16" />
+ <Limit name="bitrate" range="1-128000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.avc.encoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level41 -->
+ <Limit name="size" min="16x16" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="block-count" range="1-8192" /> <!-- max 2048x1024 -->
+ <Limit name="blocks-per-second" range="1-245760" />
+ <Limit name="bitrate" range="1-12000000" />
+ <Feature name="intra-refresh" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.mpeg4.encoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileCore : Level2 -->
+ <Limit name="size" min="16x16" max="176x144" />
+ <Limit name="alignment" value="16x16" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-1485" />
+ <Limit name="bitrate" range="1-64000" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.vp8.encoder" type="video/x-vnd.on2.vp8">
+ <!-- profiles and levels: ProfileMain : Level_Version0-3 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <!-- 2016 devices can encode at about 10fps at this block count -->
+ <Limit name="block-count" range="1-16384" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="bitrate-modes" value="VBR,CBR" />
+ </MediaCodec>
+ <MediaCodec name="c2.android.vp9.encoder" type="video/x-vnd.on2.vp9">
+ <!-- profiles and levels: ProfileMain : Level_Version0-3 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <!-- 2016 devices can encode at about 8fps at this block count -->
+ <Limit name="block-count" range="1-3600" /> <!-- max 1280x720 -->
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="bitrate-modes" value="VBR,CBR" />
+ </MediaCodec>
+ </Encoders>
+</Included>
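The block-count limits in the video entries follow from 16x16 block arithmetic; for example, the avc decoder's range of 1-32768 blocks corresponds to the "max 4096x2048 equivalent" comment. A quick, purely illustrative check:

#include <cstdio>

int main() {
    // 16x16 blocks: 4096/16 = 256 columns, 2048/16 = 128 rows.
    const int blocks = (4096 / 16) * (2048 / 16);
    printf("%d\n", blocks);  // prints 32768, matching the avc decoder's block-count limit
    return 0;
}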
diff --git a/media/libstagefright/exports.lds b/media/libstagefright/exports.lds
new file mode 100644
index 0000000..aabc233
--- /dev/null
+++ b/media/libstagefright/exports.lds
@@ -0,0 +1,508 @@
+{
+ global:
+ *;
+ local:
+ _ZN7android4ESDS*;
+ _ZNK7android4ESDS*;
+ _ZN7android14ColorConverter*;
+ _ZNK7android14ColorConverter*;
+ _ZN7android16SoftwareRenderer*;
+ ABGRToARGB;
+ ABGRToI420;
+ ABGRToUVRow_Any_NEON*;
+ ABGRToUVRow_C;
+ ABGRToUVRow_NEON*;
+ ABGRToYRow_Any_NEON*;
+ ABGRToYRow_C;
+ ABGRToYRow_NEON*;
+ Android420ToI420;
+ ARGB1555ToARGB;
+ ARGB1555ToARGBRow_Any_NEON*;
+ ARGB1555ToARGBRow_C;
+ ARGB1555ToARGBRow_NEON*;
+ ARGB1555ToI420;
+ ARGB1555ToUVRow_Any_NEON*;
+ ARGB1555ToUVRow_C;
+ ARGB1555ToUVRow_NEON*;
+ ARGB1555ToYRow_Any_NEON*;
+ ARGB1555ToYRow_C;
+ ARGB1555ToYRow_NEON*;
+ ARGB4444ToARGB;
+ ARGB4444ToARGBRow_Any_NEON*;
+ ARGB4444ToARGBRow_C;
+ ARGB4444ToARGBRow_NEON*;
+ ARGB4444ToI420;
+ ARGB4444ToUVRow_Any_NEON*;
+ ARGB4444ToUVRow_C;
+ ARGB4444ToUVRow_NEON*;
+ ARGB4444ToYRow_Any_NEON*;
+ ARGB4444ToYRow_C;
+ ARGB4444ToYRow_NEON*;
+ ARGBAdd;
+ ARGBAddRow_Any_NEON*;
+ ARGBAddRow_C;
+ ARGBAddRow_NEON*;
+ ARGBAffineRow_C;
+ ARGBAttenuate;
+ ARGBAttenuateRow_Any_NEON*;
+ ARGBAttenuateRow_C;
+ ARGBAttenuateRow_NEON*;
+ ARGBBlend;
+ ARGBBlendRow_C;
+ ARGBBlendRow_NEON*;
+ ARGBBlur;
+ ARGBColorMatrix;
+ ARGBColorMatrixRow_C;
+ ARGBColorMatrixRow_NEON*;
+ ARGBColorTable;
+ ARGBColorTableRow_C;
+ ARGBComputeCumulativeSum;
+ ARGBCopy;
+ ARGBCopyAlpha;
+ ARGBCopyAlphaRow_C;
+ ARGBCopyYToAlpha;
+ ARGBCopyYToAlphaRow_C;
+ ARGBExtractAlpha;
+ ARGBExtractAlphaRow_Any_NEON*;
+ ARGBExtractAlphaRow_C;
+ ARGBExtractAlphaRow_NEON*;
+ ARGBGray;
+ ARGBGrayRow_C;
+ ARGBGrayRow_NEON*;
+ ARGBGrayTo;
+ ARGBInterpolate;
+ ARGBLumaColorTable;
+ ARGBLumaColorTableRow_C;
+ ARGBMirror;
+ ARGBMirrorRow_Any_NEON*;
+ ARGBMirrorRow_C;
+ ARGBMirrorRow_NEON*;
+ ARGBMultiply;
+ ARGBMultiplyRow_Any_NEON*;
+ ARGBMultiplyRow_C;
+ ARGBMultiplyRow_NEON*;
+ ARGBPolynomial;
+ ARGBPolynomialRow_C;
+ ARGBQuantize;
+ ARGBQuantizeRow_C;
+ ARGBQuantizeRow_NEON*;
+ ARGBRect;
+ ARGBSepia;
+ ARGBSepiaRow_C;
+ ARGBSepiaRow_NEON*;
+ ARGBSetRow_Any_NEON*;
+ ARGBSetRow_C;
+ ARGBSetRow_NEON*;
+ ARGBShade;
+ ARGBShadeRow_C;
+ ARGBShadeRow_NEON*;
+ ARGBShuffle;
+ ARGBShuffleRow_Any_NEON*;
+ ARGBShuffleRow_C;
+ ARGBShuffleRow_NEON*;
+ ARGBSobel;
+ ARGBSobelToPlane;
+ ARGBSobelXY;
+ ARGBSubtract;
+ ARGBSubtractRow_Any_NEON*;
+ ARGBSubtractRow_C;
+ ARGBSubtractRow_NEON*;
+ ARGBToABGR;
+ ARGBToARGB1555Row_Any_NEON*;
+ ARGBToARGB1555Row_C;
+ ARGBToARGB1555Row_NEON*;
+ ARGBToARGB4444Row_Any_NEON*;
+ ARGBToARGB4444Row_C;
+ ARGBToARGB4444Row_NEON*;
+ ARGBToBGRA;
+ ARGBToI420;
+ ARGBToRAWRow_Any_NEON*;
+ ARGBToRAWRow_C;
+ ARGBToRAWRow_NEON*;
+ ARGBToRGB24Row_Any_NEON*;
+ ARGBToRGB24Row_C;
+ ARGBToRGB24Row_NEON*;
+ ARGBToRGB565DitherRow_Any_NEON*;
+ ARGBToRGB565DitherRow_C;
+ ARGBToRGB565DitherRow_NEON*;
+ ARGBToRGB565Row_Any_NEON*;
+ ARGBToRGB565Row_C;
+ ARGBToRGB565Row_NEON*;
+ ARGBToUV444Row_Any_NEON*;
+ ARGBToUV444Row_C;
+ ARGBToUV444Row_NEON*;
+ ARGBToUVJRow_Any_NEON*;
+ ARGBToUVJRow_C;
+ ARGBToUVJRow_NEON*;
+ ARGBToUVRow_Any_NEON*;
+ ARGBToUVRow_C;
+ ARGBToUVRow_NEON*;
+ ARGBToYJRow_Any_NEON*;
+ ARGBToYJRow_C;
+ ARGBToYJRow_NEON*;
+ ARGBToYRow_Any_NEON*;
+ ARGBToYRow_C;
+ ARGBToYRow_NEON*;
+ ARGBUnattenuate;
+ ARGBUnattenuateRow_C;
+ ArmCpuCaps*;
+ BGRAToARGB;
+ BGRAToI420;
+ BGRAToUVRow_Any_NEON*;
+ BGRAToUVRow_C;
+ BGRAToUVRow_NEON*;
+ BGRAToYRow_Any_NEON*;
+ BGRAToYRow_C;
+ BGRAToYRow_NEON*;
+ BlendPlane;
+ BlendPlaneRow_C;
+ CanonicalFourCC;
+ ComputeCumulativeSumRow_C;
+ ConvertFromI420;
+ CopyPlane;
+ CopyPlane_16;
+ CopyRow_16_C;
+ CopyRow_Any_NEON*;
+ CopyRow_C;
+ CopyRow_NEON*;
+ CpuId*;
+ cpu_info_*;
+ CumulativeSumToAverageRow_C;
+ FixedDiv1_C;
+ FixedDiv_C;
+ fixed_invtbl8;
+ GetARGBBlend;
+ H420ToABGR;
+ H420ToARGB;
+ H422ToABGR;
+ H422ToARGB;
+ HalfFloat1Row_Any_NEON*;
+ HalfFloat1Row_NEON*;
+ HalfFloatPlane;
+ HalfFloatRow_Any_NEON*;
+ HalfFloatRow_C;
+ HalfFloatRow_NEON*;
+ I400Copy;
+ I400Mirror;
+ I400ToARGB;
+ I400ToARGBRow_Any_NEON*;
+ I400ToARGBRow_C;
+ I400ToARGBRow_NEON*;
+ I400ToI400;
+ I400ToI420;
+ I420AlphaToABGR;
+ I420AlphaToARGB;
+ I420Blend;
+ I420Copy;
+ I420Interpolate;
+ I420Mirror;
+ I420Rect;
+ I420Scale;
+ I420Scale_16;
+ I420ToABGR;
+ I420ToARGB;
+ I420ToARGB1555;
+ I420ToARGB4444;
+ I420ToBGRA;
+ I420ToI400;
+ I420ToI422;
+ I420ToI444;
+ I420ToNV12;
+ I420ToNV21;
+ I420ToRAW;
+ I420ToRGB24;
+ I420ToRGB565;
+ I420ToRGB565Dither;
+ I420ToRGBA;
+ I420ToUYVY;
+ I420ToYUY2;
+ I422AlphaToARGBRow_Any_NEON*;
+ I422AlphaToARGBRow_C;
+ I422AlphaToARGBRow_NEON*;
+ I422Copy;
+ I422ToABGR;
+ I422ToARGB;
+ I422ToARGB1555Row_Any_NEON*;
+ I422ToARGB1555Row_C;
+ I422ToARGB1555Row_NEON*;
+ I422ToARGB4444Row_Any_NEON*;
+ I422ToARGB4444Row_C;
+ I422ToARGB4444Row_NEON*;
+ I422ToARGBRow_Any_NEON*;
+ I422ToARGBRow_C;
+ I422ToARGBRow_NEON*;
+ I422ToBGRA;
+ I422ToI420;
+ I422ToRGB24Row_Any_NEON*;
+ I422ToRGB24Row_C;
+ I422ToRGB24Row_NEON*;
+ I422ToRGB565;
+ I422ToRGB565Row_Any_NEON*;
+ I422ToRGB565Row_C;
+ I422ToRGB565Row_NEON*;
+ I422ToRGBA;
+ I422ToRGBARow_Any_NEON*;
+ I422ToRGBARow_C;
+ I422ToRGBARow_NEON*;
+ I422ToUYVY;
+ I422ToUYVYRow_Any_NEON*;
+ I422ToUYVYRow_C;
+ I422ToUYVYRow_NEON*;
+ I422ToYUY2;
+ I422ToYUY2Row_Any_NEON*;
+ I422ToYUY2Row_C;
+ I422ToYUY2Row_NEON*;
+ I444Copy;
+ I444ToABGR;
+ I444ToARGB;
+ I444ToARGBRow_Any_NEON*;
+ I444ToARGBRow_C;
+ I444ToARGBRow_NEON*;
+ I444ToI420;
+ InitCpuFlags*;
+ InterpolatePlane;
+ InterpolateRow_16_C;
+ InterpolateRow_Any_NEON*;
+ InterpolateRow_C;
+ InterpolateRow_NEON*;
+ J400ToARGB;
+ J400ToARGBRow_Any_NEON*;
+ J400ToARGBRow_C;
+ J400ToARGBRow_NEON*;
+ J420ToABGR;
+ J420ToARGB;
+ J422ToABGR;
+ J422ToARGB;
+ J444ToARGB;
+ kYuvH709Constants;
+ kYuvI601Constants;
+ kYuvJPEGConstants;
+ kYvuH709Constants;
+ kYvuI601Constants;
+ kYvuJPEGConstants;
+ M420ToARGB;
+ M420ToI420;
+ MaskCpuFlags*;
+ MergeUVPlane;
+ MergeUVRow_Any_NEON*;
+ MergeUVRow_C;
+ MergeUVRow_NEON*;
+ MipsCpuCaps*;
+ MirrorPlane;
+ MirrorRow_Any_NEON*;
+ MirrorRow_C;
+ MirrorRow_NEON*;
+ MirrorUVRow_C;
+ MirrorUVRow_NEON*;
+ NV12ToARGBRow_Any_NEON*;
+ NV12ToARGBRow_C;
+ NV12ToARGBRow_NEON*;
+ NV12ToI420;
+ NV12ToRGB565;
+ NV12ToRGB565Row_Any_NEON*;
+ NV12ToRGB565Row_C;
+ NV12ToRGB565Row_NEON*;
+ NV21ToARGB;
+ NV21ToARGBRow_Any_NEON*;
+ NV21ToARGBRow_C;
+ NV21ToARGBRow_NEON*;
+ NV21ToI420;
+ RAWToARGB;
+ RAWToARGBRow_Any_NEON*;
+ RAWToARGBRow_C;
+ RAWToARGBRow_NEON*;
+ RAWToI420;
+ RAWToRGB24;
+ RAWToRGB24Row_Any_NEON*;
+ RAWToRGB24Row_C;
+ RAWToRGB24Row_NEON*;
+ RAWToUVRow_Any_NEON*;
+ RAWToUVRow_C;
+ RAWToUVRow_NEON*;
+ RAWToYRow_Any_NEON*;
+ RAWToYRow_C;
+ RAWToYRow_NEON*;
+ RGB24ToARGB;
+ RGB24ToARGBRow_Any_NEON*;
+ RGB24ToARGBRow_C;
+ RGB24ToARGBRow_NEON*;
+ RGB24ToI420;
+ RGB24ToUVRow_Any_NEON*;
+ RGB24ToUVRow_C;
+ RGB24ToUVRow_NEON*;
+ RGB24ToYRow_Any_NEON*;
+ RGB24ToYRow_C;
+ RGB24ToYRow_NEON*;
+ RGB565ToARGB;
+ RGB565ToARGBRow_Any_NEON*;
+ RGB565ToARGBRow_C;
+ RGB565ToARGBRow_NEON*;
+ RGB565ToI420;
+ RGB565ToUVRow_Any_NEON*;
+ RGB565ToUVRow_C;
+ RGB565ToUVRow_NEON*;
+ RGB565ToYRow_Any_NEON*;
+ RGB565ToYRow_C;
+ RGB565ToYRow_NEON*;
+ RGBAToARGB;
+ RGBAToI420;
+ RGBAToUVRow_Any_NEON*;
+ RGBAToUVRow_C;
+ RGBAToUVRow_NEON*;
+ RGBAToYRow_Any_NEON*;
+ RGBAToYRow_C;
+ RGBAToYRow_NEON*;
+ RGBColorMatrix;
+ RGBColorTable;
+ RGBColorTableRow_C;
+ Scale;
+ ScaleAddRow_16_C;
+ ScaleAddRow_C;
+ ScaleAddRows_NEON*;
+ ScaleARGBCols64_C;
+ ScaleARGBCols_Any_NEON*;
+ ScaleARGBCols_C;
+ ScaleARGBCols_NEON*;
+ ScaleARGBColsUp2_C;
+ ScaleARGBFilterCols64_C;
+ ScaleARGBFilterCols_Any_NEON*;
+ ScaleARGBFilterCols_C;
+ ScaleARGBFilterCols_NEON*;
+ ScaleARGBRowDown2_Any_NEON*;
+ ScaleARGBRowDown2Box_Any_NEON*;
+ ScaleARGBRowDown2Box_C;
+ ScaleARGBRowDown2Box_NEON*;
+ ScaleARGBRowDown2_C;
+ ScaleARGBRowDown2Linear_Any_NEON*;
+ ScaleARGBRowDown2Linear_C;
+ ScaleARGBRowDown2Linear_NEON*;
+ ScaleARGBRowDown2_NEON*;
+ ScaleARGBRowDownEven_Any_NEON*;
+ ScaleARGBRowDownEvenBox_Any_NEON*;
+ ScaleARGBRowDownEvenBox_C;
+ ScaleARGBRowDownEvenBox_NEON*;
+ ScaleARGBRowDownEven_C;
+ ScaleARGBRowDownEven_NEON*;
+ ScaleCols_16_C;
+ ScaleCols_C;
+ ScaleColsUp2_16_C;
+ ScaleColsUp2_C;
+ ScaleFilterCols_16_C;
+ ScaleFilterCols64_16_C;
+ ScaleFilterCols64_C;
+ ScaleFilterCols_Any_NEON*;
+ ScaleFilterCols_C;
+ ScaleFilterCols_NEON*;
+ ScaleFilterReduce;
+ ScaleFilterRows_NEON*;
+ ScaleOffset;
+ ScalePlane;
+ ScalePlane_16;
+ ScalePlaneBilinearDown;
+ ScalePlaneBilinearDown_16;
+ ScalePlaneBilinearUp;
+ ScalePlaneBilinearUp_16;
+ ScalePlaneVertical;
+ ScalePlaneVertical_16;
+ ScaleRowDown2_16_C;
+ ScaleRowDown2_Any_NEON*;
+ ScaleRowDown2Box_16_C;
+ ScaleRowDown2Box_Any_NEON*;
+ ScaleRowDown2Box_C;
+ ScaleRowDown2Box_NEON*;
+ ScaleRowDown2Box_Odd_C;
+ ScaleRowDown2Box_Odd_NEON*;
+ ScaleRowDown2_C;
+ ScaleRowDown2Linear_16_C;
+ ScaleRowDown2Linear_Any_NEON*;
+ ScaleRowDown2Linear_C;
+ ScaleRowDown2Linear_NEON*;
+ ScaleRowDown2_NEON*;
+ ScaleRowDown34_0_Box_16_C;
+ ScaleRowDown34_0_Box_Any_NEON*;
+ ScaleRowDown34_0_Box_C;
+ ScaleRowDown34_0_Box_NEON*;
+ ScaleRowDown34_16_C;
+ ScaleRowDown34_1_Box_16_C;
+ ScaleRowDown34_1_Box_Any_NEON*;
+ ScaleRowDown34_1_Box_C;
+ ScaleRowDown34_1_Box_NEON*;
+ ScaleRowDown34_Any_NEON*;
+ ScaleRowDown34_C;
+ ScaleRowDown34_NEON*;
+ ScaleRowDown38_16_C;
+ ScaleRowDown38_2_Box_16_C;
+ ScaleRowDown38_2_Box_Any_NEON*;
+ ScaleRowDown38_2_Box_C;
+ ScaleRowDown38_2_Box_NEON*;
+ ScaleRowDown38_3_Box_16_C;
+ ScaleRowDown38_3_Box_Any_NEON*;
+ ScaleRowDown38_3_Box_C;
+ ScaleRowDown38_3_Box_NEON*;
+ ScaleRowDown38_Any_NEON*;
+ ScaleRowDown38_C;
+ ScaleRowDown38_NEON*;
+ ScaleRowDown4_16_C;
+ ScaleRowDown4_Any_NEON*;
+ ScaleRowDown4Box_16_C;
+ ScaleRowDown4Box_Any_NEON*;
+ ScaleRowDown4Box_C;
+ ScaleRowDown4Box_NEON*;
+ ScaleRowDown4_C;
+ ScaleRowDown4_NEON*;
+ ScaleSlope;
+ SetPlane;
+ SetRow_Any_NEON*;
+ SetRow_C;
+ SetRow_NEON*;
+ SobelRow_Any_NEON*;
+ SobelRow_C;
+ SobelRow_NEON*;
+ SobelToPlaneRow_Any_NEON*;
+ SobelToPlaneRow_C;
+ SobelToPlaneRow_NEON*;
+ SobelXRow_C;
+ SobelXRow_NEON*;
+ SobelXYRow_Any_NEON*;
+ SobelXYRow_C;
+ SobelXYRow_NEON*;
+ SobelYRow_C;
+ SobelYRow_NEON*;
+ SplitUVPlane;
+ SplitUVRow_Any_NEON*;
+ SplitUVRow_C;
+ SplitUVRow_NEON*;
+ UYVYToARGB;
+ UYVYToARGBRow_Any_NEON*;
+ UYVYToARGBRow_C;
+ UYVYToARGBRow_NEON*;
+ UYVYToI420;
+ UYVYToI422;
+ UYVYToNV12;
+ UYVYToUV422Row_Any_NEON*;
+ UYVYToUV422Row_C;
+ UYVYToUV422Row_NEON*;
+ UYVYToUVRow_Any_NEON*;
+ UYVYToUVRow_C;
+ UYVYToUVRow_NEON*;
+ UYVYToYRow_Any_NEON*;
+ UYVYToYRow_C;
+ UYVYToYRow_NEON*;
+ YUY2ToARGB;
+ YUY2ToARGBRow_Any_NEON*;
+ YUY2ToARGBRow_C;
+ YUY2ToARGBRow_NEON*;
+ YUY2ToI420;
+ YUY2ToI422;
+ YUY2ToNV12;
+ YUY2ToUV422Row_Any_NEON*;
+ YUY2ToUV422Row_C;
+ YUY2ToUV422Row_NEON*;
+ YUY2ToUVRow_Any_NEON*;
+ YUY2ToUVRow_C;
+ YUY2ToUVRow_NEON*;
+ YUY2ToY;
+ YUY2ToYRow_Any_NEON*;
+ YUY2ToYRow_C;
+ YUY2ToYRow_NEON*;
+};
diff --git a/media/libstagefright/flac/dec/Android.bp b/media/libstagefright/flac/dec/Android.bp
index 1b9fe0f..8d486cf 100644
--- a/media/libstagefright/flac/dec/Android.bp
+++ b/media/libstagefright/flac/dec/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
name: "libstagefright_flacdec",
vendor_available: true,
vndk: {
@@ -9,6 +9,8 @@
"FLACDecoder.cpp",
],
+ export_include_dirs: [ "." ],
+
include_dirs: [
"external/flac/include",
"frameworks/av/media/libstagefright/include",
@@ -27,10 +29,15 @@
},
},
- static_libs: ["libFLAC"],
+ static: {
+ whole_static_libs: ["libFLAC"],
+ },
+
+ shared: {
+ static_libs: ["libFLAC"],
+ },
shared_libs: [
- "libcutils",
"liblog",
"libstagefright_foundation",
"libutils",
diff --git a/media/libstagefright/flac/dec/FLACDecoder.cpp b/media/libstagefright/flac/dec/FLACDecoder.cpp
index 8c7137c..a2b6ab7 100644
--- a/media/libstagefright/flac/dec/FLACDecoder.cpp
+++ b/media/libstagefright/flac/dec/FLACDecoder.cpp
@@ -220,9 +220,10 @@
}
// static
-sp<FLACDecoder> FLACDecoder::Create() {
- sp<FLACDecoder> decoder = new FLACDecoder();
- if (decoder->init() != OK) {
+FLACDecoder *FLACDecoder::Create() {
+ FLACDecoder *decoder = new (std::nothrow) FLACDecoder();
+ if (decoder == NULL || decoder->init() != OK) {
+ delete decoder;
return NULL;
}
return decoder;
@@ -422,22 +423,16 @@
short *outBuffer, size_t *outBufferLen) {
ALOGV("decodeOneFrame: input size(%zu)", inBufferLen);
- if (inBufferLen == 0) {
- ALOGV("decodeOneFrame: no input data");
- if (outBufferLen) {
- *outBufferLen = 0;
- }
- return OK;
- }
-
if (!mStreamInfoValid) {
ALOGW("decodeOneFrame: no streaminfo metadata block");
}
- status_t err = addDataToBuffer(inBuffer, inBufferLen);
- if (err != OK) {
- ALOGW("decodeOneFrame: addDataToBuffer returns error %d", err);
- return err;
+ if (inBufferLen != 0) {
+ status_t err = addDataToBuffer(inBuffer, inBufferLen);
+ if (err != OK) {
+ ALOGW("decodeOneFrame: addDataToBuffer returns error %d", err);
+ return err;
+ }
}
mWriteRequested = true;
diff --git a/media/libstagefright/flac/dec/FLACDecoder.h b/media/libstagefright/flac/dec/FLACDecoder.h
index 36282a8..1a33cae 100644
--- a/media/libstagefright/flac/dec/FLACDecoder.h
+++ b/media/libstagefright/flac/dec/FLACDecoder.h
@@ -26,14 +26,14 @@
namespace android {
// packet-based FLAC decoder; wraps the libFLAC stream decoder.
-class FLACDecoder : public RefBase {
+class FLACDecoder {
public:
enum {
kMaxChannels = 8,
};
- static sp<FLACDecoder> Create();
+ static FLACDecoder *Create();
FLAC__StreamMetadata_StreamInfo getStreamInfo() const {
return mStreamInfo;
@@ -43,10 +43,10 @@
status_t decodeOneFrame(const uint8_t *inBuffer, size_t inBufferLen,
short *outBuffer, size_t *outBufferLen);
void flush();
+ virtual ~FLACDecoder();
protected:
FLACDecoder();
- virtual ~FLACDecoder() override;
private:
// stream properties
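With RefBase removed, FLACDecoder::Create() now hands back a raw pointer that the caller owns. A brief caller-side sketch (assuming the frameworks/av headers are available; the wrapper function is illustrative, not part of the patch) showing one way to manage that ownership with std::unique_ptr:

#include <memory>
#include "FLACDecoder.h"

static std::unique_ptr<android::FLACDecoder> makeFlacDecoder() {
    // Create() returns NULL if allocation or init() fails, so callers must check.
    std::unique_ptr<android::FLACDecoder> decoder(android::FLACDecoder::Create());
    return decoder;
}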
diff --git a/media/libstagefright/foundation/ABuffer.cpp b/media/libstagefright/foundation/ABuffer.cpp
index 804046a..c8965d9 100644
--- a/media/libstagefright/foundation/ABuffer.cpp
+++ b/media/libstagefright/foundation/ABuffer.cpp
@@ -19,13 +19,11 @@
#include "ADebug.h"
#include "ALooper.h"
#include "AMessage.h"
-#include "MediaBufferBase.h"
namespace android {
ABuffer::ABuffer(size_t capacity)
- : mMediaBufferBase(NULL),
- mRangeOffset(0),
+ : mRangeOffset(0),
mInt32Data(0),
mOwnsData(true) {
mData = malloc(capacity);
@@ -39,8 +37,7 @@
}
ABuffer::ABuffer(void *data, size_t capacity)
- : mMediaBufferBase(NULL),
- mData(data),
+ : mData(data),
mCapacity(capacity),
mRangeOffset(0),
mRangeLength(capacity),
@@ -66,8 +63,6 @@
mData = NULL;
}
}
-
- setMediaBufferBase(NULL);
}
void ABuffer::setRange(size_t offset, size_t size) {
@@ -85,19 +80,5 @@
return mMeta;
}
-MediaBufferBase *ABuffer::getMediaBufferBase() {
- if (mMediaBufferBase != NULL) {
- mMediaBufferBase->add_ref();
- }
- return mMediaBufferBase;
-}
-
-void ABuffer::setMediaBufferBase(MediaBufferBase *mediaBuffer) {
- if (mMediaBufferBase != NULL) {
- mMediaBufferBase->release();
- }
- mMediaBufferBase = mediaBuffer;
-}
-
} // namespace android
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index f55de64..df66ac6 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -116,6 +116,7 @@
default:
break;
}
+ item->mType = kTypeInt32; // clear type
}
#ifdef DUMP_STATS
@@ -196,6 +197,7 @@
CHECK(mNumItems < kMaxNumItems);
i = mNumItems++;
item = &mItems[i];
+ item->mType = kTypeInt32;
item->setName(name, len);
}
@@ -944,4 +946,163 @@
return mItems[index].mName;
}
+AMessage::ItemData AMessage::getEntryAt(size_t index) const {
+ ItemData it;
+ if (index < mNumItems) {
+ switch (mItems[index].mType) {
+ case kTypeInt32: it.set(mItems[index].u.int32Value); break;
+ case kTypeInt64: it.set(mItems[index].u.int64Value); break;
+ case kTypeSize: it.set(mItems[index].u.sizeValue); break;
+ case kTypeFloat: it.set(mItems[index].u.floatValue); break;
+ case kTypeDouble: it.set(mItems[index].u.doubleValue); break;
+ case kTypePointer: it.set(mItems[index].u.ptrValue); break;
+ case kTypeRect: it.set(mItems[index].u.rectValue); break;
+ case kTypeString: it.set(*mItems[index].u.stringValue); break;
+ case kTypeObject: {
+ sp<RefBase> obj = mItems[index].u.refValue;
+ it.set(obj);
+ break;
+ }
+ case kTypeMessage: {
+ sp<AMessage> msg = static_cast<AMessage *>(mItems[index].u.refValue);
+ it.set(msg);
+ break;
+ }
+ case kTypeBuffer: {
+ sp<ABuffer> buf = static_cast<ABuffer *>(mItems[index].u.refValue);
+ it.set(buf);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ return it;
+}
+
+status_t AMessage::setEntryNameAt(size_t index, const char *name) {
+ if (index >= mNumItems) {
+ return BAD_INDEX;
+ }
+ if (name == nullptr) {
+ return BAD_VALUE;
+ }
+ if (!strcmp(name, mItems[index].mName)) {
+ return OK; // name has not changed
+ }
+ size_t len = strlen(name);
+ if (findItemIndex(name, len) < mNumItems) {
+ return ALREADY_EXISTS;
+ }
+ delete[] mItems[index].mName;
+ mItems[index].mName = nullptr;
+ mItems[index].setName(name, len);
+ return OK;
+}
+
+status_t AMessage::setEntryAt(size_t index, const ItemData &item) {
+ AString stringValue;
+ sp<RefBase> refValue;
+ sp<AMessage> msgValue;
+ sp<ABuffer> bufValue;
+
+ if (index >= mNumItems) {
+ return BAD_INDEX;
+ }
+ if (!item.used()) {
+ return BAD_VALUE;
+ }
+ Item *dst = &mItems[index];
+ freeItemValue(dst);
+
+ // Some values can be copied directly via the typed getter; others need objects to be allocated.
+ if (item.find(&dst->u.int32Value)) {
+ dst->mType = kTypeInt32;
+ } else if (item.find(&dst->u.int64Value)) {
+ dst->mType = kTypeInt64;
+ } else if (item.find(&dst->u.sizeValue)) {
+ dst->mType = kTypeSize;
+ } else if (item.find(&dst->u.floatValue)) {
+ dst->mType = kTypeFloat;
+ } else if (item.find(&dst->u.doubleValue)) {
+ dst->mType = kTypeDouble;
+ } else if (item.find(&dst->u.ptrValue)) {
+ dst->mType = kTypePointer;
+ } else if (item.find(&dst->u.rectValue)) {
+ dst->mType = kTypeRect;
+ } else if (item.find(&stringValue)) {
+ dst->u.stringValue = new AString(stringValue);
+ dst->mType = kTypeString;
+ } else if (item.find(&refValue)) {
+ if (refValue != NULL) { refValue->incStrong(this); }
+ dst->u.refValue = refValue.get();
+ dst->mType = kTypeObject;
+ } else if (item.find(&msgValue)) {
+ if (msgValue != NULL) { msgValue->incStrong(this); }
+ dst->u.refValue = msgValue.get();
+ dst->mType = kTypeMessage;
+ } else if (item.find(&bufValue)) {
+ if (bufValue != NULL) { bufValue->incStrong(this); }
+ dst->u.refValue = bufValue.get();
+ dst->mType = kTypeBuffer;
+ } else {
+ // unsupported item - we should not be here.
+ dst->mType = kTypeInt32;
+ dst->u.int32Value = 0xDEADDEAD;
+ return BAD_TYPE;
+ }
+ return OK;
+}
+
+status_t AMessage::removeEntryAt(size_t index) {
+ if (index >= mNumItems) {
+ return BAD_INDEX;
+ }
+ // delete entry data and objects
+ --mNumItems;
+ delete[] mItems[index].mName;
+ mItems[index].mName = nullptr;
+ freeItemValue(&mItems[index]);
+
+ // swap entry with last entry and clear last entry's data
+ if (index < mNumItems) {
+ mItems[index] = mItems[mNumItems];
+ mItems[mNumItems].mName = nullptr;
+ mItems[mNumItems].mType = kTypeInt32;
+ }
+ return OK;
+}
+
+void AMessage::setItem(const char *name, const ItemData &item) {
+ if (item.used()) {
+ Item *it = allocateItem(name);
+ if (it != nullptr) {
+ setEntryAt(it - mItems, item);
+ }
+ }
+}
+
+AMessage::ItemData AMessage::findItem(const char *name) const {
+ return getEntryAt(findEntryByName(name));
+}
+
+void AMessage::extend(const sp<AMessage> &other) {
+ // ignore null messages
+ if (other == nullptr) {
+ return;
+ }
+
+ for (size_t ix = 0; ix < other->mNumItems; ++ix) {
+ Item *it = allocateItem(other->mItems[ix].mName);
+ if (it != nullptr) {
+ ItemData data = other->getEntryAt(ix);
+ setEntryAt(it - mItems, data);
+ }
+ }
+}
+
+size_t AMessage::findEntryByName(const char *name) const {
+ return name == nullptr ? countEntries() : findItemIndex(name, strlen(name));
+}
+
} // namespace android
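
A short editorial sketch of how the new generic accessors compose (illustrative only; the entry name "bitrate" is hypothetical):

    sp<AMessage> src = new AMessage;
    src->setInt32("bitrate", 128000);

    sp<AMessage> dst = new AMessage;
    dst->extend(src);                                     // copies every entry, preserving name and type

    AMessage::ItemData entry = dst->findItem("bitrate");  // typed copy of a single entry
    int32_t value;
    if (entry.find(&value)) {                             // false if the entry is missing or not an int32
        // value == 128000
    }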
diff --git a/media/libstagefright/foundation/ANetworkSession.cpp b/media/libstagefright/foundation/ANetworkSession.cpp
index f8b7b41..eafdc37 100644
--- a/media/libstagefright/foundation/ANetworkSession.cpp
+++ b/media/libstagefright/foundation/ANetworkSession.cpp
@@ -33,22 +33,11 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/foundation/hexdump.h>
namespace android {
-static uint16_t U16_AT(const uint8_t *ptr) {
- return ptr[0] << 8 | ptr[1];
-}
-
-static uint32_t U32_AT(const uint8_t *ptr) {
- return ptr[0] << 24 | ptr[1] << 16 | ptr[2] << 8 | ptr[3];
-}
-
-static uint64_t U64_AT(const uint8_t *ptr) {
- return ((uint64_t)U32_AT(ptr)) << 32 | U32_AT(ptr + 4);
-}
-
static const size_t kMaxUDPSize = 1500;
static const int32_t kMaxUDPRetries = 200;
diff --git a/media/libstagefright/foundation/AWakeLock.cpp b/media/libstagefright/foundation/AWakeLock.cpp
deleted file mode 100644
index d9277ac..0000000
--- a/media/libstagefright/foundation/AWakeLock.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "AWakeLock"
-#include <utils/Log.h>
-
-#include "ADebug.h"
-#include "AWakeLock.h"
-
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <powermanager/PowerManager.h>
-
-
-namespace android {
-
-AWakeLock::AWakeLock() :
- mPowerManager(NULL),
- mWakeLockToken(NULL),
- mWakeLockCount(0),
- mDeathRecipient(new PMDeathRecipient(this)) {}
-
-AWakeLock::~AWakeLock() {
- if (mPowerManager != NULL) {
- sp<IBinder> binder = IInterface::asBinder(mPowerManager);
- binder->unlinkToDeath(mDeathRecipient);
- }
- clearPowerManager();
-}
-
-bool AWakeLock::acquire() {
- if (mWakeLockCount == 0) {
- CHECK(mWakeLockToken == NULL);
- if (mPowerManager == NULL) {
- // use checkService() to avoid blocking if power service is not up yet
- sp<IBinder> binder =
- defaultServiceManager()->checkService(String16("power"));
- if (binder == NULL) {
- ALOGW("could not get the power manager service");
- } else {
- mPowerManager = interface_cast<IPowerManager>(binder);
- binder->linkToDeath(mDeathRecipient);
- }
- }
- if (mPowerManager != NULL) {
- sp<IBinder> binder = new BBinder();
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- status_t status = mPowerManager->acquireWakeLock(
- POWERMANAGER_PARTIAL_WAKE_LOCK,
- binder, String16("AWakeLock"), String16("media"));
- IPCThreadState::self()->restoreCallingIdentity(token);
- if (status == NO_ERROR) {
- mWakeLockToken = binder;
- mWakeLockCount++;
- return true;
- }
- }
- } else {
- mWakeLockCount++;
- return true;
- }
- return false;
-}
-
-void AWakeLock::release(bool force) {
- if (mWakeLockCount == 0) {
- return;
- }
- if (force) {
- // Force wakelock release below by setting reference count to 1.
- mWakeLockCount = 1;
- }
- if (--mWakeLockCount == 0) {
- CHECK(mWakeLockToken != NULL);
- if (mPowerManager != NULL) {
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mPowerManager->releaseWakeLock(mWakeLockToken, 0 /* flags */);
- IPCThreadState::self()->restoreCallingIdentity(token);
- }
- mWakeLockToken.clear();
- }
-}
-
-void AWakeLock::clearPowerManager() {
- release(true);
- mPowerManager.clear();
-}
-
-void AWakeLock::PMDeathRecipient::binderDied(const wp<IBinder>& who __unused) {
- if (mWakeLock != NULL) {
- mWakeLock->clearPowerManager();
- }
-}
-
-} // namespace android
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index a07fcb0..6b384c0 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -4,7 +4,7 @@
vendor_available: true,
}
-cc_library_shared {
+cc_library {
name: "libstagefright_foundation",
vendor_available: true,
vndk: {
@@ -14,6 +14,8 @@
include_dirs: [
"frameworks/av/include",
"frameworks/native/include",
+ "frameworks/native/libs/arect/include",
+ "frameworks/native/libs/nativebase/include",
],
local_include_dirs: [
@@ -23,15 +25,16 @@
header_libs: [
"libhardware_headers",
"libstagefright_foundation_headers",
+ "media_plugin_headers",
],
export_header_lib_headers: [
"libstagefright_foundation_headers",
+ "media_plugin_headers",
],
export_shared_lib_headers: [
"libbinder",
- "libui",
],
cflags: [
@@ -43,10 +46,8 @@
shared_libs: [
"libbinder",
"libutils",
- "libui",
"libcutils",
"liblog",
- "libpowermanager",
],
srcs: [
@@ -62,23 +63,16 @@
"ANetworkSession.cpp",
"AString.cpp",
"AStringUtils.cpp",
- "AWakeLock.cpp",
+ "ByteUtils.cpp",
"ColorUtils.cpp",
- "MediaBuffer.cpp",
- "MediaBufferGroup.cpp",
- "MetaData.cpp",
+ "MediaDefs.cpp",
+ "MediaKeys.cpp",
"ParsedMessage.cpp",
+ "avc_utils.cpp",
"base64.cpp",
"hexdump.cpp",
],
- target: {
- vendor: {
- exclude_shared_libs: ["libpowermanager"],
- exclude_srcs: ["AWakeLock.cpp"],
- },
- },
-
clang: true,
sanitize: {
diff --git a/media/libstagefright/foundation/ByteUtils.cpp b/media/libstagefright/foundation/ByteUtils.cpp
new file mode 100644
index 0000000..14d40aa
--- /dev/null
+++ b/media/libstagefright/foundation/ByteUtils.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ByteUtils"
+
+#include <media/stagefright/foundation/ByteUtils.h>
+
+namespace android {
+
+uint16_t U16_AT(const uint8_t *ptr) {
+ return ptr[0] << 8 | ptr[1];
+}
+
+uint32_t U32_AT(const uint8_t *ptr) {
+ return ptr[0] << 24 | ptr[1] << 16 | ptr[2] << 8 | ptr[3];
+}
+
+uint64_t U64_AT(const uint8_t *ptr) {
+ return ((uint64_t)U32_AT(ptr)) << 32 | U32_AT(ptr + 4);
+}
+
+uint16_t U16LE_AT(const uint8_t *ptr) {
+ return ptr[0] | (ptr[1] << 8);
+}
+
+uint32_t U32LE_AT(const uint8_t *ptr) {
+ return ptr[3] << 24 | ptr[2] << 16 | ptr[1] << 8 | ptr[0];
+}
+
+uint64_t U64LE_AT(const uint8_t *ptr) {
+ return ((uint64_t)U32LE_AT(ptr + 4)) << 32 | U32LE_AT(ptr);
+}
+
+// XXX warning: these won't work on big-endian host.
+uint64_t ntoh64(uint64_t x) {
+ return ((uint64_t)ntohl(x & 0xffffffff) << 32) | ntohl(x >> 32);
+}
+
+uint64_t hton64(uint64_t x) {
+ return ((uint64_t)htonl(x & 0xffffffff) << 32) | htonl(x >> 32);
+}
+
+void MakeFourCCString(uint32_t x, char *s) {
+ s[0] = x >> 24;
+ s[1] = (x >> 16) & 0xff;
+ s[2] = (x >> 8) & 0xff;
+ s[3] = x & 0xff;
+ s[4] = '\0';
+}
+
+} // namespace android
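
An editorial usage sketch of the byte-order helpers above (illustrative only):

    const uint8_t bytes[4] = { 0x61, 0x76, 0x63, 0x31 };  // "avc1"
    uint32_t be = U32_AT(bytes);                           // 0x61766331, big-endian read
    uint32_t le = U32LE_AT(bytes);                         // 0x31637661, little-endian read

    char fourcc[5];
    MakeFourCCString(be, fourcc);                          // fourcc == "avc1"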
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
index 88a8351..c4eaa27 100644
--- a/media/libstagefright/foundation/ColorUtils.cpp
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -398,6 +398,7 @@
}
// TODO: move this into a Video HAL
+const static
ALookup<CU::ColorStandard, std::pair<CA::Primaries, CA::MatrixCoeffs>> sStandardFallbacks {
{
{ CU::kColorStandardBT601_625, { CA::PrimariesBT709_5, CA::MatrixBT470_6M } },
@@ -420,6 +421,7 @@
}
};
+const static
ALookup<CU::ColorStandard, CA::Primaries> sStandardPrimariesFallbacks {
{
{ CU::kColorStandardFilm, CA::PrimariesGenericFilm },
@@ -430,7 +432,8 @@
}
};
-static ALookup<android_dataspace, android_dataspace> sLegacyDataSpaceToV0 {
+const static
+ALookup<android_dataspace, android_dataspace> sLegacyDataSpaceToV0 {
{
{ HAL_DATASPACE_SRGB, HAL_DATASPACE_V0_SRGB },
{ HAL_DATASPACE_BT709, HAL_DATASPACE_V0_BT709 },
@@ -441,6 +444,73 @@
}
};
+#define GET_HAL_ENUM(class, name) HAL_DATASPACE_##class##name
+#define GET_HAL_BITFIELD(class, name) (GET_HAL_ENUM(class, _##name) >> GET_HAL_ENUM(class, _SHIFT))
+
+const static
+ALookup<CU::ColorStandard, uint32_t> sGfxStandards {
+ {
+ { CU::kColorStandardUnspecified, GET_HAL_BITFIELD(STANDARD, UNSPECIFIED) },
+ { CU::kColorStandardBT709, GET_HAL_BITFIELD(STANDARD, BT709) },
+ { CU::kColorStandardBT601_625, GET_HAL_BITFIELD(STANDARD, BT601_625) },
+ { CU::kColorStandardBT601_625_Unadjusted, GET_HAL_BITFIELD(STANDARD, BT601_625_UNADJUSTED) },
+ { CU::kColorStandardBT601_525, GET_HAL_BITFIELD(STANDARD, BT601_525) },
+ { CU::kColorStandardBT601_525_Unadjusted, GET_HAL_BITFIELD(STANDARD, BT601_525_UNADJUSTED) },
+ { CU::kColorStandardBT2020, GET_HAL_BITFIELD(STANDARD, BT2020) },
+ { CU::kColorStandardBT2020Constant, GET_HAL_BITFIELD(STANDARD, BT2020_CONSTANT_LUMINANCE) },
+ { CU::kColorStandardBT470M, GET_HAL_BITFIELD(STANDARD, BT470M) },
+ { CU::kColorStandardFilm, GET_HAL_BITFIELD(STANDARD, FILM) },
+ { CU::kColorStandardDCI_P3, GET_HAL_BITFIELD(STANDARD, DCI_P3) },
+ }
+};
+
+// verify public values are stable
+static_assert(CU::kColorStandardUnspecified == 0, "SDK mismatch"); // N
+static_assert(CU::kColorStandardBT709 == 1, "SDK mismatch"); // N
+static_assert(CU::kColorStandardBT601_625 == 2, "SDK mismatch"); // N
+static_assert(CU::kColorStandardBT601_525 == 4, "SDK mismatch"); // N
+static_assert(CU::kColorStandardBT2020 == 6, "SDK mismatch"); // N
+
+const static
+ALookup<CU::ColorTransfer, uint32_t> sGfxTransfers {
+ {
+ { CU::kColorTransferUnspecified, GET_HAL_BITFIELD(TRANSFER, UNSPECIFIED) },
+ { CU::kColorTransferLinear, GET_HAL_BITFIELD(TRANSFER, LINEAR) },
+ { CU::kColorTransferSRGB, GET_HAL_BITFIELD(TRANSFER, SRGB) },
+ { CU::kColorTransferSMPTE_170M, GET_HAL_BITFIELD(TRANSFER, SMPTE_170M) },
+ { CU::kColorTransferGamma22, GET_HAL_BITFIELD(TRANSFER, GAMMA2_2) },
+ { CU::kColorTransferGamma28, GET_HAL_BITFIELD(TRANSFER, GAMMA2_8) },
+ { CU::kColorTransferST2084, GET_HAL_BITFIELD(TRANSFER, ST2084) },
+ { CU::kColorTransferHLG, GET_HAL_BITFIELD(TRANSFER, HLG) },
+ }
+};
+
+// verify public values are stable
+static_assert(CU::kColorTransferUnspecified == 0, "SDK mismatch"); // N
+static_assert(CU::kColorTransferLinear == 1, "SDK mismatch"); // N
+static_assert(CU::kColorTransferSRGB == 2, "SDK mismatch"); // N
+static_assert(CU::kColorTransferSMPTE_170M == 3, "SDK mismatch"); // N
+static_assert(CU::kColorTransferST2084 == 6, "SDK mismatch"); // N
+static_assert(CU::kColorTransferHLG == 7, "SDK mismatch"); // N
+
+const static
+ALookup<CU::ColorRange, uint32_t> sGfxRanges {
+ {
+ { CU::kColorRangeUnspecified, GET_HAL_BITFIELD(RANGE, UNSPECIFIED) },
+ { CU::kColorRangeFull, GET_HAL_BITFIELD(RANGE, FULL) },
+ { CU::kColorRangeLimited, GET_HAL_BITFIELD(RANGE, LIMITED) },
+ }
+};
+
+// verify public values are stable
+static_assert(CU::kColorRangeUnspecified == 0, "SDK mismatch"); // N
+static_assert(CU::kColorRangeFull == 1, "SDK mismatch"); // N
+static_assert(CU::kColorRangeLimited == 2, "SDK mismatch"); // N
+
+#undef GET_HAL_BITFIELD
+#undef GET_HAL_ENUM
+
+
bool ColorUtils::convertDataSpaceToV0(android_dataspace &dataSpace) {
(void)sLegacyDataSpaceToV0.lookup(dataSpace, &dataSpace);
return (dataSpace & 0xC000FFFF) == 0;
@@ -507,9 +577,23 @@
}
}
+ // assume 1-to-1 mapping to HAL values (to deal with potential vendor extensions)
+ uint32_t gfxRange = range;
+ uint32_t gfxStandard = standard;
+ uint32_t gfxTransfer = transfer;
+ // TRICKY: use & to ensure all three mappings are completed
+ if (!(sGfxRanges.map(range, &gfxRange) & sGfxStandards.map(standard, &gfxStandard)
+ & sGfxTransfers.map(transfer, &gfxTransfer))) {
+ ALOGW("could not safely map platform color aspects (R:%u(%s) S:%u(%s) T:%u(%s) to "
+ "graphics dataspace (R:%u S:%u T:%u)",
+ range, asString(range), standard, asString(standard), transfer, asString(transfer),
+ gfxRange, gfxStandard, gfxTransfer);
+ }
+
android_dataspace dataSpace = (android_dataspace)(
- (range << HAL_DATASPACE_RANGE_SHIFT) | (standard << HAL_DATASPACE_STANDARD_SHIFT) |
- (transfer << HAL_DATASPACE_TRANSFER_SHIFT));
+ (gfxRange << HAL_DATASPACE_RANGE_SHIFT) |
+ (gfxStandard << HAL_DATASPACE_STANDARD_SHIFT) |
+ (gfxTransfer << HAL_DATASPACE_TRANSFER_SHIFT));
(void)sLegacyDataSpaceToV0.rlookup(dataSpace, &dataSpace);
if (!mayExpand) {
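
Editorial note on the TRICKY comment above: a logical && would short-circuit after the first failed lookup and leave the remaining gfx* outputs at their pass-through defaults, whereas the bitwise & evaluates all three map() calls unconditionally. Reduced to its essence (illustrative only):

    uint32_t gfxRange = range, gfxStandard = standard, gfxTransfer = transfer;  // pass-through defaults
    bool allMapped = sGfxRanges.map(range, &gfxRange)
            & sGfxStandards.map(standard, &gfxStandard)    // still evaluated even if the first map failed
            & sGfxTransfers.map(transfer, &gfxTransfer);   // allMapped is false if any lookup missed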
diff --git a/media/libstagefright/foundation/MediaBuffer.cpp b/media/libstagefright/foundation/MediaBuffer.cpp
deleted file mode 100644
index 16000ef..0000000
--- a/media/libstagefright/foundation/MediaBuffer.cpp
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaBuffer"
-#include <utils/Log.h>
-
-#include <errno.h>
-#include <pthread.h>
-#include <stdlib.h>
-
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MetaData.h>
-
-#include <ui/GraphicBuffer.h>
-
-namespace android {
-
-/* static */
-std::atomic_int_least32_t MediaBuffer::mUseSharedMemory(0);
-
-MediaBuffer::MediaBuffer(void *data, size_t size)
- : mObserver(NULL),
- mRefCount(0),
- mData(data),
- mSize(size),
- mRangeOffset(0),
- mRangeLength(size),
- mOwnsData(false),
- mMetaData(new MetaData),
- mOriginal(NULL) {
-}
-
-MediaBuffer::MediaBuffer(size_t size)
- : mObserver(NULL),
- mRefCount(0),
- mData(NULL),
- mSize(size),
- mRangeOffset(0),
- mRangeLength(size),
- mOwnsData(true),
- mMetaData(new MetaData),
- mOriginal(NULL) {
- if (size < kSharedMemThreshold
- || std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
- mData = malloc(size);
- } else {
- ALOGV("creating memoryDealer");
- sp<MemoryDealer> memoryDealer =
- new MemoryDealer(size + sizeof(SharedControl), "MediaBuffer");
- mMemory = memoryDealer->allocate(size + sizeof(SharedControl));
- if (mMemory == NULL) {
- ALOGW("Failed to allocate shared memory, trying regular allocation!");
- mData = malloc(size);
- if (mData == NULL) {
- ALOGE("Out of memory");
- }
- } else {
- getSharedControl()->clear();
- mData = (uint8_t *)mMemory->pointer() + sizeof(SharedControl);
- ALOGV("Allocated shared mem buffer of size %zu @ %p", size, mData);
- }
- }
-}
-
-MediaBuffer::MediaBuffer(const sp<GraphicBuffer>& graphicBuffer)
- : mObserver(NULL),
- mRefCount(0),
- mData(NULL),
- mSize(1),
- mRangeOffset(0),
- mRangeLength(mSize),
- mGraphicBuffer(graphicBuffer),
- mOwnsData(false),
- mMetaData(new MetaData),
- mOriginal(NULL) {
-}
-
-MediaBuffer::MediaBuffer(const sp<ABuffer> &buffer)
- : mObserver(NULL),
- mRefCount(0),
- mData(buffer->data()),
- mSize(buffer->size()),
- mRangeOffset(0),
- mRangeLength(mSize),
- mBuffer(buffer),
- mOwnsData(false),
- mMetaData(new MetaData),
- mOriginal(NULL) {
-}
-
-void MediaBuffer::release() {
- if (mObserver == NULL) {
- // Legacy contract for MediaBuffer without a MediaBufferGroup.
- CHECK_EQ(mRefCount, 0);
- delete this;
- return;
- }
-
- int prevCount = __sync_fetch_and_sub(&mRefCount, 1);
- if (prevCount == 1) {
- if (mObserver == NULL) {
- delete this;
- return;
- }
-
- mObserver->signalBufferReturned(this);
- }
- CHECK(prevCount > 0);
-}
-
-void MediaBuffer::claim() {
- CHECK(mObserver != NULL);
- CHECK_EQ(mRefCount, 1);
-
- mRefCount = 0;
-}
-
-void MediaBuffer::add_ref() {
- (void) __sync_fetch_and_add(&mRefCount, 1);
-}
-
-void *MediaBuffer::data() const {
- CHECK(mGraphicBuffer == NULL);
- return mData;
-}
-
-size_t MediaBuffer::size() const {
- CHECK(mGraphicBuffer == NULL);
- return mSize;
-}
-
-size_t MediaBuffer::range_offset() const {
- return mRangeOffset;
-}
-
-size_t MediaBuffer::range_length() const {
- return mRangeLength;
-}
-
-void MediaBuffer::set_range(size_t offset, size_t length) {
- if ((mGraphicBuffer == NULL) && (offset + length > mSize)) {
- ALOGE("offset = %zu, length = %zu, mSize = %zu", offset, length, mSize);
- }
- CHECK((mGraphicBuffer != NULL) || (offset + length <= mSize));
-
- mRangeOffset = offset;
- mRangeLength = length;
-}
-
-sp<GraphicBuffer> MediaBuffer::graphicBuffer() const {
- return mGraphicBuffer;
-}
-
-sp<MetaData> MediaBuffer::meta_data() {
- return mMetaData;
-}
-
-void MediaBuffer::reset() {
- mMetaData->clear();
- set_range(0, mSize);
-}
-
-MediaBuffer::~MediaBuffer() {
- CHECK(mObserver == NULL);
-
- if (mOwnsData && mData != NULL && mMemory == NULL) {
- free(mData);
- mData = NULL;
- }
-
- if (mOriginal != NULL) {
- mOriginal->release();
- mOriginal = NULL;
- }
-
- if (mMemory.get() != nullptr) {
- getSharedControl()->setDeadObject();
- }
-}
-
-void MediaBuffer::setObserver(MediaBufferObserver *observer) {
- CHECK(observer == NULL || mObserver == NULL);
- mObserver = observer;
-}
-
-MediaBuffer *MediaBuffer::clone() {
- CHECK(mGraphicBuffer == NULL);
-
- MediaBuffer *buffer = new MediaBuffer(mData, mSize);
- buffer->set_range(mRangeOffset, mRangeLength);
- buffer->mMetaData = new MetaData(*mMetaData.get());
-
- add_ref();
- buffer->mOriginal = this;
-
- return buffer;
-}
-
-} // namespace android
diff --git a/media/libstagefright/foundation/MediaBufferGroup.cpp b/media/libstagefright/foundation/MediaBufferGroup.cpp
deleted file mode 100644
index cb62d92..0000000
--- a/media/libstagefright/foundation/MediaBufferGroup.cpp
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaBufferGroup"
-#include <utils/Log.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-
-namespace android {
-
-// std::min is not constexpr in C++11
-template<typename T>
-constexpr T MIN(const T &a, const T &b) { return a <= b ? a : b; }
-
-// MediaBufferGroup may create shared memory buffers at a
-// smaller threshold than an isolated new MediaBuffer.
-static const size_t kSharedMemoryThreshold = MIN(
- (size_t)MediaBuffer::kSharedMemThreshold, (size_t)(4 * 1024));
-
-MediaBufferGroup::MediaBufferGroup(size_t growthLimit) :
- mGrowthLimit(growthLimit) {
-}
-
-MediaBufferGroup::MediaBufferGroup(size_t buffers, size_t buffer_size, size_t growthLimit)
- : mGrowthLimit(growthLimit) {
-
- if (mGrowthLimit > 0 && buffers > mGrowthLimit) {
- ALOGW("Preallocated buffers %zu > growthLimit %zu, increasing growthLimit",
- buffers, mGrowthLimit);
- mGrowthLimit = buffers;
- }
-
- if (buffer_size >= kSharedMemoryThreshold) {
- ALOGD("creating MemoryDealer");
- // Using a single MemoryDealer is efficient for a group of shared memory objects.
- // This loop guarantees that we use shared memory (no fallback to malloc).
-
- size_t alignment = MemoryDealer::getAllocationAlignment();
- size_t augmented_size = buffer_size + sizeof(MediaBuffer::SharedControl);
- size_t total = (augmented_size + alignment - 1) / alignment * alignment * buffers;
- sp<MemoryDealer> memoryDealer = new MemoryDealer(total, "MediaBufferGroup");
-
- for (size_t i = 0; i < buffers; ++i) {
- sp<IMemory> mem = memoryDealer->allocate(augmented_size);
- if (mem.get() == nullptr || mem->pointer() == nullptr) {
- ALOGW("Only allocated %zu shared buffers of size %zu", i, buffer_size);
- break;
- }
- MediaBuffer *buffer = new MediaBuffer(mem);
- buffer->getSharedControl()->clear();
- add_buffer(buffer);
- }
- return;
- }
-
- // Non-shared memory allocation.
- for (size_t i = 0; i < buffers; ++i) {
- MediaBuffer *buffer = new MediaBuffer(buffer_size);
- if (buffer->data() == nullptr) {
- delete buffer; // don't call release, it's not properly formed
- ALOGW("Only allocated %zu malloc buffers of size %zu", i, buffer_size);
- break;
- }
- add_buffer(buffer);
- }
-}
-
-MediaBufferGroup::~MediaBufferGroup() {
- for (MediaBuffer *buffer : mBuffers) {
- if (buffer->refcount() != 0) {
- const int localRefcount = buffer->localRefcount();
- const int remoteRefcount = buffer->remoteRefcount();
-
- // Fatal if we have a local refcount.
- LOG_ALWAYS_FATAL_IF(localRefcount != 0,
- "buffer(%p) localRefcount %d != 0, remoteRefcount %d",
- buffer, localRefcount, remoteRefcount);
-
- // Log an error if we have a remaining remote refcount,
- // as the remote process may have died or may have inappropriate behavior.
- // The shared memory associated with the MediaBuffer will
- // automatically be reclaimed when there are no remaining fds
- // associated with it.
- ALOGE("buffer(%p) has residual remoteRefcount %d",
- buffer, remoteRefcount);
- }
- // gracefully delete.
- buffer->setObserver(nullptr);
- buffer->release();
- }
-}
-
-void MediaBufferGroup::add_buffer(MediaBuffer *buffer) {
- Mutex::Autolock autoLock(mLock);
-
- // if we're above our growth limit, release buffers if we can
- for (auto it = mBuffers.begin();
- mGrowthLimit > 0
- && mBuffers.size() >= mGrowthLimit
- && it != mBuffers.end();) {
- if ((*it)->refcount() == 0) {
- (*it)->setObserver(nullptr);
- (*it)->release();
- it = mBuffers.erase(it);
- } else {
- ++it;
- }
- }
-
- buffer->setObserver(this);
- mBuffers.emplace_back(buffer);
-}
-
-bool MediaBufferGroup::has_buffers() {
- if (mBuffers.size() < mGrowthLimit) {
- return true; // We can add more buffers internally.
- }
- for (MediaBuffer *buffer : mBuffers) {
- if (buffer->refcount() == 0) {
- return true;
- }
- }
- return false;
-}
-
-status_t MediaBufferGroup::acquire_buffer(
- MediaBuffer **out, bool nonBlocking, size_t requestedSize) {
- Mutex::Autolock autoLock(mLock);
- for (;;) {
- size_t smallest = requestedSize;
- MediaBuffer *buffer = nullptr;
- auto free = mBuffers.end();
- for (auto it = mBuffers.begin(); it != mBuffers.end(); ++it) {
- if ((*it)->refcount() == 0) {
- const size_t size = (*it)->size();
- if (size >= requestedSize) {
- buffer = *it;
- break;
- }
- if (size < smallest) {
- smallest = size; // always free the smallest buf
- free = it;
- }
- }
- }
- if (buffer == nullptr
- && (free != mBuffers.end() || mBuffers.size() < mGrowthLimit)) {
- // We alloc before we free so failure leaves group unchanged.
- const size_t allocateSize = requestedSize < SIZE_MAX / 3 * 2 /* NB: ordering */ ?
- requestedSize * 3 / 2 : requestedSize;
- buffer = new MediaBuffer(allocateSize);
- if (buffer->data() == nullptr) {
- ALOGE("Allocation failure for size %zu", allocateSize);
- delete buffer; // Invalid alloc, prefer not to call release.
- buffer = nullptr;
- } else {
- buffer->setObserver(this);
- if (free != mBuffers.end()) {
- ALOGV("reallocate buffer, requested size %zu vs available %zu",
- requestedSize, (*free)->size());
- (*free)->setObserver(nullptr);
- (*free)->release();
- *free = buffer; // in-place replace
- } else {
- ALOGV("allocate buffer, requested size %zu", requestedSize);
- mBuffers.emplace_back(buffer);
- }
- }
- }
- if (buffer != nullptr) {
- buffer->add_ref();
- buffer->reset();
- *out = buffer;
- return OK;
- }
- if (nonBlocking) {
- *out = nullptr;
- return WOULD_BLOCK;
- }
- // All buffers are in use, block until one of them is returned.
- mCondition.wait(mLock);
- }
- // Never gets here.
-}
-
-void MediaBufferGroup::signalBufferReturned(MediaBuffer *) {
- Mutex::Autolock autoLock(mLock);
- mCondition.signal();
-}
-
-} // namespace android
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
new file mode 100644
index 0000000..1695c75
--- /dev/null
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/foundation/MediaDefs.h>
+
+namespace android {
+
+const char *MEDIA_MIMETYPE_IMAGE_JPEG = "image/jpeg";
+const char *MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC = "image/vnd.android.heic";
+
+const char *MEDIA_MIMETYPE_VIDEO_VP8 = "video/x-vnd.on2.vp8";
+const char *MEDIA_MIMETYPE_VIDEO_VP9 = "video/x-vnd.on2.vp9";
+const char *MEDIA_MIMETYPE_VIDEO_AVC = "video/avc";
+const char *MEDIA_MIMETYPE_VIDEO_HEVC = "video/hevc";
+const char *MEDIA_MIMETYPE_VIDEO_MPEG4 = "video/mp4v-es";
+const char *MEDIA_MIMETYPE_VIDEO_H263 = "video/3gpp";
+const char *MEDIA_MIMETYPE_VIDEO_MPEG2 = "video/mpeg2";
+const char *MEDIA_MIMETYPE_VIDEO_RAW = "video/raw";
+const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION = "video/dolby-vision";
+const char *MEDIA_MIMETYPE_VIDEO_SCRAMBLED = "video/scrambled";
+
+const char *MEDIA_MIMETYPE_AUDIO_AMR_NB = "audio/3gpp";
+const char *MEDIA_MIMETYPE_AUDIO_AMR_WB = "audio/amr-wb";
+const char *MEDIA_MIMETYPE_AUDIO_MPEG = "audio/mpeg";
+const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I = "audio/mpeg-L1";
+const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II = "audio/mpeg-L2";
+const char *MEDIA_MIMETYPE_AUDIO_MIDI = "audio/midi";
+const char *MEDIA_MIMETYPE_AUDIO_AAC = "audio/mp4a-latm";
+const char *MEDIA_MIMETYPE_AUDIO_QCELP = "audio/qcelp";
+const char *MEDIA_MIMETYPE_AUDIO_VORBIS = "audio/vorbis";
+const char *MEDIA_MIMETYPE_AUDIO_OPUS = "audio/opus";
+const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW = "audio/g711-alaw";
+const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW = "audio/g711-mlaw";
+const char *MEDIA_MIMETYPE_AUDIO_RAW = "audio/raw";
+const char *MEDIA_MIMETYPE_AUDIO_FLAC = "audio/flac";
+const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts";
+const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
+const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
+const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
+
+const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
+const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
+const char *MEDIA_MIMETYPE_CONTAINER_OGG = "application/ogg";
+const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA = "video/x-matroska";
+const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS = "video/mp2ts";
+const char *MEDIA_MIMETYPE_CONTAINER_AVI = "video/avi";
+const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS = "video/mp2p";
+const char *MEDIA_MIMETYPE_CONTAINER_HEIF = "image/heif";
+
+const char *MEDIA_MIMETYPE_TEXT_3GPP = "text/3gpp-tt";
+const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
+const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
+const char *MEDIA_MIMETYPE_TEXT_CEA_608 = "text/cea-608";
+const char *MEDIA_MIMETYPE_TEXT_CEA_708 = "text/cea-708";
+const char *MEDIA_MIMETYPE_DATA_TIMED_ID3 = "application/x-id3v4";
+
+} // namespace android
diff --git a/media/libstagefright/foundation/MediaKeys.cpp b/media/libstagefright/foundation/MediaKeys.cpp
new file mode 100644
index 0000000..53920c9
--- /dev/null
+++ b/media/libstagefright/foundation/MediaKeys.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/foundation/MediaKeys.h>
+
+namespace android {
+
+const char *const kIStreamListenerKeyDiscontinuityMask = "discontinuity-mask";
+const char *const kATSParserKeyResumeAtPTS = "resume-at-PTS";
+const char *const kATSParserKeyMediaTimeUs = "media-time-us";
+const char *const kATSParserKeyRecentMediaTimeUs = "recent-media-time-us";
+
+} // namespace android
diff --git a/media/libstagefright/foundation/MetaData.cpp b/media/libstagefright/foundation/MetaData.cpp
deleted file mode 100644
index a8965f0..0000000
--- a/media/libstagefright/foundation/MetaData.cpp
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MetaData"
-#include <inttypes.h>
-#include <utils/Log.h>
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-MetaData::MetaData() {
-}
-
-MetaData::MetaData(const MetaData &from)
- : RefBase(),
- mItems(from.mItems) {
-}
-
-MetaData::~MetaData() {
- clear();
-}
-
-void MetaData::clear() {
- mItems.clear();
-}
-
-bool MetaData::remove(uint32_t key) {
- ssize_t i = mItems.indexOfKey(key);
-
- if (i < 0) {
- return false;
- }
-
- mItems.removeItemsAt(i);
-
- return true;
-}
-
-bool MetaData::setCString(uint32_t key, const char *value) {
- return setData(key, TYPE_C_STRING, value, strlen(value) + 1);
-}
-
-bool MetaData::setInt32(uint32_t key, int32_t value) {
- return setData(key, TYPE_INT32, &value, sizeof(value));
-}
-
-bool MetaData::setInt64(uint32_t key, int64_t value) {
- return setData(key, TYPE_INT64, &value, sizeof(value));
-}
-
-bool MetaData::setFloat(uint32_t key, float value) {
- return setData(key, TYPE_FLOAT, &value, sizeof(value));
-}
-
-bool MetaData::setPointer(uint32_t key, void *value) {
- return setData(key, TYPE_POINTER, &value, sizeof(value));
-}
-
-bool MetaData::setRect(
- uint32_t key,
- int32_t left, int32_t top,
- int32_t right, int32_t bottom) {
- Rect r;
- r.mLeft = left;
- r.mTop = top;
- r.mRight = right;
- r.mBottom = bottom;
-
- return setData(key, TYPE_RECT, &r, sizeof(r));
-}
-
-/**
- * Note that the returned pointer becomes invalid when additional metadata is set.
- */
-bool MetaData::findCString(uint32_t key, const char **value) {
- uint32_t type;
- const void *data;
- size_t size;
- if (!findData(key, &type, &data, &size) || type != TYPE_C_STRING) {
- return false;
- }
-
- *value = (const char *)data;
-
- return true;
-}
-
-bool MetaData::findInt32(uint32_t key, int32_t *value) {
- uint32_t type = 0;
- const void *data;
- size_t size;
- if (!findData(key, &type, &data, &size) || type != TYPE_INT32) {
- return false;
- }
-
- CHECK_EQ(size, sizeof(*value));
-
- *value = *(int32_t *)data;
-
- return true;
-}
-
-bool MetaData::findInt64(uint32_t key, int64_t *value) {
- uint32_t type = 0;
- const void *data;
- size_t size;
- if (!findData(key, &type, &data, &size) || type != TYPE_INT64) {
- return false;
- }
-
- CHECK_EQ(size, sizeof(*value));
-
- *value = *(int64_t *)data;
-
- return true;
-}
-
-bool MetaData::findFloat(uint32_t key, float *value) {
- uint32_t type = 0;
- const void *data;
- size_t size;
- if (!findData(key, &type, &data, &size) || type != TYPE_FLOAT) {
- return false;
- }
-
- CHECK_EQ(size, sizeof(*value));
-
- *value = *(float *)data;
-
- return true;
-}
-
-bool MetaData::findPointer(uint32_t key, void **value) {
- uint32_t type = 0;
- const void *data;
- size_t size;
- if (!findData(key, &type, &data, &size) || type != TYPE_POINTER) {
- return false;
- }
-
- CHECK_EQ(size, sizeof(*value));
-
- *value = *(void **)data;
-
- return true;
-}
-
-bool MetaData::findRect(
- uint32_t key,
- int32_t *left, int32_t *top,
- int32_t *right, int32_t *bottom) {
- uint32_t type = 0;
- const void *data;
- size_t size;
- if (!findData(key, &type, &data, &size) || type != TYPE_RECT) {
- return false;
- }
-
- CHECK_EQ(size, sizeof(Rect));
-
- const Rect *r = (const Rect *)data;
- *left = r->mLeft;
- *top = r->mTop;
- *right = r->mRight;
- *bottom = r->mBottom;
-
- return true;
-}
-
-bool MetaData::setData(
- uint32_t key, uint32_t type, const void *data, size_t size) {
- bool overwrote_existing = true;
-
- ssize_t i = mItems.indexOfKey(key);
- if (i < 0) {
- typed_data item;
- i = mItems.add(key, item);
-
- overwrote_existing = false;
- }
-
- typed_data &item = mItems.editValueAt(i);
-
- item.setData(type, data, size);
-
- return overwrote_existing;
-}
-
-bool MetaData::findData(uint32_t key, uint32_t *type,
- const void **data, size_t *size) const {
- ssize_t i = mItems.indexOfKey(key);
-
- if (i < 0) {
- return false;
- }
-
- const typed_data &item = mItems.valueAt(i);
-
- item.getData(type, data, size);
-
- return true;
-}
-
-bool MetaData::hasData(uint32_t key) const {
- ssize_t i = mItems.indexOfKey(key);
-
- if (i < 0) {
- return false;
- }
-
- return true;
-}
-
-MetaData::typed_data::typed_data()
- : mType(0),
- mSize(0) {
-}
-
-MetaData::typed_data::~typed_data() {
- clear();
-}
-
-MetaData::typed_data::typed_data(const typed_data &from)
- : mType(from.mType),
- mSize(0) {
-
- void *dst = allocateStorage(from.mSize);
- if (dst) {
- memcpy(dst, from.storage(), mSize);
- }
-}
-
-MetaData::typed_data &MetaData::typed_data::operator=(
- const MetaData::typed_data &from) {
- if (this != &from) {
- clear();
- mType = from.mType;
- void *dst = allocateStorage(from.mSize);
- if (dst) {
- memcpy(dst, from.storage(), mSize);
- }
- }
-
- return *this;
-}
-
-void MetaData::typed_data::clear() {
- freeStorage();
-
- mType = 0;
-}
-
-void MetaData::typed_data::setData(
- uint32_t type, const void *data, size_t size) {
- clear();
-
- mType = type;
-
- void *dst = allocateStorage(size);
- if (dst) {
- memcpy(dst, data, size);
- }
-}
-
-void MetaData::typed_data::getData(
- uint32_t *type, const void **data, size_t *size) const {
- *type = mType;
- *size = mSize;
- *data = storage();
-}
-
-void *MetaData::typed_data::allocateStorage(size_t size) {
- mSize = size;
-
- if (usesReservoir()) {
- return &u.reservoir;
- }
-
- u.ext_data = malloc(mSize);
- if (u.ext_data == NULL) {
- ALOGE("Couldn't allocate %zu bytes for item", size);
- mSize = 0;
- }
- return u.ext_data;
-}
-
-void MetaData::typed_data::freeStorage() {
- if (!usesReservoir()) {
- if (u.ext_data) {
- free(u.ext_data);
- u.ext_data = NULL;
- }
- }
-
- mSize = 0;
-}
-
-String8 MetaData::typed_data::asString(bool verbose) const {
- String8 out;
- const void *data = storage();
- switch(mType) {
- case TYPE_NONE:
- out = String8::format("no type, size %zu)", mSize);
- break;
- case TYPE_C_STRING:
- out = String8::format("(char*) %s", (const char *)data);
- break;
- case TYPE_INT32:
- out = String8::format("(int32_t) %d", *(int32_t *)data);
- break;
- case TYPE_INT64:
- out = String8::format("(int64_t) %" PRId64, *(int64_t *)data);
- break;
- case TYPE_FLOAT:
- out = String8::format("(float) %f", *(float *)data);
- break;
- case TYPE_POINTER:
- out = String8::format("(void*) %p", *(void **)data);
- break;
- case TYPE_RECT:
- {
- const Rect *r = (const Rect *)data;
- out = String8::format("Rect(%d, %d, %d, %d)",
- r->mLeft, r->mTop, r->mRight, r->mBottom);
- break;
- }
-
- default:
- out = String8::format("(unknown type %d, size %zu)", mType, mSize);
- if (verbose && mSize <= 48) { // if it's less than three lines of hex data, dump it
- AString foo;
- hexdump(data, mSize, 0, &foo);
- out.append("\n");
- out.append(foo.c_str());
- }
- break;
- }
- return out;
-}
-
-static void MakeFourCCString(uint32_t x, char *s) {
- s[0] = x >> 24;
- s[1] = (x >> 16) & 0xff;
- s[2] = (x >> 8) & 0xff;
- s[3] = x & 0xff;
- s[4] = '\0';
-}
-
-String8 MetaData::toString() const {
- String8 s;
- for (int i = mItems.size(); --i >= 0;) {
- int32_t key = mItems.keyAt(i);
- char cc[5];
- MakeFourCCString(key, cc);
- const typed_data &item = mItems.valueAt(i);
- s.appendFormat("%s: %s", cc, item.asString(false).string());
- if (i != 0) {
- s.append(", ");
- }
- }
- return s;
-}
-void MetaData::dumpToLog() const {
- for (int i = mItems.size(); --i >= 0;) {
- int32_t key = mItems.keyAt(i);
- char cc[5];
- MakeFourCCString(key, cc);
- const typed_data &item = mItems.valueAt(i);
- ALOGI("%s: %s", cc, item.asString(true /* verbose */).string());
- }
-}
-
-status_t MetaData::writeToParcel(Parcel &parcel) {
- status_t ret;
- size_t numItems = mItems.size();
- ret = parcel.writeUint32(uint32_t(numItems));
- if (ret) {
- return ret;
- }
- for (size_t i = 0; i < numItems; i++) {
- int32_t key = mItems.keyAt(i);
- const typed_data &item = mItems.valueAt(i);
- uint32_t type;
- const void *data;
- size_t size;
- item.getData(&type, &data, &size);
- ret = parcel.writeInt32(key);
- if (ret) {
- return ret;
- }
- ret = parcel.writeUint32(type);
- if (ret) {
- return ret;
- }
- if (type == TYPE_NONE) {
- android::Parcel::WritableBlob blob;
- ret = parcel.writeUint32(static_cast<uint32_t>(size));
- if (ret) {
- return ret;
- }
- ret = parcel.writeBlob(size, false, &blob);
- if (ret) {
- return ret;
- }
- memcpy(blob.data(), data, size);
- blob.release();
- } else {
- ret = parcel.writeByteArray(size, (uint8_t*)data);
- if (ret) {
- return ret;
- }
- }
- }
- return OK;
-}
-
-status_t MetaData::updateFromParcel(const Parcel &parcel) {
- uint32_t numItems;
- if (parcel.readUint32(&numItems) == OK) {
-
- for (size_t i = 0; i < numItems; i++) {
- int32_t key;
- uint32_t type;
- uint32_t size;
- status_t ret = parcel.readInt32(&key);
- ret |= parcel.readUint32(&type);
- ret |= parcel.readUint32(&size);
- if (ret != OK) {
- break;
- }
- // copy data from Blob, which may be inline in Parcel storage,
- // then advance position
- if (type == TYPE_NONE) {
- android::Parcel::ReadableBlob blob;
- ret = parcel.readBlob(size, &blob);
- if (ret != OK) {
- break;
- }
- setData(key, type, blob.data(), size);
- blob.release();
- } else {
- // copy data directly from Parcel storage, then advance position
- setData(key, type, parcel.readInplace(size), size);
- }
- }
-
- return OK;
- }
- ALOGW("no metadata in parcel");
- return UNKNOWN_ERROR;
-}
-
-
-/* static */
-sp<MetaData> MetaData::createFromParcel(const Parcel &parcel) {
-
- sp<MetaData> meta = new MetaData();
- meta->updateFromParcel(parcel);
- return meta;
-}
-
-
-
-} // namespace android
-
diff --git a/media/libstagefright/foundation/avc_utils.cpp b/media/libstagefright/foundation/avc_utils.cpp
new file mode 100644
index 0000000..e8a6083
--- /dev/null
+++ b/media/libstagefright/foundation/avc_utils.cpp
@@ -0,0 +1,773 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "avc_utils"
+#include <utils/Log.h>
+
+
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/misc.h>
+
+namespace android {
+
+unsigned parseUE(ABitReader *br) {
+ unsigned numZeroes = 0;
+ while (br->getBits(1) == 0) {
+ ++numZeroes;
+ }
+
+ unsigned x = br->getBits(numZeroes);
+
+ return x + (1u << numZeroes) - 1;
+}
+
+unsigned parseUEWithFallback(ABitReader *br, unsigned fallback) {
+ unsigned numZeroes = 0;
+ while (br->getBitsWithFallback(1, 1) == 0) {
+ ++numZeroes;
+ }
+ uint32_t x;
+ if (numZeroes < 32) {
+ if (br->getBitsGraceful(numZeroes, &x)) {
+ return x + (1u << numZeroes) - 1;
+ } else {
+ return fallback;
+ }
+ } else {
+ br->skipBits(numZeroes);
+ return fallback;
+ }
+}
+
+signed parseSE(ABitReader *br) {
+ unsigned codeNum = parseUE(br);
+
+ return (codeNum & 1) ? (codeNum + 1) / 2 : -signed(codeNum / 2);
+}
+
+signed parseSEWithFallback(ABitReader *br, signed fallback) {
+ // NOTE: parseUE cannot normally return ~0 as the max supported value is 0xFFFE
+ unsigned codeNum = parseUEWithFallback(br, ~0U);
+ if (codeNum == ~0U) {
+ return fallback;
+ }
+ return (codeNum & 1) ? (codeNum + 1) / 2 : -signed(codeNum / 2);
+}
+
+static void skipScalingList(ABitReader *br, size_t sizeOfScalingList) {
+ size_t lastScale = 8;
+ size_t nextScale = 8;
+ for (size_t j = 0; j < sizeOfScalingList; ++j) {
+ if (nextScale != 0) {
+ signed delta_scale = parseSE(br);
+ // ISO_IEC_14496-10_201402-ITU, 7.4.2.1.1.1, The value of delta_scale
+ // shall be in the range of −128 to +127, inclusive.
+ if (delta_scale < -128) {
+ ALOGW("delta_scale (%d) is below range, capped to -128", delta_scale);
+ delta_scale = -128;
+ } else if (delta_scale > 127) {
+ ALOGW("delta_scale (%d) is above range, capped to 127", delta_scale);
+ delta_scale = 127;
+ }
+ nextScale = (lastScale + (delta_scale + 256)) % 256;
+ }
+
+ lastScale = (nextScale == 0) ? lastScale : nextScale;
+ }
+}
+
+// Determine video dimensions from the sequence parameterset.
+void FindAVCDimensions(
+ const sp<ABuffer> &seqParamSet,
+ int32_t *width, int32_t *height,
+ int32_t *sarWidth, int32_t *sarHeight) {
+ ABitReader br(seqParamSet->data() + 1, seqParamSet->size() - 1);
+
+ unsigned profile_idc = br.getBits(8);
+ br.skipBits(16);
+ parseUE(&br); // seq_parameter_set_id
+
+ unsigned chroma_format_idc = 1; // 4:2:0 chroma format
+
+ if (profile_idc == 100 || profile_idc == 110
+ || profile_idc == 122 || profile_idc == 244
+ || profile_idc == 44 || profile_idc == 83 || profile_idc == 86) {
+ chroma_format_idc = parseUE(&br);
+ if (chroma_format_idc == 3) {
+ br.skipBits(1); // residual_colour_transform_flag
+ }
+ parseUE(&br); // bit_depth_luma_minus8
+ parseUE(&br); // bit_depth_chroma_minus8
+ br.skipBits(1); // qpprime_y_zero_transform_bypass_flag
+
+ if (br.getBits(1)) { // seq_scaling_matrix_present_flag
+ for (size_t i = 0; i < 8; ++i) {
+ if (br.getBits(1)) { // seq_scaling_list_present_flag[i]
+
+ // WARNING: the code below has not ever been exercised...
+ // need a real-world example.
+
+ if (i < 6) {
+ // ScalingList4x4[i],16,...
+ skipScalingList(&br, 16);
+ } else {
+ // ScalingList8x8[i-6],64,...
+ skipScalingList(&br, 64);
+ }
+ }
+ }
+ }
+ }
+
+ parseUE(&br); // log2_max_frame_num_minus4
+ unsigned pic_order_cnt_type = parseUE(&br);
+
+ if (pic_order_cnt_type == 0) {
+ parseUE(&br); // log2_max_pic_order_cnt_lsb_minus4
+ } else if (pic_order_cnt_type == 1) {
+ // offset_for_non_ref_pic, offset_for_top_to_bottom_field and
+ // offset_for_ref_frame are technically se(v), but since we are
+ // just skipping over them the midpoint does not matter.
+
+ br.getBits(1); // delta_pic_order_always_zero_flag
+ parseUE(&br); // offset_for_non_ref_pic
+ parseUE(&br); // offset_for_top_to_bottom_field
+
+ unsigned num_ref_frames_in_pic_order_cnt_cycle = parseUE(&br);
+ for (unsigned i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; ++i) {
+ parseUE(&br); // offset_for_ref_frame
+ }
+ }
+
+ parseUE(&br); // num_ref_frames
+ br.getBits(1); // gaps_in_frame_num_value_allowed_flag
+
+ unsigned pic_width_in_mbs_minus1 = parseUE(&br);
+ unsigned pic_height_in_map_units_minus1 = parseUE(&br);
+ unsigned frame_mbs_only_flag = br.getBits(1);
+
+ *width = pic_width_in_mbs_minus1 * 16 + 16;
+
+ *height = (2 - frame_mbs_only_flag)
+ * (pic_height_in_map_units_minus1 * 16 + 16);
+
+ if (!frame_mbs_only_flag) {
+ br.getBits(1); // mb_adaptive_frame_field_flag
+ }
+
+ br.getBits(1); // direct_8x8_inference_flag
+
+ if (br.getBits(1)) { // frame_cropping_flag
+ unsigned frame_crop_left_offset = parseUE(&br);
+ unsigned frame_crop_right_offset = parseUE(&br);
+ unsigned frame_crop_top_offset = parseUE(&br);
+ unsigned frame_crop_bottom_offset = parseUE(&br);
+
+ unsigned cropUnitX, cropUnitY;
+ if (chroma_format_idc == 0 /* monochrome */) {
+ cropUnitX = 1;
+ cropUnitY = 2 - frame_mbs_only_flag;
+ } else {
+ unsigned subWidthC = (chroma_format_idc == 3) ? 1 : 2;
+ unsigned subHeightC = (chroma_format_idc == 1) ? 2 : 1;
+
+ cropUnitX = subWidthC;
+ cropUnitY = subHeightC * (2 - frame_mbs_only_flag);
+ }
+
+ ALOGV("frame_crop = (%u, %u, %u, %u), cropUnitX = %u, cropUnitY = %u",
+ frame_crop_left_offset, frame_crop_right_offset,
+ frame_crop_top_offset, frame_crop_bottom_offset,
+ cropUnitX, cropUnitY);
+
+
+ // *width -= (frame_crop_left_offset + frame_crop_right_offset) * cropUnitX;
+ if(__builtin_add_overflow(frame_crop_left_offset, frame_crop_right_offset, &frame_crop_left_offset) ||
+ __builtin_mul_overflow(frame_crop_left_offset, cropUnitX, &frame_crop_left_offset) ||
+ __builtin_sub_overflow(*width, frame_crop_left_offset, width) ||
+ *width < 0) {
+ *width = 0;
+ }
+
+ //*height -= (frame_crop_top_offset + frame_crop_bottom_offset) * cropUnitY;
+ if(__builtin_add_overflow(frame_crop_top_offset, frame_crop_bottom_offset, &frame_crop_top_offset) ||
+ __builtin_mul_overflow(frame_crop_top_offset, cropUnitY, &frame_crop_top_offset) ||
+ __builtin_sub_overflow(*height, frame_crop_top_offset, height) ||
+ *height < 0) {
+ *height = 0;
+ }
+ }
+
+ if (sarWidth != NULL) {
+ *sarWidth = 0;
+ }
+
+ if (sarHeight != NULL) {
+ *sarHeight = 0;
+ }
+
+ if (br.getBits(1)) { // vui_parameters_present_flag
+ unsigned sar_width = 0, sar_height = 0;
+
+ if (br.getBits(1)) { // aspect_ratio_info_present_flag
+ unsigned aspect_ratio_idc = br.getBits(8);
+
+ if (aspect_ratio_idc == 255 /* extendedSAR */) {
+ sar_width = br.getBits(16);
+ sar_height = br.getBits(16);
+ } else {
+ static const struct { unsigned width, height; } kFixedSARs[] = {
+ { 0, 0 }, // Invalid
+ { 1, 1 },
+ { 12, 11 },
+ { 10, 11 },
+ { 16, 11 },
+ { 40, 33 },
+ { 24, 11 },
+ { 20, 11 },
+ { 32, 11 },
+ { 80, 33 },
+ { 18, 11 },
+ { 15, 11 },
+ { 64, 33 },
+ { 160, 99 },
+ { 4, 3 },
+ { 3, 2 },
+ { 2, 1 },
+ };
+
+ if (aspect_ratio_idc > 0 && aspect_ratio_idc < NELEM(kFixedSARs)) {
+ sar_width = kFixedSARs[aspect_ratio_idc].width;
+ sar_height = kFixedSARs[aspect_ratio_idc].height;
+ }
+ }
+ }
+
+ ALOGV("sample aspect ratio = %u : %u", sar_width, sar_height);
+
+ if (sarWidth != NULL) {
+ *sarWidth = sar_width;
+ }
+
+ if (sarHeight != NULL) {
+ *sarHeight = sar_height;
+ }
+ }
+}
+
+status_t getNextNALUnit(
+ const uint8_t **_data, size_t *_size,
+ const uint8_t **nalStart, size_t *nalSize,
+ bool startCodeFollows) {
+ const uint8_t *data = *_data;
+ size_t size = *_size;
+
+ *nalStart = NULL;
+ *nalSize = 0;
+
+ if (size < 3) {
+ return -EAGAIN;
+ }
+
+ size_t offset = 0;
+
+ // A valid startcode consists of at least two 0x00 bytes followed by 0x01.
+ for (; offset + 2 < size; ++offset) {
+ if (data[offset + 2] == 0x01 && data[offset] == 0x00
+ && data[offset + 1] == 0x00) {
+ break;
+ }
+ }
+ if (offset + 2 >= size) {
+ *_data = &data[offset];
+ *_size = 2;
+ return -EAGAIN;
+ }
+ offset += 3;
+
+ size_t startOffset = offset;
+
+ for (;;) {
+ while (offset < size && data[offset] != 0x01) {
+ ++offset;
+ }
+
+ if (offset == size) {
+ if (startCodeFollows) {
+ offset = size + 2;
+ break;
+ }
+
+ return -EAGAIN;
+ }
+
+ if (data[offset - 1] == 0x00 && data[offset - 2] == 0x00) {
+ break;
+ }
+
+ ++offset;
+ }
+
+ size_t endOffset = offset - 2;
+ while (endOffset > startOffset + 1 && data[endOffset - 1] == 0x00) {
+ --endOffset;
+ }
+
+ *nalStart = &data[startOffset];
+ *nalSize = endOffset - startOffset;
+
+ if (offset + 2 < size) {
+ *_data = &data[offset - 2];
+ *_size = size - offset + 2;
+ } else {
+ *_data = NULL;
+ *_size = 0;
+ }
+
+ return OK;
+}
+
+static sp<ABuffer> FindNAL(const uint8_t *data, size_t size, unsigned nalType) {
+ const uint8_t *nalStart;
+ size_t nalSize;
+ while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
+ if (nalSize > 0 && (nalStart[0] & 0x1f) == nalType) {
+ sp<ABuffer> buffer = new ABuffer(nalSize);
+ memcpy(buffer->data(), nalStart, nalSize);
+ return buffer;
+ }
+ }
+
+ return NULL;
+}
+
+const char *AVCProfileToString(uint8_t profile) {
+ switch (profile) {
+ case kAVCProfileBaseline:
+ return "Baseline";
+ case kAVCProfileMain:
+ return "Main";
+ case kAVCProfileExtended:
+ return "Extended";
+ case kAVCProfileHigh:
+ return "High";
+ case kAVCProfileHigh10:
+ return "High 10";
+ case kAVCProfileHigh422:
+ return "High 422";
+ case kAVCProfileHigh444:
+ return "High 444";
+ case kAVCProfileCAVLC444Intra:
+ return "CAVLC 444 Intra";
+ default: return "Unknown";
+ }
+}
+
+sp<ABuffer> MakeAVCCodecSpecificData(
+ const sp<ABuffer> &accessUnit, int32_t *width, int32_t *height,
+ int32_t *sarWidth, int32_t *sarHeight) {
+ const uint8_t *data = accessUnit->data();
+ size_t size = accessUnit->size();
+
+ sp<ABuffer> seqParamSet = FindNAL(data, size, 7);
+ if (seqParamSet == NULL) {
+ return NULL;
+ }
+
+ FindAVCDimensions(
+ seqParamSet, width, height, sarWidth, sarHeight);
+
+ sp<ABuffer> picParamSet = FindNAL(data, size, 8);
+ CHECK(picParamSet != NULL);
+
+ size_t csdSize =
+ 1 + 3 + 1 + 1
+ + 2 * 1 + seqParamSet->size()
+ + 1 + 2 * 1 + picParamSet->size();
+
+ sp<ABuffer> csd = new ABuffer(csdSize);
+ uint8_t *out = csd->data();
+
+ *out++ = 0x01; // configurationVersion
+ memcpy(out, seqParamSet->data() + 1, 3); // profile/level...
+
+ uint8_t profile = out[0];
+ uint8_t level = out[2];
+
+ out += 3;
+ *out++ = (0x3f << 2) | 1; // lengthSize == 2 bytes
+ *out++ = 0xe0 | 1;
+
+ *out++ = seqParamSet->size() >> 8;
+ *out++ = seqParamSet->size() & 0xff;
+ memcpy(out, seqParamSet->data(), seqParamSet->size());
+ out += seqParamSet->size();
+
+ *out++ = 1;
+
+ *out++ = picParamSet->size() >> 8;
+ *out++ = picParamSet->size() & 0xff;
+ memcpy(out, picParamSet->data(), picParamSet->size());
+
+#if 0
+ ALOGI("AVC seq param set");
+ hexdump(seqParamSet->data(), seqParamSet->size());
+#endif
+
+
+ if (sarWidth != nullptr && sarHeight != nullptr) {
+ if ((*sarWidth > 0 && *sarHeight > 0) && (*sarWidth != 1 || *sarHeight != 1)) {
+ ALOGI("found AVC codec config (%d x %d, %s-profile level %d.%d) "
+ "SAR %d : %d",
+ *width,
+ *height,
+ AVCProfileToString(profile),
+ level / 10,
+ level % 10,
+ *sarWidth,
+ *sarHeight);
+ } else {
+ // We treat *:0 and 0:* (unspecified) as 1:1.
+ *sarWidth = 0;
+ *sarHeight = 0;
+ ALOGI("found AVC codec config (%d x %d, %s-profile level %d.%d)",
+ *width,
+ *height,
+ AVCProfileToString(profile),
+ level / 10,
+ level % 10);
+ }
+ }
+
+ return csd;
+}
+
+bool IsIDR(const uint8_t *data, size_t size) {
+// const uint8_t *data = buffer->data();
+// size_t size = buffer->size();
+ bool foundIDR = false;
+
+ const uint8_t *nalStart;
+ size_t nalSize;
+ while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
+ if (nalSize == 0u) {
+ ALOGW("skipping empty nal unit from potentially malformed bitstream");
+ continue;
+ }
+
+ unsigned nalType = nalStart[0] & 0x1f;
+
+ if (nalType == 5) {
+ foundIDR = true;
+ break;
+ }
+ }
+
+ return foundIDR;
+}
+
+bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit) {
+ const uint8_t *data = accessUnit->data();
+ size_t size = accessUnit->size();
+ if (data == NULL) {
+ ALOGE("IsAVCReferenceFrame: called on NULL data (%p, %zu)", accessUnit.get(), size);
+ return false;
+ }
+
+ const uint8_t *nalStart;
+ size_t nalSize;
+ while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
+ if (nalSize == 0) {
+ ALOGE("IsAVCReferenceFrame: invalid nalSize: 0 (%p, %zu)", accessUnit.get(), size);
+ return false;
+ }
+
+ unsigned nalType = nalStart[0] & 0x1f;
+
+ if (nalType == 5) {
+ return true;
+ } else if (nalType == 1) {
+ unsigned nal_ref_idc = (nalStart[0] >> 5) & 3;
+ return nal_ref_idc != 0;
+ }
+ }
+
+ return true;
+}
+
+uint32_t FindAVCLayerId(const uint8_t *data, size_t size) {
+ CHECK(data != NULL);
+
+ const unsigned kSvcNalType = 0xE;
+ const unsigned kSvcNalSearchRange = 32;
+ // SVC NAL
+ // |---0 1110|1--- ----|---- ----|iii- ---|
+ // ^ ^
+ // NAL-type = 0xE layer-Id
+ //
+ // layer_id 0 is for base layer, while 1, 2, ... are enhancement layers.
+ // Layer n uses reference frames from layer 0, 1, ..., n-1.
+
+ uint32_t layerId = 0;
+ sp<ABuffer> svcNAL = FindNAL(
+ data, size > kSvcNalSearchRange ? kSvcNalSearchRange : size, kSvcNalType);
+ if (svcNAL != NULL && svcNAL->size() >= 4) {
+ layerId = (*(svcNAL->data() + 3) >> 5) & 0x7;
+ }
+ return layerId;
+}
+
+bool ExtractDimensionsFromVOLHeader(
+ const uint8_t *data, size_t size, int32_t *width, int32_t *height) {
+ ABitReader br(&data[4], size - 4);
+ br.skipBits(1); // random_accessible_vol
+ unsigned video_object_type_indication = br.getBits(8);
+
+ CHECK_NE(video_object_type_indication,
+ 0x21u /* Fine Granularity Scalable */);
+
+ unsigned video_object_layer_verid __unused;
+ unsigned video_object_layer_priority __unused;
+ if (br.getBits(1)) {
+ video_object_layer_verid = br.getBits(4);
+ video_object_layer_priority = br.getBits(3);
+ }
+ unsigned aspect_ratio_info = br.getBits(4);
+ if (aspect_ratio_info == 0x0f /* extended PAR */) {
+ br.skipBits(8); // par_width
+ br.skipBits(8); // par_height
+ }
+ if (br.getBits(1)) { // vol_control_parameters
+ br.skipBits(2); // chroma_format
+ br.skipBits(1); // low_delay
+ if (br.getBits(1)) { // vbv_parameters
+ br.skipBits(15); // first_half_bit_rate
+ CHECK(br.getBits(1)); // marker_bit
+ br.skipBits(15); // latter_half_bit_rate
+ CHECK(br.getBits(1)); // marker_bit
+ br.skipBits(15); // first_half_vbv_buffer_size
+ CHECK(br.getBits(1)); // marker_bit
+ br.skipBits(3); // latter_half_vbv_buffer_size
+ br.skipBits(11); // first_half_vbv_occupancy
+ CHECK(br.getBits(1)); // marker_bit
+ br.skipBits(15); // latter_half_vbv_occupancy
+ CHECK(br.getBits(1)); // marker_bit
+ }
+ }
+ unsigned video_object_layer_shape = br.getBits(2);
+ CHECK_EQ(video_object_layer_shape, 0x00u /* rectangular */);
+
+ CHECK(br.getBits(1)); // marker_bit
+ unsigned vop_time_increment_resolution = br.getBits(16);
+ CHECK(br.getBits(1)); // marker_bit
+
+ if (br.getBits(1)) { // fixed_vop_rate
+ // range [0..vop_time_increment_resolution)
+
+ // vop_time_increment_resolution
+ // 2 => 0..1, 1 bit
+ // 3 => 0..2, 2 bits
+ // 4 => 0..3, 2 bits
+ // 5 => 0..4, 3 bits
+ // ...
+
+ CHECK_GT(vop_time_increment_resolution, 0u);
+ --vop_time_increment_resolution;
+
+ unsigned numBits = 0;
+ while (vop_time_increment_resolution > 0) {
+ ++numBits;
+ vop_time_increment_resolution >>= 1;
+ }
+
+ br.skipBits(numBits); // fixed_vop_time_increment
+ }
+
+ CHECK(br.getBits(1)); // marker_bit
+ unsigned video_object_layer_width = br.getBits(13);
+ CHECK(br.getBits(1)); // marker_bit
+ unsigned video_object_layer_height = br.getBits(13);
+ CHECK(br.getBits(1)); // marker_bit
+
+ unsigned interlaced __unused = br.getBits(1);
+
+ *width = video_object_layer_width;
+ *height = video_object_layer_height;
+
+ return true;
+}
+
+bool GetMPEGAudioFrameSize(
+ uint32_t header, size_t *frame_size,
+ int *out_sampling_rate, int *out_channels,
+ int *out_bitrate, int *out_num_samples) {
+ *frame_size = 0;
+
+ if (out_sampling_rate) {
+ *out_sampling_rate = 0;
+ }
+
+ if (out_channels) {
+ *out_channels = 0;
+ }
+
+ if (out_bitrate) {
+ *out_bitrate = 0;
+ }
+
+ if (out_num_samples) {
+ *out_num_samples = 1152;
+ }
+
+ if ((header & 0xffe00000) != 0xffe00000) {
+ return false;
+ }
+
+ unsigned version = (header >> 19) & 3;
+
+ if (version == 0x01) {
+ return false;
+ }
+
+ unsigned layer = (header >> 17) & 3;
+
+ if (layer == 0x00) {
+ return false;
+ }
+
+ unsigned protection __unused = (header >> 16) & 1;
+
+ unsigned bitrate_index = (header >> 12) & 0x0f;
+
+ if (bitrate_index == 0 || bitrate_index == 0x0f) {
+ // Disallow the "free" (0) and invalid (0xf) bitrate indices.
+ return false;
+ }
+
+ unsigned sampling_rate_index = (header >> 10) & 3;
+
+ if (sampling_rate_index == 3) {
+ return false;
+ }
+
+ static const int kSamplingRateV1[] = { 44100, 48000, 32000 };
+ int sampling_rate = kSamplingRateV1[sampling_rate_index];
+ if (version == 2 /* V2 */) {
+ sampling_rate /= 2;
+ } else if (version == 0 /* V2.5 */) {
+ sampling_rate /= 4;
+ }
+
+ unsigned padding = (header >> 9) & 1;
+
+ if (layer == 3) {
+ // layer I
+
+ static const int kBitrateV1[] = {
+ 32, 64, 96, 128, 160, 192, 224, 256,
+ 288, 320, 352, 384, 416, 448
+ };
+
+ static const int kBitrateV2[] = {
+ 32, 48, 56, 64, 80, 96, 112, 128,
+ 144, 160, 176, 192, 224, 256
+ };
+
+ int bitrate =
+ (version == 3 /* V1 */)
+ ? kBitrateV1[bitrate_index - 1]
+ : kBitrateV2[bitrate_index - 1];
+
+ if (out_bitrate) {
+ *out_bitrate = bitrate;
+ }
+
+ *frame_size = (12000 * bitrate / sampling_rate + padding) * 4;
+
+ if (out_num_samples) {
+ *out_num_samples = 384;
+ }
+ } else {
+ // layer II or III
+
+ static const int kBitrateV1L2[] = {
+ 32, 48, 56, 64, 80, 96, 112, 128,
+ 160, 192, 224, 256, 320, 384
+ };
+
+ static const int kBitrateV1L3[] = {
+ 32, 40, 48, 56, 64, 80, 96, 112,
+ 128, 160, 192, 224, 256, 320
+ };
+
+ static const int kBitrateV2[] = {
+ 8, 16, 24, 32, 40, 48, 56, 64,
+ 80, 96, 112, 128, 144, 160
+ };
+
+ int bitrate;
+ if (version == 3 /* V1 */) {
+ bitrate = (layer == 2 /* L2 */)
+ ? kBitrateV1L2[bitrate_index - 1]
+ : kBitrateV1L3[bitrate_index - 1];
+
+ if (out_num_samples) {
+ *out_num_samples = 1152;
+ }
+ } else {
+ // V2 (or 2.5)
+
+ bitrate = kBitrateV2[bitrate_index - 1];
+ if (out_num_samples) {
+ *out_num_samples = (layer == 1 /* L3 */) ? 576 : 1152;
+ }
+ }
+
+ if (out_bitrate) {
+ *out_bitrate = bitrate;
+ }
+
+ if (version == 3 /* V1 */) {
+ *frame_size = 144000 * bitrate / sampling_rate + padding;
+ } else {
+ // V2 or V2.5
+ size_t tmp = (layer == 1 /* L3 */) ? 72000 : 144000;
+ *frame_size = tmp * bitrate / sampling_rate + padding;
+ }
+ }
+
+ if (out_sampling_rate) {
+ *out_sampling_rate = sampling_rate;
+ }
+
+ if (out_channels) {
+ int channel_mode = (header >> 6) & 3;
+
+ *out_channels = (channel_mode == 3) ? 1 : 2;
+ }
+
+ return true;
+}
+
+} // namespace android
+
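For reference, a standalone sketch of the Layer III frame-size arithmetic above: for an MPEG-1 Layer III stream at 44.1 kHz and 128 kbps, 144000 * 128 / 44100 truncates to 417 bytes (418 when the padding bit is set). The header word 0xFFFB9064 is merely one common example of such a frame header, and the include path assumes an AOSP build.

    #include <cassert>
    #include <cstddef>
    #include <media/stagefright/foundation/avc_utils.h>

    int main() {
        size_t frameSize = 0;
        int sampleRate = 0, channels = 0, bitrate = 0, samples = 0;
        // 0xFFFB9064: MPEG-1, Layer III, 128 kbps, 44100 Hz, no padding, joint stereo.
        bool ok = android::GetMPEGAudioFrameSize(
                0xFFFB9064, &frameSize, &sampleRate, &channels, &bitrate, &samples);
        assert(ok);
        assert(frameSize == 417 && sampleRate == 44100 && channels == 2);
        assert(bitrate == 128 && samples == 1152);
        return 0;
    }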
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/ABuffer.h b/media/libstagefright/foundation/include/media/stagefright/foundation/ABuffer.h
index ef11434..8fe9f8d 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/ABuffer.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/ABuffer.h
@@ -27,7 +27,6 @@
namespace android {
struct AMessage;
-class MediaBufferBase;
struct ABuffer : public RefBase {
explicit ABuffer(size_t capacity);
@@ -49,17 +48,12 @@
sp<AMessage> meta();
- MediaBufferBase *getMediaBufferBase();
- void setMediaBufferBase(MediaBufferBase *mediaBuffer);
-
protected:
virtual ~ABuffer();
private:
sp<AMessage> mMeta;
- MediaBufferBase *mMediaBufferBase;
-
void *mData;
size_t mCapacity;
size_t mRangeOffset;
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h
index 49aa0dc..85e4378 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h
@@ -25,6 +25,9 @@
#include <media/stagefright/foundation/TypeTraits.h>
#include <media/stagefright/foundation/Flagged.h>
+#undef HIDE
+#define HIDE __attribute__((visibility("hidden")))
+
namespace android {
/**
@@ -78,7 +81,7 @@
* This class is needed as member function specialization is not allowed for a
* templated class.
*/
-struct _AUnion_impl {
+struct HIDE _AUnion_impl {
/**
* Calls placement constuctor for type T with arbitrary arguments for a storage at an address.
* Storage MUST be large enough to contain T.
@@ -113,13 +116,13 @@
/** Constructor specialization for void type */
template<>
-inline void _AUnion_impl::emplace<void>(size_t totalSize, void *addr) {
+HIDE inline void _AUnion_impl::emplace<void>(size_t totalSize, void *addr) {
memset(addr, 0, totalSize);
}
/** Destructor specialization for void type */
template<>
-inline void _AUnion_impl::del<void>(void *) {
+HIDE inline void _AUnion_impl::del<void>(void *) {
}
/// \endcond
@@ -221,7 +224,7 @@
template<
typename T,
bool=std::is_copy_assignable<T>::value>
-struct _AData_copier {
+struct HIDE _AData_copier {
static_assert(std::is_copy_assignable<T>::value, "T must be copy assignable here");
/**
@@ -294,7 +297,7 @@
*
*/
template<typename T>
-struct _AData_copier<T, false> {
+struct HIDE _AData_copier<T, false> {
static_assert(!std::is_copy_assignable<T>::value, "T must not be copy assignable here");
static_assert(std::is_copy_constructible<T>::value, "T must be copy constructible here");
@@ -318,7 +321,7 @@
template<
typename T,
bool=std::is_move_assignable<T>::value>
-struct _AData_mover {
+struct HIDE _AData_mover {
static_assert(std::is_move_assignable<T>::value, "T must be move assignable here");
/**
@@ -389,7 +392,7 @@
*
*/
template<typename T>
-struct _AData_mover<T, false> {
+struct HIDE _AData_mover<T, false> {
static_assert(!std::is_move_assignable<T>::value, "T must not be move assignable here");
static_assert(std::is_move_constructible<T>::value, "T must be move constructible here");
@@ -407,13 +410,13 @@
* \param Ts types to consider for the member
*/
template<typename Flagger, typename U, typename ...Ts>
-struct _AData_deleter;
+struct HIDE _AData_deleter;
/**
* Template specialization when there are still types to consider (T and rest)
*/
template<typename Flagger, typename U, typename T, typename ...Ts>
-struct _AData_deleter<Flagger, U, T, Ts...> {
+struct HIDE _AData_deleter<Flagger, U, T, Ts...> {
static bool del(typename Flagger::type flags, U &data) {
if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
data.template del<T>();
@@ -427,13 +430,101 @@
* Template specialization when there are no more types to consider.
*/
template<typename Flagger, typename U>
-struct _AData_deleter<Flagger, U> {
+struct HIDE _AData_deleter<Flagger, U> {
inline static bool del(typename Flagger::type, U &) {
return false;
}
};
/**
+ * Helper template that copy assigns an object of a specific type (member) in an
+ * AUnion.
+ *
+ * \param Flagger type flagger class (see AData)
+ * \param U AUnion object in which the member should be copy assigned
+ * \param Ts types to consider for the member
+ */
+template<typename Flagger, typename U, typename ...Ts>
+struct HIDE _AData_copy_assigner;
+
+/**
+ * Template specialization when there are still types to consider (T and rest)
+ */
+template<typename Flagger, typename U, typename T, typename ...Ts>
+struct HIDE _AData_copy_assigner<Flagger, U, T, Ts...> {
+ static bool assign(typename Flagger::type flags, U &dst, const U &src) {
+ static_assert(std::is_copy_constructible<T>::value, "T must be copy constructible");
+ // if we can delete as, we can also assign as
+ if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
+ dst.template emplace<T>(src.template get<T>());
+ return true;
+ }
+ return _AData_copy_assigner<Flagger, U, Ts...>::assign(flags, dst, src);
+ }
+};
+
+/**
+ * Template specialization when there are no more types to consider.
+ */
+template<typename Flagger, typename U>
+struct HIDE _AData_copy_assigner<Flagger, U> {
+ inline static bool assign(typename Flagger::type, U &, const U &) {
+ return false;
+ }
+};
+
+/**
+ * Helper template that move assigns an object of a specific type (member) in an
+ * AUnion.
+ *
+ * \param Flagger type flagger class (see AData)
+ * \param U AUnion object in which the member should be copy assigned
+ * \param Ts types to consider for the member
+ */
+template<typename Flagger, typename U, typename ...Ts>
+struct HIDE _AData_move_assigner;
+
+/**
+ * Template specialization when there are still types to consider (T and rest)
+ */
+template<typename Flagger, typename U, typename T, typename ...Ts>
+struct HIDE _AData_move_assigner<Flagger, U, T, Ts...> {
+ template<typename V = T>
+ static typename std::enable_if<std::is_move_constructible<V>::value, bool>::type
+ assign(typename Flagger::type flags, U &dst, U &src) {
+ // if we can delete as, we can also assign as
+ if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
+ dst.template emplace<T>(std::move(src.template get<T>()));
+ return true;
+ }
+ return _AData_move_assigner<Flagger, U, Ts...>::assign(flags, dst, src);
+ }
+
+ // Fall back to copy construction if T is not move constructible
+ template<typename V = T>
+ static typename std::enable_if<!std::is_move_constructible<V>::value, bool>::type
+ assign(typename Flagger::type flags, U &dst, U &src) {
+ static_assert(std::is_copy_constructible<T>::value, "T must be copy constructible");
+ // if we can delete as, we can also assign as
+ if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
+ dst.template emplace<T>(src.template get<T>());
+ return true;
+ }
+ return _AData_move_assigner<Flagger, U, Ts...>::assign(flags, dst, src);
+ }
+};
+
+/**
+ * Template specialization when there are no more types to consider.
+ */
+template<typename Flagger, typename U>
+struct HIDE _AData_move_assigner<Flagger, U> {
+ inline static bool assign(typename Flagger::type, U &, U &) {
+ return false;
+ }
+};
+
+/**
* Container that can store an arbitrary object of a set of specified types.
*
* This struct is an outer class that contains various inner classes based on desired type
@@ -654,6 +745,61 @@
Custom() : base_t(Flagger::flagFor((void*)0)) { }
/**
+ * Copy assignment operator.
+ */
+ Custom& operator=(const Custom &o) {
+ if (&o != this) {
+ if (this->used() && !this->clear()) {
+ __builtin_trap();
+ }
+ if (o.used()) {
+ if (_AData_copy_assigner<Flagger, data_t, Ts...>::assign(
+ o.flags(), this->get(), o.get())) {
+ this->setFlags(o.flags());
+ } else {
+ __builtin_trap();
+ }
+ }
+ }
+ return *this;
+ }
+
+ /**
+ * Copy constructor.
+ */
+ Custom(const Custom &o) : Custom() {
+ *this = o;
+ }
+
+ /**
+ * Move assignment operator.
+ */
+ Custom& operator=(Custom &&o) {
+ if (&o != this) {
+ if (this->used() && !this->clear()) {
+ __builtin_trap();
+ }
+ if (o.used()) {
+ if (_AData_move_assigner<Flagger, data_t, Ts...>::assign(
+ o.flags(), this->get(), o.get())) {
+ this->setFlags(o.flags());
+ o.clear();
+ } else {
+ __builtin_trap();
+ }
+ }
+ }
+ return *this;
+ }
+
+ /**
+ * Move constructor.
+ */
+ Custom(Custom &&o) : Custom() {
+ *this = std::move(o);
+ }
+
+ /**
* Removes the contained object, if any.
*/
~Custom() {
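A minimal sketch of what the new copy/move support enables (the helper name is illustrative and the include paths assume an AOSP build); the AData_test changes further below exercise the same paths with sp<ABuffer> payloads.

    #include <cstdint>
    #include <utility>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/AData.h>

    using namespace android;

    void copyAndMoveExample() {
        AData<sp<ABuffer>, int32_t>::Basic a, b;
        a.set(int32_t(42));
        b = a;                   // copy-assigns the stored int32_t
        auto c = std::move(b);   // move-constructs; b is left cleared
        int32_t v = 0;
        if (c.find(&v)) {
            // v == 42
        }
    }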
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
index 8580eb5..742651e 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
@@ -19,6 +19,7 @@
#define A_MESSAGE_H_
#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AData.h>
#include <media/stagefright/foundation/ALooper.h>
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
@@ -155,6 +156,9 @@
// their refcount incremented.
sp<AMessage> dup() const;
+ // Adds all items from other into this.
+ void extend(const sp<AMessage> &other);
+
// Performs a shallow or deep comparison of |this| and |other| and returns
// an AMessage with the differences.
// Warning: RefBase items, i.e. "objects" are _not_ copied but only have
@@ -180,9 +184,81 @@
kTypeBuffer,
};
+ struct Rect {
+ int32_t mLeft, mTop, mRight, mBottom;
+ };
+
size_t countEntries() const;
const char *getEntryNameAt(size_t index, Type *type) const;
+ /**
+ * Retrieves the item at a specific index.
+ */
+ typedef AData<
+ int32_t, int64_t, size_t, float, double, Rect, AString,
+ void *, sp<AMessage>, sp<ABuffer>, sp<RefBase>>::Basic ItemData;
+
+ /**
+ * Finds an item by name. This can be used if the type is unknown.
+ *
+ * \param name name of the item
+ * Returns an empty item if no item is present with that name.
+ */
+ ItemData findItem(const char *name) const;
+
+ /**
+ * Sets an item of arbitrary type. Does nothing if the item value is empty.
+ *
+ * \param name name of the item
+ * \param item value of the item
+ */
+ void setItem(const char *name, const ItemData &item);
+
+ ItemData getEntryAt(size_t index) const;
+
+ /**
+ * Finds an entry by name and returns its index.
+ *
+ * \retval countEntries() if the entry is not found.
+ */
+ size_t findEntryByName(const char *name) const;
+
+ /**
+ * Sets the name of an entry based on index.
+ *
+ * \param index index of the entry
+ * \param name (new) name of the entry
+ *
+ * \retval OK the name was set successfully
+ * \retval BAD_INDEX invalid index
+ * \retval BAD_VALUE name is invalid (null)
+ * \retval ALREADY_EXISTS name is already used by another entry
+ */
+ status_t setEntryNameAt(size_t index, const char *name);
+
+ /**
+ * Sets the item of an entry based on index.
+ *
+ * \param index index of the entry
+ * \param item new item of the entry
+ *
+ * \retval OK the item was set successfully
+ * \retval BAD_INDEX invalid index
+ * \retval BAD_VALUE item is invalid (null)
+ * \retval BAD_TYPE type is unsupported (should not happen)
+ */
+ status_t setEntryAt(size_t index, const ItemData &item);
+
+ /**
+ * Removes an entry based on index.
+ *
+ * \param index index of the entry
+ *
+ * \retval OK the entry was removed successfully
+ * \retval BAD_INDEX invalid index
+ */
+ status_t removeEntryAt(size_t index);
+
protected:
virtual ~AMessage();
@@ -197,10 +273,6 @@
wp<AHandler> mHandler;
wp<ALooper> mLooper;
- struct Rect {
- int32_t mLeft, mTop, mRight, mBottom;
- };
-
struct Item {
union {
int32_t int32Value;
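A rough sketch of how the new generic item API and extend() can be used together; the helper name and the "bit-rate" key are illustrative, and the include path assumes an AOSP build.

    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    // Copy one named entry from src to dst regardless of its type, then fold in
    // every remaining entry from src.
    static void copyThenExtend(const sp<AMessage> &src, const sp<AMessage> &dst) {
        AMessage::ItemData item = src->findItem("bit-rate");
        if (item.used()) {
            dst->setItem("bit-rate", item);
        }
        dst->extend(src);  // adds all items from src into dst
    }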
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AUtils.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AUtils.h
index 255a0f4..af6b357 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AUtils.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AUtils.h
@@ -22,28 +22,28 @@
/* T must be integer type, den must not be 0 */
template<class T>
-inline static const T divRound(const T &nom, const T &den) {
- if ((nom >= 0) ^ (den >= 0)) {
- return (nom - den / 2) / den;
+inline static const T divRound(const T &num, const T &den) {
+ if ((num >= 0) ^ (den >= 0)) {
+ return (num - den / 2) / den;
} else {
- return (nom + den / 2) / den;
+ return (num + den / 2) / den;
}
}
-/* == ceil(nom / den). T must be integer type, den must not be 0 */
+/* == ceil(num / den). T must be integer type, den must not be 0 */
template<class T>
-inline static const T divUp(const T &nom, const T &den) {
+inline static const T divUp(const T &num, const T &den) {
if (den < 0) {
- return (nom < 0 ? nom + den + 1 : nom) / den;
+ return (num < 0 ? num + den + 1 : num) / den;
} else {
- return (nom < 0 ? nom : nom + den - 1) / den;
+ return (num < 0 ? num : num + den - 1) / den;
}
}
-/* == ceil(nom / den) * den. T must be integer type, alignment must be positive power of 2 */
+/* == ceil(num / den) * den. T must be integer type, alignment must be positive power of 2 */
template<class T, class U>
-inline static const T align(const T &nom, const U &den) {
- return (nom + (T)(den - 1)) & (T)~(den - 1);
+inline static const T align(const T &num, const U &den) {
+ return (num + (T)(den - 1)) & (T)~(den - 1);
}
template<class T>
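The renamed helpers behave as before; a few spot checks of the rounding conventions (AOSP include path assumed):

    #include <cassert>
    #include <media/stagefright/foundation/AUtils.h>

    int main() {
        using namespace android;
        assert(divUp(7, 4) == 2);       // ceil(7 / 4)
        assert(divRound(5, 4) == 1);    // nearest integer to 1.25
        assert(divRound(7, 4) == 2);    // nearest integer to 1.75
        assert(divRound(-7, 4) == -2);  // nearest integer to -1.75
        assert(align(13, 8) == 16);     // round 13 up to a multiple of 8
        return 0;
    }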
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/ByteUtils.h b/media/libstagefright/foundation/include/media/stagefright/foundation/ByteUtils.h
new file mode 100644
index 0000000..a434f81
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/ByteUtils.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BYTE_UTILS_H
+
+#define BYTE_UTILS_H
+
+#include <arpa/inet.h>
+
+namespace android {
+
+constexpr int FOURCC(unsigned char c1, unsigned char c2, unsigned char c3, unsigned char c4) {
+ return ((c1) << 24 | (c2) << 16 | (c3) << 8 | (c4));
+}
+
+template <size_t N>
+constexpr int32_t FOURCC(const char (&s) [N]) {
+ static_assert(N == 5, "fourcc: wrong length");
+ return
+ (unsigned char) s[0] << 24 |
+ (unsigned char) s[1] << 16 |
+ (unsigned char) s[2] << 8 |
+ (unsigned char) s[3] << 0;
+}
+
+
+uint16_t U16_AT(const uint8_t *ptr);
+uint32_t U32_AT(const uint8_t *ptr);
+uint64_t U64_AT(const uint8_t *ptr);
+
+uint16_t U16LE_AT(const uint8_t *ptr);
+uint32_t U32LE_AT(const uint8_t *ptr);
+uint64_t U64LE_AT(const uint8_t *ptr);
+
+uint64_t ntoh64(uint64_t x);
+uint64_t hton64(uint64_t x);
+
+void MakeFourCCString(uint32_t x, char *s);
+
+} // namespace android
+
+#endif // BYTE_UTILS_H
+
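Both FOURCC forms are constexpr, so codes can be checked at compile time; for instance "mp4a" packs to 0x6d703461 (illustrative only, AOSP include path assumed):

    #include <media/stagefright/foundation/ByteUtils.h>

    static_assert(android::FOURCC("mp4a") == 0x6d703461, "string form");
    static_assert(android::FOURCC('m', 'p', '4', 'a') == 0x6d703461, "character form");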
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
index b889a02..d6c768d 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
@@ -39,26 +39,28 @@
* vendor-extension section so they won't collide with future platform values.
*/
-#define GET_HAL_ENUM(class, name) HAL_DATASPACE_##class##name
-#define GET_HAL_BITFIELD(class, name) (GET_HAL_ENUM(class, _##name) >> GET_HAL_ENUM(class, _SHIFT))
-
+ /**
+ * graphic.h constants changed in Android 8.0 after ColorStandard values were already public
+ * in Android 7.0. We do not deal with the break in graphic.h here; instead we list the public
+ * Android SDK MediaFormat values.
+ */
enum ColorStandard : uint32_t {
- kColorStandardUnspecified = GET_HAL_BITFIELD(STANDARD, UNSPECIFIED),
- kColorStandardBT709 = GET_HAL_BITFIELD(STANDARD, BT709),
- kColorStandardBT601_625 = GET_HAL_BITFIELD(STANDARD, BT601_625),
- kColorStandardBT601_625_Unadjusted = GET_HAL_BITFIELD(STANDARD, BT601_625_UNADJUSTED),
- kColorStandardBT601_525 = GET_HAL_BITFIELD(STANDARD, BT601_525),
- kColorStandardBT601_525_Unadjusted = GET_HAL_BITFIELD(STANDARD, BT601_525_UNADJUSTED),
- kColorStandardBT2020 = GET_HAL_BITFIELD(STANDARD, BT2020),
- kColorStandardBT2020Constant = GET_HAL_BITFIELD(STANDARD, BT2020_CONSTANT_LUMINANCE),
- kColorStandardBT470M = GET_HAL_BITFIELD(STANDARD, BT470M),
- kColorStandardFilm = GET_HAL_BITFIELD(STANDARD, FILM),
- kColorStandardMax = GET_HAL_BITFIELD(STANDARD, MASK),
+ kColorStandardUnspecified = 0,
+ kColorStandardBT709 = 1,
+ kColorStandardBT601_625 = 2,
+ kColorStandardBT601_625_Unadjusted = 3, // not in SDK
+ kColorStandardBT601_525 = 4,
+ kColorStandardBT601_525_Unadjusted = 5, // not in SDK
+ kColorStandardBT2020 = 6,
+ kColorStandardBT2020Constant = 7, // not in SDK
+ kColorStandardBT470M = 8, // not in SDK
+ kColorStandardFilm = 9, // not in SDK
+ kColorStandardDCI_P3 = 10, // not in SDK, new in Android 8.0
/* This marks a section of color-standard values that are not supported by graphics HAL,
but track defined color primaries-matrix coefficient combinations in media.
These are stable for a given release. */
- kColorStandardExtendedStart = kColorStandardMax + 1,
+ kColorStandardExtendedStart = 64,
/* This marks a section of color-standard values that are not supported by graphics HAL
nor using media defined color primaries or matrix coefficients. These may differ per
@@ -67,19 +69,19 @@
};
enum ColorTransfer : uint32_t {
- kColorTransferUnspecified = GET_HAL_BITFIELD(TRANSFER, UNSPECIFIED),
- kColorTransferLinear = GET_HAL_BITFIELD(TRANSFER, LINEAR),
- kColorTransferSRGB = GET_HAL_BITFIELD(TRANSFER, SRGB),
- kColorTransferSMPTE_170M = GET_HAL_BITFIELD(TRANSFER, SMPTE_170M),
- kColorTransferGamma22 = GET_HAL_BITFIELD(TRANSFER, GAMMA2_2),
- kColorTransferGamma28 = GET_HAL_BITFIELD(TRANSFER, GAMMA2_8),
- kColorTransferST2084 = GET_HAL_BITFIELD(TRANSFER, ST2084),
- kColorTransferHLG = GET_HAL_BITFIELD(TRANSFER, HLG),
- kColorTransferMax = GET_HAL_BITFIELD(TRANSFER, MASK),
+ kColorTransferUnspecified = 0,
+ kColorTransferLinear = 1,
+ kColorTransferSRGB = 2,
+ kColorTransferSMPTE_170M = 3, // not in SDK
+ kColorTransferGamma22 = 4, // not in SDK
+ kColorTransferGamma28 = 5, // not in SDK
+ kColorTransferST2084 = 6,
+ kColorTransferHLG = 7,
+ kColorTransferGamma26 = 8, // not in SDK, new in Android 8.0
/* This marks a section of color-transfer values that are not supported by graphics HAL,
but track media-defined color-transfer. These are stable for a given release. */
- kColorTransferExtendedStart = kColorTransferMax + 1,
+ kColorTransferExtendedStart = 32,
/* This marks a section of color-transfer values that are not supported by graphics HAL
nor defined by media. These may differ per device. */
@@ -87,23 +89,19 @@
};
enum ColorRange : uint32_t {
- kColorRangeUnspecified = GET_HAL_BITFIELD(RANGE, UNSPECIFIED),
- kColorRangeFull = GET_HAL_BITFIELD(RANGE, FULL),
- kColorRangeLimited = GET_HAL_BITFIELD(RANGE, LIMITED),
- kColorRangeMax = GET_HAL_BITFIELD(RANGE, MASK),
+ kColorRangeUnspecified = 0,
+ kColorRangeFull = 1,
+ kColorRangeLimited = 2,
/* This marks a section of color-transfer values that are not supported by graphics HAL,
but track media-defined color-transfer. These are stable for a given release. */
- kColorRangeExtendedStart = kColorRangeMax + 1,
+ kColorRangeExtendedStart = 8,
/* This marks a section of color-transfer values that are not supported by graphics HAL
nor defined by media. These may differ per device. */
kColorRangeVendorStart = 0x10000,
};
-#undef GET_HAL_BITFIELD
-#undef GET_HAL_ENUM
-
/*
* Static utilities for codec support
*/
@@ -197,7 +195,8 @@
case ColorUtils::kColorStandardBT2020Constant: return "BT2020Constant";
case ColorUtils::kColorStandardBT470M: return "BT470M";
case ColorUtils::kColorStandardFilm: return "Film";
- default: return def;
+ case ColorUtils::kColorStandardDCI_P3: return "DCI_P3";
+ default: return def;
}
}
@@ -212,7 +211,8 @@
case ColorUtils::kColorTransferGamma28: return "Gamma28";
case ColorUtils::kColorTransferST2084: return "ST2084";
case ColorUtils::kColorTransferHLG: return "HLG";
- default: return def;
+ case ColorUtils::kColorTransferGamma26: return "Gamma26";
+ default: return def;
}
}
@@ -222,7 +222,7 @@
case ColorUtils::kColorRangeUnspecified: return "Unspecified";
case ColorUtils::kColorRangeFull: return "Full";
case ColorUtils::kColorRangeLimited: return "Limited";
- default: return def;
+ default: return def;
}
}
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaBufferBase.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaBufferBase.h
deleted file mode 100644
index 99418fb..0000000
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaBufferBase.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BUFFER_BASE_H_
-
-#define MEDIA_BUFFER_BASE_H_
-
-namespace android {
-
-class MediaBufferBase {
-public:
- MediaBufferBase() {}
-
- virtual void release() = 0;
- virtual void add_ref() = 0;
-
-protected:
- virtual ~MediaBufferBase() {}
-
-private:
- MediaBufferBase(const MediaBufferBase &);
- MediaBufferBase &operator=(const MediaBufferBase &);
-};
-
-} // namespace android
-
-#endif // MEDIA_BUFFER_BASE_H_
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
new file mode 100644
index 0000000..25be89f
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_DEFS_H_
+
+#define MEDIA_DEFS_H_
+
+namespace android {
+
+extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
+extern const char *MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC;
+
+extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
+extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
+extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
+extern const char *MEDIA_MIMETYPE_VIDEO_H263;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
+extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
+extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
+extern const char *MEDIA_MIMETYPE_VIDEO_SCRAMBLED;
+
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG; // layer III
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
+extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
+extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
+extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_HEIF;
+
+extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
+extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
+extern const char *MEDIA_MIMETYPE_TEXT_VTT;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
+extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
+
+// These values are exported to the Java API and need to be kept in sync with
+// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
+// they are not defined in frameworks/av, so they are defined here.
+enum AudioEncoding {
+ kAudioEncodingPcm16bit = 2,
+ kAudioEncodingPcm8bit = 3,
+ kAudioEncodingPcmFloat = 4,
+};
+
+} // namespace android
+
+#endif // MEDIA_DEFS_H_
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaKeys.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaKeys.h
new file mode 100644
index 0000000..db924bb
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaKeys.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_KEYS_H_
+
+#define MEDIA_KEYS_H_
+
+namespace android {
+
+// When signalling a discontinuity to IStreamListener you can optionally
+// specify the type(s) of discontinuity, i.e. if the audio format has changed,
+// the video format has changed, time has jumped or any combination thereof.
+// To do so, include a non-zero int32_t value under the key
+// "kIStreamListenerKeyDiscontinuityMask" when issuing the
+// IStreamListener::DISCONTINUITY command.
+// If there is a change in audio/video format, the new logical stream
+// must start with proper codec initialization
+// information for playback to continue, i.e. SPS and PPS in the case
+// of AVC video etc.
+// If this key is not present, only a time discontinuity is assumed.
+// The value should be a bitmask of values from
+// ATSParser::DiscontinuityType.
+extern const char *const kIStreamListenerKeyDiscontinuityMask;
+
+// When signalling a discontinuity to ATSParser you can optionally
+// specify an int64_t PTS timestamp in "extra".
+// If present, rendering of data following the discontinuity
+// will be suppressed until media time reaches this timestamp.
+extern const char *const kATSParserKeyResumeAtPTS;
+
+// When signalling a discontinuity to ATSParser you can optionally
+// specify an int64_t PTS timestamp in "extra".
+// It indicates the media time (in us) to be associated
+// with the next PTS occurring in the stream. The value is of type int64_t.
+extern const char *const kATSParserKeyMediaTimeUs;
+
+// When signalling a discontinuity to ATSParser you can optionally
+// specify an int64_t PTS timestamp in "extra".
+// It indicates the media time (in us) of a recent
+// sample from the same content, and is used as a hint for the parser to
+// handle PTS wraparound. This is required when a new parser is created
+// to continue parsing content from the same timeline.
+extern const char *const kATSParserKeyRecentMediaTimeUs;
+
+} // namespace android
+
+#endif // MEDIA_KEYS_H_
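A sketch of populating the "extra" message described above; the helper is illustrative, and the mask value passed in must be a bitmask of ATSParser::DiscontinuityType values (not shown here). AOSP include paths assumed.

    #include <media/stagefright/foundation/AMessage.h>
    #include <media/stagefright/foundation/MediaKeys.h>

    using namespace android;

    // Build the "extra" passed along with the IStreamListener::DISCONTINUITY command.
    static sp<AMessage> makeDiscontinuityExtra(int32_t typeMask, int64_t resumeAtPtsUs) {
        sp<AMessage> extra = new AMessage;
        extra->setInt32(kIStreamListenerKeyDiscontinuityMask, typeMask);
        extra->setInt64(kATSParserKeyResumeAtPTS, resumeAtPtsUs);
        return extra;
    }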
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h b/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h
index 1250e9b..2041b22 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h
@@ -19,6 +19,9 @@
#include <type_traits>
+#undef HIDE
+#define HIDE __attribute__((visibility("hidden")))
+
namespace android {
/**
@@ -31,7 +34,7 @@
* Type support utility class to check if a type is an integral type or an enum.
*/
template<typename T>
-struct is_integral_or_enum
+struct HIDE is_integral_or_enum
: std::integral_constant<bool, std::is_integral<T>::value || std::is_enum<T>::value> { };
/**
@@ -46,7 +49,7 @@
typename U=typename std::enable_if<is_integral_or_enum<T>::value>::type,
bool=std::is_enum<T>::value,
bool=std::is_integral<T>::value>
-struct underlying_integral_type {
+struct HIDE underlying_integral_type {
static_assert(!std::is_enum<T>::value, "T should not be enum here");
static_assert(!std::is_integral<T>::value, "T should not be integral here");
typedef U type;
@@ -54,7 +57,7 @@
/** Specialization for enums. */
template<typename T, typename U>
-struct underlying_integral_type<T, U, true, false> {
+struct HIDE underlying_integral_type<T, U, true, false> {
static_assert(std::is_enum<T>::value, "T should be enum here");
static_assert(!std::is_integral<T>::value, "T should not be integral here");
typedef typename std::underlying_type<T>::type type;
@@ -62,7 +65,7 @@
/** Specialization for non-enum std-integral types. */
template<typename T, typename U>
-struct underlying_integral_type<T, U, false, true> {
+struct HIDE underlying_integral_type<T, U, false, true> {
static_assert(!std::is_enum<T>::value, "T should not be enum here");
static_assert(std::is_integral<T>::value, "T should be integral here");
typedef T type;
@@ -72,7 +75,7 @@
* Type support utility class to check if the underlying integral type is signed.
*/
template<typename T>
-struct is_signed_integral
+struct HIDE is_signed_integral
: std::integral_constant<bool, std::is_signed<
typename underlying_integral_type<T, unsigned>::type>::value> { };
@@ -80,7 +83,7 @@
* Type support utility class to check if the underlying integral type is unsigned.
*/
template<typename T>
-struct is_unsigned_integral
+struct HIDE is_unsigned_integral
: std::integral_constant<bool, std::is_unsigned<
typename underlying_integral_type<T, signed>::type>::value> {
};
@@ -92,26 +95,26 @@
* member constant |value| equal to true. Otherwise value is false.
*/
template<typename T, typename ...Us>
-struct is_one_of;
+struct HIDE is_one_of;
/// \if 0
/**
* Template specialization when first type matches the searched type.
*/
template<typename T, typename ...Us>
-struct is_one_of<T, T, Us...> : std::true_type {};
+struct HIDE is_one_of<T, T, Us...> : std::true_type {};
/**
* Template specialization when first type does not match the searched type.
*/
template<typename T, typename U, typename ...Us>
-struct is_one_of<T, U, Us...> : is_one_of<T, Us...> {};
+struct HIDE is_one_of<T, U, Us...> : is_one_of<T, Us...> {};
/**
* Template specialization when there are no types to search.
*/
template<typename T>
-struct is_one_of<T> : std::false_type {};
+struct HIDE is_one_of<T> : std::false_type {};
/// \endif
/**
@@ -121,44 +124,44 @@
* Otherwise value is false.
*/
template<typename ...Us>
-struct are_unique;
+struct HIDE are_unique;
/// \if 0
/**
* Template specialization when there are no types.
*/
template<>
-struct are_unique<> : std::true_type {};
+struct HIDE are_unique<> : std::true_type {};
/**
* Template specialization when there is at least one type to check.
*/
template<typename T, typename ...Us>
-struct are_unique<T, Us...>
+struct HIDE are_unique<T, Us...>
: std::integral_constant<bool, are_unique<Us...>::value && !is_one_of<T, Us...>::value> {};
/// \endif
/// \if 0
template<size_t Base, typename T, typename ...Us>
-struct _find_first_impl;
+struct HIDE _find_first_impl;
/**
* Template specialization when there are no types to search.
*/
template<size_t Base, typename T>
-struct _find_first_impl<Base, T> : std::integral_constant<size_t, 0> {};
+struct HIDE _find_first_impl<Base, T> : std::integral_constant<size_t, 0> {};
/**
* Template specialization when T is the first type in Us.
*/
template<size_t Base, typename T, typename ...Us>
-struct _find_first_impl<Base, T, T, Us...> : std::integral_constant<size_t, Base> {};
+struct HIDE _find_first_impl<Base, T, T, Us...> : std::integral_constant<size_t, Base> {};
/**
* Template specialization when T is not the first type in Us.
*/
template<size_t Base, typename T, typename U, typename ...Us>
-struct _find_first_impl<Base, T, U, Us...>
+struct HIDE _find_first_impl<Base, T, U, Us...>
: std::integral_constant<size_t, _find_first_impl<Base + 1, T, Us...>::value> {};
/// \endif
@@ -169,7 +172,7 @@
* If T occurs in Us, index is the 1-based left-most index of T in Us. Otherwise, index is 0.
*/
template<typename T, typename ...Us>
-struct find_first {
+struct HIDE find_first {
static constexpr size_t index = _find_first_impl<1, T, Us...>::value;
};
@@ -180,13 +183,13 @@
* Adds a base index.
*/
template<size_t Base, typename T, typename ...Us>
-struct _find_first_convertible_to_helper;
+struct HIDE _find_first_convertible_to_helper;
/**
* Template specialization for when there are more types to consider
*/
template<size_t Base, typename T, typename U, typename ...Us>
-struct _find_first_convertible_to_helper<Base, T, U, Us...> {
+struct HIDE _find_first_convertible_to_helper<Base, T, U, Us...> {
static constexpr size_t index =
std::is_convertible<T, U>::value ? Base :
_find_first_convertible_to_helper<Base + 1, T, Us...>::index;
@@ -199,7 +202,7 @@
* Template specialization for when there are no more types to consider
*/
template<size_t Base, typename T>
-struct _find_first_convertible_to_helper<Base, T> {
+struct HIDE _find_first_convertible_to_helper<Base, T> {
static constexpr size_t index = 0;
typedef void type;
};
@@ -216,7 +219,7 @@
* \param Us types into which the conversion is considered
*/
template<typename T, typename ...Us>
-struct find_first_convertible_to : public _find_first_convertible_to_helper<1, T, Us...> { };
+struct HIDE find_first_convertible_to : public _find_first_convertible_to_helper<1, T, Us...> { };
} // namespace android
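The hidden-visibility traits still resolve exactly as before; a few compile-time spot checks (AOSP include path assumed):

    #include <media/stagefright/foundation/TypeTraits.h>

    using namespace android;

    static_assert(is_one_of<int, float, int, char>::value, "int appears in the list");
    static_assert(!is_one_of<double, float, int, char>::value, "double does not");
    static_assert(are_unique<float, int, char>::value, "no duplicates");
    static_assert(find_first<int, float, int, char>::index == 2, "1-based position of int");
    static_assert(find_first_convertible_to<int, void *, float>::index == 2,
                  "int converts to float, not to void *");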
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/avc_utils.h b/media/libstagefright/foundation/include/media/stagefright/foundation/avc_utils.h
new file mode 100644
index 0000000..c287559
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/avc_utils.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AVC_UTILS_H_
+
+#define AVC_UTILS_H_
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+class ABitReader;
+
+enum {
+ kAVCProfileBaseline = 0x42,
+ kAVCProfileMain = 0x4d,
+ kAVCProfileExtended = 0x58,
+ kAVCProfileHigh = 0x64,
+ kAVCProfileHigh10 = 0x6e,
+ kAVCProfileHigh422 = 0x7a,
+ kAVCProfileHigh444 = 0xf4,
+ kAVCProfileCAVLC444Intra = 0x2c
+};
+
+struct NALPosition {
+ uint32_t nalOffset;
+ uint32_t nalSize;
+};
+
+// Optionally returns sample aspect ratio as well.
+void FindAVCDimensions(
+ const sp<ABuffer> &seqParamSet,
+ int32_t *width, int32_t *height,
+ int32_t *sarWidth = NULL, int32_t *sarHeight = NULL);
+
+// Gets and returns an unsigned exp-golomb (ue) value from a bit reader |br|. Aborts if the value
+// is more than 64 bits long (>=0xFFFF (!)) or the bit reader overflows.
+unsigned parseUE(ABitReader *br);
+
+// Gets and returns a signed exp-golomb (se) value from a bit reader |br|. Aborts if the value is
+// more than 64 bits long (>0x7FFF || <-0x7FFF (!)) or the bit reader overflows.
+signed parseSE(ABitReader *br);
+
+// Gets an unsigned exp-golomb (ue) value from a bit reader |br|, and returns it if it was
+// successful. Returns |fallback| if it was unsuccessful. Note: if the value was longer than 64
+// bits, it reads past the value and still returns |fallback|.
+unsigned parseUEWithFallback(ABitReader *br, unsigned fallback);
+
+// Gets a signed exp-golomb (se) value from a bit reader |br|, and returns it if it was successful.
+// Returns |fallback| if it was unsuccessful. Note: if the value was longer than 64 bits, it reads
+// past the value and still returns |fallback|.
+signed parseSEWithFallback(ABitReader *br, signed fallback);
+
+// Skips an unsigned exp-golomb (ue) value from bit reader |br|.
+inline void skipUE(ABitReader *br) {
+ (void)parseUEWithFallback(br, 0U);
+}
+
+// Skips a signed exp-golomb (se) value from bit reader |br|.
+inline void skipSE(ABitReader *br) {
+ (void)parseSEWithFallback(br, 0);
+}
+
+status_t getNextNALUnit(
+ const uint8_t **_data, size_t *_size,
+ const uint8_t **nalStart, size_t *nalSize,
+ bool startCodeFollows = false);
+
+sp<ABuffer> MakeAVCCodecSpecificData(
+ const sp<ABuffer> &accessUnit, int32_t *width, int32_t *height,
+ int32_t *sarWidth = nullptr, int32_t *sarHeight = nullptr);
+
+bool IsIDR(const uint8_t *data, size_t size);
+bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit);
+uint32_t FindAVCLayerId(const uint8_t *data, size_t size);
+
+const char *AVCProfileToString(uint8_t profile);
+
+// Given an MPEG4 video VOL-header chunk (starting with 0x00 0x00 0x01 0x2?)
+// parse it and fill in dimensions, returns true iff successful.
+bool ExtractDimensionsFromVOLHeader(
+ const uint8_t *data, size_t size, int32_t *width, int32_t *height);
+
+bool GetMPEGAudioFrameSize(
+ uint32_t header, size_t *frame_size,
+ int *out_sampling_rate = NULL, int *out_channels = NULL,
+ int *out_bitrate = NULL, int *out_num_samples = NULL);
+
+} // namespace android
+
+#endif // AVC_UTILS_H_
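A worked exp-Golomb example for parseUE(): the bit string 00101 has two leading zeros, so the decoded value is (1 << 2) - 1 + 0b01 = 4. Packed into a byte it reads 0x28 (illustrative only, AOSP include paths assumed).

    #include <cassert>
    #include <cstdint>
    #include <media/stagefright/foundation/ABitReader.h>
    #include <media/stagefright/foundation/avc_utils.h>

    int main() {
        const uint8_t bits[] = { 0x28 };  // 0010 1000: "00101" plus three padding bits
        android::ABitReader br(bits, sizeof(bits));
        assert(android::parseUE(&br) == 4u);
        return 0;
    }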
diff --git a/media/libstagefright/foundation/tests/AData_test.cpp b/media/libstagefright/foundation/tests/AData_test.cpp
index f014c25..2628a47 100644
--- a/media/libstagefright/foundation/tests/AData_test.cpp
+++ b/media/libstagefright/foundation/tests/AData_test.cpp
@@ -978,4 +978,63 @@
}
};
+TEST_F(ADataTest, AData_AssignmentTest) {
+ typedef AData<sp<ABuffer>, int32_t>::Basic Data;
+
+ sp<ABuffer> buf1 = new ABuffer((void *)"hello", 6);
+ wp<ABuffer> buf1w = buf1;
+
+ Data obj1;
+ obj1.set(buf1);
+ EXPECT_NE(buf1w.promote(), nullptr);
+ buf1.clear();
+ EXPECT_NE(buf1w.promote(), nullptr);
+ obj1.clear();
+ EXPECT_EQ(buf1w.promote(), nullptr);
+
+ buf1 = new ABuffer((void *)"again", 6);
+ buf1w = buf1;
+
+ obj1.set(buf1);
+ EXPECT_TRUE(obj1.used());
+ Data obj2 = obj1;
+
+ sp<ABuffer> buf2;
+ EXPECT_TRUE(obj2.find(&buf2));
+ EXPECT_EQ(buf2, buf1);
+ buf1.clear();
+ buf2.clear();
+ EXPECT_NE(buf1w.promote(), nullptr);
+ obj1.clear();
+ EXPECT_NE(buf1w.promote(), nullptr);
+ obj2.clear();
+ EXPECT_EQ(buf1w.promote(), nullptr);
+
+ buf1 = new ABuffer((void *)"still", 6);
+ buf1w = buf1;
+
+ obj1.set(buf1);
+ EXPECT_TRUE(obj1.used());
+ obj2 = std::move(obj1);
+ EXPECT_FALSE(obj1.used());
+
+ EXPECT_TRUE(obj2.find(&buf2));
+ EXPECT_EQ(buf2, buf1);
+ buf1.clear();
+ buf2.clear();
+ EXPECT_NE(buf1w.promote(), nullptr);
+ obj2.clear();
+ EXPECT_EQ(buf1w.promote(), nullptr);
+
+ typedef AData<sp<ABuffer>, std::unique_ptr<int32_t>>::Basic Data2;
+ Data2 obj3, obj4;
+
+ buf1 = new ABuffer((void *)"hence", 6);
+ obj3.set(buf1);
+ obj4 = std::move(obj3);
+ EXPECT_FALSE(obj3.used());
+ EXPECT_TRUE(obj4.find(&buf2));
+ EXPECT_EQ(buf2, buf1);
+}
+
} // namespace android
diff --git a/media/libstagefright/foundation/tests/Utils_test.cpp b/media/libstagefright/foundation/tests/Utils_test.cpp
index 0439d5c..fc2e044 100644
--- a/media/libstagefright/foundation/tests/Utils_test.cpp
+++ b/media/libstagefright/foundation/tests/Utils_test.cpp
@@ -22,7 +22,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AStringUtils.h>
#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/Utils.h> // for FOURCC
+#include <media/stagefright/foundation/ByteUtils.h> // for FOURCC
namespace android {
diff --git a/media/libstagefright/http/Android.bp b/media/libstagefright/http/Android.bp
index 5d90b0a..2e49fc4 100644
--- a/media/libstagefright/http/Android.bp
+++ b/media/libstagefright/http/Android.bp
@@ -12,7 +12,6 @@
shared_libs: [
"liblog",
"libutils",
- "libbinder",
"libandroid_runtime",
"libmedia",
],
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
index 5b18814..7c9247e 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -25,11 +25,11 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/Utils.h>
-#include <media/IMediaHTTPConnection.h>
+#include <media/MediaHTTPConnection.h>
namespace android {
-MediaHTTP::MediaHTTP(const sp<IMediaHTTPConnection> &conn)
+MediaHTTP::MediaHTTP(const sp<MediaHTTPConnection> &conn)
: mInitCheck((conn != NULL) ? OK : NO_INIT),
mHTTPConnection(conn),
mCachedSizeValid(false),
@@ -176,12 +176,6 @@
return mDecryptHandle;
}
-void MediaHTTP::getDrmInfo(
- sp<DecryptHandle> &handle, DrmManagerClient **client) {
- handle = mDecryptHandle;
- *client = mDrmManagerClient;
-}
-
String8 MediaHTTP::getUri() {
if (mInitCheck != OK) {
return String8::empty();
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index e415334..8a77401 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
name: "libstagefright_httplive",
srcs: [
@@ -36,8 +36,20 @@
"libcrypto",
"libcutils",
"libmedia",
+ "libmediaextractor",
"libstagefright",
"libstagefright_foundation",
"libutils",
+ "libhidlallocatorutils",
+ "libhidlbase",
+ "android.hardware.cas@1.0",
+ "android.hardware.cas.native@1.0",
],
+
+ static_libs: [
+ "libstagefright_id3",
+ "libstagefright_metadatautils",
+ "libstagefright_mpeg2support",
+ ],
+
}
diff --git a/media/libstagefright/httplive/HTTPDownloader.cpp b/media/libstagefright/httplive/HTTPDownloader.cpp
index 793695a..72604e3 100644
--- a/media/libstagefright/httplive/HTTPDownloader.cpp
+++ b/media/libstagefright/httplive/HTTPDownloader.cpp
@@ -21,12 +21,12 @@
#include "HTTPDownloader.h"
#include "M3UParser.h"
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
+#include <media/DataSource.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaHTTP.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/FileSource.h>
#include <openssl/aes.h>
#include <openssl/md5.h>
@@ -36,7 +36,7 @@
namespace android {
HTTPDownloader::HTTPDownloader(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const KeyedVector<String8, String8> &headers) :
mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())),
mExtraHeaders(headers),
diff --git a/media/libstagefright/httplive/HTTPDownloader.h b/media/libstagefright/httplive/HTTPDownloader.h
index 1db4a48..0d4bd31 100644
--- a/media/libstagefright/httplive/HTTPDownloader.h
+++ b/media/libstagefright/httplive/HTTPDownloader.h
@@ -28,12 +28,12 @@
struct ABuffer;
class DataSource;
struct HTTPBase;
-struct IMediaHTTPService;
+struct MediaHTTPService;
struct M3UParser;
struct HTTPDownloader : public RefBase {
HTTPDownloader(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const KeyedVector<String8, String8> &headers);
void reconnect();
diff --git a/media/libstagefright/httplive/LiveDataSource.h b/media/libstagefright/httplive/LiveDataSource.h
index b7be637..91e9f9f 100644
--- a/media/libstagefright/httplive/LiveDataSource.h
+++ b/media/libstagefright/httplive/LiveDataSource.h
@@ -18,8 +18,8 @@
#define LIVE_DATA_SOURCE_H_
+#include <media/DataSource.h>
#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/DataSource.h>
#include <utils/threads.h>
#include <utils/List.h>
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 143fd59..7eff8eb 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -26,7 +26,7 @@
#include "mpeg2ts/AnotherPacketSource.h"
#include <cutils/properties.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -49,6 +49,10 @@
const int64_t LiveSession::kUpSwitchMarginUs = 5000000ll;
const int64_t LiveSession::kResumeThresholdUs = 100000ll;
+//TODO: redefine this mark to a fair value
+// default buffer underflow mark
+static const int kUnderflowMarkMs = 1000; // 1 second
+
struct LiveSession::BandwidthEstimator : public RefBase {
BandwidthEstimator();
@@ -270,7 +274,7 @@
LiveSession::LiveSession(
const sp<AMessage> ¬ify, uint32_t flags,
- const sp<IMediaHTTPService> &httpService)
+ const sp<MediaHTTPService> &httpService)
: mNotify(notify),
mFlags(flags),
mHTTPService(httpService),
@@ -840,7 +844,7 @@
// (If we don't have that cushion we'd rather cancel and try again.)
int64_t delayUs =
switchUp ?
- (mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll + 1000000ll)
+ (kUnderflowMarkMs * 1000ll + 1000000ll)
: 0;
bool needResumeUntil = false;
sp<AMessage> stopParams = msg;
@@ -1010,7 +1014,8 @@
mFetcherLooper = new ALooper();
mFetcherLooper->setName("Fetcher");
- mFetcherLooper->start(false, false);
+ mFetcherLooper->start(false, /* runOnCallingThread */
+ true /* canCallJava */);
}
// create fetcher to fetch the master playlist
@@ -2202,14 +2207,14 @@
++activeCount;
int64_t readyMarkUs =
(mInPreparationPhase ?
- mBufferingSettings.mInitialWatermarkMs :
- mBufferingSettings.mRebufferingWatermarkHighMs) * 1000ll;
+ mBufferingSettings.mInitialMarkMs :
+ mBufferingSettings.mResumePlaybackMarkMs) * 1000ll;
if (bufferedDurationUs > readyMarkUs
|| mPacketSources[i]->isFinished(0)) {
++readyCount;
}
if (!mPacketSources[i]->isFinished(0)) {
- if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll) {
+ if (bufferedDurationUs < kUnderflowMarkMs * 1000ll) {
++underflowCount;
}
if (bufferedDurationUs > mUpSwitchMark) {
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index abf8cf0..7a6d487 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -33,7 +33,7 @@
struct AnotherPacketSource;
class DataSource;
struct HTTPBase;
-struct IMediaHTTPService;
+struct MediaHTTPService;
struct LiveDataSource;
struct M3UParser;
struct PlaylistFetcher;
@@ -71,7 +71,7 @@
LiveSession(
const sp<AMessage> ¬ify,
uint32_t flags,
- const sp<IMediaHTTPService> &httpService);
+ const sp<MediaHTTPService> &httpService);
void setBufferingSettings(const BufferingSettings &buffering);
@@ -187,7 +187,7 @@
sp<AMessage> mNotify;
uint32_t mFlags;
- sp<IMediaHTTPService> mHTTPService;
+ sp<MediaHTTPService> mHTTPService;
bool mBuffering;
bool mInPreparationPhase;
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 71feb9a..52791b9 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -23,6 +23,7 @@
#include <cutils/properties.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
@@ -56,7 +57,7 @@
const char *language,
uint32_t flags);
- bool getActiveURI(AString *uri) const;
+ bool getActiveURI(AString *uri, const char *baseURL) const;
void pickRandomMediaItems();
status_t selectTrack(size_t index, bool select);
@@ -75,6 +76,7 @@
AString mURI;
AString mLanguage;
uint32_t mFlags;
+ AString makeURL(const char *baseURL) const;
};
Type mType;
@@ -227,12 +229,12 @@
return format;
}
-bool M3UParser::MediaGroup::getActiveURI(AString *uri) const {
+bool M3UParser::MediaGroup::getActiveURI(AString *uri, const char *baseURL) const {
for (size_t i = 0; i < mMediaItems.size(); ++i) {
if (mSelectedIndex >= 0 && i == (size_t)mSelectedIndex) {
const Media &item = mMediaItems.itemAt(i);
- *uri = item.mURI;
+ *uri = item.makeURL(baseURL);
return true;
}
}
@@ -321,7 +323,7 @@
}
if (uri) {
- *uri = mItems.itemAt(index).mURI;
+ *uri = mItems.itemAt(index).makeURL(mBaseURI.c_str());
}
if (meta) {
@@ -427,7 +429,7 @@
AString groupID;
if (!meta->findString(key, &groupID)) {
if (uri != NULL) {
- *uri = mItems.itemAt(index).mURI;
+ *uri = mItems.itemAt(index).makeURL(mBaseURI.c_str());
}
AString codecs;
@@ -458,7 +460,7 @@
// don't care about the active URI (or if there is an active one)
if (uri != NULL) {
sp<MediaGroup> group = mMediaGroups.valueFor(groupID);
- if (!group->getActiveURI(uri)) {
+ if (!group->getActiveURI(uri, mBaseURI.c_str())) {
return false;
}
@@ -483,6 +485,9 @@
// Base URL must be absolute
return false;
}
+ if (!strncasecmp("data:", url, 5)) {
+ return false;
+ }
const size_t schemeEnd = (strstr(baseURL, "//") - baseURL) + 2;
CHECK(schemeEnd == 7 || schemeEnd == 8);
@@ -544,6 +549,18 @@
return true;
}
+AString M3UParser::Item::makeURL(const char *baseURL) const {
+ AString out;
+ CHECK(MakeURL(baseURL, mURI.c_str(), &out));
+ return out;
+}
+
+AString M3UParser::MediaGroup::Media::makeURL(const char *baseURL) const {
+ AString out;
+ CHECK(MakeURL(baseURL, mURI.c_str(), &out));
+ return out;
+}
+
status_t M3UParser::parse(const void *_data, size_t size) {
int32_t lineNo = 0;
@@ -658,10 +675,13 @@
}
if (!line.startsWith("#")) {
+ if (itemMeta == NULL) {
+ ALOGV("itemMeta == NULL");
+ return ERROR_MALFORMED;
+ }
if (!mIsVariantPlaylist) {
int64_t durationUs;
- if (itemMeta == NULL
- || !itemMeta->findInt64("durationUs", &durationUs)) {
+ if (!itemMeta->findInt64("durationUs", &durationUs)) {
return ERROR_MALFORMED;
}
itemMeta->setInt32("discontinuity-sequence",
@@ -671,7 +691,7 @@
mItems.push();
Item *item = &mItems.editItemAt(mItems.size() - 1);
- CHECK(MakeURL(mBaseURI.c_str(), line.c_str(), &item->mURI));
+ item->mURI = line;
item->mMeta = itemMeta;
@@ -897,6 +917,9 @@
}
}
+ if (meta->get() == NULL) {
+ return ERROR_MALFORMED;
+ }
return OK;
}
@@ -1180,9 +1203,7 @@
AString tmp(val, 1, val.size() - 2);
- if (!MakeURL(mBaseURI.c_str(), tmp.c_str(), &groupURI)) {
- ALOGI("Failed to make absolute URI from '%s'.", tmp.c_str());
- }
+ groupURI = tmp;
haveGroupURI = true;
}
diff --git a/media/libstagefright/httplive/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
index fa648ed..c85335a 100644
--- a/media/libstagefright/httplive/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -64,6 +64,7 @@
struct Item {
AString mURI;
sp<AMessage> mMeta;
+ AString makeURL(const char *baseURL) const;
};
status_t mInitCheck;
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 00cf142..9f39b5e 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -23,7 +23,6 @@
#include "HTTPDownloader.h"
#include "LiveSession.h"
#include "M3UParser.h"
-#include "include/avc_utils.h"
#include "include/ID3.h"
#include "mpeg2ts/AnotherPacketSource.h"
#include "mpeg2ts/HlsSampleDecryptor.h"
@@ -31,8 +30,13 @@
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/foundation/MediaKeys.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/DataURISource.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MetaDataUtils.h>
#include <media/stagefright/Utils.h>
#include <ctype.h>
@@ -344,6 +348,16 @@
sp<ABuffer> key;
if (index >= 0) {
key = mAESKeyForURI.valueAt(index);
+ } else if (keyURI.startsWith("data:")) {
+ sp<DataSource> keySrc = DataURISource::Create(keyURI.c_str());
+ off64_t keyLen;
+ if (keySrc == NULL || keySrc->getSize(&keyLen) != OK || keyLen < 0) {
+ ALOGE("Malformed cipher key data uri.");
+ return ERROR_MALFORMED;
+ }
+ key = new ABuffer(keyLen);
+ keySrc->readAt(0, key->data(), keyLen);
+ key->setRange(0, keyLen);
} else {
ssize_t err = mHTTPDownloader->fetchFile(keyURI.c_str(), &key);
@@ -1015,7 +1029,8 @@
sp<AMessage> itemMeta;
int64_t itemDurationUs;
int32_t targetDuration;
- if (mPlaylist->meta()->findInt32("target-duration", &targetDuration)) {
+ if (mPlaylist->meta() != NULL
+ && mPlaylist->meta()->findInt32("target-duration", &targetDuration)) {
do {
--index;
if (!mPlaylist->itemAt(index, NULL /* uri */, &itemMeta)
@@ -1692,12 +1707,12 @@
sp<AMessage> extra = new AMessage;
// Since we are using absolute timestamps, signal an offset of 0 to prevent
// ATSParser from skewing the timestamps of access units.
- extra->setInt64(IStreamListener::kKeyMediaTimeUs, 0);
+ extra->setInt64(kATSParserKeyMediaTimeUs, 0);
// When adapting, signal a recent media time to the parser,
// so that PTS wrap around is handled for the new variant.
if (mStartTimeUs >= 0 && !mStartTimeUsRelative) {
- extra->setInt64(IStreamListener::kKeyRecentMediaTimeUs, mStartTimeUs);
+ extra->setInt64(kATSParserKeyRecentMediaTimeUs, mStartTimeUs);
}
mTSParser->signalDiscontinuity(
@@ -1828,7 +1843,7 @@
(long long)timeUs - mStartTimeUs,
mIDRFound);
if (isAvc) {
- if (IsIDR(accessUnit)) {
+ if (IsIDR(accessUnit->data(), accessUnit->size())) {
mVideoBuffer->clear();
FSLOGV(stream, "found IDR, clear mVideoBuffer");
mIDRFound = true;
@@ -2063,7 +2078,8 @@
CHECK_NE(channel_configuration, 0u);
bits.skipBits(2); // original_copy, home
- sp<MetaData> meta = MakeAACCodecSpecificData(
+ sp<MetaData> meta = new MetaData();
+ MakeAACCodecSpecificData(*meta,
profile, sampling_freq_index, channel_configuration);
meta->setInt32(kKeyIsADTS, true);
diff --git a/media/libstagefright/id3/Android.bp b/media/libstagefright/id3/Android.bp
index 30008d9..37f9d50 100644
--- a/media/libstagefright/id3/Android.bp
+++ b/media/libstagefright/id3/Android.bp
@@ -16,8 +16,6 @@
cfi: true,
},
},
-
- shared_libs: ["libmedia"],
}
//###############################################################################
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 8d1ad66..a0a62f4 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -20,9 +20,9 @@
#include "../include/ID3.h"
+#include <media/DataSource.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include <utils/String8.h>
#include <byteswap.h>
@@ -30,7 +30,7 @@
static const size_t kMaxMetadataSize = 3 * 1024 * 1024;
-struct MemorySource : public DataSource {
+struct MemorySource : public DataSourceBase {
MemorySource(const uint8_t *data, size_t size)
: mData(data),
mSize(size) {
@@ -56,7 +56,7 @@
DISALLOW_EVIL_CONSTRUCTORS(MemorySource);
};
-ID3::ID3(const sp<DataSource> &source, bool ignoreV1, off64_t offset)
+ID3::ID3(DataSourceBase *source, bool ignoreV1, off64_t offset)
: mIsValid(false),
mData(NULL),
mSize(0),
@@ -77,7 +77,7 @@
mFirstFrameOffset(0),
mVersion(ID3_UNKNOWN),
mRawSize(0) {
- sp<MemorySource> source = new (std::nothrow) MemorySource(data, size);
+ MemorySource *source = new (std::nothrow) MemorySource(data, size);
if (source == NULL)
return;
@@ -87,6 +87,7 @@
if (!mIsValid && !ignoreV1) {
mIsValid = parseV1(source);
}
+ delete source;
}
ID3::~ID3() {
@@ -118,7 +119,7 @@
return true;
}
-bool ID3::parseV2(const sp<DataSource> &source, off64_t offset) {
+bool ID3::parseV2(DataSourceBase *source, off64_t offset) {
struct id3_header {
char id[3];
uint8_t version_major;
@@ -328,12 +329,25 @@
}
void ID3::removeUnsynchronization() {
- for (size_t i = 0; i + 1 < mSize; ++i) {
- if (mData[i] == 0xff && mData[i + 1] == 0x00) {
- memmove(&mData[i + 1], &mData[i + 2], mSize - i - 2);
- --mSize;
+
+ // This file has "unsynchronization", so we have to replace occurrences
+ // of 0xff 0x00 with just 0xff in order to get the real data.
+
+ size_t writeOffset = 1;
+ for (size_t readOffset = 1; readOffset < mSize; ++readOffset) {
+ if (mData[readOffset - 1] == 0xff && mData[readOffset] == 0x00) {
+ continue;
}
+ // Only move data if there's actually something to move.
+ // This handles the special case of the data being only [0xff, 0x00]
+ // which should be converted to just 0xff if unsynchronization is on.
+ mData[writeOffset++] = mData[readOffset];
}
+
+ if (writeOffset < mSize) {
+ mSize = writeOffset;
+ }
+
}
static void WriteSyncsafeInteger(uint8_t *dst, size_t x) {
@@ -392,7 +406,12 @@
--mSize;
--dataSize;
}
- mData[writeOffset++] = mData[readOffset++];
+ if (i + 1 < dataSize) {
+ // Only move data if there's actually something to move.
+ // This handles the special case of the data being only [0xff, 0x00]
+ // which should be converted to just 0xff if unsynchronization is on.
+ mData[writeOffset++] = mData[readOffset++];
+ }
}
// move the remaining data following this frame
if (readOffset <= oldSize) {
@@ -588,6 +607,9 @@
// UCS-2
// API wants number of characters, not number of bytes...
int len = n / 2;
+ if (len == 0) {
+ return;
+ }
const char16_t *framedata = (const char16_t *) (frameData + 1);
char16_t *framedatacopy = NULL;
if (*framedata == 0xfffe) {
@@ -955,7 +977,7 @@
return NULL;
}
-bool ID3::parseV1(const sp<DataSource> &source) {
+bool ID3::parseV1(DataSourceBase *source) {
const size_t V1_TAG_SIZE = 128;
off64_t size;
diff --git a/media/libstagefright/id3/testid3.cpp b/media/libstagefright/id3/testid3.cpp
index 442a3ff..86e6adf 100644
--- a/media/libstagefright/id3/testid3.cpp
+++ b/media/libstagefright/id3/testid3.cpp
@@ -72,7 +72,7 @@
sp<FileSource> file = new FileSource(path);
CHECK_EQ(file->initCheck(), (status_t)OK);
- ID3 tag(file);
+ ID3 tag(file.get());
if (!tag.isValid()) {
printf("FAIL %s\n", path);
} else {
diff --git a/media/libstagefright/include/AACExtractor.h b/media/libstagefright/include/AACExtractor.h
deleted file mode 100644
index bd4c41c..0000000
--- a/media/libstagefright/include/AACExtractor.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AAC_EXTRACTOR_H_
-
-#define AAC_EXTRACTOR_H_
-
-#include <media/stagefright/MediaExtractor.h>
-
-#include <utils/Vector.h>
-
-namespace android {
-
-struct AMessage;
-class String8;
-
-class AACExtractor : public MediaExtractor {
-public:
- AACExtractor(const sp<DataSource> &source, const sp<AMessage> &meta);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
- virtual const char * name() { return "AACExtractor"; }
-
-protected:
- virtual ~AACExtractor();
-
-private:
- sp<DataSource> mDataSource;
- sp<MetaData> mMeta;
- status_t mInitCheck;
-
- Vector<uint64_t> mOffsetVector;
- int64_t mFrameDurationUs;
-
- AACExtractor(const AACExtractor &);
- AACExtractor &operator=(const AACExtractor &);
-};
-
-bool SniffAAC(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // AAC_EXTRACTOR_H_
diff --git a/media/libstagefright/include/ACodecBufferChannel.h b/media/libstagefright/include/ACodecBufferChannel.h
index f253a52..7c01e45 100644
--- a/media/libstagefright/include/ACodecBufferChannel.h
+++ b/media/libstagefright/include/ACodecBufferChannel.h
@@ -29,8 +29,10 @@
#include <media/IOMX.h>
namespace android {
-
-using hardware::hidl_memory;
+namespace hardware {
+class HidlMemory;
+};
+using hardware::HidlMemory;
/**
* BufferChannelBase implementation for ACodec.
@@ -119,7 +121,7 @@
sp<MemoryDealer> mDealer;
sp<IMemory> mDecryptDestination;
int32_t mHeapSeqNum;
- hidl_memory mHidlMemory;
+ sp<HidlMemory> mHidlMemory;
// These should only be accessed via std::atomic_* functions.
//
diff --git a/media/libstagefright/include/AMRExtractor.h b/media/libstagefright/include/AMRExtractor.h
deleted file mode 100644
index 8abcb12..0000000
--- a/media/libstagefright/include/AMRExtractor.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AMR_EXTRACTOR_H_
-
-#define AMR_EXTRACTOR_H_
-
-#include <utils/Errors.h>
-#include <media/stagefright/MediaExtractor.h>
-
-namespace android {
-
-struct AMessage;
-class String8;
-#define OFFSET_TABLE_LEN 300
-
-class AMRExtractor : public MediaExtractor {
-public:
- explicit AMRExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
- virtual const char * name() { return "AMRExtractor"; }
-
-protected:
- virtual ~AMRExtractor();
-
-private:
- sp<DataSource> mDataSource;
- sp<MetaData> mMeta;
- status_t mInitCheck;
- bool mIsWide;
-
- off64_t mOffsetTable[OFFSET_TABLE_LEN]; //5 min
- size_t mOffsetTableLength;
-
- AMRExtractor(const AMRExtractor &);
- AMRExtractor &operator=(const AMRExtractor &);
-};
-
-bool SniffAMR(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // AMR_EXTRACTOR_H_
diff --git a/media/libstagefright/include/CallbackDataSource.h b/media/libstagefright/include/CallbackDataSource.h
index 0d775e6..9f413cd 100644
--- a/media/libstagefright/include/CallbackDataSource.h
+++ b/media/libstagefright/include/CallbackDataSource.h
@@ -17,7 +17,7 @@
#ifndef ANDROID_CALLBACKDATASOURCE_H
#define ANDROID_CALLBACKDATASOURCE_H
-#include <media/stagefright/DataSource.h>
+#include <media/DataSource.h>
#include <media/stagefright/foundation/ADebug.h>
namespace android {
diff --git a/media/libstagefright/include/FLACExtractor.h b/media/libstagefright/include/FLACExtractor.h
deleted file mode 100644
index 51bc139..0000000
--- a/media/libstagefright/include/FLACExtractor.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FLAC_EXTRACTOR_H_
-#define FLAC_EXTRACTOR_H_
-
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class FLACParser;
-
-class FLACExtractor : public MediaExtractor {
-
-public:
- // Extractor assumes ownership of source
- explicit FLACExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
- virtual const char * name() { return "FLACExtractor"; }
-
-protected:
- virtual ~FLACExtractor();
-
-private:
- sp<DataSource> mDataSource;
- sp<FLACParser> mParser;
- status_t mInitCheck;
- sp<MetaData> mFileMetadata;
-
- // There is only one track
- sp<MetaData> mTrackMetadata;
-
- status_t init();
-
- FLACExtractor(const FLACExtractor &);
- FLACExtractor &operator=(const FLACExtractor &);
-
-};
-
-bool SniffFLAC(const sp<DataSource> &source, String8 *mimeType,
- float *confidence, sp<AMessage> *);
-
-} // namespace android
-
-#endif // FLAC_EXTRACTOR_H_
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
new file mode 100644
index 0000000..dc58c15
--- /dev/null
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_DECODER_H_
+#define FRAME_DECODER_H_
+
+#include <memory>
+#include <vector>
+
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/MediaSource.h>
+#include <media/openmax/OMX_Video.h>
+#include <system/graphics-base.h>
+
+namespace android {
+
+struct AMessage;
+class MediaCodecBuffer;
+class IMediaSource;
+class VideoFrame;
+struct MediaCodec;
+
+struct FrameRect {
+ int32_t left, top, right, bottom;
+};
+
+struct FrameDecoder : public RefBase {
+ FrameDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source);
+
+ status_t init(
+ int64_t frameTimeUs, size_t numFrames, int option, int colorFormat);
+
+ sp<IMemory> extractFrame(FrameRect *rect = NULL);
+
+ status_t extractFrames(std::vector<sp<IMemory> >* frames);
+
+ static sp<IMemory> getMetadataOnly(
+ const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
+
+protected:
+ virtual ~FrameDecoder();
+
+ virtual sp<AMessage> onGetFormatAndSeekOptions(
+ int64_t frameTimeUs,
+ size_t numFrames,
+ int seekMode,
+ MediaSource::ReadOptions *options) = 0;
+
+ virtual status_t onExtractRect(FrameRect *rect) = 0;
+
+ virtual status_t onInputReceived(
+ const sp<MediaCodecBuffer> &codecBuffer,
+ MetaDataBase &sampleMeta,
+ bool firstSample,
+ uint32_t *flags) = 0;
+
+ virtual status_t onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat,
+ int64_t timeUs,
+ bool *done) = 0;
+
+ sp<MetaData> trackMeta() const { return mTrackMeta; }
+ OMX_COLOR_FORMATTYPE dstFormat() const { return mDstFormat; }
+ int32_t dstBpp() const { return mDstBpp; }
+
+ void addFrame(const sp<IMemory> &frame) {
+ mFrames.push_back(frame);
+ }
+
+private:
+ AString mComponentName;
+ sp<MetaData> mTrackMeta;
+ sp<IMediaSource> mSource;
+ OMX_COLOR_FORMATTYPE mDstFormat;
+ int32_t mDstBpp;
+ std::vector<sp<IMemory> > mFrames;
+ MediaSource::ReadOptions mReadOptions;
+ sp<MediaCodec> mDecoder;
+ sp<AMessage> mOutputFormat;
+ bool mHaveMoreInputs;
+ bool mFirstSample;
+
+ status_t extractInternal();
+
+ DISALLOW_EVIL_CONSTRUCTORS(FrameDecoder);
+};
+
+struct VideoFrameDecoder : public FrameDecoder {
+ VideoFrameDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source);
+
+protected:
+ virtual sp<AMessage> onGetFormatAndSeekOptions(
+ int64_t frameTimeUs,
+ size_t numFrames,
+ int seekMode,
+ MediaSource::ReadOptions *options) override;
+
+ virtual status_t onExtractRect(FrameRect *rect) override {
+ // Rect extraction for sequences is not supported for now.
+ return (rect == NULL) ? OK : ERROR_UNSUPPORTED;
+ }
+
+ virtual status_t onInputReceived(
+ const sp<MediaCodecBuffer> &codecBuffer,
+ MetaDataBase &sampleMeta,
+ bool firstSample,
+ uint32_t *flags) override;
+
+ virtual status_t onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat,
+ int64_t timeUs,
+ bool *done) override;
+
+private:
+ bool mIsAvcOrHevc;
+ MediaSource::ReadOptions::SeekMode mSeekMode;
+ int64_t mTargetTimeUs;
+ size_t mNumFrames;
+ size_t mNumFramesDecoded;
+};
+
+struct ImageDecoder : public FrameDecoder {
+ ImageDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source);
+
+protected:
+ virtual sp<AMessage> onGetFormatAndSeekOptions(
+ int64_t frameTimeUs,
+ size_t numFrames,
+ int seekMode,
+ MediaSource::ReadOptions *options) override;
+
+ virtual status_t onExtractRect(FrameRect *rect) override;
+
+ virtual status_t onInputReceived(
+ const sp<MediaCodecBuffer> &codecBuffer __unused,
+ MetaDataBase &sampleMeta __unused,
+ bool firstSample __unused,
+ uint32_t *flags __unused) override { return OK; }
+
+ virtual status_t onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat,
+ int64_t timeUs,
+ bool *done) override;
+
+private:
+ VideoFrame *mFrame;
+ int32_t mWidth;
+ int32_t mHeight;
+ int32_t mGridRows;
+ int32_t mGridCols;
+ int32_t mTileWidth;
+ int32_t mTileHeight;
+ int32_t mTilesDecoded;
+ int32_t mTargetTiles;
+};
+
+} // namespace android
+
+#endif // FRAME_DECODER_H_
diff --git a/media/libstagefright/include/HTTPBase.h b/media/libstagefright/include/HTTPBase.h
index d325e30..a924197 100644
--- a/media/libstagefright/include/HTTPBase.h
+++ b/media/libstagefright/include/HTTPBase.h
@@ -18,9 +18,10 @@
#define HTTP_BASE_H_
+#include <media/DataSource.h>
#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaErrors.h>
+#include <utils/List.h>
#include <utils/threads.h>
namespace android {
diff --git a/media/libstagefright/include/ID3.h b/media/libstagefright/include/ID3.h
index 11682f8..7c2391e 100644
--- a/media/libstagefright/include/ID3.h
+++ b/media/libstagefright/include/ID3.h
@@ -22,7 +22,7 @@
namespace android {
-class DataSource;
+class DataSourceBase;
class String8;
struct ID3 {
@@ -35,7 +35,7 @@
ID3_V2_4,
};
- explicit ID3(const sp<DataSource> &source, bool ignoreV1 = false, off64_t offset = 0);
+ explicit ID3(DataSourceBase *source, bool ignoreV1 = false, off64_t offset = 0);
ID3(const uint8_t *data, size_t size, bool ignoreV1 = false);
~ID3();
@@ -85,8 +85,8 @@
// only valid for IDV2+
size_t mRawSize;
- bool parseV1(const sp<DataSource> &source);
- bool parseV2(const sp<DataSource> &source, off64_t offset);
+ bool parseV1(DataSourceBase *source);
+ bool parseV2(DataSourceBase *source, off64_t offset);
void removeUnsynchronization();
bool removeUnsynchronizationV2_4(bool iTunesHack);
diff --git a/media/libstagefright/include/ItemTable.h b/media/libstagefright/include/ItemTable.h
deleted file mode 100644
index 5a6af5e..0000000
--- a/media/libstagefright/include/ItemTable.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ITEM_TABLE_H_
-#define ITEM_TABLE_H_
-
-#include <set>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <utils/KeyedVector.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-class DataSource;
-class MetaData;
-
-namespace heif {
-
-struct AssociationEntry;
-struct ImageItem;
-struct ItemLoc;
-struct ItemInfo;
-struct ItemProperty;
-struct ItemReference;
-
-/*
- * ItemTable keeps track of all image items (including coded images, grids and
- * tiles) inside a HEIF still image (ISO/IEC FDIS 23008-12.2:2017(E)).
- */
-
-class ItemTable : public RefBase {
-public:
- explicit ItemTable(const sp<DataSource> &source);
-
- status_t parse(uint32_t type, off64_t offset, size_t size);
-
- bool isValid() { return mImageItemsValid; }
- sp<MetaData> getImageMeta();
- uint32_t countImages() const;
- status_t findPrimaryImage(uint32_t *imageIndex);
- status_t findThumbnail(uint32_t *thumbnailIndex);
- status_t getImageOffsetAndSize(
- uint32_t *imageIndex, off64_t *offset, size_t *size);
-
-protected:
- ~ItemTable();
-
-private:
- sp<DataSource> mDataSource;
-
- KeyedVector<uint32_t, ItemLoc> mItemLocs;
- Vector<ItemInfo> mItemInfos;
- Vector<AssociationEntry> mAssociations;
- Vector<sp<ItemProperty> > mItemProperties;
- Vector<sp<ItemReference> > mItemReferences;
-
- uint32_t mPrimaryItemId;
- off64_t mIdatOffset;
- size_t mIdatSize;
-
- std::set<uint32_t> mRequiredBoxes;
- std::set<uint32_t> mBoxesSeen;
-
- bool mImageItemsValid;
- uint32_t mCurrentImageIndex;
- KeyedVector<uint32_t, ImageItem> mItemIdToImageMap;
-
- status_t parseIlocBox(off64_t offset, size_t size);
- status_t parseIinfBox(off64_t offset, size_t size);
- status_t parsePitmBox(off64_t offset, size_t size);
- status_t parseIprpBox(off64_t offset, size_t size);
- status_t parseIdatBox(off64_t offset, size_t size);
- status_t parseIrefBox(off64_t offset, size_t size);
-
- void attachProperty(const AssociationEntry &association);
- status_t buildImageItemsIfPossible(uint32_t type);
-
- DISALLOW_EVIL_CONSTRUCTORS(ItemTable);
-};
-
-} // namespace heif
-} // namespace android
-
-#endif // ITEM_TABLE_H_
diff --git a/media/libstagefright/include/MP3Extractor.h b/media/libstagefright/include/MP3Extractor.h
deleted file mode 100644
index 2fd04f2..0000000
--- a/media/libstagefright/include/MP3Extractor.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MP3_EXTRACTOR_H_
-
-#define MP3_EXTRACTOR_H_
-
-#include <utils/Errors.h>
-#include <media/stagefright/MediaExtractor.h>
-
-namespace android {
-
-struct AMessage;
-class DataSource;
-struct MP3Seeker;
-class String8;
-
-class MP3Extractor : public MediaExtractor {
-public:
- // Extractor assumes ownership of "source".
- MP3Extractor(const sp<DataSource> &source, const sp<AMessage> &meta);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
- virtual const char * name() { return "MP3Extractor"; }
-
-private:
- status_t mInitCheck;
-
- sp<DataSource> mDataSource;
- off64_t mFirstFramePos;
- sp<MetaData> mMeta;
- uint32_t mFixedHeader;
- sp<MP3Seeker> mSeeker;
-
- MP3Extractor(const MP3Extractor &);
- MP3Extractor &operator=(const MP3Extractor &);
-};
-
-bool SniffMP3(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *meta);
-
-} // namespace android
-
-#endif // MP3_EXTRACTOR_H_
diff --git a/media/libstagefright/include/MP3Seeker.h b/media/libstagefright/include/MP3Seeker.h
deleted file mode 100644
index 599542e..0000000
--- a/media/libstagefright/include/MP3Seeker.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MP3_SEEKER_H_
-
-#define MP3_SEEKER_H_
-
-#include <media/stagefright/foundation/ABase.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-struct MP3Seeker : public RefBase {
- MP3Seeker() {}
-
- virtual bool getDuration(int64_t *durationUs) = 0;
-
- // Given a request seek time in "*timeUs", find the byte offset closest
- // to that position and return it in "*pos". Update "*timeUs" to reflect
- // the actual time that seekpoint represents.
- virtual bool getOffsetForTime(int64_t *timeUs, off64_t *pos) = 0;
-
-protected:
- virtual ~MP3Seeker() {}
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(MP3Seeker);
-};
-
-} // namespace android
-
-#endif // MP3_SEEKER_H_
-
diff --git a/media/libstagefright/include/MPEG2PSExtractor.h b/media/libstagefright/include/MPEG2PSExtractor.h
deleted file mode 100644
index f5471b3..0000000
--- a/media/libstagefright/include/MPEG2PSExtractor.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MPEG2_PS_EXTRACTOR_H_
-
-#define MPEG2_PS_EXTRACTOR_H_
-
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <utils/threads.h>
-#include <utils/KeyedVector.h>
-
-namespace android {
-
-struct ABuffer;
-struct AMessage;
-struct Track;
-class String8;
-
-struct MPEG2PSExtractor : public MediaExtractor {
- explicit MPEG2PSExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
-
- virtual uint32_t flags() const;
- virtual const char * name() { return "MPEG2PSExtractor"; }
-
-protected:
- virtual ~MPEG2PSExtractor();
-
-private:
- struct Track;
- struct WrappedTrack;
-
- mutable Mutex mLock;
- sp<DataSource> mDataSource;
-
- off64_t mOffset;
- status_t mFinalResult;
- sp<ABuffer> mBuffer;
- KeyedVector<unsigned, sp<Track> > mTracks;
- bool mScanning;
-
- bool mProgramStreamMapValid;
- KeyedVector<unsigned, unsigned> mStreamTypeByESID;
-
- status_t feedMore();
-
- status_t dequeueChunk();
- ssize_t dequeuePack();
- ssize_t dequeueSystemHeader();
- ssize_t dequeuePES();
-
- DISALLOW_EVIL_CONSTRUCTORS(MPEG2PSExtractor);
-};
-
-bool SniffMPEG2PS(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // MPEG2_PS_EXTRACTOR_H_
-
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
deleted file mode 100644
index ac93b5e..0000000
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MPEG2_TS_EXTRACTOR_H_
-
-#define MPEG2_TS_EXTRACTOR_H_
-
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
-#include <utils/threads.h>
-#include <utils/KeyedVector.h>
-#include <utils/Vector.h>
-
-#include "mpeg2ts/ATSParser.h"
-
-namespace android {
-
-struct AMessage;
-struct AnotherPacketSource;
-struct ATSParser;
-class DataSource;
-struct MPEG2TSSource;
-class String8;
-
-struct MPEG2TSExtractor : public MediaExtractor {
- explicit MPEG2TSExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
-
- virtual status_t setMediaCas(const HInterfaceToken &casToken) override;
-
- virtual uint32_t flags() const;
- virtual const char * name() { return "MPEG2TSExtractor"; }
-
-private:
- friend struct MPEG2TSSource;
-
- mutable Mutex mLock;
-
- sp<DataSource> mDataSource;
-
- sp<ATSParser> mParser;
-
- // Used to remember SyncEvent occurred in feedMore() when called from init(),
- // because init() needs to update |mSourceImpls| before adding SyncPoint.
- ATSParser::SyncEvent mLastSyncEvent;
-
- Vector<sp<AnotherPacketSource> > mSourceImpls;
-
- Vector<KeyedVector<int64_t, off64_t> > mSyncPoints;
- // Sync points used for seeking --- normally one for video track is used.
- // If no video track is present, audio track will be used instead.
- KeyedVector<int64_t, off64_t> *mSeekSyncPoints;
-
- off64_t mOffset;
-
- static bool isScrambledFormat(const sp<MetaData> &format);
-
- void init();
- void addSource(const sp<AnotherPacketSource> &impl);
- // Try to feed more data from source to parser.
- // |isInit| means this function is called inside init(). This is a signal to
- // save SyncEvent so that init() can add SyncPoint after it updates |mSourceImpls|.
- // This function returns OK if expected amount of data is fed from DataSource to
- // parser and is successfully parsed. Otherwise, various error codes could be
- // returned, e.g., ERROR_END_OF_STREAM, or no data availalbe from DataSource, or
- // the data has syntax error during parsing, etc.
- status_t feedMore(bool isInit = false);
- status_t seek(int64_t seekTimeUs,
- const MediaSource::ReadOptions::SeekMode& seekMode);
- status_t queueDiscontinuityForSeek(int64_t actualSeekTimeUs);
- status_t seekBeyond(int64_t seekTimeUs);
-
- status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
-
- // Add a SynPoint derived from |event|.
- void addSyncPoint_l(const ATSParser::SyncEvent &event);
-
- status_t estimateDurationsFromTimesUsAtEnd();
-
- DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
-};
-
-bool SniffMPEG2TS(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // MPEG2_TS_EXTRACTOR_H_
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
deleted file mode 100644
index 214a3de..0000000
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MPEG4_EXTRACTOR_H_
-
-#define MPEG4_EXTRACTOR_H_
-
-#include <arpa/inet.h>
-
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/Utils.h>
-#include <utils/List.h>
-#include <utils/Vector.h>
-#include <utils/String8.h>
-
-namespace android {
-struct AMessage;
-class DataSource;
-class SampleTable;
-class String8;
-namespace heif {
-class ItemTable;
-}
-using heif::ItemTable;
-
-struct SidxEntry {
- size_t mSize;
- uint32_t mDurationUs;
-};
-
-struct Trex {
- uint32_t track_ID;
- uint32_t default_sample_description_index;
- uint32_t default_sample_duration;
- uint32_t default_sample_size;
- uint32_t default_sample_flags;
-};
-
-class MPEG4Extractor : public MediaExtractor {
-public:
- // Extractor assumes ownership of "source".
- explicit MPEG4Extractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
- virtual uint32_t flags() const;
- virtual const char * name() { return "MPEG4Extractor"; }
- virtual void release();
-
- // for DRM
- virtual char* getDrmTrackInfo(size_t trackID, int *len);
-
-protected:
- virtual ~MPEG4Extractor();
-
- virtual void populateMetrics();
-
-private:
-
- struct PsshInfo {
- uint8_t uuid[16];
- uint32_t datalen;
- uint8_t *data;
- };
- struct Track {
- Track *next;
- sp<MetaData> meta;
- uint32_t timescale;
- sp<SampleTable> sampleTable;
- bool includes_expensive_metadata;
- bool skipTrack;
- bool has_elst;
- int64_t elst_media_time;
- uint64_t elst_segment_duration;
- };
-
- Vector<SidxEntry> mSidxEntries;
- off64_t mMoofOffset;
- bool mMoofFound;
- bool mMdatFound;
-
- Vector<PsshInfo> mPssh;
-
- Vector<Trex> mTrex;
-
- sp<DataSource> mDataSource;
- status_t mInitCheck;
- uint32_t mHeaderTimescale;
- bool mIsQT;
- bool mIsHEIF;
-
- Track *mFirstTrack, *mLastTrack;
-
- sp<MetaData> mFileMetaData;
-
- Vector<uint32_t> mPath;
- String8 mLastCommentMean;
- String8 mLastCommentName;
- String8 mLastCommentData;
-
- KeyedVector<uint32_t, AString> mMetaKeyMap;
-
- status_t readMetaData();
- status_t parseChunk(off64_t *offset, int depth);
- status_t parseITunesMetaData(off64_t offset, size_t size);
- status_t parseColorInfo(off64_t offset, size_t size);
- status_t parse3GPPMetaData(off64_t offset, size_t size, int depth);
- void parseID3v2MetaData(off64_t offset);
- status_t parseQTMetaKey(off64_t data_offset, size_t data_size);
- status_t parseQTMetaVal(int32_t keyId, off64_t data_offset, size_t data_size);
-
- status_t updateAudioTrackInfoFromESDS_MPEG4Audio(
- const void *esds_data, size_t esds_size);
-
- static status_t verifyTrack(Track *track);
-
- struct SINF {
- SINF *next;
- uint16_t trackID;
- uint8_t IPMPDescriptorID;
- ssize_t len;
- char *IPMPData;
- };
-
- SINF *mFirstSINF;
-
- bool mIsDrm;
- sp<ItemTable> mItemTable;
-
- status_t parseDrmSINF(off64_t *offset, off64_t data_offset);
-
- status_t parseTrackHeader(off64_t data_offset, off64_t data_size);
-
- status_t parseSegmentIndex(off64_t data_offset, size_t data_size);
-
- Track *findTrackByMimePrefix(const char *mimePrefix);
-
- status_t parseAC3SampleEntry(off64_t offset);
- status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
-
- MPEG4Extractor(const MPEG4Extractor &);
- MPEG4Extractor &operator=(const MPEG4Extractor &);
-};
-
-bool SniffMPEG4(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // MPEG4_EXTRACTOR_H_
diff --git a/media/libstagefright/include/MidiExtractor.h b/media/libstagefright/include/MidiExtractor.h
deleted file mode 100644
index 94d2d08..0000000
--- a/media/libstagefright/include/MidiExtractor.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MIDI_EXTRACTOR_H_
-#define MIDI_EXTRACTOR_H_
-
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/MidiIoWrapper.h>
-#include <utils/String8.h>
-#include <libsonivox/eas.h>
-
-namespace android {
-
-class MidiEngine : public RefBase {
-public:
- MidiEngine(const sp<DataSource> &dataSource,
- const sp<MetaData> &fileMetadata,
- const sp<MetaData> &trackMetadata);
- ~MidiEngine();
-
- status_t initCheck();
-
- status_t allocateBuffers();
- status_t releaseBuffers();
- status_t seekTo(int64_t positionUs);
- MediaBuffer* readBuffer();
-private:
- sp<MidiIoWrapper> mIoWrapper;
- MediaBufferGroup *mGroup;
- EAS_DATA_HANDLE mEasData;
- EAS_HANDLE mEasHandle;
- const S_EAS_LIB_CONFIG* mEasConfig;
- bool mIsInitialized;
-};
-
-class MidiExtractor : public MediaExtractor {
-
-public:
- // Extractor assumes ownership of source
- explicit MidiExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
- virtual const char * name() { return "MidiExtractor"; }
-
-protected:
- virtual ~MidiExtractor();
-
-private:
- sp<DataSource> mDataSource;
- status_t mInitCheck;
- sp<MetaData> mFileMetadata;
-
- // There is only one track
- sp<MetaData> mTrackMetadata;
-
- sp<MidiEngine> mEngine;
-
- EAS_DATA_HANDLE mEasData;
- EAS_HANDLE mEasHandle;
- EAS_PCM* mAudioBuffer;
- EAS_I32 mPlayTime;
- EAS_I32 mDuration;
- EAS_STATE mState;
- EAS_FILE mFileLocator;
-
- MidiExtractor(const MidiExtractor &);
- MidiExtractor &operator=(const MidiExtractor &);
-
-};
-
-bool SniffMidi(const sp<DataSource> &source, String8 *mimeType,
- float *confidence, sp<AMessage> *);
-
-} // namespace android
-
-#endif // MIDI_EXTRACTOR_H_
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h
index 2639280..f439a1c 100644
--- a/media/libstagefright/include/NuCachedSource2.h
+++ b/media/libstagefright/include/NuCachedSource2.h
@@ -18,9 +18,9 @@
#define NU_CACHED_SOURCE_2_H_
+#include <media/DataSource.h>
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AHandlerReflector.h>
-#include <media/stagefright/DataSource.h>
namespace android {
@@ -43,7 +43,6 @@
virtual uint32_t flags();
virtual sp<DecryptHandle> DrmInitialization(const char* mime);
- virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
virtual String8 getUri();
virtual String8 getMIMEType() const;
diff --git a/media/libstagefright/include/OggExtractor.h b/media/libstagefright/include/OggExtractor.h
deleted file mode 100644
index 55aafed..0000000
--- a/media/libstagefright/include/OggExtractor.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OGG_EXTRACTOR_H_
-
-#define OGG_EXTRACTOR_H_
-
-#include <utils/Errors.h>
-#include <media/stagefright/MediaExtractor.h>
-
-namespace android {
-
-struct AMessage;
-class DataSource;
-class String8;
-
-struct MyOggExtractor;
-struct OggSource;
-
-struct OggExtractor : public MediaExtractor {
- explicit OggExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
- virtual const char * name() { return "OggExtractor"; }
-
-protected:
- virtual ~OggExtractor();
-
-private:
- friend struct OggSource;
-
- sp<DataSource> mDataSource;
- status_t mInitCheck;
-
- MyOggExtractor *mImpl;
-
- OggExtractor(const OggExtractor &);
- OggExtractor &operator=(const OggExtractor &);
-};
-
-bool SniffOgg(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-void parseVorbisComment(
- const sp<MetaData> &fileMeta, const char *comment, size_t commentLength);
-
-} // namespace android
-
-#endif // OGG_EXTRACTOR_H_
diff --git a/media/libstagefright/include/OmxNodeOwner.h b/media/libstagefright/include/OmxNodeOwner.h
deleted file mode 100644
index 64ec7f7..0000000
--- a/media/libstagefright/include/OmxNodeOwner.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OMX_NODE_OWNER_H_
-
-#define OMX_NODE_OWNER_H_
-
-namespace android {
-
-struct OMXNodeInstance;
-
-/**
- * This struct is needed to separate OMX from OMXNodeInstance.
- *
- * TODO: This might not be needed after Treble transition is complete.
- */
-struct OmxNodeOwner {
- virtual status_t freeNode(const sp<OMXNodeInstance> &instance) = 0;
- virtual ~OmxNodeOwner() {}
-};
-
-}
-
-#endif // OMX_NODE_OWNER_H_
diff --git a/media/libstagefright/include/SDPLoader.h b/media/libstagefright/include/SDPLoader.h
index 2c4f543..b901c97 100644
--- a/media/libstagefright/include/SDPLoader.h
+++ b/media/libstagefright/include/SDPLoader.h
@@ -25,7 +25,7 @@
namespace android {
struct HTTPBase;
-struct IMediaHTTPService;
+struct MediaHTTPService;
struct SDPLoader : public AHandler {
enum Flags {
@@ -38,7 +38,7 @@
SDPLoader(
const sp<AMessage> &notify,
uint32_t flags,
- const sp<IMediaHTTPService> &httpService);
+ const sp<MediaHTTPService> &httpService);
void load(const char* url, const KeyedVector<String8, String8> *headers);
diff --git a/media/libstagefright/include/SampleTable.h b/media/libstagefright/include/SampleTable.h
deleted file mode 100644
index eb1a674..0000000
--- a/media/libstagefright/include/SampleTable.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef SAMPLE_TABLE_H_
-
-#define SAMPLE_TABLE_H_
-
-#include <sys/types.h>
-#include <stdint.h>
-
-#include <media/stagefright/MediaErrors.h>
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-
-namespace android {
-
-class DataSource;
-struct SampleIterator;
-
-class SampleTable : public RefBase {
-public:
- explicit SampleTable(const sp<DataSource> &source);
-
- bool isValid() const;
-
- // type can be 'stco' or 'co64'.
- status_t setChunkOffsetParams(
- uint32_t type, off64_t data_offset, size_t data_size);
-
- status_t setSampleToChunkParams(off64_t data_offset, size_t data_size);
-
- // type can be 'stsz' or 'stz2'.
- status_t setSampleSizeParams(
- uint32_t type, off64_t data_offset, size_t data_size);
-
- status_t setTimeToSampleParams(off64_t data_offset, size_t data_size);
-
- status_t setCompositionTimeToSampleParams(
- off64_t data_offset, size_t data_size);
-
- status_t setSyncSampleParams(off64_t data_offset, size_t data_size);
-
- ////////////////////////////////////////////////////////////////////////////
-
- uint32_t countChunkOffsets() const;
-
- uint32_t countSamples() const;
-
- status_t getMaxSampleSize(size_t *size);
-
- status_t getMetaDataForSample(
- uint32_t sampleIndex,
- off64_t *offset,
- size_t *size,
- uint32_t *compositionTime,
- bool *isSyncSample = NULL,
- uint32_t *sampleDuration = NULL);
-
- enum {
- kFlagBefore,
- kFlagAfter,
- kFlagClosest
- };
- status_t findSampleAtTime(
- uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
- uint32_t *sample_index, uint32_t flags);
-
- status_t findSyncSampleNear(
- uint32_t start_sample_index, uint32_t *sample_index,
- uint32_t flags);
-
- status_t findThumbnailSample(uint32_t *sample_index);
-
-protected:
- ~SampleTable();
-
-private:
- struct CompositionDeltaLookup;
-
- static const uint32_t kChunkOffsetType32;
- static const uint32_t kChunkOffsetType64;
- static const uint32_t kSampleSizeType32;
- static const uint32_t kSampleSizeTypeCompact;
-
- // Limit the total size of all internal tables to 200MiB.
- static const size_t kMaxTotalSize = 200 * (1 << 20);
-
- sp<DataSource> mDataSource;
- Mutex mLock;
-
- off64_t mChunkOffsetOffset;
- uint32_t mChunkOffsetType;
- uint32_t mNumChunkOffsets;
-
- off64_t mSampleToChunkOffset;
- uint32_t mNumSampleToChunkOffsets;
-
- off64_t mSampleSizeOffset;
- uint32_t mSampleSizeFieldSize;
- uint32_t mDefaultSampleSize;
- uint32_t mNumSampleSizes;
-
- bool mHasTimeToSample;
- uint32_t mTimeToSampleCount;
- uint32_t* mTimeToSample;
-
- struct SampleTimeEntry {
- uint32_t mSampleIndex;
- uint32_t mCompositionTime;
- };
- SampleTimeEntry *mSampleTimeEntries;
-
- int32_t *mCompositionTimeDeltaEntries;
- size_t mNumCompositionTimeDeltaEntries;
- CompositionDeltaLookup *mCompositionDeltaLookup;
-
- off64_t mSyncSampleOffset;
- uint32_t mNumSyncSamples;
- uint32_t *mSyncSamples;
- size_t mLastSyncSampleIndex;
-
- SampleIterator *mSampleIterator;
-
- struct SampleToChunkEntry {
- uint32_t startChunk;
- uint32_t samplesPerChunk;
- uint32_t chunkDesc;
- };
- SampleToChunkEntry *mSampleToChunkEntries;
-
- // Approximate size of all tables combined.
- uint64_t mTotalSize;
-
- friend struct SampleIterator;
-
- // normally we don't round
- inline uint64_t getSampleTime(
- size_t sample_index, uint64_t scale_num, uint64_t scale_den) const {
- return (sample_index < (size_t)mNumSampleSizes && mSampleTimeEntries != NULL
- && scale_den != 0)
- ? (mSampleTimeEntries[sample_index].mCompositionTime * scale_num) / scale_den : 0;
- }
-
- status_t getSampleSize_l(uint32_t sample_index, size_t *sample_size);
- int32_t getCompositionTimeOffset(uint32_t sampleIndex);
-
- static int CompareIncreasingTime(const void *, const void *);
-
- void buildSampleEntriesTable();
-
- SampleTable(const SampleTable &);
- SampleTable &operator=(const SampleTable &);
-};
-
-} // namespace android
-
-#endif // SAMPLE_TABLE_H_
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index 258511a..c286516 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -22,6 +22,7 @@
#include <media/stagefright/FrameRenderTracker.h>
#include <utils/RefBase.h>
#include <system/window.h>
+#include <media/hardware/VideoAPI.h>
#include <list>
@@ -38,7 +39,7 @@
std::list<FrameRenderTracker::Info> render(
const void *data, size_t size, int64_t mediaTimeUs, nsecs_t renderTimeNs,
- void *platformPrivate, const sp<AMessage> &format);
+ size_t numOutputBuffers, const sp<AMessage> &format);
void clearTracker();
private:
@@ -55,12 +56,14 @@
int32_t mCropWidth, mCropHeight;
int32_t mRotationDegrees;
android_dataspace mDataSpace;
+ HDRStaticInfo mHDRStaticInfo;
FrameRenderTracker mRenderTracker;
+ void resetFormatIfChanged(
+ const sp<AMessage> &format, size_t numOutputBuffers);
+
SoftwareRenderer(const SoftwareRenderer &);
SoftwareRenderer &operator=(const SoftwareRenderer &);
-
- void resetFormatIfChanged(const sp<AMessage> &format);
};
} // namespace android
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index 277eb3e..f78e125 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -27,8 +27,10 @@
class DataSource;
class MediaExtractor;
+struct ImageDecoder;
+struct FrameRect;
-struct StagefrightMetadataRetriever : public MediaMetadataRetrieverInterface {
+struct StagefrightMetadataRetriever : public MediaMetadataRetrieverBase {
StagefrightMetadataRetriever();
virtual ~StagefrightMetadataRetriever();
@@ -40,7 +42,16 @@
virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
virtual status_t setDataSource(const sp<DataSource>& source, const char *mime);
- virtual VideoFrame *getFrameAtTime(int64_t timeUs, int option, int colorFormat, bool metaOnly);
+ virtual sp<IMemory> getFrameAtTime(
+ int64_t timeUs, int option, int colorFormat, bool metaOnly);
+ virtual sp<IMemory> getImageAtIndex(
+ int index, int colorFormat, bool metaOnly, bool thumbnail);
+ virtual sp<IMemory> getImageRectAtIndex(
+ int index, int colorFormat, int left, int top, int right, int bottom);
+ virtual status_t getFrameAtIndex(
+ std::vector<sp<IMemory> >* frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+
virtual MediaAlbumArt *extractAlbumArt();
virtual const char *extractMetadata(int keyCode);
@@ -52,10 +63,18 @@
KeyedVector<int, String8> mMetaData;
MediaAlbumArt *mAlbumArt;
+ sp<ImageDecoder> mImageDecoder;
+ int mLastImageIndex;
void parseMetaData();
// Delete album art and clear metadata.
void clearMetadata();
+ status_t getFrameInternal(
+ int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
+ sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames);
+ virtual sp<IMemory> getImageInternal(
+ int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect);
+
StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);
StagefrightMetadataRetriever &operator=(
diff --git a/media/libstagefright/include/ThrottledSource.h b/media/libstagefright/include/ThrottledSource.h
index 673268b..71e62f7 100644
--- a/media/libstagefright/include/ThrottledSource.h
+++ b/media/libstagefright/include/ThrottledSource.h
@@ -18,7 +18,7 @@
#define THROTTLED_SOURCE_H_
-#include <media/stagefright/DataSource.h>
+#include <media/DataSource.h>
#include <utils/threads.h>
namespace android {
@@ -58,10 +58,6 @@
return mSource->DrmInitialization(mime);
}
- virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client) {
- mSource->getDrmInfo(handle, client);
- };
-
virtual String8 getMIMEType() const {
return mSource->getMIMEType();
}
diff --git a/media/libstagefright/include/VBRISeeker.h b/media/libstagefright/include/VBRISeeker.h
deleted file mode 100644
index c57d571..0000000
--- a/media/libstagefright/include/VBRISeeker.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VBRI_SEEKER_H_
-
-#define VBRI_SEEKER_H_
-
-#include "include/MP3Seeker.h"
-
-#include <utils/Vector.h>
-
-namespace android {
-
-class DataSource;
-
-struct VBRISeeker : public MP3Seeker {
- static sp<VBRISeeker> CreateFromSource(
- const sp<DataSource> &source, off64_t post_id3_pos);
-
- virtual bool getDuration(int64_t *durationUs);
- virtual bool getOffsetForTime(int64_t *timeUs, off64_t *pos);
-
-private:
- off64_t mBasePos;
- int64_t mDurationUs;
- Vector<uint32_t> mSegments;
-
- VBRISeeker();
-
- DISALLOW_EVIL_CONSTRUCTORS(VBRISeeker);
-};
-
-} // namespace android
-
-#endif // VBRI_SEEKER_H_
-
-
diff --git a/media/libstagefright/include/WAVExtractor.h b/media/libstagefright/include/WAVExtractor.h
deleted file mode 100644
index 12ad441..0000000
--- a/media/libstagefright/include/WAVExtractor.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef WAV_EXTRACTOR_H_
-
-#define WAV_EXTRACTOR_H_
-
-#include <utils/Errors.h>
-#include <media/stagefright/MediaExtractor.h>
-
-namespace android {
-
-struct AMessage;
-class DataSource;
-class String8;
-
-class WAVExtractor : public MediaExtractor {
-public:
- // Extractor assumes ownership of "source".
- explicit WAVExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
- virtual const char * name() { return "WAVExtractor"; }
-
-protected:
- virtual ~WAVExtractor();
-
-private:
- sp<DataSource> mDataSource;
- status_t mInitCheck;
- bool mValidFormat;
- uint16_t mWaveFormat;
- uint16_t mNumChannels;
- uint32_t mChannelMask;
- uint32_t mSampleRate;
- uint16_t mBitsPerSample;
- off64_t mDataOffset;
- size_t mDataSize;
- sp<MetaData> mTrackMeta;
-
- status_t init();
-
- WAVExtractor(const WAVExtractor &);
- WAVExtractor &operator=(const WAVExtractor &);
-};
-
-bool SniffWAV(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // WAV_EXTRACTOR_H_
-
diff --git a/media/libstagefright/include/XINGSeeker.h b/media/libstagefright/include/XINGSeeker.h
deleted file mode 100644
index cce04f0..0000000
--- a/media/libstagefright/include/XINGSeeker.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef XING_SEEKER_H_
-
-#define XING_SEEKER_H_
-
-#include "include/MP3Seeker.h"
-
-namespace android {
-
-class DataSource;
-
-struct XINGSeeker : public MP3Seeker {
- static sp<XINGSeeker> CreateFromSource(
- const sp<DataSource> &source, off64_t first_frame_pos);
-
- virtual bool getDuration(int64_t *durationUs);
- virtual bool getOffsetForTime(int64_t *timeUs, off64_t *pos);
-
- virtual int32_t getEncoderDelay();
- virtual int32_t getEncoderPadding();
-
-private:
- int64_t mFirstFramePos;
- int64_t mDurationUs;
- int32_t mSizeBytes;
- int32_t mEncoderDelay;
- int32_t mEncoderPadding;
-
- // TOC entries in XING header. Skip the first one since it's always 0.
- unsigned char mTOC[99];
- bool mTOCValid;
-
- XINGSeeker();
-
- DISALLOW_EVIL_CONSTRUCTORS(XINGSeeker);
-};
-
-} // namespace android
-
-#endif // XING_SEEKER_H_
-
diff --git a/media/libstagefright/include/avc_utils.h b/media/libstagefright/include/avc_utils.h
deleted file mode 100644
index d05906a..0000000
--- a/media/libstagefright/include/avc_utils.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AVC_UTILS_H_
-
-#define AVC_UTILS_H_
-
-#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <utils/Errors.h>
-
-namespace android {
-
-class ABitReader;
-
-enum {
- kAVCProfileBaseline = 0x42,
- kAVCProfileMain = 0x4d,
- kAVCProfileExtended = 0x58,
- kAVCProfileHigh = 0x64,
- kAVCProfileHigh10 = 0x6e,
- kAVCProfileHigh422 = 0x7a,
- kAVCProfileHigh444 = 0xf4,
- kAVCProfileCAVLC444Intra = 0x2c
-};
-
-struct NALPosition {
- uint32_t nalOffset;
- uint32_t nalSize;
-};
-
-// Optionally returns sample aspect ratio as well.
-void FindAVCDimensions(
- const sp<ABuffer> &seqParamSet,
- int32_t *width, int32_t *height,
- int32_t *sarWidth = NULL, int32_t *sarHeight = NULL);
-
-// Gets and returns an unsigned exp-golomb (ue) value from a bit reader |br|. Aborts if the value
-// is more than 64 bits long (>=0xFFFF (!)) or the bit reader overflows.
-unsigned parseUE(ABitReader *br);
-
-// Gets and returns a signed exp-golomb (se) value from a bit reader |br|. Aborts if the value is
-// more than 64 bits long (>0x7FFF || <-0x7FFF (!)) or the bit reader overflows.
-signed parseSE(ABitReader *br);
-
-// Gets an unsigned exp-golomb (ue) value from a bit reader |br|, and returns it if it was
-// successful. Returns |fallback| if it was unsuccessful. Note: if the value was longer than 64
-// bits, it reads past the value and still returns |fallback|.
-unsigned parseUEWithFallback(ABitReader *br, unsigned fallback);
-
-// Gets a signed exp-golomb (se) value from a bit reader |br|, and returns it if it was successful.
-// Returns |fallback| if it was unsuccessful. Note: if the value was longer that 64 bits, it reads
-// past the value and still returns |fallback|.
-signed parseSEWithFallback(ABitReader *br, signed fallback);
-
-// Skips an unsigned exp-golomb (ue) value from bit reader |br|.
-inline void skipUE(ABitReader *br) {
- (void)parseUEWithFallback(br, 0U);
-}
-
-// Skips a signed exp-golomb (se) value from bit reader |br|.
-inline void skipSE(ABitReader *br) {
- (void)parseSEWithFallback(br, 0);
-}
-
-status_t getNextNALUnit(
- const uint8_t **_data, size_t *_size,
- const uint8_t **nalStart, size_t *nalSize,
- bool startCodeFollows = false);
-
-class MetaData;
-sp<MetaData> MakeAVCCodecSpecificData(const sp<ABuffer> &accessUnit);
-
-bool IsIDR(const sp<ABuffer> &accessUnit);
-bool IsIDR(const sp<MediaCodecBuffer> &accessUnit);
-bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit);
-uint32_t FindAVCLayerId(const uint8_t *data, size_t size);
-
-const char *AVCProfileToString(uint8_t profile);
-
-sp<MetaData> MakeAACCodecSpecificData(
- unsigned profile, unsigned sampling_freq_index,
- unsigned channel_configuration);
-
-// Given an MPEG4 video VOL-header chunk (starting with 0x00 0x00 0x01 0x2?)
-// parse it and fill in dimensions, returns true iff successful.
-bool ExtractDimensionsFromVOLHeader(
- const uint8_t *data, size_t size, int32_t *width, int32_t *height);
-
-bool GetMPEGAudioFrameSize(
- uint32_t header, size_t *frame_size,
- int *out_sampling_rate = NULL, int *out_channels = NULL,
- int *out_bitrate = NULL, int *out_num_samples = NULL);
-
-} // namespace android
-
-#endif // AVC_UTILS_H_
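Note: the parseUE()/parseSE() declarations removed above operate on standard exp-Golomb coding. Below is a minimal standalone sketch of that coding scheme, assuming a simple bit cursor rather than the ABitReader used by the removed header; it is illustrative only, not the AOSP implementation.

    #include <cstddef>
    #include <cstdint>

    // Reads one bit from a big-endian bit stream; returns -1 past the end.
    static int readBit(const uint8_t *data, size_t sizeBits, size_t *pos) {
        if (*pos >= sizeBits) return -1;
        int bit = (data[*pos / 8] >> (7 - (*pos % 8))) & 1;
        ++(*pos);
        return bit;
    }

    // ue(v): count leading zero bits, read that many suffix bits after the
    // terminating '1', and return (2^leadingZeros - 1) + suffix.
    static bool decodeUE(const uint8_t *data, size_t sizeBits, size_t *pos, uint32_t *out) {
        int leadingZeros = 0;
        for (;;) {
            int b = readBit(data, sizeBits, pos);
            if (b < 0) return false;
            if (b == 1) break;
            if (++leadingZeros > 31) return false;  // would not fit in 32 bits
        }
        uint32_t suffix = 0;
        for (int i = 0; i < leadingZeros; ++i) {
            int b = readBit(data, sizeBits, pos);
            if (b < 0) return false;
            suffix = (suffix << 1) | (uint32_t)b;
        }
        *out = (1u << leadingZeros) - 1u + suffix;
        return true;
    }

The signed form se(v) decodes the same codeNum k and maps it to ceil(k/2) with alternating sign (positive for odd k), which is the mapping parseSE()/skipSE() above build on.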
diff --git a/media/libstagefright/include/media/stagefright/AACWriter.h b/media/libstagefright/include/media/stagefright/AACWriter.h
index a1f63d7..7c63ddd 100644
--- a/media/libstagefright/include/media/stagefright/AACWriter.h
+++ b/media/libstagefright/include/media/stagefright/AACWriter.h
@@ -24,14 +24,13 @@
namespace android {
struct MediaSource;
-class MetaData;
struct AACWriter : public MediaWriter {
AACWriter(int fd);
status_t initCheck() const;
- virtual status_t addSource(const sp<IMediaSource> &source);
+ virtual status_t addSource(const sp<MediaSource> &source);
virtual bool reachedEOS();
virtual status_t start(MetaData *params = NULL);
virtual status_t stop() { return reset(); }
@@ -48,7 +47,7 @@
int mFd;
status_t mInitCheck;
- sp<IMediaSource> mSource;
+ sp<MediaSource> mSource;
bool mStarted;
volatile bool mPaused;
volatile bool mResumed;
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 424246d..97d15a7 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -95,11 +95,6 @@
static status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]);
- // Save the flag.
- void setTrebleFlag(bool trebleFlag);
- // Return the saved flag.
- bool getTrebleFlag() const;
-
protected:
virtual ~ACodec();
@@ -233,15 +228,14 @@
sp<IOMX> mOMX;
sp<IOMXNode> mOMXNode;
int32_t mNodeGeneration;
- bool mTrebleFlag;
sp<TAllocator> mAllocator[2];
- sp<MemoryDealer> mDealer[2];
bool mUsingNativeWindow;
sp<ANativeWindow> mNativeWindow;
int mNativeWindowUsageBits;
android_native_rect_t mLastNativeWindowCrop;
int32_t mLastNativeWindowDataSpace;
+ HDRStaticInfo mLastHDRStaticInfo;
sp<AMessage> mConfigFormat;
sp<AMessage> mInputFormat;
sp<AMessage> mOutputFormat;
@@ -259,6 +253,7 @@
sp<AMessage> mLastOutputFormat;
bool mIsVideo;
+ bool mIsImage;
bool mIsEncoder;
bool mFatalError;
bool mShutdownInProgress;
@@ -451,6 +446,7 @@
int32_t heavyCompression;
int32_t targetRefLevel;
int32_t encodedTargetLevel;
+ int32_t effectType;
} drcParams_t;
status_t setupAACCodec(
@@ -495,13 +491,15 @@
status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg);
status_t setupH263EncoderParameters(const sp<AMessage> &msg);
status_t setupAVCEncoderParameters(const sp<AMessage> &msg);
- status_t setupHEVCEncoderParameters(const sp<AMessage> &msg);
+ status_t setupHEVCEncoderParameters(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
status_t setupVPXEncoderParameters(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
- status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level);
+ status_t verifySupportForProfileAndLevel(
+ OMX_U32 portIndex, int32_t profile, int32_t level);
+ status_t configureImageGrid(const sp<AMessage> &msg, sp<AMessage> &outputFormat);
status_t configureBitrate(
- int32_t bitrate, OMX_VIDEO_CONTROLRATETYPE bitrateMode);
+ OMX_VIDEO_CONTROLRATETYPE bitrateMode, int32_t bitrate, int32_t quality = 0);
void configureEncoderLatency(const sp<AMessage> &msg);
status_t setupErrorCorrectionParameters();
diff --git a/media/libstagefright/include/media/stagefright/AMRWriter.h b/media/libstagefright/include/media/stagefright/AMRWriter.h
index fbbdf2e..2ea2f78 100644
--- a/media/libstagefright/include/media/stagefright/AMRWriter.h
+++ b/media/libstagefright/include/media/stagefright/AMRWriter.h
@@ -20,20 +20,17 @@
#include <stdio.h>
-#include <media/IMediaSource.h>
#include <media/stagefright/MediaWriter.h>
#include <utils/threads.h>
namespace android {
-class MetaData;
-
struct AMRWriter : public MediaWriter {
AMRWriter(int fd);
status_t initCheck() const;
- virtual status_t addSource(const sp<IMediaSource> &source);
+ virtual status_t addSource(const sp<MediaSource> &source);
virtual bool reachedEOS();
virtual status_t start(MetaData *params = NULL);
virtual status_t stop() { return reset(); }
@@ -45,7 +42,7 @@
private:
int mFd;
status_t mInitCheck;
- sp<IMediaSource> mSource;
+ sp<MediaSource> mSource;
bool mStarted;
volatile bool mPaused;
volatile bool mResumed;
diff --git a/media/libstagefright/include/media/stagefright/AudioPlayer.h b/media/libstagefright/include/media/stagefright/AudioPlayer.h
index f7499b6..7c2c36f 100644
--- a/media/libstagefright/include/media/stagefright/AudioPlayer.h
+++ b/media/libstagefright/include/media/stagefright/AudioPlayer.h
@@ -18,7 +18,7 @@
#define AUDIO_PLAYER_H_
-#include <media/IMediaSource.h>
+#include <media/MediaSource.h>
#include <media/MediaPlayerInterface.h>
#include <media/stagefright/MediaBuffer.h>
#include <utils/threads.h>
@@ -50,7 +50,7 @@
virtual ~AudioPlayer();
// Caller retains ownership of "source".
- void setSource(const sp<IMediaSource> &source);
+ void setSource(const sp<MediaSource> &source);
status_t start(bool sourceAlreadyStarted = false);
@@ -66,10 +66,10 @@
status_t getPlaybackRate(AudioPlaybackRate *rate /* nonnull */);
private:
- sp<IMediaSource> mSource;
+ sp<MediaSource> mSource;
sp<AudioTrack> mAudioTrack;
- MediaBuffer *mInputBuffer;
+ MediaBufferBase *mInputBuffer;
int mSampleRate;
int64_t mLatencyUs;
@@ -91,7 +91,7 @@
bool mIsFirstBuffer;
status_t mFirstBufferResult;
- MediaBuffer *mFirstBuffer;
+ MediaBufferBase *mFirstBuffer;
sp<MediaPlayerBase::AudioSink> mAudioSink;
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index 1595be4..206d322 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -20,12 +20,15 @@
#include <media/AudioRecord.h>
#include <media/AudioSystem.h>
-#include <media/stagefright/MediaSource.h>
+#include <media/MediaSource.h>
+#include <media/MicrophoneInfo.h>
#include <media/stagefright/MediaBuffer.h>
#include <utils/List.h>
#include <system/audio.h>
+#include <vector>
+
namespace android {
class AudioRecord;
@@ -40,7 +43,8 @@
uint32_t channels,
uint32_t outSampleRate = 0,
uid_t uid = -1,
- pid_t pid = -1);
+ pid_t pid = -1,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
status_t initCheck() const;
@@ -52,11 +56,19 @@
int16_t getMaxAmplitude();
virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
virtual status_t setStopTimeUs(int64_t stopTimeUs);
status_t dataCallback(const AudioRecord::Buffer& buffer);
- virtual void signalBufferReturned(MediaBuffer *buffer);
+ virtual void signalBufferReturned(MediaBufferBase *buffer);
+
+ status_t setInputDevice(audio_port_handle_t deviceId);
+ status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
+ status_t addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+ status_t removeAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+ status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+
protected:
virtual ~AudioSource();
diff --git a/media/libstagefright/include/media/stagefright/CallbackMediaSource.h b/media/libstagefright/include/media/stagefright/CallbackMediaSource.h
new file mode 100644
index 0000000..33453fa
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/CallbackMediaSource.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CALLBACK_MEDIA_SOURCE_H_
+#define CALLBACK_MEDIA_SOURCE_H_
+
+#include <media/MediaSource.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+class IMediaSource;
+
+// A stagefright MediaSource that wraps a binder IMediaSource.
+class CallbackMediaSource : public MediaSource {
+public:
+ explicit CallbackMediaSource(const sp<IMediaSource> &source);
+ virtual ~CallbackMediaSource();
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual sp<MetaData> getFormat();
+ virtual status_t read(
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
+ virtual status_t pause();
+
+private:
+ sp<IMediaSource> mSource;
+
+ DISALLOW_EVIL_CONSTRUCTORS(CallbackMediaSource);
+};
+
+} // namespace android
+
+#endif // CALLBACK_MEDIA_SOURCE_H_
diff --git a/media/libstagefright/include/media/stagefright/CameraSource.h b/media/libstagefright/include/media/stagefright/CameraSource.h
index d6149c0..475976b 100644
--- a/media/libstagefright/include/media/stagefright/CameraSource.h
+++ b/media/libstagefright/include/media/stagefright/CameraSource.h
@@ -19,8 +19,8 @@
#define CAMERA_SOURCE_H_
#include <deque>
+#include <media/MediaSource.h>
#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
#include <camera/android/hardware/ICamera.h>
#include <camera/ICameraRecordingProxy.h>
#include <camera/ICameraRecordingProxyListener.h>
@@ -97,7 +97,7 @@
virtual status_t start(MetaData *params = NULL);
virtual status_t stop() { return reset(); }
virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
virtual status_t setStopTimeUs(int64_t stopTimeUs);
/**
@@ -127,7 +127,7 @@
*/
MetadataBufferType metaDataStoredInVideoBuffers() const;
- virtual void signalBufferReturned(MediaBuffer* buffer);
+ virtual void signalBufferReturned(MediaBufferBase* buffer);
protected:
diff --git a/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h b/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h
index b066f9a..533e33b 100644
--- a/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h
@@ -107,7 +107,7 @@
// Stores a copy of the MediaBuffer read in the last read() call after
// mQuickStop was true.
- MediaBuffer* mLastReadBufferCopy;
+ MediaBufferBase* mLastReadBufferCopy;
// Status code for last read.
status_t mLastReadStatus;
@@ -128,10 +128,10 @@
// Wrapper over CameraSource::signalBufferReturned() to implement quick stop.
// It only handles the case when mLastReadBufferCopy is signalled. Otherwise
// it calls the base class' function.
- virtual void signalBufferReturned(MediaBuffer* buffer);
+ virtual void signalBufferReturned(MediaBufferBase* buffer);
// Wrapper over CameraSource::read() to implement quick stop.
- virtual status_t read(MediaBuffer **buffer, const ReadOptions *options = NULL);
+ virtual status_t read(MediaBufferBase **buffer, const ReadOptions *options = NULL);
// mSkipCurrentFrame is set to true in dataCallbackTimestamp() if the current
// frame needs to be skipped and this function just returns the value of mSkipCurrentFrame.
@@ -170,7 +170,7 @@
// Convenience function to fill mLastReadBufferCopy from the just read
// buffer.
- void fillLastReadBufferCopy(MediaBuffer& sourceBuffer);
+ void fillLastReadBufferCopy(MediaBufferBase& sourceBuffer);
// If the passed in size (width x height) is a supported video/preview size,
// the function sets the camera's video/preview size to it and returns true.
diff --git a/media/libstagefright/include/media/stagefright/CodecBase.h b/media/libstagefright/include/media/stagefright/CodecBase.h
index 9197f7b..ad60f46 100644
--- a/media/libstagefright/include/media/stagefright/CodecBase.h
+++ b/media/libstagefright/include/media/stagefright/CodecBase.h
@@ -18,6 +18,7 @@
#define CODEC_BASE_H_
+#include <list>
#include <memory>
#include <stdint.h>
@@ -26,7 +27,6 @@
#include <media/hardware/CryptoAPI.h>
#include <media/hardware/HardwareAPI.h>
-#include <media/IOMX.h>
#include <media/MediaCodecInfo.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/ColorUtils.h>
@@ -222,9 +222,8 @@
virtual void signalSetParameters(const sp<AMessage> &msg) = 0;
virtual void signalEndOfInputStream() = 0;
- /*
- * Codec-related defines
- */
+ typedef CodecBase *(*CreateCodecFunc)(void);
+ typedef PersistentSurface *(*CreateInputSurfaceFunc)(void);
protected:
CodecBase() = default;
@@ -324,4 +323,3 @@
} // namespace android
#endif // CODEC_BASE_H_
-
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index 7ac9b37..5b3543d 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -33,6 +33,8 @@
bool isValid() const;
+ bool isDstRGB() const;
+
status_t convert(
const void *srcBits,
size_t srcWidth, size_t srcHeight,
@@ -76,6 +78,15 @@
status_t convertYUV420PlanarUseLibYUV(
const BitmapParams &src, const BitmapParams &dst);
+ status_t convertYUV420Planar16(
+ const BitmapParams &src, const BitmapParams &dst);
+
+ status_t convertYUV420Planar16ToY410(
+ const BitmapParams &src, const BitmapParams &dst);
+
+ status_t convertYUV420Planar16ToRGB(
+ const BitmapParams &src, const BitmapParams &dst);
+
status_t convertQCOMYUV420SemiPlanar(
const BitmapParams &src, const BitmapParams &dst);
@@ -85,10 +96,6 @@
status_t convertTIYUV420PackedSemiPlanar(
const BitmapParams &src, const BitmapParams &dst);
- void writeToDst(void *dst_ptr, uint8_t *kAdjustedClip, bool uncropped,
- signed r1, signed g1, signed b1,
- signed r2, signed g2, signed b2);
-
ColorConverter(const ColorConverter &);
ColorConverter &operator=(const ColorConverter &);
};
diff --git a/media/libstagefright/include/media/stagefright/DataSource.h b/media/libstagefright/include/media/stagefright/DataSource.h
deleted file mode 100644
index bd863ba..0000000
--- a/media/libstagefright/include/media/stagefright/DataSource.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DATA_SOURCE_H_
-
-#define DATA_SOURCE_H_
-
-#include <sys/types.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/MediaErrors.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/List.h>
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-#include <drm/DrmManagerClient.h>
-
-namespace android {
-
-struct AMessage;
-struct AString;
-class IDataSource;
-struct IMediaHTTPService;
-class String8;
-struct HTTPBase;
-
-class DataSource : public RefBase {
-public:
- enum Flags {
- kWantsPrefetching = 1,
- kStreamedFromLocalHost = 2,
- kIsCachingDataSource = 4,
- kIsHTTPBasedSource = 8,
- kIsLocalFileSource = 16,
- };
-
- static sp<DataSource> CreateFromURI(
- const sp<IMediaHTTPService> &httpService,
- const char *uri,
- const KeyedVector<String8, String8> *headers = NULL,
- String8 *contentType = NULL,
- HTTPBase *httpSource = NULL);
-
- static sp<DataSource> CreateMediaHTTP(const sp<IMediaHTTPService> &httpService);
- static sp<DataSource> CreateFromIDataSource(const sp<IDataSource> &source);
- static sp<DataSource> CreateFromFd(int fd, int64_t offset, int64_t length);
-
- DataSource() {}
-
- virtual status_t initCheck() const = 0;
-
- // Returns the number of bytes read, or -1 on failure. It's not an error if
- // this returns zero; it just means the given offset is equal to, or
- // beyond, the end of the source.
- virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
-
- // Convenience methods:
- bool getUInt16(off64_t offset, uint16_t *x);
- bool getUInt24(off64_t offset, uint32_t *x); // 3 byte int, returned as a 32-bit int
- bool getUInt32(off64_t offset, uint32_t *x);
- bool getUInt64(off64_t offset, uint64_t *x);
-
- // read either int<N> or int<2N> into a uint<2N>_t, size is the int size in bytes.
- bool getUInt16Var(off64_t offset, uint16_t *x, size_t size);
- bool getUInt32Var(off64_t offset, uint32_t *x, size_t size);
- bool getUInt64Var(off64_t offset, uint64_t *x, size_t size);
-
- // Reads in "count" entries of type T into vector *x.
- // Returns true if "count" entries can be read.
- // If fewer than "count" entries can be read, return false. In this case,
- // the output vector *x will still have those entries that were read. Call
- // x->size() to obtain the number of entries read.
- // The optional parameter chunkSize specifies how many entries should be
- // read from the data source at one time into a temporary buffer. Increasing
- // chunkSize can improve the performance at the cost of extra memory usage.
- // The default value for chunkSize is set to read at least 4k bytes at a
- // time, depending on sizeof(T).
- template <typename T>
- bool getVector(off64_t offset, Vector<T>* x, size_t count,
- size_t chunkSize = (4095 / sizeof(T)) + 1);
-
- // May return ERROR_UNSUPPORTED.
- virtual status_t getSize(off64_t *size);
-
- virtual uint32_t flags() {
- return 0;
- }
-
- virtual String8 toString() {
- return String8("<unspecified>");
- }
-
- virtual status_t reconnectAtOffset(off64_t /*offset*/) {
- return ERROR_UNSUPPORTED;
- }
-
- ////////////////////////////////////////////////////////////////////////////
-
- // for DRM
- virtual sp<DecryptHandle> DrmInitialization(const char * /*mime*/ = NULL) {
- return NULL;
- }
- virtual void getDrmInfo(sp<DecryptHandle> &/*handle*/, DrmManagerClient ** /*client*/) {};
-
- virtual String8 getUri() {
- return String8();
- }
-
- virtual String8 getMIMEType() const;
-
- virtual void close() {};
-
- // creates an IDataSource wrapper to the DataSource.
- virtual sp<IDataSource> asIDataSource();
-
- // returns a pointer to IDataSource if it is wrapped.
- virtual sp<IDataSource> getIDataSource() const;
-
-protected:
- virtual ~DataSource() {}
-
-private:
- DataSource(const DataSource &);
- DataSource &operator=(const DataSource &);
-};
-
-template <typename T>
-bool DataSource::getVector(off64_t offset, Vector<T>* x, size_t count,
- size_t chunkSize)
-{
- x->clear();
- if (chunkSize == 0) {
- return false;
- }
- if (count == 0) {
- return true;
- }
-
- T tmp[chunkSize];
- ssize_t numBytesRead;
- size_t numBytesPerChunk = chunkSize * sizeof(T);
- size_t i;
-
- for (i = 0; i + chunkSize < count; i += chunkSize) {
- // This loop is executed when more than chunkSize records need to be
- // read.
- numBytesRead = this->readAt(offset, (void*)&tmp, numBytesPerChunk);
- if (numBytesRead == -1) { // If readAt() returns -1, there is an error.
- return false;
- }
- if (static_cast<size_t>(numBytesRead) < numBytesPerChunk) {
- // This case is triggered when the stream ends before the whole
- // chunk is read.
- x->appendArray(tmp, (size_t)numBytesRead / sizeof(T));
- return false;
- }
- x->appendArray(tmp, chunkSize);
- offset += numBytesPerChunk;
- }
-
- // There are (count - i) more records to read.
- // Right now, (count - i) <= chunkSize.
- // We do the same thing as above, but with chunkSize replaced by count - i.
- numBytesRead = this->readAt(offset, (void*)&tmp, (count - i) * sizeof(T));
- if (numBytesRead == -1) {
- return false;
- }
- x->appendArray(tmp, (size_t)numBytesRead / sizeof(T));
- return x->size() == count;
-}
-
-} // namespace android
-
-#endif // DATA_SOURCE_H_
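For reference, the chunked-read strategy in the removed DataSource::getVector() above caps each readAt() call near 4 KiB: with the default chunkSize of (4095 / sizeof(T)) + 1, reading uint32_t entries uses 1024-entry (4096-byte) chunks. The standalone sketch below shows the same pattern with std::vector and a read callback standing in for the Android types; the names are illustrative, not part of the removed header.

    #include <sys/types.h>
    #include <cstdint>
    #include <functional>
    #include <vector>

    template <typename T>
    bool getVectorChunked(const std::function<ssize_t(int64_t, void *, size_t)> &readAt,
                          int64_t offset, std::vector<T> *out, size_t count,
                          size_t chunkSize = (4095 / sizeof(T)) + 1) {
        out->clear();
        if (chunkSize == 0) return false;
        if (count == 0) return true;

        std::vector<T> tmp(chunkSize);
        size_t i = 0;
        for (; i + chunkSize < count; i += chunkSize) {
            ssize_t n = readAt(offset, tmp.data(), chunkSize * sizeof(T));
            if (n < 0) return false;                              // read error
            out->insert(out->end(), tmp.begin(), tmp.begin() + (size_t)n / sizeof(T));
            if ((size_t)n < chunkSize * sizeof(T)) return false;  // stream ended early
            offset += chunkSize * sizeof(T);
        }
        // At most chunkSize entries remain; read them in one final call.
        ssize_t n = readAt(offset, tmp.data(), (count - i) * sizeof(T));
        if (n < 0) return false;
        out->insert(out->end(), tmp.begin(), tmp.begin() + (size_t)n / sizeof(T));
        return out->size() == count;
    }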
diff --git a/media/libstagefright/include/media/stagefright/DataSourceFactory.h b/media/libstagefright/include/media/stagefright/DataSourceFactory.h
new file mode 100644
index 0000000..2a1d491
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/DataSourceFactory.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DATA_SOURCE_FACTORY_H_
+
+#define DATA_SOURCE_FACTORY_H_
+
+#include <sys/types.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct MediaHTTPService;
+class String8;
+struct HTTPBase;
+
+class DataSourceFactory {
+public:
+ static sp<DataSource> CreateFromURI(
+ const sp<MediaHTTPService> &httpService,
+ const char *uri,
+ const KeyedVector<String8, String8> *headers = NULL,
+ String8 *contentType = NULL,
+ HTTPBase *httpSource = NULL);
+
+ static sp<DataSource> CreateMediaHTTP(const sp<MediaHTTPService> &httpService);
+ static sp<DataSource> CreateFromFd(int fd, int64_t offset, int64_t length);
+};
+
+} // namespace android
+
+#endif // DATA_SOURCE_FACTORY_H_
diff --git a/media/libstagefright/include/media/stagefright/DataURISource.h b/media/libstagefright/include/media/stagefright/DataURISource.h
index 693562e..cf8d68e 100644
--- a/media/libstagefright/include/media/stagefright/DataURISource.h
+++ b/media/libstagefright/include/media/stagefright/DataURISource.h
@@ -18,7 +18,7 @@
#define DATA_URI_SOURCE_H_
-#include <media/stagefright/DataSource.h>
+#include <media/DataSource.h>
#include <media/stagefright/foundation/ABase.h>
namespace android {
diff --git a/media/libstagefright/include/media/stagefright/FileSource.h b/media/libstagefright/include/media/stagefright/FileSource.h
index 7267e9a..8604890 100644
--- a/media/libstagefright/include/media/stagefright/FileSource.h
+++ b/media/libstagefright/include/media/stagefright/FileSource.h
@@ -20,7 +20,7 @@
#include <stdio.h>
-#include <media/stagefright/DataSource.h>
+#include <media/DataSource.h>
#include <media/stagefright/MediaErrors.h>
#include <utils/threads.h>
#include <drm/DrmManagerClient.h>
@@ -45,8 +45,6 @@
virtual sp<DecryptHandle> DrmInitialization(const char *mime);
- virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
-
virtual String8 toString() {
return mName;
}
diff --git a/media/libstagefright/include/media/stagefright/InterfaceUtils.h b/media/libstagefright/include/media/stagefright/InterfaceUtils.h
new file mode 100644
index 0000000..f0ebd48
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/InterfaceUtils.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INTERFACE_UTILS_H_
+#define INTERFACE_UTILS_H_
+
+#include <utils/RefBase.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/RemoteMediaExtractor.h>
+#include <media/MediaSource.h>
+#include <media/IMediaExtractor.h>
+#include <media/IMediaSource.h>
+
+namespace android {
+
+class DataSource;
+class MediaExtractor;
+struct MediaSource;
+class IDataSource;
+class IMediaExtractor;
+class IMediaSource;
+
+// Creates a DataSource which wraps the given IDataSource object.
+sp<DataSource> CreateDataSourceFromIDataSource(const sp<IDataSource> &source);
+
+// creates an IDataSource wrapper to the DataSource.
+sp<IDataSource> CreateIDataSourceFromDataSource(const sp<DataSource> &source);
+
+// Creates an IMediaExtractor wrapper to the given MediaExtractor.
+sp<IMediaExtractor> CreateIMediaExtractorFromMediaExtractor(
+ MediaExtractor *extractor,
+ const sp<DataSource> &source,
+ const sp<RefBase> &plugin);
+
+// Creates a MediaSource which wraps the given IMediaSource object.
+sp<MediaSource> CreateMediaSourceFromIMediaSource(const sp<IMediaSource> &source);
+
+// Creates an IMediaSource wrapper to the given MediaSource.
+sp<IMediaSource> CreateIMediaSourceFromMediaSourceBase(
+ const sp<RemoteMediaExtractor> &extractor,
+ MediaTrack *source, const sp<RefBase> &plugin);
+
+} // namespace android
+
+#endif // INTERFACE_UTILS_H_
diff --git a/media/libstagefright/include/media/stagefright/JPEGSource.h b/media/libstagefright/include/media/stagefright/JPEGSource.h
index 1b7e91b..8ab3d11 100644
--- a/media/libstagefright/include/media/stagefright/JPEGSource.h
+++ b/media/libstagefright/include/media/stagefright/JPEGSource.h
@@ -18,7 +18,7 @@
#define JPEG_SOURCE_H_
-#include <media/stagefright/MediaSource.h>
+#include <media/MediaSource.h>
namespace android {
@@ -33,7 +33,7 @@
virtual sp<MetaData> getFormat();
virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
protected:
virtual ~JPEGSource();
diff --git a/media/libstagefright/include/media/stagefright/MPEG2TSWriter.h b/media/libstagefright/include/media/stagefright/MPEG2TSWriter.h
index 4516fb6..3d7960b 100644
--- a/media/libstagefright/include/media/stagefright/MPEG2TSWriter.h
+++ b/media/libstagefright/include/media/stagefright/MPEG2TSWriter.h
@@ -34,7 +34,7 @@
void *cookie,
ssize_t (*write)(void *cookie, const void *data, size_t size));
- virtual status_t addSource(const sp<IMediaSource> &source);
+ virtual status_t addSource(const sp<MediaSource> &source);
virtual status_t start(MetaData *param = NULL);
virtual status_t stop() { return reset(); }
virtual status_t pause();
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 1c7b4a6..f18940d 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -20,7 +20,6 @@
#include <stdio.h>
-#include <media/IMediaSource.h>
#include <media/stagefright/MediaWriter.h>
#include <utils/List.h>
#include <utils/threads.h>
@@ -31,7 +30,7 @@
struct AMessage;
class MediaBuffer;
-class MetaData;
+struct ABuffer;
class MPEG4Writer : public MediaWriter {
public:
@@ -40,7 +39,7 @@
// Limitations
// No more than one video and/or one audio source can be added, but
// multiple metadata sources can be added.
- virtual status_t addSource(const sp<IMediaSource> &source);
+ virtual status_t addSource(const sp<MediaSource> &source);
// Returns INVALID_OPERATION if there is no source or track.
virtual status_t start(MetaData *param = NULL);
@@ -101,12 +100,12 @@
bool mSendNotify;
off64_t mOffset;
off_t mMdatOffset;
- uint8_t *mMoovBoxBuffer;
- off64_t mMoovBoxBufferOffset;
- bool mWriteMoovBoxToMemory;
+ uint8_t *mInMemoryCache;
+ off64_t mInMemoryCacheOffset;
+ off64_t mInMemoryCacheSize;
+ bool mWriteBoxToMemory;
off64_t mFreeBoxOffset;
bool mStreamableFile;
- off64_t mEstimatedMoovBoxSize;
off64_t mMoovExtraSize;
uint32_t mInterleaveDurationUs;
int32_t mTimeScale;
@@ -133,6 +132,8 @@
status_t startTracks(MetaData *params);
size_t numTracks();
int64_t estimateMoovBoxSize(int32_t bitRate);
+ int64_t estimateFileLevelMetaSize(MetaData *params);
+ void writeCachedBoxToFile(const char *type);
struct Chunk {
Track *mTrack; // Owner
@@ -165,6 +166,50 @@
List<ChunkInfo> mChunkInfos; // Chunk infos
Condition mChunkReadyCondition; // Signal that chunks are available
+ // HEIF writing
+ typedef key_value_pair_t< const char *, Vector<uint16_t> > ItemRefs;
+ typedef struct _ItemInfo {
+ bool isGrid() const { return !strcmp("grid", itemType); }
+ bool isImage() const { return !strcmp("hvc1", itemType) || isGrid(); }
+ const char *itemType;
+ uint16_t itemId;
+ bool isPrimary;
+ bool isHidden;
+ union {
+ // image item
+ struct {
+ uint32_t offset;
+ uint32_t size;
+ };
+ // grid item
+ struct {
+ uint32_t rows;
+ uint32_t cols;
+ uint32_t width;
+ uint32_t height;
+ };
+ };
+ Vector<uint16_t> properties;
+ Vector<ItemRefs> refsList;
+ } ItemInfo;
+
+ typedef struct _ItemProperty {
+ uint32_t type;
+ int32_t width;
+ int32_t height;
+ int32_t rotation;
+ sp<ABuffer> hvcc;
+ } ItemProperty;
+
+ bool mHasFileLevelMeta;
+ bool mHasMoovBox;
+ uint32_t mPrimaryItemId;
+ uint32_t mAssociationEntryCount;
+ uint32_t mNumGrids;
+ bool mHasRefs;
+ Vector<ItemInfo> mItems;
+ Vector<ItemProperty> mProperties;
+
// Writer thread handling
status_t startWriterThread();
void stopWriterThread();
@@ -210,9 +255,12 @@
void initInternal(int fd, bool isFirstSession);
// Acquire lock before calling these methods
- off64_t addSample_l(MediaBuffer *buffer);
- off64_t addLengthPrefixedSample_l(MediaBuffer *buffer);
- off64_t addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer);
+ off64_t addSample_l(MediaBuffer *buffer, bool usePrefix, bool isExif, size_t *bytesWritten);
+ void addLengthPrefixedSample_l(MediaBuffer *buffer);
+ void addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer);
+ uint16_t addProperty_l(const ItemProperty &);
+ uint16_t addItem_l(const ItemInfo &);
+ void addRefs_l(uint16_t itemId, const ItemRefs &);
bool exceedsFileSizeLimit();
bool use32BitFileOffset() const;
@@ -231,10 +279,23 @@
void finishCurrentSession();
void addDeviceMeta();
- void writeHdlr();
+ void writeHdlr(const char *handlerType);
void writeKeys();
void writeIlst();
- void writeMetaBox();
+ void writeMoovLevelMetaBox();
+
+ // HEIF writing
+ void writeIlocBox();
+ void writeInfeBox(uint16_t itemId, const char *type, uint32_t flags);
+ void writeIinfBox();
+ void writeIpcoBox();
+ void writeIpmaBox();
+ void writeIprpBox();
+ void writeIdatBox();
+ void writeIrefBox();
+ void writePitmBox();
+ void writeFileLevelMetaBox();
+
void sendSessionSummary();
void release();
status_t switchFd();
diff --git a/media/libstagefright/include/media/stagefright/MediaAdapter.h b/media/libstagefright/include/media/stagefright/MediaAdapter.h
index 369fce6..589c827 100644
--- a/media/libstagefright/include/media/stagefright/MediaAdapter.h
+++ b/media/libstagefright/include/media/stagefright/MediaAdapter.h
@@ -17,8 +17,8 @@
#ifndef MEDIA_ADAPTER_H
#define MEDIA_ADAPTER_H
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MetaData.h>
#include <utils/threads.h>
@@ -40,13 +40,13 @@
virtual status_t stop();
virtual sp<MetaData> getFormat();
virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
/////////////////////////////////////////////////
// Inherited functions from MediaBufferObserver
/////////////////////////////////////////////////
- virtual void signalBufferReturned(MediaBuffer *buffer);
+ virtual void signalBufferReturned(MediaBufferBase *buffer);
/////////////////////////////////////////////////
// Non-inherited functions:
diff --git a/media/libstagefright/include/media/stagefright/MediaBuffer.h b/media/libstagefright/include/media/stagefright/MediaBuffer.h
deleted file mode 100644
index e74410d..0000000
--- a/media/libstagefright/include/media/stagefright/MediaBuffer.h
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BUFFER_H_
-
-#define MEDIA_BUFFER_H_
-
-#include <atomic>
-#include <list>
-#include <media/stagefright/foundation/MediaBufferBase.h>
-
-#include <pthread.h>
-
-#include <binder/MemoryDealer.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-struct ABuffer;
-class GraphicBuffer;
-class MediaBuffer;
-class MediaBufferObserver;
-class MetaData;
-
-class MediaBufferObserver {
-public:
- MediaBufferObserver() {}
- virtual ~MediaBufferObserver() {}
-
- virtual void signalBufferReturned(MediaBuffer *buffer) = 0;
-
-private:
- MediaBufferObserver(const MediaBufferObserver &);
- MediaBufferObserver &operator=(const MediaBufferObserver &);
-};
-
-class MediaBuffer : public MediaBufferBase {
-public:
- // allocations larger than or equal to this will use shared memory.
- static const size_t kSharedMemThreshold = 64 * 1024;
-
- // The underlying data remains the responsibility of the caller!
- MediaBuffer(void *data, size_t size);
-
- explicit MediaBuffer(size_t size);
-
- explicit MediaBuffer(const sp<GraphicBuffer>& graphicBuffer);
-
- explicit MediaBuffer(const sp<ABuffer> &buffer);
-
- MediaBuffer(const sp<IMemory> &mem) :
- MediaBuffer((uint8_t *)mem->pointer() + sizeof(SharedControl), mem->size()) {
- // delegate and override mMemory
- mMemory = mem;
- }
-
- // If MediaBufferGroup is set, decrement the local reference count;
- // if the local reference count drops to 0, return the buffer to the
- // associated MediaBufferGroup.
- //
- // If no MediaBufferGroup is set, the local reference count must be zero
- // when called, whereupon the MediaBuffer is deleted.
- virtual void release();
-
- // Increments the local reference count.
- // Use only when MediaBufferGroup is set.
- virtual void add_ref();
-
- void *data() const;
- size_t size() const;
-
- size_t range_offset() const;
- size_t range_length() const;
-
- void set_range(size_t offset, size_t length);
-
- sp<GraphicBuffer> graphicBuffer() const;
-
- sp<MetaData> meta_data();
-
- // Clears meta data and resets the range to the full extent.
- void reset();
-
- void setObserver(MediaBufferObserver *group);
-
- // Returns a clone of this MediaBuffer increasing its reference count.
- // The clone references the same data but has its own range and
- // MetaData.
- MediaBuffer *clone();
-
- // sum of localRefcount() and remoteRefcount()
- int refcount() const {
- return localRefcount() + remoteRefcount();
- }
-
- int localRefcount() const {
- return mRefCount;
- }
-
- int remoteRefcount() const {
- if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
- int32_t remoteRefcount =
- reinterpret_cast<SharedControl *>(mMemory->pointer())->getRemoteRefcount();
- // Sanity check so that remoteRefCount() is non-negative.
- return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
- }
-
- // returns old value
- int addRemoteRefcount(int32_t value) {
- if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
- return reinterpret_cast<SharedControl *>(mMemory->pointer())->addRemoteRefcount(value);
- }
-
- bool isDeadObject() const {
- return isDeadObject(mMemory);
- }
-
- static bool isDeadObject(const sp<IMemory> &memory) {
- if (memory.get() == nullptr || memory->pointer() == nullptr) return false;
- return reinterpret_cast<SharedControl *>(memory->pointer())->isDeadObject();
- }
-
- // Sticky on enabling of shared memory MediaBuffers. By default we don't use
- // shared memory for MediaBuffers, but we enable this for those processes
- // that export MediaBuffers.
- static void useSharedMemory() {
- std::atomic_store_explicit(
- &mUseSharedMemory, (int_least32_t)1, std::memory_order_seq_cst);
- }
-
-protected:
- // true if MediaBuffer is observed (part of a MediaBufferGroup).
- inline bool isObserved() const {
- return mObserver != nullptr;
- }
-
- virtual ~MediaBuffer();
-
- sp<IMemory> mMemory;
-
-private:
- friend class MediaBufferGroup;
- friend class OMXDecoder;
- friend class BnMediaSource;
- friend class BpMediaSource;
-
- // For use by OMXDecoder, reference count must be 1, drop reference
- // count to 0 without signalling the observer.
- void claim();
-
- MediaBufferObserver *mObserver;
- int mRefCount;
-
- void *mData;
- size_t mSize, mRangeOffset, mRangeLength;
- sp<GraphicBuffer> mGraphicBuffer;
- sp<ABuffer> mBuffer;
-
- bool mOwnsData;
-
- sp<MetaData> mMetaData;
-
- MediaBuffer *mOriginal;
-
- static std::atomic_int_least32_t mUseSharedMemory;
-
- MediaBuffer(const MediaBuffer &);
- MediaBuffer &operator=(const MediaBuffer &);
-
- // SharedControl block at the start of IMemory.
- struct SharedControl {
- enum {
- FLAG_DEAD_OBJECT = (1 << 0),
- };
-
- // returns old value
- inline int32_t addRemoteRefcount(int32_t value) {
- return std::atomic_fetch_add_explicit(
- &mRemoteRefcount, (int_least32_t)value, std::memory_order_seq_cst);
- }
-
- inline int32_t getRemoteRefcount() const {
- return std::atomic_load_explicit(&mRemoteRefcount, std::memory_order_seq_cst);
- }
-
- inline void setRemoteRefcount(int32_t value) {
- std::atomic_store_explicit(
- &mRemoteRefcount, (int_least32_t)value, std::memory_order_seq_cst);
- }
-
- inline bool isDeadObject() const {
- return (std::atomic_load_explicit(
- &mFlags, std::memory_order_seq_cst) & FLAG_DEAD_OBJECT) != 0;
- }
-
- inline void setDeadObject() {
- (void)std::atomic_fetch_or_explicit(
- &mFlags, (int_least32_t)FLAG_DEAD_OBJECT, std::memory_order_seq_cst);
- }
-
- inline void clear() {
- std::atomic_store_explicit(
- &mFlags, (int_least32_t)0, std::memory_order_seq_cst);
- std::atomic_store_explicit(
- &mRemoteRefcount, (int_least32_t)0, std::memory_order_seq_cst);
- }
-
- private:
- // Caution: atomic_int_fast32_t is 64 bits on LP64.
- std::atomic_int_least32_t mFlags;
- std::atomic_int_least32_t mRemoteRefcount;
- int32_t unused[6] __attribute__((__unused__)); // additional buffer space
- };
-
- inline SharedControl *getSharedControl() const {
- return reinterpret_cast<SharedControl *>(mMemory->pointer());
- }
-};
-
-} // namespace android
-
-#endif // MEDIA_BUFFER_H_
diff --git a/media/libstagefright/include/media/stagefright/MediaBuffer.h b/media/libstagefright/include/media/stagefright/MediaBuffer.h
new file mode 120000
index 0000000..1d49c1a
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaBuffer.h
@@ -0,0 +1 @@
+../../../../libmediaextractor/include/media/stagefright/MediaBuffer.h
\ No newline at end of file
diff --git a/media/libstagefright/include/media/stagefright/MediaBufferBase.h b/media/libstagefright/include/media/stagefright/MediaBufferBase.h
new file mode 120000
index 0000000..80e49b0
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaBufferBase.h
@@ -0,0 +1 @@
+../../../../libmediaextractor/include/media/stagefright/MediaBufferBase.h
\ No newline at end of file
diff --git a/media/libstagefright/include/media/stagefright/MediaBufferGroup.h b/media/libstagefright/include/media/stagefright/MediaBufferGroup.h
deleted file mode 100644
index 3051406..0000000
--- a/media/libstagefright/include/media/stagefright/MediaBufferGroup.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_BUFFER_GROUP_H_
-
-#define MEDIA_BUFFER_GROUP_H_
-
-#include <media/stagefright/MediaBuffer.h>
-#include <utils/Errors.h>
-#include <utils/threads.h>
-
-namespace android {
-
-class MediaBuffer;
-class MetaData;
-
-class MediaBufferGroup : public MediaBufferObserver {
-public:
- MediaBufferGroup(size_t growthLimit = 0);
-
- // create a media buffer group with preallocated buffers
- MediaBufferGroup(size_t buffers, size_t buffer_size, size_t growthLimit = 0);
-
- ~MediaBufferGroup();
-
- void add_buffer(MediaBuffer *buffer);
-
- bool has_buffers();
-
- // If nonBlocking is false, it blocks until a buffer is available and
- // passes it to the caller in *buffer, while returning OK.
- // The returned buffer will have a reference count of 1.
- // If nonBlocking is true and a buffer is not immediately available,
- // buffer is set to NULL and it returns WOULD_BLOCK.
- // If requestedSize is 0, any free MediaBuffer will be returned.
- // If requestedSize is > 0, the returned MediaBuffer should have buffer
- // size of at least requestedSize.
- status_t acquire_buffer(
- MediaBuffer **buffer, bool nonBlocking = false, size_t requestedSize = 0);
-
- size_t buffers() const { return mBuffers.size(); }
-
- // If buffer is nullptr, have acquire_buffer() check for remote release.
- virtual void signalBufferReturned(MediaBuffer *buffer);
-
-private:
- friend class MediaBuffer;
-
- Mutex mLock;
- Condition mCondition;
- size_t mGrowthLimit; // Do not automatically grow group larger than this.
- std::list<MediaBuffer *> mBuffers;
-
- MediaBufferGroup(const MediaBufferGroup &);
- MediaBufferGroup &operator=(const MediaBufferGroup &);
-};
-
-} // namespace android
-
-#endif // MEDIA_BUFFER_GROUP_H_
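Usage sketch only, based on the acquire_buffer()/release() contract documented in the header above (which this change relocates to libmediaextractor via the symlink below rather than removes); the producer function is a hypothetical example, not code from this change.

    #include <media/stagefright/MediaBuffer.h>
    #include <media/stagefright/MediaBufferGroup.h>

    void produceOneBuffer(android::MediaBufferGroup &group) {
        android::MediaBuffer *buffer = nullptr;
        // Blocks until a free buffer is available; the returned buffer has a
        // reference count of 1.
        if (group.acquire_buffer(&buffer) != android::OK || buffer == nullptr) {
            return;
        }
        // ... fill buffer->data() and call buffer->set_range(offset, length) ...
        // Dropping the last local reference hands the buffer back to the
        // owning group instead of deleting it.
        buffer->release();
    }

    // Typical setup: a preallocated pool, e.g. MediaBufferGroup group(4, 64 * 1024);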
diff --git a/media/libstagefright/include/media/stagefright/MediaBufferGroup.h b/media/libstagefright/include/media/stagefright/MediaBufferGroup.h
new file mode 120000
index 0000000..009b3d9
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaBufferGroup.h
@@ -0,0 +1 @@
+../../../../libmediaextractor/include/media/stagefright/MediaBufferGroup.h
\ No newline at end of file
diff --git a/media/libstagefright/include/media/stagefright/MediaClock.h b/media/libstagefright/include/media/stagefright/MediaClock.h
index dd1a809..3ddeb82 100644
--- a/media/libstagefright/include/media/stagefright/MediaClock.h
+++ b/media/libstagefright/include/media/stagefright/MediaClock.h
@@ -18,7 +18,8 @@
#define MEDIA_CLOCK_H_
-#include <media/stagefright/foundation/ABase.h>
+#include <list>
+#include <media/stagefright/foundation/AHandler.h>
#include <utils/Mutex.h>
#include <utils/RefBase.h>
@@ -26,8 +27,14 @@
struct AMessage;
-struct MediaClock : public RefBase {
+struct MediaClock : public AHandler {
+ enum {
+ TIMER_REASON_REACHED = 0,
+ TIMER_REASON_RESET = 1,
+ };
+
MediaClock();
+ void init();
void setStartingTimeMedia(int64_t startingTimeMediaUs);
@@ -54,15 +61,45 @@
// The result is saved in |outRealUs|.
status_t getRealTimeFor(int64_t targetMediaUs, int64_t *outRealUs) const;
+ // request to set up a timer. The target time is |mediaTimeUs|, adjusted by
+ // system time of |adjustRealUs|. In other words, the wake up time is
+ // mediaTimeUs + (adjustRealUs / playbackRate)
+ void addTimer(const sp<AMessage> ¬ify, int64_t mediaTimeUs, int64_t adjustRealUs = 0);
+
+ void setNotificationMessage(const sp<AMessage> &msg);
+
+ void reset();
+
protected:
virtual ~MediaClock();
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
private:
+ enum {
+ kWhatTimeIsUp = 'tIsU',
+ };
+
+ struct Timer {
+ Timer(const sp<AMessage> ¬ify, int64_t mediaTimeUs, int64_t adjustRealUs);
+ const sp<AMessage> mNotify;
+ int64_t mMediaTimeUs;
+ int64_t mAdjustRealUs;
+ };
+
status_t getMediaTime_l(
int64_t realUs,
int64_t *outMediaUs,
bool allowPastMaxTime) const;
+ void processTimers_l();
+
+ void updateAnchorTimesAndPlaybackRate_l(
+ int64_t anchorTimeMediaUs, int64_t anchorTimeRealUs, float playbackRate);
+
+ void notifyDiscontinuity_l();
+
+ sp<ALooper> mLooper;
mutable Mutex mLock;
int64_t mAnchorTimeMediaUs;
@@ -72,6 +109,10 @@
float mPlaybackRate;
+ int32_t mGeneration;
+ std::list<Timer> mTimers;
+ sp<AMessage> mNotify;
+
DISALLOW_EVIL_CONSTRUCTORS(MediaClock);
};
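A quick check of the addTimer() formula quoted in the comment above (wake-up time = mediaTimeUs + adjustRealUs / playbackRate), using assumed example values; this is a standalone illustration, not the MediaClock implementation.

    #include <cstdint>

    int64_t timerWakeUpMediaUs(int64_t mediaTimeUs, int64_t adjustRealUs, float playbackRate) {
        return mediaTimeUs + (int64_t)(adjustRealUs / playbackRate);
    }

    // timerWakeUpMediaUs(1000000, -50000, 2.0f) == 975000, i.e. at 2x playback the
    // timer fires 25 ms of media time before the 1 s mark.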
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 1030407..ad02004 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -61,9 +61,11 @@
};
enum BufferFlags {
- BUFFER_FLAG_SYNCFRAME = 1,
- BUFFER_FLAG_CODECCONFIG = 2,
- BUFFER_FLAG_EOS = 4,
+ BUFFER_FLAG_SYNCFRAME = 1,
+ BUFFER_FLAG_CODECCONFIG = 2,
+ BUFFER_FLAG_EOS = 4,
+ BUFFER_FLAG_PARTIAL_FRAME = 8,
+ BUFFER_FLAG_MUXER_DATA = 16,
};
enum {
@@ -184,6 +186,8 @@
status_t getName(AString *componentName) const;
+ status_t getCodecInfo(sp<MediaCodecInfo> *codecInfo) const;
+
status_t getMetrics(MediaAnalyticsItem * &reply);
status_t setParameters(const sp<AMessage> ¶ms);
@@ -217,6 +221,7 @@
STOPPING,
RELEASING,
};
+ std::string stateString(State state);
enum {
kPortIndexInput = 0,
@@ -247,6 +252,7 @@
kWhatRequestIDRFrame = 'ridr',
kWhatRequestActivityNotification = 'racN',
kWhatGetName = 'getN',
+ kWhatGetCodecInfo = 'gCoI',
kWhatSetParameters = 'setP',
kWhatSetCallback = 'setC',
kWhatSetNotification = 'setN',
@@ -307,6 +313,7 @@
sp<ALooper> mCodecLooper;
sp<CodecBase> mCodec;
AString mComponentName;
+ sp<MediaCodecInfo> mCodecInfo;
sp<AReplyToken> mReplyID;
uint32_t mFlags;
status_t mStickyError;
@@ -315,7 +322,9 @@
MediaAnalyticsItem *mAnalyticsItem;
void initAnalyticsItem();
+ void updateAnalyticsItem();
void flushAnalyticsItem();
+ void updateEphemeralAnalytics(MediaAnalyticsItem *item);
sp<AMessage> mOutputFormat;
sp<AMessage> mInputFormat;
@@ -333,8 +342,6 @@
// initial create parameters
AString mInitName;
- bool mInitNameIsType;
- bool mInitIsEncoder;
// configure parameter
sp<AMessage> mConfigureMsg;
@@ -364,19 +371,20 @@
bool mHaveInputSurface;
bool mHavePendingInputBuffers;
+ bool mCpuBoostRequested;
std::shared_ptr<BufferChannelBase> mBufferChannel;
MediaCodec(const sp<ALooper> &looper, pid_t pid, uid_t uid);
- static sp<CodecBase> GetCodecBase(const AString &name, bool nameIsType = false);
+ static sp<CodecBase> GetCodecBase(const AString &name);
static status_t PostAndAwaitResponse(
const sp<AMessage> &msg, sp<AMessage> *response);
void PostReplyWithError(const sp<AReplyToken> &replyID, int32_t err);
- status_t init(const AString &name, bool nameIsType, bool encoder);
+ status_t init(const AString &name);
void setState(State newState);
void returnBuffersToCodec(bool isReclaim = false);
@@ -420,6 +428,7 @@
uint64_t getGraphicBufferSize();
void addResource(MediaResource::Type type, MediaResource::SubType subtype, uint64_t value);
+ void requestCpuBoostIfNeeded();
bool hasPendingBuffer(int portIndex);
bool hasPendingBuffer();
@@ -438,6 +447,63 @@
void onReleaseCrypto(const sp<AMessage>& msg);
+ // managing time-of-flight aka latency
+ typedef struct {
+ int64_t presentationUs;
+ int64_t startedNs;
+ } BufferFlightTiming_t;
+ std::deque<BufferFlightTiming_t> mBuffersInFlight;
+ Mutex mLatencyLock;
+ int64_t mLatencyUnknown; // buffers for which we couldn't calculate latency
+
+ void statsBufferSent(int64_t presentationUs);
+ void statsBufferReceived(int64_t presentationUs);
+
+ enum {
+ // the default shape of our latency histogram buckets
+ // XXX: should these be configurable in some way?
+ kLatencyHistBuckets = 20,
+ kLatencyHistWidth = 2000,
+ kLatencyHistFloor = 2000,
+
+ // how many samples are in the 'recent latency' histogram
+ // 300 frames = 5 sec @ 60fps or ~12 sec @ 24fps
+ kRecentLatencyFrames = 300,
+
+ // how we initialize mRecentSamples
+ kRecentSampleInvalid = -1,
+ };
+
+ int64_t mRecentSamples[kRecentLatencyFrames];
+ int mRecentHead;
+ Mutex mRecentLock;
+
+ class Histogram {
+ public:
+ Histogram() : mFloor(0), mWidth(0), mBelow(0), mAbove(0),
+ mMin(INT64_MAX), mMax(INT64_MIN), mSum(0), mCount(0),
+ mBucketCount(0), mBuckets(NULL) {};
+ ~Histogram() { clear(); };
+ void clear() { if (mBuckets != NULL) free(mBuckets); mBuckets = NULL; };
+ bool setup(int nbuckets, int64_t width, int64_t floor = 0);
+ void insert(int64_t sample);
+ int64_t getMin() const { return mMin; }
+ int64_t getMax() const { return mMax; }
+ int64_t getCount() const { return mCount; }
+ int64_t getSum() const { return mSum; }
+ int64_t getAvg() const { return mSum / (mCount == 0 ? 1 : mCount); }
+ std::string emit();
+ private:
+ int64_t mFloor, mCeiling, mWidth;
+ int64_t mBelow, mAbove;
+ int64_t mMin, mMax, mSum, mCount;
+
+ int mBucketCount;
+ int64_t *mBuckets;
+ };
+
+ Histogram mLatencyHist;
+
DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
};
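Note: the latency-tracking members added above (statsBufferSent/statsBufferReceived, the in-flight deque, and the Histogram class) only expose their interface in this header. The following free-standing sketch shows one plausible bucketing scheme consistent with that interface -- fixed-width buckets above a floor, with under/overflow counters -- and is illustrative only, not the patch's implementation.

// Illustrative only -- mirrors the Histogram interface declared above; this is
// NOT the implementation added by this patch.
#include <cstdint>
#include <cstdlib>

struct LatencyHistogramSketch {
    int64_t mFloor = 0, mWidth = 0;
    int64_t mBelow = 0, mAbove = 0, mSum = 0, mCount = 0;
    int mBucketCount = 0;
    int64_t *mBuckets = nullptr;

    ~LatencyHistogramSketch() { free(mBuckets); }

    bool setup(int nbuckets, int64_t width, int64_t floor = 0) {
        if (nbuckets <= 0 || width <= 0) return false;
        free(mBuckets);
        mBuckets = (int64_t *)calloc(nbuckets, sizeof(int64_t));
        if (mBuckets == nullptr) return false;
        mBucketCount = nbuckets;
        mWidth = width;
        mFloor = floor;
        mBelow = mAbove = mSum = mCount = 0;
        return true;
    }

    void insert(int64_t sample) {
        mSum += sample;
        mCount++;
        if (sample < mFloor) {                                  // below the first bucket
            mBelow++;
        } else if ((sample - mFloor) / mWidth >= mBucketCount) {
            mAbove++;                                           // past the last bucket
        } else {
            mBuckets[(sample - mFloor) / mWidth]++;             // fixed-width bucket
        }
    }
};

// e.g. setup(kLatencyHistBuckets, kLatencyHistWidth, kLatencyHistFloor),
// then insert(latency) once per buffer that completes its trip through the codec.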
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
new file mode 100644
index 0000000..3ef4c0e
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -0,0 +1,420 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef MEDIA_CODEC_CONSTANTS_H_
+#define MEDIA_CODEC_CONSTANTS_H_
+
+namespace {
+
+// from MediaCodecInfo.java
+constexpr int32_t AVCProfileBaseline = 0x01;
+constexpr int32_t AVCProfileMain = 0x02;
+constexpr int32_t AVCProfileExtended = 0x04;
+constexpr int32_t AVCProfileHigh = 0x08;
+constexpr int32_t AVCProfileHigh10 = 0x10;
+constexpr int32_t AVCProfileHigh422 = 0x20;
+constexpr int32_t AVCProfileHigh444 = 0x40;
+constexpr int32_t AVCProfileConstrainedBaseline = 0x10000;
+constexpr int32_t AVCProfileConstrainedHigh = 0x80000;
+
+constexpr int32_t AVCLevel1 = 0x01;
+constexpr int32_t AVCLevel1b = 0x02;
+constexpr int32_t AVCLevel11 = 0x04;
+constexpr int32_t AVCLevel12 = 0x08;
+constexpr int32_t AVCLevel13 = 0x10;
+constexpr int32_t AVCLevel2 = 0x20;
+constexpr int32_t AVCLevel21 = 0x40;
+constexpr int32_t AVCLevel22 = 0x80;
+constexpr int32_t AVCLevel3 = 0x100;
+constexpr int32_t AVCLevel31 = 0x200;
+constexpr int32_t AVCLevel32 = 0x400;
+constexpr int32_t AVCLevel4 = 0x800;
+constexpr int32_t AVCLevel41 = 0x1000;
+constexpr int32_t AVCLevel42 = 0x2000;
+constexpr int32_t AVCLevel5 = 0x4000;
+constexpr int32_t AVCLevel51 = 0x8000;
+constexpr int32_t AVCLevel52 = 0x10000;
+
+constexpr int32_t H263ProfileBaseline = 0x01;
+constexpr int32_t H263ProfileH320Coding = 0x02;
+constexpr int32_t H263ProfileBackwardCompatible = 0x04;
+constexpr int32_t H263ProfileISWV2 = 0x08;
+constexpr int32_t H263ProfileISWV3 = 0x10;
+constexpr int32_t H263ProfileHighCompression = 0x20;
+constexpr int32_t H263ProfileInternet = 0x40;
+constexpr int32_t H263ProfileInterlace = 0x80;
+constexpr int32_t H263ProfileHighLatency = 0x100;
+
+constexpr int32_t H263Level10 = 0x01;
+constexpr int32_t H263Level20 = 0x02;
+constexpr int32_t H263Level30 = 0x04;
+constexpr int32_t H263Level40 = 0x08;
+constexpr int32_t H263Level45 = 0x10;
+constexpr int32_t H263Level50 = 0x20;
+constexpr int32_t H263Level60 = 0x40;
+constexpr int32_t H263Level70 = 0x80;
+
+constexpr int32_t MPEG4ProfileSimple = 0x01;
+constexpr int32_t MPEG4ProfileSimpleScalable = 0x02;
+constexpr int32_t MPEG4ProfileCore = 0x04;
+constexpr int32_t MPEG4ProfileMain = 0x08;
+constexpr int32_t MPEG4ProfileNbit = 0x10;
+constexpr int32_t MPEG4ProfileScalableTexture = 0x20;
+constexpr int32_t MPEG4ProfileSimpleFace = 0x40;
+constexpr int32_t MPEG4ProfileSimpleFBA = 0x80;
+constexpr int32_t MPEG4ProfileBasicAnimated = 0x100;
+constexpr int32_t MPEG4ProfileHybrid = 0x200;
+constexpr int32_t MPEG4ProfileAdvancedRealTime = 0x400;
+constexpr int32_t MPEG4ProfileCoreScalable = 0x800;
+constexpr int32_t MPEG4ProfileAdvancedCoding = 0x1000;
+constexpr int32_t MPEG4ProfileAdvancedCore = 0x2000;
+constexpr int32_t MPEG4ProfileAdvancedScalable = 0x4000;
+constexpr int32_t MPEG4ProfileAdvancedSimple = 0x8000;
+
+constexpr int32_t MPEG4Level0 = 0x01;
+constexpr int32_t MPEG4Level0b = 0x02;
+constexpr int32_t MPEG4Level1 = 0x04;
+constexpr int32_t MPEG4Level2 = 0x08;
+constexpr int32_t MPEG4Level3 = 0x10;
+constexpr int32_t MPEG4Level3b = 0x18;
+constexpr int32_t MPEG4Level4 = 0x20;
+constexpr int32_t MPEG4Level4a = 0x40;
+constexpr int32_t MPEG4Level5 = 0x80;
+constexpr int32_t MPEG4Level6 = 0x100;
+
+constexpr int32_t MPEG2ProfileSimple = 0x00;
+constexpr int32_t MPEG2ProfileMain = 0x01;
+constexpr int32_t MPEG2Profile422 = 0x02;
+constexpr int32_t MPEG2ProfileSNR = 0x03;
+constexpr int32_t MPEG2ProfileSpatial = 0x04;
+constexpr int32_t MPEG2ProfileHigh = 0x05;
+
+constexpr int32_t MPEG2LevelLL = 0x00;
+constexpr int32_t MPEG2LevelML = 0x01;
+constexpr int32_t MPEG2LevelH14 = 0x02;
+constexpr int32_t MPEG2LevelHL = 0x03;
+constexpr int32_t MPEG2LevelHP = 0x04;
+
+constexpr int32_t AACObjectMain = 1;
+constexpr int32_t AACObjectLC = 2;
+constexpr int32_t AACObjectSSR = 3;
+constexpr int32_t AACObjectLTP = 4;
+constexpr int32_t AACObjectHE = 5;
+constexpr int32_t AACObjectScalable = 6;
+constexpr int32_t AACObjectERLC = 17;
+constexpr int32_t AACObjectERScalable = 20;
+constexpr int32_t AACObjectLD = 23;
+constexpr int32_t AACObjectHE_PS = 29;
+constexpr int32_t AACObjectELD = 39;
+constexpr int32_t AACObjectXHE = 42;
+
+constexpr int32_t VP8Level_Version0 = 0x01;
+constexpr int32_t VP8Level_Version1 = 0x02;
+constexpr int32_t VP8Level_Version2 = 0x04;
+constexpr int32_t VP8Level_Version3 = 0x08;
+
+constexpr int32_t VP8ProfileMain = 0x01;
+
+constexpr int32_t VP9Profile0 = 0x01;
+constexpr int32_t VP9Profile1 = 0x02;
+constexpr int32_t VP9Profile2 = 0x04;
+constexpr int32_t VP9Profile3 = 0x08;
+constexpr int32_t VP9Profile2HDR = 0x1000;
+constexpr int32_t VP9Profile3HDR = 0x2000;
+
+constexpr int32_t VP9Level1 = 0x1;
+constexpr int32_t VP9Level11 = 0x2;
+constexpr int32_t VP9Level2 = 0x4;
+constexpr int32_t VP9Level21 = 0x8;
+constexpr int32_t VP9Level3 = 0x10;
+constexpr int32_t VP9Level31 = 0x20;
+constexpr int32_t VP9Level4 = 0x40;
+constexpr int32_t VP9Level41 = 0x80;
+constexpr int32_t VP9Level5 = 0x100;
+constexpr int32_t VP9Level51 = 0x200;
+constexpr int32_t VP9Level52 = 0x400;
+constexpr int32_t VP9Level6 = 0x800;
+constexpr int32_t VP9Level61 = 0x1000;
+constexpr int32_t VP9Level62 = 0x2000;
+
+constexpr int32_t HEVCProfileMain = 0x01;
+constexpr int32_t HEVCProfileMain10 = 0x02;
+constexpr int32_t HEVCProfileMainStill = 0x04;
+constexpr int32_t HEVCProfileMain10HDR10 = 0x1000;
+
+constexpr int32_t HEVCMainTierLevel1 = 0x1;
+constexpr int32_t HEVCHighTierLevel1 = 0x2;
+constexpr int32_t HEVCMainTierLevel2 = 0x4;
+constexpr int32_t HEVCHighTierLevel2 = 0x8;
+constexpr int32_t HEVCMainTierLevel21 = 0x10;
+constexpr int32_t HEVCHighTierLevel21 = 0x20;
+constexpr int32_t HEVCMainTierLevel3 = 0x40;
+constexpr int32_t HEVCHighTierLevel3 = 0x80;
+constexpr int32_t HEVCMainTierLevel31 = 0x100;
+constexpr int32_t HEVCHighTierLevel31 = 0x200;
+constexpr int32_t HEVCMainTierLevel4 = 0x400;
+constexpr int32_t HEVCHighTierLevel4 = 0x800;
+constexpr int32_t HEVCMainTierLevel41 = 0x1000;
+constexpr int32_t HEVCHighTierLevel41 = 0x2000;
+constexpr int32_t HEVCMainTierLevel5 = 0x4000;
+constexpr int32_t HEVCHighTierLevel5 = 0x8000;
+constexpr int32_t HEVCMainTierLevel51 = 0x10000;
+constexpr int32_t HEVCHighTierLevel51 = 0x20000;
+constexpr int32_t HEVCMainTierLevel52 = 0x40000;
+constexpr int32_t HEVCHighTierLevel52 = 0x80000;
+constexpr int32_t HEVCMainTierLevel6 = 0x100000;
+constexpr int32_t HEVCHighTierLevel6 = 0x200000;
+constexpr int32_t HEVCMainTierLevel61 = 0x400000;
+constexpr int32_t HEVCHighTierLevel61 = 0x800000;
+constexpr int32_t HEVCMainTierLevel62 = 0x1000000;
+constexpr int32_t HEVCHighTierLevel62 = 0x2000000;
+
+constexpr int32_t DolbyVisionProfileDvavPer = 0x1;
+constexpr int32_t DolbyVisionProfileDvavPen = 0x2;
+constexpr int32_t DolbyVisionProfileDvheDer = 0x4;
+constexpr int32_t DolbyVisionProfileDvheDen = 0x8;
+constexpr int32_t DolbyVisionProfileDvheDtr = 0x10;
+constexpr int32_t DolbyVisionProfileDvheStn = 0x20;
+constexpr int32_t DolbyVisionProfileDvheDth = 0x40;
+constexpr int32_t DolbyVisionProfileDvheDtb = 0x80;
+constexpr int32_t DolbyVisionProfileDvheSt = 0x100;
+constexpr int32_t DolbyVisionProfileDvavSe = 0x200;
+
+constexpr int32_t DolbyVisionLevelHd24 = 0x1;
+constexpr int32_t DolbyVisionLevelHd30 = 0x2;
+constexpr int32_t DolbyVisionLevelFhd24 = 0x4;
+constexpr int32_t DolbyVisionLevelFhd30 = 0x8;
+constexpr int32_t DolbyVisionLevelFhd60 = 0x10;
+constexpr int32_t DolbyVisionLevelUhd24 = 0x20;
+constexpr int32_t DolbyVisionLevelUhd30 = 0x40;
+constexpr int32_t DolbyVisionLevelUhd48 = 0x80;
+constexpr int32_t DolbyVisionLevelUhd60 = 0x100;
+
+constexpr int32_t BITRATE_MODE_CBR = 2;
+constexpr int32_t BITRATE_MODE_CQ = 0;
+constexpr int32_t BITRATE_MODE_VBR = 1;
+
+constexpr int32_t COLOR_Format12bitRGB444 = 3;
+constexpr int32_t COLOR_Format16bitARGB1555 = 5;
+constexpr int32_t COLOR_Format16bitARGB4444 = 4;
+constexpr int32_t COLOR_Format16bitBGR565 = 7;
+constexpr int32_t COLOR_Format16bitRGB565 = 6;
+constexpr int32_t COLOR_Format18bitARGB1665 = 9;
+constexpr int32_t COLOR_Format18BitBGR666 = 41;
+constexpr int32_t COLOR_Format18bitRGB666 = 8;
+constexpr int32_t COLOR_Format19bitARGB1666 = 10;
+constexpr int32_t COLOR_Format24BitABGR6666 = 43;
+constexpr int32_t COLOR_Format24bitARGB1887 = 13;
+constexpr int32_t COLOR_Format24BitARGB6666 = 42;
+constexpr int32_t COLOR_Format24bitBGR888 = 12;
+constexpr int32_t COLOR_Format24bitRGB888 = 11;
+constexpr int32_t COLOR_Format25bitARGB1888 = 14;
+constexpr int32_t COLOR_Format32bitABGR8888 = 0x7F00A000;
+constexpr int32_t COLOR_Format32bitARGB8888 = 16;
+constexpr int32_t COLOR_Format32bitBGRA8888 = 15;
+constexpr int32_t COLOR_Format8bitRGB332 = 2;
+constexpr int32_t COLOR_FormatCbYCrY = 27;
+constexpr int32_t COLOR_FormatCrYCbY = 28;
+constexpr int32_t COLOR_FormatL16 = 36;
+constexpr int32_t COLOR_FormatL2 = 33;
+constexpr int32_t COLOR_FormatL24 = 37;
+constexpr int32_t COLOR_FormatL32 = 38;
+constexpr int32_t COLOR_FormatL4 = 34;
+constexpr int32_t COLOR_FormatL8 = 35;
+constexpr int32_t COLOR_FormatMonochrome = 1;
+constexpr int32_t COLOR_FormatRawBayer10bit = 31;
+constexpr int32_t COLOR_FormatRawBayer8bit = 30;
+constexpr int32_t COLOR_FormatRawBayer8bitcompressed = 32;
+constexpr int32_t COLOR_FormatRGBAFlexible = 0x7F36A888;
+constexpr int32_t COLOR_FormatRGBFlexible = 0x7F36B888;
+constexpr int32_t COLOR_FormatSurface = 0x7F000789;
+constexpr int32_t COLOR_FormatYCbYCr = 25;
+constexpr int32_t COLOR_FormatYCrYCb = 26;
+constexpr int32_t COLOR_FormatYUV411PackedPlanar = 18;
+constexpr int32_t COLOR_FormatYUV411Planar = 17;
+constexpr int32_t COLOR_FormatYUV420Flexible = 0x7F420888;
+constexpr int32_t COLOR_FormatYUV420PackedPlanar = 20;
+constexpr int32_t COLOR_FormatYUV420PackedSemiPlanar = 39;
+constexpr int32_t COLOR_FormatYUV420Planar = 19;
+constexpr int32_t COLOR_FormatYUV420SemiPlanar = 21;
+constexpr int32_t COLOR_FormatYUV422Flexible = 0x7F422888;
+constexpr int32_t COLOR_FormatYUV422PackedPlanar = 23;
+constexpr int32_t COLOR_FormatYUV422PackedSemiPlanar = 40;
+constexpr int32_t COLOR_FormatYUV422Planar = 22;
+constexpr int32_t COLOR_FormatYUV422SemiPlanar = 24;
+constexpr int32_t COLOR_FormatYUV444Flexible = 0x7F444888;
+constexpr int32_t COLOR_FormatYUV444Interleaved = 29;
+constexpr int32_t COLOR_QCOM_FormatYUV420SemiPlanar = 0x7fa30c00;
+constexpr int32_t COLOR_TI_FormatYUV420PackedSemiPlanar = 0x7f000100;
+
+constexpr char FEATURE_AdaptivePlayback[] = "adaptive-playback";
+constexpr char FEATURE_IntraRefresh[] = "intra-refresh";
+constexpr char FEATURE_PartialFrame[] = "partial-frame";
+constexpr char FEATURE_SecurePlayback[] = "secure-playback";
+constexpr char FEATURE_TunneledPlayback[] = "tunneled-playback";
+
+// from MediaFormat.java
+constexpr char MIMETYPE_VIDEO_VP8[] = "video/x-vnd.on2.vp8";
+constexpr char MIMETYPE_VIDEO_VP9[] = "video/x-vnd.on2.vp9";
+constexpr char MIMETYPE_VIDEO_AVC[] = "video/avc";
+constexpr char MIMETYPE_VIDEO_HEVC[] = "video/hevc";
+constexpr char MIMETYPE_VIDEO_MPEG4[] = "video/mp4v-es";
+constexpr char MIMETYPE_VIDEO_H263[] = "video/3gpp";
+constexpr char MIMETYPE_VIDEO_MPEG2[] = "video/mpeg2";
+constexpr char MIMETYPE_VIDEO_RAW[] = "video/raw";
+constexpr char MIMETYPE_VIDEO_DOLBY_VISION[] = "video/dolby-vision";
+constexpr char MIMETYPE_VIDEO_SCRAMBLED[] = "video/scrambled";
+
+constexpr char MIMETYPE_AUDIO_AMR_NB[] = "audio/3gpp";
+constexpr char MIMETYPE_AUDIO_AMR_WB[] = "audio/amr-wb";
+constexpr char MIMETYPE_AUDIO_MPEG[] = "audio/mpeg";
+constexpr char MIMETYPE_AUDIO_AAC[] = "audio/mp4a-latm";
+constexpr char MIMETYPE_AUDIO_QCELP[] = "audio/qcelp";
+constexpr char MIMETYPE_AUDIO_VORBIS[] = "audio/vorbis";
+constexpr char MIMETYPE_AUDIO_OPUS[] = "audio/opus";
+constexpr char MIMETYPE_AUDIO_G711_ALAW[] = "audio/g711-alaw";
+constexpr char MIMETYPE_AUDIO_G711_MLAW[] = "audio/g711-mlaw";
+constexpr char MIMETYPE_AUDIO_RAW[] = "audio/raw";
+constexpr char MIMETYPE_AUDIO_FLAC[] = "audio/flac";
+constexpr char MIMETYPE_AUDIO_MSGSM[] = "audio/gsm";
+constexpr char MIMETYPE_AUDIO_AC3[] = "audio/ac3";
+constexpr char MIMETYPE_AUDIO_EAC3[] = "audio/eac3";
+constexpr char MIMETYPE_AUDIO_SCRAMBLED[] = "audio/scrambled";
+
+constexpr char MIMETYPE_IMAGE_ANDROID_HEIC[] = "image/vnd.android.heic";
+
+constexpr char MIMETYPE_TEXT_CEA_608[] = "text/cea-608";
+constexpr char MIMETYPE_TEXT_CEA_708[] = "text/cea-708";
+constexpr char MIMETYPE_TEXT_SUBRIP[] = "application/x-subrip";
+constexpr char MIMETYPE_TEXT_VTT[] = "text/vtt";
+
+constexpr int32_t COLOR_RANGE_FULL = 1;
+constexpr int32_t COLOR_RANGE_LIMITED = 2;
+constexpr int32_t COLOR_STANDARD_BT2020 = 6;
+constexpr int32_t COLOR_STANDARD_BT601_NTSC = 4;
+constexpr int32_t COLOR_STANDARD_BT601_PAL = 2;
+constexpr int32_t COLOR_STANDARD_BT709 = 1;
+constexpr int32_t COLOR_TRANSFER_HLG = 7;
+constexpr int32_t COLOR_TRANSFER_LINEAR = 1;
+constexpr int32_t COLOR_TRANSFER_SDR_VIDEO = 3;
+constexpr int32_t COLOR_TRANSFER_ST2084 = 6;
+
+constexpr char KEY_AAC_DRC_ATTENUATION_FACTOR[] = "aac-drc-cut-level";
+constexpr char KEY_AAC_DRC_BOOST_FACTOR[] = "aac-drc-boost-level";
+constexpr char KEY_AAC_DRC_EFFECT_TYPE[] = "aac-drc-effect-type";
+constexpr char KEY_AAC_DRC_HEAVY_COMPRESSION[] = "aac-drc-heavy-compression";
+constexpr char KEY_AAC_DRC_TARGET_REFERENCE_LEVEL[] = "aac-target-ref-level";
+constexpr char KEY_AAC_ENCODED_TARGET_LEVEL[] = "aac-encoded-target-level";
+constexpr char KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT[] = "aac-max-output-channel_count";
+constexpr char KEY_AAC_PROFILE[] = "aac-profile";
+constexpr char KEY_AAC_SBR_MODE[] = "aac-sbr-mode";
+constexpr char KEY_AUDIO_SESSION_ID[] = "audio-session-id";
+constexpr char KEY_BIT_RATE[] = "bitrate";
+constexpr char KEY_BITRATE_MODE[] = "bitrate-mode";
+constexpr char KEY_CA_SESSION_ID[] = "ca-session-id";
+constexpr char KEY_CA_SYSTEM_ID[] = "ca-system-id";
+constexpr char KEY_CAPTURE_RATE[] = "capture-rate";
+constexpr char KEY_CHANNEL_COUNT[] = "channel-count";
+constexpr char KEY_CHANNEL_MASK[] = "channel-mask";
+constexpr char KEY_COLOR_FORMAT[] = "color-format";
+constexpr char KEY_COLOR_RANGE[] = "color-range";
+constexpr char KEY_COLOR_STANDARD[] = "color-standard";
+constexpr char KEY_COLOR_TRANSFER[] = "color-transfer";
+constexpr char KEY_COMPLEXITY[] = "complexity";
+constexpr char KEY_DURATION[] = "durationUs";
+constexpr char KEY_FEATURE_[] = "feature-";
+constexpr char KEY_FLAC_COMPRESSION_LEVEL[] = "flac-compression-level";
+constexpr char KEY_FRAME_RATE[] = "frame-rate";
+constexpr char KEY_GRID_COLUMNS[] = "grid-cols";
+constexpr char KEY_GRID_ROWS[] = "grid-rows";
+constexpr char KEY_HDR_STATIC_INFO[] = "hdr-static-info";
+constexpr char KEY_HEIGHT[] = "height";
+constexpr char KEY_I_FRAME_INTERVAL[] = "i-frame-interval";
+constexpr char KEY_INTRA_REFRESH_PERIOD[] = "intra-refresh-period";
+constexpr char KEY_IS_ADTS[] = "is-adts";
+constexpr char KEY_IS_AUTOSELECT[] = "is-autoselect";
+constexpr char KEY_IS_DEFAULT[] = "is-default";
+constexpr char KEY_IS_FORCED_SUBTITLE[] = "is-forced-subtitle";
+constexpr char KEY_IS_TIMED_TEXT[] = "is-timed-text";
+constexpr char KEY_LANGUAGE[] = "language";
+constexpr char KEY_LATENCY[] = "latency";
+constexpr char KEY_LEVEL[] = "level";
+constexpr char KEY_MAX_BIT_RATE[] = "max-bitrate";
+constexpr char KEY_MAX_HEIGHT[] = "max-height";
+constexpr char KEY_MAX_INPUT_SIZE[] = "max-input-size";
+constexpr char KEY_MAX_WIDTH[] = "max-width";
+constexpr char KEY_MIME[] = "mime";
+constexpr char KEY_OPERATING_RATE[] = "operating-rate";
+constexpr char KEY_OUTPUT_REORDER_DEPTH[] = "output-reorder-depth";
+constexpr char KEY_PCM_ENCODING[] = "pcm-encoding";
+constexpr char KEY_PRIORITY[] = "priority";
+constexpr char KEY_PROFILE[] = "profile";
+constexpr char KEY_PUSH_BLANK_BUFFERS_ON_STOP[] = "push-blank-buffers-on-shutdown";
+constexpr char KEY_QUALITY[] = "quality";
+constexpr char KEY_REPEAT_PREVIOUS_FRAME_AFTER[] = "repeat-previous-frame-after";
+constexpr char KEY_ROTATION[] = "rotation-degrees";
+constexpr char KEY_SAMPLE_RATE[] = "sample-rate";
+constexpr char KEY_SLICE_HEIGHT[] = "slice-height";
+constexpr char KEY_STRIDE[] = "stride";
+constexpr char KEY_TEMPORAL_LAYERING[] = "ts-schema";
+constexpr char KEY_TILE_HEIGHT[] = "tile-height";
+constexpr char KEY_TILE_WIDTH[] = "tile-width";
+constexpr char KEY_TRACK_ID[] = "track-id";
+constexpr char KEY_WIDTH[] = "width";
+
+// from MediaCodec.java
+constexpr int32_t ERROR_INSUFFICIENT_OUTPUT_PROTECTION = 4;
+constexpr int32_t ERROR_INSUFFICIENT_RESOURCE = 1100;
+constexpr int32_t ERROR_KEY_EXPIRED = 2;
+constexpr int32_t ERROR_NO_KEY = 1;
+constexpr int32_t ERROR_RECLAIMED = 1101;
+constexpr int32_t ERROR_RESOURCE_BUSY = 3;
+constexpr int32_t ERROR_SESSION_NOT_OPENED = 5;
+constexpr int32_t ERROR_UNSUPPORTED_OPERATION = 6;
+constexpr char CODEC[] = "android.media.mediacodec.codec";
+constexpr char ENCODER[] = "android.media.mediacodec.encoder";
+constexpr char HEIGHT[] = "android.media.mediacodec.height";
+constexpr char MIME_TYPE[] = "android.media.mediacodec.mime";
+constexpr char MODE[] = "android.media.mediacodec.mode";
+constexpr char MODE_AUDIO[] = "audio";
+constexpr char MODE_VIDEO[] = "video";
+constexpr char ROTATION[] = "android.media.mediacodec.rotation";
+constexpr char SECURE[] = "android.media.mediacodec.secure";
+constexpr char WIDTH[] = "android.media.mediacodec.width";
+
+constexpr int32_t BUFFER_FLAG_CODEC_CONFIG = 2;
+constexpr int32_t BUFFER_FLAG_END_OF_STREAM = 4;
+constexpr int32_t BUFFER_FLAG_KEY_FRAME = 1;
+constexpr int32_t BUFFER_FLAG_PARTIAL_FRAME = 8;
+constexpr int32_t BUFFER_FLAG_SYNC_FRAME = 1;
+constexpr int32_t CONFIGURE_FLAG_ENCODE = 1;
+constexpr int32_t CRYPTO_MODE_AES_CBC = 2;
+constexpr int32_t CRYPTO_MODE_AES_CTR = 1;
+constexpr int32_t CRYPTO_MODE_UNENCRYPTED = 0;
+constexpr int32_t INFO_OUTPUT_BUFFERS_CHANGED = -3;
+constexpr int32_t INFO_OUTPUT_FORMAT_CHANGED = -2;
+constexpr int32_t INFO_TRY_AGAIN_LATER = -1;
+constexpr int32_t VIDEO_SCALING_MODE_SCALE_TO_FIT = 1;
+constexpr int32_t VIDEO_SCALING_MODE_SCALE_TO_FIT_WITH_CROPPING = 2;
+constexpr char PARAMETER_KEY_REQUEST_SYNC_FRAME[] = "request-sync";
+constexpr char PARAMETER_KEY_SUSPEND[] = "drop-input-frames";
+constexpr char PARAMETER_KEY_VIDEO_BITRATE[] = "video-bitrate";
+
+}
+
+#endif // MEDIA_CODEC_CONSTANTS_H_
+
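The constants above mirror the Java-side MediaCodecInfo/MediaFormat/MediaCodec values so native code can reason about them directly. As a small illustration, a hypothetical logging helper (avcProfileName is not part of this header) might map the AVC profile flags to readable names:

// Hypothetical helper, not part of MediaCodecConstants.h: maps AVC profile
// constants declared above to readable names, e.g. for logging.
#include <cstdint>
#include <media/stagefright/MediaCodecConstants.h>

static const char *avcProfileName(int32_t profile) {
    switch (profile) {
        case AVCProfileBaseline:            return "Baseline";
        case AVCProfileConstrainedBaseline: return "Constrained Baseline";
        case AVCProfileMain:                return "Main";
        case AVCProfileExtended:            return "Extended";
        case AVCProfileHigh:                return "High";
        case AVCProfileConstrainedHigh:     return "Constrained High";
        case AVCProfileHigh10:              return "High 10";
        case AVCProfileHigh422:             return "High 4:2:2";
        case AVCProfileHigh444:             return "High 4:4:4";
        default:                            return "Unknown";
    }
}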
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecList.h b/media/libstagefright/include/media/stagefright/MediaCodecList.h
index f2bd496..e44b0a4 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecList.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecList.h
@@ -18,8 +18,11 @@
#define MEDIA_CODEC_LIST_H_
+#include <vector>
+
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/MediaCodecListWriter.h>
#include <media/IMediaCodecList.h>
#include <media/MediaCodecInfo.h>
@@ -35,8 +38,6 @@
struct AMessage;
-struct MediaCodecListBuilderBase;
-
struct MediaCodecList : public BnMediaCodecList {
static sp<IMediaCodecList> getInstance();
@@ -72,8 +73,7 @@
const char *mime,
bool createEncoder,
uint32_t flags,
- Vector<AString> *matchingCodecs,
- Vector<AString> *owners = nullptr);
+ Vector<AString> *matchingCodecs);
static bool isSoftwareCodec(const AString &componentName);
@@ -94,9 +94,9 @@
/**
* This constructor will call `buildMediaCodecList()` from the given
- * `MediaCodecListBuilderBase` object.
+ * `MediaCodecListBuilderBase` objects.
*/
- MediaCodecList(MediaCodecListBuilderBase* builder);
+ MediaCodecList(std::vector<MediaCodecListBuilderBase*> builders);
~MediaCodecList();
@@ -104,66 +104,6 @@
MediaCodecList(const MediaCodecList&) = delete;
MediaCodecList& operator=(const MediaCodecList&) = delete;
-
- friend MediaCodecListWriter;
-};
-
-/**
- * This class is to be used by a `MediaCodecListBuilderBase` instance to add
- * information to the associated `MediaCodecList` object.
- */
-struct MediaCodecListWriter {
- /**
- * Add a key-value pair to a `MediaCodecList`'s global settings.
- *
- * @param key Key.
- * @param value Value.
- */
- void addGlobalSetting(const char* key, const char* value);
- /**
- * Create an add a new `MediaCodecInfo` object to a `MediaCodecList`, and
- * return a `MediaCodecInfoWriter` object associated with the newly added
- * `MediaCodecInfo`.
- *
- * @return The `MediaCodecInfoWriter` object associated with the newly
- * added `MediaCodecInfo` object.
- */
- std::unique_ptr<MediaCodecInfoWriter> addMediaCodecInfo();
-private:
- /**
- * The associated `MediaCodecList` object.
- */
- MediaCodecList* mList;
-
- /**
- * Construct this writer object associated with the given `MediaCodecList`
- * object.
- *
- * @param list The "base" `MediaCodecList` object.
- */
- MediaCodecListWriter(MediaCodecList* list);
-
- friend MediaCodecList;
-};
-
-/**
- * This interface is to be used by `MediaCodecList` to fill its members with
- * appropriate information. `buildMediaCodecList()` will be called from a
- * `MediaCodecList` object during its construction.
- */
-struct MediaCodecListBuilderBase {
- /**
- * Build the `MediaCodecList` via the given `MediaCodecListWriter` interface.
- *
- * @param writer The writer interface.
- * @return The status of the construction. `NO_ERROR` means success.
- */
- virtual status_t buildMediaCodecList(MediaCodecListWriter* writer) = 0;
-
- /**
- * The default destructor does nothing.
- */
- virtual ~MediaCodecListBuilderBase();
};
} // namespace android
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h b/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h
new file mode 100644
index 0000000..59f57c7
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODEC_LIST_WRITER_H_
+
+#define MEDIA_CODEC_LIST_WRITER_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/MediaCodecListWriter.h>
+#include <media/MediaCodecInfo.h>
+
+#include <utils/Errors.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+/**
+ * This class is to be used by a `MediaCodecListBuilderBase` instance to add
+ * information to the destination `MediaCodecList` object.
+ */
+struct MediaCodecListWriter {
+ /**
+ * Add a key-value pair to a `MediaCodecList`'s global settings.
+ *
+ * @param key Key.
+ * @param value Value.
+ */
+ void addGlobalSetting(const char* key, const char* value);
+ /**
+ * Create and add a new `MediaCodecInfo` object for a `MediaCodecList`, and
+ * return a `MediaCodecInfoWriter` object associated with the newly added
+ * `MediaCodecInfo`.
+ *
+ * @return The `MediaCodecInfoWriter` object associated with the newly
+ * added `MediaCodecInfo` object.
+ */
+ std::unique_ptr<MediaCodecInfoWriter> addMediaCodecInfo();
+private:
+ MediaCodecListWriter() = default;
+
+ void writeGlobalSettings(const sp<AMessage> &globalSettings) const;
+ void writeCodecInfos(std::vector<sp<MediaCodecInfo>> *codecInfos) const;
+
+ std::vector<std::pair<std::string, std::string>> mGlobalSettings;
+ std::vector<sp<MediaCodecInfo>> mCodecInfos;
+
+ friend struct MediaCodecList;
+};
+
+/**
+ * This interface is to be used by `MediaCodecList` to fill its members with
+ * appropriate information. `buildMediaCodecList()` will be called from a
+ * `MediaCodecList` object during its construction.
+ */
+struct MediaCodecListBuilderBase {
+ /**
+ * Build the `MediaCodecList` via the given `MediaCodecListWriter` interface.
+ *
+ * @param writer The writer interface.
+ * @return The status of the construction. `NO_ERROR` means success.
+ */
+ virtual status_t buildMediaCodecList(MediaCodecListWriter* writer) = 0;
+
+ /**
+ * The default destructor does nothing.
+ */
+ virtual ~MediaCodecListBuilderBase() = default;
+
+ typedef MediaCodecListBuilderBase *(*CreateBuilderFunc)(void);
+};
+
+} // namespace android
+
+#endif // MEDIA_CODEC_LIST_WRITER_H_
+
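A minimal sketch of how a builder could plug into the interface declared above: it implements buildMediaCodecList() and uses only addGlobalSetting() and addMediaCodecInfo(). The MediaCodecInfoWriter returned by addMediaCodecInfo() would then describe the codec; its setters live in MediaCodecInfo.h and are not shown in this file, and the global-setting key used here is illustrative.

// Sketch only, assuming just the interface declared in MediaCodecListWriter.h.
#include <memory>
#include <media/stagefright/MediaCodecListWriter.h>
#include <utils/Errors.h>

namespace android {

struct ExampleInfoBuilder : public MediaCodecListBuilderBase {
    status_t buildMediaCodecList(MediaCodecListWriter *writer) override {
        if (writer == nullptr) {
            return BAD_VALUE;
        }
        // Global settings apply to the list as a whole (key chosen for illustration).
        writer->addGlobalSetting("supports-multiple-secure-codecs", "false");
        // Each call creates one MediaCodecInfo entry in the resulting list.
        std::unique_ptr<MediaCodecInfoWriter> info = writer->addMediaCodecInfo();
        (void)info;  // the codec's name/capabilities would be filled in here
        return OK;
    }
};

}  // namespace android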
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecSource.h b/media/libstagefright/include/media/stagefright/MediaCodecSource.h
index 3ac539e..a68cc19 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecSource.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecSource.h
@@ -17,10 +17,10 @@
#ifndef MediaCodecSource_H_
#define MediaCodecSource_H_
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AHandlerReflector.h>
#include <media/stagefright/foundation/Mutexed.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/PersistentSurface.h>
namespace android {
@@ -30,7 +30,6 @@
struct AReplyToken;
class IGraphicBufferProducer;
struct MediaCodec;
-class MetaData;
struct MediaCodecSource : public MediaSource,
public MediaBufferObserver {
@@ -58,13 +57,13 @@
virtual status_t pause(MetaData *params);
virtual sp<MetaData> getFormat();
virtual status_t read(
- MediaBuffer **buffer,
+ MediaBufferBase **buffer,
const ReadOptions *options = NULL);
virtual status_t setStopTimeUs(int64_t stopTimeUs);
// MediaBufferObserver
- virtual void signalBufferReturned(MediaBuffer *buffer);
+ virtual void signalBufferReturned(MediaBufferBase *buffer);
// for AHandlerReflector
void onMessageReceived(const sp<AMessage> &msg);
@@ -137,7 +136,7 @@
sp<AMessage> mEncoderActivityNotify;
sp<IGraphicBufferProducer> mGraphicBufferProducer;
sp<PersistentSurface> mPersistentSurface;
- List<MediaBuffer *> mInputBufferQueue;
+ List<MediaBufferBase *> mInputBufferQueue;
List<size_t> mAvailEncoderInputIndices;
List<int64_t> mDecodingTimeQueue; // decoding time (us) for video
int64_t mInputBufferTimeOffsetUs;
@@ -150,7 +149,7 @@
struct Output {
Output();
- List<MediaBuffer*> mBufferQueue;
+ List<MediaBufferBase*> mBufferQueue;
bool mEncoderReachedEOS;
status_t mErrorCode;
Condition mCond;
diff --git a/media/libstagefright/include/media/stagefright/MediaDefs.h b/media/libstagefright/include/media/stagefright/MediaDefs.h
index 359fb69..d20c5da 100644
--- a/media/libstagefright/include/media/stagefright/MediaDefs.h
+++ b/media/libstagefright/include/media/stagefright/MediaDefs.h
@@ -26,6 +26,6 @@
*
*/
-#include <media/MediaDefs.h>
+#include <media/stagefright/foundation/MediaDefs.h>
#endif // STAGEFRIGHT_MEDIA_DEFS_H_
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractor.h b/media/libstagefright/include/media/stagefright/MediaExtractor.h
deleted file mode 100644
index 6ec7eaf..0000000
--- a/media/libstagefright/include/media/stagefright/MediaExtractor.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_EXTRACTOR_H_
-
-#define MEDIA_EXTRACTOR_H_
-
-#include <media/IMediaExtractor.h>
-#include <media/IMediaSource.h>
-#include <media/MediaAnalyticsItem.h>
-
-namespace android {
-class DataSource;
-struct MediaSource;
-class MetaData;
-
-class MediaExtractor : public BnMediaExtractor {
-public:
- static sp<IMediaExtractor> Create(
- const sp<DataSource> &source, const char *mime = NULL);
- static sp<MediaExtractor> CreateFromService(
- const sp<DataSource> &source, const char *mime = NULL);
-
- virtual size_t countTracks() = 0;
- virtual sp<IMediaSource> getTrack(size_t index) = 0;
-
- enum GetTrackMetaDataFlags {
- kIncludeExtensiveMetaData = 1
- };
- virtual sp<MetaData> getTrackMetaData(
- size_t index, uint32_t flags = 0) = 0;
-
- // Return container specific meta-data. The default implementation
- // returns an empty metadata object.
- virtual sp<MetaData> getMetaData();
-
- status_t getMetrics(Parcel *reply);
-
- enum Flags {
- CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
- CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
- CAN_PAUSE = 4,
- CAN_SEEK = 8, // the "seek bar"
- };
-
- // If subclasses do _not_ override this, the default is
- // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
- virtual uint32_t flags() const;
-
- // for DRM
- virtual char* getDrmTrackInfo(size_t /*trackID*/, int * /*len*/) {
- return NULL;
- }
- virtual void setUID(uid_t /*uid*/) {
- }
- virtual status_t setMediaCas(const HInterfaceToken &/*casToken*/) override {
- return INVALID_OPERATION;
- }
-
- virtual const char * name() { return "<unspecified>"; }
-
- virtual void release() {}
-
-protected:
- MediaExtractor();
- virtual ~MediaExtractor();
-
- MediaAnalyticsItem *mAnalyticsItem;
-
- virtual void populateMetrics();
-
-private:
-
- typedef bool (*SnifferFunc)(
- const sp<DataSource> &source, String8 *mimeType,
- float *confidence, sp<AMessage> *meta);
-
- static Mutex gSnifferMutex;
- static List<SnifferFunc> gSniffers;
- static bool gSniffersRegistered;
-
- // The sniffer can optionally fill in "meta" with an AMessage containing
- // a dictionary of values that helps the corresponding extractor initialize
- // its state without duplicating effort already exerted by the sniffer.
- static void RegisterSniffer_l(SnifferFunc func);
-
- static bool sniff(const sp<DataSource> &source,
- String8 *mimeType, float *confidence, sp<AMessage> *meta);
-
- static void RegisterDefaultSniffers();
-
- MediaExtractor(const MediaExtractor &);
- MediaExtractor &operator=(const MediaExtractor &);
-};
-
-} // namespace android
-
-#endif // MEDIA_EXTRACTOR_H_
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
new file mode 100644
index 0000000..fb9f5bd
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_EXTRACTOR_FACTORY_H_
+
+#define MEDIA_EXTRACTOR_FACTORY_H_
+
+#include <stdio.h>
+
+#include <media/IMediaExtractor.h>
+#include <media/MediaExtractor.h>
+#include <utils/List.h>
+
+namespace android {
+
+class DataSource;
+struct ExtractorPlugin;
+
+class MediaExtractorFactory {
+public:
+ static sp<IMediaExtractor> Create(
+ const sp<DataSource> &source, const char *mime = NULL);
+ static sp<IMediaExtractor> CreateFromService(
+ const sp<DataSource> &source, const char *mime = NULL);
+ static void LoadPlugins(const ::std::string& apkPath);
+ static status_t dump(int fd, const Vector<String16>& args);
+
+private:
+ static Mutex gPluginMutex;
+ static std::shared_ptr<List<sp<ExtractorPlugin>>> gPlugins;
+ static bool gPluginsRegistered;
+
+ static void RegisterExtractorsInApk(
+ const char *apkPath, List<sp<ExtractorPlugin>> &pluginList);
+ static void RegisterExtractorsInSystem(
+ const char *libDirPath, List<sp<ExtractorPlugin>> &pluginList);
+ static void RegisterExtractor(
+ const sp<ExtractorPlugin> &plugin, List<sp<ExtractorPlugin>> &pluginList);
+
+ static MediaExtractor::CreatorFunc sniff(DataSourceBase *source,
+ float *confidence, void **meta, MediaExtractor::FreeMetaFunc *freeMeta,
+ sp<ExtractorPlugin> &plugin);
+
+ static void UpdateExtractors(const char *newUpdateApkPath);
+};
+
+} // namespace android
+
+#endif // MEDIA_EXTRACTOR_FACTORY_H_
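A usage sketch against the factory declared above: create an extractor for an already-opened DataSource and walk its tracks. The DataSource include path and the countTracks()/getTrack() calls are assumed from the IMediaExtractor binder interface rather than from this header.

// Sketch only; error handling abbreviated, DataSource assumed opened elsewhere.
#include <media/stagefright/MediaExtractorFactory.h>
#include <media/IMediaExtractor.h>
#include <media/DataSource.h>  // assumed location of DataSource in this tree

namespace android {

void dumpTracks(const sp<DataSource> &source) {
    sp<IMediaExtractor> extractor = MediaExtractorFactory::Create(source);
    if (extractor == nullptr) {
        return;  // no plugin recognized the container
    }
    size_t n = extractor->countTracks();
    for (size_t i = 0; i < n; ++i) {
        sp<IMediaSource> track = extractor->getTrack(i);
        (void)track;  // each track can now be started and read independently
    }
}

}  // namespace android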
diff --git a/media/libstagefright/include/media/stagefright/MediaFilter.h b/media/libstagefright/include/media/stagefright/MediaFilter.h
index 0c10d11..a28c49d 100644
--- a/media/libstagefright/include/media/stagefright/MediaFilter.h
+++ b/media/libstagefright/include/media/stagefright/MediaFilter.h
@@ -57,7 +57,7 @@
OWNED_BY_UPSTREAM,
};
- IOMX::buffer_id mBufferID;
+ uint32_t mBufferID;
int32_t mGeneration;
int32_t mOutputFlags;
Status mStatus;
@@ -121,7 +121,7 @@
status_t allocateBuffersOnPort(OMX_U32 portIndex);
BufferInfo *findBufferByID(
- uint32_t portIndex, IOMX::buffer_id bufferID,
+ uint32_t portIndex, uint32_t bufferID,
ssize_t *index = NULL);
void postFillThisBuffer(BufferInfo *info);
void postDrainThisBuffer(BufferInfo *info);
diff --git a/media/libstagefright/include/media/stagefright/MediaHTTP.h b/media/libstagefright/include/media/stagefright/MediaHTTP.h
index 006d8d8..fe0e613 100644
--- a/media/libstagefright/include/media/stagefright/MediaHTTP.h
+++ b/media/libstagefright/include/media/stagefright/MediaHTTP.h
@@ -24,10 +24,10 @@
namespace android {
-struct IMediaHTTPConnection;
+struct MediaHTTPConnection;
struct MediaHTTP : public HTTPBase {
- MediaHTTP(const sp<IMediaHTTPConnection> &conn);
+ MediaHTTP(const sp<MediaHTTPConnection> &conn);
virtual status_t connect(
const char *uri,
@@ -50,13 +50,12 @@
virtual ~MediaHTTP();
virtual sp<DecryptHandle> DrmInitialization(const char* mime);
- virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
virtual String8 getUri();
virtual String8 getMIMEType() const;
private:
status_t mInitCheck;
- sp<IMediaHTTPConnection> mHTTPConnection;
+ sp<MediaHTTPConnection> mHTTPConnection;
KeyedVector<String8, String8> mLastHeaders;
AString mLastURI;
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxer.h b/media/libstagefright/include/media/stagefright/MediaMuxer.h
index 63c3ca5..66f4d72 100644
--- a/media/libstagefright/include/media/stagefright/MediaMuxer.h
+++ b/media/libstagefright/include/media/stagefright/MediaMuxer.h
@@ -48,6 +48,7 @@
OUTPUT_FORMAT_MPEG_4 = 0,
OUTPUT_FORMAT_WEBM = 1,
OUTPUT_FORMAT_THREE_GPP = 2,
+ OUTPUT_FORMAT_HEIF = 3,
OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
};
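With OUTPUT_FORMAT_HEIF added above, a HEIF muxer would be created like any other output format. The fd-plus-format constructor is assumed from the existing MediaMuxer API and is not shown in this hunk; the sketch below is illustrative only.

// Sketch only; the MediaMuxer(int fd, OutputFormat) constructor is assumed.
#include <media/stagefright/MediaMuxer.h>

namespace android {

sp<MediaMuxer> makeHeifMuxer(int fd) {
    return new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_HEIF);
}

}  // namespace android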
diff --git a/media/libstagefright/include/media/stagefright/MediaSource.h b/media/libstagefright/include/media/stagefright/MediaSource.h
deleted file mode 100644
index 14adb05..0000000
--- a/media/libstagefright/include/media/stagefright/MediaSource.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_SOURCE_H_
-
-#define MEDIA_SOURCE_H_
-
-#include <sys/types.h>
-
-#include <media/IMediaSource.h>
-#include <media/stagefright/MediaErrors.h>
-#include <utils/RefBase.h>
-#include <utils/Vector.h>
-
-namespace android {
-
-class MediaBuffer;
-class MetaData;
-
-struct MediaSource : public BnMediaSource {
- MediaSource();
-
- // To be called before any other methods on this object, except
- // getFormat().
- virtual status_t start(MetaData *params = NULL) = 0;
-
- // Any blocking read call returns immediately with a result of NO_INIT.
- // It is an error to call any methods other than start after this call
- // returns. Any buffers the object may be holding onto at the time of
- // the stop() call are released.
- // Also, it is imperative that any buffers output by this object and
- // held onto by callers be released before a call to stop() !!!
- virtual status_t stop() = 0;
-
- // Returns the format of the data output by this media source.
- virtual sp<MetaData> getFormat() = 0;
-
- // Returns a new buffer of data. Call blocks until a
- // buffer is available, an error is encountered of the end of the stream
- // is reached.
- // End of stream is signalled by a result of ERROR_END_OF_STREAM.
- // A result of INFO_FORMAT_CHANGED indicates that the format of this
- // MediaSource has changed mid-stream, the client can continue reading
- // but should be prepared for buffers of the new configuration.
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL) = 0;
-
- // Causes this source to suspend pulling data from its upstream source
- // until a subsequent read-with-seek. This is currently not supported
- // as such by any source. E.g. MediaCodecSource does not suspend its
- // upstream source, and instead discard upstream data while paused.
- virtual status_t pause() {
- return ERROR_UNSUPPORTED;
- }
-
- // The consumer of this media source requests that the given buffers
- // are to be returned exclusively in response to read calls.
- // This will be called after a successful start() and before the
- // first read() call.
- // Callee assumes ownership of the buffers if no error is returned.
- virtual status_t setBuffers(const Vector<MediaBuffer *> & /* buffers */) {
- return ERROR_UNSUPPORTED;
- }
-
- // The consumer of this media source requests the source stops sending
- // buffers with timestamp larger than or equal to stopTimeUs. stopTimeUs
- // must be in the same time base as the startTime passed in start(). If
- // source does not support this request, ERROR_UNSUPPORTED will be returned.
- // If stopTimeUs is invalid, BAD_VALUE will be returned. This could be
- // called at any time even before source starts and it could be called
- // multiple times. Setting stopTimeUs to be -1 will effectively cancel the stopTimeUs
- // set previously. If stopTimeUs is larger than or equal to last buffer's timestamp,
- // source will start to drop buffer when it gets a buffer with timestamp larger
- // than or equal to stopTimeUs. If stopTimeUs is smaller than or equal to last
- // buffer's timestamp, source will drop all the incoming buffers immediately.
- // After setting stopTimeUs, source may still stop sending buffers with timestamp
- // less than stopTimeUs if it is stopped by the consumer.
- virtual status_t setStopTimeUs(int64_t /* stopTimeUs */) {
- return ERROR_UNSUPPORTED;
- }
-
-protected:
- virtual ~MediaSource();
-
-private:
- MediaSource(const MediaSource &);
- MediaSource &operator=(const MediaSource &);
-};
-
-} // namespace android
-
-#endif // MEDIA_SOURCE_H_
diff --git a/media/libstagefright/include/media/stagefright/MediaWriter.h b/media/libstagefright/include/media/stagefright/MediaWriter.h
index cd4af4d..2c12a87 100644
--- a/media/libstagefright/include/media/stagefright/MediaWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaWriter.h
@@ -19,20 +19,18 @@
#define MEDIA_WRITER_H_
#include <utils/RefBase.h>
+#include <media/MediaSource.h>
#include <media/IMediaRecorderClient.h>
-#include <media/IMediaSource.h>
namespace android {
-class MetaData;
-
struct MediaWriter : public RefBase {
MediaWriter()
: mMaxFileSizeLimitBytes(0),
mMaxFileDurationLimitUs(0) {
}
- virtual status_t addSource(const sp<IMediaSource> &source) = 0;
+ virtual status_t addSource(const sp<MediaSource> &source) = 0;
virtual bool reachedEOS() = 0;
virtual status_t start(MetaData *params = NULL) = 0;
virtual status_t stop() = 0;
diff --git a/media/libstagefright/include/media/stagefright/MetaData.h b/media/libstagefright/include/media/stagefright/MetaData.h
deleted file mode 100644
index 6cfde9c..0000000
--- a/media/libstagefright/include/media/stagefright/MetaData.h
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef META_DATA_H_
-
-#define META_DATA_H_
-
-#include <sys/types.h>
-
-#include <stdint.h>
-
-#include <binder/Parcel.h>
-#include <utils/RefBase.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-
-namespace android {
-
-// The following keys map to int32_t data unless indicated otherwise.
-enum {
- kKeyMIMEType = 'mime', // cstring
- kKeyWidth = 'widt', // int32_t, image pixel
- kKeyHeight = 'heig', // int32_t, image pixel
- kKeyDisplayWidth = 'dWid', // int32_t, display/presentation
- kKeyDisplayHeight = 'dHgt', // int32_t, display/presentation
- kKeySARWidth = 'sarW', // int32_t, sampleAspectRatio width
- kKeySARHeight = 'sarH', // int32_t, sampleAspectRatio height
- kKeyThumbnailWidth = 'thbW', // int32_t, thumbnail width
- kKeyThumbnailHeight = 'thbH', // int32_t, thumbnail height
-
- // a rectangle, if absent assumed to be (0, 0, width - 1, height - 1)
- kKeyCropRect = 'crop',
-
- kKeyRotation = 'rotA', // int32_t (angle in degrees)
- kKeyIFramesInterval = 'ifiv', // int32_t
- kKeyStride = 'strd', // int32_t
- kKeySliceHeight = 'slht', // int32_t
- kKeyChannelCount = '#chn', // int32_t
- kKeyChannelMask = 'chnm', // int32_t
- kKeySampleRate = 'srte', // int32_t (audio sampling rate Hz)
- kKeyPcmEncoding = 'PCMe', // int32_t (audio encoding enum)
- kKeyFrameRate = 'frmR', // int32_t (video frame rate fps)
- kKeyBitRate = 'brte', // int32_t (bps)
- kKeyMaxBitRate = 'mxBr', // int32_t (bps)
- kKeyStreamHeader = 'stHd', // raw data
- kKeyESDS = 'esds', // raw data
- kKeyAACProfile = 'aacp', // int32_t
- kKeyAVCC = 'avcc', // raw data
- kKeyHVCC = 'hvcc', // raw data
- kKeyThumbnailHVCC = 'thvc', // raw data
- kKeyD263 = 'd263', // raw data
- kKeyVorbisInfo = 'vinf', // raw data
- kKeyVorbisBooks = 'vboo', // raw data
- kKeyOpusHeader = 'ohdr', // raw data
- kKeyOpusCodecDelay = 'ocod', // uint64_t (codec delay in ns)
- kKeyOpusSeekPreRoll = 'ospr', // uint64_t (seek preroll in ns)
- kKeyFlacMetadata = 'flMd', // raw data
- kKeyVp9CodecPrivate = 'vp9p', // raw data (vp9 csd information)
- kKeyWantsNALFragments = 'NALf',
- kKeyIsSyncFrame = 'sync', // int32_t (bool)
- kKeyIsCodecConfig = 'conf', // int32_t (bool)
- kKeyTime = 'time', // int64_t (usecs)
- kKeyDecodingTime = 'decT', // int64_t (decoding timestamp in usecs)
- kKeyNTPTime = 'ntpT', // uint64_t (ntp-timestamp)
- kKeyTargetTime = 'tarT', // int64_t (usecs)
- kKeyDriftTime = 'dftT', // int64_t (usecs)
- kKeyAnchorTime = 'ancT', // int64_t (usecs)
- kKeyDuration = 'dura', // int64_t (usecs)
- kKeyPixelFormat = 'pixf', // int32_t
- kKeyColorFormat = 'colf', // int32_t
- kKeyColorSpace = 'cols', // int32_t
- kKeyPlatformPrivate = 'priv', // pointer
- kKeyDecoderComponent = 'decC', // cstring
- kKeyBufferID = 'bfID',
- kKeyMaxInputSize = 'inpS',
- kKeyMaxWidth = 'maxW',
- kKeyMaxHeight = 'maxH',
- kKeyThumbnailTime = 'thbT', // int64_t (usecs)
- kKeyTrackID = 'trID',
- kKeyIsDRM = 'idrm', // int32_t (bool)
- kKeyEncoderDelay = 'encd', // int32_t (frames)
- kKeyEncoderPadding = 'encp', // int32_t (frames)
-
- kKeyAlbum = 'albu', // cstring
- kKeyArtist = 'arti', // cstring
- kKeyAlbumArtist = 'aart', // cstring
- kKeyComposer = 'comp', // cstring
- kKeyGenre = 'genr', // cstring
- kKeyTitle = 'titl', // cstring
- kKeyYear = 'year', // cstring
- kKeyAlbumArt = 'albA', // compressed image data
- kKeyAlbumArtMIME = 'alAM', // cstring
- kKeyAuthor = 'auth', // cstring
- kKeyCDTrackNumber = 'cdtr', // cstring
- kKeyDiscNumber = 'dnum', // cstring
- kKeyDate = 'date', // cstring
- kKeyWriter = 'writ', // cstring
- kKeyCompilation = 'cpil', // cstring
- kKeyLocation = 'loc ', // cstring
- kKeyTimeScale = 'tmsl', // int32_t
- kKeyCaptureFramerate = 'capF', // float (capture fps)
-
- // video profile and level
- kKeyVideoProfile = 'vprf', // int32_t
- kKeyVideoLevel = 'vlev', // int32_t
-
- // Set this key to enable authoring files in 64-bit offset
- kKey64BitFileOffset = 'fobt', // int32_t (bool)
- kKey2ByteNalLength = '2NAL', // int32_t (bool)
-
- // Identify the file output format for authoring
- // Please see <media/mediarecorder.h> for the supported
- // file output formats.
- kKeyFileType = 'ftyp', // int32_t
-
- // Track authoring progress status
- // kKeyTrackTimeStatus is used to track progress in elapsed time
- kKeyTrackTimeStatus = 'tktm', // int64_t
-
- kKeyRealTimeRecording = 'rtrc', // bool (int32_t)
- kKeyNumBuffers = 'nbbf', // int32_t
-
- // Ogg files can be tagged to be automatically looping...
- kKeyAutoLoop = 'autL', // bool (int32_t)
-
- kKeyValidSamples = 'valD', // int32_t
-
- kKeyIsUnreadable = 'unre', // bool (int32_t)
-
- // An indication that a video buffer has been rendered.
- kKeyRendered = 'rend', // bool (int32_t)
-
- // The language code for this media
- kKeyMediaLanguage = 'lang', // cstring
-
- // To store the timed text format data
- kKeyTextFormatData = 'text', // raw data
-
- kKeyRequiresSecureBuffers = 'secu', // bool (int32_t)
-
- kKeyIsADTS = 'adts', // bool (int32_t)
- kKeyAACAOT = 'aaot', // int32_t
-
- // If a MediaBuffer's data represents (at least partially) encrypted
- // data, the following fields aid in decryption.
- // The data can be thought of as pairs of plain and encrypted data
- // fragments, i.e. plain and encrypted data alternate.
- // The first fragment is by convention plain data (if that's not the
- // case, simply specify plain fragment size of 0).
- // kKeyEncryptedSizes and kKeyPlainSizes each map to an array of
- // size_t values. The sum total of all size_t values of both arrays
- // must equal the amount of data (i.e. MediaBuffer's range_length()).
- // If both arrays are present, they must be of the same size.
- // If only encrypted sizes are present it is assumed that all
- // plain sizes are 0, i.e. all fragments are encrypted.
- // To programmatically set these array, use the MetaData::setData API, i.e.
- // const size_t encSizes[];
- // meta->setData(
- // kKeyEncryptedSizes, 0 /* type */, encSizes, sizeof(encSizes));
- // A plain sizes array by itself makes no sense.
- kKeyEncryptedSizes = 'encr', // size_t[]
- kKeyPlainSizes = 'plai', // size_t[]
- kKeyCryptoKey = 'cryK', // uint8_t[16]
- kKeyCryptoIV = 'cryI', // uint8_t[16]
- kKeyCryptoMode = 'cryM', // int32_t
-
- kKeyCryptoDefaultIVSize = 'cryS', // int32_t
-
- kKeyPssh = 'pssh', // raw data
- kKeyCASystemID = 'caid', // int32_t
- kKeyCASessionID = 'seid', // raw data
-
- // Please see MediaFormat.KEY_IS_AUTOSELECT.
- kKeyTrackIsAutoselect = 'auto', // bool (int32_t)
- // Please see MediaFormat.KEY_IS_DEFAULT.
- kKeyTrackIsDefault = 'dflt', // bool (int32_t)
- // Similar to MediaFormat.KEY_IS_FORCED_SUBTITLE but pertains to av tracks as well.
- kKeyTrackIsForced = 'frcd', // bool (int32_t)
-
- // H264 supplemental enhancement information offsets/sizes
- kKeySEI = 'sei ', // raw data
-
- // MPEG user data offsets
- kKeyMpegUserData = 'mpud', // size_t[]
-
- // Size of NALU length in mkv/mp4
- kKeyNalLengthSize = 'nals', // int32_t
-
- // HDR related
- kKeyHdrStaticInfo = 'hdrS', // HDRStaticInfo
-
- // color aspects
- kKeyColorRange = 'cRng', // int32_t, color range, value defined by ColorAspects.Range
- kKeyColorPrimaries = 'cPrm', // int32_t,
- // color Primaries, value defined by ColorAspects.Primaries
- kKeyTransferFunction = 'tFun', // int32_t,
- // transfer Function, value defined by ColorAspects.Transfer.
- kKeyColorMatrix = 'cMtx', // int32_t,
- // color Matrix, value defined by ColorAspects.MatrixCoeffs.
- kKeyTemporalLayerId = 'iLyr', // int32_t, temporal layer-id. 0-based (0 => base layer)
- kKeyTemporalLayerCount = 'cLyr', // int32_t, number of temporal layers encoded
-
- kKeyGridWidth = 'grdW', // int32_t, HEIF grid width
- kKeyGridHeight = 'grdH', // int32_t, HEIF grid height
- kKeyIccProfile = 'prof', // raw data, ICC prifile data
-};
-
-enum {
- kTypeESDS = 'esds',
- kTypeAVCC = 'avcc',
- kTypeHVCC = 'hvcc',
- kTypeD263 = 'd263',
-};
-
-class MetaData : public RefBase {
-public:
- MetaData();
- MetaData(const MetaData &from);
-
- enum Type {
- TYPE_NONE = 'none',
- TYPE_C_STRING = 'cstr',
- TYPE_INT32 = 'in32',
- TYPE_INT64 = 'in64',
- TYPE_FLOAT = 'floa',
- TYPE_POINTER = 'ptr ',
- TYPE_RECT = 'rect',
- };
-
- void clear();
- bool remove(uint32_t key);
-
- bool setCString(uint32_t key, const char *value);
- bool setInt32(uint32_t key, int32_t value);
- bool setInt64(uint32_t key, int64_t value);
- bool setFloat(uint32_t key, float value);
- bool setPointer(uint32_t key, void *value);
-
- bool setRect(
- uint32_t key,
- int32_t left, int32_t top,
- int32_t right, int32_t bottom);
-
- bool findCString(uint32_t key, const char **value);
- bool findInt32(uint32_t key, int32_t *value);
- bool findInt64(uint32_t key, int64_t *value);
- bool findFloat(uint32_t key, float *value);
- bool findPointer(uint32_t key, void **value);
-
- bool findRect(
- uint32_t key,
- int32_t *left, int32_t *top,
- int32_t *right, int32_t *bottom);
-
- bool setData(uint32_t key, uint32_t type, const void *data, size_t size);
-
- bool findData(uint32_t key, uint32_t *type,
- const void **data, size_t *size) const;
-
- bool hasData(uint32_t key) const;
-
- String8 toString() const;
- void dumpToLog() const;
-
- status_t writeToParcel(Parcel &parcel);
- status_t updateFromParcel(const Parcel &parcel);
- static sp<MetaData> createFromParcel(const Parcel &parcel);
-
-protected:
- virtual ~MetaData();
-
-private:
- struct typed_data {
- typed_data();
- ~typed_data();
-
- typed_data(const MetaData::typed_data &);
- typed_data &operator=(const MetaData::typed_data &);
-
- void clear();
- void setData(uint32_t type, const void *data, size_t size);
- void getData(uint32_t *type, const void **data, size_t *size) const;
- // may include hexdump of binary data if verbose=true
- String8 asString(bool verbose) const;
-
- private:
- uint32_t mType;
- size_t mSize;
-
- union {
- void *ext_data;
- float reservoir;
- } u;
-
- bool usesReservoir() const {
- return mSize <= sizeof(u.reservoir);
- }
-
- void *allocateStorage(size_t size);
- void freeStorage();
-
- void *storage() {
- return usesReservoir() ? &u.reservoir : u.ext_data;
- }
-
- const void *storage() const {
- return usesReservoir() ? &u.reservoir : u.ext_data;
- }
- };
-
- struct Rect {
- int32_t mLeft, mTop, mRight, mBottom;
- };
-
- KeyedVector<uint32_t, typed_data> mItems;
-
- // MetaData &operator=(const MetaData &);
-};
-
-} // namespace android
-
-#endif // META_DATA_H_
diff --git a/media/libstagefright/include/media/stagefright/MetaData.h b/media/libstagefright/include/media/stagefright/MetaData.h
new file mode 120000
index 0000000..160f8d3
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MetaData.h
@@ -0,0 +1 @@
+../../../../libmediaextractor/include/media/stagefright/MetaData.h
\ No newline at end of file
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
new file mode 120000
index 0000000..1e12193
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -0,0 +1 @@
+../../../../libmediaextractor/include/media/stagefright/MetaDataBase.h
\ No newline at end of file
diff --git a/media/libstagefright/include/media/stagefright/MetaDataUtils.h b/media/libstagefright/include/media/stagefright/MetaDataUtils.h
new file mode 100644
index 0000000..d5a8080
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/MetaDataUtils.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef META_DATA_UTILS_H_
+
+#define META_DATA_UTILS_H_
+
+#include <media/stagefright/MetaData.h>
+
+namespace android {
+
+struct ABuffer;
+bool MakeAVCCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size);
+bool MakeAACCodecSpecificData(MetaDataBase &meta, unsigned profile, unsigned sampling_freq_index,
+ unsigned channel_configuration);
+
+} // namespace android
+
+#endif // META_DATA_UTILS_H_
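A hedged example of calling the second helper declared above: the index values follow the standard MPEG-4 audio sampling-frequency table (index 4 is 44100 Hz), but exactly which keys MakeAACCodecSpecificData() writes into the MetaDataBase is up to the implementation, which is not part of this header.

// Sketch only; parameter meanings assumed from ADTS/MPEG-4 audio conventions.
#include <media/stagefright/MetaDataUtils.h>

namespace android {

bool describeAacLcTrack(MetaDataBase &meta) {
    const unsigned kAacLc = 2;            // AOT for AAC-LC
    const unsigned kFreqIndex44100 = 4;   // sampling-frequency index for 44100 Hz
    const unsigned kStereo = 2;           // channel configuration
    return MakeAACCodecSpecificData(meta, kAacLc, kFreqIndex44100, kStereo);
}

}  // namespace android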
diff --git a/media/libstagefright/include/media/stagefright/NdkUtils.h b/media/libstagefright/include/media/stagefright/NdkUtils.h
new file mode 100644
index 0000000..a68884a
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/NdkUtils.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NDK_UTILS_H_
+
+#define NDK_UTILS_H_
+
+#include <media/stagefright/MetaData.h>
+#include <media/NdkWrapper.h>
+
+namespace android {
+
+sp<MetaData> convertMediaFormatWrapperToMetaData(
+ const sp<AMediaFormatWrapper> &fmt);
+
+} // namespace android
+
+#endif // NDK_UTILS_H_
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index 6a93bd5..54a7095 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -17,9 +17,11 @@
#ifndef NU_MEDIA_EXTRACTOR_H_
#define NU_MEDIA_EXTRACTOR_H_
+#include <list>
+#include <media/mediaplayer.h>
#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/MediaSource.h>
#include <media/IMediaExtractor.h>
+#include <media/MediaSource.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
@@ -32,7 +34,7 @@
struct ABuffer;
struct AMessage;
class DataSource;
-struct IMediaHTTPService;
+struct MediaHTTPService;
class MediaBuffer;
class MediaExtractor;
struct MediaSource;
@@ -52,7 +54,7 @@
NuMediaExtractor();
status_t setDataSource(
- const sp<IMediaHTTPService> &httpService,
+ const sp<MediaHTTPService> &httpService,
const char *path,
const KeyedVector<String8, String8> *headers = NULL);
@@ -62,12 +64,18 @@
status_t setMediaCas(const HInterfaceToken &casToken);
+ void disconnect();
+
size_t countTracks() const;
status_t getTrackFormat(size_t index, sp<AMessage> *format, uint32_t flags = 0) const;
status_t getFileFormat(sp<AMessage> *format) const;
- status_t selectTrack(size_t index);
+ status_t getExifOffsetSize(off64_t *offset, size_t *size) const;
+
+ status_t selectTrack(size_t index, int64_t startTimeUs = -1ll,
+ MediaSource::ReadOptions::SeekMode mode =
+ MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
status_t unselectTrack(size_t index);
status_t seekTo(
@@ -75,8 +83,13 @@
MediaSource::ReadOptions::SeekMode mode =
MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
+ // Each selected track has a read pointer.
+ // advance() advances the read pointer with the lowest timestamp.
status_t advance();
+ // readSampleData() reads the sample with the lowest timestamp.
status_t readSampleData(const sp<ABuffer> &buffer);
+
+ status_t getSampleSize(size_t *sampleSize);
status_t getSampleTrackIndex(size_t *trackIndex);
status_t getSampleTime(int64_t *sampleTimeUs);
status_t getSampleMeta(sp<MetaData> *sampleMeta);
@@ -96,12 +109,20 @@
kMaxTrackCount = 16384,
};
+ struct Sample {
+ Sample();
+ Sample(MediaBufferBase *buffer, int64_t timeUs);
+ MediaBufferBase *mBuffer;
+ int64_t mSampleTimeUs;
+ };
+
struct TrackInfo {
sp<IMediaSource> mSource;
size_t mTrackIndex;
+ media_track_type mTrackType;
+ size_t mMaxFetchCount;
status_t mFinalResult;
- MediaBuffer *mSample;
- int64_t mSampleTimeUs;
+ std::list<Sample> mSamples;
uint32_t mTrackFlags; // bitmask of "TrackFlags"
};
@@ -117,16 +138,23 @@
int64_t mTotalBitrate; // in bits/sec
int64_t mDurationUs;
- ssize_t fetchTrackSamples(
+ ssize_t fetchAllTrackSamples(
+ int64_t seekTimeUs = -1ll,
+ MediaSource::ReadOptions::SeekMode mode =
+ MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
+ void fetchTrackSamples(
+ TrackInfo *info,
int64_t seekTimeUs = -1ll,
MediaSource::ReadOptions::SeekMode mode =
MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
- void releaseTrackSamples();
+ void releaseOneSample(TrackInfo *info);
+ void releaseTrackSamples(TrackInfo *info);
+ void releaseAllTrackSamples();
bool getTotalBitrate(int64_t *bitRate) const;
status_t updateDurationAndBitrate();
- status_t appendVorbisNumPageSamples(TrackInfo *info, const sp<ABuffer> &buffer);
+ status_t appendVorbisNumPageSamples(MediaBufferBase *mbuf, const sp<ABuffer> &buffer);
DISALLOW_EVIL_CONSTRUCTORS(NuMediaExtractor);
};
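For illustration, a minimal sketch of the client-side read loop implied by the new per-track sample queues: advance() drops the queued sample with the lowest timestamp and readSampleData() returns that same sample. The file path and buffer size are assumptions, not part of the patch.

    sp<NuMediaExtractor> extractor = new NuMediaExtractor();
    if (extractor->setDataSource(nullptr /* no http service */, "/sdcard/clip.mp4") != OK) {
        return;
    }
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        extractor->selectTrack(i);  // default startTimeUs/SeekMode as declared above
    }
    sp<ABuffer> buffer = new ABuffer(1 << 20);
    while (extractor->readSampleData(buffer) == OK) {
        size_t trackIndex;
        int64_t timeUs;
        extractor->getSampleTrackIndex(&trackIndex);
        extractor->getSampleTime(&timeUs);
        // ... hand the buffer to the decoder for trackIndex ...
        extractor->advance();  // move past the lowest-timestamp sample
    }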
diff --git a/media/libstagefright/include/media/stagefright/OMXClient.h b/media/libstagefright/include/media/stagefright/OMXClient.h
index 2f159b0..bb133d3 100644
--- a/media/libstagefright/include/media/stagefright/OMXClient.h
+++ b/media/libstagefright/include/media/stagefright/OMXClient.h
@@ -27,16 +27,10 @@
OMXClient();
status_t connect();
- status_t connect(bool* trebleFlag);
- status_t connect(const char* name, bool* trebleFlag = nullptr);
-
- status_t connectLegacy();
- status_t connectTreble(const char* name = "default");
+ status_t connect(const char* name);
void disconnect();
- sp<IOMX> interface() {
- return mOMX;
- }
+ sp<IOMX> interface();
private:
sp<IOMX> mOMX;
diff --git a/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h b/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h
index 1b4d873..28f6094 100644
--- a/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h
+++ b/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h
@@ -25,6 +25,7 @@
class OmxInfoBuilder : public MediaCodecListBuilderBase {
public:
OmxInfoBuilder();
+ ~OmxInfoBuilder() override = default;
status_t buildMediaCodecList(MediaCodecListWriter* writer) override;
};
diff --git a/media/libstagefright/include/media/stagefright/PersistentSurface.h b/media/libstagefright/include/media/stagefright/PersistentSurface.h
index d8b75a2..49b36c9 100644
--- a/media/libstagefright/include/media/stagefright/PersistentSurface.h
+++ b/media/libstagefright/include/media/stagefright/PersistentSurface.h
@@ -18,22 +18,34 @@
#define PERSISTENT_SURFACE_H_
-#include <gui/IGraphicBufferProducer.h>
#include <android/IGraphicBufferSource.h>
-#include <media/stagefright/foundation/ABase.h>
#include <binder/Parcel.h>
+#include <hidl/HidlSupport.h>
+#include <hidl/HybridInterface.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <media/stagefright/foundation/ABase.h>
+
+using android::hidl::base::V1_0::IBase;
namespace android {
struct PersistentSurface : public RefBase {
PersistentSurface() {}
+ // create an OMX persistent surface
PersistentSurface(
const sp<IGraphicBufferProducer>& bufferProducer,
const sp<IGraphicBufferSource>& bufferSource) :
mBufferProducer(bufferProducer),
mBufferSource(bufferSource) { }
+ // create a HIDL persistent surface
+ PersistentSurface(
+ const sp<IGraphicBufferProducer>& bufferProducer,
+ const sp<IBase>& hidlTarget) :
+ mBufferProducer(bufferProducer),
+ mHidlTarget(hidlTarget) { }
+
sp<IGraphicBufferProducer> getBufferProducer() const {
return mBufferProducer;
}
@@ -42,9 +54,25 @@
return mBufferSource;
}
+ sp<IBase> getHidlTarget() const {
+ return mHidlTarget;
+ }
+
status_t writeToParcel(Parcel *parcel) const {
parcel->writeStrongBinder(IInterface::asBinder(mBufferProducer));
+ // this can handle null
parcel->writeStrongBinder(IInterface::asBinder(mBufferSource));
+ // write hidl target
+ if (mHidlTarget != nullptr) {
+ HalToken token;
+ bool result = createHalToken(mHidlTarget, &token);
+ parcel->writeBool(result);
+ if (result) {
+ parcel->writeByteArray(token.size(), token.data());
+ }
+ } else {
+ parcel->writeBool(false);
+ }
return NO_ERROR;
}
@@ -53,12 +81,24 @@
parcel->readStrongBinder());
mBufferSource = interface_cast<IGraphicBufferSource>(
parcel->readStrongBinder());
+ // read hidl target
+ bool haveHidlTarget = parcel->readBool();
+ if (haveHidlTarget) {
+ std::vector<uint8_t> tokenVector;
+ parcel->readByteVector(&tokenVector);
+ HalToken token = HalToken(tokenVector);
+ mHidlTarget = retrieveHalInterface(token);
+ deleteHalToken(token);
+ } else {
+ mHidlTarget.clear();
+ }
return NO_ERROR;
}
private:
sp<IGraphicBufferProducer> mBufferProducer;
sp<IGraphicBufferSource> mBufferSource;
+ sp<IBase> mHidlTarget;
DISALLOW_EVIL_CONSTRUCTORS(PersistentSurface);
};
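For illustration, a minimal sketch of the Parcel round trip enabled by the new HIDL path; producer and hidlTarget are assumed to come from whichever component created the surface:

    sp<PersistentSurface> out = new PersistentSurface(producer, hidlTarget);
    Parcel parcel;
    out->writeToParcel(&parcel);   // producer + (null) buffer source + HAL token

    parcel.setDataPosition(0);
    sp<PersistentSurface> in = new PersistentSurface();
    in->readFromParcel(&parcel);   // re-creates the HIDL interface from the token
    sp<IBase> target = in->getHidlTarget();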
diff --git a/media/libstagefright/include/media/stagefright/RemoteDataSource.h b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
index c91ddfc..e191e6a 100644
--- a/media/libstagefright/include/media/stagefright/RemoteDataSource.h
+++ b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
@@ -19,8 +19,8 @@
#include <binder/IMemory.h>
#include <binder/MemoryDealer.h>
+#include <media/DataSource.h>
#include <media/IDataSource.h>
-#include <media/stagefright/DataSource.h>
namespace android {
diff --git a/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h b/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
new file mode 100644
index 0000000..509e669
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef REMOTE_MEDIA_EXTRACTOR_H_
+#define REMOTE_MEDIA_EXTRACTOR_H_
+
+#include <media/IMediaExtractor.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+class MediaAnalyticsItem;
+
+// IMediaExtractor wrapper to the MediaExtractor.
+class RemoteMediaExtractor : public BnMediaExtractor {
+public:
+ static sp<IMediaExtractor> wrap(
+ MediaExtractor *extractor,
+ const sp<DataSource> &source,
+ const sp<RefBase> &plugin);
+
+ virtual ~RemoteMediaExtractor();
+ virtual size_t countTracks();
+ virtual sp<IMediaSource> getTrack(size_t index);
+ virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags = 0);
+ virtual sp<MetaData> getMetaData();
+ virtual status_t getMetrics(Parcel *reply);
+ virtual uint32_t flags() const;
+ virtual status_t setMediaCas(const HInterfaceToken &casToken);
+ virtual const char * name();
+
+private:
+ MediaExtractor *mExtractor;
+ sp<DataSource> mSource;
+ sp<RefBase> mExtractorPlugin;
+
+ MediaAnalyticsItem *mAnalyticsItem;
+
+ explicit RemoteMediaExtractor(
+ MediaExtractor *extractor,
+ const sp<DataSource> &source,
+ const sp<RefBase> &plugin);
+
+ DISALLOW_EVIL_CONSTRUCTORS(RemoteMediaExtractor);
+};
+
+} // namespace android
+
+#endif // REMOTE_MEDIA_EXTRACTOR_H_
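For illustration, a minimal sketch of how the wrapper above might be used inside the extractor service; extractor, dataSource and plugin are assumed to already exist there:

    sp<IMediaExtractor> remote =
            RemoteMediaExtractor::wrap(extractor, dataSource, plugin);
    size_t trackCount = remote->countTracks();
    sp<IMediaSource> firstTrack =
            (trackCount > 0) ? remote->getTrack(0) : nullptr;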
diff --git a/media/libstagefright/include/media/stagefright/RemoteMediaSource.h b/media/libstagefright/include/media/stagefright/RemoteMediaSource.h
new file mode 100644
index 0000000..1d720af
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/RemoteMediaSource.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef REMOTE_MEDIA_SOURCE_H_
+#define REMOTE_MEDIA_SOURCE_H_
+
+#include <media/IMediaSource.h>
+#include <media/MediaSource.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+// IMediaSource wrapper to the MediaSource.
+class RemoteMediaSource : public BnMediaSource {
+public:
+ static sp<IMediaSource> wrap(
+ const sp<RemoteMediaExtractor> &extractor,
+ MediaTrack *source,
+ const sp<RefBase> &plugin);
+ virtual ~RemoteMediaSource();
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual sp<MetaData> getFormat();
+ virtual status_t read(
+ MediaBufferBase **buffer,
+ const MediaSource::ReadOptions *options = NULL);
+ virtual status_t pause();
+ virtual status_t setStopTimeUs(int64_t stopTimeUs);
+
+private:
+ sp<RemoteMediaExtractor> mExtractor;
+ MediaTrack *mSource;
+ sp<RefBase> mExtractorPlugin;
+
+ explicit RemoteMediaSource(
+ const sp<RemoteMediaExtractor> &extractor,
+ MediaTrack *source,
+ const sp<RefBase> &plugin);
+
+ DISALLOW_EVIL_CONSTRUCTORS(RemoteMediaSource);
+};
+
+} // namespace android
+
+#endif // REMOTE_MEDIA_SOURCE_H_
diff --git a/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h b/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h
index a000fde..23defb4 100644
--- a/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h
+++ b/media/libstagefright/include/media/stagefright/SimpleDecodingSource.h
@@ -17,7 +17,7 @@
#ifndef SIMPLE_DECODING_SOURCE_H_
#define SIMPLE_DECODING_SOURCE_H_
-#include <media/stagefright/MediaSource.h>
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/AString.h>
#include <media/stagefright/foundation/Mutexed.h>
@@ -45,12 +45,13 @@
// does not support secure input or pausing.
// if |desiredCodec| is given, use this specific codec.
static sp<SimpleDecodingSource> Create(
- const sp<IMediaSource> &source, uint32_t flags,
+ const sp<MediaSource> &source, uint32_t flags,
const sp<ANativeWindow> &nativeWindow,
- const char *desiredCodec = NULL);
+ const char *desiredCodec = NULL,
+ bool skipMediaCodecList = false);
static sp<SimpleDecodingSource> Create(
- const sp<IMediaSource> &source, uint32_t flags = 0);
+ const sp<MediaSource> &source, uint32_t flags = 0);
virtual ~SimpleDecodingSource();
@@ -64,20 +65,19 @@
virtual sp<MetaData> getFormat();
// reads from the source. This call always blocks.
- virtual status_t read(MediaBuffer **buffer, const ReadOptions *options);
+ virtual status_t read(MediaBufferBase **buffer, const ReadOptions *options);
// unsupported methods
virtual status_t pause() { return INVALID_OPERATION; }
- virtual status_t setBuffers(const Vector<MediaBuffer *> &) { return INVALID_OPERATION; }
private:
// Construct this using a codec, source and looper.
SimpleDecodingSource(
- const sp<MediaCodec> &codec, const sp<IMediaSource> &source, const sp<ALooper> &looper,
+ const sp<MediaCodec> &codec, const sp<MediaSource> &source, const sp<ALooper> &looper,
bool usingSurface, bool isVorbis, const sp<AMessage> &format);
sp<MediaCodec> mCodec;
- sp<IMediaSource> mSource;
+ sp<MediaSource> mSource;
sp<ALooper> mLooper;
bool mUsingSurface;
bool mIsVorbis;
@@ -104,7 +104,8 @@
// do the actual reading
status_t doRead(
- Mutexed<ProtectedState>::Locked &me, MediaBuffer **buffer, const ReadOptions *options);
+ Mutexed<ProtectedState>::Locked &me, MediaBufferBase **buffer,
+ const ReadOptions *options);
};
} // namespace android
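For illustration, a minimal sketch of decoding with the revised interface, which now takes a plain MediaSource and hands back MediaBufferBase; 'track' is assumed to be a source obtained from an extractor:

    sp<SimpleDecodingSource> decoder =
            SimpleDecodingSource::Create(track, 0 /* flags */);
    if (decoder == nullptr || decoder->start() != OK) {
        return;
    }
    MediaBufferBase *buffer = nullptr;
    while (decoder->read(&buffer, nullptr /* options */) == OK) {
        // ... consume the decoded frame ...
        buffer->release();
        buffer = nullptr;
    }
    decoder->stop();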
diff --git a/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h b/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h
deleted file mode 100644
index d1677fa..0000000
--- a/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_GUI_SURFACEMEDIASOURCE_H
-#define ANDROID_GUI_SURFACEMEDIASOURCE_H
-
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/BufferQueue.h>
-
-#include <utils/threads.h>
-#include <utils/Vector.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaBuffer.h>
-
-#include <media/hardware/MetadataBufferType.h>
-
-#include "foundation/ABase.h"
-
-namespace android {
-// ----------------------------------------------------------------------------
-
-class String8;
-class GraphicBuffer;
-
-// ASSUMPTIONS
-// 1. SurfaceMediaSource is initialized with width*height which
-// can never change. However, dequeue buffer does not currently
-// enforce this as in BufferQueue, dequeue can be used by Surface
-// which can modify the default width and height. Also neither the width
-// nor height can be 0.
-// 2. setSynchronousMode is never used (basically no one should call
-// setSynchronousMode(false))
-// 3. setCrop, setTransform, setScalingMode should never be used
-// 4. queueBuffer returns a filled buffer to the SurfaceMediaSource. In addition, a
-// timestamp must be provided for the buffer. The timestamp is in
-// nanoseconds, and must be monotonically increasing. Its other semantics
-// (zero point, etc) are client-dependent and should be documented by the
-// client.
-// 5. Once disconnected, SurfaceMediaSource can be reused (can not
-// connect again)
-// 6. Stop is a hard stop, the last few frames held by the encoder
-// may be dropped. It is possible to wait for the buffers to be
-// returned (but not implemented)
-
-#define DEBUG_PENDING_BUFFERS 0
-
-class SurfaceMediaSource : public MediaSource,
- public MediaBufferObserver,
- protected ConsumerListener {
-public:
- enum { MIN_UNDEQUEUED_BUFFERS = 4};
-
- struct FrameAvailableListener : public virtual RefBase {
- // onFrameAvailable() is called from queueBuffer() if the FIFO is
- // empty. You can use SurfaceMediaSource::getQueuedCount() to
- // figure out if there are more frames waiting.
- // This is called without any lock held and can be called concurrently by
- // multiple threads.
- virtual void onFrameAvailable() = 0;
- };
-
- SurfaceMediaSource(uint32_t bufferWidth, uint32_t bufferHeight);
-
- virtual ~SurfaceMediaSource();
-
- // For the MediaSource interface for use by StageFrightRecorder:
- virtual status_t start(MetaData *params = NULL);
- virtual status_t stop();
- virtual status_t read(MediaBuffer **buffer,
- const ReadOptions *options = NULL);
- virtual sp<MetaData> getFormat();
-
- // Get / Set the frame rate used for encoding. Default fps = 30
- status_t setFrameRate(int32_t fps) ;
- int32_t getFrameRate( ) const;
-
- // The call for the StageFrightRecorder to tell us that
- // it is done using the MediaBuffer data so that its state
- // can be set to FREE for dequeuing
- virtual void signalBufferReturned(MediaBuffer* buffer);
- // end of MediaSource interface
-
- // getTimestamp retrieves the timestamp associated with the image
- // set by the most recent call to read()
- //
- // The timestamp is in nanoseconds, and is monotonically increasing. Its
- // other semantics (zero point, etc) are source-dependent and should be
- // documented by the source.
- int64_t getTimestamp();
-
- // setFrameAvailableListener sets the listener object that will be notified
- // when a new frame becomes available.
- void setFrameAvailableListener(const sp<FrameAvailableListener>& listener);
-
- // dump our state in a String
- void dumpState(String8& result) const;
- void dumpState(String8& result, const char* prefix, char* buffer,
- size_t SIZE) const;
-
- // metaDataStoredInVideoBuffers tells the encoder what kind of metadata
- // is passed through the buffers. Currently, it is set to ANWBuffer
- MetadataBufferType metaDataStoredInVideoBuffers() const;
-
- sp<IGraphicBufferProducer> getProducer() const { return mProducer; }
-
- // To be called before start()
- status_t setMaxAcquiredBufferCount(size_t count);
-
- // To be called before start()
- status_t setUseAbsoluteTimestamps();
-
-protected:
-
- // Implementation of the BufferQueue::ConsumerListener interface. These
- // calls are used to notify the Surface of asynchronous events in the
- // BufferQueue.
- virtual void onFrameAvailable(const BufferItem& item);
-
- // Used as a hook to BufferQueue::disconnect()
- // This is called by the client side when it is done
- // TODO: Currently, this also sets mStopped to true which
- // is needed for unblocking the encoder which might be
- // waiting to read more frames. So if on the client side,
- // the same thread supplies the frames and also calls stop
- // on the encoder, the client has to call disconnect before
- // it calls stop.
- // In the case of the camera,
- // that need not be required since the thread supplying the
- // frames is separate from the one calling stop.
- virtual void onBuffersReleased();
-
- // SurfaceMediaSource can't handle sideband streams, so this is not expected
- // to ever be called. Does nothing.
- virtual void onSidebandStreamChanged();
-
- static bool isExternalFormat(uint32_t format);
-
-private:
- // A BufferQueue, represented by these interfaces, is the exchange point
- // between the producer and this consumer
- sp<IGraphicBufferProducer> mProducer;
- sp<IGraphicBufferConsumer> mConsumer;
-
- struct SlotData {
- sp<GraphicBuffer> mGraphicBuffer;
- uint64_t mFrameNumber;
- };
-
- // mSlots caches GraphicBuffers and frameNumbers from the buffer queue
- SlotData mSlots[BufferQueue::NUM_BUFFER_SLOTS];
-
- // The permanent width and height of SMS buffers
- int mWidth;
- int mHeight;
-
- // mCurrentSlot is the buffer slot index of the buffer that is currently
- // being used by buffer consumer
- // (e.g. StageFrightRecorder in the case of SurfaceMediaSource or GLTexture
- // in the case of Surface).
- // It is initialized to INVALID_BUFFER_SLOT,
- // indicating that no buffer slot is currently bound to the texture. Note,
- // however, that a value of INVALID_BUFFER_SLOT does not necessarily mean
- // that no buffer is bound to the texture. A call to setBufferCount will
- // reset mCurrentTexture to INVALID_BUFFER_SLOT.
- int mCurrentSlot;
-
- // mCurrentBuffers is a list of the graphic buffers that are being used by
- // buffer consumer (i.e. the video encoder). It's possible that these
- // buffers are not associated with any buffer slots, so we must track them
- // separately. Buffers are added to this list in read, and removed from
- // this list in signalBufferReturned
- Vector<sp<GraphicBuffer> > mCurrentBuffers;
-
- size_t mNumPendingBuffers;
-
-#if DEBUG_PENDING_BUFFERS
- Vector<MediaBuffer *> mPendingBuffers;
-#endif
-
- // mCurrentTimestamp is the timestamp for the current texture. It
- // gets set to mLastQueuedTimestamp each time updateTexImage is called.
- int64_t mCurrentTimestamp;
-
- // mFrameAvailableListener is the listener object that will be called when a
- // new frame becomes available. If it is not NULL it will be called from
- // queueBuffer.
- sp<FrameAvailableListener> mFrameAvailableListener;
-
- // mMutex is the mutex used to prevent concurrent access to the member
- // variables of SurfaceMediaSource objects. It must be locked whenever the
- // member variables are accessed.
- mutable Mutex mMutex;
-
- ////////////////////////// For MediaSource
- // Set to a default of 30 fps if not specified by the client side
- int32_t mFrameRate;
-
- // mStarted is a flag to check if the recording is going on
- bool mStarted;
-
- // mNumFramesReceived indicates the number of frames received from
- // the client side
- int mNumFramesReceived;
- // mNumFramesEncoded indicates the number of frames passed on to the
- // encoder
- int mNumFramesEncoded;
-
- // mFirstFrameTimestamp is the timestamp of the first received frame.
- // It is used to offset the output timestamps so recording starts at time 0.
- int64_t mFirstFrameTimestamp;
- // mStartTimeNs is the start time passed into the source at start, used to
- // offset timestamps.
- int64_t mStartTimeNs;
-
- size_t mMaxAcquiredBufferCount;
-
- bool mUseAbsoluteTimestamps;
-
- // mFrameAvailableCondition condition used to indicate whether there
- // is a frame available for dequeuing
- Condition mFrameAvailableCondition;
-
- Condition mMediaBuffersAvailableCondition;
-
- // Allocate and return a new MediaBuffer and pass the ANW buffer as metadata into it.
- void passMetadataBuffer_l(MediaBuffer **buffer, ANativeWindowBuffer *bufferHandle) const;
-
- // Avoid copying and equating and default constructor
- DISALLOW_EVIL_CONSTRUCTORS(SurfaceMediaSource);
-};
-
-// ----------------------------------------------------------------------------
-}; // namespace android
-
-#endif // ANDROID_GUI_SURFACEMEDIASOURCE_H
diff --git a/media/libstagefright/include/media/stagefright/SurfaceUtils.h b/media/libstagefright/include/media/stagefright/SurfaceUtils.h
index a7747c7..689e458 100644
--- a/media/libstagefright/include/media/stagefright/SurfaceUtils.h
+++ b/media/libstagefright/include/media/stagefright/SurfaceUtils.h
@@ -24,6 +24,8 @@
namespace android {
+struct HDRStaticInfo;
+
/**
* Configures |nativeWindow| for given |width|x|height|, pixel |format|, |rotation| and |usage|.
* If |reconnect| is true, reconnects to the native window before hand.
@@ -32,6 +34,8 @@
status_t setNativeWindowSizeFormatAndUsage(
ANativeWindow *nativeWindow /* nonnull */,
int width, int height, int format, int rotation, int usage, bool reconnect);
+void setNativeWindowHdrMetadata(
+ ANativeWindow *nativeWindow /* nonnull */, HDRStaticInfo *info /* nonnull */);
status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */);
status_t nativeWindowConnect(ANativeWindow *surface, const char *reason);
status_t nativeWindowDisconnect(ANativeWindow *surface, const char *reason);
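For illustration, a minimal sketch combining the existing size/format helper with the new HDR helper; the window, resolution, usage flags and light levels are assumptions, not part of the patch:

    status_t err = setNativeWindowSizeFormatAndUsage(
            nativeWindow, 3840, 2160, HAL_PIXEL_FORMAT_YV12,
            0 /* rotation */, GRALLOC_USAGE_HW_TEXTURE, true /* reconnect */);
    if (err == OK) {
        HDRStaticInfo info = {};
        info.mID = HDRStaticInfo::kType1;
        info.sType1.mMaxContentLightLevel = 1000;      // cd/m^2, assumed
        info.sType1.mMaxFrameAverageLightLevel = 400;  // cd/m^2, assumed
        setNativeWindowHdrMetadata(nativeWindow, &info);
    }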
diff --git a/media/libstagefright/include/media/stagefright/Utils.h b/media/libstagefright/include/media/stagefright/Utils.h
index 77cbd4c..6a28e0b 100644
--- a/media/libstagefright/include/media/stagefright/Utils.h
+++ b/media/libstagefright/include/media/stagefright/Utils.h
@@ -28,21 +28,6 @@
namespace android {
-#define FOURCC(c1, c2, c3, c4) \
- ((c1) << 24 | (c2) << 16 | (c3) << 8 | (c4))
-
-uint16_t U16_AT(const uint8_t *ptr);
-uint32_t U32_AT(const uint8_t *ptr);
-uint64_t U64_AT(const uint8_t *ptr);
-
-uint16_t U16LE_AT(const uint8_t *ptr);
-uint32_t U32LE_AT(const uint8_t *ptr);
-uint64_t U64LE_AT(const uint8_t *ptr);
-
-uint64_t ntoh64(uint64_t x);
-uint64_t hton64(uint64_t x);
-
-class MetaData;
struct AMessage;
status_t convertMetaDataToMessage(
const sp<MetaData> &meta, sp<AMessage> *format);
@@ -95,7 +80,6 @@
void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */);
AString nameForFd(int fd);
-void MakeFourCCString(uint32_t x, char *s);
} // namespace android
#endif // UTILS_H_
diff --git a/media/libstagefright/matroska/Android.bp b/media/libstagefright/matroska/Android.bp
deleted file mode 100644
index ec2fb4b..0000000
--- a/media/libstagefright/matroska/Android.bp
+++ /dev/null
@@ -1,35 +0,0 @@
-cc_library_static {
- name: "libstagefright_matroska",
-
- srcs: ["MatroskaExtractor.cpp"],
-
- include_dirs: [
- "external/flac/include",
- "external/libvpx/libwebm",
- "frameworks/native/include/media/openmax",
- "frameworks/av/media/libstagefright/flac/dec",
- "frameworks/av/media/libstagefright/include",
- ],
-
- cflags: [
- "-Wno-multichar",
- "-Werror",
- "-Wall",
- ],
-
- sanitize: {
- misc_undefined: [
- "signed-integer-overflow",
- "unsigned-integer-overflow",
- ],
- cfi: true,
- diag: {
- cfi: true,
- },
- },
-
- shared_libs: [
- "libmedia",
- "libstagefright_flacdec"
- ],
-}
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
deleted file mode 100644
index 462eff6..0000000
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ /dev/null
@@ -1,1538 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MatroskaExtractor"
-#include <utils/Log.h>
-
-#include "FLACDecoder.h"
-#include "MatroskaExtractor.h"
-#include "avc_utils.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ColorUtils.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-#include <utils/String8.h>
-
-#include <inttypes.h>
-
-namespace android {
-
-struct DataSourceReader : public mkvparser::IMkvReader {
- explicit DataSourceReader(const sp<DataSource> &source)
- : mSource(source) {
- }
-
- virtual int Read(long long position, long length, unsigned char* buffer) {
- CHECK(position >= 0);
- CHECK(length >= 0);
-
- if (length == 0) {
- return 0;
- }
-
- ssize_t n = mSource->readAt(position, buffer, length);
-
- if (n <= 0) {
- return -1;
- }
-
- return 0;
- }
-
- virtual int Length(long long* total, long long* available) {
- off64_t size;
- if (mSource->getSize(&size) != OK) {
- *total = -1;
- *available = (long long)((1ull << 63) - 1);
-
- return 0;
- }
-
- if (total) {
- *total = size;
- }
-
- if (available) {
- *available = size;
- }
-
- return 0;
- }
-
-private:
- sp<DataSource> mSource;
-
- DataSourceReader(const DataSourceReader &);
- DataSourceReader &operator=(const DataSourceReader &);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-struct BlockIterator {
- BlockIterator(MatroskaExtractor *extractor, unsigned long trackNum, unsigned long index);
-
- bool eos() const;
-
- void advance();
- void reset();
-
- void seek(
- int64_t seekTimeUs, bool isAudio,
- int64_t *actualFrameTimeUs);
-
- const mkvparser::Block *block() const;
- int64_t blockTimeUs() const;
-
-private:
- MatroskaExtractor *mExtractor;
- long long mTrackNum;
- unsigned long mIndex;
-
- const mkvparser::Cluster *mCluster;
- const mkvparser::BlockEntry *mBlockEntry;
- long mBlockEntryIndex;
-
- void advance_l();
-
- BlockIterator(const BlockIterator &);
- BlockIterator &operator=(const BlockIterator &);
-};
-
-struct MatroskaSource : public MediaSource {
- MatroskaSource(
- const sp<MatroskaExtractor> &extractor, size_t index);
-
- virtual status_t start(MetaData *params);
- virtual status_t stop();
-
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
- virtual ~MatroskaSource();
-
-private:
- enum Type {
- AVC,
- AAC,
- HEVC,
- OTHER
- };
-
- sp<MatroskaExtractor> mExtractor;
- size_t mTrackIndex;
- Type mType;
- bool mIsAudio;
- BlockIterator mBlockIter;
- ssize_t mNALSizeLen; // for type AVC or HEVC
-
- List<MediaBuffer *> mPendingFrames;
-
- status_t advance();
-
- status_t setWebmBlockCryptoInfo(MediaBuffer *mbuf);
- status_t readBlock();
- void clearPendingFrames();
-
- MatroskaSource(const MatroskaSource &);
- MatroskaSource &operator=(const MatroskaSource &);
-};
-
-const mkvparser::Track* MatroskaExtractor::TrackInfo::getTrack() const {
- return mExtractor->mSegment->GetTracks()->GetTrackByNumber(mTrackNum);
-}
-
-// This function does exactly the same as mkvparser::Cues::Find, except that it
-// searches in our own track based vectors. We should not need this once mkvparser
-// adds the same functionality.
-const mkvparser::CuePoint::TrackPosition *MatroskaExtractor::TrackInfo::find(
- long long timeNs) const {
- ALOGV("mCuePoints.size %zu", mCuePoints.size());
- if (mCuePoints.empty()) {
- return NULL;
- }
-
- const mkvparser::CuePoint* cp = mCuePoints.itemAt(0);
- const mkvparser::Track* track = getTrack();
- if (timeNs <= cp->GetTime(mExtractor->mSegment)) {
- return cp->Find(track);
- }
-
- // Binary searches through relevant cues; assumes cues are ordered by timecode.
- // If we do detect out-of-order cues, return NULL.
- size_t lo = 0;
- size_t hi = mCuePoints.size();
- while (lo < hi) {
- const size_t mid = lo + (hi - lo) / 2;
- const mkvparser::CuePoint* const midCp = mCuePoints.itemAt(mid);
- const long long cueTimeNs = midCp->GetTime(mExtractor->mSegment);
- if (cueTimeNs <= timeNs) {
- lo = mid + 1;
- } else {
- hi = mid;
- }
- }
-
- if (lo == 0) {
- return NULL;
- }
-
- cp = mCuePoints.itemAt(lo - 1);
- if (cp->GetTime(mExtractor->mSegment) > timeNs) {
- return NULL;
- }
-
- return cp->Find(track);
-}
-
-MatroskaSource::MatroskaSource(
- const sp<MatroskaExtractor> &extractor, size_t index)
- : mExtractor(extractor),
- mTrackIndex(index),
- mType(OTHER),
- mIsAudio(false),
- mBlockIter(mExtractor.get(),
- mExtractor->mTracks.itemAt(index).mTrackNum,
- index),
- mNALSizeLen(-1) {
- sp<MetaData> meta = mExtractor->mTracks.itemAt(index).mMeta;
-
- const char *mime;
- CHECK(meta->findCString(kKeyMIMEType, &mime));
-
- mIsAudio = !strncasecmp("audio/", mime, 6);
-
- if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
- mType = AVC;
-
- uint32_t dummy;
- const uint8_t *avcc;
- size_t avccSize;
- int32_t nalSizeLen = 0;
- if (meta->findInt32(kKeyNalLengthSize, &nalSizeLen)) {
- if (nalSizeLen >= 0 && nalSizeLen <= 4) {
- mNALSizeLen = nalSizeLen;
- }
- } else if (meta->findData(kKeyAVCC, &dummy, (const void **)&avcc, &avccSize)
- && avccSize >= 5u) {
- mNALSizeLen = 1 + (avcc[4] & 3);
- ALOGV("mNALSizeLen = %zd", mNALSizeLen);
- } else {
- ALOGE("No mNALSizeLen");
- }
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
- mType = HEVC;
-
- uint32_t dummy;
- const uint8_t *hvcc;
- size_t hvccSize;
- if (meta->findData(kKeyHVCC, &dummy, (const void **)&hvcc, &hvccSize)
- && hvccSize >= 22u) {
- mNALSizeLen = 1 + (hvcc[14+7] & 3);
- ALOGV("mNALSizeLen = %zu", mNALSizeLen);
- } else {
- ALOGE("No mNALSizeLen");
- }
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
- mType = AAC;
- }
-}
-
-MatroskaSource::~MatroskaSource() {
- clearPendingFrames();
-}
-
-status_t MatroskaSource::start(MetaData * /* params */) {
- if (mType == AVC && mNALSizeLen < 0) {
- return ERROR_MALFORMED;
- }
-
- mBlockIter.reset();
-
- return OK;
-}
-
-status_t MatroskaSource::stop() {
- clearPendingFrames();
-
- return OK;
-}
-
-sp<MetaData> MatroskaSource::getFormat() {
- return mExtractor->mTracks.itemAt(mTrackIndex).mMeta;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-BlockIterator::BlockIterator(
- MatroskaExtractor *extractor, unsigned long trackNum, unsigned long index)
- : mExtractor(extractor),
- mTrackNum(trackNum),
- mIndex(index),
- mCluster(NULL),
- mBlockEntry(NULL),
- mBlockEntryIndex(0) {
- reset();
-}
-
-bool BlockIterator::eos() const {
- return mCluster == NULL || mCluster->EOS();
-}
-
-void BlockIterator::advance() {
- Mutex::Autolock autoLock(mExtractor->mLock);
- advance_l();
-}
-
-void BlockIterator::advance_l() {
- for (;;) {
- long res = mCluster->GetEntry(mBlockEntryIndex, mBlockEntry);
- ALOGV("GetEntry returned %ld", res);
-
- long long pos;
- long len;
- if (res < 0) {
- // Need to parse this cluster some more
-
- CHECK_EQ(res, mkvparser::E_BUFFER_NOT_FULL);
-
- res = mCluster->Parse(pos, len);
- ALOGV("Parse returned %ld", res);
-
- if (res < 0) {
- // I/O error
-
- ALOGE("Cluster::Parse returned result %ld", res);
-
- mCluster = NULL;
- break;
- }
-
- continue;
- } else if (res == 0) {
- // We're done with this cluster
-
- const mkvparser::Cluster *nextCluster;
- res = mExtractor->mSegment->ParseNext(
- mCluster, nextCluster, pos, len);
- ALOGV("ParseNext returned %ld", res);
-
- if (res != 0) {
- // EOF or error
-
- mCluster = NULL;
- break;
- }
-
- CHECK_EQ(res, 0);
- CHECK(nextCluster != NULL);
- CHECK(!nextCluster->EOS());
-
- mCluster = nextCluster;
-
- res = mCluster->Parse(pos, len);
- ALOGV("Parse (2) returned %ld", res);
- CHECK_GE(res, 0);
-
- mBlockEntryIndex = 0;
- continue;
- }
-
- CHECK(mBlockEntry != NULL);
- CHECK(mBlockEntry->GetBlock() != NULL);
- ++mBlockEntryIndex;
-
- if (mBlockEntry->GetBlock()->GetTrackNumber() == mTrackNum) {
- break;
- }
- }
-}
-
-void BlockIterator::reset() {
- Mutex::Autolock autoLock(mExtractor->mLock);
-
- mCluster = mExtractor->mSegment->GetFirst();
- mBlockEntry = NULL;
- mBlockEntryIndex = 0;
-
- do {
- advance_l();
- } while (!eos() && block()->GetTrackNumber() != mTrackNum);
-}
-
-void BlockIterator::seek(
- int64_t seekTimeUs, bool isAudio,
- int64_t *actualFrameTimeUs) {
- Mutex::Autolock autoLock(mExtractor->mLock);
-
- *actualFrameTimeUs = -1ll;
-
- if (seekTimeUs > INT64_MAX / 1000ll ||
- seekTimeUs < INT64_MIN / 1000ll ||
- (mExtractor->mSeekPreRollNs > 0 &&
- (seekTimeUs * 1000ll) < INT64_MIN + mExtractor->mSeekPreRollNs) ||
- (mExtractor->mSeekPreRollNs < 0 &&
- (seekTimeUs * 1000ll) > INT64_MAX + mExtractor->mSeekPreRollNs)) {
- ALOGE("cannot seek to %lld", (long long) seekTimeUs);
- return;
- }
-
- const int64_t seekTimeNs = seekTimeUs * 1000ll - mExtractor->mSeekPreRollNs;
-
- mkvparser::Segment* const pSegment = mExtractor->mSegment;
-
- // Special case the 0 seek to avoid loading Cues when the application
- // extraneously seeks to 0 before playing.
- if (seekTimeNs <= 0) {
- ALOGV("Seek to beginning: %" PRId64, seekTimeUs);
- mCluster = pSegment->GetFirst();
- mBlockEntryIndex = 0;
- do {
- advance_l();
- } while (!eos() && block()->GetTrackNumber() != mTrackNum);
- return;
- }
-
- ALOGV("Seeking to: %" PRId64, seekTimeUs);
-
- // If the Cues have not been located then find them.
- const mkvparser::Cues* pCues = pSegment->GetCues();
- const mkvparser::SeekHead* pSH = pSegment->GetSeekHead();
- if (!pCues && pSH) {
- const size_t count = pSH->GetCount();
- const mkvparser::SeekHead::Entry* pEntry;
- ALOGV("No Cues yet");
-
- for (size_t index = 0; index < count; index++) {
- pEntry = pSH->GetEntry(index);
-
- if (pEntry->id == 0x0C53BB6B) { // Cues ID
- long len; long long pos;
- pSegment->ParseCues(pEntry->pos, pos, len);
- pCues = pSegment->GetCues();
- ALOGV("Cues found");
- break;
- }
- }
-
- if (!pCues) {
- ALOGE("No Cues in file");
- return;
- }
- }
- else if (!pSH) {
- ALOGE("No SeekHead");
- return;
- }
-
- const mkvparser::CuePoint* pCP;
- mkvparser::Tracks const *pTracks = pSegment->GetTracks();
- while (!pCues->DoneParsing()) {
- pCues->LoadCuePoint();
- pCP = pCues->GetLast();
- CHECK(pCP);
-
- size_t trackCount = mExtractor->mTracks.size();
- for (size_t index = 0; index < trackCount; ++index) {
- MatroskaExtractor::TrackInfo& track = mExtractor->mTracks.editItemAt(index);
- const mkvparser::Track *pTrack = pTracks->GetTrackByNumber(track.mTrackNum);
- if (pTrack && pTrack->GetType() == 1 && pCP->Find(pTrack)) { // VIDEO_TRACK
- track.mCuePoints.push_back(pCP);
- }
- }
-
- if (pCP->GetTime(pSegment) >= seekTimeNs) {
- ALOGV("Parsed past relevant Cue");
- break;
- }
- }
-
- const mkvparser::CuePoint::TrackPosition *pTP = NULL;
- const mkvparser::Track *thisTrack = pTracks->GetTrackByNumber(mTrackNum);
- if (thisTrack->GetType() == 1) { // video
- MatroskaExtractor::TrackInfo& track = mExtractor->mTracks.editItemAt(mIndex);
- pTP = track.find(seekTimeNs);
- } else {
- // The Cue index is built around video keyframes
- unsigned long int trackCount = pTracks->GetTracksCount();
- for (size_t index = 0; index < trackCount; ++index) {
- const mkvparser::Track *pTrack = pTracks->GetTrackByIndex(index);
- if (pTrack && pTrack->GetType() == 1 && pCues->Find(seekTimeNs, pTrack, pCP, pTP)) {
- ALOGV("Video track located at %zu", index);
- break;
- }
- }
- }
-
-
- // Always *search* based on the video track, but finalize based on mTrackNum
- if (!pTP) {
- ALOGE("Did not locate the video track for seeking");
- return;
- }
-
- mCluster = pSegment->FindOrPreloadCluster(pTP->m_pos);
-
- CHECK(mCluster);
- CHECK(!mCluster->EOS());
-
- // mBlockEntryIndex starts at 0 but m_block starts at 1
- CHECK_GT(pTP->m_block, 0);
- mBlockEntryIndex = pTP->m_block - 1;
-
- for (;;) {
- advance_l();
-
- if (eos()) break;
-
- if (isAudio || block()->IsKey()) {
- // Accept the first key frame
- int64_t frameTimeUs = (block()->GetTime(mCluster) + 500LL) / 1000LL;
- if (thisTrack->GetType() == 1 || frameTimeUs >= seekTimeUs) {
- *actualFrameTimeUs = frameTimeUs;
- ALOGV("Requested seek point: %" PRId64 " actual: %" PRId64,
- seekTimeUs, *actualFrameTimeUs);
- break;
- }
- }
- }
-}
-
-const mkvparser::Block *BlockIterator::block() const {
- CHECK(!eos());
-
- return mBlockEntry->GetBlock();
-}
-
-int64_t BlockIterator::blockTimeUs() const {
- if (mCluster == NULL || mBlockEntry == NULL) {
- return -1;
- }
- return (mBlockEntry->GetBlock()->GetTime(mCluster) + 500ll) / 1000ll;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-static unsigned U24_AT(const uint8_t *ptr) {
- return ptr[0] << 16 | ptr[1] << 8 | ptr[2];
-}
-
-void MatroskaSource::clearPendingFrames() {
- while (!mPendingFrames.empty()) {
- MediaBuffer *frame = *mPendingFrames.begin();
- mPendingFrames.erase(mPendingFrames.begin());
-
- frame->release();
- frame = NULL;
- }
-}
-
-status_t MatroskaSource::setWebmBlockCryptoInfo(MediaBuffer *mbuf) {
- if (mbuf->range_length() < 1 || mbuf->range_length() - 1 > INT32_MAX) {
- // 1-byte signal
- return ERROR_MALFORMED;
- }
-
- const uint8_t *data = (const uint8_t *)mbuf->data() + mbuf->range_offset();
- bool blockEncrypted = data[0] & 0x1;
- if (blockEncrypted && mbuf->range_length() < 9) {
- // 1-byte signal + 8-byte IV
- return ERROR_MALFORMED;
- }
-
- sp<MetaData> meta = mbuf->meta_data();
- if (blockEncrypted) {
- /*
- * 0 1 2 3
- * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | Signal Byte | |
- * +-+-+-+-+-+-+-+-+ IV |
- * | |
- * | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | |
- * |-+-+-+-+-+-+-+-+ |
- * : Bytes 1..N of encrypted frame :
- * | |
- * | |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
- int32_t plainSizes[] = { 0 };
- int32_t encryptedSizes[] = { static_cast<int32_t>(mbuf->range_length() - 9) };
- uint8_t ctrCounter[16] = { 0 };
- uint32_t type;
- const uint8_t *keyId;
- size_t keyIdSize;
- sp<MetaData> trackMeta = mExtractor->mTracks.itemAt(mTrackIndex).mMeta;
- CHECK(trackMeta->findData(kKeyCryptoKey, &type, (const void **)&keyId, &keyIdSize));
- meta->setData(kKeyCryptoKey, 0, keyId, keyIdSize);
- memcpy(ctrCounter, data + 1, 8);
- meta->setData(kKeyCryptoIV, 0, ctrCounter, 16);
- meta->setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
- meta->setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
- mbuf->set_range(9, mbuf->range_length() - 9);
- } else {
- /*
- * 0 1 2 3
- * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | Signal Byte | |
- * +-+-+-+-+-+-+-+-+ |
- * : Bytes 1..N of unencrypted frame :
- * | |
- * | |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
- int32_t plainSizes[] = { static_cast<int32_t>(mbuf->range_length() - 1) };
- int32_t encryptedSizes[] = { 0 };
- meta->setData(kKeyPlainSizes, 0, plainSizes, sizeof(plainSizes));
- meta->setData(kKeyEncryptedSizes, 0, encryptedSizes, sizeof(encryptedSizes));
- mbuf->set_range(1, mbuf->range_length() - 1);
- }
-
- return OK;
-}
-
-status_t MatroskaSource::readBlock() {
- CHECK(mPendingFrames.empty());
-
- if (mBlockIter.eos()) {
- return ERROR_END_OF_STREAM;
- }
-
- const mkvparser::Block *block = mBlockIter.block();
-
- int64_t timeUs = mBlockIter.blockTimeUs();
-
- for (int i = 0; i < block->GetFrameCount(); ++i) {
- MatroskaExtractor::TrackInfo *trackInfo = &mExtractor->mTracks.editItemAt(mTrackIndex);
- const mkvparser::Block::Frame &frame = block->GetFrame(i);
- size_t len = frame.len;
- if (SIZE_MAX - len < trackInfo->mHeaderLen) {
- return ERROR_MALFORMED;
- }
-
- len += trackInfo->mHeaderLen;
- MediaBuffer *mbuf = new MediaBuffer(len);
- uint8_t *data = static_cast<uint8_t *>(mbuf->data());
- if (trackInfo->mHeader) {
- memcpy(data, trackInfo->mHeader, trackInfo->mHeaderLen);
- }
-
- mbuf->meta_data()->setInt64(kKeyTime, timeUs);
- mbuf->meta_data()->setInt32(kKeyIsSyncFrame, block->IsKey());
-
- status_t err = frame.Read(mExtractor->mReader, data + trackInfo->mHeaderLen);
- if (err == OK
- && mExtractor->mIsWebm
- && trackInfo->mEncrypted) {
- err = setWebmBlockCryptoInfo(mbuf);
- }
-
- if (err != OK) {
- mPendingFrames.clear();
-
- mBlockIter.advance();
- mbuf->release();
- return err;
- }
-
- mPendingFrames.push_back(mbuf);
- }
-
- mBlockIter.advance();
-
- return OK;
-}
-
-status_t MatroskaSource::read(
- MediaBuffer **out, const ReadOptions *options) {
- *out = NULL;
-
- int64_t targetSampleTimeUs = -1ll;
-
- int64_t seekTimeUs;
- ReadOptions::SeekMode mode;
- if (options && options->getSeekTo(&seekTimeUs, &mode)
- && !mExtractor->isLiveStreaming()) {
- clearPendingFrames();
-
- // The audio we want is located by using the Cues to seek the video
- // stream to find the target Cluster then iterating to finalize for
- // audio.
- int64_t actualFrameTimeUs;
- mBlockIter.seek(seekTimeUs, mIsAudio, &actualFrameTimeUs);
-
- if (mode == ReadOptions::SEEK_CLOSEST) {
- targetSampleTimeUs = actualFrameTimeUs;
- }
- }
-
- while (mPendingFrames.empty()) {
- status_t err = readBlock();
-
- if (err != OK) {
- clearPendingFrames();
-
- return err;
- }
- }
-
- MediaBuffer *frame = *mPendingFrames.begin();
- mPendingFrames.erase(mPendingFrames.begin());
-
- if ((mType != AVC && mType != HEVC) || mNALSizeLen == 0) {
- if (targetSampleTimeUs >= 0ll) {
- frame->meta_data()->setInt64(
- kKeyTargetTime, targetSampleTimeUs);
- }
-
- *out = frame;
-
- return OK;
- }
-
- // Each input frame contains one or more NAL fragments, each fragment
- // is prefixed by mNALSizeLen bytes giving the fragment length,
- // followed by a corresponding number of bytes containing the fragment.
- // We output all these fragments into a single large buffer separated
- // by startcodes (0x00 0x00 0x00 0x01).
- //
- // When mNALSizeLen is 0, we assume the data is already in the format
- // desired.
-
- const uint8_t *srcPtr =
- (const uint8_t *)frame->data() + frame->range_offset();
-
- size_t srcSize = frame->range_length();
-
- size_t dstSize = 0;
- MediaBuffer *buffer = NULL;
- uint8_t *dstPtr = NULL;
-
- for (int32_t pass = 0; pass < 2; ++pass) {
- size_t srcOffset = 0;
- size_t dstOffset = 0;
- while (srcOffset + mNALSizeLen <= srcSize) {
- size_t NALsize;
- switch (mNALSizeLen) {
- case 1: NALsize = srcPtr[srcOffset]; break;
- case 2: NALsize = U16_AT(srcPtr + srcOffset); break;
- case 3: NALsize = U24_AT(srcPtr + srcOffset); break;
- case 4: NALsize = U32_AT(srcPtr + srcOffset); break;
- default:
- TRESPASS();
- }
-
- if (srcOffset + mNALSizeLen + NALsize <= srcOffset + mNALSizeLen) {
- frame->release();
- frame = NULL;
-
- return ERROR_MALFORMED;
- } else if (srcOffset + mNALSizeLen + NALsize > srcSize) {
- break;
- }
-
- if (pass == 1) {
- memcpy(&dstPtr[dstOffset], "\x00\x00\x00\x01", 4);
-
- if (frame != buffer) {
- memcpy(&dstPtr[dstOffset + 4],
- &srcPtr[srcOffset + mNALSizeLen],
- NALsize);
- }
- }
-
- dstOffset += 4; // 0x00 00 00 01
- dstOffset += NALsize;
-
- srcOffset += mNALSizeLen + NALsize;
- }
-
- if (srcOffset < srcSize) {
- // There were trailing bytes or not enough data to complete
- // a fragment.
-
- frame->release();
- frame = NULL;
-
- return ERROR_MALFORMED;
- }
-
- if (pass == 0) {
- dstSize = dstOffset;
-
- if (dstSize == srcSize && mNALSizeLen == 4) {
- // In this special case we can re-use the input buffer by substituting
- // each 4-byte nal size with a 4-byte start code
- buffer = frame;
- } else {
- buffer = new MediaBuffer(dstSize);
- }
-
- int64_t timeUs;
- CHECK(frame->meta_data()->findInt64(kKeyTime, &timeUs));
- int32_t isSync;
- CHECK(frame->meta_data()->findInt32(kKeyIsSyncFrame, &isSync));
-
- buffer->meta_data()->setInt64(kKeyTime, timeUs);
- buffer->meta_data()->setInt32(kKeyIsSyncFrame, isSync);
-
- dstPtr = (uint8_t *)buffer->data();
- }
- }
-
- if (frame != buffer) {
- frame->release();
- frame = NULL;
- }
-
- if (targetSampleTimeUs >= 0ll) {
- buffer->meta_data()->setInt64(
- kKeyTargetTime, targetSampleTimeUs);
- }
-
- *out = buffer;
-
- return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-MatroskaExtractor::MatroskaExtractor(const sp<DataSource> &source)
- : mDataSource(source),
- mReader(new DataSourceReader(mDataSource)),
- mSegment(NULL),
- mExtractedThumbnails(false),
- mIsWebm(false),
- mSeekPreRollNs(0) {
- off64_t size;
- mIsLiveStreaming =
- (mDataSource->flags()
- & (DataSource::kWantsPrefetching
- | DataSource::kIsCachingDataSource))
- && mDataSource->getSize(&size) != OK;
-
- mkvparser::EBMLHeader ebmlHeader;
- long long pos;
- if (ebmlHeader.Parse(mReader, pos) < 0) {
- return;
- }
-
- if (ebmlHeader.m_docType && !strcmp("webm", ebmlHeader.m_docType)) {
- mIsWebm = true;
- }
-
- long long ret =
- mkvparser::Segment::CreateInstance(mReader, pos, mSegment);
-
- if (ret) {
- CHECK(mSegment == NULL);
- return;
- }
-
- // from mkvparser::Segment::Load(), but stop at first cluster
- ret = mSegment->ParseHeaders();
- if (ret == 0) {
- long len;
- ret = mSegment->LoadCluster(pos, len);
- if (ret >= 1) {
- // no more clusters
- ret = 0;
- }
- } else if (ret > 0) {
- ret = mkvparser::E_BUFFER_NOT_FULL;
- }
-
- if (ret < 0) {
- ALOGW("Corrupt %s source: %s", mIsWebm ? "webm" : "matroska",
- uriDebugString(mDataSource->getUri()).c_str());
- delete mSegment;
- mSegment = NULL;
- return;
- }
-
-#if 0
- const mkvparser::SegmentInfo *info = mSegment->GetInfo();
- ALOGI("muxing app: %s, writing app: %s",
- info->GetMuxingAppAsUTF8(),
- info->GetWritingAppAsUTF8());
-#endif
-
- addTracks();
-}
-
-MatroskaExtractor::~MatroskaExtractor() {
- delete mSegment;
- mSegment = NULL;
-
- delete mReader;
- mReader = NULL;
-}
-
-size_t MatroskaExtractor::countTracks() {
- return mTracks.size();
-}
-
-sp<IMediaSource> MatroskaExtractor::getTrack(size_t index) {
- if (index >= mTracks.size()) {
- return NULL;
- }
-
- return new MatroskaSource(this, index);
-}
-
-sp<MetaData> MatroskaExtractor::getTrackMetaData(
- size_t index, uint32_t flags) {
- if (index >= mTracks.size()) {
- return NULL;
- }
-
- if ((flags & kIncludeExtensiveMetaData) && !mExtractedThumbnails
- && !isLiveStreaming()) {
- findThumbnails();
- mExtractedThumbnails = true;
- }
-
- return mTracks.itemAt(index).mMeta;
-}
-
-bool MatroskaExtractor::isLiveStreaming() const {
- return mIsLiveStreaming;
-}
-
-static int bytesForSize(size_t size) {
- // use at most 28 bits (4 times 7)
- CHECK(size <= 0xfffffff);
-
- if (size > 0x1fffff) {
- return 4;
- } else if (size > 0x3fff) {
- return 3;
- } else if (size > 0x7f) {
- return 2;
- }
- return 1;
-}
-
-static void storeSize(uint8_t *data, size_t &idx, size_t size) {
- int numBytes = bytesForSize(size);
- idx += numBytes;
-
- data += idx;
- size_t next = 0;
- while (numBytes--) {
- *--data = (size & 0x7f) | next;
- size >>= 7;
- next = 0x80;
- }
-}
-
-static void addESDSFromCodecPrivate(
- const sp<MetaData> &meta,
- bool isAudio, const void *priv, size_t privSize) {
-
- int privSizeBytesRequired = bytesForSize(privSize);
- int esdsSize2 = 14 + privSizeBytesRequired + privSize;
- int esdsSize2BytesRequired = bytesForSize(esdsSize2);
- int esdsSize1 = 4 + esdsSize2BytesRequired + esdsSize2;
- int esdsSize1BytesRequired = bytesForSize(esdsSize1);
- size_t esdsSize = 1 + esdsSize1BytesRequired + esdsSize1;
- uint8_t *esds = new uint8_t[esdsSize];
-
- size_t idx = 0;
- esds[idx++] = 0x03;
- storeSize(esds, idx, esdsSize1);
- esds[idx++] = 0x00; // ES_ID
- esds[idx++] = 0x00; // ES_ID
- esds[idx++] = 0x00; // streamDependenceFlag, URL_Flag, OCRstreamFlag
- esds[idx++] = 0x04;
- storeSize(esds, idx, esdsSize2);
- esds[idx++] = isAudio ? 0x40 // Audio ISO/IEC 14496-3
- : 0x20; // Visual ISO/IEC 14496-2
- for (int i = 0; i < 12; i++) {
- esds[idx++] = 0x00;
- }
- esds[idx++] = 0x05;
- storeSize(esds, idx, privSize);
- memcpy(esds + idx, priv, privSize);
-
- meta->setData(kKeyESDS, 0, esds, esdsSize);
-
- delete[] esds;
- esds = NULL;
-}
-
-status_t addVorbisCodecInfo(
- const sp<MetaData> &meta,
- const void *_codecPrivate, size_t codecPrivateSize) {
- // hexdump(_codecPrivate, codecPrivateSize);
-
- if (codecPrivateSize < 1) {
- return ERROR_MALFORMED;
- }
-
- const uint8_t *codecPrivate = (const uint8_t *)_codecPrivate;
-
- if (codecPrivate[0] != 0x02) {
- return ERROR_MALFORMED;
- }
-
- // codecInfo starts with two lengths, len1 and len2, that are
- // "Xiph-style-lacing encoded"...
-
- size_t offset = 1;
- size_t len1 = 0;
- while (offset < codecPrivateSize && codecPrivate[offset] == 0xff) {
- if (len1 > (SIZE_MAX - 0xff)) {
- return ERROR_MALFORMED; // would overflow
- }
- len1 += 0xff;
- ++offset;
- }
- if (offset >= codecPrivateSize) {
- return ERROR_MALFORMED;
- }
- if (len1 > (SIZE_MAX - codecPrivate[offset])) {
- return ERROR_MALFORMED; // would overflow
- }
- len1 += codecPrivate[offset++];
-
- size_t len2 = 0;
- while (offset < codecPrivateSize && codecPrivate[offset] == 0xff) {
- if (len2 > (SIZE_MAX - 0xff)) {
- return ERROR_MALFORMED; // would overflow
- }
- len2 += 0xff;
- ++offset;
- }
- if (offset >= codecPrivateSize) {
- return ERROR_MALFORMED;
- }
- if (len2 > (SIZE_MAX - codecPrivate[offset])) {
- return ERROR_MALFORMED; // would overflow
- }
- len2 += codecPrivate[offset++];
-
- if (len1 > SIZE_MAX - len2 || offset > SIZE_MAX - (len1 + len2) ||
- codecPrivateSize < offset + len1 + len2) {
- return ERROR_MALFORMED;
- }
-
- if (codecPrivate[offset] != 0x01) {
- return ERROR_MALFORMED;
- }
- meta->setData(kKeyVorbisInfo, 0, &codecPrivate[offset], len1);
-
- offset += len1;
- if (codecPrivate[offset] != 0x03) {
- return ERROR_MALFORMED;
- }
-
- offset += len2;
- if (codecPrivate[offset] != 0x05) {
- return ERROR_MALFORMED;
- }
-
- meta->setData(
- kKeyVorbisBooks, 0, &codecPrivate[offset],
- codecPrivateSize - offset);
-
- return OK;
-}
-
-static status_t addFlacMetadata(
- const sp<MetaData> &meta,
- const void *codecPrivate, size_t codecPrivateSize) {
- // hexdump(codecPrivate, codecPrivateSize);
-
- meta->setData(kKeyFlacMetadata, 0, codecPrivate, codecPrivateSize);
-
- int32_t maxInputSize = 64 << 10;
- sp<FLACDecoder> flacDecoder = FLACDecoder::Create();
- if (flacDecoder != NULL
- && flacDecoder->parseMetadata((const uint8_t*)codecPrivate, codecPrivateSize) == OK) {
- FLAC__StreamMetadata_StreamInfo streamInfo = flacDecoder->getStreamInfo();
- maxInputSize = streamInfo.max_framesize;
- if (maxInputSize == 0) {
- // In case max framesize is not available, use raw data size as max framesize,
- // assuming there is no expansion.
- if (streamInfo.max_blocksize != 0
- && streamInfo.channels != 0
- && ((streamInfo.bits_per_sample + 7) / 8) >
- INT32_MAX / streamInfo.max_blocksize / streamInfo.channels) {
- return ERROR_MALFORMED;
- }
- maxInputSize = ((streamInfo.bits_per_sample + 7) / 8)
- * streamInfo.max_blocksize * streamInfo.channels;
- }
- }
- meta->setInt32(kKeyMaxInputSize, maxInputSize);
-
- return OK;
-}
-
-status_t MatroskaExtractor::synthesizeAVCC(TrackInfo *trackInfo, size_t index) {
- BlockIterator iter(this, trackInfo->mTrackNum, index);
- if (iter.eos()) {
- return ERROR_MALFORMED;
- }
-
- const mkvparser::Block *block = iter.block();
- if (block->GetFrameCount() <= 0) {
- return ERROR_MALFORMED;
- }
-
- const mkvparser::Block::Frame &frame = block->GetFrame(0);
- sp<ABuffer> abuf = new ABuffer(frame.len);
- long n = frame.Read(mReader, abuf->data());
- if (n != 0) {
- return ERROR_MALFORMED;
- }
-
- sp<MetaData> avcMeta = MakeAVCCodecSpecificData(abuf);
- if (avcMeta == NULL) {
- return ERROR_MALFORMED;
- }
-
- // Override the synthesized nal length size, which is arbitrary
- avcMeta->setInt32(kKeyNalLengthSize, 0);
- trackInfo->mMeta = avcMeta;
- return OK;
-}
-
-static inline bool isValidInt32ColourValue(long long value) {
- return value != mkvparser::Colour::kValueNotPresent
- && value >= INT32_MIN
- && value <= INT32_MAX;
-}
-
-static inline bool isValidUint16ColourValue(long long value) {
- return value != mkvparser::Colour::kValueNotPresent
- && value >= 0
- && value <= UINT16_MAX;
-}
-
-static inline bool isValidPrimary(const mkvparser::PrimaryChromaticity *primary) {
- return primary != NULL && primary->x >= 0 && primary->x <= 1
- && primary->y >= 0 && primary->y <= 1;
-}
-
-void MatroskaExtractor::getColorInformation(
- const mkvparser::VideoTrack *vtrack, sp<MetaData> &meta) {
- const mkvparser::Colour *color = vtrack->GetColour();
- if (color == NULL) {
- return;
- }
-
- // Color Aspects
- {
- int32_t primaries = 2; // ISO unspecified
- int32_t transfer = 2; // ISO unspecified
- int32_t coeffs = 2; // ISO unspecified
- bool fullRange = false; // default
- bool rangeSpecified = false;
-
- if (isValidInt32ColourValue(color->primaries)) {
- primaries = color->primaries;
- }
- if (isValidInt32ColourValue(color->transfer_characteristics)) {
- transfer = color->transfer_characteristics;
- }
- if (isValidInt32ColourValue(color->matrix_coefficients)) {
- coeffs = color->matrix_coefficients;
- }
- if (color->range != mkvparser::Colour::kValueNotPresent
- && color->range != 0 /* MKV unspecified */) {
- // We only support MKV broadcast range (== limited) and full range.
- // We treat all other value as the default limited range.
- fullRange = color->range == 2 /* MKV fullRange */;
- rangeSpecified = true;
- }
-
- ColorAspects aspects;
- ColorUtils::convertIsoColorAspectsToCodecAspects(
- primaries, transfer, coeffs, fullRange, aspects);
- meta->setInt32(kKeyColorPrimaries, aspects.mPrimaries);
- meta->setInt32(kKeyTransferFunction, aspects.mTransfer);
- meta->setInt32(kKeyColorMatrix, aspects.mMatrixCoeffs);
- meta->setInt32(
- kKeyColorRange, rangeSpecified ? aspects.mRange : ColorAspects::RangeUnspecified);
- }
-
- // HDR Static Info
- {
- HDRStaticInfo info, nullInfo; // nullInfo is a fully unspecified static info
- memset(&info, 0, sizeof(info));
- memset(&nullInfo, 0, sizeof(nullInfo));
- if (isValidUint16ColourValue(color->max_cll)) {
- info.sType1.mMaxContentLightLevel = color->max_cll;
- }
- if (isValidUint16ColourValue(color->max_fall)) {
- info.sType1.mMaxFrameAverageLightLevel = color->max_fall;
- }
- const mkvparser::MasteringMetadata *mastering = color->mastering_metadata;
- if (mastering != NULL) {
- // Convert matroska values to HDRStaticInfo equivalent values for each fully specified
- // group. See CTA-861.3 section 3.2.1 for more info.
- if (mastering->luminance_max >= 0.5 && mastering->luminance_max < 65535.5) {
- info.sType1.mMaxDisplayLuminance = (uint16_t)(mastering->luminance_max + 0.5);
- }
- if (mastering->luminance_min >= 0.00005 && mastering->luminance_min < 6.55355) {
- // HDRStaticInfo Type1 stores min luminance scaled 10000:1
- info.sType1.mMinDisplayLuminance =
- (uint16_t)(10000 * mastering->luminance_min + 0.5);
- }
- // HDRStaticInfo Type1 stores primaries scaled 50000:1
- if (isValidPrimary(mastering->white_point)) {
- info.sType1.mW.x = (uint16_t)(50000 * mastering->white_point->x + 0.5);
- info.sType1.mW.y = (uint16_t)(50000 * mastering->white_point->y + 0.5);
- }
- if (isValidPrimary(mastering->r) && isValidPrimary(mastering->g)
- && isValidPrimary(mastering->b)) {
- info.sType1.mR.x = (uint16_t)(50000 * mastering->r->x + 0.5);
- info.sType1.mR.y = (uint16_t)(50000 * mastering->r->y + 0.5);
- info.sType1.mG.x = (uint16_t)(50000 * mastering->g->x + 0.5);
- info.sType1.mG.y = (uint16_t)(50000 * mastering->g->y + 0.5);
- info.sType1.mB.x = (uint16_t)(50000 * mastering->b->x + 0.5);
- info.sType1.mB.y = (uint16_t)(50000 * mastering->b->y + 0.5);
- }
- }
- // Only advertise static info if at least one of the groups has been specified.
- if (memcmp(&info, &nullInfo, sizeof(info)) != 0) {
- info.mID = HDRStaticInfo::kType1;
- meta->setData(kKeyHdrStaticInfo, 'hdrS', &info, sizeof(info));
- }
- }
-}
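
getColorInformation() above packs mastering metadata into HDRStaticInfo Type 1 fixed-point fields: chromaticities scaled 50000:1 and minimum display luminance scaled 10000:1. A small illustrative sketch (helper name and values are examples, not from the patch):

    #include <cstdint>

    // CIE 1931 chromaticity in [0, 1] -> HDRStaticInfo Type 1 unit (1/50000).
    static uint16_t toType1Chromaticity(double v) {
        return static_cast<uint16_t>(50000 * v + 0.5);
    }

    static void example() {
        uint16_t wx = toType1Chromaticity(0.3127);                    // D65 white x -> 15635
        uint16_t wy = toType1Chromaticity(0.3290);                    // D65 white y -> 16450
        uint16_t minLum = static_cast<uint16_t>(10000 * 0.005 + 0.5); // 0.005 nits  -> 50
        (void)wx; (void)wy; (void)minLum;
    }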
-
-status_t MatroskaExtractor::initTrackInfo(
- const mkvparser::Track *track, const sp<MetaData> &meta, TrackInfo *trackInfo) {
- trackInfo->mTrackNum = track->GetNumber();
- trackInfo->mMeta = meta;
- trackInfo->mExtractor = this;
- trackInfo->mEncrypted = false;
- trackInfo->mHeader = NULL;
- trackInfo->mHeaderLen = 0;
-
- for(size_t i = 0; i < track->GetContentEncodingCount(); i++) {
- const mkvparser::ContentEncoding *encoding = track->GetContentEncodingByIndex(i);
- for(size_t j = 0; j < encoding->GetEncryptionCount(); j++) {
- const mkvparser::ContentEncoding::ContentEncryption *encryption;
- encryption = encoding->GetEncryptionByIndex(j);
- trackInfo->mMeta->setData(kKeyCryptoKey, 0, encryption->key_id, encryption->key_id_len);
- trackInfo->mEncrypted = true;
- break;
- }
-
- for(size_t j = 0; j < encoding->GetCompressionCount(); j++) {
- const mkvparser::ContentEncoding::ContentCompression *compression;
- compression = encoding->GetCompressionByIndex(j);
- ALOGV("compression algo %llu settings_len %lld",
- compression->algo, compression->settings_len);
- if (compression->algo == 3
- && compression->settings
- && compression->settings_len > 0) {
- trackInfo->mHeader = compression->settings;
- trackInfo->mHeaderLen = compression->settings_len;
- }
- }
- }
-
- return OK;
-}
-
-void MatroskaExtractor::addTracks() {
- const mkvparser::Tracks *tracks = mSegment->GetTracks();
-
- for (size_t index = 0; index < tracks->GetTracksCount(); ++index) {
- const mkvparser::Track *track = tracks->GetTrackByIndex(index);
-
- if (track == NULL) {
- // Apparently this is currently valid (if unexpected) behaviour
- // of the mkv parser lib.
- continue;
- }
-
- const char *const codecID = track->GetCodecId();
- ALOGV("codec id = %s", codecID);
- ALOGV("codec name = %s", track->GetCodecNameAsUTF8());
-
- if (codecID == NULL) {
- ALOGW("unknown codecID is not supported.");
- continue;
- }
-
- size_t codecPrivateSize;
- const unsigned char *codecPrivate =
- track->GetCodecPrivate(codecPrivateSize);
-
- enum { VIDEO_TRACK = 1, AUDIO_TRACK = 2 };
-
- sp<MetaData> meta = new MetaData;
-
- status_t err = OK;
-
- switch (track->GetType()) {
- case VIDEO_TRACK:
- {
- const mkvparser::VideoTrack *vtrack =
- static_cast<const mkvparser::VideoTrack *>(track);
-
- if (!strcmp("V_MPEG4/ISO/AVC", codecID)) {
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
- meta->setData(kKeyAVCC, 0, codecPrivate, codecPrivateSize);
- } else if (!strcmp("V_MPEGH/ISO/HEVC", codecID)) {
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_HEVC);
- if (codecPrivateSize > 0) {
- meta->setData(kKeyHVCC, kTypeHVCC, codecPrivate, codecPrivateSize);
- } else {
- ALOGW("HEVC is detected, but does not have configuration.");
- continue;
- }
- } else if (!strcmp("V_MPEG4/ISO/ASP", codecID)) {
- if (codecPrivateSize > 0) {
- meta->setCString(
- kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
- addESDSFromCodecPrivate(
- meta, false, codecPrivate, codecPrivateSize);
- } else {
- ALOGW("%s is detected, but does not have configuration.",
- codecID);
- continue;
- }
- } else if (!strcmp("V_VP8", codecID)) {
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP8);
- } else if (!strcmp("V_VP9", codecID)) {
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP9);
- if (codecPrivateSize > 0) {
- // 'csd-0' for VP9 is the Blob of Codec Private data as
- // specified in http://www.webmproject.org/vp9/profiles/.
- meta->setData(
- kKeyVp9CodecPrivate, 0, codecPrivate,
- codecPrivateSize);
- }
- } else {
- ALOGW("%s is not supported.", codecID);
- continue;
- }
-
- const long long width = vtrack->GetWidth();
- const long long height = vtrack->GetHeight();
- if (width <= 0 || width > INT32_MAX) {
- ALOGW("track width exceeds int32_t, %lld", width);
- continue;
- }
- if (height <= 0 || height > INT32_MAX) {
- ALOGW("track height exceeds int32_t, %lld", height);
- continue;
- }
- meta->setInt32(kKeyWidth, (int32_t)width);
- meta->setInt32(kKeyHeight, (int32_t)height);
-
- // setting display width/height is optional
- const long long displayUnit = vtrack->GetDisplayUnit();
- const long long displayWidth = vtrack->GetDisplayWidth();
- const long long displayHeight = vtrack->GetDisplayHeight();
- if (displayWidth > 0 && displayWidth <= INT32_MAX
- && displayHeight > 0 && displayHeight <= INT32_MAX) {
- switch (displayUnit) {
- case 0: // pixels
- meta->setInt32(kKeyDisplayWidth, (int32_t)displayWidth);
- meta->setInt32(kKeyDisplayHeight, (int32_t)displayHeight);
- break;
- case 1: // centimeters
- case 2: // inches
- case 3: // aspect ratio
- {
- // Physical layout size is treated the same as aspect ratio.
- // Note: displayWidth and displayHeight are never zero as they are
- // checked in the if above.
- const long long computedWidth =
- std::max(width, height * displayWidth / displayHeight);
- const long long computedHeight =
- std::max(height, width * displayHeight / displayWidth);
- if (computedWidth <= INT32_MAX && computedHeight <= INT32_MAX) {
- meta->setInt32(kKeyDisplayWidth, (int32_t)computedWidth);
- meta->setInt32(kKeyDisplayHeight, (int32_t)computedHeight);
- }
- break;
- }
- default: // unknown display units, perhaps future version of spec.
- break;
- }
- }
-
- getColorInformation(vtrack, meta);
-
- break;
- }
-
- case AUDIO_TRACK:
- {
- const mkvparser::AudioTrack *atrack =
- static_cast<const mkvparser::AudioTrack *>(track);
-
- if (!strcmp("A_AAC", codecID)) {
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
- CHECK(codecPrivateSize >= 2);
-
- addESDSFromCodecPrivate(
- meta, true, codecPrivate, codecPrivateSize);
- } else if (!strcmp("A_VORBIS", codecID)) {
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_VORBIS);
-
- err = addVorbisCodecInfo(
- meta, codecPrivate, codecPrivateSize);
- } else if (!strcmp("A_OPUS", codecID)) {
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_OPUS);
- meta->setData(kKeyOpusHeader, 0, codecPrivate, codecPrivateSize);
- meta->setInt64(kKeyOpusCodecDelay, track->GetCodecDelay());
- meta->setInt64(kKeyOpusSeekPreRoll, track->GetSeekPreRoll());
- mSeekPreRollNs = track->GetSeekPreRoll();
- } else if (!strcmp("A_MPEG/L3", codecID)) {
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
- } else if (!strcmp("A_FLAC", codecID)) {
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_FLAC);
- err = addFlacMetadata(meta, codecPrivate, codecPrivateSize);
- } else {
- ALOGW("%s is not supported.", codecID);
- continue;
- }
-
- meta->setInt32(kKeySampleRate, atrack->GetSamplingRate());
- meta->setInt32(kKeyChannelCount, atrack->GetChannels());
- break;
- }
-
- default:
- continue;
- }
-
- const char *language = track->GetLanguage();
- if (language != NULL) {
- char lang[4];
- strncpy(lang, language, 3);
- lang[3] = '\0';
- meta->setCString(kKeyMediaLanguage, lang);
- }
-
- if (err != OK) {
- ALOGE("skipping track, codec specific data was malformed.");
- continue;
- }
-
- long long durationNs = mSegment->GetDuration();
- meta->setInt64(kKeyDuration, (durationNs + 500) / 1000);
-
- mTracks.push();
- size_t n = mTracks.size() - 1;
- TrackInfo *trackInfo = &mTracks.editItemAt(n);
- initTrackInfo(track, meta, trackInfo);
-
- if (!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) {
- // Attempt to recover from AVC track without codec private data
- err = synthesizeAVCC(trackInfo, n);
- if (err != OK) {
- mTracks.pop();
- }
- }
- }
-}
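
When DisplayUnit is centimeters, inches, or aspect ratio, addTracks() above treats the display size purely as an aspect ratio and scales the coded dimensions so that neither shrinks. A compact sketch of that computation (hypothetical helper, same integer math):

    #include <algorithm>
    #include <cstdint>

    // Grow one coded dimension so width:height matches displayWidth:displayHeight
    // while never going below the coded size.
    static void applyDisplayAspect(int64_t width, int64_t height,
                                   int64_t displayWidth, int64_t displayHeight,
                                   int64_t *outW, int64_t *outH) {
        *outW = std::max(width,  height * displayWidth  / displayHeight);
        *outH = std::max(height, width  * displayHeight / displayWidth);
    }
    // e.g. 1440x1080 coded with a 16:9 display aspect yields 1920x1080.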
-
-void MatroskaExtractor::findThumbnails() {
- for (size_t i = 0; i < mTracks.size(); ++i) {
- TrackInfo *info = &mTracks.editItemAt(i);
-
- const char *mime;
- CHECK(info->mMeta->findCString(kKeyMIMEType, &mime));
-
- if (strncasecmp(mime, "video/", 6)) {
- continue;
- }
-
- BlockIterator iter(this, info->mTrackNum, i);
- int32_t j = 0;
- int64_t thumbnailTimeUs = 0;
- size_t maxBlockSize = 0;
- while (!iter.eos() && j < 20) {
- if (iter.block()->IsKey()) {
- ++j;
-
- size_t blockSize = 0;
- for (int k = 0; k < iter.block()->GetFrameCount(); ++k) {
- blockSize += iter.block()->GetFrame(k).len;
- }
-
- if (blockSize > maxBlockSize) {
- maxBlockSize = blockSize;
- thumbnailTimeUs = iter.blockTimeUs();
- }
- }
- iter.advance();
- }
- info->mMeta->setInt64(kKeyThumbnailTime, thumbnailTimeUs);
- }
-}
-
-sp<MetaData> MatroskaExtractor::getMetaData() {
- sp<MetaData> meta = new MetaData;
-
- meta->setCString(
- kKeyMIMEType,
- mIsWebm ? "video/webm" : MEDIA_MIMETYPE_CONTAINER_MATROSKA);
-
- return meta;
-}
-
-uint32_t MatroskaExtractor::flags() const {
- uint32_t x = CAN_PAUSE;
- if (!isLiveStreaming()) {
- x |= CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK;
- }
-
- return x;
-}
-
-bool SniffMatroska(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *) {
- DataSourceReader reader(source);
- mkvparser::EBMLHeader ebmlHeader;
- long long pos;
- if (ebmlHeader.Parse(&reader, pos) < 0) {
- return false;
- }
-
- mimeType->setTo(MEDIA_MIMETYPE_CONTAINER_MATROSKA);
- *confidence = 0.6;
-
- return true;
-}
-
-} // namespace android
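
One detail from addTracks() above: mkvparser reports the segment duration in nanoseconds while kKeyDuration is stored in microseconds, so (durationNs + 500) / 1000 is a round-to-nearest division. A tiny check of that arithmetic (hypothetical helper):

    #include <cstdint>

    // Round-to-nearest conversion from nanoseconds to microseconds.
    static int64_t nsToUsRounded(int64_t ns) {
        return (ns + 500) / 1000;
    }
    // nsToUsRounded(1499) == 1, nsToUsRounded(1500) == 2.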
diff --git a/media/libstagefright/matroska/MatroskaExtractor.h b/media/libstagefright/matroska/MatroskaExtractor.h
deleted file mode 100644
index 19775ce..0000000
--- a/media/libstagefright/matroska/MatroskaExtractor.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MATROSKA_EXTRACTOR_H_
-
-#define MATROSKA_EXTRACTOR_H_
-
-#include "mkvparser/mkvparser.h"
-
-#include <media/stagefright/MediaExtractor.h>
-#include <utils/Vector.h>
-#include <utils/threads.h>
-
-namespace android {
-
-struct AMessage;
-class String8;
-
-class MetaData;
-struct DataSourceReader;
-struct MatroskaSource;
-
-struct MatroskaExtractor : public MediaExtractor {
- explicit MatroskaExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
-
- virtual sp<IMediaSource> getTrack(size_t index);
-
- virtual sp<MetaData> getTrackMetaData(
- size_t index, uint32_t flags);
-
- virtual sp<MetaData> getMetaData();
-
- virtual uint32_t flags() const;
-
- virtual const char * name() { return "MatroskaExtractor"; }
-
-protected:
- virtual ~MatroskaExtractor();
-
-private:
- friend struct MatroskaSource;
- friend struct BlockIterator;
-
- struct TrackInfo {
- unsigned long mTrackNum;
- bool mEncrypted;
- sp<MetaData> mMeta;
- const MatroskaExtractor *mExtractor;
- Vector<const mkvparser::CuePoint*> mCuePoints;
-
- // mHeader points to memory managed by mkvparser;
- // mHeader would be deleted when mSegment is deleted
- // in ~MatroskaExtractor.
- unsigned char *mHeader;
- size_t mHeaderLen;
-
- const mkvparser::Track* getTrack() const;
- const mkvparser::CuePoint::TrackPosition *find(long long timeNs) const;
- };
-
- Mutex mLock;
- Vector<TrackInfo> mTracks;
-
- sp<DataSource> mDataSource;
- DataSourceReader *mReader;
- mkvparser::Segment *mSegment;
- bool mExtractedThumbnails;
- bool mIsLiveStreaming;
- bool mIsWebm;
- int64_t mSeekPreRollNs;
-
- status_t synthesizeAVCC(TrackInfo *trackInfo, size_t index);
- status_t initTrackInfo(const mkvparser::Track *track, const sp<MetaData> &meta, TrackInfo *trackInfo);
- void addTracks();
- void findThumbnails();
- void getColorInformation(const mkvparser::VideoTrack *vtrack, sp<MetaData> &meta);
- bool isLiveStreaming() const;
-
- MatroskaExtractor(const MatroskaExtractor &);
- MatroskaExtractor &operator=(const MatroskaExtractor &);
-};
-
-bool SniffMatroska(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // MATROSKA_EXTRACTOR_H_
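
The mHeader/mHeaderLen fields declared above hold Matroska "header stripping" bytes (ContentCompAlgo 3): the muxer removed a common prefix from every frame, and a reader has to glue it back on before decoding. A standalone sketch of that reassembly (hypothetical helper, not part of the tree):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Re-prepend the stripped header bytes to a frame read from a block.
    static std::vector<uint8_t> unstripFrame(const uint8_t *strippedHeader, size_t headerLen,
                                             const uint8_t *frame, size_t frameLen) {
        std::vector<uint8_t> out(headerLen + frameLen);
        if (headerLen != 0) {
            memcpy(out.data(), strippedHeader, headerLen);
        }
        if (frameLen != 0) {
            memcpy(out.data() + headerLen, frame, frameLen);
        }
        return out;
    }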
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index a256a4d..5cc5093 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -21,19 +21,24 @@
#include "AnotherPacketSource.h"
#include "CasManager.h"
#include "ESQueue.h"
-#include "include/avc_utils.h"
#include <android/hardware/cas/native/1.0/IDescrambler.h>
+#include <binder/IMemory.h>
+#include <binder/MemoryDealer.h>
#include <cutils/native_handle.h>
+#include <hidlmemory/FrameworkUtils.h>
+#include <media/cas/DescramblerAPI.h>
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/foundation/MediaKeys.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
#include <media/IStreamSource.h>
#include <utils/KeyedVector.h>
#include <utils/Vector.h>
@@ -41,10 +46,10 @@
#include <inttypes.h>
namespace android {
-using hardware::hidl_handle;
-using hardware::hidl_memory;
+using hardware::fromHeap;
using hardware::hidl_string;
using hardware::hidl_vec;
+using hardware::HidlMemory;
using namespace hardware::cas::V1_0;
using namespace hardware::cas::native::V1_0;
@@ -76,7 +81,7 @@
void signalEOS(status_t finalResult);
- sp<MediaSource> getSource(SourceType type);
+ sp<AnotherPacketSource> getSource(SourceType type);
bool hasSource(SourceType type) const;
int64_t convertPTSToTimestamp(uint64_t PTS);
@@ -169,7 +174,7 @@
void signalEOS(status_t finalResult);
SourceType getSourceType();
- sp<MediaSource> getSource(SourceType type);
+ sp<AnotherPacketSource> getSource(SourceType type);
bool isAudio() const;
bool isVideo() const;
@@ -207,6 +212,7 @@
sp<AMessage> mSampleAesKeyItem;
sp<IMemory> mMem;
sp<MemoryDealer> mDealer;
+ sp<HidlMemory> mHidlMemory;
hardware::cas::native::V1_0::SharedBuffer mDescramblerSrcBuffer;
sp<ABuffer> mDescrambledBuffer;
List<SubSampleInfo> mSubSamples;
@@ -273,7 +279,7 @@
ATSParser::SyncEvent::SyncEvent(off64_t offset)
: mHasReturnedData(false), mOffset(offset), mTimeUs(0) {}
-void ATSParser::SyncEvent::init(off64_t offset, const sp<MediaSource> &source,
+void ATSParser::SyncEvent::init(off64_t offset, const sp<AnotherPacketSource> &source,
int64_t timeUs, SourceType type) {
mHasReturnedData = true;
mOffset = offset;
@@ -341,7 +347,7 @@
if ((type & DISCONTINUITY_TIME)
&& extra != NULL
&& extra->findInt64(
- IStreamListener::kKeyMediaTimeUs, &mediaTimeUs)) {
+ kATSParserKeyMediaTimeUs, &mediaTimeUs)) {
mFirstPTSValid = false;
}
@@ -640,9 +646,9 @@
return mLastRecoveredPTS;
}
-sp<MediaSource> ATSParser::Program::getSource(SourceType type) {
+sp<AnotherPacketSource> ATSParser::Program::getSource(SourceType type) {
for (size_t i = 0; i < mStreams.size(); ++i) {
- sp<MediaSource> source = mStreams.editValueAt(i)->getSource(type);
+ sp<AnotherPacketSource> source = mStreams.editValueAt(i)->getSource(type);
if (source != NULL) {
return source;
}
@@ -849,14 +855,9 @@
if (heap == NULL) {
return false;
}
- native_handle_t* nativeHandle = native_handle_create(1, 0);
- if (!nativeHandle) {
- ALOGE("[stream %d] failed to create native handle", mElementaryPID);
- return false;
- }
- nativeHandle->data[0] = heap->getHeapID();
- mDescramblerSrcBuffer.heapBase = hidl_memory("ashmem",
- hidl_handle(nativeHandle), heap->getSize());
+
+ mHidlMemory = fromHeap(heap);
+ mDescramblerSrcBuffer.heapBase = *mHidlMemory;
mDescramblerSrcBuffer.offset = (uint64_t) offset;
mDescramblerSrcBuffer.size = (uint64_t) size;
@@ -1032,7 +1033,7 @@
uint64_t resumeAtPTS;
if (extra != NULL
&& extra->findInt64(
- IStreamListener::kKeyResumeAtPTS,
+ kATSParserKeyResumeAtPTS,
(int64_t *)&resumeAtPTS)) {
int64_t resumeAtMediaTimeUs =
mProgram->convertPTSToTimestamp(resumeAtPTS);
@@ -1387,6 +1388,9 @@
uint32_t sctrl = tsScramblingControl != 0 ?
tsScramblingControl : pesScramblingControl;
+ if (mQueue->isScrambled()) {
+ sctrl |= DescramblerPlugin::kScrambling_Flag_PesHeader;
+ }
// Perform the 1st pass descrambling if needed
if (descrambleBytes > 0) {
@@ -1606,7 +1610,7 @@
return NUM_SOURCE_TYPES;
}
-sp<MediaSource> ATSParser::Stream::getSource(SourceType type) {
+sp<AnotherPacketSource> ATSParser::Stream::getSource(SourceType type) {
switch (type) {
case VIDEO:
{
@@ -1694,12 +1698,12 @@
DiscontinuityType type, const sp<AMessage> &extra) {
int64_t mediaTimeUs;
if ((type & DISCONTINUITY_TIME) && extra != NULL) {
- if (extra->findInt64(IStreamListener::kKeyMediaTimeUs, &mediaTimeUs)) {
+ if (extra->findInt64(kATSParserKeyMediaTimeUs, &mediaTimeUs)) {
mAbsoluteTimeAnchorUs = mediaTimeUs;
}
if ((mFlags & TS_TIMESTAMPS_ARE_ABSOLUTE)
&& extra->findInt64(
- IStreamListener::kKeyRecentMediaTimeUs, &mediaTimeUs)) {
+ kATSParserKeyRecentMediaTimeUs, &mediaTimeUs)) {
if (mAbsoluteTimeAnchorUs >= 0ll) {
mediaTimeUs -= mAbsoluteTimeAnchorUs;
}
@@ -2041,11 +2045,11 @@
return err;
}
-sp<MediaSource> ATSParser::getSource(SourceType type) {
- sp<MediaSource> firstSourceFound;
+sp<AnotherPacketSource> ATSParser::getSource(SourceType type) {
+ sp<AnotherPacketSource> firstSourceFound;
for (size_t i = 0; i < mPrograms.size(); ++i) {
const sp<Program> &program = mPrograms.editItemAt(i);
- sp<MediaSource> source = program->getSource(type);
+ sp<AnotherPacketSource> source = program->getSource(type);
if (source == NULL) {
continue;
}
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 41c19cd..45ca06b 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -20,9 +20,9 @@
#include <sys/types.h>
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaSource.h>
#include <utils/KeyedVector.h>
#include <utils/Vector.h>
#include <utils/RefBase.h>
@@ -81,13 +81,13 @@
struct SyncEvent {
explicit SyncEvent(off64_t offset);
- void init(off64_t offset, const sp<MediaSource> &source,
+ void init(off64_t offset, const sp<AnotherPacketSource> &source,
int64_t timeUs, SourceType type);
bool hasReturnedData() const { return mHasReturnedData; }
void reset();
off64_t getOffset() const { return mOffset; }
- const sp<MediaSource> &getMediaSource() const { return mMediaSource; }
+ const sp<AnotherPacketSource> &getMediaSource() const { return mMediaSource; }
int64_t getTimeUs() const { return mTimeUs; }
SourceType getType() const { return mType; }
@@ -100,7 +100,7 @@
*/
off64_t mOffset;
/* The media source object for this event. */
- sp<MediaSource> mMediaSource;
+ sp<AnotherPacketSource> mMediaSource;
/* The timestamp of the sync frame. */
int64_t mTimeUs;
SourceType mType;
@@ -126,7 +126,7 @@
void signalEOS(status_t finalResult);
- sp<MediaSource> getSource(SourceType type);
+ sp<AnotherPacketSource> getSource(SourceType type);
bool hasSource(SourceType type) const;
bool PTSTimeDeltaEstablished();
diff --git a/media/libstagefright/mpeg2ts/Android.bp b/media/libstagefright/mpeg2ts/Android.bp
index 21259c4..fbf1496 100644
--- a/media/libstagefright/mpeg2ts/Android.bp
+++ b/media/libstagefright/mpeg2ts/Android.bp
@@ -1,5 +1,5 @@
cc_library_static {
- name: "libstagefright_mpeg2ts",
+ name: "libstagefright_mpeg2support",
srcs: [
"AnotherPacketSource.cpp",
@@ -7,8 +7,6 @@
"CasManager.cpp",
"ESQueue.cpp",
"HlsSampleDecryptor.cpp",
- "MPEG2PSExtractor.cpp",
- "MPEG2TSExtractor.cpp",
],
include_dirs: [
@@ -35,8 +33,12 @@
shared_libs: [
"libcrypto",
"libmedia",
- "libhidlmemory",
+ "libhidlallocatorutils",
"android.hardware.cas.native@1.0",
"android.hidl.memory@1.0",
],
+
+ whole_static_libs: [
+ "libstagefright_metadatautils",
+ ],
}
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 433b1fc..ece0692 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -19,13 +19,12 @@
#include "AnotherPacketSource.h"
-#include "include/avc_utils.h"
-
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
@@ -164,7 +163,7 @@
}
status_t AnotherPacketSource::read(
- MediaBuffer **out, const ReadOptions *) {
+ MediaBufferBase **out, const ReadOptions *) {
*out = NULL;
Mutex::Autolock autoLock(mLock);
@@ -203,24 +202,24 @@
seg.mMaxDequeTimeUs = timeUs;
}
- MediaBuffer *mediaBuffer = new MediaBuffer(buffer);
- sp<MetaData> bufmeta = mediaBuffer->meta_data();
+ MediaBufferBase *mediaBuffer = new MediaBuffer(buffer);
+ MetaDataBase &bufmeta = mediaBuffer->meta_data();
- bufmeta->setInt64(kKeyTime, timeUs);
+ bufmeta.setInt64(kKeyTime, timeUs);
int32_t isSync;
if (buffer->meta()->findInt32("isSync", &isSync)) {
- bufmeta->setInt32(kKeyIsSyncFrame, isSync);
+ bufmeta.setInt32(kKeyIsSyncFrame, isSync);
}
sp<ABuffer> sei;
if (buffer->meta()->findBuffer("sei", &sei) && sei != NULL) {
- bufmeta->setData(kKeySEI, 0, sei->data(), sei->size());
+ bufmeta.setData(kKeySEI, 0, sei->data(), sei->size());
}
sp<ABuffer> mpegUserData;
- if (buffer->meta()->findBuffer("mpegUserData", &mpegUserData) && mpegUserData != NULL) {
- bufmeta->setData(
+ if (buffer->meta()->findBuffer("mpeg-user-data", &mpegUserData) && mpegUserData != NULL) {
+ bufmeta.setData(
kKeyMpegUserData, 0, mpegUserData->data(), mpegUserData->size());
}
@@ -235,18 +234,18 @@
CHECK(buffer->meta()->findBuffer("encBytes", &encBytesBuffer)
&& encBytesBuffer != NULL);
- bufmeta->setInt32(kKeyCryptoMode, cryptoMode);
+ bufmeta.setInt32(kKeyCryptoMode, cryptoMode);
uint8_t array[16] = {0};
- bufmeta->setData(kKeyCryptoIV, 0, array, 16);
+ bufmeta.setData(kKeyCryptoIV, 0, array, 16);
array[0] = (uint8_t) (cryptoKey & 0xff);
- bufmeta->setData(kKeyCryptoKey, 0, array, 16);
+ bufmeta.setData(kKeyCryptoKey, 0, array, 16);
- bufmeta->setData(kKeyPlainSizes, 0,
+ bufmeta.setData(kKeyPlainSizes, 0,
clearBytesBuffer->data(), clearBytesBuffer->size());
- bufmeta->setData(kKeyEncryptedSizes, 0,
+ bufmeta.setData(kKeyEncryptedSizes, 0,
encBytesBuffer->data(), encBytesBuffer->size());
}
@@ -663,7 +662,7 @@
&& !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
}
}
- if (isAvc && !IsIDR(buffer)) {
+ if (isAvc && !IsIDR(buffer->data(), buffer->size())) {
continue;
}
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index b0890d7..f4a6acb 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -18,8 +18,8 @@
#define ANOTHER_PACKET_SOURCE_H_
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/MediaSource.h>
#include <utils/threads.h>
#include <utils/List.h>
@@ -39,7 +39,7 @@
virtual sp<MetaData> getFormat();
virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
+ MediaBufferBase **buffer, const ReadOptions *options = NULL);
void clear();
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 1cf9744..0fa9fcb 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -24,15 +24,15 @@
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/MetaDataUtils.h>
#include <media/cas/DescramblerAPI.h>
#include <media/hardware/CryptoAPI.h>
-#include "include/avc_utils.h"
-
#include <inttypes.h>
#include <netinet/in.h>
@@ -633,7 +633,10 @@
mBuffer->setRange(0, mBuffer->size() - info.mLength);
if (mFormat == NULL) {
- mFormat = MakeAVCCodecSpecificData(accessUnit);
+ mFormat = new MetaData;
+ if (!MakeAVCCodecSpecificData(*mFormat, accessUnit->data(), accessUnit->size())) {
+ mFormat.clear();
+ }
}
return accessUnit;
@@ -862,7 +865,8 @@
}
bits.skipBits(2); // original_copy, home
- mFormat = MakeAACCodecSpecificData(
+ mFormat = new MetaData;
+ MakeAACCodecSpecificData(*mFormat,
profile, sampling_freq_index, channel_configuration);
mFormat->setInt32(kKeyIsADTS, true);
@@ -1005,9 +1009,9 @@
return NULL;
}
if (mFormat == NULL) {
- mFormat = MakeAVCCodecSpecificData(mBuffer);
- if (mFormat == NULL) {
- ALOGI("Creating dummy AVC format for scrambled content");
+ mFormat = new MetaData;
+ if (!MakeAVCCodecSpecificData(*mFormat, mBuffer->data(), mBuffer->size())) {
+ ALOGW("Creating dummy AVC format for scrambled content");
mFormat = new MetaData;
mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
mFormat->setInt32(kKeyWidth, 1280);
@@ -1167,7 +1171,12 @@
}
if (mFormat == NULL) {
- mFormat = MakeAVCCodecSpecificData(accessUnit);
+ mFormat = new MetaData;
+ if (!MakeAVCCodecSpecificData(*mFormat,
+ accessUnit->data(),
+ accessUnit->size())) {
+ mFormat.clear();
+ }
}
if (mSampleDecryptor != NULL && shrunkBytes > 0) {
@@ -1468,7 +1477,7 @@
mpegUserData->data() + i * sizeof(size_t),
&userDataPositions[i], sizeof(size_t));
}
- accessUnit->meta()->setBuffer("mpegUserData", mpegUserData);
+ accessUnit->meta()->setBuffer("mpeg-user-data", mpegUserData);
}
}
@@ -1486,7 +1495,9 @@
const uint8_t *data, size_t size) {
static const char kStartCode[] = "\x00\x00\x01";
- if (size < 3) {
+ // per ISO/IEC 14496-2 6.2.1, a chunk has a 3-byte prefix + 1-byte start code
+ // we need at least <prefix><start><next prefix> to successfully scan
+ if (size < 3 + 1 + 3) {
return -EAGAIN;
}
@@ -1494,7 +1505,7 @@
return -EAGAIN;
}
- size_t offset = 3;
+ size_t offset = 4;
while (offset + 2 < size) {
if (!memcmp(&data[offset], kStartCode, 3)) {
return offset;
@@ -1545,6 +1556,9 @@
state = EXPECT_VISUAL_OBJECT_START;
} else {
discard = true;
+ offset += chunkSize;
+ ALOGW("b/74114680, advance to next chunk");
+ android_errorWriteLog(0x534e4554, "74114680");
}
break;
}
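
The ESQueue.cpp hunk above tightens the MPEG-4 Visual chunk scan: a chunk starts with a 3-byte 00 00 01 prefix plus a 1-byte start code, so at least 3 + 1 + 3 bytes are needed before the next prefix can possibly be found, and the search now resumes at offset 4. A standalone sketch of the patched scan under those assumptions (hypothetical helper):

    #include <cerrno>
    #include <cstdint>
    #include <cstring>
    #include <sys/types.h>

    // Returns the offset of the next 00 00 01 prefix after the current chunk's
    // start code, or -EAGAIN when more data is needed.
    static ssize_t findNextStartCode(const uint8_t *data, size_t size) {
        static const char kStartCode[] = "\x00\x00\x01";
        if (size < 3 + 1 + 3) {             // <prefix><start code><next prefix>
            return -EAGAIN;
        }
        if (memcmp(data, kStartCode, 3)) {  // must begin on a start-code prefix
            return -EAGAIN;
        }
        size_t offset = 4;                  // skip prefix (3) + start code byte (1)
        while (offset + 2 < size) {
            if (!memcmp(&data[offset], kStartCode, 3)) {
                return offset;
            }
            ++offset;
        }
        return -EAGAIN;
    }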
diff --git a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
deleted file mode 100644
index 078a5f0..0000000
--- a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
+++ /dev/null
@@ -1,772 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MPEG2PSExtractor"
-#include <utils/Log.h>
-
-#include "include/MPEG2PSExtractor.h"
-
-#include "AnotherPacketSource.h"
-#include "ESQueue.h"
-
-#include <media/stagefright/foundation/ABitReader.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-#include <utils/String8.h>
-
-#include <inttypes.h>
-
-namespace android {
-
-struct MPEG2PSExtractor::Track : public MediaSource {
- Track(MPEG2PSExtractor *extractor,
- unsigned stream_id, unsigned stream_type);
-
- virtual status_t start(MetaData *params);
- virtual status_t stop();
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
- virtual ~Track();
-
-private:
- friend struct MPEG2PSExtractor;
-
- MPEG2PSExtractor *mExtractor;
-
- unsigned mStreamID;
- unsigned mStreamType;
- ElementaryStreamQueue *mQueue;
- sp<AnotherPacketSource> mSource;
-
- status_t appendPESData(
- unsigned PTS_DTS_flags,
- uint64_t PTS, uint64_t DTS,
- const uint8_t *data, size_t size);
-
- DISALLOW_EVIL_CONSTRUCTORS(Track);
-};
-
-struct MPEG2PSExtractor::WrappedTrack : public MediaSource {
- WrappedTrack(const sp<MPEG2PSExtractor> &extractor, const sp<Track> &track);
-
- virtual status_t start(MetaData *params);
- virtual status_t stop();
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
- virtual ~WrappedTrack();
-
-private:
- sp<MPEG2PSExtractor> mExtractor;
- sp<MPEG2PSExtractor::Track> mTrack;
-
- DISALLOW_EVIL_CONSTRUCTORS(WrappedTrack);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-MPEG2PSExtractor::MPEG2PSExtractor(const sp<DataSource> &source)
- : mDataSource(source),
- mOffset(0),
- mFinalResult(OK),
- mBuffer(new ABuffer(0)),
- mScanning(true),
- mProgramStreamMapValid(false) {
- for (size_t i = 0; i < 500; ++i) {
- if (feedMore() != OK) {
- break;
- }
- }
-
- // Remove all tracks that were unable to determine their format.
- for (size_t i = mTracks.size(); i > 0;) {
- i--;
- if (mTracks.valueAt(i)->getFormat() == NULL) {
- mTracks.removeItemsAt(i);
- }
- }
-
- mScanning = false;
-}
-
-MPEG2PSExtractor::~MPEG2PSExtractor() {
-}
-
-size_t MPEG2PSExtractor::countTracks() {
- return mTracks.size();
-}
-
-sp<IMediaSource> MPEG2PSExtractor::getTrack(size_t index) {
- if (index >= mTracks.size()) {
- return NULL;
- }
-
- return new WrappedTrack(this, mTracks.valueAt(index));
-}
-
-sp<MetaData> MPEG2PSExtractor::getTrackMetaData(
- size_t index, uint32_t /* flags */) {
- if (index >= mTracks.size()) {
- return NULL;
- }
-
- return mTracks.valueAt(index)->getFormat();
-}
-
-sp<MetaData> MPEG2PSExtractor::getMetaData() {
- sp<MetaData> meta = new MetaData;
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG2PS);
-
- return meta;
-}
-
-uint32_t MPEG2PSExtractor::flags() const {
- return CAN_PAUSE;
-}
-
-status_t MPEG2PSExtractor::feedMore() {
- Mutex::Autolock autoLock(mLock);
-
- // How much data we're reading at a time
- static const size_t kChunkSize = 8192;
-
- for (;;) {
- status_t err = dequeueChunk();
-
- if (err == -EAGAIN && mFinalResult == OK) {
- memmove(mBuffer->base(), mBuffer->data(), mBuffer->size());
- mBuffer->setRange(0, mBuffer->size());
-
- if (mBuffer->size() + kChunkSize > mBuffer->capacity()) {
- size_t newCapacity = mBuffer->capacity() + kChunkSize;
- sp<ABuffer> newBuffer = new ABuffer(newCapacity);
- memcpy(newBuffer->data(), mBuffer->data(), mBuffer->size());
- newBuffer->setRange(0, mBuffer->size());
- mBuffer = newBuffer;
- }
-
- ssize_t n = mDataSource->readAt(
- mOffset, mBuffer->data() + mBuffer->size(), kChunkSize);
-
- if (n < (ssize_t)kChunkSize) {
- mFinalResult = (n < 0) ? (status_t)n : ERROR_END_OF_STREAM;
- return mFinalResult;
- }
-
- mBuffer->setRange(mBuffer->offset(), mBuffer->size() + n);
- mOffset += n;
- } else if (err != OK) {
- mFinalResult = err;
- return err;
- } else {
- return OK;
- }
- }
-}
-
-status_t MPEG2PSExtractor::dequeueChunk() {
- if (mBuffer->size() < 4) {
- return -EAGAIN;
- }
-
- if (memcmp("\x00\x00\x01", mBuffer->data(), 3)) {
- return ERROR_MALFORMED;
- }
-
- unsigned chunkType = mBuffer->data()[3];
-
- ssize_t res;
-
- switch (chunkType) {
- case 0xba:
- {
- res = dequeuePack();
- break;
- }
-
- case 0xbb:
- {
- res = dequeueSystemHeader();
- break;
- }
-
- default:
- {
- res = dequeuePES();
- break;
- }
- }
-
- if (res > 0) {
- if (mBuffer->size() < (size_t)res) {
- return -EAGAIN;
- }
-
- mBuffer->setRange(mBuffer->offset() + res, mBuffer->size() - res);
- res = OK;
- }
-
- return res;
-}
-
-ssize_t MPEG2PSExtractor::dequeuePack() {
- // 32 + 2 + 3 + 1 + 15 + 1 + 15+ 1 + 9 + 1 + 22 + 1 + 1 | +5
-
- if (mBuffer->size() < 14) {
- return -EAGAIN;
- }
-
- unsigned pack_stuffing_length = mBuffer->data()[13] & 7;
-
- return pack_stuffing_length + 14;
-}
-
-ssize_t MPEG2PSExtractor::dequeueSystemHeader() {
- if (mBuffer->size() < 6) {
- return -EAGAIN;
- }
-
- unsigned header_length = U16_AT(mBuffer->data() + 4);
-
- return header_length + 6;
-}
-
-ssize_t MPEG2PSExtractor::dequeuePES() {
- if (mBuffer->size() < 6) {
- return -EAGAIN;
- }
-
- unsigned PES_packet_length = U16_AT(mBuffer->data() + 4);
- if (PES_packet_length == 0u) {
- ALOGE("PES_packet_length is 0");
- return -EAGAIN;
- }
-
- size_t n = PES_packet_length + 6;
-
- if (mBuffer->size() < n) {
- return -EAGAIN;
- }
-
- ABitReader br(mBuffer->data(), n);
-
- unsigned packet_startcode_prefix = br.getBits(24);
-
- ALOGV("packet_startcode_prefix = 0x%08x", packet_startcode_prefix);
-
- if (packet_startcode_prefix != 1) {
- ALOGV("Supposedly payload_unit_start=1 unit does not start "
- "with startcode.");
-
- return ERROR_MALFORMED;
- }
-
- if (packet_startcode_prefix != 0x000001u) {
- ALOGE("Wrong PES prefix");
- return ERROR_MALFORMED;
- }
-
- unsigned stream_id = br.getBits(8);
- ALOGV("stream_id = 0x%02x", stream_id);
-
- /* unsigned PES_packet_length = */br.getBits(16);
-
- if (stream_id == 0xbc) {
- // program_stream_map
-
- if (!mScanning) {
- return n;
- }
-
- mStreamTypeByESID.clear();
-
- /* unsigned current_next_indicator = */br.getBits(1);
- /* unsigned reserved = */br.getBits(2);
- /* unsigned program_stream_map_version = */br.getBits(5);
- /* unsigned reserved = */br.getBits(7);
- /* unsigned marker_bit = */br.getBits(1);
- unsigned program_stream_info_length = br.getBits(16);
-
- size_t offset = 0;
- while (offset < program_stream_info_length) {
- if (offset + 2 > program_stream_info_length) {
- return ERROR_MALFORMED;
- }
-
- unsigned descriptor_tag = br.getBits(8);
- unsigned descriptor_length = br.getBits(8);
-
- ALOGI("found descriptor tag 0x%02x of length %u",
- descriptor_tag, descriptor_length);
-
- if (offset + 2 + descriptor_length > program_stream_info_length) {
- return ERROR_MALFORMED;
- }
-
- br.skipBits(8 * descriptor_length);
-
- offset += 2 + descriptor_length;
- }
-
- unsigned elementary_stream_map_length = br.getBits(16);
-
- offset = 0;
- while (offset < elementary_stream_map_length) {
- if (offset + 4 > elementary_stream_map_length) {
- return ERROR_MALFORMED;
- }
-
- unsigned stream_type = br.getBits(8);
- unsigned elementary_stream_id = br.getBits(8);
-
- ALOGI("elementary stream id 0x%02x has stream type 0x%02x",
- elementary_stream_id, stream_type);
-
- mStreamTypeByESID.add(elementary_stream_id, stream_type);
-
- unsigned elementary_stream_info_length = br.getBits(16);
-
- if (offset + 4 + elementary_stream_info_length
- > elementary_stream_map_length) {
- return ERROR_MALFORMED;
- }
-
- offset += 4 + elementary_stream_info_length;
- }
-
- /* unsigned CRC32 = */br.getBits(32);
-
- mProgramStreamMapValid = true;
- } else if (stream_id != 0xbe // padding_stream
- && stream_id != 0xbf // private_stream_2
- && stream_id != 0xf0 // ECM
- && stream_id != 0xf1 // EMM
- && stream_id != 0xff // program_stream_directory
- && stream_id != 0xf2 // DSMCC
- && stream_id != 0xf8) { // H.222.1 type E
- /* unsigned PES_marker_bits = */br.getBits(2); // should be 0x2(hex)
- /* unsigned PES_scrambling_control = */br.getBits(2);
- /* unsigned PES_priority = */br.getBits(1);
- /* unsigned data_alignment_indicator = */br.getBits(1);
- /* unsigned copyright = */br.getBits(1);
- /* unsigned original_or_copy = */br.getBits(1);
-
- unsigned PTS_DTS_flags = br.getBits(2);
- ALOGV("PTS_DTS_flags = %u", PTS_DTS_flags);
-
- unsigned ESCR_flag = br.getBits(1);
- ALOGV("ESCR_flag = %u", ESCR_flag);
-
- unsigned ES_rate_flag = br.getBits(1);
- ALOGV("ES_rate_flag = %u", ES_rate_flag);
-
- unsigned DSM_trick_mode_flag = br.getBits(1);
- ALOGV("DSM_trick_mode_flag = %u", DSM_trick_mode_flag);
-
- unsigned additional_copy_info_flag = br.getBits(1);
- ALOGV("additional_copy_info_flag = %u", additional_copy_info_flag);
-
- /* unsigned PES_CRC_flag = */br.getBits(1);
- /* PES_extension_flag = */br.getBits(1);
-
- unsigned PES_header_data_length = br.getBits(8);
- ALOGV("PES_header_data_length = %u", PES_header_data_length);
-
- unsigned optional_bytes_remaining = PES_header_data_length;
-
- uint64_t PTS = 0, DTS = 0;
-
- if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
- if (optional_bytes_remaining < 5u) {
- return ERROR_MALFORMED;
- }
-
- if (br.getBits(4) != PTS_DTS_flags) {
- return ERROR_MALFORMED;
- }
-
- PTS = ((uint64_t)br.getBits(3)) << 30;
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
- PTS |= ((uint64_t)br.getBits(15)) << 15;
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
- PTS |= br.getBits(15);
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
-
- ALOGV("PTS = %" PRIu64, PTS);
- // ALOGI("PTS = %.2f secs", PTS / 90000.0f);
-
- optional_bytes_remaining -= 5;
-
- if (PTS_DTS_flags == 3) {
- if (optional_bytes_remaining < 5u) {
- return ERROR_MALFORMED;
- }
-
- if (br.getBits(4) != 1u) {
- return ERROR_MALFORMED;
- }
-
- DTS = ((uint64_t)br.getBits(3)) << 30;
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
- DTS |= ((uint64_t)br.getBits(15)) << 15;
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
- DTS |= br.getBits(15);
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
-
- ALOGV("DTS = %" PRIu64, DTS);
-
- optional_bytes_remaining -= 5;
- }
- }
-
- if (ESCR_flag) {
- if (optional_bytes_remaining < 6u) {
- return ERROR_MALFORMED;
- }
-
- br.getBits(2);
-
- uint64_t ESCR = ((uint64_t)br.getBits(3)) << 30;
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
- ESCR |= ((uint64_t)br.getBits(15)) << 15;
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
- ESCR |= br.getBits(15);
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
-
- ALOGV("ESCR = %" PRIu64, ESCR);
- /* unsigned ESCR_extension = */br.getBits(9);
-
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
-
- optional_bytes_remaining -= 6;
- }
-
- if (ES_rate_flag) {
- if (optional_bytes_remaining < 3u) {
- return ERROR_MALFORMED;
- }
-
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
- /* unsigned ES_rate = */br.getBits(22);
- if (br.getBits(1) != 1u) {
- return ERROR_MALFORMED;
- }
-
- optional_bytes_remaining -= 3;
- }
-
- if (br.numBitsLeft() < optional_bytes_remaining * 8) {
- return ERROR_MALFORMED;
- }
-
- br.skipBits(optional_bytes_remaining * 8);
-
- // ES data follows.
-
- if (PES_packet_length < PES_header_data_length + 3) {
- return ERROR_MALFORMED;
- }
-
- unsigned dataLength =
- PES_packet_length - 3 - PES_header_data_length;
-
- if (br.numBitsLeft() < dataLength * 8) {
- ALOGE("PES packet does not carry enough data to contain "
- "payload. (numBitsLeft = %zu, required = %u)",
- br.numBitsLeft(), dataLength * 8);
-
- return ERROR_MALFORMED;
- }
-
- if (br.numBitsLeft() < dataLength * 8) {
- return ERROR_MALFORMED;
- }
-
- ssize_t index = mTracks.indexOfKey(stream_id);
- if (index < 0 && mScanning) {
- unsigned streamType;
-
- ssize_t streamTypeIndex;
- if (mProgramStreamMapValid
- && (streamTypeIndex =
- mStreamTypeByESID.indexOfKey(stream_id)) >= 0) {
- streamType = mStreamTypeByESID.valueAt(streamTypeIndex);
- } else if ((stream_id & ~0x1f) == 0xc0) {
- // ISO/IEC 13818-3 or ISO/IEC 11172-3 or ISO/IEC 13818-7
- // or ISO/IEC 14496-3 audio
- streamType = ATSParser::STREAMTYPE_MPEG2_AUDIO;
- } else if ((stream_id & ~0x0f) == 0xe0) {
- // ISO/IEC 13818-2 or ISO/IEC 11172-2 or ISO/IEC 14496-2 video
- streamType = ATSParser::STREAMTYPE_MPEG2_VIDEO;
- } else {
- streamType = ATSParser::STREAMTYPE_RESERVED;
- }
-
- index = mTracks.add(
- stream_id, new Track(this, stream_id, streamType));
- }
-
- status_t err = OK;
-
- if (index >= 0) {
- err =
- mTracks.editValueAt(index)->appendPESData(
- PTS_DTS_flags, PTS, DTS, br.data(), dataLength);
- }
-
- br.skipBits(dataLength * 8);
-
- if (err != OK) {
- return err;
- }
- } else if (stream_id == 0xbe) { // padding_stream
- if (PES_packet_length == 0u) {
- return ERROR_MALFORMED;
- }
- br.skipBits(PES_packet_length * 8);
- } else {
- if (PES_packet_length == 0u) {
- return ERROR_MALFORMED;
- }
- br.skipBits(PES_packet_length * 8);
- }
-
- return n;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-MPEG2PSExtractor::Track::Track(
- MPEG2PSExtractor *extractor, unsigned stream_id, unsigned stream_type)
- : mExtractor(extractor),
- mStreamID(stream_id),
- mStreamType(stream_type),
- mQueue(NULL) {
- bool supported = true;
- ElementaryStreamQueue::Mode mode;
-
- switch (mStreamType) {
- case ATSParser::STREAMTYPE_H264:
- mode = ElementaryStreamQueue::H264;
- break;
- case ATSParser::STREAMTYPE_MPEG2_AUDIO_ADTS:
- mode = ElementaryStreamQueue::AAC;
- break;
- case ATSParser::STREAMTYPE_MPEG1_AUDIO:
- case ATSParser::STREAMTYPE_MPEG2_AUDIO:
- mode = ElementaryStreamQueue::MPEG_AUDIO;
- break;
-
- case ATSParser::STREAMTYPE_MPEG1_VIDEO:
- case ATSParser::STREAMTYPE_MPEG2_VIDEO:
- mode = ElementaryStreamQueue::MPEG_VIDEO;
- break;
-
- case ATSParser::STREAMTYPE_MPEG4_VIDEO:
- mode = ElementaryStreamQueue::MPEG4_VIDEO;
- break;
-
- default:
- supported = false;
- break;
- }
-
- if (supported) {
- mQueue = new ElementaryStreamQueue(mode);
- } else {
- ALOGI("unsupported stream ID 0x%02x", stream_id);
- }
-}
-
-MPEG2PSExtractor::Track::~Track() {
- delete mQueue;
- mQueue = NULL;
-}
-
-status_t MPEG2PSExtractor::Track::start(MetaData *params) {
- if (mSource == NULL) {
- return NO_INIT;
- }
-
- return mSource->start(params);
-}
-
-status_t MPEG2PSExtractor::Track::stop() {
- if (mSource == NULL) {
- return NO_INIT;
- }
-
- return mSource->stop();
-}
-
-sp<MetaData> MPEG2PSExtractor::Track::getFormat() {
- if (mSource == NULL) {
- return NULL;
- }
-
- return mSource->getFormat();
-}
-
-status_t MPEG2PSExtractor::Track::read(
- MediaBuffer **buffer, const ReadOptions *options) {
- if (mSource == NULL) {
- return NO_INIT;
- }
-
- status_t finalResult;
- while (!mSource->hasBufferAvailable(&finalResult)) {
- if (finalResult != OK) {
- return ERROR_END_OF_STREAM;
- }
-
- status_t err = mExtractor->feedMore();
-
- if (err != OK) {
- mSource->signalEOS(err);
- }
- }
-
- return mSource->read(buffer, options);
-}
-
-status_t MPEG2PSExtractor::Track::appendPESData(
- unsigned PTS_DTS_flags,
- uint64_t PTS, uint64_t /* DTS */,
- const uint8_t *data, size_t size) {
- if (mQueue == NULL) {
- return OK;
- }
-
- int64_t timeUs;
- if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
- timeUs = (PTS * 100) / 9;
- } else {
- timeUs = 0;
- }
-
- status_t err = mQueue->appendData(data, size, timeUs);
-
- if (err != OK) {
- return err;
- }
-
- sp<ABuffer> accessUnit;
- while ((accessUnit = mQueue->dequeueAccessUnit()) != NULL) {
- if (mSource == NULL) {
- sp<MetaData> meta = mQueue->getFormat();
-
- if (meta != NULL) {
- ALOGV("Stream ID 0x%02x now has data.", mStreamID);
-
- mSource = new AnotherPacketSource(meta);
- mSource->queueAccessUnit(accessUnit);
- }
- } else if (mQueue->getFormat() != NULL) {
- mSource->queueAccessUnit(accessUnit);
- }
- }
-
- return OK;
-}
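
appendPESData() above converts the 33-bit PTS, counted in 90 kHz ticks, to microseconds with (PTS * 100) / 9, since 1,000,000 us per 90,000 ticks reduces to 100 / 9. A one-line check (hypothetical helper):

    #include <cstdint>

    // 90 kHz PTS ticks -> microseconds.
    static int64_t ptsToUs(uint64_t pts90kHz) {
        return static_cast<int64_t>((pts90kHz * 100) / 9);
    }
    // ptsToUs(90000) == 1000000, i.e. exactly one second.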
-
-////////////////////////////////////////////////////////////////////////////////
-
-MPEG2PSExtractor::WrappedTrack::WrappedTrack(
- const sp<MPEG2PSExtractor> &extractor, const sp<Track> &track)
- : mExtractor(extractor),
- mTrack(track) {
-}
-
-MPEG2PSExtractor::WrappedTrack::~WrappedTrack() {
-}
-
-status_t MPEG2PSExtractor::WrappedTrack::start(MetaData *params) {
- return mTrack->start(params);
-}
-
-status_t MPEG2PSExtractor::WrappedTrack::stop() {
- return mTrack->stop();
-}
-
-sp<MetaData> MPEG2PSExtractor::WrappedTrack::getFormat() {
- return mTrack->getFormat();
-}
-
-status_t MPEG2PSExtractor::WrappedTrack::read(
- MediaBuffer **buffer, const ReadOptions *options) {
- return mTrack->read(buffer, options);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-bool SniffMPEG2PS(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *) {
- uint8_t header[5];
- if (source->readAt(0, header, sizeof(header)) < (ssize_t)sizeof(header)) {
- return false;
- }
-
- if (memcmp("\x00\x00\x01\xba", header, 4) || (header[4] >> 6) != 1) {
- return false;
- }
-
- *confidence = 0.25f; // Slightly larger than .mp3 extractor's confidence
-
- mimeType->setTo(MEDIA_MIMETYPE_CONTAINER_MPEG2PS);
-
- return true;
-}
-
-} // namespace android
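
SniffMPEG2PS() above keys off the pack start code: 00 00 01 BA followed by a byte whose top two bits are '01' marks an MPEG-2 pack header (an MPEG-1 pack would start with '0010' there), which is why it checks header[4] >> 6 == 1. A minimal sketch of that test (hypothetical helper):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // True when the first bytes look like an MPEG-2 program stream pack header.
    static bool looksLikeMpeg2ProgramStream(const uint8_t *header, size_t len) {
        static const uint8_t kPackStart[4] = {0x00, 0x00, 0x01, 0xBA};
        return len >= 5
                && memcmp(header, kPackStart, sizeof(kPackStart)) == 0
                && (header[4] >> 6) == 1;
    }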
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
deleted file mode 100644
index 9d684e0..0000000
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ /dev/null
@@ -1,666 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MPEG2TSExtractor"
-
-#include <inttypes.h>
-#include <utils/Log.h>
-
-#include "include/MPEG2TSExtractor.h"
-#include "include/NuCachedSource2.h"
-
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/IStreamSource.h>
-#include <utils/String8.h>
-
-#include "AnotherPacketSource.h"
-#include "ATSParser.h"
-
-#include <hidl/HybridInterface.h>
-#include <android/hardware/cas/1.0/ICas.h>
-
-namespace android {
-
-using hardware::cas::V1_0::ICas;
-
-static const size_t kTSPacketSize = 188;
-static const int kMaxDurationReadSize = 250000LL;
-static const int kMaxDurationRetry = 6;
-
-struct MPEG2TSSource : public MediaSource {
- MPEG2TSSource(
- const sp<MPEG2TSExtractor> &extractor,
- const sp<AnotherPacketSource> &impl,
- bool doesSeek);
-
- virtual status_t start(MetaData *params = NULL);
- virtual status_t stop();
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-private:
- sp<MPEG2TSExtractor> mExtractor;
- sp<AnotherPacketSource> mImpl;
-
- // If there are both audio and video streams, only the video stream
- // will signal seek on the extractor; otherwise the single stream will seek.
- bool mDoesSeek;
-
- DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSSource);
-};
-
-MPEG2TSSource::MPEG2TSSource(
- const sp<MPEG2TSExtractor> &extractor,
- const sp<AnotherPacketSource> &impl,
- bool doesSeek)
- : mExtractor(extractor),
- mImpl(impl),
- mDoesSeek(doesSeek) {
-}
-
-status_t MPEG2TSSource::start(MetaData *params) {
- return mImpl->start(params);
-}
-
-status_t MPEG2TSSource::stop() {
- return mImpl->stop();
-}
-
-sp<MetaData> MPEG2TSSource::getFormat() {
- return mImpl->getFormat();
-}
-
-status_t MPEG2TSSource::read(
- MediaBuffer **out, const ReadOptions *options) {
- *out = NULL;
-
- int64_t seekTimeUs;
- ReadOptions::SeekMode seekMode;
- if (mDoesSeek && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
- // seek is needed
- status_t err = mExtractor->seek(seekTimeUs, seekMode);
- if (err != OK) {
- return err;
- }
- }
-
- if (mExtractor->feedUntilBufferAvailable(mImpl) != OK) {
- return ERROR_END_OF_STREAM;
- }
-
- return mImpl->read(out, options);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-MPEG2TSExtractor::MPEG2TSExtractor(const sp<DataSource> &source)
- : mDataSource(source),
- mParser(new ATSParser),
- mLastSyncEvent(0),
- mOffset(0) {
- init();
-}
-
-size_t MPEG2TSExtractor::countTracks() {
- return mSourceImpls.size();
-}
-
-sp<IMediaSource> MPEG2TSExtractor::getTrack(size_t index) {
- if (index >= mSourceImpls.size()) {
- return NULL;
- }
-
- // The seek reference track (video if present; audio otherwise) performs
- // seek requests, while other tracks ignore requests.
- return new MPEG2TSSource(this, mSourceImpls.editItemAt(index),
- (mSeekSyncPoints == &mSyncPoints.editItemAt(index)));
-}
-
-sp<MetaData> MPEG2TSExtractor::getTrackMetaData(
- size_t index, uint32_t /* flags */) {
- return index < mSourceImpls.size()
- ? mSourceImpls.editItemAt(index)->getFormat() : NULL;
-}
-
-sp<MetaData> MPEG2TSExtractor::getMetaData() {
- sp<MetaData> meta = new MetaData;
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
-
- return meta;
-}
-
-//static
-bool MPEG2TSExtractor::isScrambledFormat(const sp<MetaData> &format) {
- const char *mime;
- return format->findCString(kKeyMIMEType, &mime)
- && (!strcasecmp(MEDIA_MIMETYPE_VIDEO_SCRAMBLED, mime)
- || !strcasecmp(MEDIA_MIMETYPE_AUDIO_SCRAMBLED, mime));
-}
-
-status_t MPEG2TSExtractor::setMediaCas(const HInterfaceToken &casToken) {
- HalToken halToken;
- halToken.setToExternal((uint8_t*)casToken.data(), casToken.size());
- sp<ICas> cas = ICas::castFrom(retrieveHalInterface(halToken));
- ALOGD("setMediaCas: %p", cas.get());
-
- status_t err = mParser->setMediaCas(cas);
- if (err == OK) {
- ALOGI("All tracks now have descramblers");
- init();
- }
- return err;
-}
-
-void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
- bool found = false;
- for (size_t i = 0; i < mSourceImpls.size(); i++) {
- if (mSourceImpls[i] == impl) {
- found = true;
- break;
- }
- }
- if (!found) {
- mSourceImpls.push(impl);
- }
-}
-
-void MPEG2TSExtractor::init() {
- bool haveAudio = false;
- bool haveVideo = false;
- int64_t startTime = ALooper::GetNowUs();
-
- status_t err;
- while ((err = feedMore(true /* isInit */)) == OK
- || err == ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED) {
- if (haveAudio && haveVideo) {
- addSyncPoint_l(mLastSyncEvent);
- mLastSyncEvent.reset();
- break;
- }
- if (!haveVideo) {
- sp<AnotherPacketSource> impl =
- (AnotherPacketSource *)mParser->getSource(
- ATSParser::VIDEO).get();
-
- if (impl != NULL) {
- sp<MetaData> format = impl->getFormat();
- if (format != NULL) {
- haveVideo = true;
- addSource(impl);
- if (!isScrambledFormat(format)) {
- mSyncPoints.push();
- mSeekSyncPoints = &mSyncPoints.editTop();
- }
- }
- }
- }
-
- if (!haveAudio) {
- sp<AnotherPacketSource> impl =
- (AnotherPacketSource *)mParser->getSource(
- ATSParser::AUDIO).get();
-
- if (impl != NULL) {
- sp<MetaData> format = impl->getFormat();
- if (format != NULL) {
- haveAudio = true;
- addSource(impl);
- if (!isScrambledFormat(format)) {
- mSyncPoints.push();
- if (!haveVideo) {
- mSeekSyncPoints = &mSyncPoints.editTop();
- }
- }
- }
- }
- }
-
- addSyncPoint_l(mLastSyncEvent);
- mLastSyncEvent.reset();
-
- // ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED is returned when the mpeg2ts
- // is scrambled but we don't have a MediaCas object set. The extraction
- // will only continue when setMediaCas() is called successfully.
- if (err == ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED) {
- ALOGI("stopped parsing scrambled content, "
- "haveAudio=%d, haveVideo=%d, elaspedTime=%" PRId64,
- haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
- return;
- }
-
- // Wait only for 2 seconds to detect audio/video streams.
- if (ALooper::GetNowUs() - startTime > 2000000ll) {
- break;
- }
- }
-
- off64_t size;
- if (mDataSource->getSize(&size) == OK && (haveAudio || haveVideo)) {
- sp<AnotherPacketSource> impl = haveVideo
- ? (AnotherPacketSource *)mParser->getSource(
- ATSParser::VIDEO).get()
- : (AnotherPacketSource *)mParser->getSource(
- ATSParser::AUDIO).get();
- size_t prevSyncSize = 1;
- int64_t durationUs = -1;
- List<int64_t> durations;
- // Estimate duration --- stabilize until you get <500ms deviation.
- while (feedMore() == OK
- && ALooper::GetNowUs() - startTime <= 2000000ll) {
- if (mSeekSyncPoints->size() > prevSyncSize) {
- prevSyncSize = mSeekSyncPoints->size();
- int64_t diffUs = mSeekSyncPoints->keyAt(prevSyncSize - 1)
- - mSeekSyncPoints->keyAt(0);
- off64_t diffOffset = mSeekSyncPoints->valueAt(prevSyncSize - 1)
- - mSeekSyncPoints->valueAt(0);
- int64_t currentDurationUs = size * diffUs / diffOffset;
- durations.push_back(currentDurationUs);
- if (durations.size() > 5) {
- durations.erase(durations.begin());
- int64_t min = *durations.begin();
- int64_t max = *durations.begin();
- for (auto duration : durations) {
- if (min > duration) {
- min = duration;
- }
- if (max < duration) {
- max = duration;
- }
- }
- if (max - min < 500 * 1000) {
- durationUs = currentDurationUs;
- break;
- }
- }
- }
- }
- status_t err;
- int64_t bufferedDurationUs;
- bufferedDurationUs = impl->getBufferedDurationUs(&err);
- if (err == ERROR_END_OF_STREAM) {
- durationUs = bufferedDurationUs;
- }
- if (durationUs > 0) {
- const sp<MetaData> meta = impl->getFormat();
- meta->setInt64(kKeyDuration, durationUs);
- impl->setFormat(meta);
- } else {
- estimateDurationsFromTimesUsAtEnd();
- }
- }
-
- ALOGI("haveAudio=%d, haveVideo=%d, elaspedTime=%" PRId64,
- haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
-}
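
The duration estimate in init() above is a linear extrapolation: scale the file size by the time-per-byte observed between the first and latest sync points, and accept the value once the last five estimates agree within 500 ms. A compact sketch of that idea (hypothetical helper, assuming roughly constant bitrate):

    #include <algorithm>
    #include <cstdint>
    #include <deque>

    // Extrapolate total duration: fileSize * (timeSpan / byteSpan) between sync
    // points, accepted only once the last five estimates differ by < 500 ms.
    static bool estimateDurationUs(int64_t fileSize,
                                   int64_t firstTimeUs, int64_t lastTimeUs,
                                   int64_t firstOffset, int64_t lastOffset,
                                   std::deque<int64_t> &history, int64_t *outUs) {
        if (lastOffset <= firstOffset) {
            return false;
        }
        int64_t estimate = fileSize * (lastTimeUs - firstTimeUs) / (lastOffset - firstOffset);
        history.push_back(estimate);
        if (history.size() > 5) {
            history.pop_front();
        }
        if (history.size() < 5) {
            return false;
        }
        auto mm = std::minmax_element(history.begin(), history.end());
        if (*mm.second - *mm.first >= 500 * 1000) {
            return false;  // not stable yet
        }
        *outUs = estimate;
        return true;
    }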
-
-status_t MPEG2TSExtractor::feedMore(bool isInit) {
- Mutex::Autolock autoLock(mLock);
-
- uint8_t packet[kTSPacketSize];
- ssize_t n = mDataSource->readAt(mOffset, packet, kTSPacketSize);
-
- if (n < (ssize_t)kTSPacketSize) {
- if (n >= 0) {
- mParser->signalEOS(ERROR_END_OF_STREAM);
- }
- return (n < 0) ? (status_t)n : ERROR_END_OF_STREAM;
- }
-
- ATSParser::SyncEvent event(mOffset);
- mOffset += n;
- status_t err = mParser->feedTSPacket(packet, kTSPacketSize, &event);
- if (event.hasReturnedData()) {
- if (isInit) {
- mLastSyncEvent = event;
- } else {
- addSyncPoint_l(event);
- }
- }
- return err;
-}
-
-void MPEG2TSExtractor::addSyncPoint_l(const ATSParser::SyncEvent &event) {
- if (!event.hasReturnedData()) {
- return;
- }
-
- for (size_t i = 0; i < mSourceImpls.size(); ++i) {
- if (mSourceImpls[i].get() == event.getMediaSource().get()) {
- KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
- syncPoints->add(event.getTimeUs(), event.getOffset());
- // Keep the sync points to at most ~5 MB per track (327680 entries of 16 bytes each).
- size_t size = syncPoints->size();
- if (size >= 327680) {
- int64_t firstTimeUs = syncPoints->keyAt(0);
- int64_t lastTimeUs = syncPoints->keyAt(size - 1);
- if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
- syncPoints->removeItemsAt(0, 4096);
- } else {
- syncPoints->removeItemsAt(size - 4096, 4096);
- }
- }
- break;
- }
- }
-}
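The cap above works out to roughly 5 MB per track: each sync point is an (int64_t time, off64_t offset) pair, i.e. 16 bytes of payload, and 327680 entries of 16 bytes is 5 MiB. When the cap is reached, 4096 entries are dropped from whichever end is farther from the newly added event, so the neighborhood of the current position stays dense. A small sketch of that trimming policy over a plain std::map (an assumption; the extractor uses KeyedVector):

    #include <cstdint>
    #include <iterator>
    #include <map>

    // Drop a chunk of sync points from the end farther away from the newest entry.
    void trimSyncPoints(std::map<int64_t, int64_t>& syncPoints,  // timeUs -> byte offset
                        int64_t newestTimeUs,
                        size_t maxEntries = 327680, size_t chunk = 4096) {
        if (syncPoints.size() < maxEntries) {
            return;
        }
        int64_t firstTimeUs = syncPoints.begin()->first;
        int64_t lastTimeUs = syncPoints.rbegin()->first;
        // Keep the half that is closer (in time) to the event that was just added.
        bool dropFront = (newestTimeUs - firstTimeUs) > (lastTimeUs - newestTimeUs);
        for (size_t i = 0; i < chunk && !syncPoints.empty(); ++i) {
            syncPoints.erase(dropFront ? syncPoints.begin()
                                       : std::prev(syncPoints.end()));
        }
    }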
-
-status_t MPEG2TSExtractor::estimateDurationsFromTimesUsAtEnd() {
- if (!(mDataSource->flags() & DataSource::kIsLocalFileSource)) {
- return ERROR_UNSUPPORTED;
- }
-
- off64_t size = 0;
- status_t err = mDataSource->getSize(&size);
- if (err != OK) {
- return err;
- }
-
- uint8_t packet[kTSPacketSize];
- const off64_t zero = 0;
- off64_t offset = max(zero, size - kMaxDurationReadSize);
- if (mDataSource->readAt(offset, &packet, 0) < 0) {
- return ERROR_IO;
- }
-
- int retry = 0;
- bool allDurationsFound = false;
- int64_t timeAnchorUs = mParser->getFirstPTSTimeUs();
- do {
- int bytesRead = 0;
- sp<ATSParser> parser = new ATSParser(ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
- ATSParser::SyncEvent ev(0);
- offset = max(zero, size - (kMaxDurationReadSize << retry));
- offset = (offset / kTSPacketSize) * kTSPacketSize;
- for (;;) {
- if (bytesRead >= kMaxDurationReadSize << max(0, retry - 1)) {
- break;
- }
-
- ssize_t n = mDataSource->readAt(offset, packet, kTSPacketSize);
- if (n < 0) {
- return n;
- } else if (n < (ssize_t)kTSPacketSize) {
- break;
- }
-
- offset += kTSPacketSize;
- bytesRead += kTSPacketSize;
- err = parser->feedTSPacket(packet, kTSPacketSize, &ev);
- if (err != OK) {
- return err;
- }
-
- if (ev.hasReturnedData()) {
- int64_t durationUs = ev.getTimeUs();
- ATSParser::SourceType type = ev.getType();
- ev.reset();
-
- int64_t firstTimeUs;
- sp<AnotherPacketSource> src =
- (AnotherPacketSource *)mParser->getSource(type).get();
- if (src == NULL || src->nextBufferTime(&firstTimeUs) != OK) {
- continue;
- }
- durationUs += src->getEstimatedBufferDurationUs();
- durationUs -= timeAnchorUs;
- durationUs -= firstTimeUs;
- if (durationUs > 0) {
- int64_t origDurationUs, lastDurationUs;
- const sp<MetaData> meta = src->getFormat();
- const uint32_t kKeyLastDuration = 'ldur';
- // Only update the duration when the new estimate is larger than the stored one
- // and within 60 seconds of the previous estimate; the previous estimate is kept
- // in the per-stream MetaData under kKeyLastDuration.
- if (!meta->findInt64(kKeyDuration, &origDurationUs)
- || !meta->findInt64(kKeyLastDuration, &lastDurationUs)
- || (origDurationUs < durationUs
- && abs(durationUs - lastDurationUs) < 60000000)) {
- meta->setInt64(kKeyDuration, durationUs);
- }
- meta->setInt64(kKeyLastDuration, durationUs);
- }
- }
- }
-
- if (!allDurationsFound) {
- allDurationsFound = true;
- for (auto t: {ATSParser::VIDEO, ATSParser::AUDIO}) {
- sp<AnotherPacketSource> src = (AnotherPacketSource *)mParser->getSource(t).get();
- if (src == NULL) {
- continue;
- }
- int64_t durationUs;
- const sp<MetaData> meta = src->getFormat();
- if (!meta->findInt64(kKeyDuration, &durationUs)) {
- allDurationsFound = false;
- break;
- }
- }
- }
-
- ++retry;
- } while(!allDurationsFound && offset > 0 && retry <= kMaxDurationRetry);
-
- return allDurationsFound? OK : ERROR_UNSUPPORTED;
-}
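The tail pass above reduces to simple offset arithmetic: the PTS of a sample found near the end of the file, plus the duration still buffered in that stream's source, minus the stream's first PTS anchor and the time of the first still-buffered sample. A worked example with assumed numbers (the variable names are illustrative, not the extractor's):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical values, all in microseconds.
        int64_t tailSamplePtsUs     = 3600000000;  // PTS of a sample found near EOF
        int64_t bufferedDurationUs  =     500000;  // duration buffered in the stream's source
        int64_t firstPtsAnchorUs    =  600000000;  // first PTS seen by the parser
        int64_t firstBufferedTimeUs =          0;  // time of the first still-buffered sample
        int64_t durationUs = tailSamplePtsUs + bufferedDurationUs
                           - firstPtsAnchorUs - firstBufferedTimeUs;
        std::printf("estimated duration: %lld us (~%lld s)\n",
                    (long long)durationUs, (long long)(durationUs / 1000000));  // ~3000 s
        return 0;
    }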
-
-uint32_t MPEG2TSExtractor::flags() const {
- return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
-}
-
-status_t MPEG2TSExtractor::seek(int64_t seekTimeUs,
- const MediaSource::ReadOptions::SeekMode &seekMode) {
- if (mSeekSyncPoints == NULL || mSeekSyncPoints->isEmpty()) {
- ALOGW("No sync point to seek to.");
- // ... and therefore we have nothing useful to do here.
- return OK;
- }
-
- // Determine whether we're seeking beyond the known area.
- bool shouldSeekBeyond =
- (seekTimeUs > mSeekSyncPoints->keyAt(mSeekSyncPoints->size() - 1));
-
- // Determine the sync point to seek.
- size_t index = 0;
- for (; index < mSeekSyncPoints->size(); ++index) {
- int64_t timeUs = mSeekSyncPoints->keyAt(index);
- if (timeUs > seekTimeUs) {
- break;
- }
- }
-
- switch (seekMode) {
- case MediaSource::ReadOptions::SEEK_NEXT_SYNC:
- if (index == mSeekSyncPoints->size()) {
- ALOGW("Next sync not found; starting from the latest sync.");
- --index;
- }
- break;
- case MediaSource::ReadOptions::SEEK_CLOSEST_SYNC:
- case MediaSource::ReadOptions::SEEK_CLOSEST:
- ALOGW("seekMode not supported: %d; falling back to PREVIOUS_SYNC",
- seekMode);
- // fall-through
- case MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC:
- if (index == 0) {
- ALOGW("Previous sync not found; starting from the earliest "
- "sync.");
- } else {
- --index;
- }
- break;
- }
- if (!shouldSeekBeyond || mOffset <= mSeekSyncPoints->valueAt(index)) {
- int64_t actualSeekTimeUs = mSeekSyncPoints->keyAt(index);
- mOffset = mSeekSyncPoints->valueAt(index);
- status_t err = queueDiscontinuityForSeek(actualSeekTimeUs);
- if (err != OK) {
- return err;
- }
- }
-
- if (shouldSeekBeyond) {
- status_t err = seekBeyond(seekTimeUs);
- if (err != OK) {
- return err;
- }
- }
-
- // Fast-forward to sync frame.
- for (size_t i = 0; i < mSourceImpls.size(); ++i) {
- const sp<AnotherPacketSource> &impl = mSourceImpls[i];
- status_t err;
- feedUntilBufferAvailable(impl);
- while (impl->hasBufferAvailable(&err)) {
- sp<AMessage> meta = impl->getMetaAfterLastDequeued(0);
- sp<ABuffer> buffer;
- if (meta == NULL) {
- return UNKNOWN_ERROR;
- }
- int32_t sync;
- if (meta->findInt32("isSync", &sync) && sync) {
- break;
- }
- err = impl->dequeueAccessUnit(&buffer);
- if (err != OK) {
- return err;
- }
- feedUntilBufferAvailable(impl);
- }
- }
-
- return OK;
-}
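The seek above boils down to a linear scan for the first sync point strictly after seekTimeUs, followed by a mode-dependent adjustment: SEEK_NEXT_SYNC keeps that index (clamped to the last entry), while SEEK_PREVIOUS_SYNC, and the unsupported CLOSEST modes that fall back to it, step back one entry when possible. A standalone sketch of just that index selection over a sorted vector of sync times (assumed types, not the extractor's):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class SeekMode { PreviousSync, NextSync };

    // Precondition: syncTimesUs is non-empty (the caller above checks this).
    size_t pickSyncIndex(const std::vector<int64_t>& syncTimesUs,
                         int64_t seekTimeUs, SeekMode mode) {
        size_t index = 0;
        while (index < syncTimesUs.size() && syncTimesUs[index] <= seekTimeUs) {
            ++index;  // first sync point strictly after seekTimeUs
        }
        if (mode == SeekMode::NextSync) {
            if (index == syncTimesUs.size()) {
                --index;  // no later sync point; start from the latest one
            }
        } else if (index > 0) {
            --index;  // previous sync point, when one exists
        }
        return index;
    }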
-
-status_t MPEG2TSExtractor::queueDiscontinuityForSeek(int64_t actualSeekTimeUs) {
- // Signal discontinuity
- sp<AMessage> extra(new AMessage);
- extra->setInt64(IStreamListener::kKeyMediaTimeUs, actualSeekTimeUs);
- mParser->signalDiscontinuity(ATSParser::DISCONTINUITY_TIME, extra);
-
- // After discontinuity, impl should only have discontinuities
- // with the last being what we queued. Dequeue them all here.
- for (size_t i = 0; i < mSourceImpls.size(); ++i) {
- const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
- sp<ABuffer> buffer;
- status_t err;
- while (impl->hasBufferAvailable(&err)) {
- if (err != OK) {
- return err;
- }
- err = impl->dequeueAccessUnit(&buffer);
- // If the source contains anything but discontinuity, that's
- // a programming mistake.
- CHECK(err == INFO_DISCONTINUITY);
- }
- }
-
- // Feed until we have a buffer for each source.
- for (size_t i = 0; i < mSourceImpls.size(); ++i) {
- const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
- sp<ABuffer> buffer;
- status_t err = feedUntilBufferAvailable(impl);
- if (err != OK) {
- return err;
- }
- }
-
- return OK;
-}
-
-status_t MPEG2TSExtractor::seekBeyond(int64_t seekTimeUs) {
- // If we're seeking beyond the region we know about, read until we reach it.
- size_t syncPointsSize = mSeekSyncPoints->size();
-
- while (seekTimeUs > mSeekSyncPoints->keyAt(
- mSeekSyncPoints->size() - 1)) {
- status_t err;
- if (syncPointsSize < mSeekSyncPoints->size()) {
- syncPointsSize = mSeekSyncPoints->size();
- int64_t syncTimeUs = mSeekSyncPoints->keyAt(syncPointsSize - 1);
- // Dequeue buffers before the sync point to avoid building up too much cache.
- sp<ABuffer> buffer;
- for (size_t i = 0; i < mSourceImpls.size(); ++i) {
- const sp<AnotherPacketSource> &impl = mSourceImpls[i];
- int64_t timeUs;
- while ((err = impl->nextBufferTime(&timeUs)) == OK) {
- if (timeUs < syncTimeUs) {
- impl->dequeueAccessUnit(&buffer);
- } else {
- break;
- }
- }
- if (err != OK && err != -EWOULDBLOCK) {
- return err;
- }
- }
- }
- if (feedMore() != OK) {
- return ERROR_END_OF_STREAM;
- }
- }
-
- return OK;
-}
-
-status_t MPEG2TSExtractor::feedUntilBufferAvailable(
- const sp<AnotherPacketSource> &impl) {
- status_t finalResult;
- while (!impl->hasBufferAvailable(&finalResult)) {
- if (finalResult != OK) {
- return finalResult;
- }
-
- status_t err = feedMore();
- if (err != OK) {
- impl->signalEOS(err);
- }
- }
- return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-bool SniffMPEG2TS(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *) {
- for (int i = 0; i < 5; ++i) {
- char header;
- if (source->readAt(kTSPacketSize * i, &header, 1) != 1
- || header != 0x47) {
- return false;
- }
- }
-
- *confidence = 0.1f;
- mimeType->setTo(MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
-
- return true;
-}
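The sniffer leans on MPEG-2 TS framing: packets are a fixed 188 bytes (kTSPacketSize in the code above) and each begins with the 0x47 sync byte, so checking the first byte at five consecutive packet boundaries is a cheap containment test before committing to the 0.1 confidence. A self-contained version of the same check over an in-memory buffer (the real probe goes through DataSource::readAt as above):

    #include <cstddef>
    #include <cstdint>

    // Returns true if the buffer looks like an MPEG-2 transport stream:
    // 188-byte packets, each starting with the 0x47 sync byte.
    bool looksLikeMpeg2Ts(const uint8_t* data, size_t size, int packetsToCheck = 5) {
        constexpr size_t kTSPacketSize = 188;
        if (size < kTSPacketSize * static_cast<size_t>(packetsToCheck)) {
            return false;
        }
        for (int i = 0; i < packetsToCheck; ++i) {
            if (data[kTSPacketSize * i] != 0x47) {
                return false;
            }
        }
        return true;
    }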
-
-} // namespace android
diff --git a/media/libstagefright/omx/1.0/Omx.cpp b/media/libstagefright/omx/1.0/Omx.cpp
index fe50656..4e2d398 100644
--- a/media/libstagefright/omx/1.0/Omx.cpp
+++ b/media/libstagefright/omx/1.0/Omx.cpp
@@ -24,7 +24,7 @@
#include <media/stagefright/omx/OMXUtils.h>
#include <media/stagefright/omx/OMXMaster.h>
-#include <media/stagefright/omx/GraphicBufferSource.h>
+#include <media/stagefright/omx/OmxGraphicBufferSource.h>
#include <media/stagefright/omx/1.0/WOmxNode.h>
#include <media/stagefright/omx/1.0/WOmxObserver.h>
@@ -148,7 +148,7 @@
Return<void> Omx::createInputSurface(createInputSurface_cb _hidl_cb) {
sp<::android::IGraphicBufferProducer> bufferProducer;
- sp<GraphicBufferSource> graphicBufferSource = new GraphicBufferSource();
+ sp<OmxGraphicBufferSource> graphicBufferSource = new OmxGraphicBufferSource();
status_t err = graphicBufferSource->initCheck();
if (err != OK) {
LOG(ERROR) << "Failed to create persistent input surface: "
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp b/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp
deleted file mode 100644
index c4499dc..0000000
--- a/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright 2016, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "WGraphicBufferProducer-impl"
-
-#include <android-base/logging.h>
-
-#include <media/stagefright/omx/1.0/WGraphicBufferProducer.h>
-#include <media/stagefright/omx/1.0/WProducerListener.h>
-#include <media/stagefright/omx/1.0/Conversion.h>
-#include <system/window.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace implementation {
-
-// TWGraphicBufferProducer
-TWGraphicBufferProducer::TWGraphicBufferProducer(
- sp<BGraphicBufferProducer> const& base):
- mBase(base) {
-}
-
-Return<void> TWGraphicBufferProducer::requestBuffer(
- int32_t slot, requestBuffer_cb _hidl_cb) {
- sp<GraphicBuffer> buf;
- status_t status = mBase->requestBuffer(slot, &buf);
- AnwBuffer anwBuffer;
- if (buf != nullptr) {
- wrapAs(&anwBuffer, *buf);
- }
- _hidl_cb(static_cast<int32_t>(status), anwBuffer);
- return Void();
-}
-
-Return<int32_t> TWGraphicBufferProducer::setMaxDequeuedBufferCount(
- int32_t maxDequeuedBuffers) {
- return static_cast<int32_t>(mBase->setMaxDequeuedBufferCount(
- static_cast<int>(maxDequeuedBuffers)));
-}
-
-Return<int32_t> TWGraphicBufferProducer::setAsyncMode(bool async) {
- return static_cast<int32_t>(mBase->setAsyncMode(async));
-}
-
-Return<void> TWGraphicBufferProducer::dequeueBuffer(
- uint32_t width, uint32_t height,
- PixelFormat format, uint32_t usage,
- bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) {
- int slot;
- sp<Fence> fence;
- ::android::FrameEventHistoryDelta outTimestamps;
- status_t status = mBase->dequeueBuffer(
- &slot, &fence, width, height,
- static_cast<::android::PixelFormat>(format), usage, nullptr,
- getFrameTimestamps ? &outTimestamps : nullptr);
- hidl_handle tFence;
- FrameEventHistoryDelta tOutTimestamps;
-
- native_handle_t* nh = nullptr;
- if ((fence == nullptr) || !wrapAs(&tFence, &nh, *fence)) {
- LOG(ERROR) << "TWGraphicBufferProducer::dequeueBuffer - "
- "Invalid output fence";
- _hidl_cb(static_cast<int32_t>(status),
- static_cast<int32_t>(slot),
- tFence,
- tOutTimestamps);
- return Void();
- }
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (getFrameTimestamps && !wrapAs(&tOutTimestamps, &nhAA, outTimestamps)) {
- LOG(ERROR) << "TWGraphicBufferProducer::dequeueBuffer - "
- "Invalid output timestamps";
- _hidl_cb(static_cast<int32_t>(status),
- static_cast<int32_t>(slot),
- tFence,
- tOutTimestamps);
- native_handle_delete(nh);
- return Void();
- }
-
- _hidl_cb(static_cast<int32_t>(status),
- static_cast<int32_t>(slot),
- tFence,
- tOutTimestamps);
- native_handle_delete(nh);
- if (getFrameTimestamps) {
- for (auto& nhA : nhAA) {
- for (auto& handle : nhA) {
- native_handle_delete(handle);
- }
- }
- }
- return Void();
-}
-
-Return<int32_t> TWGraphicBufferProducer::detachBuffer(int32_t slot) {
- return static_cast<int32_t>(mBase->detachBuffer(slot));
-}
-
-Return<void> TWGraphicBufferProducer::detachNextBuffer(
- detachNextBuffer_cb _hidl_cb) {
- sp<GraphicBuffer> outBuffer;
- sp<Fence> outFence;
- status_t status = mBase->detachNextBuffer(&outBuffer, &outFence);
- AnwBuffer tBuffer;
- hidl_handle tFence;
-
- if (outBuffer == nullptr) {
- LOG(ERROR) << "TWGraphicBufferProducer::detachNextBuffer - "
- "Invalid output buffer";
- _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
- return Void();
- }
- wrapAs(&tBuffer, *outBuffer);
- native_handle_t* nh = nullptr;
- if ((outFence != nullptr) && !wrapAs(&tFence, &nh, *outFence)) {
- LOG(ERROR) << "TWGraphicBufferProducer::detachNextBuffer - "
- "Invalid output fence";
- _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
- return Void();
- }
-
- _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
- native_handle_delete(nh);
- return Void();
-}
-
-Return<void> TWGraphicBufferProducer::attachBuffer(
- const AnwBuffer& buffer,
- attachBuffer_cb _hidl_cb) {
- int outSlot;
- sp<GraphicBuffer> lBuffer = new GraphicBuffer();
- if (!convertTo(lBuffer.get(), buffer)) {
- LOG(ERROR) << "TWGraphicBufferProducer::attachBuffer - "
- "Invalid input native window buffer";
- _hidl_cb(static_cast<int32_t>(BAD_VALUE), -1);
- return Void();
- }
- status_t status = mBase->attachBuffer(&outSlot, lBuffer);
-
- _hidl_cb(static_cast<int32_t>(status), static_cast<int32_t>(outSlot));
- return Void();
-}
-
-Return<void> TWGraphicBufferProducer::queueBuffer(
- int32_t slot, const QueueBufferInput& input,
- queueBuffer_cb _hidl_cb) {
- QueueBufferOutput tOutput;
- BGraphicBufferProducer::QueueBufferInput lInput(
- 0, false, HAL_DATASPACE_UNKNOWN,
- ::android::Rect(0, 0, 1, 1),
- NATIVE_WINDOW_SCALING_MODE_FREEZE,
- 0, ::android::Fence::NO_FENCE);
- if (!convertTo(&lInput, input)) {
- LOG(ERROR) << "TWGraphicBufferProducer::queueBuffer - "
- "Invalid input";
- _hidl_cb(static_cast<int32_t>(BAD_VALUE), tOutput);
- return Void();
- }
- BGraphicBufferProducer::QueueBufferOutput lOutput;
- status_t status = mBase->queueBuffer(
- static_cast<int>(slot), lInput, &lOutput);
-
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (!wrapAs(&tOutput, &nhAA, lOutput)) {
- LOG(ERROR) << "TWGraphicBufferProducer::queueBuffer - "
- "Invalid output";
- _hidl_cb(static_cast<int32_t>(BAD_VALUE), tOutput);
- return Void();
- }
-
- _hidl_cb(static_cast<int32_t>(status), tOutput);
- for (auto& nhA : nhAA) {
- for (auto& nh : nhA) {
- native_handle_delete(nh);
- }
- }
- return Void();
-}
-
-Return<int32_t> TWGraphicBufferProducer::cancelBuffer(
- int32_t slot, const hidl_handle& fence) {
- sp<Fence> lFence = new Fence();
- if (!convertTo(lFence.get(), fence)) {
- LOG(ERROR) << "TWGraphicBufferProducer::cancelBuffer - "
- "Invalid input fence";
- return static_cast<int32_t>(BAD_VALUE);
- }
- return static_cast<int32_t>(mBase->cancelBuffer(static_cast<int>(slot), lFence));
-}
-
-Return<void> TWGraphicBufferProducer::query(int32_t what, query_cb _hidl_cb) {
- int lValue;
- int lReturn = mBase->query(static_cast<int>(what), &lValue);
- _hidl_cb(static_cast<int32_t>(lReturn), static_cast<int32_t>(lValue));
- return Void();
-}
-
-Return<void> TWGraphicBufferProducer::connect(
- const sp<HProducerListener>& listener,
- int32_t api, bool producerControlledByApp, connect_cb _hidl_cb) {
- sp<BProducerListener> lListener = listener == nullptr ?
- nullptr : new LWProducerListener(listener);
- BGraphicBufferProducer::QueueBufferOutput lOutput;
- status_t status = mBase->connect(lListener,
- static_cast<int>(api),
- producerControlledByApp,
- &lOutput);
-
- QueueBufferOutput tOutput;
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (!wrapAs(&tOutput, &nhAA, lOutput)) {
- LOG(ERROR) << "TWGraphicBufferProducer::connect - "
- "Invalid output";
- _hidl_cb(static_cast<int32_t>(status), tOutput);
- return Void();
- }
-
- _hidl_cb(static_cast<int32_t>(status), tOutput);
- for (auto& nhA : nhAA) {
- for (auto& nh : nhA) {
- native_handle_delete(nh);
- }
- }
- return Void();
-}
-
-Return<int32_t> TWGraphicBufferProducer::disconnect(
- int32_t api, DisconnectMode mode) {
- return static_cast<int32_t>(mBase->disconnect(
- static_cast<int>(api),
- toGuiDisconnectMode(mode)));
-}
-
-Return<int32_t> TWGraphicBufferProducer::setSidebandStream(const hidl_handle& stream) {
- return static_cast<int32_t>(mBase->setSidebandStream(NativeHandle::create(
- stream ? native_handle_clone(stream) : NULL, true)));
-}
-
-Return<void> TWGraphicBufferProducer::allocateBuffers(
- uint32_t width, uint32_t height, PixelFormat format, uint32_t usage) {
- mBase->allocateBuffers(
- width, height,
- static_cast<::android::PixelFormat>(format),
- usage);
- return Void();
-}
-
-Return<int32_t> TWGraphicBufferProducer::allowAllocation(bool allow) {
- return static_cast<int32_t>(mBase->allowAllocation(allow));
-}
-
-Return<int32_t> TWGraphicBufferProducer::setGenerationNumber(uint32_t generationNumber) {
- return static_cast<int32_t>(mBase->setGenerationNumber(generationNumber));
-}
-
-Return<void> TWGraphicBufferProducer::getConsumerName(getConsumerName_cb _hidl_cb) {
- _hidl_cb(mBase->getConsumerName().string());
- return Void();
-}
-
-Return<int32_t> TWGraphicBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
- return static_cast<int32_t>(mBase->setSharedBufferMode(sharedBufferMode));
-}
-
-Return<int32_t> TWGraphicBufferProducer::setAutoRefresh(bool autoRefresh) {
- return static_cast<int32_t>(mBase->setAutoRefresh(autoRefresh));
-}
-
-Return<int32_t> TWGraphicBufferProducer::setDequeueTimeout(int64_t timeoutNs) {
- return static_cast<int32_t>(mBase->setDequeueTimeout(timeoutNs));
-}
-
-Return<void> TWGraphicBufferProducer::getLastQueuedBuffer(
- getLastQueuedBuffer_cb _hidl_cb) {
- sp<GraphicBuffer> lOutBuffer = new GraphicBuffer();
- sp<Fence> lOutFence = new Fence();
- float lOutTransformMatrix[16];
- status_t status = mBase->getLastQueuedBuffer(
- &lOutBuffer, &lOutFence, lOutTransformMatrix);
-
- AnwBuffer tOutBuffer;
- if (lOutBuffer != nullptr) {
- wrapAs(&tOutBuffer, *lOutBuffer);
- }
- hidl_handle tOutFence;
- native_handle_t* nh = nullptr;
- if ((lOutFence == nullptr) || !wrapAs(&tOutFence, &nh, *lOutFence)) {
- LOG(ERROR) << "TWGraphicBufferProducer::getLastQueuedBuffer - "
- "Invalid output fence";
- _hidl_cb(static_cast<int32_t>(status),
- tOutBuffer,
- tOutFence,
- hidl_array<float, 16>());
- return Void();
- }
- hidl_array<float, 16> tOutTransformMatrix(lOutTransformMatrix);
-
- _hidl_cb(static_cast<int32_t>(status), tOutBuffer, tOutFence, tOutTransformMatrix);
- native_handle_delete(nh);
- return Void();
-}
-
-Return<void> TWGraphicBufferProducer::getFrameTimestamps(
- getFrameTimestamps_cb _hidl_cb) {
- ::android::FrameEventHistoryDelta lDelta;
- mBase->getFrameTimestamps(&lDelta);
-
- FrameEventHistoryDelta tDelta;
- std::vector<std::vector<native_handle_t*> > nhAA;
- if (!wrapAs(&tDelta, &nhAA, lDelta)) {
- LOG(ERROR) << "TWGraphicBufferProducer::getFrameTimestamps - "
- "Invalid output frame timestamps";
- _hidl_cb(tDelta);
- return Void();
- }
-
- _hidl_cb(tDelta);
- for (auto& nhA : nhAA) {
- for (auto& nh : nhA) {
- native_handle_delete(nh);
- }
- }
- return Void();
-}
-
-Return<void> TWGraphicBufferProducer::getUniqueId(getUniqueId_cb _hidl_cb) {
- uint64_t outId;
- status_t status = mBase->getUniqueId(&outId);
- _hidl_cb(static_cast<int32_t>(status), outId);
- return Void();
-}
-
-} // namespace implementation
-} // namespace V1_0
-} // namespace omx
-} // namespace media
-} // namespace hardware
-} // namespace android
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
index 3201c32..ed272bb 100644
--- a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
@@ -79,9 +79,9 @@
};
struct TWGraphicBufferSource::TWOmxBufferSource : public IOmxBufferSource {
- sp<GraphicBufferSource> mSource;
+ sp<OmxGraphicBufferSource> mSource;
- TWOmxBufferSource(const sp<GraphicBufferSource> &source): mSource(source) {
+ TWOmxBufferSource(const sp<OmxGraphicBufferSource> &source): mSource(source) {
}
Return<void> onOmxExecuting() override {
@@ -115,7 +115,7 @@
// TWGraphicBufferSource
TWGraphicBufferSource::TWGraphicBufferSource(
- sp<GraphicBufferSource> const& base) :
+ sp<OmxGraphicBufferSource> const& base) :
mBase(base),
mOmxBufferSource(new TWOmxBufferSource(base)) {
}
diff --git a/media/libstagefright/omx/1.0/WOmxNode.cpp b/media/libstagefright/omx/1.0/WOmxNode.cpp
index 9f82283..1dc7c7b 100644
--- a/media/libstagefright/omx/1.0/WOmxNode.cpp
+++ b/media/libstagefright/omx/1.0/WOmxNode.cpp
@@ -154,7 +154,8 @@
hidl_handle const& outNativeHandle) {
fnStatus = toStatusT(status);
*buffer = outBuffer;
- *native_handle = NativeHandle::create(
+ *native_handle = outNativeHandle.getNativeHandle() == nullptr ?
+ nullptr : NativeHandle::create(
native_handle_clone(outNativeHandle), true);
}));
return transStatus == NO_ERROR ? fnStatus : transStatus;
diff --git a/media/libstagefright/omx/1.0/WProducerListener.cpp b/media/libstagefright/omx/1.0/WProducerListener.cpp
deleted file mode 100644
index bdc3aa1..0000000
--- a/media/libstagefright/omx/1.0/WProducerListener.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2016, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/stagefright/omx/1.0/WProducerListener.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace implementation {
-
-// TWProducerListener
-TWProducerListener::TWProducerListener(
- sp<BProducerListener> const& base):
- mBase(base) {
-}
-
-Return<void> TWProducerListener::onBufferReleased() {
- mBase->onBufferReleased();
- return Void();
-}
-
-Return<bool> TWProducerListener::needsReleaseNotify() {
- return mBase->needsReleaseNotify();
-}
-
-// LWProducerListener
-LWProducerListener::LWProducerListener(
- sp<HProducerListener> const& base):
- mBase(base) {
-}
-
-void LWProducerListener::onBufferReleased() {
- mBase->onBufferReleased();
-}
-
-bool LWProducerListener::needsReleaseNotify() {
- return static_cast<bool>(mBase->needsReleaseNotify());
-}
-
-} // namespace implementation
-} // namespace V1_0
-} // namespace omx
-} // namespace media
-} // namespace hardware
-} // namespace android
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index bd3c1c6..3e6942b 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -6,14 +6,11 @@
},
srcs: [
- "FrameDropper.cpp",
- "GraphicBufferSource.cpp",
"BWGraphicBufferSource.cpp",
- "OMX.cpp",
- "OMXStore.cpp",
"OMXMaster.cpp",
"OMXNodeInstance.cpp",
"OMXUtils.cpp",
+ "OmxGraphicBufferSource.cpp",
"SimpleSoftOMXComponent.cpp",
"SoftOMXComponent.cpp",
"SoftOMXPlugin.cpp",
@@ -21,8 +18,6 @@
"SoftVideoEncoderOMXComponent.cpp",
"1.0/Omx.cpp",
"1.0/OmxStore.cpp",
- "1.0/WGraphicBufferProducer.cpp",
- "1.0/WProducerListener.cpp",
"1.0/WGraphicBufferSource.cpp",
"1.0/WOmxNode.cpp",
"1.0/WOmxObserver.cpp",
@@ -51,25 +46,23 @@
"libgui",
"libcutils",
"libstagefright_foundation",
+ "libstagefright_bufferqueue_helper",
"libstagefright_xmlparser",
"libdl",
"libhidlbase",
"libhidlmemory",
"libhidltransport",
"libnativewindow", // TODO(b/62923479): use header library
- "android.hidl.memory@1.0",
- "android.hidl.token@1.0-utils",
- "android.hardware.media@1.0",
+ "libvndksupport",
"android.hardware.media.omx@1.0",
- "android.hardware.graphics.common@1.0",
"android.hardware.graphics.bufferqueue@1.0",
],
export_shared_lib_headers: [
- "android.hidl.memory@1.0",
"libmedia_omx",
"libstagefright_foundation",
"libstagefright_xmlparser",
+ "libutils",
],
cflags: [
@@ -109,6 +102,7 @@
],
shared_libs: [
"libmedia_omx",
+ "libstagefright_foundation",
"liblog",
],
export_shared_lib_headers: [
diff --git a/media/libstagefright/omx/BWGraphicBufferSource.cpp b/media/libstagefright/omx/BWGraphicBufferSource.cpp
index 94ef598..fa30a46 100644
--- a/media/libstagefright/omx/BWGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/BWGraphicBufferSource.cpp
@@ -55,9 +55,9 @@
};
struct BWGraphicBufferSource::BWOMXBufferSource : public BnOMXBufferSource {
- sp<GraphicBufferSource> mSource;
+ sp<OmxGraphicBufferSource> mSource;
- BWOMXBufferSource(const sp<GraphicBufferSource> &source): mSource(source) {
+ BWOMXBufferSource(const sp<OmxGraphicBufferSource> &source): mSource(source) {
}
Status onOmxExecuting() override {
@@ -83,7 +83,7 @@
};
BWGraphicBufferSource::BWGraphicBufferSource(
- sp<GraphicBufferSource> const& base) :
+ sp<OmxGraphicBufferSource> const& base) :
mBase(base),
mOMXBufferSource(new BWOMXBufferSource(base)) {
}
diff --git a/media/libstagefright/omx/FrameDropper.cpp b/media/libstagefright/omx/FrameDropper.cpp
deleted file mode 100644
index 0c50c58..0000000
--- a/media/libstagefright/omx/FrameDropper.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "FrameDropper"
-#include <utils/Log.h>
-
-#include <media/stagefright/omx/FrameDropper.h>
-#include <media/stagefright/foundation/ADebug.h>
-
-namespace android {
-
-static const int64_t kMaxJitterUs = 2000;
-
-FrameDropper::FrameDropper()
- : mDesiredMinTimeUs(-1),
- mMinIntervalUs(0) {
-}
-
-FrameDropper::~FrameDropper() {
-}
-
-status_t FrameDropper::setMaxFrameRate(float maxFrameRate) {
- if (maxFrameRate <= 0) {
- ALOGE("framerate should be positive but got %f.", maxFrameRate);
- return BAD_VALUE;
- }
- mMinIntervalUs = (int64_t) (1000000.0f / maxFrameRate);
- return OK;
-}
-
-bool FrameDropper::shouldDrop(int64_t timeUs) {
- if (mMinIntervalUs <= 0) {
- return false;
- }
-
- if (mDesiredMinTimeUs < 0) {
- mDesiredMinTimeUs = timeUs + mMinIntervalUs;
- ALOGV("first frame %lld, next desired frame %lld",
- (long long)timeUs, (long long)mDesiredMinTimeUs);
- return false;
- }
-
- if (timeUs < (mDesiredMinTimeUs - kMaxJitterUs)) {
- ALOGV("drop frame %lld, desired frame %lld, diff %lld",
- (long long)timeUs, (long long)mDesiredMinTimeUs,
- (long long)(mDesiredMinTimeUs - timeUs));
- return true;
- }
-
- int64_t n = (timeUs - mDesiredMinTimeUs + kMaxJitterUs) / mMinIntervalUs;
- mDesiredMinTimeUs += (n + 1) * mMinIntervalUs;
- ALOGV("keep frame %lld, next desired frame %lld, diff %lld",
- (long long)timeUs, (long long)mDesiredMinTimeUs,
- (long long)(mDesiredMinTimeUs - timeUs));
- return false;
-}
-
-} // namespace android
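FrameDropper, removed above, converts a maximum frame rate into a minimum inter-frame interval (1e6 / maxFrameRate microseconds) and drops any frame arriving more than kMaxJitterUs (2 ms) before the next desired time; kept frames advance the desired time by whole intervals. A standalone restatement of that decision with a small usage run (plain C++, independent of the class above):

    #include <cstdint>
    #include <cstdio>

    struct SimpleFrameDropper {
        static constexpr int64_t kMaxJitterUs = 2000;
        int64_t minIntervalUs = 0;      // 1e6 / maxFrameRate
        int64_t desiredMinTimeUs = -1;  // earliest timestamp we are willing to keep next

        bool shouldDrop(int64_t timeUs) {
            if (minIntervalUs <= 0) return false;        // no cap configured
            if (desiredMinTimeUs < 0) {                  // first frame is always kept
                desiredMinTimeUs = timeUs + minIntervalUs;
                return false;
            }
            if (timeUs < desiredMinTimeUs - kMaxJitterUs) {
                return true;                             // too soon after the last kept frame
            }
            // Keep the frame and skip ahead by however many intervals have elapsed.
            int64_t n = (timeUs - desiredMinTimeUs + kMaxJitterUs) / minIntervalUs;
            desiredMinTimeUs += (n + 1) * minIntervalUs;
            return false;
        }
    };

    int main() {
        SimpleFrameDropper dropper;
        dropper.minIntervalUs = 1000000 / 30;  // cap at ~30 fps
        // A ~60 fps input (one frame every ~16.7 ms): roughly every other frame is dropped.
        for (int64_t t = 0; t < 200000; t += 16667) {
            std::printf("%lld us -> %s\n", (long long)t,
                        dropper.shouldDrop(t) ? "drop" : "keep");
        }
        return 0;
    }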
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
deleted file mode 100644
index 1917d2a..0000000
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ /dev/null
@@ -1,1337 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <inttypes.h>
-
-#define LOG_TAG "GraphicBufferSource"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#define STRINGIFY_ENUMS // for asString in HardwareAPI.h/VideoAPI.h
-
-#include <media/stagefright/omx/GraphicBufferSource.h>
-#include <media/stagefright/omx/FrameDropper.h>
-#include <media/stagefright/omx/OMXUtils.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/ColorUtils.h>
-#include <media/stagefright/foundation/FileDescriptor.h>
-
-#include <media/hardware/MetadataBufferType.h>
-#include <ui/GraphicBuffer.h>
-#include <gui/BufferItem.h>
-#include <media/hardware/HardwareAPI.h>
-#include <media/openmax/OMX_Component.h>
-#include <media/openmax/OMX_IndexExt.h>
-#include <media/OMXBuffer.h>
-
-#include <inttypes.h>
-
-#include <functional>
-#include <memory>
-#include <cmath>
-
-namespace android {
-
-/**
- * A copiable object managing a buffer in the buffer cache managed by the producer. This object
- * holds a reference to the buffer, and maintains which buffer slot it belongs to (if any), and
- * whether it is still in a buffer slot. It also tracks whether there are any outstanding acquire
- * references to it (by buffers acquired from the slot), mainly so that we can keep a debug
- * count of how many buffers we still need to release back to the producer.
- */
-struct GraphicBufferSource::CachedBuffer {
- /**
- * Token that is used to track acquire counts (as opposed to all references to this object).
- */
- struct Acquirable { };
-
- /**
- * Create using a buffer cached in a slot.
- */
- CachedBuffer(slot_id slot, const sp<GraphicBuffer> &graphicBuffer)
- : mIsCached(true),
- mSlot(slot),
- mGraphicBuffer(graphicBuffer),
- mAcquirable(std::make_shared<Acquirable>()) {
- }
-
- /**
- * Returns the cache slot that this buffer is cached in, or -1 if it is no longer cached.
- *
- * This assumes that a slot id of -1 is invalid; strictly speaking, that is just a benign
- * collision used for debugging. This object explicitly tracks whether it is still cached.
- */
- slot_id getSlot() const {
- return mIsCached ? mSlot : -1;
- }
-
- /**
- * Returns the cached buffer.
- */
- sp<GraphicBuffer> getGraphicBuffer() const {
- return mGraphicBuffer;
- }
-
- /**
- * Checks whether this buffer is still in the buffer cache.
- */
- bool isCached() const {
- return mIsCached;
- }
-
- /**
- * Checks whether this buffer has an acquired reference.
- */
- bool isAcquired() const {
- return mAcquirable.use_count() > 1;
- }
-
- /**
- * Gets and returns a shared acquired reference.
- */
- std::shared_ptr<Acquirable> getAcquirable() {
- return mAcquirable;
- }
-
-private:
- friend void GraphicBufferSource::discardBufferAtSlotIndex_l(ssize_t);
-
- /**
- * This method to be called when the buffer is no longer in the buffer cache.
- * Called from discardBufferAtSlotIndex_l.
- */
- void onDroppedFromCache() {
- CHECK_DBG(mIsCached);
- mIsCached = false;
- }
-
- bool mIsCached;
- slot_id mSlot;
- sp<GraphicBuffer> mGraphicBuffer;
- std::shared_ptr<Acquirable> mAcquirable;
-};
-
-/**
- * A copiable object managing a buffer acquired from the producer. This must always be a cached
- * buffer. This object also manages its acquire fence and any release fences that may be returned
- * by the encoder for this buffer (this buffer may be queued to the encoder multiple times).
- * If no release fences are added by the encoder, the acquire fence is returned as the release
- * fence for this buffer, as it is assumed that no one waited for the acquire fence. Otherwise, it is
- * assumed that the encoder has waited for the acquire fence (or returned it as the release
- * fence).
- */
-struct GraphicBufferSource::AcquiredBuffer {
- AcquiredBuffer(
- const std::shared_ptr<CachedBuffer> &buffer,
- std::function<void(AcquiredBuffer *)> onReleased,
- const sp<Fence> &acquireFence)
- : mBuffer(buffer),
- mAcquirable(buffer->getAcquirable()),
- mAcquireFence(acquireFence),
- mGotReleaseFences(false),
- mOnReleased(onReleased) {
- }
-
- /**
- * Adds a release fence returned by the encoder to this object. If this is called with a
- * valid file descriptor, it is added to the list of release fences. These are returned to the
- * producer on release() as a merged fence. Regardless of the validity of the file descriptor,
- * we note that the encoder attempted to add a release fence, so the acquire fence can now be
- * assumed to have been waited on.
- */
- void addReleaseFenceFd(int fenceFd) {
- // save all release fences - these will be propagated to the producer if this buffer is
- // ever released to it
- if (fenceFd >= 0) {
- mReleaseFenceFds.push_back(fenceFd);
- }
- mGotReleaseFences = true;
- }
-
- /**
- * Returns the acquire fence file descriptor associated with this object.
- */
- int getAcquireFenceFd() {
- if (mAcquireFence == nullptr || !mAcquireFence->isValid()) {
- return -1;
- }
- return mAcquireFence->dup();
- }
-
- /**
- * Returns whether the buffer is still in the buffer cache.
- */
- bool isCached() const {
- return mBuffer->isCached();
- }
-
- /**
- * Returns the acquired buffer.
- */
- sp<GraphicBuffer> getGraphicBuffer() const {
- return mBuffer->getGraphicBuffer();
- }
-
- /**
- * Returns the slot that this buffer is cached at, or -1 otherwise.
- *
- * This assumes that a slot id of -1 is invalid; strictly speaking, that is just a benign
- * collision used for debugging. This object explicitly tracks whether it is still cached.
- */
- slot_id getSlot() const {
- return mBuffer->getSlot();
- }
-
- /**
- * Creates and returns a release fence object from the acquire fence and/or any release fences
- * added. If no release fences were added (even if invalid), returns the acquire fence.
- * Otherwise, it returns a merged fence from all the valid release fences added.
- */
- sp<Fence> getReleaseFence() {
- // If we did not receive release fences, we assume this buffer was not consumed (it was
- // discarded or dropped). In this case, return the acquire fence as the release fence.
- // We do this here to avoid duplicating, closing and recreating the Fence object.
- if (!mGotReleaseFences) {
- return mAcquireFence;
- }
- sp<Fence> ret = getReleaseFence(0, mReleaseFenceFds.size());
- // clear fds as fence took ownership of them
- mReleaseFenceFds.clear();
- return ret;
- }
-
- // this video buffer is no longer referenced by the codec (or kept for later encoding)
- // it is now safe to release to the producer
- ~AcquiredBuffer() {
- //mAcquirable.clear();
- mOnReleased(this);
- // mOnReleased should have called getReleaseFence(), which takes ownership of all fds, but just in case:
- ALOGW_IF(!mReleaseFenceFds.empty(), "release fences were not obtained, closing fds");
- for (int fildes : mReleaseFenceFds) {
- ::close(fildes);
- TRESPASS_DBG();
- }
- }
-
-private:
- std::shared_ptr<GraphicBufferSource::CachedBuffer> mBuffer;
- std::shared_ptr<GraphicBufferSource::CachedBuffer::Acquirable> mAcquirable;
- sp<Fence> mAcquireFence;
- Vector<int> mReleaseFenceFds;
- bool mGotReleaseFences;
- std::function<void(AcquiredBuffer *)> mOnReleased;
-
- /**
- * Creates and returns a release fence from 0 or more release fence file descriptors from
- * the specified range in the array.
- *
- * @param start start index
- * @param num number of release fds to merge
- */
- sp<Fence> getReleaseFence(size_t start, size_t num) const {
- if (num == 0) {
- return Fence::NO_FENCE;
- } else if (num == 1) {
- return new Fence(mReleaseFenceFds[start]);
- } else {
- return Fence::merge("GBS::AB",
- getReleaseFence(start, num >> 1),
- getReleaseFence(start + (num >> 1), num - (num >> 1)));
- }
- }
-};
-
-GraphicBufferSource::GraphicBufferSource() :
- mInitCheck(UNKNOWN_ERROR),
- mNumAvailableUnacquiredBuffers(0),
- mNumOutstandingAcquires(0),
- mEndOfStream(false),
- mEndOfStreamSent(false),
- mLastDataspace(HAL_DATASPACE_UNKNOWN),
- mExecuting(false),
- mSuspended(false),
- mLastFrameTimestampUs(-1),
- mStopTimeUs(-1),
- mLastActionTimeUs(-1ll),
- mSkipFramesBeforeNs(-1ll),
- mFrameRepeatIntervalUs(-1ll),
- mRepeatLastFrameGeneration(0),
- mOutstandingFrameRepeatCount(0),
- mFrameRepeatBlockedOnCodecBuffer(false),
- mFps(-1.0),
- mCaptureFps(-1.0),
- mBaseCaptureUs(-1ll),
- mBaseFrameUs(-1ll),
- mFrameCount(0),
- mPrevCaptureUs(-1ll),
- mPrevFrameUs(-1ll),
- mInputBufferTimeOffsetUs(0ll) {
- ALOGV("GraphicBufferSource");
-
- String8 name("GraphicBufferSource");
-
- BufferQueue::createBufferQueue(&mProducer, &mConsumer);
- mConsumer->setConsumerName(name);
-
- // Note that we can't create an sp<...>(this) in a ctor if nothing keeps a
- // reference to it once the ctor ends, as that would cause the refcount of 'this'
- // to drop to 0 at the end of the ctor. Since all we need is a wp<...>,
- // that's what we create.
- wp<BufferQueue::ConsumerListener> listener =
- static_cast<BufferQueue::ConsumerListener*>(this);
- sp<IConsumerListener> proxy =
- new BufferQueue::ProxyConsumerListener(listener);
-
- mInitCheck = mConsumer->consumerConnect(proxy, false);
- if (mInitCheck != NO_ERROR) {
- ALOGE("Error connecting to BufferQueue: %s (%d)",
- strerror(-mInitCheck), mInitCheck);
- return;
- }
-
- memset(&mDefaultColorAspectsPacked, 0, sizeof(mDefaultColorAspectsPacked));
-
- CHECK(mInitCheck == NO_ERROR);
-}
-
-GraphicBufferSource::~GraphicBufferSource() {
- ALOGV("~GraphicBufferSource");
- {
- // all acquired buffers must be freed with the mutex locked otherwise our debug assertion
- // may trigger
- Mutex::Autolock autoLock(mMutex);
- mAvailableBuffers.clear();
- mSubmittedCodecBuffers.clear();
- mLatestBuffer.mBuffer.reset();
- }
-
- if (mNumOutstandingAcquires != 0) {
- ALOGW("potential buffer leak: acquired=%d", mNumOutstandingAcquires);
- TRESPASS_DBG();
- }
- if (mConsumer != NULL) {
- status_t err = mConsumer->consumerDisconnect();
- if (err != NO_ERROR) {
- ALOGW("consumerDisconnect failed: %d", err);
- }
- }
-}
-
-Status GraphicBufferSource::onOmxExecuting() {
- Mutex::Autolock autoLock(mMutex);
- ALOGV("--> executing; available=%zu, submittable=%zd",
- mAvailableBuffers.size(), mFreeCodecBuffers.size());
- CHECK(!mExecuting);
- mExecuting = true;
- mLastDataspace = HAL_DATASPACE_UNKNOWN;
- ALOGV("clearing last dataSpace");
-
- // Start by loading up as many buffers as possible. We want to do this,
- // rather than just submit the first buffer, to avoid a degenerate case:
- // if all BQ buffers arrive before we start executing, and we only submit
- // one here, the other BQ buffers will just sit until we get notified
- // that the codec buffer has been released. We'd then acquire and
- // submit a single additional buffer, repeatedly, never using more than
- // one codec buffer simultaneously. (We could instead try to submit
- // all BQ buffers whenever any codec buffer is freed, but if we get the
- // initial conditions right that will never be useful.)
- while (haveAvailableBuffers_l()) {
- if (!fillCodecBuffer_l()) {
- ALOGV("stop load with available=%zu+%d",
- mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
- break;
- }
- }
-
- ALOGV("done loading initial frames, available=%zu+%d",
- mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
-
- // If EOS has already been signaled, and there are no more frames to
- // submit, try to send EOS now as well.
- if (mStopTimeUs == -1 && mEndOfStream && !haveAvailableBuffers_l()) {
- submitEndOfInputStream_l();
- }
-
- if (mFrameRepeatIntervalUs > 0ll && mLooper == NULL) {
- mReflector = new AHandlerReflector<GraphicBufferSource>(this);
-
- mLooper = new ALooper;
- mLooper->registerHandler(mReflector);
- mLooper->start();
-
- if (mLatestBuffer.mBuffer != nullptr) {
- queueFrameRepeat_l();
- }
- }
-
- return Status::ok();
-}
-
-Status GraphicBufferSource::onOmxIdle() {
- ALOGV("omxIdle");
-
- Mutex::Autolock autoLock(mMutex);
-
- if (mExecuting) {
- // We are only interested in the transition from executing->idle,
- // not loaded->idle.
- mExecuting = false;
- }
- return Status::ok();
-}
-
-Status GraphicBufferSource::onOmxLoaded(){
- Mutex::Autolock autoLock(mMutex);
- if (mLooper != NULL) {
- mLooper->unregisterHandler(mReflector->id());
- mReflector.clear();
-
- mLooper->stop();
- mLooper.clear();
- }
-
- ALOGV("--> loaded; available=%zu+%d eos=%d eosSent=%d acquired=%d",
- mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers,
- mEndOfStream, mEndOfStreamSent, mNumOutstandingAcquires);
-
- // Codec is no longer executing. Releasing all buffers to bq.
- mFreeCodecBuffers.clear();
- mSubmittedCodecBuffers.clear();
- mLatestBuffer.mBuffer.reset();
- mOMXNode.clear();
- mExecuting = false;
-
- return Status::ok();
-}
-
-Status GraphicBufferSource::onInputBufferAdded(codec_buffer_id bufferId) {
- Mutex::Autolock autoLock(mMutex);
-
- if (mExecuting) {
- // This should never happen -- buffers can only be allocated when
- // transitioning from "loaded" to "idle".
- ALOGE("addCodecBuffer: buffer added while executing");
- return Status::fromServiceSpecificError(INVALID_OPERATION);
- }
-
- ALOGV("addCodecBuffer: bufferId=%u", bufferId);
-
- mFreeCodecBuffers.push_back(bufferId);
- return Status::ok();
-}
-
-Status GraphicBufferSource::onInputBufferEmptied(codec_buffer_id bufferId, int fenceFd) {
- Mutex::Autolock autoLock(mMutex);
- FileDescriptor::Autoclose fence(fenceFd);
-
- ssize_t cbi = mSubmittedCodecBuffers.indexOfKey(bufferId);
- if (cbi < 0) {
- // This should never happen.
- ALOGE("onInputBufferEmptied: buffer not recognized (bufferId=%u)", bufferId);
- return Status::fromServiceSpecificError(BAD_VALUE);
- }
-
- std::shared_ptr<AcquiredBuffer> buffer = mSubmittedCodecBuffers.valueAt(cbi);
-
- // Move buffer to available buffers
- mSubmittedCodecBuffers.removeItemsAt(cbi);
- mFreeCodecBuffers.push_back(bufferId);
-
- // header->nFilledLen may not be the original value, so we can't compare
- // that to zero to see if this was the EOS buffer. Instead we just
- // see if there is a null AcquiredBuffer, which should only ever happen for EOS.
- if (buffer == nullptr) {
- if (!(mEndOfStream && mEndOfStreamSent)) {
- // This can happen when broken code sends us the same buffer twice in a row.
- ALOGE("onInputBufferEmptied: non-EOS null buffer (bufferId=%u)", bufferId);
- } else {
- ALOGV("onInputBufferEmptied: EOS null buffer (bufferId=%u@%zd)", bufferId, cbi);
- }
- // No GraphicBuffer to deal with, no additional input or output is expected, so just return.
- return Status::fromServiceSpecificError(BAD_VALUE);
- }
-
- if (!mExecuting) {
- // this is fine since this could happen when going from Idle to Loaded
- ALOGV("onInputBufferEmptied: no longer executing (bufferId=%u@%zd)", bufferId, cbi);
- return Status::fromServiceSpecificError(OK);
- }
-
- ALOGV("onInputBufferEmptied: bufferId=%d@%zd [slot=%d, useCount=%ld, handle=%p] acquired=%d",
- bufferId, cbi, buffer->getSlot(), buffer.use_count(), buffer->getGraphicBuffer()->handle,
- mNumOutstandingAcquires);
-
- buffer->addReleaseFenceFd(fence.release());
- // release the codec's reference to the video buffer, in case the remove above did not do it
- buffer.reset();
-
- if (haveAvailableBuffers_l()) {
- // Fill this codec buffer.
- CHECK(!mEndOfStreamSent);
- ALOGV("onInputBufferEmptied: buffer freed, feeding codec (available=%zu+%d, eos=%d)",
- mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers, mEndOfStream);
- fillCodecBuffer_l();
- } else if (mEndOfStream && mStopTimeUs == -1) {
- // No frames available, but EOS is pending and no stop time, so use this buffer to
- // send that.
- ALOGV("onInputBufferEmptied: buffer freed, submitting EOS");
- submitEndOfInputStream_l();
- } else if (mFrameRepeatBlockedOnCodecBuffer) {
- bool success = repeatLatestBuffer_l();
- ALOGV("onInputBufferEmptied: completing deferred repeatLatestBuffer_l %s",
- success ? "SUCCESS" : "FAILURE");
- mFrameRepeatBlockedOnCodecBuffer = false;
- }
-
- // releaseReleasableBuffers_l();
- return Status::ok();
-}
-
-void GraphicBufferSource::onDataspaceChanged_l(
- android_dataspace dataspace, android_pixel_format pixelFormat) {
- ALOGD("got buffer with new dataSpace #%x", dataspace);
- mLastDataspace = dataspace;
-
- if (ColorUtils::convertDataSpaceToV0(dataspace)) {
- mOMXNode->dispatchDataSpaceChanged(mLastDataspace, mDefaultColorAspectsPacked, pixelFormat);
- }
-}
-
-bool GraphicBufferSource::fillCodecBuffer_l() {
- CHECK(mExecuting && haveAvailableBuffers_l());
-
- if (mFreeCodecBuffers.empty()) {
- // No buffers available, bail.
- ALOGV("fillCodecBuffer_l: no codec buffers, available=%zu+%d",
- mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
- return false;
- }
-
- VideoBuffer item;
- if (mAvailableBuffers.empty()) {
- ALOGV("fillCodecBuffer_l: acquiring available buffer, available=%zu+%d",
- mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
- if (acquireBuffer_l(&item) != OK) {
- ALOGE("fillCodecBuffer_l: failed to acquire available buffer");
- return false;
- }
- } else {
- ALOGV("fillCodecBuffer_l: getting available buffer, available=%zu+%d",
- mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
- item = *mAvailableBuffers.begin();
- mAvailableBuffers.erase(mAvailableBuffers.begin());
- }
-
- int64_t itemTimeUs = item.mTimestampNs / 1000;
-
- // Process any ActionItems in the queue. If a buffer's timestamp is smaller
- // than the first action's timestamp, no action needs to be performed yet.
- // If the buffer's timestamp is larger than or equal to the last action's
- // timestamp, only the last action needs to be performed, since all the actions
- // before it are overridden by it. For the other cases, traverse
- // the queue to find the newest action whose timestamp is smaller than or equal to
- // the buffer's timestamp. For example, an action queue like
- // [pause 1us], [resume 2us], [pause 3us], [resume 4us], [pause 5us].... Upon
- // receiving a buffer with timestamp 3.5us, only the action [pause, 3us] needs
- // to be handled and [pause, 1us], [resume 2us] will be discarded.
- bool done = false;
- bool seeStopAction = false;
- if (!mActionQueue.empty()) {
- // First scan to check if bufferTimestamp is smaller than first action's timestamp.
- ActionItem nextAction = *(mActionQueue.begin());
- if (itemTimeUs < nextAction.mActionTimeUs) {
- ALOGV("No action. buffer timestamp %lld us < action timestamp: %lld us",
- (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
- // All the actions are ahead. Nothing needs to be performed now.
- // Release the buffer if we are in the suspended state, or process the
- // buffer if we are not.
- done = true;
- }
-
- if (!done) {
- // Find the newest action whose timestamp is smaller than or equal to itemTimeUs,
- // then remove all the actions up to and including that one.
- List<ActionItem>::iterator it = mActionQueue.begin();
- while (it != mActionQueue.end() && it->mActionTimeUs <= itemTimeUs
- && nextAction.mAction != ActionItem::STOP) {
- nextAction = *it;
- ++it;
- }
- mActionQueue.erase(mActionQueue.begin(), it);
-
- CHECK(itemTimeUs >= nextAction.mActionTimeUs);
- switch (nextAction.mAction) {
- case ActionItem::PAUSE:
- {
- mSuspended = true;
- ALOGV("RUNNING/PAUSE -> PAUSE at buffer %lld us PAUSE Time: %lld us",
- (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
- break;
- }
- case ActionItem::RESUME:
- {
- mSuspended = false;
- ALOGV("PAUSE/RUNNING -> RUNNING at buffer %lld us RESUME Time: %lld us",
- (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
- break;
- }
- case ActionItem::STOP:
- {
- ALOGV("RUNNING/PAUSE -> STOP at buffer %lld us STOP Time: %lld us",
- (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
- // Clear the whole ActionQueue as recording is done
- mActionQueue.clear();
- seeStopAction = true;
- break;
- }
- default:
- TRESPASS_DBG("Unknown action type");
- // return true here because we did consume an available buffer, so the
- // loop in onOmxExecuting will eventually terminate even if we hit this.
- return false;
- }
- }
- }
-
- if (seeStopAction) {
- // Clear all the buffers before setting mEndOfStream and signal EndOfInputStream.
- releaseAllAvailableBuffers_l();
- mEndOfStream = true;
- submitEndOfInputStream_l();
- return true;
- }
-
- if (mSuspended) {
- return true;
- }
-
- int err = UNKNOWN_ERROR;
-
- // only submit sample if start time is unspecified, or sample
- // is queued after the specified start time
- if (mSkipFramesBeforeNs < 0ll || item.mTimestampNs >= mSkipFramesBeforeNs) {
- // if start time is set, offset time stamp by start time
- if (mSkipFramesBeforeNs > 0) {
- item.mTimestampNs -= mSkipFramesBeforeNs;
- }
-
- int64_t timeUs = item.mTimestampNs / 1000;
- if (mFrameDropper != NULL && mFrameDropper->shouldDrop(timeUs)) {
- ALOGV("skipping frame (%lld) to meet max framerate", static_cast<long long>(timeUs));
- // set err to OK so that the skipped frame can still be saved as the latest frame
- err = OK;
- } else {
- err = submitBuffer_l(item); // this takes shared ownership of the acquired buffer on success
- }
- }
-
- if (err != OK) {
- ALOGV("submitBuffer_l failed, will release bq slot %d", item.mBuffer->getSlot());
- return true;
- } else {
- // Don't set the last buffer id if we're not repeating,
- // we'll be holding on to the last buffer for nothing.
- if (mFrameRepeatIntervalUs > 0ll) {
- setLatestBuffer_l(item);
- }
- ALOGV("buffer submitted [slot=%d, useCount=%ld] acquired=%d",
- item.mBuffer->getSlot(), item.mBuffer.use_count(), mNumOutstandingAcquires);
- mLastFrameTimestampUs = itemTimeUs;
- }
-
- return true;
-}
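The action handling above resolves each buffer against the newest PAUSE/RESUME/STOP action whose timestamp is at or before the buffer's timestamp and discards every earlier action. A compact standalone sketch of that resolution step (the Action/ActionItem types here are assumptions, not the real ones):

    #include <cstdint>
    #include <list>
    #include <optional>

    enum class Action { Pause, Resume, Stop };

    struct ActionItem {
        Action action;
        int64_t timeUs;
    };

    // Returns the action that applies to a buffer at bufferTimeUs and erases the
    // consumed prefix of the queue; returns nullopt while every action is still
    // in the future.
    std::optional<ActionItem> resolveAction(std::list<ActionItem>& queue,
                                            int64_t bufferTimeUs) {
        if (queue.empty() || bufferTimeUs < queue.front().timeUs) {
            return std::nullopt;  // all queued actions are ahead of this buffer
        }
        ActionItem chosen = queue.front();
        auto it = queue.begin();
        // Take the newest action that is already due, but never walk past a STOP.
        while (it != queue.end() && it->timeUs <= bufferTimeUs
                && chosen.action != Action::Stop) {
            chosen = *it;
            ++it;
        }
        queue.erase(queue.begin(), it);  // earlier actions are overridden by 'chosen'
        return chosen;
    }

With a queue of [pause@1us, resume@2us, pause@3us, resume@4us, pause@5us] and a buffer at 3.5us, this returns pause@3us and drops pause@1us and resume@2us, matching the example in the comment above.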
-
-bool GraphicBufferSource::repeatLatestBuffer_l() {
- CHECK(mExecuting && !haveAvailableBuffers_l());
-
- if (mLatestBuffer.mBuffer == nullptr || mSuspended) {
- return false;
- }
-
- if (mFreeCodecBuffers.empty()) {
- // No buffers available, bail.
- ALOGV("repeatLatestBuffer_l: no codec buffers.");
- return false;
- }
-
- if (!mLatestBuffer.mBuffer->isCached()) {
- ALOGV("repeatLatestBuffer_l: slot was discarded, but repeating our own reference");
- }
-
- // it is ok to update the timestamp of latest buffer as it is only used for submission
- status_t err = submitBuffer_l(mLatestBuffer);
- if (err != OK) {
- return false;
- }
-
- /* Repeat the last frame up to kRepeatLastFrameCount times.
- * For a static scene, a single repeat might not get rid of encoder
- * ghosting completely; refresh a couple more times to get better quality.
- */
- if (--mOutstandingFrameRepeatCount > 0) {
- // set up timestamp for repeat frame
- mLatestBuffer.mTimestampNs += mFrameRepeatIntervalUs * 1000;
- queueFrameRepeat_l();
- }
-
- return true;
-}
-
-void GraphicBufferSource::setLatestBuffer_l(const VideoBuffer &item) {
- mLatestBuffer = item;
-
- ALOGV("setLatestBuffer_l: [slot=%d, useCount=%ld]",
- mLatestBuffer.mBuffer->getSlot(), mLatestBuffer.mBuffer.use_count());
-
- mOutstandingFrameRepeatCount = kRepeatLastFrameCount;
- // set up timestamp for repeat frame
- mLatestBuffer.mTimestampNs += mFrameRepeatIntervalUs * 1000;
- queueFrameRepeat_l();
-}
-
-void GraphicBufferSource::queueFrameRepeat_l() {
- mFrameRepeatBlockedOnCodecBuffer = false;
-
- if (mReflector != NULL) {
- sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector);
- msg->setInt32("generation", ++mRepeatLastFrameGeneration);
- msg->post(mFrameRepeatIntervalUs);
- }
-}
-
-bool GraphicBufferSource::calculateCodecTimestamp_l(
- nsecs_t bufferTimeNs, int64_t *codecTimeUs) {
- int64_t timeUs = bufferTimeNs / 1000;
- timeUs += mInputBufferTimeOffsetUs;
-
- if (mCaptureFps > 0.
- && (mFps > 2 * mCaptureFps
- || mCaptureFps > 2 * mFps)) {
- // Time lapse or slow motion mode
- if (mPrevCaptureUs < 0ll) {
- // first capture
- mPrevCaptureUs = mBaseCaptureUs = timeUs;
- // adjust the first sample timestamp.
- mPrevFrameUs = mBaseFrameUs =
- std::llround((timeUs * mCaptureFps) / mFps);
- mFrameCount = 0;
- } else {
- // snap to nearest capture point
- int64_t nFrames = std::llround(
- (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000);
- if (nFrames <= 0) {
- // skip this frame as it's too close to previous capture
- ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
- return false;
- }
- mFrameCount += nFrames;
- mPrevCaptureUs = mBaseCaptureUs + std::llround(
- mFrameCount * 1000000 / mCaptureFps);
- mPrevFrameUs = mBaseFrameUs + std::llround(
- mFrameCount * 1000000 / mFps);
- }
-
- ALOGV("timeUs %lld, captureUs %lld, frameUs %lld",
- static_cast<long long>(timeUs),
- static_cast<long long>(mPrevCaptureUs),
- static_cast<long long>(mPrevFrameUs));
- } else {
- if (timeUs <= mPrevFrameUs) {
- // Drop the frame if it's going backward in time. Bad timestamp
- // could disrupt encoder's rate control completely.
- ALOGW("Dropping frame that's going backward in time");
- return false;
- }
-
- mPrevFrameUs = timeUs;
- }
-
- *codecTimeUs = mPrevFrameUs;
- return true;
-}
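The time-lapse/slow-motion branch above rescales capture timestamps so the encoded stream plays at mFps: the n-th kept frame gets a codec timestamp of roughly base + n * 1e6 / mFps, and frames arriving less than one capture interval (1e6 / mCaptureFps) after the previous capture are skipped. A worked sketch with assumed rates (captureFps = 1, fps = 30, i.e. a classic time lapse):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical rates: capture one frame per second, play back at 30 fps.
        const double captureFps = 1.0, fps = 30.0;
        int64_t baseCaptureUs = -1, baseFrameUs = -1, prevCaptureUs = -1;
        int64_t frameCount = 0;

        // Frames captured once per second (timestamps in microseconds).
        for (int64_t t = 0; t <= 5000000; t += 1000000) {
            int64_t codecTimeUs;
            if (prevCaptureUs < 0) {
                // First capture anchors both timelines.
                prevCaptureUs = baseCaptureUs = t;
                baseFrameUs = std::llround(t * captureFps / fps);
                frameCount = 0;
                codecTimeUs = baseFrameUs;
            } else {
                // Snap to the nearest capture point; skip frames that are too close.
                int64_t nFrames = std::llround((t - prevCaptureUs) * captureFps / 1000000);
                if (nFrames <= 0) continue;
                frameCount += nFrames;
                prevCaptureUs = baseCaptureUs + std::llround(frameCount * 1000000 / captureFps);
                codecTimeUs = baseFrameUs + std::llround(frameCount * 1000000 / fps);
            }
            // One second of wall-clock capture maps to ~33333 us of encoded time.
            std::printf("capture %lld us -> codec %lld us\n",
                        (long long)t, (long long)codecTimeUs);
        }
        return 0;
    }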
-
-status_t GraphicBufferSource::submitBuffer_l(const VideoBuffer &item) {
- CHECK(!mFreeCodecBuffers.empty());
- IOMX::buffer_id codecBufferId = *mFreeCodecBuffers.begin();
-
- ALOGV("submitBuffer_l [slot=%d, bufferId=%d]", item.mBuffer->getSlot(), codecBufferId);
-
- int64_t codecTimeUs;
- if (!calculateCodecTimestamp_l(item.mTimestampNs, &codecTimeUs)) {
- return UNKNOWN_ERROR;
- }
-
- if ((android_dataspace)item.mDataspace != mLastDataspace) {
- onDataspaceChanged_l(
- item.mDataspace,
- (android_pixel_format)item.mBuffer->getGraphicBuffer()->format);
- }
-
- std::shared_ptr<AcquiredBuffer> buffer = item.mBuffer;
- // Use a GraphicBuffer for now, as OMXNodeInstance is using GraphicBuffers to hold references
- // and it requires this graphic buffer to be able to hold its reference;
- // otherwise we would need to create a new GraphicBuffer from an ANWBuffer, separate from the
- // acquired GraphicBuffer.
- // TODO: this can be reworked globally to use ANWBuffer references
- sp<GraphicBuffer> graphicBuffer = buffer->getGraphicBuffer();
- status_t err = mOMXNode->emptyBuffer(
- codecBufferId, OMX_BUFFERFLAG_ENDOFFRAME, graphicBuffer, codecTimeUs,
- buffer->getAcquireFenceFd());
-
- if (err != OK) {
- ALOGW("WARNING: emptyGraphicBuffer failed: 0x%x", err);
- return err;
- }
-
- mFreeCodecBuffers.erase(mFreeCodecBuffers.begin());
-
- ssize_t cbix = mSubmittedCodecBuffers.add(codecBufferId, buffer);
- ALOGV("emptyGraphicBuffer succeeded, bufferId=%u@%zd bufhandle=%p",
- codecBufferId, cbix, graphicBuffer->handle);
- return OK;
-}
-
-void GraphicBufferSource::submitEndOfInputStream_l() {
- CHECK(mEndOfStream);
- if (mEndOfStreamSent) {
- ALOGV("EOS already sent");
- return;
- }
-
- if (mFreeCodecBuffers.empty()) {
- ALOGV("submitEndOfInputStream_l: no codec buffers available");
- return;
- }
- IOMX::buffer_id codecBufferId = *mFreeCodecBuffers.begin();
-
- // We reject any additional incoming graphic buffers. There is no acquired buffer used for EOS.
- status_t err = mOMXNode->emptyBuffer(
- codecBufferId, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS);
- if (err != OK) {
- ALOGW("emptyDirectBuffer EOS failed: 0x%x", err);
- } else {
- mFreeCodecBuffers.erase(mFreeCodecBuffers.begin());
- ssize_t cbix = mSubmittedCodecBuffers.add(codecBufferId, nullptr);
- ALOGV("submitEndOfInputStream_l: buffer submitted, bufferId=%u@%zd", codecBufferId, cbix);
- mEndOfStreamSent = true;
-
- // no need to hold onto any buffers for frame repeating
- ++mRepeatLastFrameGeneration;
- mLatestBuffer.mBuffer.reset();
- }
-}
-
-status_t GraphicBufferSource::acquireBuffer_l(VideoBuffer *ab) {
- BufferItem bi;
- status_t err = mConsumer->acquireBuffer(&bi, 0);
- if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
- // shouldn't happen
- ALOGW("acquireBuffer_l: frame was not available");
- return err;
- } else if (err != OK) {
- ALOGW("acquireBuffer_l: failed with err=%d", err);
- return err;
- }
- --mNumAvailableUnacquiredBuffers;
-
- // Manage our buffer cache.
- std::shared_ptr<CachedBuffer> buffer;
- ssize_t bsi = mBufferSlots.indexOfKey(bi.mSlot);
- if (bi.mGraphicBuffer != NULL) {
- // replace/initialize slot with new buffer
- ALOGV("acquireBuffer_l: %s buffer slot %d", bsi < 0 ? "setting" : "UPDATING", bi.mSlot);
- if (bsi >= 0) {
- discardBufferAtSlotIndex_l(bsi);
- } else {
- bsi = mBufferSlots.add(bi.mSlot, nullptr);
- }
- buffer = std::make_shared<CachedBuffer>(bi.mSlot, bi.mGraphicBuffer);
- mBufferSlots.replaceValueAt(bsi, buffer);
- } else {
- buffer = mBufferSlots.valueAt(bsi);
- }
- int64_t frameNum = bi.mFrameNumber;
-
- std::shared_ptr<AcquiredBuffer> acquiredBuffer =
- std::make_shared<AcquiredBuffer>(
- buffer,
- [frameNum, this](AcquiredBuffer *buffer){
- // AcquiredBuffer's destructor should always be called when mMutex is locked.
- // If we had a reentrant mutex, we could just lock it again to ensure this.
- if (mMutex.tryLock() == 0) {
- TRESPASS_DBG();
- mMutex.unlock();
- }
-
- // we can release buffers immediately if not using adapters
- // alternately, we could add them to mSlotsToRelease, but we would
- // somehow need to propagate frame number to that queue
- if (buffer->isCached()) {
- --mNumOutstandingAcquires;
- mConsumer->releaseBuffer(
- buffer->getSlot(), frameNum, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
- buffer->getReleaseFence());
- }
- },
- bi.mFence);
- VideoBuffer videoBuffer{acquiredBuffer, bi.mTimestamp, bi.mDataSpace};
- *ab = videoBuffer;
- ++mNumOutstandingAcquires;
- return OK;
-}
-
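acquireBuffer_l() wraps each acquired slot in an AcquiredBuffer whose release callback does the consumer release and the outstanding-acquire bookkeeping in one place, so the two can never drift apart. The same pattern in generic form, using standard library types instead of the classes in this file (names are illustrative):

    #include <functional>
    #include <memory>

    // Runs a caller-supplied release action exactly once, when the last
    // shared_ptr reference to the acquisition goes away.
    class ScopedAcquire {
    public:
        ScopedAcquire(int slot, std::function<void(int)> onRelease)
            : mSlot(slot), mOnRelease(std::move(onRelease)) {}
        ~ScopedAcquire() { if (mOnRelease) mOnRelease(mSlot); }
        int slot() const { return mSlot; }
    private:
        int mSlot;
        std::function<void(int)> mOnRelease;
    };

    // The callback captures whatever must stay consistent with the release;
    // here a plain counter stands in for mNumOutstandingAcquires.
    std::shared_ptr<ScopedAcquire> acquire(int slot, int *outstanding) {
        ++*outstanding;
        return std::make_shared<ScopedAcquire>(slot, [outstanding](int /*slot*/) {
            --*outstanding;
            // consumer->releaseBuffer(slot, ...) would go here in the real code.
        });
    }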
-// BufferQueue::ConsumerListener callback
-void GraphicBufferSource::onFrameAvailable(const BufferItem& item __unused) {
- Mutex::Autolock autoLock(mMutex);
-
- ALOGV("onFrameAvailable: executing=%d available=%zu+%d",
- mExecuting, mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
- ++mNumAvailableUnacquiredBuffers;
-
- // For BufferQueue we cannot acquire a buffer if we cannot immediately feed it to the codec
- // UNLESS we are discarding this buffer (acquiring and immediately releasing it), which makes
- // this an ugly logic.
- // NOTE: We could also rely on our debug counter but that is meant only as a debug counter.
- if (!areWeDiscardingAvailableBuffers_l() && mFreeCodecBuffers.empty()) {
- // we may not be allowed to acquire a possibly encodable buffer, so just note that
- // it is available
- ALOGV("onFrameAvailable: cannot acquire buffer right now, do it later");
-
- ++mRepeatLastFrameGeneration; // cancel any pending frame repeat
- return;
- }
-
- VideoBuffer buffer;
- status_t err = acquireBuffer_l(&buffer);
- if (err != OK) {
- ALOGE("onFrameAvailable: acquireBuffer returned err=%d", err);
- } else {
- onBufferAcquired_l(buffer);
- }
-}
-
-bool GraphicBufferSource::areWeDiscardingAvailableBuffers_l() {
- return mEndOfStreamSent // already sent EOS to codec
- || mOMXNode == nullptr // there is no codec connected
- || (mSuspended && mActionQueue.empty()) // we are suspended and not waiting for
- // any further action
- || !mExecuting;
-}
-
-void GraphicBufferSource::onBufferAcquired_l(const VideoBuffer &buffer) {
- if (mEndOfStreamSent) {
- // This should only be possible if a new buffer was queued after
- // EOS was signaled, i.e. the app is misbehaving.
- ALOGW("onFrameAvailable: EOS is sent, ignoring frame");
- } else if (mOMXNode == NULL || (mSuspended && mActionQueue.empty())) {
- // FIXME: if we are suspended but have a resume queued we will stop repeating the last
- // frame. Is that the desired behavior?
- ALOGV("onFrameAvailable: suspended, ignoring frame");
- } else {
- ++mRepeatLastFrameGeneration; // cancel any pending frame repeat
- mAvailableBuffers.push_back(buffer);
- if (mExecuting) {
- fillCodecBuffer_l();
- }
- }
-}
-
-// BufferQueue::ConsumerListener callback
-void GraphicBufferSource::onBuffersReleased() {
- Mutex::Autolock lock(mMutex);
-
- uint64_t slotMask;
- uint64_t releaseMask;
- if (mConsumer->getReleasedBuffers(&releaseMask) != NO_ERROR) {
- slotMask = 0xffffffffffffffffULL;
- ALOGW("onBuffersReleased: unable to get released buffer set");
- } else {
- slotMask = releaseMask;
- ALOGV("onBuffersReleased: 0x%016" PRIx64, slotMask);
- }
-
- AString unpopulated;
- for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
- if ((slotMask & 0x01) != 0) {
- if (!discardBufferInSlot_l(i)) {
- if (!unpopulated.empty()) {
- unpopulated.append(", ");
- }
- unpopulated.append(i);
- }
- }
- slotMask >>= 1;
- }
- if (!unpopulated.empty()) {
- ALOGW("released unpopulated slots: [%s]", unpopulated.c_str());
- }
-}
-
-bool GraphicBufferSource::discardBufferInSlot_l(GraphicBufferSource::slot_id i) {
- ssize_t bsi = mBufferSlots.indexOfKey(i);
- if (bsi < 0) {
- return false;
- } else {
- discardBufferAtSlotIndex_l(bsi);
- mBufferSlots.removeItemsAt(bsi);
- return true;
- }
-}
-
-void GraphicBufferSource::discardBufferAtSlotIndex_l(ssize_t bsi) {
- const std::shared_ptr<CachedBuffer>& buffer = mBufferSlots.valueAt(bsi);
- // use -2 if there is no latest buffer, and -1 if it is no longer cached
- slot_id latestBufferSlot =
- mLatestBuffer.mBuffer == nullptr ? -2 : mLatestBuffer.mBuffer->getSlot();
- ALOGV("releasing acquired buffer: [slot=%d, useCount=%ld], latest: [slot=%d]",
- mBufferSlots.keyAt(bsi), buffer.use_count(), latestBufferSlot);
- mBufferSlots.valueAt(bsi)->onDroppedFromCache();
-
- // If the slot of an acquired buffer is discarded, that buffer will not have to be
- // released to the producer, so account it here. However, it is possible that the
- // acquired buffer has already been discarded so check if it still is.
- if (buffer->isAcquired()) {
- --mNumOutstandingAcquires;
- }
-
- // clear the buffer reference (not technically needed as caller either replaces or deletes
- // it; done here for safety).
- mBufferSlots.editValueAt(bsi).reset();
- CHECK_DBG(buffer == nullptr);
-}
-
-void GraphicBufferSource::releaseAllAvailableBuffers_l() {
- mAvailableBuffers.clear();
- while (mNumAvailableUnacquiredBuffers > 0) {
- VideoBuffer item;
- if (acquireBuffer_l(&item) != OK) {
- ALOGW("releaseAllAvailableBuffers: failed to acquire available unacquired buffer");
- break;
- }
- }
-}
-
-// BufferQueue::ConsumerListener callback
-void GraphicBufferSource::onSidebandStreamChanged() {
- ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
-}
-
-status_t GraphicBufferSource::configure(
- const sp<IOmxNodeWrapper>& omxNode,
- int32_t dataSpace,
- int32_t bufferCount,
- uint32_t frameWidth,
- uint32_t frameHeight,
- uint32_t consumerUsage) {
- if (omxNode == NULL) {
- return BAD_VALUE;
- }
-
-
- // Call setMaxAcquiredBufferCount without lock.
- // setMaxAcquiredBufferCount could call back to onBuffersReleased
- // if the buffer count change results in releasing of existing buffers,
- // which would lead to deadlock.
- status_t err = mConsumer->setMaxAcquiredBufferCount(bufferCount);
- if (err != NO_ERROR) {
- ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
- bufferCount, err);
- return err;
- }
-
- {
- Mutex::Autolock autoLock(mMutex);
- mOMXNode = omxNode;
-
- err = mConsumer->setDefaultBufferSize(frameWidth, frameHeight);
- if (err != NO_ERROR) {
- ALOGE("Unable to set BQ default buffer size to %ux%u: %d",
- frameWidth, frameHeight, err);
- return err;
- }
-
- consumerUsage |= GRALLOC_USAGE_HW_VIDEO_ENCODER;
- mConsumer->setConsumerUsageBits(consumerUsage);
-
- // Sets the default buffer data space
- ALOGD("setting dataspace: %#x, acquired=%d", dataSpace, mNumOutstandingAcquires);
- mConsumer->setDefaultBufferDataSpace((android_dataspace)dataSpace);
- mLastDataspace = (android_dataspace)dataSpace;
-
- mExecuting = false;
- mSuspended = false;
- mEndOfStream = false;
- mEndOfStreamSent = false;
- mSkipFramesBeforeNs = -1ll;
- mFrameRepeatIntervalUs = -1ll;
- mRepeatLastFrameGeneration = 0;
- mOutstandingFrameRepeatCount = 0;
- mLatestBuffer.mBuffer.reset();
- mFrameRepeatBlockedOnCodecBuffer = false;
- mFps = -1.0;
- mCaptureFps = -1.0;
- mBaseCaptureUs = -1ll;
- mBaseFrameUs = -1ll;
- mPrevCaptureUs = -1ll;
- mPrevFrameUs = -1ll;
- mFrameCount = 0;
- mInputBufferTimeOffsetUs = 0;
- mStopTimeUs = -1;
- mActionQueue.clear();
- }
-
- return OK;
-}
-
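configure() calls setMaxAcquiredBufferCount() before taking mMutex because, as the comment notes, that call can synchronously trigger onBuffersReleased(), which takes the same mutex. The ordering constraint, reduced to a sketch with stand-in types (not the actual BufferQueue interfaces):

    #include <mutex>

    struct Source;

    struct Consumer {
        Source *listener = nullptr;
        void setMaxAcquiredBufferCount(int count);  // may call back into the listener
    };

    struct Source {
        std::mutex mMutex;
        Consumer consumer;

        void onBuffersReleased() {
            std::lock_guard<std::mutex> lock(mMutex);  // the callback takes the lock
        }

        void configure(int bufferCount) {
            // Must run unlocked: if it triggers onBuffersReleased() we would
            // otherwise try to take mMutex a second time and deadlock.
            consumer.setMaxAcquiredBufferCount(bufferCount);

            std::lock_guard<std::mutex> lock(mMutex);
            // ...the rest of the state reset happens under the lock...
        }
    };

    void Consumer::setMaxAcquiredBufferCount(int) {
        if (listener) listener->onBuffersReleased();  // simulated synchronous callback
    }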
-status_t GraphicBufferSource::setSuspend(bool suspend, int64_t suspendStartTimeUs) {
- ALOGV("setSuspend=%d at time %lld us", suspend, (long long)suspendStartTimeUs);
-
- Mutex::Autolock autoLock(mMutex);
-
- if (mStopTimeUs != -1) {
- ALOGE("setSuspend failed as STOP action is pending");
- return INVALID_OPERATION;
- }
-
- // Push the action to the queue.
- if (suspendStartTimeUs != -1) {
- // suspendStartTimeUs must be smaller or equal to current systemTime.
- int64_t currentSystemTimeUs = systemTime() / 1000;
- if (suspendStartTimeUs > currentSystemTimeUs) {
- ALOGE("setSuspend failed. %lld is larger than current system time %lld us",
- (long long)suspendStartTimeUs, (long long)currentSystemTimeUs);
- return INVALID_OPERATION;
- }
- if (mLastActionTimeUs != -1 && suspendStartTimeUs < mLastActionTimeUs) {
- ALOGE("setSuspend failed. %lld is smaller than last action time %lld us",
- (long long)suspendStartTimeUs, (long long)mLastActionTimeUs);
- return INVALID_OPERATION;
- }
- mLastActionTimeUs = suspendStartTimeUs;
- ActionItem action;
- action.mAction = suspend ? ActionItem::PAUSE : ActionItem::RESUME;
- action.mActionTimeUs = suspendStartTimeUs;
- ALOGV("Push %s action into actionQueue", suspend ? "PAUSE" : "RESUME");
- mActionQueue.push_back(action);
- } else {
- if (suspend) {
- mSuspended = true;
- releaseAllAvailableBuffers_l();
- return OK;
- } else {
- mSuspended = false;
- if (mExecuting && !haveAvailableBuffers_l()
- && mFrameRepeatBlockedOnCodecBuffer) {
- if (repeatLatestBuffer_l()) {
- ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
- mFrameRepeatBlockedOnCodecBuffer = false;
- } else {
- ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
- }
- }
- }
- }
- return OK;
-}
-
-status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) {
- ALOGV("setRepeatPreviousFrameDelayUs: delayUs=%lld", (long long)repeatAfterUs);
-
- Mutex::Autolock autoLock(mMutex);
-
- if (mExecuting || repeatAfterUs <= 0ll) {
- return INVALID_OPERATION;
- }
-
- mFrameRepeatIntervalUs = repeatAfterUs;
- return OK;
-}
-
-status_t GraphicBufferSource::setTimeOffsetUs(int64_t timeOffsetUs) {
- Mutex::Autolock autoLock(mMutex);
-
- // timeOffsetUs must be negative for adjustment.
- if (timeOffsetUs >= 0ll) {
- return INVALID_OPERATION;
- }
-
- mInputBufferTimeOffsetUs = timeOffsetUs;
- return OK;
-}
-
-status_t GraphicBufferSource::setMaxFps(float maxFps) {
- ALOGV("setMaxFps: maxFps=%lld", (long long)maxFps);
-
- Mutex::Autolock autoLock(mMutex);
-
- if (mExecuting) {
- return INVALID_OPERATION;
- }
-
- mFrameDropper = new FrameDropper();
- status_t err = mFrameDropper->setMaxFrameRate(maxFps);
- if (err != OK) {
- mFrameDropper.clear();
- return err;
- }
-
- return OK;
-}
-
-status_t GraphicBufferSource::setStartTimeUs(int64_t skipFramesBeforeUs) {
- ALOGV("setStartTimeUs: skipFramesBeforeUs=%lld", (long long)skipFramesBeforeUs);
-
- Mutex::Autolock autoLock(mMutex);
-
- mSkipFramesBeforeNs =
- (skipFramesBeforeUs > 0) ? (skipFramesBeforeUs * 1000) : -1ll;
-
- return OK;
-}
-
-status_t GraphicBufferSource::setStopTimeUs(int64_t stopTimeUs) {
- ALOGV("setStopTimeUs: %lld us", (long long)stopTimeUs);
- Mutex::Autolock autoLock(mMutex);
-
- if (mStopTimeUs != -1) {
- // Ignore if stop time has already been set
- return OK;
- }
-
- // stopTimeUs must be smaller or equal to current systemTime.
- int64_t currentSystemTimeUs = systemTime() / 1000;
- if (stopTimeUs > currentSystemTimeUs) {
- ALOGE("setStopTimeUs failed. %lld is larger than current system time %lld us",
- (long long)stopTimeUs, (long long)currentSystemTimeUs);
- return INVALID_OPERATION;
- }
- if (mLastActionTimeUs != -1 && stopTimeUs < mLastActionTimeUs) {
- ALOGE("setSuspend failed. %lld is smaller than last action time %lld us",
- (long long)stopTimeUs, (long long)mLastActionTimeUs);
- return INVALID_OPERATION;
- }
- mLastActionTimeUs = stopTimeUs;
- ActionItem action;
- action.mAction = ActionItem::STOP;
- action.mActionTimeUs = stopTimeUs;
- mActionQueue.push_back(action);
- mStopTimeUs = stopTimeUs;
- return OK;
-}
-
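setSuspend() and setStopTimeUs() above both funnel through the same action-queue rules: a timestamp is accepted only if it is not in the future and not older than the last queued action. A compact sketch of that validation with an illustrative action type (not the tree's ActionItem):

    #include <cstdint>
    #include <vector>

    enum class Action { PAUSE, RESUME, STOP };

    struct TimedAction {
        Action action;
        int64_t timeUs;
    };

    struct ActionQueue {
        std::vector<TimedAction> items;
        int64_t lastActionTimeUs = -1;

        // Rejects timestamps that lie in the future or move backward relative
        // to the last accepted action, mirroring the checks above.
        bool push(Action a, int64_t timeUs, int64_t nowUs) {
            if (timeUs > nowUs) return false;
            if (lastActionTimeUs != -1 && timeUs < lastActionTimeUs) return false;
            lastActionTimeUs = timeUs;
            items.push_back({a, timeUs});
            return true;
        }
    };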
-status_t GraphicBufferSource::getStopTimeOffsetUs(int64_t *stopTimeOffsetUs) {
- ALOGV("getStopTimeOffsetUs");
- Mutex::Autolock autoLock(mMutex);
- if (mStopTimeUs == -1) {
- ALOGW("Fail to return stopTimeOffsetUs as stop time is not set");
- return INVALID_OPERATION;
- }
- *stopTimeOffsetUs =
- mLastFrameTimestampUs == -1 ? 0 : mStopTimeUs - mLastFrameTimestampUs;
- return OK;
-}
-
-status_t GraphicBufferSource::setTimeLapseConfig(double fps, double captureFps) {
- ALOGV("setTimeLapseConfig: fps=%lg, captureFps=%lg",
- fps, captureFps);
- Mutex::Autolock autoLock(mMutex);
-
- if (mExecuting || !(fps > 0) || !(captureFps > 0)) {
- return INVALID_OPERATION;
- }
-
- mFps = fps;
- mCaptureFps = captureFps;
-
- return OK;
-}
-
-status_t GraphicBufferSource::setColorAspects(int32_t aspectsPacked) {
- Mutex::Autolock autoLock(mMutex);
- mDefaultColorAspectsPacked = aspectsPacked;
- ColorAspects colorAspects = ColorUtils::unpackToColorAspects(aspectsPacked);
- ALOGD("requesting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s))",
- colorAspects.mRange, asString(colorAspects.mRange),
- colorAspects.mPrimaries, asString(colorAspects.mPrimaries),
- colorAspects.mMatrixCoeffs, asString(colorAspects.mMatrixCoeffs),
- colorAspects.mTransfer, asString(colorAspects.mTransfer));
-
- return OK;
-}
-
-status_t GraphicBufferSource::signalEndOfInputStream() {
- Mutex::Autolock autoLock(mMutex);
- ALOGV("signalEndOfInputStream: executing=%d available=%zu+%d eos=%d",
- mExecuting, mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers, mEndOfStream);
-
- if (mEndOfStream) {
- ALOGE("EOS was already signaled");
- return INVALID_OPERATION;
- }
-
- // Set the end-of-stream flag. If no frames are pending from the
- // BufferQueue, and a codec buffer is available, and we're executing,
- // and there is no stop timestamp, we initiate the EOS from here.
- // Otherwise, we'll let codecBufferEmptied() (or omxExecuting) do it.
- //
- // Note: if there are no pending frames and all codec buffers are
- // available, we *must* submit the EOS from here or we'll just
- // stall since no future events are expected.
- mEndOfStream = true;
-
- if (mStopTimeUs == -1 && mExecuting && !haveAvailableBuffers_l()) {
- submitEndOfInputStream_l();
- }
-
- return OK;
-}
-
-void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatRepeatLastFrame:
- {
- Mutex::Autolock autoLock(mMutex);
-
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
-
- if (generation != mRepeatLastFrameGeneration) {
- // stale
- break;
- }
-
- if (!mExecuting || haveAvailableBuffers_l()) {
- break;
- }
-
- bool success = repeatLatestBuffer_l();
- if (success) {
- ALOGV("repeatLatestBuffer_l SUCCESS");
- } else {
- ALOGV("repeatLatestBuffer_l FAILURE");
- mFrameRepeatBlockedOnCodecBuffer = true;
- }
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-} // namespace android
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
deleted file mode 100644
index 09c4019..0000000
--- a/media/libstagefright/omx/OMX.cpp
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <inttypes.h>
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "OMX"
-#include <utils/Log.h>
-
-#include <dlfcn.h>
-
-#include <media/stagefright/omx/OMX.h>
-#include <media/stagefright/omx/OMXNodeInstance.h>
-#include <media/stagefright/omx/BWGraphicBufferSource.h>
-#include <media/stagefright/omx/OMXMaster.h>
-#include <media/stagefright/omx/OMXUtils.h>
-#include <media/stagefright/foundation/ADebug.h>
-
-namespace android {
-
-// node ids are created by concatenating the pid with a 16-bit counter
-static size_t kMaxNodeInstances = (1 << 16);
-
-OMX::OMX() : mMaster(new OMXMaster), mParser() {
-}
-
-OMX::~OMX() {
- delete mMaster;
- mMaster = NULL;
-}
-
-void OMX::binderDied(const wp<IBinder> &the_late_who) {
- sp<OMXNodeInstance> instance;
-
- {
- Mutex::Autolock autoLock(mLock);
-
- ssize_t index = mLiveNodes.indexOfKey(the_late_who);
-
- if (index < 0) {
- ALOGE("b/27597103, nonexistent observer on binderDied");
- android_errorWriteLog(0x534e4554, "27597103");
- return;
- }
-
- instance = mLiveNodes.editValueAt(index);
- mLiveNodes.removeItemsAt(index);
- }
-
- instance->onObserverDied();
-}
-
-status_t OMX::listNodes(List<ComponentInfo> *list) {
- list->clear();
-
- OMX_U32 index = 0;
- char componentName[256];
- while (mMaster->enumerateComponents(
- componentName, sizeof(componentName), index) == OMX_ErrorNone) {
- list->push_back(ComponentInfo());
- ComponentInfo &info = *--list->end();
-
- info.mName = componentName;
-
- Vector<String8> roles;
- OMX_ERRORTYPE err =
- mMaster->getRolesOfComponent(componentName, &roles);
-
- if (err == OMX_ErrorNone) {
- for (OMX_U32 i = 0; i < roles.size(); ++i) {
- info.mRoles.push_back(roles[i]);
- }
- }
-
- ++index;
- }
-
- return OK;
-}
-
-status_t OMX::allocateNode(
- const char *name, const sp<IOMXObserver> &observer,
- sp<IOMXNode> *omxNode) {
- Mutex::Autolock autoLock(mLock);
-
- omxNode->clear();
-
- if (mLiveNodes.size() == kMaxNodeInstances) {
- return NO_MEMORY;
- }
-
- sp<OMXNodeInstance> instance = new OMXNodeInstance(this, observer, name);
-
- OMX_COMPONENTTYPE *handle;
- OMX_ERRORTYPE err = mMaster->makeComponentInstance(
- name, &OMXNodeInstance::kCallbacks,
- instance.get(), &handle);
-
- if (err != OMX_ErrorNone) {
- ALOGE("FAILED to allocate omx component '%s' err=%s(%#x)", name, asString(err), err);
-
- return StatusFromOMXError(err);
- }
- instance->setHandle(handle);
-
- // Find quirks from mParser
- const auto& codec = mParser.getCodecMap().find(name);
- if (codec == mParser.getCodecMap().cend()) {
- ALOGW("Failed to obtain quirks for omx component '%s' from XML files",
- name);
- } else {
- uint32_t quirks = 0;
- for (const auto& quirk : codec->second.quirkSet) {
- if (quirk == "requires-allocate-on-input-ports") {
- quirks |= OMXNodeInstance::
- kRequiresAllocateBufferOnInputPorts;
- }
- if (quirk == "requires-allocate-on-output-ports") {
- quirks |= OMXNodeInstance::
- kRequiresAllocateBufferOnOutputPorts;
- }
- }
- instance->setQuirks(quirks);
- }
-
- mLiveNodes.add(IInterface::asBinder(observer), instance);
- IInterface::asBinder(observer)->linkToDeath(this);
-
- *omxNode = instance;
-
- return OK;
-}
-
-status_t OMX::freeNode(const sp<OMXNodeInstance> &instance) {
- if (instance == NULL) {
- return OK;
- }
-
- {
- Mutex::Autolock autoLock(mLock);
- ssize_t index = mLiveNodes.indexOfKey(IInterface::asBinder(instance->observer()));
- if (index < 0) {
- // This could conceivably happen if the observer dies at roughly the
- // same time that a client attempts to free the node explicitly.
-
- // NOTE: it's guaranteed that this method is called at most once per
- // instance.
- ALOGV("freeNode: instance already removed from book-keeping.");
- } else {
- mLiveNodes.removeItemsAt(index);
- IInterface::asBinder(instance->observer())->unlinkToDeath(this);
- }
- }
-
- CHECK(instance->handle() != NULL);
- OMX_ERRORTYPE err = mMaster->destroyComponentInstance(
- static_cast<OMX_COMPONENTTYPE *>(instance->handle()));
- ALOGV("freeNode: handle destroyed: %p", instance->handle());
-
- return StatusFromOMXError(err);
-}
-
-status_t OMX::createInputSurface(
- sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferSource> *bufferSource) {
- if (bufferProducer == NULL || bufferSource == NULL) {
- ALOGE("b/25884056");
- return BAD_VALUE;
- }
-
- sp<GraphicBufferSource> graphicBufferSource = new GraphicBufferSource();
- status_t err = graphicBufferSource->initCheck();
- if (err != OK) {
- ALOGE("Failed to create persistent input surface: %s (%d)",
- strerror(-err), err);
- return err;
- }
-
- *bufferProducer = graphicBufferSource->getIGraphicBufferProducer();
- *bufferSource = new BWGraphicBufferSource(graphicBufferSource);
-
- return OK;
-}
-
-} // namespace android
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index fd97fdc..0967b5f 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -22,6 +22,8 @@
#include <media/stagefright/omx/SoftOMXPlugin.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <vndksupport/linker.h>
+
#include <dlfcn.h>
#include <fcntl.h>
@@ -67,7 +69,7 @@
}
void OMXMaster::addPlugin(const char *libname) {
- mVendorLibHandle = dlopen(libname, RTLD_NOW);
+ mVendorLibHandle = android_load_sphal_library(libname, RTLD_NOW);
if (mVendorLibHandle == NULL) {
return;
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 015a148..7d2c2dd 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -344,7 +344,7 @@
////////////////////////////////////////////////////////////////////////////////
OMXNodeInstance::OMXNodeInstance(
- OmxNodeOwner *owner, const sp<IOMXObserver> &observer, const char *name)
+ Omx *owner, const sp<IOMXObserver> &observer, const char *name)
: mOwner(owner),
mHandle(NULL),
mObserver(observer),
@@ -354,7 +354,7 @@
mQuirks(0),
mBufferIDCount(0),
mRestorePtsFailed(false),
- mMaxTimestampGapUs(-1ll),
+ mMaxTimestampGapUs(0ll),
mPrevOriginalTimeUs(-1ll),
mPrevModifiedTimeUs(-1ll)
{
@@ -686,6 +686,7 @@
CLOG_CONFIG(setPortMode, "%s(%d), port %d", asString(mode), mode, portIndex);
+ status_t err = OK;
switch (mode) {
case IOMX::kPortModeDynamicANWBuffer:
{
@@ -694,17 +695,19 @@
CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
"not setting port mode to %s(%d) on output",
asString(mode), mode);
- return StatusFromOMXError(OMX_ErrorUnsupportedIndex);
+ err = StatusFromOMXError(OMX_ErrorUnsupportedIndex);
+ break;
}
- status_t err = enableNativeBuffers_l(
+ err = enableNativeBuffers_l(
portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
if (err != OK) {
- return err;
+ break;
}
}
(void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
- return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL);
+ err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL);
+ break;
}
case IOMX::kPortModeDynamicNativeHandle:
@@ -712,13 +715,15 @@
if (portIndex != kPortIndexInput) {
CLOG_ERROR(setPortMode, BAD_VALUE,
"%s(%d) mode is only supported on input port", asString(mode), mode);
- return BAD_VALUE;
+ err = BAD_VALUE;
+ break;
}
(void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
(void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
MetadataBufferType metaType = kMetadataBufferTypeNativeHandleSource;
- return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, &metaType);
+ err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE, &metaType);
+ break;
}
case IOMX::kPortModePresetSecureBuffer:
@@ -726,7 +731,8 @@
// Allow on both input and output.
(void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
(void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
- return enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_TRUE);
+ err = enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_TRUE);
+ break;
}
case IOMX::kPortModePresetANWBuffer:
@@ -734,7 +740,8 @@
if (portIndex != kPortIndexOutput) {
CLOG_ERROR(setPortMode, BAD_VALUE,
"%s(%d) mode is only supported on output port", asString(mode), mode);
- return BAD_VALUE;
+ err = BAD_VALUE;
+ break;
}
// Check if we're simulating legacy mode with metadata mode,
@@ -743,7 +750,7 @@
if (storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL) == OK) {
CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
"metdata mode enabled successfully");
- return OK;
+ break;
}
CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
@@ -754,15 +761,15 @@
// Disable secure buffer and enable graphic buffer
(void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
- status_t err = enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
+ err = enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
if (err != OK) {
- return err;
+ break;
}
// Not running experiment, or metadata is not supported.
// Disable metadata mode and use legacy mode.
(void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
- return OK;
+ break;
}
case IOMX::kPortModePresetByteBuffer:
@@ -771,15 +778,19 @@
(void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
(void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
(void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
- return OK;
- }
-
- default:
break;
}
- CLOG_ERROR(setPortMode, BAD_VALUE, "invalid port mode %d", mode);
- return BAD_VALUE;
+ default:
+ CLOG_ERROR(setPortMode, BAD_VALUE, "invalid port mode %d", mode);
+ err = BAD_VALUE;
+ break;
+ }
+
+ if (err == OK) {
+ mPortMode[portIndex] = mode;
+ }
+ return err;
}
status_t OMXNodeInstance::enableNativeBuffers_l(
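The setPortMode() hunks above replace the per-case early returns with a shared err/break pattern so that every successful path reaches a single exit point, where the accepted mode is recorded in mPortMode[portIndex]. The shape of that refactor, reduced to its essentials (stand-in types, not the real OMX definitions):

    #include <cstdint>
    #include <map>

    using status_t = int32_t;
    constexpr status_t OK = 0;
    constexpr status_t BAD_VALUE = -22;   // stand-in value

    enum class Mode { PresetByteBuffer, PresetANWBuffer };

    static std::map<int, Mode> sPortMode;
    static status_t disableNativeBuffers(int /*port*/) { return OK; }

    // Before the change each case returned directly and the chosen mode was
    // never recorded; with one exit point every success reaches the bookkeeping.
    status_t setMode(int port, Mode mode) {
        status_t err = OK;
        switch (mode) {
            case Mode::PresetByteBuffer:
                err = disableNativeBuffers(port);
                break;
            default:
                err = BAD_VALUE;
                break;
        }
        if (err == OK) {
            sPortMode[port] = mode;
        }
        return err;
    }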
@@ -1057,28 +1068,51 @@
}
switch (omxBuffer.mBufferType) {
- case OMXBuffer::kBufferTypePreset:
+ case OMXBuffer::kBufferTypePreset: {
+ if (mPortMode[portIndex] != IOMX::kPortModeDynamicANWBuffer
+ && mPortMode[portIndex] != IOMX::kPortModeDynamicNativeHandle) {
+ break;
+ }
return useBuffer_l(portIndex, NULL, NULL, buffer);
+ }
- case OMXBuffer::kBufferTypeSharedMem:
+ case OMXBuffer::kBufferTypeSharedMem: {
+ if (mPortMode[portIndex] != IOMX::kPortModePresetByteBuffer
+ && mPortMode[portIndex] != IOMX::kPortModeDynamicANWBuffer) {
+ break;
+ }
return useBuffer_l(portIndex, omxBuffer.mMem, NULL, buffer);
+ }
- case OMXBuffer::kBufferTypeANWBuffer:
+ case OMXBuffer::kBufferTypeANWBuffer: {
+ if (mPortMode[portIndex] != IOMX::kPortModePresetANWBuffer) {
+ break;
+ }
return useGraphicBuffer_l(portIndex, omxBuffer.mGraphicBuffer, buffer);
+ }
case OMXBuffer::kBufferTypeHidlMemory: {
+ if (mPortMode[portIndex] != IOMX::kPortModePresetByteBuffer
+ && mPortMode[portIndex] != IOMX::kPortModeDynamicANWBuffer
+ && mPortMode[portIndex] != IOMX::kPortModeDynamicNativeHandle) {
+ break;
+ }
sp<IHidlMemory> hidlMemory = mapMemory(omxBuffer.mHidlMemory);
if (hidlMemory == nullptr) {
ALOGE("OMXNodeInstance useBuffer() failed to map memory");
return NO_MEMORY;
}
return useBuffer_l(portIndex, NULL, hidlMemory, buffer);
- }
+ }
default:
+ return BAD_VALUE;
break;
}
- return BAD_VALUE;
+ ALOGE("b/77486542 : bufferType = %d vs. portMode = %d",
+ omxBuffer.mBufferType, mPortMode[portIndex]);
+ android_errorWriteLog(0x534e4554, "77486542");
+ return INVALID_OPERATION;
}
status_t OMXNodeInstance::useBuffer_l(
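The useBuffer() change above (b/77486542) stops handing buffers to the component when their type does not match the mode the port was configured with; a mismatch now logs and returns INVALID_OPERATION instead of silently proceeding. The accepted combinations, read off the added checks and collected into one illustrative helper:

    enum class BufferType { Preset, SharedMem, ANWBuffer, HidlMemory };
    enum class PortMode {
        PresetByteBuffer, PresetANWBuffer, DynamicANWBuffer, DynamicNativeHandle
    };

    // Preset      -> DynamicANWBuffer, DynamicNativeHandle
    // SharedMem   -> PresetByteBuffer, DynamicANWBuffer
    // ANWBuffer   -> PresetANWBuffer
    // HidlMemory  -> PresetByteBuffer, DynamicANWBuffer, DynamicNativeHandle
    bool bufferTypeMatchesPortMode(BufferType type, PortMode mode) {
        switch (type) {
            case BufferType::Preset:
                return mode == PortMode::DynamicANWBuffer
                    || mode == PortMode::DynamicNativeHandle;
            case BufferType::SharedMem:
                return mode == PortMode::PresetByteBuffer
                    || mode == PortMode::DynamicANWBuffer;
            case BufferType::ANWBuffer:
                return mode == PortMode::PresetANWBuffer;
            case BufferType::HidlMemory:
                return mode == PortMode::PresetByteBuffer
                    || mode == PortMode::DynamicANWBuffer
                    || mode == PortMode::DynamicNativeHandle;
        }
        return false;
    }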
@@ -1514,6 +1548,11 @@
android_errorWriteLog(0x534e4554, "35467458");
return BAD_VALUE;
}
+ if (mPortMode[portIndex] != IOMX::kPortModePresetSecureBuffer) {
+ ALOGE("b/77486542");
+ android_errorWriteLog(0x534e4554, "77486542");
+ return INVALID_OPERATION;
+ }
BufferMeta *buffer_meta = new BufferMeta(portIndex);
OMX_BUFFERHEADERTYPE *header;
@@ -1843,7 +1882,9 @@
return BAD_VALUE;
}
- mMaxTimestampGapUs = (int64_t)((OMX_PARAM_U32TYPE*)params)->nU32;
+ // The incoming number is an int32_t contained in OMX_U32.
+ // Cast to int32_t first then int64_t.
+ mMaxTimestampGapUs = (int32_t)((OMX_PARAM_U32TYPE*)params)->nU32;
return OK;
}
@@ -1867,12 +1908,26 @@
ALOGV("IN timestamp: %lld -> %lld",
static_cast<long long>(originalTimeUs),
static_cast<long long>(timestamp));
+ } else if (mMaxTimestampGapUs < 0ll) {
+ /*
+ * Apply a fixed timestamp gap between adjacent frames.
+ *
+ * This is used by scenarios like still image capture where timestamps
+ * on frames could go forward or backward. Some encoders may silently
+ * drop frames when it goes backward (or even stay unchanged).
+ */
+ if (mPrevOriginalTimeUs >= 0ll) {
+ timestamp = mPrevModifiedTimeUs - mMaxTimestampGapUs;
+ }
+ ALOGV("IN timestamp: %lld -> %lld",
+ static_cast<long long>(originalTimeUs),
+ static_cast<long long>(timestamp));
}
mPrevOriginalTimeUs = originalTimeUs;
mPrevModifiedTimeUs = timestamp;
- if (mMaxTimestampGapUs > 0ll && !mRestorePtsFailed) {
+ if (mMaxTimestampGapUs != 0ll && !mRestorePtsFailed) {
mOriginalTimeUs.add(timestamp, originalTimeUs);
}
@@ -1905,7 +1960,7 @@
void OMXNodeInstance::codecBufferFilled(omx_message &msg) {
Mutex::Autolock autoLock(mLock);
- if (mMaxTimestampGapUs <= 0ll || mRestorePtsFailed) {
+ if (mMaxTimestampGapUs == 0ll || mRestorePtsFailed) {
return;
}
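After this change mMaxTimestampGapUs is signed: a positive value keeps the existing behavior of capping the forward jump between consecutive input timestamps, a negative value forces a fixed gap of |value| between adjacent frames (the still-image capture case described in the added comment), and 0 disables rewriting altogether. A small worked sketch of the negative-gap branch, using the same variable meanings:

    #include <cstdint>

    // Negative gap: every frame after the first is stamped prev + |gapUs|,
    // regardless of what the producer supplied.
    int64_t rewriteTimestamp(int64_t originalUs, int64_t gapUs,
                             int64_t *prevOriginalUs, int64_t *prevModifiedUs) {
        int64_t ts = originalUs;
        if (gapUs < 0 && *prevOriginalUs >= 0) {
            ts = *prevModifiedUs - gapUs;   // gapUs is negative, so this adds |gapUs|
        }
        *prevOriginalUs = originalUs;
        *prevModifiedUs = ts;
        return ts;
    }

    // With gapUs = -33333, inputs 1000000, 950000, 1020000 come out as
    // 1000000, 1033333, 1066666: strictly increasing even though the source
    // timestamps moved backward.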
diff --git a/media/libstagefright/omx/OMXStore.cpp b/media/libstagefright/omx/OMXStore.cpp
deleted file mode 100644
index 345336d..0000000
--- a/media/libstagefright/omx/OMXStore.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "OMXStore"
-#include <utils/Log.h>
-
-#include <media/stagefright/omx/OMXUtils.h>
-#include <media/stagefright/omx/OMX.h>
-#include <media/stagefright/omx/OMXStore.h>
-#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
-
-#include <map>
-#include <string>
-
-namespace android {
-
-namespace {
- struct RoleProperties {
- std::string type;
- bool isEncoder;
- bool preferPlatformNodes;
- std::multimap<size_t, IOMXStore::NodeInfo> nodeList;
- };
-} // Unnamed namespace
-
-OMXStore::OMXStore(
- const char* owner,
- const char* const* searchDirs,
- const char* mainXmlName,
- const char* performanceXmlName,
- const char* profilingResultsXmlPath) {
- MediaCodecsXmlParser parser(
- searchDirs,
- mainXmlName,
- performanceXmlName,
- profilingResultsXmlPath);
- mParsingStatus = parser.getParsingStatus();
-
- const auto& serviceAttributeMap = parser.getServiceAttributeMap();
- mServiceAttributeList.reserve(serviceAttributeMap.size());
- for (const auto& attributePair : serviceAttributeMap) {
- Attribute attribute;
- attribute.key = attributePair.first;
- attribute.value = attributePair.second;
- mServiceAttributeList.push_back(std::move(attribute));
- }
-
- const auto& roleMap = parser.getRoleMap();
- mRoleList.reserve(roleMap.size());
- for (const auto& rolePair : roleMap) {
- RoleInfo role;
- role.role = rolePair.first;
- role.type = rolePair.second.type;
- role.isEncoder = rolePair.second.isEncoder;
- // TODO: Currently, preferPlatformNodes information is not available in
- // the xml file. Once we have a way to provide this information, it
- // should be parsed properly.
- role.preferPlatformNodes = rolePair.first.compare(0, 5, "audio") == 0;
- std::vector<NodeInfo>& nodeList = role.nodes;
- nodeList.reserve(rolePair.second.nodeList.size());
- for (const auto& nodePair : rolePair.second.nodeList) {
- NodeInfo node;
- node.name = nodePair.second.name;
- node.owner = owner;
- std::vector<Attribute>& attributeList = node.attributes;
- attributeList.reserve(nodePair.second.attributeList.size());
- for (const auto& attributePair : nodePair.second.attributeList) {
- Attribute attribute;
- attribute.key = attributePair.first;
- attribute.value = attributePair.second;
- attributeList.push_back(std::move(attribute));
- }
- nodeList.push_back(std::move(node));
- }
- mRoleList.push_back(std::move(role));
- }
-
- mPrefix = parser.getCommonPrefix();
-}
-
-status_t OMXStore::listServiceAttributes(std::vector<Attribute>* attributes) {
- *attributes = mServiceAttributeList;
- return mParsingStatus;
-}
-
-status_t OMXStore::getNodePrefix(std::string* prefix) {
- *prefix = mPrefix;
- return mParsingStatus;
-}
-
-status_t OMXStore::listRoles(std::vector<RoleInfo>* roleList) {
- *roleList = mRoleList;
- return mParsingStatus;
-}
-
-status_t OMXStore::getOmx(const std::string& name, sp<IOMX>* omx) {
- *omx = new OMX();
- return NO_ERROR;
-}
-
-OMXStore::~OMXStore() {
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
index 5894837..f7b569d 100644
--- a/media/libstagefright/omx/OMXUtils.cpp
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -22,9 +22,9 @@
#include <media/stagefright/omx/OMXUtils.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/hardware/HardwareAPI.h>
-#include <media/MediaDefs.h>
#include <system/graphics-base.h>
namespace android {
@@ -163,6 +163,8 @@
"audio_decoder.ac3", "audio_encoder.ac3" },
{ MEDIA_MIMETYPE_AUDIO_EAC3,
"audio_decoder.eac3", "audio_encoder.eac3" },
+ { MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC,
+ "image_decoder.heic", "image_encoder.heic" },
};
static const size_t kNumMimeToRole =
@@ -215,6 +217,9 @@
fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
fmt != (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YV12) {
ALOGW("do not know color format 0x%x = %d", fmt, fmt);
+ if (fmt == OMX_COLOR_FormatYUV420Planar16) {
+ ALOGW("Cannot describe color format OMX_COLOR_FormatYUV420Planar16");
+ }
return false;
}
diff --git a/media/libstagefright/omx/OmxGraphicBufferSource.cpp b/media/libstagefright/omx/OmxGraphicBufferSource.cpp
new file mode 100644
index 0000000..8de1f4f
--- /dev/null
+++ b/media/libstagefright/omx/OmxGraphicBufferSource.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#define LOG_TAG "OmxGraphicBufferSource"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <media/stagefright/bqhelper/ComponentWrapper.h>
+#include <media/stagefright/bqhelper/GraphicBufferSource.h>
+#include <media/stagefright/omx/OmxGraphicBufferSource.h>
+
+namespace android {
+
+namespace {
+
+class OmxComponentWrapper : public ComponentWrapper {
+public:
+ explicit OmxComponentWrapper(const sp<IOmxNodeWrapper> &node)
+ : mOmxNode(node) {}
+ virtual ~OmxComponentWrapper() = default;
+
+ status_t submitBuffer(
+ int32_t bufferId, const sp<GraphicBuffer> &buffer,
+ int64_t timestamp, int fenceFd) override {
+ return mOmxNode->emptyBuffer(
+ bufferId, OMX_BUFFERFLAG_ENDOFFRAME, buffer, timestamp, fenceFd);
+ }
+
+ status_t submitEos(int32_t bufferId) override {
+ return mOmxNode->emptyBuffer(bufferId, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS);
+ }
+
+ void dispatchDataSpaceChanged(
+ int32_t dataSpace, int32_t aspects, int32_t pixelFormat) override {
+ mOmxNode->dispatchDataSpaceChanged(dataSpace, aspects, pixelFormat);
+ }
+
+private:
+ sp<IOmxNodeWrapper> mOmxNode;
+
+ DISALLOW_EVIL_CONSTRUCTORS(OmxComponentWrapper);
+};
+
+} // namespace
+
+Status OmxGraphicBufferSource::onOmxExecuting() {
+ return start();
+}
+
+Status OmxGraphicBufferSource::onOmxIdle() {
+ return stop();
+}
+
+Status OmxGraphicBufferSource::onOmxLoaded(){
+ return release();
+}
+
+status_t OmxGraphicBufferSource::configure(
+ const sp<IOmxNodeWrapper>& omxNode,
+ int32_t dataSpace,
+ int32_t bufferCount,
+ uint32_t frameWidth,
+ uint32_t frameHeight,
+ uint32_t consumerUsage) {
+ if (omxNode == NULL) {
+ return BAD_VALUE;
+ }
+
+ return GraphicBufferSource::configure(
+ new OmxComponentWrapper(omxNode), dataSpace, bufferCount,
+ frameWidth, frameHeight, consumerUsage);
+}
+
+} // namespace android
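OmxGraphicBufferSource is now a thin shim: it wraps the OMX node in a ComponentWrapper so that the codec-agnostic GraphicBufferSource in bqhelper never sees OMX types directly. The adapter shape, with simplified stand-in interfaces rather than the actual headers:

    // The buffer-queue side programs against this narrow interface...
    struct Component {
        virtual ~Component() = default;
        virtual int submitBuffer(int bufferId, long long timestampUs) = 0;
        virtual int submitEos(int bufferId) = 0;
    };

    constexpr unsigned kFlagEndOfFrame = 1u << 0;   // stand-ins, not the real OMX values
    constexpr unsigned kFlagEos        = 1u << 1;

    struct FakeOmxNode {
        int emptyBuffer(int /*id*/, unsigned /*flags*/, long long /*tsUs*/) { return 0; }
    };

    // ...and each backend supplies an adapter over its own node type.
    struct OmxAdapter : Component {
        explicit OmxAdapter(FakeOmxNode *node) : mNode(node) {}
        int submitBuffer(int bufferId, long long timestampUs) override {
            return mNode->emptyBuffer(bufferId, kFlagEndOfFrame, timestampUs);
        }
        int submitEos(int bufferId) override {
            return mNode->emptyBuffer(bufferId, kFlagEndOfFrame | kFlagEos, 0);
        }
    private:
        FakeOmxNode *mNode;
    };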
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 1ba5852..55afe04 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -41,7 +41,7 @@
mLooper->start(
false, // runOnCallingThread
false, // canCallJava
- ANDROID_PRIORITY_FOREGROUND);
+ ANDROID_PRIORITY_VIDEO);
}
void SimpleSoftOMXComponent::prepareForDestruction() {
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 4946ada..1f3e8c1 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -34,7 +34,12 @@
const char *mRole;
} kComponents[] = {
+ // two choices for aac decoding.
+ // configurable in media/libstagefright/data/media_codecs_google_audio.xml
+ // default implementation
{ "OMX.google.aac.decoder", "aacdec", "audio_decoder.aac" },
+ // alternate implementation
+ { "OMX.google.xaac.decoder", "xaacdec", "audio_decoder.aac" },
{ "OMX.google.aac.encoder", "aacenc", "audio_encoder.aac" },
{ "OMX.google.amrnb.decoder", "amrdec", "audio_decoder.amrnb" },
{ "OMX.google.amrnb.encoder", "amrnbenc", "audio_encoder.amrnb" },
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index cb811a0..935dc34 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -26,8 +26,8 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/MediaDefs.h>
#include <media/hardware/HardwareAPI.h>
-#include <media/MediaDefs.h>
namespace android {
@@ -61,6 +61,7 @@
mCropTop(0),
mCropWidth(width),
mCropHeight(height),
+ mOutputFormat(OMX_COLOR_FormatYUV420Planar),
mOutputPortSettingsChange(NONE),
mUpdateColorAspects(false),
mMinInputBufferSize(384), // arbitrary, using one uncompressed macroblock
@@ -74,6 +75,7 @@
memset(&mDefaultColorAspects, 0, sizeof(ColorAspects));
memset(&mBitstreamColorAspects, 0, sizeof(ColorAspects));
memset(&mFinalColorAspects, 0, sizeof(ColorAspects));
+ memset(&mHdrStaticInfo, 0, sizeof(HDRStaticInfo));
}
void SoftVideoDecoderOMXComponent::initPorts(
@@ -140,7 +142,6 @@
def.format.video.xFramerate = 0;
def.format.video.bFlagErrorConcealment = OMX_FALSE;
def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
- def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
def.format.video.pNativeWindow = NULL;
addPort(def);
@@ -152,11 +153,13 @@
OMX_PARAM_PORTDEFINITIONTYPE *outDef = &editPortInfo(kOutputPortIndex)->mDef;
outDef->format.video.nFrameWidth = outputBufferWidth();
outDef->format.video.nFrameHeight = outputBufferHeight();
+ outDef->format.video.eColorFormat = mOutputFormat;
outDef->format.video.nStride = outDef->format.video.nFrameWidth;
outDef->format.video.nSliceHeight = outDef->format.video.nFrameHeight;
+ int32_t bpp = (mOutputFormat == OMX_COLOR_FormatYUV420Planar16) ? 2 : 1;
outDef->nBufferSize =
- (outDef->format.video.nStride * outDef->format.video.nSliceHeight * 3) / 2;
+ (outDef->format.video.nStride * outDef->format.video.nSliceHeight * bpp * 3) / 2;
OMX_PARAM_PORTDEFINITIONTYPE *inDef = &editPortInfo(kInputPortIndex)->mDef;
inDef->format.video.nFrameWidth = mWidth;
@@ -191,9 +194,11 @@
void SoftVideoDecoderOMXComponent::handlePortSettingsChange(
bool *portWillReset, uint32_t width, uint32_t height,
+ OMX_COLOR_FORMATTYPE outputFormat,
CropSettingsMode cropSettingsMode, bool fakeStride) {
*portWillReset = false;
bool sizeChanged = (width != mWidth || height != mHeight);
+ bool formatChanged = (outputFormat != mOutputFormat);
bool updateCrop = (cropSettingsMode == kCropUnSet);
bool cropChanged = (cropSettingsMode == kCropChanged);
bool strideChanged = false;
@@ -205,13 +210,18 @@
}
}
- if (sizeChanged || cropChanged || strideChanged) {
+ if (formatChanged || sizeChanged || cropChanged || strideChanged) {
+ if (formatChanged) {
+ ALOGD("formatChanged: 0x%08x -> 0x%08x", mOutputFormat, outputFormat);
+ }
+ mOutputFormat = outputFormat;
mWidth = width;
mHeight = height;
if ((sizeChanged && !mIsAdaptive)
|| width > mAdaptiveMaxWidth
- || height > mAdaptiveMaxHeight) {
+ || height > mAdaptiveMaxHeight
+ || formatChanged) {
if (mIsAdaptive) {
if (width > mAdaptiveMaxWidth) {
mAdaptiveMaxWidth = width;
@@ -305,27 +315,30 @@
void SoftVideoDecoderOMXComponent::copyYV12FrameToOutputBuffer(
uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
size_t srcYStride, size_t srcUStride, size_t srcVStride) {
- size_t dstYStride = outputBufferWidth();
+ OMX_PARAM_PORTDEFINITIONTYPE *outDef = &editPortInfo(kOutputPortIndex)->mDef;
+ int32_t bpp = (outDef->format.video.eColorFormat == OMX_COLOR_FormatYUV420Planar16) ? 2 : 1;
+
+ size_t dstYStride = outputBufferWidth() * bpp;
size_t dstUVStride = dstYStride / 2;
size_t dstHeight = outputBufferHeight();
uint8_t *dstStart = dst;
for (size_t i = 0; i < mHeight; ++i) {
- memcpy(dst, srcY, mWidth);
+ memcpy(dst, srcY, mWidth * bpp);
srcY += srcYStride;
dst += dstYStride;
}
dst = dstStart + dstYStride * dstHeight;
for (size_t i = 0; i < mHeight / 2; ++i) {
- memcpy(dst, srcU, mWidth / 2);
+ memcpy(dst, srcU, mWidth / 2 * bpp);
srcU += srcUStride;
dst += dstUVStride;
}
dst = dstStart + (5 * dstYStride * dstHeight) / 4;
for (size_t i = 0; i < mHeight / 2; ++i) {
- memcpy(dst, srcV, mWidth / 2);
+ memcpy(dst, srcV, mWidth / 2 * bpp);
srcV += srcVStride;
dst += dstUVStride;
}
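With the 10-bit output format (OMX_COLOR_FormatYUV420Planar16) each sample occupies two bytes, so the copy above scales the destination strides and the per-row copy lengths by bpp. The buffer-size arithmetic this implies, as a worked check:

    #include <cstddef>

    // Total planar YUV420 buffer size for a given output width/height and
    // bytes per sample (1 for 8-bit formats, 2 for OMX_COLOR_FormatYUV420Planar16).
    size_t yuv420BufferSize(size_t width, size_t height, size_t bpp) {
        size_t yStride  = width * bpp;
        size_t uvStride = yStride / 2;
        return yStride * height                   // Y plane
             + 2 * (uvStride * (height / 2));     // U and V planes
    }

    // e.g. 1920x1080 at bpp = 2: Y = 4147200, U = V = 1036800 each,
    // total = 6220800 bytes, i.e. width * height * bpp * 3 / 2, matching the
    // nBufferSize expression in the port-definition hunk above.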
@@ -550,6 +563,10 @@
DescribeColorAspectsParams* colorAspectsParams =
(DescribeColorAspectsParams *)params;
+ if (!isValidOMXParam(colorAspectsParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (colorAspectsParams->nPortIndex != kOutputPortIndex) {
return OMX_ErrorBadParameter;
}
@@ -562,6 +579,28 @@
return OMX_ErrorNone;
}
+ case kDescribeHdrStaticInfoIndex:
+ {
+ if (!supportDescribeHdrStaticInfo()) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ DescribeHDRStaticInfoParams* hdrStaticInfoParams =
+ (DescribeHDRStaticInfoParams *)params;
+
+ if (!isValidOMXParam(hdrStaticInfoParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (hdrStaticInfoParams->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorBadPortIndex;
+ }
+
+ hdrStaticInfoParams->sInfo = mHdrStaticInfo;
+
+ return OMX_ErrorNone;
+ }
+
default:
return OMX_ErrorUnsupportedIndex;
}
@@ -595,6 +634,29 @@
return OMX_ErrorNone;
}
+ case kDescribeHdrStaticInfoIndex:
+ {
+ if (!supportDescribeHdrStaticInfo()) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ const DescribeHDRStaticInfoParams* hdrStaticInfoParams =
+ (DescribeHDRStaticInfoParams *)params;
+
+ if (!isValidOMXParam(hdrStaticInfoParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (hdrStaticInfoParams->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorBadPortIndex;
+ }
+
+ mHdrStaticInfo = hdrStaticInfoParams->sInfo;
+ updatePortDefinitions(false);
+
+ return OMX_ErrorNone;
+ }
+
default:
return OMX_ErrorUnsupportedIndex;
}
@@ -610,6 +672,10 @@
&& supportsDescribeColorAspects()) {
*(int32_t*)index = kDescribeColorAspectsIndex;
return OMX_ErrorNone;
+ } else if (!strcmp(name, "OMX.google.android.index.describeHDRStaticInfo")
+ && supportDescribeHdrStaticInfo()) {
+ *(int32_t*)index = kDescribeHdrStaticInfoIndex;
+ return OMX_ErrorNone;
}
return SimpleSoftOMXComponent::getExtensionIndex(name, index);
@@ -623,6 +689,10 @@
return kNotSupported;
}
+bool SoftVideoDecoderOMXComponent::supportDescribeHdrStaticInfo() {
+ return false;
+}
+
void SoftVideoDecoderOMXComponent::onReset() {
mOutputPortSettingsChange = NONE;
}
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index f33bdc0..2fbbb44 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -26,9 +26,9 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/MediaDefs.h>
#include <media/hardware/HardwareAPI.h>
#include <media/openmax/OMX_IndexExt.h>
-#include <media/MediaDefs.h>
#include <ui/Fence.h>
#include <ui/GraphicBufferMapper.h>
@@ -664,4 +664,17 @@
return SimpleSoftOMXComponent::getExtensionIndex(name, index);
}
+OMX_ERRORTYPE SoftVideoEncoderOMXComponent::validateInputBuffer(
+ const OMX_BUFFERHEADERTYPE *inputBufferHeader) {
+ size_t frameSize = mInputDataIsMeta ?
+ max(sizeof(VideoNativeMetadata), sizeof(VideoGrallocMetadata))
+ : mWidth * mHeight * 3 / 2;
+ if (inputBufferHeader->nFilledLen < frameSize) {
+ return OMX_ErrorUndefined;
+ } else if (inputBufferHeader->nFilledLen > frameSize) {
+ ALOGW("Input buffer contains more data than expected.");
+ }
+ return OMX_ErrorNone;
+}
+
} // namespace android
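validateInputBuffer() rejects an input buffer that holds less than one full frame: for metadata input the frame is the larger of the two metadata structs, otherwise it is a raw YUV420 frame of width * height * 3 / 2 bytes. A quick numeric check of the raw case:

    #include <cstddef>

    // Minimum payload the encoder accepts for one raw YUV420 input frame.
    constexpr size_t rawFrameSize(size_t width, size_t height) {
        return width * height * 3 / 2;
    }

    static_assert(rawFrameSize(320, 240) == 115200, "QVGA YUV420 frame");
    static_assert(rawFrameSize(1280, 720) == 1382400, "720p YUV420 frame");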
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
index 8d8a2d9..a9fce55 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
@@ -20,6 +20,7 @@
#include <vector>
#include <list>
+#include <cinttypes>
#include <unistd.h>
#include <hidl/MQDescriptor.h>
@@ -35,6 +36,8 @@
#include <media/OMXFenceParcelable.h>
#include <media/OMXBuffer.h>
#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/bqhelper/Conversion.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
@@ -99,51 +102,13 @@
typedef ::android::IGraphicBufferProducer
BGraphicBufferProducer;
-// native_handle_t helper functions.
+// We want to use all functions declared in ::android::conversion
+using namespace ::android::conversion;
-/**
- * \brief Take an fd and create a native handle containing only the given fd.
- * The created handle will need to be deleted manually with
- * `native_handle_delete()`.
- *
- * \param[in] fd The source file descriptor (of type `int`).
- * \return The create `native_handle_t*` that contains the given \p fd. If the
- * supplied \p fd is negative, the created native handle will contain no file
- * descriptors.
- *
- * If the native handle cannot be created, the return value will be
- * `nullptr`.
- *
- * This function does not duplicate the file descriptor.
- */
-inline native_handle_t* native_handle_create_from_fd(int fd) {
- if (fd < 0) {
- return native_handle_create(0, 0);
- }
- native_handle_t* nh = native_handle_create(1, 0);
- if (nh == nullptr) {
- return nullptr;
- }
- nh->data[0] = fd;
- return nh;
-}
-
-/**
- * \brief Extract a file descriptor from a native handle.
- *
- * \param[in] nh The source `native_handle_t*`.
- * \param[in] index The index of the file descriptor in \p nh to read from. This
- * input has the default value of `0`.
- * \return The `index`-th file descriptor in \p nh. If \p nh does not have
- * enough file descriptors, the returned value will be `-1`.
- *
- * This function does not duplicate the file descriptor.
- */
-inline int native_handle_read_fd(native_handle_t const* nh, int index = 0) {
- return ((nh == nullptr) || (nh->numFds == 0) ||
- (nh->numFds <= index) || (index < 0)) ?
- -1 : nh->data[index];
-}
+// Now specifically inject these two functions here, because we're going to
+// declare functions with the same name in this namespace.
+using ::android::conversion::convertTo;
+using ::android::conversion::toStatusT;
/**
* Conversion functions
@@ -178,17 +143,34 @@
*/
/**
- * \brief Convert `Return<void>` to `binder::Status`.
+ * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
*
- * \param[in] t The source `Return<void>`.
- * \return The corresponding `binder::Status`.
+ * \param[in] t The source `Status`.
+ * \return the corresponding `status_t`.
*/
-// convert: Return<void> -> ::android::binder::Status
-inline ::android::binder::Status toBinderStatus(
- Return<void> const& t) {
- return ::android::binder::Status::fromExceptionCode(
- t.isOk() ? OK : UNKNOWN_ERROR,
- t.description().c_str());
+// convert: Status -> status_t
+inline status_t toStatusT(Status const& t) {
+ switch (t) {
+ case Status::NO_ERROR:
+ case Status::NAME_NOT_FOUND:
+ case Status::WOULD_BLOCK:
+ case Status::NO_MEMORY:
+ case Status::ALREADY_EXISTS:
+ case Status::NO_INIT:
+ case Status::BAD_VALUE:
+ case Status::DEAD_OBJECT:
+ case Status::INVALID_OPERATION:
+ case Status::TIMED_OUT:
+ case Status::ERROR_UNSUPPORTED:
+ case Status::UNKNOWN_ERROR:
+ case Status::RELEASE_ALL_BUFFERS:
+ return static_cast<status_t>(t);
+ case Status::BUFFER_NEEDS_REALLOCATION:
+ return NOT_ENOUGH_DATA;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(t));
+ return static_cast<status_t>(t);
+ }
}
/**
@@ -208,29 +190,7 @@
*/
// convert: Status -> status_t
inline status_t toStatusT(Return<Status> const& t) {
- return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
-}
-
-/**
- * \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
- *
- * \param[in] t The source `Return<void>`.
- * \return The corresponding `status_t`.
- */
-// convert: Return<void> -> status_t
-inline status_t toStatusT(Return<void> const& t) {
- return t.isOk() ? OK : UNKNOWN_ERROR;
-}
-
-/**
- * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
- *
- * \param[in] t The source `Status`.
- * \return the corresponding `status_t`.
- */
-// convert: Status -> status_t
-inline status_t toStatusT(Status const& t) {
- return static_cast<status_t>(t);
+ return t.isOk() ? toStatusT(static_cast<Status>(t)) : UNKNOWN_ERROR;
}
/**
@@ -241,18 +201,28 @@
*/
// convert: status_t -> Status
inline Status toStatus(status_t l) {
- return static_cast<Status>(l);
-}
-
-/**
- * \brief Wrap `native_handle_t*` in `hidl_handle`.
- *
- * \param[in] nh The source `native_handle_t*`.
- * \return The `hidl_handle` that points to \p nh.
- */
-// wrap: native_handle_t* -> hidl_handle
-inline hidl_handle inHidlHandle(native_handle_t const* nh) {
- return hidl_handle(nh);
+ switch (l) {
+ case NO_ERROR:
+ case NAME_NOT_FOUND:
+ case WOULD_BLOCK:
+ case NO_MEMORY:
+ case ALREADY_EXISTS:
+ case NO_INIT:
+ case BAD_VALUE:
+ case DEAD_OBJECT:
+ case INVALID_OPERATION:
+ case TIMED_OUT:
+ case ERROR_UNSUPPORTED:
+ case UNKNOWN_ERROR:
+ case IGraphicBufferProducer::RELEASE_ALL_BUFFERS:
+ case IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION:
+ return static_cast<Status>(l);
+ case NOT_ENOUGH_DATA:
+ return Status::BUFFER_NEEDS_REALLOCATION;
+ default:
+ ALOGW("Unrecognized status value: %" PRId32, static_cast<int32_t>(l));
+ return static_cast<Status>(l);
+ }
}
/**
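The rewritten toStatusT()/toStatus() pair special-cases the one value that has no same-named status_t counterpart: Status::BUFFER_NEEDS_REALLOCATION travels as NOT_ENOUGH_DATA and is restored on the way back, while every other listed value converts by plain cast (unrecognized values log a warning and are cast through). The round trip, reduced to stand-in enums with illustrative values:

    #include <cstdint>

    using status_t = int32_t;
    constexpr status_t NOT_ENOUGH_DATA = -61;                    // stand-in value
    enum class Status : int32_t { NO_ERROR = 0, BUFFER_NEEDS_REALLOCATION = 1 };

    status_t toStatusT(Status s) {
        return s == Status::BUFFER_NEEDS_REALLOCATION
                ? NOT_ENOUGH_DATA
                : static_cast<status_t>(s);
    }

    Status toStatus(status_t s) {
        return s == NOT_ENOUGH_DATA
                ? Status::BUFFER_NEEDS_REALLOCATION
                : static_cast<Status>(s);
    }

    // toStatus(toStatusT(Status::BUFFER_NEEDS_REALLOCATION)) yields
    // BUFFER_NEEDS_REALLOCATION again; the plain cast used before the change
    // had no status_t constant to carry it.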
@@ -490,128 +460,6 @@
}
/**
- * \brief Convert `int32_t` to `Dataspace`.
- *
- * \param[in] l The source `int32_t`.
- * \result The corresponding `Dataspace`.
- */
-// convert: int32_t -> Dataspace
-inline Dataspace toHardwareDataspace(int32_t l) {
- return static_cast<Dataspace>(l);
-}
-
-/**
- * \brief Convert `Dataspace` to `int32_t`.
- *
- * \param[in] t The source `Dataspace`.
- * \result The corresponding `int32_t`.
- */
-// convert: Dataspace -> int32_t
-inline int32_t toRawDataspace(Dataspace const& t) {
- return static_cast<int32_t>(t);
-}
-
-/**
- * \brief Wrap an opaque buffer inside a `hidl_vec<uint8_t>`.
- *
- * \param[in] l The pointer to the beginning of the opaque buffer.
- * \param[in] size The size of the buffer.
- * \return A `hidl_vec<uint8_t>` that points to the buffer.
- */
-// wrap: void*, size_t -> hidl_vec<uint8_t>
-inline hidl_vec<uint8_t> inHidlBytes(void const* l, size_t size) {
- hidl_vec<uint8_t> t;
- t.setToExternal(static_cast<uint8_t*>(const_cast<void*>(l)), size, false);
- return t;
-}
-
-/**
- * \brief Create a `hidl_vec<uint8_t>` that is a copy of an opaque buffer.
- *
- * \param[in] l The pointer to the beginning of the opaque buffer.
- * \param[in] size The size of the buffer.
- * \return A `hidl_vec<uint8_t>` that is a copy of the input buffer.
- */
-// convert: void*, size_t -> hidl_vec<uint8_t>
-inline hidl_vec<uint8_t> toHidlBytes(void const* l, size_t size) {
- hidl_vec<uint8_t> t;
- t.resize(size);
- uint8_t const* src = static_cast<uint8_t const*>(l);
- std::copy(src, src + size, t.data());
- return t;
-}
-
-/**
- * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
- *
- * \param[out] t The wrapper of type `AnwBuffer`.
- * \param[in] l The source `GraphicBuffer`.
- */
-// wrap: GraphicBuffer -> AnwBuffer
-inline void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
- t->attr.width = l.getWidth();
- t->attr.height = l.getHeight();
- t->attr.stride = l.getStride();
- t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
- t->attr.layerCount = l.getLayerCount();
- t->attr.usage = l.getUsage();
- t->attr.id = l.getId();
- t->attr.generationNumber = l.getGenerationNumber();
- t->nativeHandle = hidl_handle(l.handle);
-}
-
-/**
- * \brief Convert `AnwBuffer` to `GraphicBuffer`.
- *
- * \param[out] l The destination `GraphicBuffer`.
- * \param[in] t The source `AnwBuffer`.
- *
- * This function will duplicate all file descriptors in \p t.
- */
-// convert: AnwBuffer -> GraphicBuffer
-// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
-inline bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
- native_handle_t* handle = t.nativeHandle == nullptr ?
- nullptr : native_handle_clone(t.nativeHandle);
-
- size_t const numInts = 12 + (handle ? handle->numInts : 0);
- int32_t* ints = new int32_t[numInts];
-
- size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
- int* fds = new int[numFds];
-
- ints[0] = 'GBFR';
- ints[1] = static_cast<int32_t>(t.attr.width);
- ints[2] = static_cast<int32_t>(t.attr.height);
- ints[3] = static_cast<int32_t>(t.attr.stride);
- ints[4] = static_cast<int32_t>(t.attr.format);
- ints[5] = static_cast<int32_t>(t.attr.layerCount);
- ints[6] = static_cast<int32_t>(t.attr.usage);
- ints[7] = static_cast<int32_t>(t.attr.id >> 32);
- ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
- ints[9] = static_cast<int32_t>(t.attr.generationNumber);
- ints[10] = 0;
- ints[11] = 0;
- if (handle) {
- ints[10] = static_cast<int32_t>(handle->numFds);
- ints[11] = static_cast<int32_t>(handle->numInts);
- int* intsStart = handle->data + handle->numFds;
- std::copy(handle->data, intsStart, fds);
- std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
- }
-
- void const* constBuffer = static_cast<void const*>(ints);
- size_t size = numInts * sizeof(int32_t);
- int const* constFds = static_cast<int const*>(fds);
- status_t status = l->unflatten(constBuffer, size, constFds, numFds);
-
- delete [] fds;
- delete [] ints;
- native_handle_delete(handle);
- return status == NO_ERROR;
-}
-
-/**
* \brief Wrap `GraphicBuffer` in `CodecBuffer`.
*
* \param[out] t The wrapper of type `CodecBuffer`.
@@ -896,1281 +744,6 @@
#endif
}
-/**
- * Conversion functions for types outside media
- * ============================================
- *
- * Some objects in libui and libgui that were made to go through binder calls do
- * not expose ways to read or write their fields to the public. To pass an
- * object of this kind through the HIDL boundary, translation functions need to
- * work around the access restriction by using the publicly available
- * `flatten()` and `unflatten()` functions.
- *
- * All `flatten()` and `unflatten()` overloads follow the same convention as
- * follows:
- *
- * status_t flatten(ObjectType const& object,
- * [OtherType const& other, ...]
- * void*& buffer, size_t& size,
- * int*& fds, size_t& numFds)
- *
- * status_t unflatten(ObjectType* object,
- * [OtherType* other, ...,]
- * void const*& buffer, size_t& size,
- * int const*& fds, size_t& numFds)
- *
- * The number of `other` parameters varies depending on the `ObjectType`. For
- * example, in the process of unflattening an object that contains
- * `hidl_handle`, `other` is needed to hold `native_handle_t` objects that will
- * be created.
- *
- * The last four parameters always work the same way in all overloads of
- * `flatten()` and `unflatten()`:
- * - For `flatten()`, `buffer` is the pointer to the non-fd buffer to be filled,
- * `size` is the size (in bytes) of the non-fd buffer pointed to by `buffer`,
- * `fds` is the pointer to the fd buffer to be filled, and `numFds` is the
- * size (in ints) of the fd buffer pointed to by `fds`.
- * - For `unflatten()`, `buffer` is the pointer to the non-fd buffer to be read
- * from, `size` is the size (in bytes) of the non-fd buffer pointed to by
- * `buffer`, `fds` is the pointer to the fd buffer to be read from, and
- * `numFds` is the size (in ints) of the fd buffer pointed to by `fds`.
- * - After a successful call to `flatten()` or `unflatten()`, `buffer` and `fds`
- * will be advanced, while `size` and `numFds` will be decreased to reflect
- * how much storage/data of the two buffers (fd and non-fd) have been used.
- * - After an unsuccessful call, the values of `buffer`, `size`, `fds` and
- * `numFds` are invalid.
- *
- * The return value of a successful `flatten()` or `unflatten()` call will be
- * `OK` (also aliased as `NO_ERROR`). Any other values indicate a failure.
- *
- * For each object type that supports flattening, there will be two accompanying
- * functions: `getFlattenedSize()` and `getFdCount()`. `getFlattenedSize()` will
- * return the size of the non-fd buffer that the object will need for
- * flattening. `getFdCount()` will return the size of the fd buffer that the
- * object will need for flattening.
- *
- * The set of these four functions, `getFlattenedSize()`, `getFdCount()`,
- * `flatten()` and `unflatten()`, are similar to functions of the same name in
- * the abstract class `Flattenable`. The only difference is that functions in
- * this file are not member functions of the object type. For example, we write
- *
- * flatten(x, buffer, size, fds, numFds)
- *
- * instead of
- *
- * x.flatten(buffer, size, fds, numFds)
- *
- * because we cannot modify the type of `x`.
- *
- * There is one exception to the naming convention: a `hidl_handle` that
- * represents a fence. The four functions for this "Fence" type have the word
- * "Fence" attached to their names because the object type, `hidl_handle`, does
- * not by itself convey that the object may contain at most one file
- * descriptor.
- */
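
// [Editorial illustration, not part of the original patch] A minimal sketch of the
// convention described above, assuming a hypothetical ObjectType `T` whose
// flatten()/unflatten() overloads take exactly the four cursor arguments and no
// extra out-parameters (the helper name roundTrip is made up for this example):
//
//     status_t roundTrip(T const& in, T* out) {
//         std::vector<uint8_t> buf(getFlattenedSize(in));
//         std::vector<int> fdBuf(getFdCount(in));
//         // Writing cursors: flatten() advances these, so keep the originals around.
//         void* wBuffer = buf.data();
//         size_t wSize = buf.size();
//         int* wFds = fdBuf.data();
//         size_t wNumFds = fdBuf.size();
//         status_t err = flatten(in, wBuffer, wSize, wFds, wNumFds);
//         if (err != NO_ERROR) return err;
//         // Reading cursors: unflatten() advances these in the same way.
//         void const* rBuffer = buf.data();
//         size_t rSize = buf.size();
//         int const* rFds = fdBuf.data();
//         size_t rNumFds = fdBuf.size();
//         return unflatten(out, rBuffer, rSize, rFds, rNumFds);
//     }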
-
-// Ref: frameworks/native/libs/ui/Fence.cpp
-
-/**
- * \brief Return the size of the non-fd buffer required to flatten a fence.
- *
- * \param[in] fence The input fence of type `hidl_handle`.
- * \return The required size of the flat buffer.
- *
- * The current version of this function always returns 4, which is the number of
- * bytes required to store the number of file descriptors contained in the fd
- * part of the flat buffer.
- */
-inline size_t getFenceFlattenedSize(hidl_handle const& /* fence */) {
- return 4;
-}
-
-/**
- * \brief Return the number of file descriptors contained in a fence.
- *
- * \param[in] fence The input fence of type `hidl_handle`.
- * \return `0` if \p fence does not contain a valid file descriptor, or `1`
- * otherwise.
- */
-inline size_t getFenceFdCount(hidl_handle const& fence) {
- return native_handle_read_fd(fence) == -1 ? 0 : 1;
-}
-
-/**
- * \brief Unflatten `Fence` to `hidl_handle`.
- *
- * \param[out] fence The destination `hidl_handle`.
- * \param[out] nh The underlying native handle.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR`, \p nh will point to a newly created
- * native handle, which needs to be deleted with `native_handle_delete()`
- * afterwards.
- */
-inline status_t unflattenFence(hidl_handle* fence, native_handle_t** nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < 4) {
- return NO_MEMORY;
- }
-
- uint32_t numFdsInHandle;
- FlattenableUtils::read(buffer, size, numFdsInHandle);
-
- if (numFdsInHandle > 1) {
- return BAD_VALUE;
- }
-
- if (numFds < numFdsInHandle) {
- return NO_MEMORY;
- }
-
- if (numFdsInHandle) {
- *nh = native_handle_create_from_fd(*fds);
- if (*nh == nullptr) {
- return NO_MEMORY;
- }
- *fence = *nh;
- ++fds;
- --numFds;
- } else {
- *nh = nullptr;
- *fence = hidl_handle();
- }
-
- return NO_ERROR;
-}
-
-/**
- * \brief Flatten `hidl_handle` as `Fence`.
- *
- * \param[in] fence The source `hidl_handle`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- */
-inline status_t flattenFence(hidl_handle const& fence,
- void*& buffer, size_t& size, int*& fds, size_t& numFds) {
- if (size < getFenceFlattenedSize(fence) ||
- numFds < getFenceFdCount(fence)) {
- return NO_MEMORY;
- }
- // Cast to uint32_t since the size of a size_t can vary between 32- and
- // 64-bit processes
- FlattenableUtils::write(buffer, size,
- static_cast<uint32_t>(getFenceFdCount(fence)));
- int fd = native_handle_read_fd(fence);
- if (fd != -1) {
- *fds = fd;
- ++fds;
- --numFds;
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Wrap `Fence` in `hidl_handle`.
- *
- * \param[out] t The wrapper of type `hidl_handle`.
- * \param[out] nh The native handle pointed to by \p t.
- * \param[in] l The source `Fence`.
- *
- * On success, \p nh will hold a newly created native handle, which must be
- * deleted manually with `native_handle_delete()` afterwards.
- */
-// wrap: Fence -> hidl_handle
-inline bool wrapAs(hidl_handle* t, native_handle_t** nh, Fence const& l) {
- size_t const baseSize = l.getFlattenedSize();
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = l.getFdCount();
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = static_cast<int*>(baseFds.get());
- size_t numFds = baseNumFds;
- if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (unflattenFence(t, nh, constBuffer, size, constFds, numFds)
- != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-/**
- * \brief Convert `hidl_handle` to `Fence`.
- *
- * \param[out] l The destination `Fence`. `l` must not have been used
- * (`l->isValid()` must return `false`) before this function is called.
- * \param[in] t The source `hidl_handle`.
- *
- * If \p t contains a valid file descriptor, it will be duplicated.
- */
-// convert: hidl_handle -> Fence
-inline bool convertTo(Fence* l, hidl_handle const& t) {
- int fd = native_handle_read_fd(t);
- if (fd != -1) {
- fd = dup(fd);
- if (fd == -1) {
- return false;
- }
- }
- native_handle_t* nh = native_handle_create_from_fd(fd);
- if (nh == nullptr) {
- if (fd != -1) {
- close(fd);
- }
- return false;
- }
-
- size_t const baseSize = getFenceFlattenedSize(t);
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- native_handle_delete(nh);
- return false;
- }
-
- size_t const baseNumFds = getFenceFdCount(t);
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- native_handle_delete(nh);
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = static_cast<int*>(baseFds.get());
- size_t numFds = baseNumFds;
- if (flattenFence(hidl_handle(nh), buffer, size, fds, numFds) != NO_ERROR) {
- native_handle_delete(nh);
- return false;
- }
- native_handle_delete(nh);
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
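
// [Editorial illustration, not part of the original patch] A sketch of the ownership
// contract documented for the Fence helpers above: wrapAs() may create a temporary
// native handle that the caller deletes (but does not close, since the file
// descriptors remain owned by the original Fence). `sendFence` is a hypothetical
// caller.
//
//     bool sendFence(Fence const& fence) {
//         hidl_handle hidlFence;
//         native_handle_t* nh = nullptr;
//         if (!wrapAs(&hidlFence, &nh, fence)) {
//             return false;
//         }
//         // ... pass hidlFence across the HIDL boundary here ...
//         if (nh != nullptr) {
//             native_handle_delete(nh);
//         }
//         return true;
//     }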
-
-// Ref: frameworks/native/libs/ui/FenceTime.cpp: FenceTime::Snapshot
-
-/**
- * \brief Return the size of the non-fd buffer required to flatten
- * `FenceTimeSnapshot`.
- *
- * \param[in] t The input `FenceTimeSnapshot`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(
- HGraphicBufferProducer::FenceTimeSnapshot const& t) {
- constexpr size_t min = sizeof(t.state);
- switch (t.state) {
- case HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY:
- return min;
- case HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE:
- return min + getFenceFlattenedSize(t.fence);
- case HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
- return min + sizeof(
- ::android::FenceTime::Snapshot::signalTime);
- }
- return 0;
-}
-
-/**
- * \brief Return the number of file descriptors contained in
- * `FenceTimeSnapshot`.
- *
- * \param[in] t The input `FenceTimeSnapshot`.
- * \return The number of file descriptors contained in \p t.
- */
-inline size_t getFdCount(
- HGraphicBufferProducer::FenceTimeSnapshot const& t) {
- return t.state ==
- HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE ?
- getFenceFdCount(t.fence) : 0;
-}
-
-/**
- * \brief Flatten `FenceTimeSnapshot`.
- *
- * \param[in] t The source `FenceTimeSnapshot`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * This function will duplicate the file descriptor in `t.fence` if `t.state ==
- * FENCE`.
- */
-inline status_t flatten(HGraphicBufferProducer::FenceTimeSnapshot const& t,
- void*& buffer, size_t& size, int*& fds, size_t& numFds) {
- if (size < getFlattenedSize(t)) {
- return NO_MEMORY;
- }
-
- switch (t.state) {
- case HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY:
- FlattenableUtils::write(buffer, size,
- ::android::FenceTime::Snapshot::State::EMPTY);
- return NO_ERROR;
- case HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE:
- FlattenableUtils::write(buffer, size,
- ::android::FenceTime::Snapshot::State::FENCE);
- return flattenFence(t.fence, buffer, size, fds, numFds);
- case HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
- FlattenableUtils::write(buffer, size,
- ::android::FenceTime::Snapshot::State::SIGNAL_TIME);
- FlattenableUtils::write(buffer, size, t.signalTimeNs);
- return NO_ERROR;
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Unflatten `FenceTimeSnapshot`.
- *
- * \param[out] t The destination `FenceTimeSnapshot`.
- * \param[out] nh The underlying native handle.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR` and the constructed snapshot contains a
- * file descriptor, \p nh will be created to hold that file descriptor. In this
- * case, \p nh needs to be deleted with `native_handle_delete()` afterwards.
- */
-inline status_t unflatten(
- HGraphicBufferProducer::FenceTimeSnapshot* t, native_handle_t** nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < sizeof(t->state)) {
- return NO_MEMORY;
- }
-
- *nh = nullptr;
- ::android::FenceTime::Snapshot::State state;
- FlattenableUtils::read(buffer, size, state);
- switch (state) {
- case ::android::FenceTime::Snapshot::State::EMPTY:
- t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY;
- return NO_ERROR;
- case ::android::FenceTime::Snapshot::State::FENCE:
- t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE;
- return unflattenFence(&t->fence, nh, buffer, size, fds, numFds);
- case ::android::FenceTime::Snapshot::State::SIGNAL_TIME:
- t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME;
- if (size < sizeof(t->signalTimeNs)) {
- return NO_MEMORY;
- }
- FlattenableUtils::read(buffer, size, t->signalTimeNs);
- return NO_ERROR;
- }
- return NO_ERROR;
-}
-
-// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventsDelta
-
-/**
- * \brief Return a lower bound on the size of the non-fd buffer required to
- * flatten `FrameEventsDelta`.
- *
- * \param[in] t The input `FrameEventsDelta`.
- * \return A lower bound on the size of the flat buffer.
- */
-constexpr size_t minFlattenedSize(
- HGraphicBufferProducer::FrameEventsDelta const& /* t */) {
- return sizeof(uint64_t) + // mFrameNumber
- sizeof(uint8_t) + // mIndex
- sizeof(uint8_t) + // mAddPostCompositeCalled
- sizeof(uint8_t) + // mAddRetireCalled
- sizeof(uint8_t) + // mAddReleaseCalled
- sizeof(nsecs_t) + // mPostedTime
- sizeof(nsecs_t) + // mRequestedPresentTime
- sizeof(nsecs_t) + // mLatchTime
- sizeof(nsecs_t) + // mFirstRefreshStartTime
- sizeof(nsecs_t); // mLastRefreshStartTime
-}
-
-/**
- * \brief Return the size of the non-fd buffer required to flatten
- * `FrameEventsDelta`.
- *
- * \param[in] t The input `FrameEventsDelta`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(
- HGraphicBufferProducer::FrameEventsDelta const& t) {
- return minFlattenedSize(t) +
- getFlattenedSize(t.gpuCompositionDoneFence) +
- getFlattenedSize(t.displayPresentFence) +
- getFlattenedSize(t.displayRetireFence) +
- getFlattenedSize(t.releaseFence);
-}
-
-/**
- * \brief Return the number of file descriptors contained in
- * `FrameEventsDelta`.
- *
- * \param[in] t The input `FrameEventsDelta`.
- * \return The number of file descriptors contained in \p t.
- */
-inline size_t getFdCount(
- HGraphicBufferProducer::FrameEventsDelta const& t) {
- return getFdCount(t.gpuCompositionDoneFence) +
- getFdCount(t.displayPresentFence) +
- getFdCount(t.displayRetireFence) +
- getFdCount(t.releaseFence);
-}
-
-/**
- * \brief Unflatten `FrameEventsDelta`.
- *
- * \param[out] t The destination `FrameEventsDelta`.
- * \param[out] nh The underlying array of native handles.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR`, \p nh will have length 4, and it will be
- * populated with `nullptr` or newly created handles. Each non-null slot in \p
- * nh will need to be deleted manually with `native_handle_delete()`.
- */
-inline status_t unflatten(HGraphicBufferProducer::FrameEventsDelta* t,
- std::vector<native_handle_t*>* nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < minFlattenedSize(*t)) {
- return NO_MEMORY;
- }
- FlattenableUtils::read(buffer, size, t->frameNumber);
-
- // These were written as uint8_t for alignment.
- uint8_t temp = 0;
- FlattenableUtils::read(buffer, size, temp);
- size_t index = static_cast<size_t>(temp);
- if (index >= ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
- return BAD_VALUE;
- }
- t->index = static_cast<uint32_t>(index);
-
- FlattenableUtils::read(buffer, size, temp);
- t->addPostCompositeCalled = static_cast<bool>(temp);
- FlattenableUtils::read(buffer, size, temp);
- t->addRetireCalled = static_cast<bool>(temp);
- FlattenableUtils::read(buffer, size, temp);
- t->addReleaseCalled = static_cast<bool>(temp);
-
- FlattenableUtils::read(buffer, size, t->postedTimeNs);
- FlattenableUtils::read(buffer, size, t->requestedPresentTimeNs);
- FlattenableUtils::read(buffer, size, t->latchTimeNs);
- FlattenableUtils::read(buffer, size, t->firstRefreshStartTimeNs);
- FlattenableUtils::read(buffer, size, t->lastRefreshStartTimeNs);
- FlattenableUtils::read(buffer, size, t->dequeueReadyTime);
-
- // Fences
- HGraphicBufferProducer::FenceTimeSnapshot* tSnapshot[4];
- tSnapshot[0] = &t->gpuCompositionDoneFence;
- tSnapshot[1] = &t->displayPresentFence;
- tSnapshot[2] = &t->displayRetireFence;
- tSnapshot[3] = &t->releaseFence;
- nh->resize(4);
- for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
- status_t status = unflatten(
- tSnapshot[snapshotIndex], &((*nh)[snapshotIndex]),
- buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- while (snapshotIndex > 0) {
- --snapshotIndex;
- if ((*nh)[snapshotIndex] != nullptr) {
- native_handle_delete((*nh)[snapshotIndex]);
- }
- }
- return status;
- }
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Flatten `FrameEventsDelta`.
- *
- * \param[in] t The source `FrameEventsDelta`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * This function will duplicate file descriptors contained in \p t.
- */
-// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp:
-// FrameEventsDelta::flatten
-inline status_t flatten(HGraphicBufferProducer::FrameEventsDelta const& t,
- void*& buffer, size_t& size, int*& fds, size_t numFds) {
- // Check that t.index is within a valid range.
- if (t.index >= static_cast<uint32_t>(FrameEventHistory::MAX_FRAME_HISTORY)
- || t.index > std::numeric_limits<uint8_t>::max()) {
- return BAD_VALUE;
- }
-
- FlattenableUtils::write(buffer, size, t.frameNumber);
-
- // These are static_cast to uint8_t for alignment.
- FlattenableUtils::write(buffer, size, static_cast<uint8_t>(t.index));
- FlattenableUtils::write(
- buffer, size, static_cast<uint8_t>(t.addPostCompositeCalled));
- FlattenableUtils::write(
- buffer, size, static_cast<uint8_t>(t.addRetireCalled));
- FlattenableUtils::write(
- buffer, size, static_cast<uint8_t>(t.addReleaseCalled));
-
- FlattenableUtils::write(buffer, size, t.postedTimeNs);
- FlattenableUtils::write(buffer, size, t.requestedPresentTimeNs);
- FlattenableUtils::write(buffer, size, t.latchTimeNs);
- FlattenableUtils::write(buffer, size, t.firstRefreshStartTimeNs);
- FlattenableUtils::write(buffer, size, t.lastRefreshStartTimeNs);
- FlattenableUtils::write(buffer, size, t.dequeueReadyTime);
-
- // Fences
- HGraphicBufferProducer::FenceTimeSnapshot const* tSnapshot[4];
- tSnapshot[0] = &t.gpuCompositionDoneFence;
- tSnapshot[1] = &t.displayPresentFence;
- tSnapshot[2] = &t.displayRetireFence;
- tSnapshot[3] = &t.releaseFence;
- for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
- status_t status = flatten(
- *(tSnapshot[snapshotIndex]), buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- }
- return NO_ERROR;
-}
-
-// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventHistoryDelta
-
-/**
- * \brief Return the size of the non-fd buffer required to flatten
- * `HGraphicBufferProducer::FrameEventHistoryDelta`.
- *
- * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(
- HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
- size_t size = 4 + // mDeltas.size()
- sizeof(t.compositorTiming);
- for (size_t i = 0; i < t.deltas.size(); ++i) {
- size += getFlattenedSize(t.deltas[i]);
- }
- return size;
-}
-
-/**
- * \brief Return the number of file descriptors contained in
- * `HGraphicBufferProducer::FrameEventHistoryDelta`.
- *
- * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
- * \return The number of file descriptors contained in \p t.
- */
-inline size_t getFdCount(
- HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
- size_t numFds = 0;
- for (size_t i = 0; i < t.deltas.size(); ++i) {
- numFds += getFdCount(t.deltas[i]);
- }
- return numFds;
-}
-
-/**
- * \brief Unflatten `FrameEventHistoryDelta`.
- *
- * \param[out] t The destination `FrameEventHistoryDelta`.
- * \param[out] nh The underlying array of arrays of native handles.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR`, \p nh will be populated with `nullptr` or
- * newly created handles. The second dimension of \p nh will be 4. Each non-null
- * slot in \p nh will need to be deleted manually with `native_handle_delete()`.
- */
-inline status_t unflatten(
- HGraphicBufferProducer::FrameEventHistoryDelta* t,
- std::vector<std::vector<native_handle_t*> >* nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < 4) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::read(buffer, size, t->compositorTiming);
-
- uint32_t deltaCount = 0;
- FlattenableUtils::read(buffer, size, deltaCount);
- if (static_cast<size_t>(deltaCount) >
- ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
- return BAD_VALUE;
- }
- t->deltas.resize(deltaCount);
- nh->resize(deltaCount);
- for (size_t deltaIndex = 0; deltaIndex < deltaCount; ++deltaIndex) {
- status_t status = unflatten(
- &(t->deltas[deltaIndex]), &((*nh)[deltaIndex]),
- buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Flatten `FrameEventHistoryDelta`.
- *
- * \param[in] t The source `FrameEventHistoryDelta`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * This function will duplicate file descriptors contained in \p t.
- */
-inline status_t flatten(
- HGraphicBufferProducer::FrameEventHistoryDelta const& t,
- void*& buffer, size_t& size, int*& fds, size_t& numFds) {
- if (t.deltas.size() > ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
- return BAD_VALUE;
- }
- if (size < getFlattenedSize(t)) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::write(buffer, size, t.compositorTiming);
-
- FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.deltas.size()));
- for (size_t deltaIndex = 0; deltaIndex < t.deltas.size(); ++deltaIndex) {
- status_t status = flatten(t.deltas[deltaIndex], buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Wrap `::android::FrameEventHistoryDelta` in
- * `HGraphicBufferProducer::FrameEventHistoryDelta`.
- *
- * \param[out] t The wrapper of type
- * `HGraphicBufferProducer::FrameEventHistoryDelta`.
- * \param[out] nh The array of arrays of native handles that are referred to by
- * members of \p t.
- * \param[in] l The source `::android::FrameEventHistoryDelta`.
- *
- * On success, each member of \p nh will be either `nullptr` or a newly created
- * native handle. All the non-`nullptr` elements must be deleted individually
- * with `native_handle_delete()`.
- */
-inline bool wrapAs(HGraphicBufferProducer::FrameEventHistoryDelta* t,
- std::vector<std::vector<native_handle_t*> >* nh,
- ::android::FrameEventHistoryDelta const& l) {
-
- size_t const baseSize = l.getFlattenedSize();
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = l.getFdCount();
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = baseFds.get();
- size_t numFds = baseNumFds;
- if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
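
// [Editorial illustration, not part of the original patch] After a successful call to
// the wrapAs() overload above, the temporary handles are typically released once the
// HIDL FrameEventHistoryDelta has been sent; a hedged sketch of that cleanup
// (the helper name deleteHandles is made up for this example):
//
//     void deleteHandles(std::vector<std::vector<native_handle_t*>>* nh) {
//         for (auto& perDelta : *nh) {
//             for (native_handle_t* handle : perDelta) {
//                 if (handle != nullptr) {
//                     native_handle_delete(handle);
//                 }
//             }
//         }
//         nh->clear();
//     }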
-
-/**
- * \brief Convert `HGraphicBufferProducer::FrameEventHistoryDelta` to
- * `::android::FrameEventHistoryDelta`.
- *
- * \param[out] l The destination `::android::FrameEventHistoryDelta`.
- * \param[in] t The source `HGraphicBufferProducer::FrameEventHistoryDelta`.
- *
- * This function will duplicate all file descriptors contained in \p t.
- */
-inline bool convertTo(
- ::android::FrameEventHistoryDelta* l,
- HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
-
- size_t const baseSize = getFlattenedSize(t);
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = getFdCount(t);
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = static_cast<int*>(baseFds.get());
- size_t numFds = baseNumFds;
- if (flatten(t, buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-// Ref: frameworks/native/libs/ui/Region.cpp
-
-/**
- * \brief Return the size of the buffer required to flatten `Region`.
- *
- * \param[in] t The input `Region`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(Region const& t) {
- return sizeof(uint32_t) + t.size() * sizeof(::android::Rect);
-}
-
-/**
- * \brief Unflatten `Region`.
- *
- * \param[out] t The destination `Region`.
- * \param[in,out] buffer The pointer to the flat buffer.
- * \param[in,out] size The size of the flat buffer.
- * \return `NO_ERROR` on success; other value on failure.
- */
-inline status_t unflatten(Region* t, void const*& buffer, size_t& size) {
- if (size < sizeof(uint32_t)) {
- return NO_MEMORY;
- }
-
- uint32_t numRects = 0;
- FlattenableUtils::read(buffer, size, numRects);
- if (size < numRects * sizeof(Rect)) {
- return NO_MEMORY;
- }
- if (numRects > (UINT32_MAX / sizeof(Rect))) {
- return NO_MEMORY;
- }
-
- t->resize(numRects);
- for (size_t r = 0; r < numRects; ++r) {
- ::android::Rect rect(::android::Rect::EMPTY_RECT);
- status_t status = rect.unflatten(buffer, size);
- if (status != NO_ERROR) {
- return status;
- }
- FlattenableUtils::advance(buffer, size, sizeof(rect));
- (*t)[r] = Rect{
- static_cast<int32_t>(rect.left),
- static_cast<int32_t>(rect.top),
- static_cast<int32_t>(rect.right),
- static_cast<int32_t>(rect.bottom)};
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Flatten `Region`.
- *
- * \param[in] t The source `Region`.
- * \param[in,out] buffer The pointer to the flat buffer.
- * \param[in,out] size The size of the flat buffer.
- * \return `NO_ERROR` on success; other value on failure.
- */
-inline status_t flatten(Region const& t, void*& buffer, size_t& size) {
- if (size < getFlattenedSize(t)) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.size()));
- for (size_t r = 0; r < t.size(); ++r) {
- ::android::Rect rect(
- static_cast<int32_t>(t[r].left),
- static_cast<int32_t>(t[r].top),
- static_cast<int32_t>(t[r].right),
- static_cast<int32_t>(t[r].bottom));
- status_t status = rect.flatten(buffer, size);
- if (status != NO_ERROR) {
- return status;
- }
- FlattenableUtils::advance(buffer, size, sizeof(rect));
- }
- return NO_ERROR;
-}
-
-/**
- * \brief Convert `::android::Region` to `Region`.
- *
- * \param[out] t The destination `Region`.
- * \param[in] l The source `::android::Region`.
- */
-// convert: ::android::Region -> Region
-inline bool convertTo(Region* t, ::android::Region const& l) {
- size_t const baseSize = l.getFlattenedSize();
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- if (l.flatten(buffer, size) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- if (unflatten(t, constBuffer, size) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-/**
- * \brief Convert `Region` to `::android::Region`.
- *
- * \param[out] l The destination `::android::Region`.
- * \param[in] t The source `Region`.
- */
-// convert: Region -> ::android::Region
-inline bool convertTo(::android::Region* l, Region const& t) {
- size_t const baseSize = getFlattenedSize(t);
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- if (flatten(t, buffer, size) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- if (l->unflatten(constBuffer, size) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
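
// [Editorial illustration, not part of the original patch] The two Region converters
// above are symmetric; a minimal round trip (the dimensions are arbitrary) might look
// like:
//
//     ::android::Region nativeRegion(::android::Rect(0, 0, 1920, 1080));
//     Region hidlRegion;
//     if (convertTo(&hidlRegion, nativeRegion)) {
//         ::android::Region restored;
//         bool ok = convertTo(&restored, hidlRegion);
//         (void)ok;
//     }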
-
-// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
-// BGraphicBufferProducer::QueueBufferInput
-
-/**
- * \brief Return a lower bound on the size of the buffer required to flatten
- * `HGraphicBufferProducer::QueueBufferInput`.
- *
- * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
- * \return A lower bound on the size of the flat buffer.
- */
-constexpr size_t minFlattenedSize(
- HGraphicBufferProducer::QueueBufferInput const& /* t */) {
- return sizeof(int64_t) + // timestamp
- sizeof(int) + // isAutoTimestamp
- sizeof(android_dataspace) + // dataSpace
- sizeof(::android::Rect) + // crop
- sizeof(int) + // scalingMode
- sizeof(uint32_t) + // transform
- sizeof(uint32_t) + // stickyTransform
- sizeof(bool); // getFrameTimestamps
-}
-
-/**
- * \brief Return the size of the buffer required to flatten
- * `HGraphicBufferProducer::QueueBufferInput`.
- *
- * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
- * \return The required size of the flat buffer.
- */
-inline size_t getFlattenedSize(HGraphicBufferProducer::QueueBufferInput const& t) {
- return minFlattenedSize(t) +
- getFenceFlattenedSize(t.fence) +
- getFlattenedSize(t.surfaceDamage);
-}
-
-/**
- * \brief Return the number of file descriptors contained in
- * `HGraphicBufferProducer::QueueBufferInput`.
- *
- * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
- * \return The number of file descriptors contained in \p t.
- */
-inline size_t getFdCount(
- HGraphicBufferProducer::QueueBufferInput const& t) {
- return getFenceFdCount(t.fence);
-}
-
-/**
- * \brief Flatten `HGraphicBufferProducer::QueueBufferInput`.
- *
- * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
- * \param[out] nh The native handle cloned from `t.fence`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * This function will duplicate the file descriptor in `t.fence`.
- */
-inline status_t flatten(HGraphicBufferProducer::QueueBufferInput const& t,
- native_handle_t** nh,
- void*& buffer, size_t& size, int*& fds, size_t& numFds) {
- if (size < getFlattenedSize(t)) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::write(buffer, size, t.timestamp);
- FlattenableUtils::write(buffer, size, static_cast<int>(t.isAutoTimestamp));
- FlattenableUtils::write(buffer, size,
- static_cast<android_dataspace_t>(t.dataSpace));
- FlattenableUtils::write(buffer, size, ::android::Rect(
- static_cast<int32_t>(t.crop.left),
- static_cast<int32_t>(t.crop.top),
- static_cast<int32_t>(t.crop.right),
- static_cast<int32_t>(t.crop.bottom)));
- FlattenableUtils::write(buffer, size, static_cast<int>(t.scalingMode));
- FlattenableUtils::write(buffer, size, t.transform);
- FlattenableUtils::write(buffer, size, t.stickyTransform);
- FlattenableUtils::write(buffer, size, t.getFrameTimestamps);
-
- *nh = t.fence.getNativeHandle() == nullptr ?
- nullptr : native_handle_clone(t.fence);
- status_t status = flattenFence(hidl_handle(*nh), buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- return flatten(t.surfaceDamage, buffer, size);
-}
-
-/**
- * \brief Unflatten `HGraphicBufferProducer::QueueBufferInput`.
- *
- * \param[out] t The destination `HGraphicBufferProducer::QueueBufferInput`.
- * \param[out] nh The underlying native handle for `t->fence`.
- * \param[in,out] buffer The pointer to the flat non-fd buffer.
- * \param[in,out] size The size of the flat non-fd buffer.
- * \param[in,out] fds The pointer to the flat fd buffer.
- * \param[in,out] numFds The size of the flat fd buffer.
- * \return `NO_ERROR` on success; other value on failure.
- *
- * If the return value is `NO_ERROR` and `t->fence` contains a valid file
- * descriptor, \p nh will be a newly created native handle holding that file
- * descriptor. \p nh needs to be deleted with `native_handle_delete()`
- * afterwards.
- */
-inline status_t unflatten(
- HGraphicBufferProducer::QueueBufferInput* t, native_handle_t** nh,
- void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
- if (size < minFlattenedSize(*t)) {
- return NO_MEMORY;
- }
-
- FlattenableUtils::read(buffer, size, t->timestamp);
- int lIsAutoTimestamp;
- FlattenableUtils::read(buffer, size, lIsAutoTimestamp);
- t->isAutoTimestamp = static_cast<int32_t>(lIsAutoTimestamp);
- android_dataspace_t lDataSpace;
- FlattenableUtils::read(buffer, size, lDataSpace);
- t->dataSpace = static_cast<Dataspace>(lDataSpace);
- Rect lCrop;
- FlattenableUtils::read(buffer, size, lCrop);
- t->crop = Rect{
- static_cast<int32_t>(lCrop.left),
- static_cast<int32_t>(lCrop.top),
- static_cast<int32_t>(lCrop.right),
- static_cast<int32_t>(lCrop.bottom)};
- int lScalingMode;
- FlattenableUtils::read(buffer, size, lScalingMode);
- t->scalingMode = static_cast<int32_t>(lScalingMode);
- FlattenableUtils::read(buffer, size, t->transform);
- FlattenableUtils::read(buffer, size, t->stickyTransform);
- FlattenableUtils::read(buffer, size, t->getFrameTimestamps);
-
- status_t status = unflattenFence(&(t->fence), nh,
- buffer, size, fds, numFds);
- if (status != NO_ERROR) {
- return status;
- }
- return unflatten(&(t->surfaceDamage), buffer, size);
-}
-
-/**
- * \brief Wrap `BGraphicBufferProducer::QueueBufferInput` in
- * `HGraphicBufferProducer::QueueBufferInput`.
- *
- * \param[out] t The wrapper of type
- * `HGraphicBufferProducer::QueueBufferInput`.
- * \param[out] nh The underlying native handle for `t->fence`.
- * \param[in] l The source `BGraphicBufferProducer::QueueBufferInput`.
- *
- * If the return value is `true` and `t->fence` contains a valid file
- * descriptor, \p nh will be a newly created native handle holding that file
- * descriptor. \p nh needs to be deleted with `native_handle_delete()`
- * afterwards.
- */
-inline bool wrapAs(
- HGraphicBufferProducer::QueueBufferInput* t,
- native_handle_t** nh,
- BGraphicBufferProducer::QueueBufferInput const& l) {
-
- size_t const baseSize = l.getFlattenedSize();
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = l.getFdCount();
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = baseFds.get();
- size_t numFds = baseNumFds;
- if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
- return false;
- }
-
- return true;
-}
-
-/**
- * \brief Convert `HGraphicBufferProducer::QueueBufferInput` to
- * `BGraphicBufferProducer::QueueBufferInput`.
- *
- * \param[out] l The destination `BGraphicBufferProducer::QueueBufferInput`.
- * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
- *
- * If `t.fence` has a valid file descriptor, it will be duplicated.
- */
-inline bool convertTo(
- BGraphicBufferProducer::QueueBufferInput* l,
- HGraphicBufferProducer::QueueBufferInput const& t) {
-
- size_t const baseSize = getFlattenedSize(t);
- std::unique_ptr<uint8_t[]> baseBuffer(
- new (std::nothrow) uint8_t[baseSize]);
- if (!baseBuffer) {
- return false;
- }
-
- size_t const baseNumFds = getFdCount(t);
- std::unique_ptr<int[]> baseFds(
- new (std::nothrow) int[baseNumFds]);
- if (!baseFds) {
- return false;
- }
-
- void* buffer = static_cast<void*>(baseBuffer.get());
- size_t size = baseSize;
- int* fds = baseFds.get();
- size_t numFds = baseNumFds;
- native_handle_t* nh;
- if (flatten(t, &nh, buffer, size, fds, numFds) != NO_ERROR) {
- return false;
- }
-
- void const* constBuffer = static_cast<void const*>(baseBuffer.get());
- size = baseSize;
- int const* constFds = static_cast<int const*>(baseFds.get());
- numFds = baseNumFds;
- if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
- if (nh != nullptr) {
- native_handle_close(nh);
- native_handle_delete(nh);
- }
- return false;
- }
-
- native_handle_delete(nh);
- return true;
-}
-
-// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
-// BGraphicBufferProducer::QueueBufferOutput
-
-/**
- * \brief Wrap `BGraphicBufferProducer::QueueBufferOutput` in
- * `HGraphicBufferProducer::QueueBufferOutput`.
- *
- * \param[out] t The wrapper of type
- * `HGraphicBufferProducer::QueueBufferOutput`.
- * \param[out] nh The array of arrays of native handles that are referred to by
- * members of \p t.
- * \param[in] l The source `BGraphicBufferProducer::QueueBufferOutput`.
- *
- * On success, each member of \p nh will be either `nullptr` or a newly created
- * native handle. All the non-`nullptr` elements must be deleted individually
- * with `native_handle_delete()`.
- */
-// wrap: BGraphicBufferProducer::QueueBufferOutput ->
-// HGraphicBufferProducer::QueueBufferOutput
-inline bool wrapAs(HGraphicBufferProducer::QueueBufferOutput* t,
- std::vector<std::vector<native_handle_t*> >* nh,
- BGraphicBufferProducer::QueueBufferOutput const& l) {
- if (!wrapAs(&(t->frameTimestamps), nh, l.frameTimestamps)) {
- return false;
- }
- t->width = l.width;
- t->height = l.height;
- t->transformHint = l.transformHint;
- t->numPendingBuffers = l.numPendingBuffers;
- t->nextFrameNumber = l.nextFrameNumber;
- t->bufferReplaced = l.bufferReplaced;
- return true;
-}
-
-/**
- * \brief Convert `HGraphicBufferProducer::QueueBufferOutput` to
- * `BGraphicBufferProducer::QueueBufferOutput`.
- *
- * \param[out] l The destination `BGraphicBufferProducer::QueueBufferOutput`.
- * \param[in] t The source `HGraphicBufferProducer::QueueBufferOutput`.
- *
- * This function will duplicate all file descriptors contained in \p t.
- */
-// convert: HGraphicBufferProducer::QueueBufferOutput ->
-// BGraphicBufferProducer::QueueBufferOutput
-inline bool convertTo(
- BGraphicBufferProducer::QueueBufferOutput* l,
- HGraphicBufferProducer::QueueBufferOutput const& t) {
- if (!convertTo(&(l->frameTimestamps), t.frameTimestamps)) {
- return false;
- }
- l->width = t.width;
- l->height = t.height;
- l->transformHint = t.transformHint;
- l->numPendingBuffers = t.numPendingBuffers;
- l->nextFrameNumber = t.nextFrameNumber;
- l->bufferReplaced = t.bufferReplaced;
- return true;
-}
-
-/**
- * \brief Convert `BGraphicBufferProducer::DisconnectMode` to
- * `HGraphicBufferProducer::DisconnectMode`.
- *
- * \param[in] l The source `BGraphicBufferProducer::DisconnectMode`.
- * \return The corresponding `HGraphicBufferProducer::DisconnectMode`.
- */
-inline HGraphicBufferProducer::DisconnectMode toOmxDisconnectMode(
- BGraphicBufferProducer::DisconnectMode l) {
- switch (l) {
- case BGraphicBufferProducer::DisconnectMode::Api:
- return HGraphicBufferProducer::DisconnectMode::API;
- case BGraphicBufferProducer::DisconnectMode::AllLocal:
- return HGraphicBufferProducer::DisconnectMode::ALL_LOCAL;
- }
- return HGraphicBufferProducer::DisconnectMode::API;
-}
-
-/**
- * \brief Convert `HGraphicBufferProducer::DisconnectMode` to
- * `BGraphicBufferProducer::DisconnectMode`.
- *
- * \param[in] l The source `HGraphicBufferProducer::DisconnectMode`.
- * \return The corresponding `BGraphicBufferProducer::DisconnectMode`.
- */
-inline BGraphicBufferProducer::DisconnectMode toGuiDisconnectMode(
- HGraphicBufferProducer::DisconnectMode t) {
- switch (t) {
- case HGraphicBufferProducer::DisconnectMode::API:
- return BGraphicBufferProducer::DisconnectMode::Api;
- case HGraphicBufferProducer::DisconnectMode::ALL_LOCAL:
- return BGraphicBufferProducer::DisconnectMode::AllLocal;
- }
- return BGraphicBufferProducer::DisconnectMode::Api;
-}
-
} // namespace implementation
} // namespace V1_0
} // namespace omx
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
index a6a9d3e..5a46b26 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Omx.h
@@ -20,13 +20,15 @@
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
-#include <media/stagefright/omx/OMXNodeInstance.h>
#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
namespace android {
struct OMXMaster;
+struct OMXNodeInstance;
namespace hardware {
namespace media {
@@ -50,10 +52,9 @@
using ::android::wp;
using ::android::OMXMaster;
-using ::android::OmxNodeOwner;
using ::android::OMXNodeInstance;
-struct Omx : public IOmx, public hidl_death_recipient, public OmxNodeOwner {
+struct Omx : public IOmx, public hidl_death_recipient {
Omx();
virtual ~Omx();
@@ -68,8 +69,8 @@
// Method from hidl_death_recipient
void serviceDied(uint64_t cookie, const wp<IBase>& who) override;
- // Method from OmxNodeOwner
- virtual status_t freeNode(sp<OMXNodeInstance> const& instance) override;
+ // Method for OMXNodeInstance
+ status_t freeNode(sp<OMXNodeInstance> const& instance);
protected:
OMXMaster* mMaster;
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferProducer.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferProducer.h
index 4a3fe0c..322a699 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferProducer.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferProducer.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2016, The Android Open Source Project
+ * Copyright 2018, The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,14 +17,7 @@
#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERPRODUCER_H
#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERPRODUCER_H
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
-
-#include <binder/Binder.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/IProducerListener.h>
-
-#include <android/hardware/graphics/bufferqueue/1.0/IGraphicBufferProducer.h>
+#include <media/stagefright/bqhelper/WGraphicBufferProducer.h>
namespace android {
namespace hardware {
@@ -33,67 +26,8 @@
namespace V1_0 {
namespace implementation {
-using ::android::hardware::graphics::common::V1_0::PixelFormat;
-using ::android::hardware::media::V1_0::AnwBuffer;
-using ::android::hidl::base::V1_0::IBase;
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::sp;
-
-typedef ::android::hardware::graphics::bufferqueue::V1_0::
- IGraphicBufferProducer HGraphicBufferProducer;
-typedef ::android::hardware::graphics::bufferqueue::V1_0::
- IProducerListener HProducerListener;
-
-typedef ::android::IGraphicBufferProducer BGraphicBufferProducer;
-typedef ::android::IProducerListener BProducerListener;
-using ::android::BnGraphicBufferProducer;
-
-struct TWGraphicBufferProducer : public HGraphicBufferProducer {
- sp<BGraphicBufferProducer> mBase;
- TWGraphicBufferProducer(sp<BGraphicBufferProducer> const& base);
- Return<void> requestBuffer(int32_t slot, requestBuffer_cb _hidl_cb)
- override;
- Return<int32_t> setMaxDequeuedBufferCount(int32_t maxDequeuedBuffers)
- override;
- Return<int32_t> setAsyncMode(bool async) override;
- Return<void> dequeueBuffer(
- uint32_t width, uint32_t height, PixelFormat format, uint32_t usage,
- bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) override;
- Return<int32_t> detachBuffer(int32_t slot) override;
- Return<void> detachNextBuffer(detachNextBuffer_cb _hidl_cb) override;
- Return<void> attachBuffer(const AnwBuffer& buffer, attachBuffer_cb _hidl_cb)
- override;
- Return<void> queueBuffer(
- int32_t slot, const HGraphicBufferProducer::QueueBufferInput& input,
- queueBuffer_cb _hidl_cb) override;
- Return<int32_t> cancelBuffer(int32_t slot, const hidl_handle& fence)
- override;
- Return<void> query(int32_t what, query_cb _hidl_cb) override;
- Return<void> connect(const sp<HProducerListener>& listener,
- int32_t api, bool producerControlledByApp,
- connect_cb _hidl_cb) override;
- Return<int32_t> disconnect(
- int32_t api,
- HGraphicBufferProducer::DisconnectMode mode) override;
- Return<int32_t> setSidebandStream(const hidl_handle& stream) override;
- Return<void> allocateBuffers(
- uint32_t width, uint32_t height,
- PixelFormat format, uint32_t usage) override;
- Return<int32_t> allowAllocation(bool allow) override;
- Return<int32_t> setGenerationNumber(uint32_t generationNumber) override;
- Return<void> getConsumerName(getConsumerName_cb _hidl_cb) override;
- Return<int32_t> setSharedBufferMode(bool sharedBufferMode) override;
- Return<int32_t> setAutoRefresh(bool autoRefresh) override;
- Return<int32_t> setDequeueTimeout(int64_t timeoutNs) override;
- Return<void> getLastQueuedBuffer(getLastQueuedBuffer_cb _hidl_cb) override;
- Return<void> getFrameTimestamps(getFrameTimestamps_cb _hidl_cb) override;
- Return<void> getUniqueId(getUniqueId_cb _hidl_cb) override;
-};
+using TWGraphicBufferProducer = ::android::TWGraphicBufferProducer<
+ ::android::hardware::graphics::bufferqueue::V1_0::IGraphicBufferProducer>;
} // namespace implementation
} // namespace V1_0
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h
index b9f22ab..4e56c98 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h
@@ -28,7 +28,7 @@
#include <android/BnGraphicBufferSource.h>
-#include <media/stagefright/omx/GraphicBufferSource.h>
+#include <media/stagefright/omx/OmxGraphicBufferSource.h>
namespace android {
namespace hardware {
@@ -37,7 +37,7 @@
namespace V1_0 {
namespace implementation {
-using ::android::GraphicBufferSource;
+using ::android::OmxGraphicBufferSource;
using ::android::hardware::graphics::common::V1_0::Dataspace;
using ::android::hardware::media::omx::V1_0::ColorAspects;
using ::android::hardware::media::omx::V1_0::IGraphicBufferSource;
@@ -69,10 +69,10 @@
struct TWGraphicBufferSource : public TGraphicBufferSource {
struct TWOmxNodeWrapper;
struct TWOmxBufferSource;
- sp<GraphicBufferSource> mBase;
+ sp<OmxGraphicBufferSource> mBase;
sp<IOmxBufferSource> mOmxBufferSource;
- TWGraphicBufferSource(sp<GraphicBufferSource> const& base);
+ TWGraphicBufferSource(sp<OmxGraphicBufferSource> const& base);
Return<Status> configure(
const sp<IOmxNode>& omxNode, Dataspace dataspace) override;
Return<Status> setSuspend(bool suspend, int64_t timeUs) override;
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/WProducerListener.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WProducerListener.h
deleted file mode 100644
index a75e48a..0000000
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/WProducerListener.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright 2016, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
-#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
-
-#include <hidl/MQDescriptor.h>
-#include <hidl/Status.h>
-
-#include <binder/IBinder.h>
-#include <gui/IProducerListener.h>
-
-#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace omx {
-namespace V1_0 {
-namespace implementation {
-
-using ::android::hidl::base::V1_0::IBase;
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-using ::android::sp;
-
-typedef ::android::hardware::graphics::bufferqueue::V1_0::IProducerListener
- HProducerListener;
-typedef ::android::IProducerListener
- BProducerListener;
-using ::android::BnProducerListener;
-
-struct TWProducerListener : public HProducerListener {
- sp<BProducerListener> mBase;
- TWProducerListener(sp<BProducerListener> const& base);
- Return<void> onBufferReleased() override;
- Return<bool> needsReleaseNotify() override;
-};
-
-class LWProducerListener : public BnProducerListener {
-public:
- sp<HProducerListener> mBase;
- LWProducerListener(sp<HProducerListener> const& base);
- void onBufferReleased() override;
- bool needsReleaseNotify() override;
-};
-
-} // namespace implementation
-} // namespace V1_0
-} // namespace omx
-} // namespace media
-} // namespace hardware
-} // namespace android
-
-#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/BWGraphicBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/BWGraphicBufferSource.h
index 0f78eb6..0efff22 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/BWGraphicBufferSource.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/BWGraphicBufferSource.h
@@ -23,14 +23,14 @@
#include <android/BnOMXBufferSource.h>
#include <media/IOMX.h>
-#include "GraphicBufferSource.h"
+#include "OmxGraphicBufferSource.h"
#include "IOmxNodeWrapper.h"
namespace android {
using ::android::binder::Status;
using ::android::BnGraphicBufferSource;
-using ::android::GraphicBufferSource;
+using ::android::OmxGraphicBufferSource;
using ::android::IOMXNode;
using ::android::sp;
@@ -38,10 +38,10 @@
struct BWOMXBufferSource;
struct BWOmxNodeWrapper;
- sp<GraphicBufferSource> mBase;
+ sp<OmxGraphicBufferSource> mBase;
sp<IOMXBufferSource> mOMXBufferSource;
- BWGraphicBufferSource(sp<GraphicBufferSource> const &base);
+ BWGraphicBufferSource(sp<OmxGraphicBufferSource> const &base);
Status configure(
const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/FrameDropper.h b/media/libstagefright/omx/include/media/stagefright/omx/FrameDropper.h
deleted file mode 100644
index c5a6d4b..0000000
--- a/media/libstagefright/omx/include/media/stagefright/omx/FrameDropper.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FRAME_DROPPER_H_
-
-#define FRAME_DROPPER_H_
-
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-#include <media/stagefright/foundation/ABase.h>
-
-namespace android {
-
-struct FrameDropper : public RefBase {
- // No frames will be dropped until a valid max frame rate is set.
- FrameDropper();
-
- // maxFrameRate required to be positive.
- status_t setMaxFrameRate(float maxFrameRate);
-
- // Returns false if max frame rate has not been set via setMaxFrameRate.
- bool shouldDrop(int64_t timeUs);
-
-protected:
- virtual ~FrameDropper();
-
-private:
- int64_t mDesiredMinTimeUs;
- int64_t mMinIntervalUs;
-
- DISALLOW_EVIL_CONSTRUCTORS(FrameDropper);
-};
-
-} // namespace android
-
-#endif // FRAME_DROPPER_H_
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/GraphicBufferSource.h
deleted file mode 100644
index 84fee6f..0000000
--- a/media/libstagefright/omx/include/media/stagefright/omx/GraphicBufferSource.h
+++ /dev/null
@@ -1,485 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef GRAPHIC_BUFFER_SOURCE_H_
-
-#define GRAPHIC_BUFFER_SOURCE_H_
-
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/BufferQueue.h>
-#include <utils/RefBase.h>
-
-#include <media/hardware/VideoAPI.h>
-#include <media/IOMX.h>
-#include <media/OMXFenceParcelable.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/foundation/AHandlerReflector.h>
-#include <media/stagefright/foundation/ALooper.h>
-
-#include <android/BnGraphicBufferSource.h>
-#include <android/BnOMXBufferSource.h>
-
-#include "IOmxNodeWrapper.h"
-
-namespace android {
-
-using ::android::binder::Status;
-
-struct FrameDropper;
-
-/*
- * This class is used to feed OMX codecs from a Surface via BufferQueue or
- * HW producer.
- *
- * Instances of the class don't run on a dedicated thread. Instead,
- * various events trigger data movement:
- *
- * - Availability of a new frame of data from the BufferQueue (notified
- * via the onFrameAvailable callback).
- * - The return of a codec buffer (via OnEmptyBufferDone).
- * - Application signaling end-of-stream.
- * - Transition to or from "executing" state.
- *
- * Frames of data (and, perhaps, the end-of-stream indication) can arrive
- * before the codec is in the "executing" state, so we need to queue
- * things up until we're ready to go.
- *
- * The GraphicBufferSource can be configured dynamically to discard frames
- * from the source:
- *
- * - if their timestamp is less than a start time
- * - if the source is suspended or stopped and the suspend/stop-time is reached
- * - if EOS was signaled
- * - if there is no encoder connected to it
- *
- * The source, furthermore, may choose not to encode (drop) frames:
- *
- * - to throttle the frame rate (keep it under a certain limit)
- *
- * Finally the source may optionally hold onto the last non-discarded frame
- * (even if it was dropped) to reencode it after an interval if no further
- * frames are sent by the producer.
- */
-class GraphicBufferSource : public BufferQueue::ConsumerListener {
-public:
- GraphicBufferSource();
-
- virtual ~GraphicBufferSource();
-
- // We can't throw an exception if the constructor fails, so we just set
- // this and require that the caller test the value.
- status_t initCheck() const {
- return mInitCheck;
- }
-
- // Returns the handle to the producer side of the BufferQueue. Buffers
- // queued on this will be received by GraphicBufferSource.
- sp<IGraphicBufferProducer> getIGraphicBufferProducer() const {
- return mProducer;
- }
-
- // OmxBufferSource interface
- // ------------------------------
-
- // This is called when OMX transitions to OMX_StateExecuting, which means
- // we can start handing it buffers. If we already have buffers of data
- // sitting in the BufferQueue, this will send them to the codec.
- Status onOmxExecuting();
-
- // This is called when OMX transitions to OMX_StateIdle, indicating that
- // the codec is meant to return all buffers back to the client for them
- // to be freed. Do NOT submit any more buffers to the component.
- Status onOmxIdle();
-
- // This is called when OMX transitions to OMX_StateLoaded, indicating that
- // we are shutting down.
- Status onOmxLoaded();
-
- // A "codec buffer", i.e. a buffer that can be used to pass data into
- // the encoder, has been allocated. (This call does not call back into
- // OMXNodeInstance.)
- Status onInputBufferAdded(int32_t bufferId);
-
- // Called from OnEmptyBufferDone. If we have a BQ buffer available,
- // fill it with a new frame of data; otherwise, just mark it as available.
- Status onInputBufferEmptied(int32_t bufferId, int fenceFd);
-
- // IGraphicBufferSource interface
- // ------------------------------
-
- // Configure the buffer source to be used with an OMX node with the default
- // data space.
- status_t configure(
- const sp<IOmxNodeWrapper> &omxNode,
- int32_t dataSpace,
- int32_t bufferCount,
- uint32_t frameWidth,
- uint32_t frameHeight,
- uint32_t consumerUsage);
-
- // This is called after the last input frame has been submitted or the buffer
- // timestamp is greater than or equal to stopTimeUs. We need to submit an empty
- // buffer with the EOS flag set. If we don't have a codec buffer ready,
- // we just set the mEndOfStream flag.
- status_t signalEndOfInputStream();
-
- // If suspend is true, all incoming buffers (including those currently
- // in the BufferQueue) with timestamp larger than timeUs will be discarded
- // until the suspension is lifted. If suspend is false, all incoming buffers
- // (including those currently in the BufferQueue) with timestamp larger than
- // timeUs will be processed. timeUs uses SYSTEM_TIME_MONOTONIC time base.
- status_t setSuspend(bool suspend, int64_t timeUs);
-
- // Specifies the interval after which we requeue the buffer previously
- // queued to the encoder. This is useful in the case of surface flinger
- // providing the input surface if the resulting encoded stream is to
- // be displayed "live". If we were not to push through the extra frame
- // the decoder on the remote end would be unable to decode the latest frame.
- // This API must be called before transitioning the encoder to "executing"
- // state and once this behaviour is specified it cannot be reset.
- status_t setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs);
-
- // Sets the input buffer timestamp offset.
- // When set, the sample's timestamp will be adjusted with the timeOffsetUs.
- status_t setTimeOffsetUs(int64_t timeOffsetUs);
-
- // When set, the max frame rate fed to the encoder will be capped at maxFps.
- status_t setMaxFps(float maxFps);
-
- // Sets the time lapse (or slow motion) parameters.
- // When set, the sample's timestamp will be modified to playback framerate,
- // and capture timestamp will be modified to capture rate.
- status_t setTimeLapseConfig(double fps, double captureFps);
-
- // Sets the start time us (in system time), samples before which should
- // be dropped and not submitted to encoder
- status_t setStartTimeUs(int64_t startTimeUs);
-
- // Sets the stop time us (in system time), samples after which should be dropped
- // and not submitted to encoder. timeUs uses SYSTEM_TIME_MONOTONIC time base.
- status_t setStopTimeUs(int64_t stopTimeUs);
-
- // Gets the stop time offset in us. This is the time offset between latest buffer
- // time and the stopTimeUs. If stop time is not set, INVALID_OPERATION will be returned.
- // If return is OK, *stopTimeOffsetUs will contain the valid offset. Otherwise,
- // *stopTimeOffsetUs will not be modified. Positive stopTimeOffsetUs means buffer time
- // larger than stopTimeUs.
- status_t getStopTimeOffsetUs(int64_t *stopTimeOffsetUs);
-
- // Sets the desired color aspects, e.g. to be used when producer does not specify a dataspace.
- status_t setColorAspects(int32_t aspectsPacked);
-
-protected:
- // BQ::ConsumerListener interface
- // ------------------------------
-
- // BufferQueue::ConsumerListener interface, called when a new frame of
- // data is available. If we're executing and a codec buffer is
- // available, we acquire the buffer, copy the GraphicBuffer reference
- // into the codec buffer, and call Empty[This]Buffer. If we're not yet
- // executing or there's no codec buffer available, we just increment
- // mNumFramesAvailable and return.
- void onFrameAvailable(const BufferItem& item) override;
-
- // BufferQueue::ConsumerListener interface, called when the client has
- // released one or more GraphicBuffers. We clear out the appropriate
- // set of mBufferSlot entries.
- void onBuffersReleased() override;
-
- // BufferQueue::ConsumerListener interface, called when the client has
- // changed the sideband stream. GraphicBufferSource doesn't handle sideband
- // streams so this is a no-op (and should never be called).
- void onSidebandStreamChanged() override;
-
-private:
- // Lock, covers all member variables.
- mutable Mutex mMutex;
-
- // Used to report constructor failure.
- status_t mInitCheck;
-
- // Graphic buffer reference objects
- // --------------------------------
-
- // These are used to keep a shared reference to GraphicBuffers and gralloc handles owned by the
- // GraphicBufferSource as well as to manage the cache slots. Separate references are owned by
- // the buffer cache (controlled by the buffer queue/buffer producer) and the codec.
-
- // When we get a buffer from the producer (BQ) it designates them to be cached into specific
- // slots. Each slot owns a shared reference to the graphic buffer (we track these using
- // CachedBuffer) that is in that slot, but the producer controls the slots.
- struct CachedBuffer;
-
- // When we acquire a buffer, we must release it back to the producer once we (or the codec)
- // no longer uses it (as long as the buffer is still in the cache slot). We use shared
- // AcquiredBuffer instances for this purpose - and we call release buffer when the last
- // reference is relinquished.
- struct AcquiredBuffer;
-
- // We also need to keep some extra metadata (other than the buffer reference) for acquired
- // buffers. These are tracked in VideoBuffer struct.
- struct VideoBuffer {
- std::shared_ptr<AcquiredBuffer> mBuffer;
- nsecs_t mTimestampNs;
- android_dataspace_t mDataspace;
- };
-
- // Cached and acquired buffers
- // --------------------------------
-
- typedef int slot_id;
-
- // Maps a slot to the cached buffer in that slot
- KeyedVector<slot_id, std::shared_ptr<CachedBuffer>> mBufferSlots;
-
- // Queue of buffers acquired in chronological order that are not yet submitted to the codec
- List<VideoBuffer> mAvailableBuffers;
-
- // Number of buffers that have been signaled by the producer that they are available, but
- // we've been unable to acquire them due to our max acquire count
- int32_t mNumAvailableUnacquiredBuffers;
-
- // Number of frames acquired from consumer (debug only)
- // (as in acquireBuffer called, and release needs to be called)
- int32_t mNumOutstandingAcquires;
-
- // Acquire a buffer from the BQ and store it in |item| if successful
- // \return OK on success, or error on failure.
- status_t acquireBuffer_l(VideoBuffer *item);
-
- // Called when a buffer was acquired from the producer
- void onBufferAcquired_l(const VideoBuffer &buffer);
-
- // marks the buffer at the slot no longer cached, and accounts for the outstanding
- // acquire count. Returns true if the slot was populated; otherwise, false.
- bool discardBufferInSlot_l(slot_id i);
-
- // marks the buffer at the slot index no longer cached, and accounts for the outstanding
- // acquire count
- void discardBufferAtSlotIndex_l(ssize_t bsi);
-
- // release all acquired and unacquired available buffers
- // This method will return if it fails to acquire an unacquired available buffer, which will
- // leave mNumAvailableUnacquiredBuffers positive on return.
- void releaseAllAvailableBuffers_l();
-
- // returns whether we have any available buffers (acquired or not-yet-acquired)
- bool haveAvailableBuffers_l() const {
- return !mAvailableBuffers.empty() || mNumAvailableUnacquiredBuffers > 0;
- }
-
- // Codec buffers
- // -------------
-
- // When we queue buffers to the encoder, we must hold the references to the graphic buffers
- // in those buffers - as the producer may free the slots.
-
- typedef int32_t codec_buffer_id;
-
- // set of codec buffer ID-s of buffers available to fill
- List<codec_buffer_id> mFreeCodecBuffers;
-
- // maps codec buffer ID-s to buffer info submitted to the codec. Used to keep a reference for
- // the graphics buffer.
- KeyedVector<codec_buffer_id, std::shared_ptr<AcquiredBuffer>> mSubmittedCodecBuffers;
-
- // Processes the next acquired frame. If there is no available codec buffer, it returns false
- // without any further action.
- //
- // Otherwise, it consumes the next acquired frame and determines if it needs to be discarded or
- // dropped. If neither are needed, it submits it to the codec. It also saves the latest
- // non-dropped frame and submits it for repeat encoding (if this is enabled).
- //
- // \require there must be an acquired frame (i.e. we're in the onFrameAvailable callback,
- // or if we're in codecBufferEmptied and mNumFramesAvailable is nonzero).
- // \require codec must be executing
- // \returns true if acquired (and handled) the next frame. Otherwise, false.
- bool fillCodecBuffer_l();
-
- // Calculates the media timestamp for |item| and on success it submits the buffer to the codec,
- // while also keeping a reference for it in mSubmittedCodecBuffers.
- // Returns UNKNOWN_ERROR if the buffer was not submitted due to buffer timestamp. Otherwise,
- // it returns any submit success or error value returned by the codec.
- status_t submitBuffer_l(const VideoBuffer &item);
-
- // Submits an empty buffer, with the EOS flag set if there is an available codec buffer and
- // sets mEndOfStreamSent flag. Does nothing if there is no codec buffer available.
- void submitEndOfInputStream_l();
-
- // Set to true if we want to send end-of-stream after we run out of available frames from the
- // producer
- bool mEndOfStream;
-
- // Flag that the EOS was submitted to the encoder
- bool mEndOfStreamSent;
-
- // Dataspace for the last frame submitted to the codec
- android_dataspace mLastDataspace;
-
- // Default color aspects for this source
- int32_t mDefaultColorAspectsPacked;
-
- // called when the data space of the input buffer changes
- void onDataspaceChanged_l(android_dataspace dataspace, android_pixel_format pixelFormat);
-
- // Pointer back to the Omx node that created us. We send buffers here.
- sp<IOmxNodeWrapper> mOMXNode;
-
- // Set by omxExecuting() / omxIdling().
- bool mExecuting;
-
- bool mSuspended;
-
- // returns true if this source is unconditionally discarding acquired buffers at the moment
- // regardless of the metadata of those buffers
- bool areWeDiscardingAvailableBuffers_l();
-
- int64_t mLastFrameTimestampUs;
-
- // Our BufferQueue interfaces. mProducer is passed to the producer through
- // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
- // the buffers queued by the producer.
- sp<IGraphicBufferProducer> mProducer;
- sp<IGraphicBufferConsumer> mConsumer;
-
- // The time to stop sending buffers.
- int64_t mStopTimeUs;
-
- struct ActionItem {
- typedef enum {
- PAUSE,
- RESUME,
- STOP
- } ActionType;
- ActionType mAction;
- int64_t mActionTimeUs;
- };
-
- // Maintain last action timestamp to ensure all the action timestamps are
- // monotonically increasing.
- int64_t mLastActionTimeUs;
-
- // An action queue that queues up all the actions sent to GraphicBufferSource.
- // STOP action should only show up at the end of the list as all the actions
- // after a STOP action will be discarded. mActionQueue is protected by mMutex.
- List<ActionItem> mActionQueue;
-
- ////
- friend struct AHandlerReflector<GraphicBufferSource>;
-
- enum {
- kWhatRepeatLastFrame, ///< queue last frame for reencoding
- };
- enum {
- kRepeatLastFrameCount = 10,
- };
-
- int64_t mSkipFramesBeforeNs;
-
- sp<FrameDropper> mFrameDropper;
-
- sp<ALooper> mLooper;
- sp<AHandlerReflector<GraphicBufferSource> > mReflector;
-
- // Repeat last frame feature
- // -------------------------
- // configuration parameter: repeat interval for frame repeating (<0 if repeating is disabled)
- int64_t mFrameRepeatIntervalUs;
-
- // current frame repeat generation - used to cancel a pending frame repeat
- int32_t mRepeatLastFrameGeneration;
-
- // number of times to repeat latest frame (0 = none)
- int32_t mOutstandingFrameRepeatCount;
-
- // The previous buffer should've been repeated but
- // no codec buffer was available at the time.
- bool mFrameRepeatBlockedOnCodecBuffer;
-
- // hold a reference to the last acquired (and not discarded) frame for frame repeating
- VideoBuffer mLatestBuffer;
-
- // queue last frame for reencode after the repeat interval.
- void queueFrameRepeat_l();
-
- // save |item| as the latest buffer and queue it for reencode (repeat)
- void setLatestBuffer_l(const VideoBuffer &item);
-
- // submit last frame to encoder and queue it for reencode
- // \return true if buffer was submitted, false if it wasn't (e.g. source is suspended, there
- // is no available codec buffer)
- bool repeatLatestBuffer_l();
-
- // Time lapse / slow motion configuration
- // --------------------------------------
-
- // desired frame rate for encoding - value <= 0 if undefined
- double mFps;
-
- // desired frame rate for capture - value <= 0 if undefined
- double mCaptureFps;
-
- // Time lapse mode is enabled if the capture frame rate is defined and it is
- // smaller than half the encoding frame rate (if defined). In this mode,
- // frames that come in between the capture interval (the reciprocal of the
- // capture frame rate) are dropped and the encoding timestamp is adjusted to
- // match the desired encoding frame rate.
- //
- // Slow motion mode is enabled if both encoding and capture frame rates are
- // defined and the encoding frame rate is less than half the capture frame
- // rate. In this mode, the source is expected to produce frames with an even
- // timestamp interval (after rounding) with the configured capture fps. The
- // first source timestamp is used as the source base time. Afterwards, the
- // timestamp of each source frame is snapped to the nearest expected capture
- // timestamp and scaled to match the configured encoding frame rate.
-
- // These modes must be enabled before using this source.
-
- // adjusted capture timestamp of the base frame
- int64_t mBaseCaptureUs;
-
- // adjusted encoding timestamp of the base frame
- int64_t mBaseFrameUs;
-
- // number of frames from the base time
- int64_t mFrameCount;
-
- // adjusted capture timestamp for previous frame (negative if there were
- // none)
- int64_t mPrevCaptureUs;
-
- // adjusted media timestamp for previous frame (negative if there were none)
- int64_t mPrevFrameUs;
-
- // desired offset between media time and capture time
- int64_t mInputBufferTimeOffsetUs;
-
- // Calculates and outputs the timestamp to use for a buffer with a specific buffer timestamp
- // |bufferTimestampNs|. Returns false on failure (buffer too close or timestamp is moving
- // backwards). Otherwise, stores the media timestamp in |*codecTimeUs| and returns true.
- //
- // This method takes into account the start time offset and any time lapse or slow motion time
- // adjustment requests.
- bool calculateCodecTimestamp_l(nsecs_t bufferTimeNs, int64_t *codecTimeUs);
-
- void onMessageReceived(const sp<AMessage> &msg);
-
- DISALLOW_EVIL_CONSTRUCTORS(GraphicBufferSource);
-};
-
-} // namespace android
-
-#endif // GRAPHIC_BUFFER_SOURCE_H_
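The time lapse / slow motion comments in the removed header describe how capture timestamps are remapped onto encoding timestamps. A rough sketch of the time-lapse case, under the stated assumptions (capture fps well below encoding fps, one kept frame per capture interval, first kept frame defines the base time), is below; the struct name and state are made up for the example and this is not the removed calculateCodecTimestamp_l().

    #include <stdint.h>

    // Illustrative only: remap the timestamp of the i-th kept frame so that
    // frames captured slowly play back at |encodeFps| (time lapse).
    struct TimeLapseMapper {
        double mEncodeFps;              // desired playback frame rate
        int64_t mBaseCaptureUs = -1;    // capture time of the first kept frame
        int64_t mFrameCount = 0;        // number of frames mapped so far

        explicit TimeLapseMapper(double encodeFps) : mEncodeFps(encodeFps) {}

        // Returns the media timestamp to hand to the encoder for a frame
        // captured at |captureTimeUs|.
        int64_t map(int64_t captureTimeUs) {
            if (mBaseCaptureUs < 0) {
                mBaseCaptureUs = captureTimeUs;
            }
            // Each kept frame advances the media clock by one encode interval,
            // regardless of how far apart the capture timestamps really are.
            int64_t mediaUs = mBaseCaptureUs +
                    static_cast<int64_t>(mFrameCount * 1e6 / mEncodeFps);
            ++mFrameCount;
            return mediaUs;
        }
    };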
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/OMX.h b/media/libstagefright/omx/include/media/stagefright/omx/OMX.h
deleted file mode 100644
index 594b4c0..0000000
--- a/media/libstagefright/omx/include/media/stagefright/omx/OMX.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_OMX_H_
-#define ANDROID_OMX_H_
-
-#include <media/IOMX.h>
-#include <utils/threads.h>
-#include <utils/KeyedVector.h>
-#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
-#include "OmxNodeOwner.h"
-
-namespace android {
-
-struct OMXMaster;
-struct OMXNodeInstance;
-
-class OMX : public BnOMX,
- public OmxNodeOwner,
- public IBinder::DeathRecipient {
-public:
- OMX();
-
- virtual status_t listNodes(List<ComponentInfo> *list);
-
- virtual status_t allocateNode(
- const char *name, const sp<IOMXObserver> &observer,
- sp<IOMXNode> *omxNode);
-
- virtual status_t createInputSurface(
- sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferSource> *bufferSource);
-
- virtual void binderDied(const wp<IBinder> &the_late_who);
-
- virtual status_t freeNode(const sp<OMXNodeInstance>& instance);
-
-protected:
- virtual ~OMX();
-
-private:
- Mutex mLock;
- OMXMaster *mMaster;
- MediaCodecsXmlParser mParser;
-
- KeyedVector<wp<IBinder>, sp<OMXNodeInstance> > mLiveNodes;
-
- OMX(const OMX &);
- OMX &operator=(const OMX &);
-};
-
-} // namespace android
-
-#endif // ANDROID_OMX_H_
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h b/media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h
index 1065ca5..a761ef6 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/OMXNodeInstance.h
@@ -25,9 +25,9 @@
#include <utils/threads.h>
#include <utils/KeyedVector.h>
#include <utils/SortedVector.h>
-#include "OmxNodeOwner.h"
#include <android/hidl/memory/1.0/IMemory.h>
+#include <media/stagefright/omx/1.0/Omx.h>
namespace android {
class GraphicBuffer;
@@ -35,11 +35,12 @@
class IOMXObserver;
struct OMXMaster;
class OMXBuffer;
-typedef hidl::memory::V1_0::IMemory IHidlMemory;
+using IHidlMemory = hidl::memory::V1_0::IMemory;
+using hardware::media::omx::V1_0::implementation::Omx;
struct OMXNodeInstance : public BnOMXNode {
OMXNodeInstance(
- OmxNodeOwner *owner, const sp<IOMXObserver> &observer, const char *name);
+ Omx *owner, const sp<IOMXObserver> &observer, const char *name);
void setHandle(OMX_HANDLETYPE handle);
@@ -122,7 +123,7 @@
Mutex mLock;
- OmxNodeOwner *mOwner;
+ Omx *mOwner;
OMX_HANDLETYPE mHandle;
sp<IOMXObserver> mObserver;
sp<CallbackDispatcher> mDispatcher;
@@ -287,6 +288,21 @@
bool handleDataSpaceChanged(omx_message &msg);
+ /*
+ * Set the max pts gap between frames.
+ *
+ * When the pts gap number is positive, it indicates the maximum pts gap between
+ * two adjacent frames. If two frames are further apart, timestamps will be modified
+ * to meet this requirement before the frames are sent to the encoder.
+ *
+ * When the pts gap number is negative, it indicates that the original timestamp
+ * should always be modified such that all adjacent frames have the same pts gap
+ * equal to the absolute value of the passed-in number. This option is typically
+ * used when the client wants to make sure all frames are captured even when the
+ * source potentially sends out-of-order frames.
+ *
+ * Timestamps will be restored to the original when the output is sent back to the client.
+ */
status_t setMaxPtsGapUs(const void *params, size_t size);
int64_t getCodecTimestamp(OMX_TICKS timestamp);
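The new comment block above describes the two modes of the max-pts-gap option. As a rough illustration of the adjustment it describes (not the actual OMXNodeInstance code), incoming timestamps could be rewritten roughly as follows; the function name and parameters are assumptions for the example, and the real node additionally records the original timestamps so they can be restored on output, which this sketch omits.

    #include <cstdint>

    // Illustrative only: adjust one frame's timestamp given a max-pts-gap setting.
    //   maxGapUs > 0: cap the gap between adjacent frames at maxGapUs.
    //   maxGapUs < 0: force every gap to exactly |maxGapUs|.
    int64_t adjustPts(int64_t origUs, int64_t prevOrigUs, int64_t prevAdjUs,
                      int64_t maxGapUs) {
        if (prevOrigUs < 0) {
            return origUs;                       // first frame: unchanged
        }
        int64_t gap = origUs - prevOrigUs;
        if (maxGapUs > 0) {
            if (gap > maxGapUs) gap = maxGapUs;  // clamp overly large gaps
        } else if (maxGapUs < 0) {
            gap = -maxGapUs;                     // constant gap of |maxGapUs|
        }
        return prevAdjUs + gap;
    }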
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/OMXStore.h b/media/libstagefright/omx/include/media/stagefright/omx/OMXStore.h
deleted file mode 100644
index e00d713..0000000
--- a/media/libstagefright/omx/include/media/stagefright/omx/OMXStore.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_OMXSTORE_H_
-#define ANDROID_OMXSTORE_H_
-
-#include <media/IOMXStore.h>
-#include <media/IOMX.h>
-#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
-
-#include <vector>
-#include <string>
-
-namespace android {
-
-class OMXStore : public BnOMXStore {
-public:
- OMXStore(
- const char* owner = "default",
- const char* const* searchDirs
- = MediaCodecsXmlParser::defaultSearchDirs,
- const char* mainXmlName
- = MediaCodecsXmlParser::defaultMainXmlName,
- const char* performanceXmlName
- = MediaCodecsXmlParser::defaultPerformanceXmlName,
- const char* profilingResultsXmlPath
- = MediaCodecsXmlParser::defaultProfilingResultsXmlPath);
-
- status_t listServiceAttributes(
- std::vector<Attribute>* attributes) override;
-
- status_t getNodePrefix(std::string* prefix) override;
-
- status_t listRoles(std::vector<RoleInfo>* roleList) override;
-
- status_t getOmx(const std::string& name, sp<IOMX>* omx) override;
-
- ~OMXStore() override;
-
-protected:
- status_t mParsingStatus;
- std::string mPrefix;
- std::vector<Attribute> mServiceAttributeList;
- std::vector<RoleInfo> mRoleList;
-};
-
-} // namespace android
-
-#endif // ANDROID_OMXSTORE_H_
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/OmxGraphicBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/OmxGraphicBufferSource.h
new file mode 100644
index 0000000..518e0cb
--- /dev/null
+++ b/media/libstagefright/omx/include/media/stagefright/omx/OmxGraphicBufferSource.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OMX_GRAPHIC_BUFFER_SOURCE_H_
+
+#define OMX_GRAPHIC_BUFFER_SOURCE_H_
+
+#include <media/stagefright/bqhelper/GraphicBufferSource.h>
+#include <media/stagefright/foundation/ABase.h>
+
+#include <android/BnGraphicBufferSource.h>
+#include <android/BnOMXBufferSource.h>
+
+#include "IOmxNodeWrapper.h"
+
+namespace android {
+
+using ::android::binder::Status;
+
+/*
+ * This class is used to feed OMX codecs from a Surface via BufferQueue or
+ * HW producer.
+ *
+ * See media/stagefright/bqhelper/GraphicBufferSource.h for documentation.
+ */
+class OmxGraphicBufferSource : public GraphicBufferSource {
+public:
+ OmxGraphicBufferSource() = default;
+ virtual ~OmxGraphicBufferSource() = default;
+
+ // OmxBufferSource interface
+ // ------------------------------
+
+ // This is called when OMX transitions to OMX_StateExecuting, which means
+ // we can start handing it buffers. If we already have buffers of data
+ // sitting in the BufferQueue, this will send them to the codec.
+ Status onOmxExecuting();
+
+ // This is called when OMX transitions to OMX_StateIdle, indicating that
+ // the codec is meant to return all buffers back to the client for them
+ // to be freed. Do NOT submit any more buffers to the component.
+ Status onOmxIdle();
+
+ // This is called when OMX transitions to OMX_StateLoaded, indicating that
+ // we are shutting down.
+ Status onOmxLoaded();
+
+ // Rest of the interface in GraphicBufferSource.
+
+ // IGraphicBufferSource interface
+ // ------------------------------
+
+ // Configure the buffer source to be used with an OMX node with the default
+ // data space.
+ status_t configure(
+ const sp<IOmxNodeWrapper> &omxNode,
+ int32_t dataSpace,
+ int32_t bufferCount,
+ uint32_t frameWidth,
+ uint32_t frameHeight,
+ uint32_t consumerUsage);
+
+ // Rest of the interface in GraphicBufferSource.
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(OmxGraphicBufferSource);
+};
+
+} // namespace android
+
+#endif // OMX_GRAPHIC_BUFFER_SOURCE_H_
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
index c9fd745..56fc691 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
@@ -23,6 +23,7 @@
#include <media/stagefright/foundation/AHandlerReflector.h>
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/IOMX.h>
+#include <media/hardware/HardwareAPI.h>
#include <utils/RefBase.h>
#include <utils/threads.h>
@@ -46,6 +47,7 @@
protected:
enum {
kDescribeColorAspectsIndex = kPrepareForAdaptivePlaybackIndex + 1,
+ kDescribeHdrStaticInfoIndex = kPrepareForAdaptivePlaybackIndex + 2,
};
enum {
@@ -76,6 +78,8 @@
virtual int getColorAspectPreference();
+ virtual bool supportDescribeHdrStaticInfo();
+
// This function sets both minimum buffer count and actual buffer count of
// input port to be |numInputBuffers|. It will also set both minimum buffer
// count and actual buffer count of output port to be |numOutputBuffers|.
@@ -113,7 +117,9 @@
// It will trigger OMX_EventPortSettingsChanged event if necessary.
void handlePortSettingsChange(
bool *portWillReset, uint32_t width, uint32_t height,
- CropSettingsMode cropSettingsMode = kCropUnSet, bool fakeStride = false);
+ OMX_COLOR_FORMATTYPE outputFormat = OMX_COLOR_FormatYUV420Planar,
+ CropSettingsMode cropSettingsMode = kCropUnSet,
+ bool fakeStride = false);
void copyYV12FrameToOutputBuffer(
uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
@@ -129,7 +135,8 @@
uint32_t mAdaptiveMaxWidth, mAdaptiveMaxHeight;
uint32_t mWidth, mHeight;
uint32_t mCropLeft, mCropTop, mCropWidth, mCropHeight;
-
+ OMX_COLOR_FORMATTYPE mOutputFormat;
+ HDRStaticInfo mHdrStaticInfo;
enum {
NONE,
AWAITING_DISABLED,
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
index db5496a..2d6f31b 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoEncoderOMXComponent.h
@@ -67,6 +67,8 @@
virtual OMX_ERRORTYPE getExtensionIndex(const char *name, OMX_INDEXTYPE *index);
+ OMX_ERRORTYPE validateInputBuffer(const OMX_BUFFERHEADERTYPE *inputBufferHeader);
+
enum {
kInputPortIndex = 0,
kOutputPortIndex = 1,
diff --git a/media/libstagefright/omx/tests/Android.bp b/media/libstagefright/omx/tests/Android.bp
index 8bcb99e..3b521ab 100644
--- a/media/libstagefright/omx/tests/Android.bp
+++ b/media/libstagefright/omx/tests/Android.bp
@@ -8,6 +8,8 @@
"libstagefright",
"libbinder",
"libmedia",
+ "libmedia_omx",
+ "libmediaextractor",
"libutils",
"liblog",
"libstagefright_foundation",
@@ -32,21 +34,3 @@
compile_multilib: "32",
}
-
-cc_test {
- name: "FrameDropper_test",
-
- srcs: ["FrameDropper_test.cpp"],
-
- shared_libs: [
- "libstagefright_omx",
- "libutils",
- ],
-
- include_dirs: ["frameworks/av/media/libstagefright/omx"],
-
- cflags: [
- "-Werror",
- "-Wall",
- ],
-}
diff --git a/media/libstagefright/omx/tests/FrameDropper_test.cpp b/media/libstagefright/omx/tests/FrameDropper_test.cpp
deleted file mode 100644
index a925da6..0000000
--- a/media/libstagefright/omx/tests/FrameDropper_test.cpp
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "FrameDropper_test"
-#include <utils/Log.h>
-
-#include <gtest/gtest.h>
-
-#include <media/stagefright/omx/FrameDropper.h>
-#include <media/stagefright/foundation/ADebug.h>
-
-namespace android {
-
-struct TestFrame {
- int64_t timeUs;
- bool shouldDrop;
-};
-
-static const TestFrame testFrames20Fps[] = {
- {1000000, false}, {1050000, false}, {1100000, false}, {1150000, false},
- {1200000, false}, {1250000, false}, {1300000, false}, {1350000, false},
- {1400000, false}, {1450000, false}, {1500000, false}, {1550000, false},
- {1600000, false}, {1650000, false}, {1700000, false}, {1750000, false},
- {1800000, false}, {1850000, false}, {1900000, false}, {1950000, false},
-};
-
-static const TestFrame testFrames30Fps[] = {
- {1000000, false}, {1033333, false}, {1066667, false}, {1100000, false},
- {1133333, false}, {1166667, false}, {1200000, false}, {1233333, false},
- {1266667, false}, {1300000, false}, {1333333, false}, {1366667, false},
- {1400000, false}, {1433333, false}, {1466667, false}, {1500000, false},
- {1533333, false}, {1566667, false}, {1600000, false}, {1633333, false},
-};
-
-static const TestFrame testFrames40Fps[] = {
- {1000000, false}, {1025000, true}, {1050000, false}, {1075000, false},
- {1100000, false}, {1125000, true}, {1150000, false}, {1175000, false},
- {1200000, false}, {1225000, true}, {1250000, false}, {1275000, false},
- {1300000, false}, {1325000, true}, {1350000, false}, {1375000, false},
- {1400000, false}, {1425000, true}, {1450000, false}, {1475000, false},
-};
-
-static const TestFrame testFrames60Fps[] = {
- {1000000, false}, {1016667, true}, {1033333, false}, {1050000, true},
- {1066667, false}, {1083333, true}, {1100000, false}, {1116667, true},
- {1133333, false}, {1150000, true}, {1166667, false}, {1183333, true},
- {1200000, false}, {1216667, true}, {1233333, false}, {1250000, true},
- {1266667, false}, {1283333, true}, {1300000, false}, {1316667, true},
-};
-
-static const TestFrame testFramesVariableFps[] = {
- // 40fps
- {1000000, false}, {1025000, true}, {1050000, false}, {1075000, false},
- {1100000, false}, {1125000, true}, {1150000, false}, {1175000, false},
- {1200000, false}, {1225000, true}, {1250000, false}, {1275000, false},
- {1300000, false}, {1325000, true}, {1350000, false}, {1375000, false},
- {1400000, false}, {1425000, true}, {1450000, false}, {1475000, false},
- // a timestamp jump plus switch to 20fps
- {2000000, false}, {2050000, false}, {2100000, false}, {2150000, false},
- {2200000, false}, {2250000, false}, {2300000, false}, {2350000, false},
- {2400000, false}, {2450000, false}, {2500000, false}, {2550000, false},
- {2600000, false}, {2650000, false}, {2700000, false}, {2750000, false},
- {2800000, false}, {2850000, false}, {2900000, false}, {2950000, false},
- // 60fps
- {2966667, false}, {2983333, true}, {3000000, false}, {3016667, true},
- {3033333, false}, {3050000, true}, {3066667, false}, {3083333, true},
- {3100000, false}, {3116667, true}, {3133333, false}, {3150000, true},
- {3166667, false}, {3183333, true}, {3200000, false}, {3216667, true},
- {3233333, false}, {3250000, true}, {3266667, false}, {3283333, true},
-};
-
-static const int kMaxTestJitterUs = 2000;
-// return one of 1000, 0, -1000 as jitter.
-static int GetJitter(size_t i) {
- return (1 - (i % 3)) * (kMaxTestJitterUs / 2);
-}
-
-class FrameDropperTest : public ::testing::Test {
-public:
- FrameDropperTest() : mFrameDropper(new FrameDropper()) {
- EXPECT_EQ(OK, mFrameDropper->setMaxFrameRate(30.0));
- }
-
-protected:
- void RunTest(const TestFrame* frames, size_t size) {
- for (size_t i = 0; i < size; ++i) {
- int jitter = GetJitter(i);
- int64_t testTimeUs = frames[i].timeUs + jitter;
- printf("time %lld, testTime %lld, jitter %d\n",
- (long long)frames[i].timeUs, (long long)testTimeUs, jitter);
- EXPECT_EQ(frames[i].shouldDrop, mFrameDropper->shouldDrop(testTimeUs));
- }
- }
-
- sp<FrameDropper> mFrameDropper;
-};
-
-TEST_F(FrameDropperTest, TestInvalidMaxFrameRate) {
- EXPECT_NE(OK, mFrameDropper->setMaxFrameRate(-1.0));
- EXPECT_NE(OK, mFrameDropper->setMaxFrameRate(0));
-}
-
-TEST_F(FrameDropperTest, Test20Fps) {
- RunTest(testFrames20Fps, ARRAY_SIZE(testFrames20Fps));
-}
-
-TEST_F(FrameDropperTest, Test30Fps) {
- RunTest(testFrames30Fps, ARRAY_SIZE(testFrames30Fps));
-}
-
-TEST_F(FrameDropperTest, Test40Fps) {
- RunTest(testFrames40Fps, ARRAY_SIZE(testFrames40Fps));
-}
-
-TEST_F(FrameDropperTest, Test60Fps) {
- RunTest(testFrames60Fps, ARRAY_SIZE(testFrames60Fps));
-}
-
-TEST_F(FrameDropperTest, TestVariableFps) {
- RunTest(testFramesVariableFps, ARRAY_SIZE(testFramesVariableFps));
-}
-
-} // namespace android
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 3266439..895a4ce 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -25,20 +25,22 @@
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
-#include <binder/MemoryDealer.h>
+#include <cutils/properties.h>
+#include <media/DataSource.h>
#include <media/IMediaHTTPService.h>
-#include <media/IMediaCodecService.h>
+#include <media/MediaExtractor.h>
+#include <media/MediaSource.h>
+#include <media/OMXBuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/DataSource.h>
+#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/SimpleDecodingSource.h>
-#include <media/OMXBuffer.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
#include <media/omx/1.0/WOmx.h>
#include <system/window.h>
@@ -67,7 +69,7 @@
/////////////////////////////////////////////////////////////////////
Harness::Harness()
- : mInitCheck(NO_INIT), mUseTreble(false) {
+ : mInitCheck(NO_INIT) {
mInitCheck = initOMX();
}
@@ -79,21 +81,12 @@
}
status_t Harness::initOMX() {
- if (property_get_bool("persist.media.treble_omx", true)) {
- using namespace ::android::hardware::media::omx::V1_0;
- sp<IOmx> tOmx = IOmx::getService();
- if (tOmx == nullptr) {
- return NO_INIT;
- }
- mOMX = new utils::LWOmx(tOmx);
- mUseTreble = true;
- } else {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("media.codec"));
- sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
- mOMX = service->getOMX();
- mUseTreble = false;
+ using namespace ::android::hardware::media::omx::V1_0;
+ sp<IOmx> tOmx = IOmx::getService();
+ if (tOmx == nullptr) {
+ return NO_INIT;
}
+ mOMX = new utils::LWOmx(tOmx);
return mOMX != 0 ? OK : NO_INIT;
}
@@ -221,25 +214,19 @@
for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
Buffer buffer;
buffer.mFlags = 0;
- if (mUseTreble) {
- bool success;
- auto transStatus = mAllocator->allocate(def.nBufferSize,
- [&success, &buffer](
- bool s,
- hidl_memory const& m) {
- success = s;
- buffer.mHidlMemory = m;
- });
- EXPECT(transStatus.isOk(),
- "Cannot call allocator");
- EXPECT(success,
- "Cannot allocate memory");
- err = mOMXNode->useBuffer(portIndex, buffer.mHidlMemory, &buffer.mID);
- } else {
- buffer.mMemory = mDealer->allocate(def.nBufferSize);
- CHECK(buffer.mMemory != NULL);
- err = mOMXNode->useBuffer(portIndex, buffer.mMemory, &buffer.mID);
- }
+ bool success;
+ auto transStatus = mAllocator->allocate(def.nBufferSize,
+ [&success, &buffer](
+ bool s,
+ hidl_memory const& m) {
+ success = s;
+ buffer.mHidlMemory = m;
+ });
+ EXPECT(transStatus.isOk(),
+ "Cannot call allocator");
+ EXPECT(success,
+ "Cannot allocate memory");
+ err = mOMXNode->useBuffer(portIndex, buffer.mHidlMemory, &buffer.mID);
EXPECT_SUCCESS(err, "useBuffer");
@@ -291,13 +278,13 @@
static sp<IMediaExtractor> CreateExtractorFromURI(const char *uri) {
sp<DataSource> source =
- DataSource::CreateFromURI(NULL /* httpService */, uri);
+ DataSourceFactory::CreateFromURI(NULL /* httpService */, uri);
if (source == NULL) {
return NULL;
}
- return MediaExtractor::Create(source);
+ return MediaExtractorFactory::Create(source);
}
status_t Harness::testStateTransitions(
@@ -308,13 +295,11 @@
return OK;
}
- if (mUseTreble) {
- mAllocator = IAllocator::getService("ashmem");
- EXPECT(mAllocator != nullptr,
- "Cannot obtain hidl AshmemAllocator");
- } else {
- mDealer = new MemoryDealer(16 * 1024 * 1024, "OMXHarness");
- }
+ mAllocator = IAllocator::getService("ashmem");
+ EXPECT(mAllocator != nullptr,
+ "Cannot obtain hidl AshmemAllocator");
+ // TODO: When Treble has MemoryHeap/MemoryDealer, we should specify the heap
+ // size to be 16 * 1024 * 1024.
sp<CodecObserver> observer = new CodecObserver(this, ++mCurGeneration);
@@ -543,7 +528,7 @@
return NULL;
}
-static sp<IMediaSource> CreateSourceForMime(const char *mime) {
+static sp<MediaSource> CreateSourceForMime(const char *mime) {
const char *url = GetURLForMime(mime);
if (url == NULL) {
@@ -564,7 +549,7 @@
CHECK(meta->findCString(kKeyMIMEType, &trackMime));
if (!strcasecmp(mime, trackMime)) {
- return extractor->getTrack(i);
+ return CreateMediaSourceFromIMediaSource(extractor->getTrack(i));
}
}
@@ -610,7 +595,7 @@
return OK;
}
- sp<IMediaSource> source = CreateSourceForMime(mime);
+ sp<MediaSource> source = CreateSourceForMime(mime);
if (source == NULL) {
printf(" * Unable to open test content for type '%s', "
@@ -620,14 +605,14 @@
return OK;
}
- sp<IMediaSource> seekSource = CreateSourceForMime(mime);
+ sp<MediaSource> seekSource = CreateSourceForMime(mime);
if (source == NULL || seekSource == NULL) {
return UNKNOWN_ERROR;
}
CHECK_EQ(seekSource->start(), (status_t)OK);
- sp<IMediaSource> codec = SimpleDecodingSource::Create(
+ sp<MediaSource> codec = SimpleDecodingSource::Create(
source, 0 /* flags */, NULL /* nativeWindow */, componentName);
CHECK(codec != NULL);
@@ -673,7 +658,7 @@
requestedSeekTimeUs, requestedSeekTimeUs / 1E6);
}
- MediaBuffer *buffer = NULL;
+ MediaBufferBase *buffer = NULL;
options.setSeekTo(
requestedSeekTimeUs, MediaSource::ReadOptions::SEEK_NEXT_SYNC);
@@ -682,7 +667,7 @@
actualSeekTimeUs = -1;
} else {
CHECK(buffer != NULL);
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &actualSeekTimeUs));
+ CHECK(buffer->meta_data().findInt64(kKeyTime, &actualSeekTimeUs));
CHECK(actualSeekTimeUs >= 0);
buffer->release();
@@ -694,7 +679,7 @@
}
status_t err;
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
for (;;) {
err = codec->read(&buffer, &options);
options.clearSeekTo();
@@ -743,7 +728,7 @@
CHECK(buffer != NULL);
int64_t bufferTimeUs;
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &bufferTimeUs));
+ CHECK(buffer->meta_data().findInt64(kKeyTime, &bufferTimeUs));
if (!CloseEnough(bufferTimeUs, actualSeekTimeUs)) {
printf("\n * Attempted seeking to %" PRId64 " us (%.2f secs)",
requestedSeekTimeUs, requestedSeekTimeUs / 1E6);
diff --git a/media/libstagefright/omx/tests/OMXHarness.h b/media/libstagefright/omx/tests/OMXHarness.h
index 4fc0f79..dca787c 100644
--- a/media/libstagefright/omx/tests/OMXHarness.h
+++ b/media/libstagefright/omx/tests/OMXHarness.h
@@ -93,8 +93,6 @@
Condition mMessageAddedCondition;
int32_t mLastMsgGeneration;
int32_t mCurGeneration;
- bool mUseTreble;
- sp<MemoryDealer> mDealer;
sp<IAllocator> mAllocator;
status_t initOMX();
diff --git a/media/libstagefright/rtsp/AH263Assembler.cpp b/media/libstagefright/rtsp/AH263Assembler.cpp
index 75cd911..3436e95 100644
--- a/media/libstagefright/rtsp/AH263Assembler.cpp
+++ b/media/libstagefright/rtsp/AH263Assembler.cpp
@@ -25,7 +25,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ByteUtils.h>
namespace android {
diff --git a/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
index dca5c89..0988774 100644
--- a/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
@@ -26,10 +26,10 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
namespace android {
diff --git a/media/libstagefright/rtsp/AMPEG2TSAssembler.h b/media/libstagefright/rtsp/AMPEG2TSAssembler.h
index f39c2b5..c987b5b 100644
--- a/media/libstagefright/rtsp/AMPEG2TSAssembler.h
+++ b/media/libstagefright/rtsp/AMPEG2TSAssembler.h
@@ -24,7 +24,6 @@
struct AMessage;
struct AString;
-class MetaData;
struct AMPEG2TSAssembler : public ARTPAssembler {
AMPEG2TSAssembler(
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
index 156004c..1e434cb 100644
--- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
@@ -27,8 +27,8 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/Utils.h>
#include <ctype.h>
#include <stdint.h>
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index 8ba9e02..201a5df 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -23,8 +23,6 @@
#include "ARawAudioAssembler.h"
#include "ASessionDescription.h"
-#include "include/avc_utils.h"
-
#include <ctype.h>
#include <media/stagefright/foundation/ABitReader.h>
@@ -32,6 +30,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/foundation/base64.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaDefs.h>
@@ -220,7 +219,7 @@
return csd;
}
-sp<ABuffer> MakeAACCodecSpecificData(const char *params) {
+static sp<ABuffer> MakeAACCodecSpecificData(const char *params) {
AString val;
CHECK(GetAttribute(params, "config", &val));
@@ -258,7 +257,7 @@
}
// From mpeg4-generic configuration data.
-sp<ABuffer> MakeAACCodecSpecificData2(const char *params) {
+static sp<ABuffer> MakeAACCodecSpecificData2(const char *params) {
AString val;
unsigned long objectType;
if (GetAttribute(params, "objectType", &val)) {
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index 1f6b6f7..0667df1 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -22,13 +22,13 @@
#include <fcntl.h>
+#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <utils/ByteOrder.h>
@@ -104,7 +104,7 @@
mFd = -1;
}
-status_t ARTPWriter::addSource(const sp<IMediaSource> &source) {
+status_t ARTPWriter::addSource(const sp<MediaSource> &source) {
mSource = source;
return OK;
}
@@ -173,7 +173,7 @@
return OK;
}
-static void StripStartcode(MediaBuffer *buffer) {
+static void StripStartcode(MediaBufferBase *buffer) {
if (buffer->range_length() < 4) {
return;
}
@@ -195,7 +195,7 @@
#if 0
if (mMode == H264) {
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
CHECK_EQ(mSource->read(&buffer), (status_t)OK);
StripStartcode(buffer);
@@ -265,7 +265,7 @@
}
void ARTPWriter::onRead(const sp<AMessage> &msg) {
- MediaBuffer *mediaBuf;
+ MediaBufferBase *mediaBuf;
status_t err = mSource->read(&mediaBuf);
if (err != OK) {
@@ -523,7 +523,7 @@
ALOGI("%s", sdp.c_str());
}
-void ARTPWriter::makeH264SPropParamSets(MediaBuffer *buffer) {
+void ARTPWriter::makeH264SPropParamSets(MediaBufferBase *buffer) {
static const char kStartCode[] = "\x00\x00\x00\x01";
const uint8_t *data =
@@ -567,12 +567,12 @@
send(buffer, true /* isRTCP */);
}
-void ARTPWriter::sendAVCData(MediaBuffer *mediaBuf) {
+void ARTPWriter::sendAVCData(MediaBufferBase *mediaBuf) {
// 12 bytes RTP header + 2 bytes for the FU-indicator and FU-header.
CHECK_GE(kMaxPacketSize, 12u + 2u);
int64_t timeUs;
- CHECK(mediaBuf->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(mediaBuf->meta_data().findInt64(kKeyTime, &timeUs));
uint32_t rtpTime = mRTPTimeBase + (timeUs * 9 / 100ll);
@@ -663,11 +663,11 @@
mLastNTPTime = GetNowNTP();
}
-void ARTPWriter::sendH263Data(MediaBuffer *mediaBuf) {
+void ARTPWriter::sendH263Data(MediaBufferBase *mediaBuf) {
CHECK_GE(kMaxPacketSize, 12u + 2u);
int64_t timeUs;
- CHECK(mediaBuf->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(mediaBuf->meta_data().findInt64(kKeyTime, &timeUs));
uint32_t rtpTime = mRTPTimeBase + (timeUs * 9 / 100ll);
@@ -741,7 +741,7 @@
return frameSize;
}
-void ARTPWriter::sendAMRData(MediaBuffer *mediaBuf) {
+void ARTPWriter::sendAMRData(MediaBufferBase *mediaBuf) {
const uint8_t *mediaData =
(const uint8_t *)mediaBuf->data() + mediaBuf->range_offset();
@@ -752,7 +752,7 @@
const bool isWide = (mMode == AMR_WB);
int64_t timeUs;
- CHECK(mediaBuf->meta_data()->findInt64(kKeyTime, &timeUs));
+ CHECK(mediaBuf->meta_data().findInt64(kKeyTime, &timeUs));
uint32_t rtpTime = mRTPTimeBase + (timeUs / (isWide ? 250 : 125));
// hexdump(mediaData, mediaLength);
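The RTP timestamp math visible in these hunks is unchanged by this commit but worth noting: for AVC and H.263 the writer scales a microsecond media time onto the 90 kHz RTP clock with timeUs * 9 / 100 (i.e. timeUs * 90000 / 1e6). A small self-contained illustration follows; the helper name is made up for the example.

    #include <cstdint>

    // Convert a media timestamp in microseconds to 90 kHz RTP ticks, as done
    // by sendAVCData()/sendH263Data(): timeUs * 9 / 100 == timeUs * 90000 / 1e6.
    uint32_t toRtp90kHz(uint32_t rtpTimeBase, int64_t timeUs) {
        return rtpTimeBase + static_cast<uint32_t>(timeUs * 9 / 100);
    }

    // e.g. one second of media time advances the RTP clock by 90000 ticks:
    //   toRtp90kHz(0, 1000000) == 90000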
diff --git a/media/libstagefright/rtsp/ARTPWriter.h b/media/libstagefright/rtsp/ARTPWriter.h
index 3c7042e..2f13486 100644
--- a/media/libstagefright/rtsp/ARTPWriter.h
+++ b/media/libstagefright/rtsp/ARTPWriter.h
@@ -37,7 +37,7 @@
struct ARTPWriter : public MediaWriter {
explicit ARTPWriter(int fd);
- virtual status_t addSource(const sp<IMediaSource> &source);
+ virtual status_t addSource(const sp<MediaSource> &source);
virtual bool reachedEOS();
virtual status_t start(MetaData *params);
virtual status_t stop();
@@ -72,7 +72,7 @@
int mRTCPFd;
#endif
- sp<IMediaSource> mSource;
+ sp<MediaSource> mSource;
sp<ALooper> mLooper;
sp<AHandlerReflector<ARTPWriter> > mReflector;
@@ -110,13 +110,13 @@
void addSR(const sp<ABuffer> &buffer);
void addSDES(const sp<ABuffer> &buffer);
- void makeH264SPropParamSets(MediaBuffer *buffer);
+ void makeH264SPropParamSets(MediaBufferBase *buffer);
void dumpSessionDesc();
void sendBye();
- void sendAVCData(MediaBuffer *mediaBuf);
- void sendH263Data(MediaBuffer *mediaBuf);
- void sendAMRData(MediaBuffer *mediaBuf);
+ void sendAVCData(MediaBufferBase *mediaBuf);
+ void sendH263Data(MediaBufferBase *mediaBuf);
+ void sendAMRData(MediaBufferBase *mediaBuf);
void send(const sp<ABuffer> &buffer, bool isRTCP);
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index 0f46c83..d459cbd 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -22,8 +22,8 @@
#include "ASessionDescription.h"
-#include <media/IMediaHTTPConnection.h>
-#include <media/IMediaHTTPService.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/MediaHTTP.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -36,7 +36,7 @@
SDPLoader::SDPLoader(
const sp<AMessage> ¬ify,
uint32_t flags,
- const sp<IMediaHTTPService> &httpService)
+ const sp<MediaHTTPService> &httpService)
: mNotify(notify),
mFlags(flags),
mNetLooper(new ALooper),
diff --git a/media/libstagefright/rtsp/VideoSource.h b/media/libstagefright/rtsp/VideoSource.h
index ae0c85b..4be9bf6 100644
--- a/media/libstagefright/rtsp/VideoSource.h
+++ b/media/libstagefright/rtsp/VideoSource.h
@@ -18,9 +18,9 @@
#define VIDEO_SOURCE_H_
+#include <media/MediaSource.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
namespace android {
diff --git a/media/libstagefright/rtsp/rtp_test.cpp b/media/libstagefright/rtsp/rtp_test.cpp
index e612a8d..4590699 100644
--- a/media/libstagefright/rtsp/rtp_test.cpp
+++ b/media/libstagefright/rtsp/rtp_test.cpp
@@ -20,12 +20,11 @@
#include <binder/ProcessState.h>
+#include <media/DataSource.h>
#include <media/stagefright/foundation/base64.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MetaData.h>
#include <media/stagefright/SimpleDecodingSource.h>
#include "ARTPSession.h"
diff --git a/media/libstagefright/tests/Android.bp b/media/libstagefright/tests/Android.bp
index 35119c2..be10fdc 100644
--- a/media/libstagefright/tests/Android.bp
+++ b/media/libstagefright/tests/Android.bp
@@ -1,45 +1,6 @@
// Build the unit tests.
cc_test {
- name: "SurfaceMediaSource_test",
-
- srcs: [
- "SurfaceMediaSource_test.cpp",
- "DummyRecorder.cpp",
- ],
-
- shared_libs: [
- "libEGL",
- "libGLESv2",
- "libbinder",
- "libcutils",
- "libgui",
- "libmedia",
- "libstagefright",
- "libstagefright_foundation",
- "libstagefright_omx",
- "libsync",
- "libui",
- "libutils",
- "liblog",
- ],
-
- include_dirs: [
- "frameworks/av/media/libstagefright",
- "frameworks/av/media/libstagefright/include",
- "frameworks/native/include/media/openmax",
- "frameworks/native/include/media/hardware",
- ],
-
- cflags: [
- "-Werror",
- "-Wall",
- ],
-
- compile_multilib: "32",
-}
-
-cc_test {
name: "MediaCodecListOverrides_test",
srcs: ["MediaCodecListOverrides_test.cpp"],
diff --git a/media/libstagefright/tests/DummyRecorder.cpp b/media/libstagefright/tests/DummyRecorder.cpp
deleted file mode 100644
index 8f17088..0000000
--- a/media/libstagefright/tests/DummyRecorder.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "DummyRecorder"
-// #define LOG_NDEBUG 0
-
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
-#include "DummyRecorder.h"
-
-#include <utils/Log.h>
-
-namespace android {
-
-// static
-void *DummyRecorder::threadWrapper(void *pthis) {
- ALOGV("ThreadWrapper: %p", pthis);
- DummyRecorder *writer = static_cast<DummyRecorder *>(pthis);
- writer->readFromSource();
- return NULL;
-}
-
-
-status_t DummyRecorder::start() {
- ALOGV("Start");
- mStarted = true;
-
- mSource->start();
-
- pthread_attr_t attr;
- pthread_attr_init(&attr);
- pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
- int err = pthread_create(&mThread, &attr, threadWrapper, this);
- pthread_attr_destroy(&attr);
-
- if (err) {
- ALOGE("Error creating thread!");
- return -ENODEV;
- }
- return OK;
-}
-
-
-status_t DummyRecorder::stop() {
- ALOGV("Stop");
- mStarted = false;
-
- mSource->stop();
- void *dummy;
- pthread_join(mThread, &dummy);
- status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
-
- ALOGV("Ending the reading thread");
- return err;
-}
-
-// pretend to read the source buffers
-void DummyRecorder::readFromSource() {
- ALOGV("ReadFromSource");
- if (!mStarted) {
- return;
- }
-
- status_t err = OK;
- MediaBuffer *buffer;
- ALOGV("A fake writer accessing the frames");
- while (mStarted && (err = mSource->read(&buffer)) == OK){
- // if not getting a valid buffer from source, then exit
- if (buffer == NULL) {
- return;
- }
- buffer->release();
- buffer = NULL;
- }
-}
-
-
-} // end of namespace android
diff --git a/media/libstagefright/tests/DummyRecorder.h b/media/libstagefright/tests/DummyRecorder.h
deleted file mode 100644
index 0759777..0000000
--- a/media/libstagefright/tests/DummyRecorder.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DUMMY_RECORDER_H_
-#define DUMMY_RECORDER_H_
-
-#include <pthread.h>
-#include <utils/String8.h>
-#include <media/stagefright/foundation/ABase.h>
-
-
-namespace android {
-
-struct MediaSource;
-class MediaBuffer;
-
-class DummyRecorder {
- public:
- // The media source from which this will receive frames
- sp<MediaSource> mSource;
- bool mStarted;
- pthread_t mThread;
-
- status_t start();
- status_t stop();
-
- // actual entry point for the thread
- void readFromSource();
-
- // static function to wrap the actual thread entry point
- static void *threadWrapper(void *pthis);
-
- explicit DummyRecorder(const sp<MediaSource> &source) : mSource(source)
- , mStarted(false) {}
- ~DummyRecorder( ) {}
-
- private:
-
- DISALLOW_EVIL_CONSTRUCTORS(DummyRecorder);
-};
-
-} // end of namespace android
-#endif
-
-
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
deleted file mode 100644
index 7c464ff..0000000
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ /dev/null
@@ -1,944 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "SurfaceMediaSource_test"
-
-#include <gtest/gtest.h>
-#include <utils/String8.h>
-#include <utils/String16.h>
-#include <utils/Errors.h>
-#include <fcntl.h>
-#include <unistd.h>
-
-#include <GLES2/gl2.h>
-
-#include <media/stagefright/SurfaceMediaSource.h>
-#include <media/mediarecorder.h>
-
-#include <ui/GraphicBuffer.h>
-#include <gui/Surface.h>
-#include <gui/ISurfaceComposer.h>
-#include <gui/Surface.h>
-#include <gui/SurfaceComposerClient.h>
-
-#include <binder/ProcessState.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <OMX_Component.h>
-
-#include "DummyRecorder.h"
-
-
-namespace android {
-
-class GLTest : public ::testing::Test {
-protected:
-
- GLTest():
- mEglDisplay(EGL_NO_DISPLAY),
- mEglSurface(EGL_NO_SURFACE),
- mEglContext(EGL_NO_CONTEXT) {
- }
-
- virtual void SetUp() {
- ALOGV("GLTest::SetUp()");
- mEglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY);
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- ASSERT_NE(EGL_NO_DISPLAY, mEglDisplay);
-
- EGLint majorVersion;
- EGLint minorVersion;
- EXPECT_TRUE(eglInitialize(mEglDisplay, &majorVersion, &minorVersion));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- RecordProperty("EglVersionMajor", majorVersion);
- RecordProperty("EglVersionMajor", minorVersion);
-
- EGLint numConfigs = 0;
- EXPECT_TRUE(eglChooseConfig(mEglDisplay, getConfigAttribs(), &mGlConfig,
- 1, &numConfigs));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
-
- char* displaySecsEnv = getenv("GLTEST_DISPLAY_SECS");
- if (displaySecsEnv != NULL) {
- mDisplaySecs = atoi(displaySecsEnv);
- if (mDisplaySecs < 0) {
- mDisplaySecs = 0;
- }
- } else {
- mDisplaySecs = 0;
- }
-
- if (mDisplaySecs > 0) {
- mComposerClient = new SurfaceComposerClient;
- ASSERT_EQ(NO_ERROR, mComposerClient->initCheck());
-
- mSurfaceControl = mComposerClient->createSurface(
- String8("Test Surface"),
- getSurfaceWidth(), getSurfaceHeight(),
- PIXEL_FORMAT_RGB_888, 0);
-
- ASSERT_TRUE(mSurfaceControl != NULL);
- ASSERT_TRUE(mSurfaceControl->isValid());
-
- SurfaceComposerClient::openGlobalTransaction();
- ASSERT_EQ(NO_ERROR, mSurfaceControl->setLayer(0x7FFFFFFF));
- ASSERT_EQ(NO_ERROR, mSurfaceControl->show());
- SurfaceComposerClient::closeGlobalTransaction();
-
- sp<ANativeWindow> window = mSurfaceControl->getSurface();
- mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
- window.get(), NULL);
- } else {
- ALOGV("No actual display. Choosing EGLSurface based on SurfaceMediaSource");
- sp<IGraphicBufferProducer> sms = (new SurfaceMediaSource(
- getSurfaceWidth(), getSurfaceHeight()))->getProducer();
- sp<Surface> stc = new Surface(sms);
- sp<ANativeWindow> window = stc;
-
- mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
- window.get(), NULL);
- }
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- ASSERT_NE(EGL_NO_SURFACE, mEglSurface);
-
- mEglContext = eglCreateContext(mEglDisplay, mGlConfig, EGL_NO_CONTEXT,
- getContextAttribs());
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- ASSERT_NE(EGL_NO_CONTEXT, mEglContext);
-
- EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface,
- mEglContext));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
-
- EGLint w, h;
- EXPECT_TRUE(eglQuerySurface(mEglDisplay, mEglSurface, EGL_WIDTH, &w));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- EXPECT_TRUE(eglQuerySurface(mEglDisplay, mEglSurface, EGL_HEIGHT, &h));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- RecordProperty("EglSurfaceWidth", w);
- RecordProperty("EglSurfaceHeight", h);
-
- glViewport(0, 0, w, h);
- ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
- }
-
- virtual void TearDown() {
- // Display the result
- if (mDisplaySecs > 0 && mEglSurface != EGL_NO_SURFACE) {
- eglSwapBuffers(mEglDisplay, mEglSurface);
- sleep(mDisplaySecs);
- }
-
- if (mComposerClient != NULL) {
- mComposerClient->dispose();
- }
- if (mEglContext != EGL_NO_CONTEXT) {
- eglDestroyContext(mEglDisplay, mEglContext);
- }
- if (mEglSurface != EGL_NO_SURFACE) {
- eglDestroySurface(mEglDisplay, mEglSurface);
- }
- if (mEglDisplay != EGL_NO_DISPLAY) {
- eglTerminate(mEglDisplay);
- }
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- }
-
- virtual EGLint const* getConfigAttribs() {
- ALOGV("GLTest getConfigAttribs");
- static EGLint sDefaultConfigAttribs[] = {
- EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
- EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
- EGL_RED_SIZE, 8,
- EGL_GREEN_SIZE, 8,
- EGL_BLUE_SIZE, 8,
- EGL_ALPHA_SIZE, 8,
- EGL_DEPTH_SIZE, 16,
- EGL_STENCIL_SIZE, 8,
- EGL_NONE };
-
- return sDefaultConfigAttribs;
- }
-
- virtual EGLint const* getContextAttribs() {
- static EGLint sDefaultContextAttribs[] = {
- EGL_CONTEXT_CLIENT_VERSION, 2,
- EGL_NONE };
-
- return sDefaultContextAttribs;
- }
-
- virtual EGLint getSurfaceWidth() {
- return 512;
- }
-
- virtual EGLint getSurfaceHeight() {
- return 512;
- }
-
- void loadShader(GLenum shaderType, const char* pSource, GLuint* outShader) {
- GLuint shader = glCreateShader(shaderType);
- ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
- if (shader) {
- glShaderSource(shader, 1, &pSource, NULL);
- ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
- glCompileShader(shader);
- ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
- GLint compiled = 0;
- glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
- ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
- if (!compiled) {
- GLint infoLen = 0;
- glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
- ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
- if (infoLen) {
- char* buf = (char*) malloc(infoLen);
- if (buf) {
- glGetShaderInfoLog(shader, infoLen, NULL, buf);
- printf("Shader compile log:\n%s\n", buf);
- free(buf);
- FAIL();
- }
- } else {
- char* buf = (char*) malloc(0x1000);
- if (buf) {
- glGetShaderInfoLog(shader, 0x1000, NULL, buf);
- printf("Shader compile log:\n%s\n", buf);
- free(buf);
- FAIL();
- }
- }
- glDeleteShader(shader);
- shader = 0;
- }
- }
- ASSERT_TRUE(shader != 0);
- *outShader = shader;
- }
-
- void createProgram(const char* pVertexSource, const char* pFragmentSource,
- GLuint* outPgm) {
- GLuint vertexShader, fragmentShader;
- {
- SCOPED_TRACE("compiling vertex shader");
- loadShader(GL_VERTEX_SHADER, pVertexSource, &vertexShader);
- if (HasFatalFailure()) {
- return;
- }
- }
- {
- SCOPED_TRACE("compiling fragment shader");
- loadShader(GL_FRAGMENT_SHADER, pFragmentSource, &fragmentShader);
- if (HasFatalFailure()) {
- return;
- }
- }
-
- GLuint program = glCreateProgram();
- ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
- if (program) {
- glAttachShader(program, vertexShader);
- ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
- glAttachShader(program, fragmentShader);
- ASSERT_EQ(GLenum(GL_NO_ERROR), glGetError());
- glLinkProgram(program);
- GLint linkStatus = GL_FALSE;
- glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
- if (linkStatus != GL_TRUE) {
- GLint bufLength = 0;
- glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
- if (bufLength) {
- char* buf = (char*) malloc(bufLength);
- if (buf) {
- glGetProgramInfoLog(program, bufLength, NULL, buf);
- printf("Program link log:\n%s\n", buf);
- free(buf);
- FAIL();
- }
- }
- glDeleteProgram(program);
- program = 0;
- }
- }
- glDeleteShader(vertexShader);
- glDeleteShader(fragmentShader);
- ASSERT_TRUE(program != 0);
- *outPgm = program;
- }
-
- static int abs(int value) {
- return value > 0 ? value : -value;
- }
-
- ::testing::AssertionResult checkPixel(int x, int y, int r,
- int g, int b, int a, int tolerance=2) {
- GLubyte pixel[4];
- String8 msg;
- glReadPixels(x, y, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixel);
- GLenum err = glGetError();
- if (err != GL_NO_ERROR) {
- msg += String8::format("error reading pixel: %#x", err);
- while ((err = glGetError()) != GL_NO_ERROR) {
- msg += String8::format(", %#x", err);
- }
- fprintf(stderr, "pixel check failure: %s\n", msg.string());
- return ::testing::AssertionFailure(
- ::testing::Message(msg.string()));
- }
- if (r >= 0 && abs(r - int(pixel[0])) > tolerance) {
- msg += String8::format("r(%d isn't %d)", pixel[0], r);
- }
- if (g >= 0 && abs(g - int(pixel[1])) > tolerance) {
- if (!msg.isEmpty()) {
- msg += " ";
- }
- msg += String8::format("g(%d isn't %d)", pixel[1], g);
- }
- if (b >= 0 && abs(b - int(pixel[2])) > tolerance) {
- if (!msg.isEmpty()) {
- msg += " ";
- }
- msg += String8::format("b(%d isn't %d)", pixel[2], b);
- }
- if (a >= 0 && abs(a - int(pixel[3])) > tolerance) {
- if (!msg.isEmpty()) {
- msg += " ";
- }
- msg += String8::format("a(%d isn't %d)", pixel[3], a);
- }
- if (!msg.isEmpty()) {
- fprintf(stderr, "pixel check failure: %s\n", msg.string());
- return ::testing::AssertionFailure(
- ::testing::Message(msg.string()));
- } else {
- return ::testing::AssertionSuccess();
- }
- }
-
- int mDisplaySecs;
- sp<SurfaceComposerClient> mComposerClient;
- sp<SurfaceControl> mSurfaceControl;
-
- EGLDisplay mEglDisplay;
- EGLSurface mEglSurface;
- EGLContext mEglContext;
- EGLConfig mGlConfig;
-};
-
-///////////////////////////////////////////////////////////////////////
-// Class for the NON-GL tests
-///////////////////////////////////////////////////////////////////////
-class SurfaceMediaSourceTest : public ::testing::Test {
-public:
-
- SurfaceMediaSourceTest( ): mYuvTexWidth(176), mYuvTexHeight(144) { }
- void oneBufferPass(int width, int height );
- void oneBufferPassNoFill(int width, int height );
- static void fillYV12Buffer(uint8_t* buf, int w, int h, int stride) ;
- static void fillYV12BufferRect(uint8_t* buf, int w, int h,
- int stride, const android_native_rect_t& rect) ;
-protected:
-
- virtual void SetUp() {
- android::ProcessState::self()->startThreadPool();
- mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
- mSTC = new Surface(mSMS->getProducer());
- mANW = mSTC;
- }
-
- virtual void TearDown() {
- mSMS.clear();
- mSTC.clear();
- mANW.clear();
- }
-
- const int mYuvTexWidth;
- const int mYuvTexHeight;
-
- sp<SurfaceMediaSource> mSMS;
- sp<Surface> mSTC;
- sp<ANativeWindow> mANW;
-};
-
-///////////////////////////////////////////////////////////////////////
-// Class for the GL tests
-///////////////////////////////////////////////////////////////////////
-class SurfaceMediaSourceGLTest : public GLTest {
-public:
-
- SurfaceMediaSourceGLTest( ): mYuvTexWidth(176), mYuvTexHeight(144) { }
- virtual EGLint const* getConfigAttribs();
- void oneBufferPassGL(int num = 0);
- static sp<MediaRecorder> setUpMediaRecorder(int fileDescriptor, int videoSource,
- int outputFormat, int videoEncoder, int width, int height, int fps);
-protected:
-
- virtual void SetUp() {
- ALOGV("SMS-GLTest::SetUp()");
- android::ProcessState::self()->startThreadPool();
- mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
- mSTC = new Surface(mSMS->getProducer());
- mANW = mSTC;
-
- // Doing the setup related to the GL Side
- GLTest::SetUp();
- }
-
- virtual void TearDown() {
- mSMS.clear();
- mSTC.clear();
- mANW.clear();
- GLTest::TearDown();
- }
-
- void setUpEGLSurfaceFromMediaRecorder(sp<MediaRecorder>& mr);
-
- const int mYuvTexWidth;
- const int mYuvTexHeight;
-
- sp<SurfaceMediaSource> mSMS;
- sp<Surface> mSTC;
- sp<ANativeWindow> mANW;
-};
-
-/////////////////////////////////////////////////////////////////////
-// Methods in SurfaceMediaSourceGLTest
-/////////////////////////////////////////////////////////////////////
-EGLint const* SurfaceMediaSourceGLTest::getConfigAttribs() {
- ALOGV("SurfaceMediaSourceGLTest getConfigAttribs");
- static EGLint sDefaultConfigAttribs[] = {
- EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
- EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
- EGL_RED_SIZE, 8,
- EGL_GREEN_SIZE, 8,
- EGL_BLUE_SIZE, 8,
- EGL_RECORDABLE_ANDROID, EGL_TRUE,
- EGL_NONE };
-
- return sDefaultConfigAttribs;
-}
-
-// One pass of dequeuing and queuing a GLBuffer
-void SurfaceMediaSourceGLTest::oneBufferPassGL(int num) {
- int d = num % 50;
- float f = 0.2f; // 0.1f * d;
-
- glClearColor(0, 0.3, 0, 0.6);
- glClear(GL_COLOR_BUFFER_BIT);
-
- glEnable(GL_SCISSOR_TEST);
- glScissor(4 + d, 4 + d, 4, 4);
- glClearColor(1.0 - f, f, f, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- glScissor(24 + d, 48 + d, 4, 4);
- glClearColor(f, 1.0 - f, f, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- glScissor(37 + d, 17 + d, 4, 4);
- glClearColor(f, f, 1.0 - f, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- // The following call dequeues and queues the buffer
- eglSwapBuffers(mEglDisplay, mEglSurface);
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- glDisable(GL_SCISSOR_TEST);
-}
-
-// Set up the MediaRecorder which runs in the same process as mediaserver
-sp<MediaRecorder> SurfaceMediaSourceGLTest::setUpMediaRecorder(int fd, int videoSource,
- int outputFormat, int videoEncoder, int width, int height, int fps) {
- sp<MediaRecorder> mr = new MediaRecorder(String16());
- mr->setVideoSource(videoSource);
- mr->setOutputFormat(outputFormat);
- mr->setVideoEncoder(videoEncoder);
- mr->setOutputFile(fd);
- mr->setVideoSize(width, height);
- mr->setVideoFrameRate(fps);
- mr->prepare();
- ALOGV("Starting MediaRecorder...");
- CHECK_EQ((status_t)OK, mr->start());
- return mr;
-}
-
-// query the mediarecorder for a surfacemediasource and create an egl surface with that
-void SurfaceMediaSourceGLTest::setUpEGLSurfaceFromMediaRecorder(sp<MediaRecorder>& mr) {
- sp<IGraphicBufferProducer> iST = mr->querySurfaceMediaSourceFromMediaServer();
- mSTC = new Surface(iST);
- mANW = mSTC;
-
- if (mEglSurface != EGL_NO_SURFACE) {
- EXPECT_TRUE(eglDestroySurface(mEglDisplay, mEglSurface));
- mEglSurface = EGL_NO_SURFACE;
- }
- mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
- mANW.get(), NULL);
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- ASSERT_NE(EGL_NO_SURFACE, mEglSurface) ;
-
- EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface,
- mEglContext));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
-}
-
-
-/////////////////////////////////////////////////////////////////////
-// Methods in SurfaceMediaSourceTest
-/////////////////////////////////////////////////////////////////////
-
-// One pass of dequeuing and queuing the buffer. Fill it in with
-// cpu YV12 buffer
-void SurfaceMediaSourceTest::oneBufferPass(int width, int height ) {
- ANativeWindowBuffer* anb;
- ASSERT_EQ(NO_ERROR, native_window_dequeue_buffer_and_wait(mANW.get(), &anb));
- ASSERT_TRUE(anb != NULL);
-
-
- // Fill the buffer with a checkerboard pattern
- uint8_t* img = NULL;
- sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
- buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
- SurfaceMediaSourceTest::fillYV12Buffer(img, width, height, buf->getStride());
- buf->unlock();
-
- ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer(),
- -1));
-}
-
-// Dequeuing and queuing the buffer without really filling it in.
-void SurfaceMediaSourceTest::oneBufferPassNoFill(
- int /* width */, int /* height */) {
- ANativeWindowBuffer* anb;
- ASSERT_EQ(NO_ERROR, native_window_dequeue_buffer_and_wait(mANW.get(), &anb));
- ASSERT_TRUE(anb != NULL);
-
- // We do not fill the buffer in. Just queue it back.
- sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
- ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer(),
- -1));
-}
-
-// Fill a YV12 buffer with a multi-colored checkerboard pattern
-void SurfaceMediaSourceTest::fillYV12Buffer(uint8_t* buf, int w, int h, int stride) {
- const int blockWidth = w > 16 ? w / 16 : 1;
- const int blockHeight = h > 16 ? h / 16 : 1;
- const int yuvTexOffsetY = 0;
- int yuvTexStrideY = stride;
- int yuvTexOffsetV = yuvTexStrideY * h;
- int yuvTexStrideV = (yuvTexStrideY/2 + 0xf) & ~0xf;
- int yuvTexOffsetU = yuvTexOffsetV + yuvTexStrideV * h/2;
- int yuvTexStrideU = yuvTexStrideV;
- for (int x = 0; x < w; x++) {
- for (int y = 0; y < h; y++) {
- int parityX = (x / blockWidth) & 1;
- int parityY = (y / blockHeight) & 1;
- unsigned char intensity = (parityX ^ parityY) ? 63 : 191;
- buf[yuvTexOffsetY + (y * yuvTexStrideY) + x] = intensity;
- if (x < w / 2 && y < h / 2) {
- buf[yuvTexOffsetU + (y * yuvTexStrideU) + x] = intensity;
- if (x * 2 < w / 2 && y * 2 < h / 2) {
- buf[yuvTexOffsetV + (y*2 * yuvTexStrideV) + x*2 + 0] =
- buf[yuvTexOffsetV + (y*2 * yuvTexStrideV) + x*2 + 1] =
- buf[yuvTexOffsetV + ((y*2+1) * yuvTexStrideV) + x*2 + 0] =
- buf[yuvTexOffsetV + ((y*2+1) * yuvTexStrideV) + x*2 + 1] =
- intensity;
- }
- }
- }
- }
-}
-
-// Fill a YV12 buffer with red outside a given rectangle and green inside it.
-void SurfaceMediaSourceTest::fillYV12BufferRect(uint8_t* buf, int w,
- int h, int stride, const android_native_rect_t& rect) {
- const int yuvTexOffsetY = 0;
- int yuvTexStrideY = stride;
- int yuvTexOffsetV = yuvTexStrideY * h;
- int yuvTexStrideV = (yuvTexStrideY/2 + 0xf) & ~0xf;
- int yuvTexOffsetU = yuvTexOffsetV + yuvTexStrideV * h/2;
- int yuvTexStrideU = yuvTexStrideV;
- for (int x = 0; x < w; x++) {
- for (int y = 0; y < h; y++) {
- bool inside = rect.left <= x && x < rect.right &&
- rect.top <= y && y < rect.bottom;
- buf[yuvTexOffsetY + (y * yuvTexStrideY) + x] = inside ? 240 : 64;
- if (x < w / 2 && y < h / 2) {
- bool inside = rect.left <= 2*x && 2*x < rect.right &&
- rect.top <= 2*y && 2*y < rect.bottom;
- buf[yuvTexOffsetU + (y * yuvTexStrideU) + x] = 16;
- buf[yuvTexOffsetV + (y * yuvTexStrideV) + x] =
- inside ? 16 : 255;
- }
- }
- }
-} ///////// End of class SurfaceMediaSourceTest
-
-///////////////////////////////////////////////////////////////////
-// Class to imitate the recording /////////////////////////////
-// ////////////////////////////////////////////////////////////////
-struct SimpleDummyRecorder {
- sp<MediaSource> mSource;
-
- explicit SimpleDummyRecorder
- (const sp<MediaSource> &source): mSource(source) {}
-
- status_t start() { return mSource->start();}
- status_t stop() { return mSource->stop();}
-
- // fakes reading from a media source
- status_t readFromSource() {
- MediaBuffer *buffer;
- status_t err = mSource->read(&buffer);
- if (err != OK) {
- return err;
- }
- buffer->release();
- buffer = NULL;
- return OK;
- }
-};
-///////////////////////////////////////////////////////////////////
-// TESTS
-// SurfaceMediaSourceTest class contains tests that fill the buffers
-// using the cpu calls
-// SurfaceMediaSourceGLTest class contains tests that fill the buffers
-// using the GL calls.
-// TODO: None of the tests actually verify the encoded images.. so at this point,
-// these are mostly functionality tests + visual inspection
-//////////////////////////////////////////////////////////////////////
-
-// Just pass one buffer from the native_window to the SurfaceMediaSource
-// Dummy Encoder
-static int testId = 1;
-TEST_F(SurfaceMediaSourceTest, DISABLED_DummyEncodingFromCpuFilledYV12BufferNpotOneBufferPass) {
- ALOGV("Test # %d", testId++);
- ALOGV("Testing OneBufferPass ******************************");
-
- ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
- HAL_PIXEL_FORMAT_YV12));
- oneBufferPass(mYuvTexWidth, mYuvTexHeight);
-}
-
-// Pass the buffer with the wrong height and width; it should not be accepted
-// Dummy Encoder
-TEST_F(SurfaceMediaSourceTest, DISABLED_DummyEncodingFromCpuFilledYV12BufferNpotWrongSizeBufferPass) {
- ALOGV("Test # %d", testId++);
- ALOGV("Testing Wrong size BufferPass ******************************");
-
- // setting the client side buffer size different than the server size
- ASSERT_EQ(NO_ERROR, native_window_set_buffers_dimensions(mANW.get(),
- 10, 10));
- ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
- HAL_PIXEL_FORMAT_YV12));
-
- ANativeWindowBuffer* anb;
-
- // Note: make sure we get an ERROR back when dequeuing!
- ASSERT_NE(NO_ERROR, native_window_dequeue_buffer_and_wait(mANW.get(), &anb));
-}
-
-// pass multiple buffers from the native_window to the SurfaceMediaSource
-// Dummy Encoder
-TEST_F(SurfaceMediaSourceTest, DISABLED_DummyEncodingFromCpuFilledYV12BufferNpotMultiBufferPass) {
- ALOGV("Test # %d", testId++);
- ALOGV("Testing MultiBufferPass, Dummy Recorder *********************");
- ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
- HAL_PIXEL_FORMAT_YV12));
-
- SimpleDummyRecorder writer(mSMS);
- writer.start();
-
- int32_t nFramesCount = 0;
- while (nFramesCount < 300) {
- oneBufferPass(mYuvTexWidth, mYuvTexHeight);
-
- ASSERT_EQ(NO_ERROR, writer.readFromSource());
-
- nFramesCount++;
- }
- writer.stop();
-}
-
-// Delayed pass of multiple buffers from the native_window to the SurfaceMediaSource
-// Dummy Encoder
-TEST_F(SurfaceMediaSourceTest, DummyLagEncodingFromCpuFilledYV12BufferNpotMultiBufferPass) {
- ALOGV("Test # %d", testId++);
- ALOGV("Testing MultiBufferPass, Dummy Recorder Lagging **************");
-
- ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
- HAL_PIXEL_FORMAT_YV12));
-
- SimpleDummyRecorder writer(mSMS);
- writer.start();
-
- int32_t nFramesCount = 1;
- const int FRAMES_LAG = SurfaceMediaSource::MIN_UNDEQUEUED_BUFFERS;
-
- while (nFramesCount <= 300) {
- ALOGV("Frame: %d", nFramesCount);
- oneBufferPass(mYuvTexWidth, mYuvTexHeight);
- // Forcing the writer to lag behind a few frames
- if (nFramesCount > FRAMES_LAG) {
- ASSERT_EQ(NO_ERROR, writer.readFromSource());
- }
- nFramesCount++;
- }
- writer.stop();
-}
-
-// pass multiple buffers from the native_window to the SurfaceMediaSource
-// A dummy writer (MULTITHREADED) is used to simulate actual MPEG4Writer
-TEST_F(SurfaceMediaSourceTest, DummyThreadedEncodingFromCpuFilledYV12BufferNpotMultiBufferPass) {
- ALOGV("Test # %d", testId++);
- ALOGV("Testing MultiBufferPass, Dummy Recorder Multi-Threaded **********");
- ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
- HAL_PIXEL_FORMAT_YV12));
-
- DummyRecorder writer(mSMS);
- writer.start();
-
- int32_t nFramesCount = 0;
- while (nFramesCount <= 300) {
- ALOGV("Frame: %d", nFramesCount);
- oneBufferPass(mYuvTexWidth, mYuvTexHeight);
-
- nFramesCount++;
- }
- writer.stop();
-}
-
-// Test to examine actual encoding using mediarecorder
-// We use the mediaserver to create a mediarecorder and send
-// it back to us. So SurfaceMediaSource lives in the same process
-// as the mediaserver.
-// Very close to the actual camera, except that the
-// buffers are filled and queued by the CPU instead of GL.
-TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuYV12BufferNpotWriteMediaServer) {
- ALOGV("Test # %d", testId++);
- ALOGV("************** Testing the whole pipeline with actual MediaRecorder ***********");
- ALOGV("************** SurfaceMediaSource is same process as mediaserver ***********");
-
- const char *fileName = "/sdcard/outputSurfEncMSource.mp4";
- int fd = open(fileName, O_RDWR | O_CREAT, 0744);
- if (fd < 0) {
- ALOGE("ERROR: Could not open the the file %s, fd = %d !!", fileName, fd);
- }
- CHECK(fd >= 0);
-
- sp<MediaRecorder> mr = SurfaceMediaSourceGLTest::setUpMediaRecorder(fd,
- VIDEO_SOURCE_SURFACE, OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264,
- mYuvTexWidth, mYuvTexHeight, 30);
- // get the reference to the surfacemediasource living in
- // mediaserver that is created by stagefrightrecorder
- sp<IGraphicBufferProducer> iST = mr->querySurfaceMediaSourceFromMediaServer();
- mSTC = new Surface(iST);
- mANW = mSTC;
- ASSERT_EQ(NO_ERROR, native_window_api_connect(mANW.get(), NATIVE_WINDOW_API_CPU));
- ASSERT_EQ(NO_ERROR, native_window_set_buffers_format(mANW.get(),
- HAL_PIXEL_FORMAT_YV12));
-
- int32_t nFramesCount = 0;
- while (nFramesCount <= 300) {
- oneBufferPassNoFill(mYuvTexWidth, mYuvTexHeight);
- nFramesCount++;
- ALOGV("framesCount = %d", nFramesCount);
- }
-
- ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_CPU));
- ALOGV("Stopping MediaRecorder...");
- CHECK_EQ((status_t)OK, mr->stop());
- mr.clear();
- close(fd);
-}
-
-//////////////////////////////////////////////////////////////////////
-// GL tests
-/////////////////////////////////////////////////////////////////////
-
-// Test to examine whether we can choose the Recordable Android GLConfig
-// DummyRecorder used- no real encoding here
-TEST_F(SurfaceMediaSourceGLTest, ChooseAndroidRecordableEGLConfigDummyWriter) {
- ALOGV("Test # %d", testId++);
- ALOGV("Verify creating a surface w/ right config + dummy writer*********");
-
- mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
- mSTC = new Surface(mSMS->getProducer());
- mANW = mSTC;
-
- DummyRecorder writer(mSMS);
- writer.start();
-
- if (mEglSurface != EGL_NO_SURFACE) {
- EXPECT_TRUE(eglDestroySurface(mEglDisplay, mEglSurface));
- mEglSurface = EGL_NO_SURFACE;
- }
-
- mEglSurface = eglCreateWindowSurface(mEglDisplay, mGlConfig,
- mANW.get(), NULL);
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- ASSERT_NE(EGL_NO_SURFACE, mEglSurface) ;
-
- EXPECT_TRUE(eglMakeCurrent(mEglDisplay, mEglSurface, mEglSurface,
- mEglContext));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
-
- int32_t nFramesCount = 0;
- while (nFramesCount <= 300) {
- oneBufferPassGL();
- nFramesCount++;
- ALOGV("framesCount = %d", nFramesCount);
- }
-
- EXPECT_TRUE(eglMakeCurrent(mEglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE,
- EGL_NO_CONTEXT));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- eglDestroySurface(mEglDisplay, mEglSurface);
- mEglSurface = EGL_NO_SURFACE;
-
- writer.stop();
-}
-// Test to examine whether we can render GL buffers in to the surface
-// created with the native window handle
-TEST_F(SurfaceMediaSourceGLTest, RenderingToRecordableEGLSurfaceWorks) {
- ALOGV("Test # %d", testId++);
- ALOGV("RenderingToRecordableEGLSurfaceWorks *********************");
- // Do the producer side of things
- glClearColor(0.6, 0.6, 0.6, 0.6);
- glClear(GL_COLOR_BUFFER_BIT);
-
- glEnable(GL_SCISSOR_TEST);
- glScissor(4, 4, 4, 4);
- glClearColor(1.0, 0.0, 0.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- glScissor(24, 48, 4, 4);
- glClearColor(0.0, 1.0, 0.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- glScissor(37, 17, 4, 4);
- glClearColor(0.0, 0.0, 1.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- EXPECT_TRUE(checkPixel( 0, 0, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(63, 0, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(63, 63, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel( 0, 63, 153, 153, 153, 153));
-
- EXPECT_TRUE(checkPixel( 4, 7, 255, 0, 0, 255));
- EXPECT_TRUE(checkPixel(25, 51, 0, 255, 0, 255));
- EXPECT_TRUE(checkPixel(40, 19, 0, 0, 255, 255));
- EXPECT_TRUE(checkPixel(29, 51, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel( 5, 32, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(13, 8, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(46, 3, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(30, 33, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel( 6, 52, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(55, 33, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(16, 29, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel( 1, 30, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(41, 37, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(46, 29, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel(15, 25, 153, 153, 153, 153));
- EXPECT_TRUE(checkPixel( 3, 52, 153, 153, 153, 153));
-}
-
-// Test to examine the actual encoding with GL buffers
-// Actual encoder, Actual GL Buffers Filled SurfaceMediaSource
-// The same pattern is rendered every frame
-TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaSameImageEachBufNpotWrite) {
- ALOGV("Test # %d", testId++);
- ALOGV("************** Testing the whole pipeline with actual Recorder ***********");
- ALOGV("************** GL Filling the buffers ***********");
- // Note: No need to set the colorformat for the buffers. The colorformat is
- // in the GRAlloc buffers itself.
-
- const char *fileName = "/sdcard/outputSurfEncMSourceGL.mp4";
- int fd = open(fileName, O_RDWR | O_CREAT, 0744);
- if (fd < 0) {
- ALOGE("ERROR: Could not open the the file %s, fd = %d !!", fileName, fd);
- }
- CHECK(fd >= 0);
-
- sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_SURFACE,
- OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
-
- // get the reference to the surfacemediasource living in
- // mediaserver that is created by stagefrightrecorder
- setUpEGLSurfaceFromMediaRecorder(mr);
-
- int32_t nFramesCount = 0;
- while (nFramesCount <= 300) {
- oneBufferPassGL();
- nFramesCount++;
- ALOGV("framesCount = %d", nFramesCount);
- }
-
- EXPECT_TRUE(eglMakeCurrent(mEglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE,
- EGL_NO_CONTEXT));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- eglDestroySurface(mEglDisplay, mEglSurface);
- mEglSurface = EGL_NO_SURFACE;
-
- ALOGV("Stopping MediaRecorder...");
- CHECK_EQ((status_t)OK, mr->stop());
- mr.clear();
- close(fd);
-}
-
-// Test to examine the actual encoding from the GL Buffers
-// Actual encoder, Actual GL Buffers Filled SurfaceMediaSource
-// A different pattern is rendered every frame
-TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaDiffImageEachBufNpotWrite) {
- ALOGV("Test # %d", testId++);
- ALOGV("************** Testing the whole pipeline with actual Recorder ***********");
- ALOGV("************** Diff GL Filling the buffers ***********");
- // Note: No need to set the colorformat for the buffers. The colorformat is
- // in the GRAlloc buffers itself.
-
- const char *fileName = "/sdcard/outputSurfEncMSourceGLDiff.mp4";
- int fd = open(fileName, O_RDWR | O_CREAT, 0744);
- if (fd < 0) {
- ALOGE("ERROR: Could not open the the file %s, fd = %d !!", fileName, fd);
- }
- CHECK(fd >= 0);
-
- sp<MediaRecorder> mr = setUpMediaRecorder(fd, VIDEO_SOURCE_SURFACE,
- OUTPUT_FORMAT_MPEG_4, VIDEO_ENCODER_H264, mYuvTexWidth, mYuvTexHeight, 30);
-
- // get the reference to the surfacemediasource living in
- // mediaserver that is created by stagefrightrecorder
- setUpEGLSurfaceFromMediaRecorder(mr);
-
- int32_t nFramesCount = 0;
- while (nFramesCount <= 300) {
- oneBufferPassGL(nFramesCount);
- nFramesCount++;
- ALOGV("framesCount = %d", nFramesCount);
- }
-
- EXPECT_TRUE(eglMakeCurrent(mEglDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE,
- EGL_NO_CONTEXT));
- ASSERT_EQ(EGL_SUCCESS, eglGetError());
- eglDestroySurface(mEglDisplay, mEglSurface);
- mEglSurface = EGL_NO_SURFACE;
-
- ALOGV("Stopping MediaRecorder...");
- CHECK_EQ((status_t)OK, mr->stop());
- mr.clear();
- close(fd);
-}
-} // namespace android
diff --git a/media/libstagefright/timedtext/TextDescriptions.cpp b/media/libstagefright/timedtext/TextDescriptions.cpp
index c762a74..088eaae 100644
--- a/media/libstagefright/timedtext/TextDescriptions.cpp
+++ b/media/libstagefright/timedtext/TextDescriptions.cpp
@@ -15,7 +15,7 @@
*/
#include "TextDescriptions.h"
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/MediaErrors.h>
namespace android {
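Note: the include swap above tracks the big-endian accessors (U16_AT, U32_AT, and friends) that moved out of media/stagefright/Utils.h into foundation/ByteUtils.h. A minimal sketch of the helpers this file relies on, assuming only the post-move header (illustrative, not part of the change):

    #include <stdint.h>

    #include <media/stagefright/foundation/ByteUtils.h>

    namespace android {

    // Illustrative only: read big-endian fields the way TextDescriptions.cpp does,
    // now via foundation/ByteUtils.h rather than stagefright/Utils.h.
    static void readBigEndianFields(const uint8_t *data) {
        uint16_t first16 = U16_AT(data);      // 2 bytes, big-endian
        uint32_t next32  = U32_AT(data + 2);  // 4 bytes, big-endian
        (void)first16;
        (void)next32;
    }

    }  // namespace android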
diff --git a/media/libstagefright/webm/WebmFrame.cpp b/media/libstagefright/webm/WebmFrame.cpp
index e5134ed..4b0d47c 100644
--- a/media/libstagefright/webm/WebmFrame.cpp
+++ b/media/libstagefright/webm/WebmFrame.cpp
@@ -27,7 +27,7 @@
using namespace webm;
namespace {
-sp<ABuffer> toABuffer(MediaBuffer *mbuf) {
+sp<ABuffer> toABuffer(MediaBufferBase *mbuf) {
sp<ABuffer> abuf = new ABuffer(mbuf->range_length());
memcpy(abuf->data(), (uint8_t*) mbuf->data() + mbuf->range_offset(), mbuf->range_length());
return abuf;
@@ -46,7 +46,7 @@
mEos(true) {
}
-WebmFrame::WebmFrame(int type, bool key, uint64_t absTimecode, MediaBuffer *mbuf)
+WebmFrame::WebmFrame(int type, bool key, uint64_t absTimecode, MediaBufferBase *mbuf)
: mType(type),
mKey(key),
mAbsTimecode(absTimecode),
diff --git a/media/libstagefright/webm/WebmFrame.h b/media/libstagefright/webm/WebmFrame.h
index 4f0b055..a410a87 100644
--- a/media/libstagefright/webm/WebmFrame.h
+++ b/media/libstagefright/webm/WebmFrame.h
@@ -30,7 +30,7 @@
const bool mEos;
WebmFrame();
- WebmFrame(int type, bool key, uint64_t absTimecode, MediaBuffer *buf);
+ WebmFrame(int type, bool key, uint64_t absTimecode, MediaBufferBase *buf);
~WebmFrame() {}
sp<WebmElement> SimpleBlock(uint64_t baseTimecode) const;
diff --git a/media/libstagefright/webm/WebmFrameThread.cpp b/media/libstagefright/webm/WebmFrameThread.cpp
index 71bfbc9..23269af 100644
--- a/media/libstagefright/webm/WebmFrameThread.cpp
+++ b/media/libstagefright/webm/WebmFrameThread.cpp
@@ -252,7 +252,7 @@
}
WebmFrameMediaSourceThread::WebmFrameMediaSourceThread(
- const sp<IMediaSource>& source,
+ const sp<MediaSource>& source,
int type,
LinkedBlockingQueue<const sp<WebmFrame> >& sink,
uint64_t timeCodeScale,
@@ -337,7 +337,7 @@
mStartTimeUs = kUninitialized;
status_t err = OK;
- MediaBuffer *buffer;
+ MediaBufferBase *buffer;
while (!mDone && (err = mSource->read(&buffer, NULL)) == OK) {
if (buffer->range_length() == 0) {
buffer->release();
@@ -345,8 +345,8 @@
continue;
}
- sp<MetaData> md = buffer->meta_data();
- CHECK(md->findInt64(kKeyTime, &timestampUs));
+ MetaDataBase &md = buffer->meta_data();
+ CHECK(md.findInt64(kKeyTime, &timestampUs));
if (mStartTimeUs == kUninitialized) {
mStartTimeUs = timestampUs;
}
@@ -374,7 +374,7 @@
CHECK_GE(timestampUs, 0ll);
int32_t isSync = false;
- md->findInt32(kKeyIsSyncFrame, &isSync);
+ md.findInt32(kKeyIsSyncFrame, &isSync);
const sp<WebmFrame> f = new WebmFrame(
mType,
isSync,
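The hunks above reflect the wider MediaBuffer to MediaBufferBase migration: read() now fills a MediaBufferBase pointer, and its metadata is exposed as a MetaDataBase reference rather than an sp<MetaData>. A hedged sketch of the resulting consumer loop, mirroring the code above (mSource, mDone, and err are assumed to be the surrounding WebmFrameMediaSourceThread members):

    // Sketch only: consuming buffers through the updated interfaces.
    MediaBufferBase *buffer = nullptr;
    status_t err = OK;
    while (!mDone && (err = mSource->read(&buffer, NULL)) == OK) {
        if (buffer->range_length() == 0) {
            buffer->release();
            buffer = nullptr;
            continue;
        }
        int64_t timestampUs;
        MetaDataBase &md = buffer->meta_data();        // plain reference, no sp<> refcount
        CHECK(md.findInt64(kKeyTime, &timestampUs));   // presentation time in microseconds
        // ... build and enqueue a WebmFrame from the buffer contents ...
        buffer->release();
        buffer = nullptr;
    }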
diff --git a/media/libstagefright/webm/WebmFrameThread.h b/media/libstagefright/webm/WebmFrameThread.h
index 528984f..76c91f1 100644
--- a/media/libstagefright/webm/WebmFrameThread.h
+++ b/media/libstagefright/webm/WebmFrameThread.h
@@ -20,8 +20,8 @@
#include "WebmFrame.h"
#include "LinkedBlockingQueue.h"
+#include <media/MediaSource.h>
#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaSource.h>
#include <utils/List.h>
#include <utils/Errors.h>
@@ -123,7 +123,7 @@
class WebmFrameMediaSourceThread: public WebmFrameSourceThread {
public:
WebmFrameMediaSourceThread(
- const sp<IMediaSource>& source,
+ const sp<MediaSource>& source,
int type,
LinkedBlockingQueue<const sp<WebmFrame> >& sink,
uint64_t timeCodeScale,
@@ -142,7 +142,7 @@
}
private:
- const sp<IMediaSource> mSource;
+ const sp<MediaSource> mSource;
const uint64_t mTimeCodeScale;
uint64_t mStartTimeUs;
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index d6c6930..4d73eb8 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -360,7 +360,7 @@
return err;
}
-status_t WebmWriter::addSource(const sp<IMediaSource> &source) {
+status_t WebmWriter::addSource(const sp<MediaSource> &source) {
Mutex::Autolock l(mLock);
if (mStarted) {
ALOGE("Attempt to add source AFTER recording is started");
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/WebmWriter.h
index 9f3b19f..ffe4c79 100644
--- a/media/libstagefright/webm/WebmWriter.h
+++ b/media/libstagefright/webm/WebmWriter.h
@@ -21,7 +21,7 @@
#include "WebmFrameThread.h"
#include "LinkedBlockingQueue.h"
-#include <media/stagefright/MediaSource.h>
+#include <media/MediaSource.h>
#include <media/stagefright/MediaWriter.h>
#include <utils/Errors.h>
@@ -40,7 +40,7 @@
~WebmWriter() { reset(); }
- virtual status_t addSource(const sp<IMediaSource> &source);
+ virtual status_t addSource(const sp<MediaSource> &source);
virtual status_t start(MetaData *param = NULL);
virtual status_t stop();
virtual status_t pause();
@@ -85,7 +85,7 @@
const char *mName;
sp<WebmElement> (*mMakeTrack)(const sp<MetaData>&);
- sp<IMediaSource> mSource;
+ sp<MediaSource> mSource;
sp<WebmElement> mTrackEntry;
sp<WebmFrameSourceThread> mThread;
LinkedBlockingQueue<const sp<WebmFrame> > mSink;
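Taken together, the webm changes replace the Binder-facing sp<IMediaSource> with the in-process sp<MediaSource> throughout the writer path. A hypothetical call site under the new signatures shown above; the helper name, the fd constructor, and the encoder source are assumptions for illustration, not part of this patch:

    // Hypothetical wiring against the updated WebmWriter interface; the caller
    // supplies any in-process MediaSource (e.g. an encoder output track).
    static status_t startWebmRecording(int fd, const sp<MediaSource> &videoTrack) {
        sp<WebmWriter> writer = new WebmWriter(fd);     // fd: open, writable output file
        status_t err = writer->addSource(videoTrack);   // new sp<MediaSource> overload
        if (err != OK) {
            return err;
        }
        return writer->start(NULL /* MetaData *param */);
    }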
diff --git a/media/libstagefright/wifi-display/Android.bp b/media/libstagefright/wifi-display/Android.bp
deleted file mode 100644
index fb08c5b..0000000
--- a/media/libstagefright/wifi-display/Android.bp
+++ /dev/null
@@ -1,51 +0,0 @@
-cc_library_shared {
- name: "libstagefright_wfd",
-
- srcs: [
- "MediaSender.cpp",
- "Parameters.cpp",
- "rtp/RTPSender.cpp",
- "source/Converter.cpp",
- "source/MediaPuller.cpp",
- "source/PlaybackSession.cpp",
- "source/RepeaterSource.cpp",
- "source/TSPacketizer.cpp",
- "source/WifiDisplaySource.cpp",
- "VideoFormats.cpp",
- ],
-
- include_dirs: [
- "frameworks/av/media/libstagefright",
- "frameworks/native/include/media/openmax",
- "frameworks/native/include/media/hardware",
- "frameworks/av/media/libstagefright/mpeg2ts",
- ],
-
- shared_libs: [
- "libbinder",
- "libcutils",
- "liblog",
- "libmedia",
- "libstagefright",
- "libstagefright_foundation",
- "libui",
- "libgui",
- "libutils",
- ],
-
- cflags: [
- "-Wno-multichar",
- "-Werror",
- "-Wall",
- ],
-
- sanitize: {
- misc_undefined: [
- "signed-integer-overflow",
- ],
- cfi: true,
- diag: {
- cfi: true,
- },
- },
-}
diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp
deleted file mode 100644
index cc412f5..0000000
--- a/media/libstagefright/wifi-display/MediaSender.cpp
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
- * Copyright 2013, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaSender"
-#include <utils/Log.h>
-
-#include "MediaSender.h"
-
-#include "rtp/RTPSender.h"
-#include "source/TSPacketizer.h"
-
-#include "include/avc_utils.h"
-
-#include <media/IHDCP.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/ANetworkSession.h>
-#include <ui/GraphicBuffer.h>
-
-namespace android {
-
-MediaSender::MediaSender(
- const sp<ANetworkSession> &netSession,
- const sp<AMessage> &notify)
- : mNetSession(netSession),
- mNotify(notify),
- mMode(MODE_UNDEFINED),
- mGeneration(0),
- mPrevTimeUs(-1ll),
- mInitDoneCount(0),
- mLogFile(NULL) {
- // mLogFile = fopen("/data/misc/log.ts", "wb");
-}
-
-MediaSender::~MediaSender() {
- if (mLogFile != NULL) {
- fclose(mLogFile);
- mLogFile = NULL;
- }
-}
-
-status_t MediaSender::setHDCP(const sp<IHDCP> &hdcp) {
- if (mMode != MODE_UNDEFINED) {
- return INVALID_OPERATION;
- }
-
- mHDCP = hdcp;
-
- return OK;
-}
-
-ssize_t MediaSender::addTrack(const sp<AMessage> &format, uint32_t flags) {
- if (mMode != MODE_UNDEFINED) {
- return INVALID_OPERATION;
- }
-
- TrackInfo info;
- info.mFormat = format;
- info.mFlags = flags;
- info.mPacketizerTrackIndex = -1;
-
- AString mime;
- CHECK(format->findString("mime", &mime));
- info.mIsAudio = !strncasecmp("audio/", mime.c_str(), 6);
-
- size_t index = mTrackInfos.size();
- mTrackInfos.push_back(info);
-
- return index;
-}
-
-status_t MediaSender::initAsync(
- ssize_t trackIndex,
- const char *remoteHost,
- int32_t remoteRTPPort,
- RTPSender::TransportMode rtpMode,
- int32_t remoteRTCPPort,
- RTPSender::TransportMode rtcpMode,
- int32_t *localRTPPort) {
- if (trackIndex < 0) {
- if (mMode != MODE_UNDEFINED) {
- return INVALID_OPERATION;
- }
-
- uint32_t flags = 0;
- if (mHDCP != NULL) {
- // XXX Determine proper HDCP version.
- flags |= TSPacketizer::EMIT_HDCP20_DESCRIPTOR;
- }
- mTSPacketizer = new TSPacketizer(flags);
-
- status_t err = OK;
- for (size_t i = 0; i < mTrackInfos.size(); ++i) {
- TrackInfo *info = &mTrackInfos.editItemAt(i);
-
- ssize_t packetizerTrackIndex =
- mTSPacketizer->addTrack(info->mFormat);
-
- if (packetizerTrackIndex < 0) {
- err = packetizerTrackIndex;
- break;
- }
-
- info->mPacketizerTrackIndex = packetizerTrackIndex;
- }
-
- if (err == OK) {
- sp<AMessage> notify = new AMessage(kWhatSenderNotify, this);
- notify->setInt32("generation", mGeneration);
- mTSSender = new RTPSender(mNetSession, notify);
- looper()->registerHandler(mTSSender);
-
- err = mTSSender->initAsync(
- remoteHost,
- remoteRTPPort,
- rtpMode,
- remoteRTCPPort,
- rtcpMode,
- localRTPPort);
-
- if (err != OK) {
- looper()->unregisterHandler(mTSSender->id());
- mTSSender.clear();
- }
- }
-
- if (err != OK) {
- for (size_t i = 0; i < mTrackInfos.size(); ++i) {
- TrackInfo *info = &mTrackInfos.editItemAt(i);
- info->mPacketizerTrackIndex = -1;
- }
-
- mTSPacketizer.clear();
- return err;
- }
-
- mMode = MODE_TRANSPORT_STREAM;
- mInitDoneCount = 1;
-
- return OK;
- }
-
- if (mMode == MODE_TRANSPORT_STREAM) {
- return INVALID_OPERATION;
- }
-
- if ((size_t)trackIndex >= mTrackInfos.size()) {
- return -ERANGE;
- }
-
- TrackInfo *info = &mTrackInfos.editItemAt(trackIndex);
-
- if (info->mSender != NULL) {
- return INVALID_OPERATION;
- }
-
- sp<AMessage> notify = new AMessage(kWhatSenderNotify, this);
- notify->setInt32("generation", mGeneration);
- notify->setSize("trackIndex", trackIndex);
-
- info->mSender = new RTPSender(mNetSession, notify);
- looper()->registerHandler(info->mSender);
-
- status_t err = info->mSender->initAsync(
- remoteHost,
- remoteRTPPort,
- rtpMode,
- remoteRTCPPort,
- rtcpMode,
- localRTPPort);
-
- if (err != OK) {
- looper()->unregisterHandler(info->mSender->id());
- info->mSender.clear();
-
- return err;
- }
-
- if (mMode == MODE_UNDEFINED) {
- mInitDoneCount = mTrackInfos.size();
- }
-
- mMode = MODE_ELEMENTARY_STREAMS;
-
- return OK;
-}
-
-status_t MediaSender::queueAccessUnit(
- size_t trackIndex, const sp<ABuffer> &accessUnit) {
- if (mMode == MODE_UNDEFINED) {
- return INVALID_OPERATION;
- }
-
- if (trackIndex >= mTrackInfos.size()) {
- return -ERANGE;
- }
-
- if (mMode == MODE_TRANSPORT_STREAM) {
- TrackInfo *info = &mTrackInfos.editItemAt(trackIndex);
- info->mAccessUnits.push_back(accessUnit);
-
- mTSPacketizer->extractCSDIfNecessary(info->mPacketizerTrackIndex);
-
- for (;;) {
- ssize_t minTrackIndex = -1;
- int64_t minTimeUs = -1ll;
-
- for (size_t i = 0; i < mTrackInfos.size(); ++i) {
- const TrackInfo &info = mTrackInfos.itemAt(i);
-
- if (info.mAccessUnits.empty()) {
- minTrackIndex = -1;
- minTimeUs = -1ll;
- break;
- }
-
- int64_t timeUs;
- const sp<ABuffer> &accessUnit = *info.mAccessUnits.begin();
- CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-
- if (minTrackIndex < 0 || timeUs < minTimeUs) {
- minTrackIndex = i;
- minTimeUs = timeUs;
- }
- }
-
- if (minTrackIndex < 0) {
- return OK;
- }
-
- TrackInfo *info = &mTrackInfos.editItemAt(minTrackIndex);
- sp<ABuffer> accessUnit = *info->mAccessUnits.begin();
- info->mAccessUnits.erase(info->mAccessUnits.begin());
-
- sp<ABuffer> tsPackets;
- status_t err = packetizeAccessUnit(
- minTrackIndex, accessUnit, &tsPackets);
-
- if (err == OK) {
- if (mLogFile != NULL) {
- fwrite(tsPackets->data(), 1, tsPackets->size(), mLogFile);
- }
-
- int64_t timeUs;
- CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
- tsPackets->meta()->setInt64("timeUs", timeUs);
-
- err = mTSSender->queueBuffer(
- tsPackets,
- 33 /* packetType */,
- RTPSender::PACKETIZATION_TRANSPORT_STREAM);
- }
-
- if (err != OK) {
- return err;
- }
- }
- }
-
- TrackInfo *info = &mTrackInfos.editItemAt(trackIndex);
-
- return info->mSender->queueBuffer(
- accessUnit,
- info->mIsAudio ? 96 : 97 /* packetType */,
- info->mIsAudio
- ? RTPSender::PACKETIZATION_AAC : RTPSender::PACKETIZATION_H264);
-}
-
-void MediaSender::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatSenderNotify:
- {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
- if (generation != mGeneration) {
- break;
- }
-
- onSenderNotify(msg);
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void MediaSender::onSenderNotify(const sp<AMessage> &msg) {
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- switch (what) {
- case RTPSender::kWhatInitDone:
- {
- --mInitDoneCount;
-
- int32_t err;
- CHECK(msg->findInt32("err", &err));
-
- if (err != OK) {
- notifyInitDone(err);
- ++mGeneration;
- break;
- }
-
- if (mInitDoneCount == 0) {
- notifyInitDone(OK);
- }
- break;
- }
-
- case RTPSender::kWhatError:
- {
- int32_t err;
- CHECK(msg->findInt32("err", &err));
-
- notifyError(err);
- break;
- }
-
- case kWhatNetworkStall:
- {
- size_t numBytesQueued;
- CHECK(msg->findSize("numBytesQueued", &numBytesQueued));
-
- notifyNetworkStall(numBytesQueued);
- break;
- }
-
- case kWhatInformSender:
- {
- int64_t avgLatencyUs;
- CHECK(msg->findInt64("avgLatencyUs", &avgLatencyUs));
-
- int64_t maxLatencyUs;
- CHECK(msg->findInt64("maxLatencyUs", &maxLatencyUs));
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatInformSender);
- notify->setInt64("avgLatencyUs", avgLatencyUs);
- notify->setInt64("maxLatencyUs", maxLatencyUs);
- notify->post();
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void MediaSender::notifyInitDone(status_t err) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatInitDone);
- notify->setInt32("err", err);
- notify->post();
-}
-
-void MediaSender::notifyError(status_t err) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatError);
- notify->setInt32("err", err);
- notify->post();
-}
-
-void MediaSender::notifyNetworkStall(size_t numBytesQueued) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatNetworkStall);
- notify->setSize("numBytesQueued", numBytesQueued);
- notify->post();
-}
-
-status_t MediaSender::packetizeAccessUnit(
- size_t trackIndex,
- sp<ABuffer> accessUnit,
- sp<ABuffer> *tsPackets) {
- const TrackInfo &info = mTrackInfos.itemAt(trackIndex);
-
- uint32_t flags = 0;
-
- bool isHDCPEncrypted = false;
- uint64_t inputCTR;
- uint8_t HDCP_private_data[16];
-
- bool manuallyPrependSPSPPS =
- !info.mIsAudio
- && (info.mFlags & FLAG_MANUALLY_PREPEND_SPS_PPS)
- && IsIDR(accessUnit);
-
- if (mHDCP != NULL && !info.mIsAudio) {
- isHDCPEncrypted = true;
-
- if (manuallyPrependSPSPPS) {
- accessUnit = mTSPacketizer->prependCSD(
- info.mPacketizerTrackIndex, accessUnit);
- }
-
- status_t err;
- native_handle_t* handle;
- if (accessUnit->meta()->findPointer("handle", (void**)&handle)
- && handle != NULL) {
- int32_t rangeLength, rangeOffset;
- sp<AMessage> notify;
- CHECK(accessUnit->meta()->findInt32("rangeOffset", &rangeOffset));
- CHECK(accessUnit->meta()->findInt32("rangeLength", &rangeLength));
- CHECK(accessUnit->meta()->findMessage("notify", &notify)
- && notify != NULL);
- CHECK_GE((int32_t)accessUnit->size(), rangeLength);
-
- sp<GraphicBuffer> grbuf(new GraphicBuffer(
- rangeOffset + rangeLength /* width */, 1 /* height */,
- HAL_PIXEL_FORMAT_Y8, 1 /* layerCount */,
- GRALLOC_USAGE_HW_VIDEO_ENCODER,
- rangeOffset + rangeLength /* stride */, handle,
- false /* keepOwnership */));
-
- err = mHDCP->encryptNative(
- grbuf, rangeOffset, rangeLength,
- trackIndex /* streamCTR */,
- &inputCTR,
- accessUnit->data());
- notify->post();
- } else {
- err = mHDCP->encrypt(
- accessUnit->data(), accessUnit->size(),
- trackIndex /* streamCTR */,
- &inputCTR,
- accessUnit->data());
- }
-
- if (err != OK) {
- ALOGE("Failed to HDCP-encrypt media data (err %d)",
- err);
-
- return err;
- }
-
- HDCP_private_data[0] = 0x00;
-
- HDCP_private_data[1] =
- (((trackIndex >> 30) & 3) << 1) | 1;
-
- HDCP_private_data[2] = (trackIndex >> 22) & 0xff;
-
- HDCP_private_data[3] =
- (((trackIndex >> 15) & 0x7f) << 1) | 1;
-
- HDCP_private_data[4] = (trackIndex >> 7) & 0xff;
-
- HDCP_private_data[5] =
- ((trackIndex & 0x7f) << 1) | 1;
-
- HDCP_private_data[6] = 0x00;
-
- HDCP_private_data[7] =
- (((inputCTR >> 60) & 0x0f) << 1) | 1;
-
- HDCP_private_data[8] = (inputCTR >> 52) & 0xff;
-
- HDCP_private_data[9] =
- (((inputCTR >> 45) & 0x7f) << 1) | 1;
-
- HDCP_private_data[10] = (inputCTR >> 37) & 0xff;
-
- HDCP_private_data[11] =
- (((inputCTR >> 30) & 0x7f) << 1) | 1;
-
- HDCP_private_data[12] = (inputCTR >> 22) & 0xff;
-
- HDCP_private_data[13] =
- (((inputCTR >> 15) & 0x7f) << 1) | 1;
-
- HDCP_private_data[14] = (inputCTR >> 7) & 0xff;
-
- HDCP_private_data[15] =
- ((inputCTR & 0x7f) << 1) | 1;
-
- flags |= TSPacketizer::IS_ENCRYPTED;
- } else if (manuallyPrependSPSPPS) {
- flags |= TSPacketizer::PREPEND_SPS_PPS_TO_IDR_FRAMES;
- }
-
- int64_t timeUs = ALooper::GetNowUs();
- if (mPrevTimeUs < 0ll || mPrevTimeUs + 100000ll <= timeUs) {
- flags |= TSPacketizer::EMIT_PCR;
- flags |= TSPacketizer::EMIT_PAT_AND_PMT;
-
- mPrevTimeUs = timeUs;
- }
-
- mTSPacketizer->packetize(
- info.mPacketizerTrackIndex,
- accessUnit,
- tsPackets,
- flags,
- !isHDCPEncrypted ? NULL : HDCP_private_data,
- !isHDCPEncrypted ? 0 : sizeof(HDCP_private_data),
- info.mIsAudio ? 2 : 0 /* numStuffingBytes */);
-
- return OK;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/wifi-display/MediaSender.h b/media/libstagefright/wifi-display/MediaSender.h
deleted file mode 100644
index 04538ea..0000000
--- a/media/libstagefright/wifi-display/MediaSender.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright 2013, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_SENDER_H_
-
-#define MEDIA_SENDER_H_
-
-#include "rtp/RTPSender.h"
-
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/foundation/AHandler.h>
-#include <utils/Errors.h>
-#include <utils/Vector.h>
-
-namespace android {
-
-struct ABuffer;
-struct ANetworkSession;
-struct AMessage;
-struct IHDCP;
-struct TSPacketizer;
-
-// This class facilitates sending of data from one or more media tracks
-// through one or more RTP channels, either providing a 1:1 mapping from
-// track to RTP channel or muxing all tracks into a single RTP channel and
-// using transport stream encapsulation.
-// Optionally the (video) data is encrypted using the provided hdcp object.
-struct MediaSender : public AHandler {
- enum {
- kWhatInitDone,
- kWhatError,
- kWhatNetworkStall,
- kWhatInformSender,
- };
-
- MediaSender(
- const sp<ANetworkSession> &netSession,
- const sp<AMessage> &notify);
-
- status_t setHDCP(const sp<IHDCP> &hdcp);
-
- enum FlagBits {
- FLAG_MANUALLY_PREPEND_SPS_PPS = 1,
- };
- ssize_t addTrack(const sp<AMessage> &format, uint32_t flags);
-
- // If trackIndex == -1, initialize for transport stream muxing.
- status_t initAsync(
- ssize_t trackIndex,
- const char *remoteHost,
- int32_t remoteRTPPort,
- RTPSender::TransportMode rtpMode,
- int32_t remoteRTCPPort,
- RTPSender::TransportMode rtcpMode,
- int32_t *localRTPPort);
-
- status_t queueAccessUnit(
- size_t trackIndex, const sp<ABuffer> &accessUnit);
-
-protected:
- virtual void onMessageReceived(const sp<AMessage> &msg);
- virtual ~MediaSender();
-
-private:
- enum {
- kWhatSenderNotify,
- };
-
- enum Mode {
- MODE_UNDEFINED,
- MODE_TRANSPORT_STREAM,
- MODE_ELEMENTARY_STREAMS,
- };
-
- struct TrackInfo {
- sp<AMessage> mFormat;
- uint32_t mFlags;
- sp<RTPSender> mSender;
- List<sp<ABuffer> > mAccessUnits;
- ssize_t mPacketizerTrackIndex;
- bool mIsAudio;
- };
-
- sp<ANetworkSession> mNetSession;
- sp<AMessage> mNotify;
-
- sp<IHDCP> mHDCP;
-
- Mode mMode;
- int32_t mGeneration;
-
- Vector<TrackInfo> mTrackInfos;
-
- sp<TSPacketizer> mTSPacketizer;
- sp<RTPSender> mTSSender;
- int64_t mPrevTimeUs;
-
- size_t mInitDoneCount;
-
- FILE *mLogFile;
-
- void onSenderNotify(const sp<AMessage> &msg);
-
- void notifyInitDone(status_t err);
- void notifyError(status_t err);
- void notifyNetworkStall(size_t numBytesQueued);
-
- status_t packetizeAccessUnit(
- size_t trackIndex,
- sp<ABuffer> accessUnit,
- sp<ABuffer> *tsPackets);
-
- DISALLOW_EVIL_CONSTRUCTORS(MediaSender);
-};
-
-} // namespace android
-
-#endif // MEDIA_SENDER_H_
-
diff --git a/media/libstagefright/wifi-display/Parameters.cpp b/media/libstagefright/wifi-display/Parameters.cpp
deleted file mode 100644
index d2a61ea..0000000
--- a/media/libstagefright/wifi-display/Parameters.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Parameters.h"
-
-#include <media/stagefright/MediaErrors.h>
-
-namespace android {
-
-// static
-sp<Parameters> Parameters::Parse(const char *data, size_t size) {
- sp<Parameters> params = new Parameters;
- status_t err = params->parse(data, size);
-
- if (err != OK) {
- return NULL;
- }
-
- return params;
-}
-
-Parameters::Parameters() {}
-
-Parameters::~Parameters() {}
-
-status_t Parameters::parse(const char *data, size_t size) {
- size_t i = 0;
- while (i < size) {
- size_t nameStart = i;
- while (i < size && data[i] != ':') {
- ++i;
- }
-
- if (i == size || i == nameStart) {
- return ERROR_MALFORMED;
- }
-
- AString name(&data[nameStart], i - nameStart);
- name.trim();
- name.tolower();
-
- ++i;
-
- size_t valueStart = i;
-
- while (i + 1 < size && (data[i] != '\r' || data[i + 1] != '\n')) {
- ++i;
- }
-
- AString value(&data[valueStart], i - valueStart);
- value.trim();
-
- mDict.add(name, value);
-
- while (i + 1 < size && data[i] == '\r' && data[i + 1] == '\n') {
- i += 2;
- }
- }
-
- return OK;
-}
-
-bool Parameters::findParameter(const char *name, AString *value) const {
- AString key = name;
- key.tolower();
-
- ssize_t index = mDict.indexOfKey(key);
-
- if (index < 0) {
- value->clear();
-
- return false;
- }
-
- *value = mDict.valueAt(index);
- return true;
-}
-
-} // namespace android
diff --git a/media/libstagefright/wifi-display/Parameters.h b/media/libstagefright/wifi-display/Parameters.h
deleted file mode 100644
index a5e787e..0000000
--- a/media/libstagefright/wifi-display/Parameters.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/foundation/AString.h>
-#include <utils/KeyedVector.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-struct Parameters : public RefBase {
- static sp<Parameters> Parse(const char *data, size_t size);
-
- bool findParameter(const char *name, AString *value) const;
-
-protected:
- virtual ~Parameters();
-
-private:
- KeyedVector<AString, AString> mDict;
-
- Parameters();
- status_t parse(const char *data, size_t size);
-
- DISALLOW_EVIL_CONSTRUCTORS(Parameters);
-};
-
-} // namespace android
diff --git a/media/libstagefright/wifi-display/VideoFormats.cpp b/media/libstagefright/wifi-display/VideoFormats.cpp
deleted file mode 100644
index dbc511c..0000000
--- a/media/libstagefright/wifi-display/VideoFormats.cpp
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * Copyright 2013, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "VideoFormats"
-#include <utils/Log.h>
-
-#include "VideoFormats.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-
-namespace android {
-
-// static
-const VideoFormats::config_t VideoFormats::mResolutionTable[][32] = {
- {
- // CEA Resolutions
- { 640, 480, 60, false, 0, 0},
- { 720, 480, 60, false, 0, 0},
- { 720, 480, 60, true, 0, 0},
- { 720, 576, 50, false, 0, 0},
- { 720, 576, 50, true, 0, 0},
- { 1280, 720, 30, false, 0, 0},
- { 1280, 720, 60, false, 0, 0},
- { 1920, 1080, 30, false, 0, 0},
- { 1920, 1080, 60, false, 0, 0},
- { 1920, 1080, 60, true, 0, 0},
- { 1280, 720, 25, false, 0, 0},
- { 1280, 720, 50, false, 0, 0},
- { 1920, 1080, 25, false, 0, 0},
- { 1920, 1080, 50, false, 0, 0},
- { 1920, 1080, 50, true, 0, 0},
- { 1280, 720, 24, false, 0, 0},
- { 1920, 1080, 24, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- },
- {
- // VESA Resolutions
- { 800, 600, 30, false, 0, 0},
- { 800, 600, 60, false, 0, 0},
- { 1024, 768, 30, false, 0, 0},
- { 1024, 768, 60, false, 0, 0},
- { 1152, 864, 30, false, 0, 0},
- { 1152, 864, 60, false, 0, 0},
- { 1280, 768, 30, false, 0, 0},
- { 1280, 768, 60, false, 0, 0},
- { 1280, 800, 30, false, 0, 0},
- { 1280, 800, 60, false, 0, 0},
- { 1360, 768, 30, false, 0, 0},
- { 1360, 768, 60, false, 0, 0},
- { 1366, 768, 30, false, 0, 0},
- { 1366, 768, 60, false, 0, 0},
- { 1280, 1024, 30, false, 0, 0},
- { 1280, 1024, 60, false, 0, 0},
- { 1400, 1050, 30, false, 0, 0},
- { 1400, 1050, 60, false, 0, 0},
- { 1440, 900, 30, false, 0, 0},
- { 1440, 900, 60, false, 0, 0},
- { 1600, 900, 30, false, 0, 0},
- { 1600, 900, 60, false, 0, 0},
- { 1600, 1200, 30, false, 0, 0},
- { 1600, 1200, 60, false, 0, 0},
- { 1680, 1024, 30, false, 0, 0},
- { 1680, 1024, 60, false, 0, 0},
- { 1680, 1050, 30, false, 0, 0},
- { 1680, 1050, 60, false, 0, 0},
- { 1920, 1200, 30, false, 0, 0},
- { 1920, 1200, 60, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- },
- {
- // HH Resolutions
- { 800, 480, 30, false, 0, 0},
- { 800, 480, 60, false, 0, 0},
- { 854, 480, 30, false, 0, 0},
- { 854, 480, 60, false, 0, 0},
- { 864, 480, 30, false, 0, 0},
- { 864, 480, 60, false, 0, 0},
- { 640, 360, 30, false, 0, 0},
- { 640, 360, 60, false, 0, 0},
- { 960, 540, 30, false, 0, 0},
- { 960, 540, 60, false, 0, 0},
- { 848, 480, 30, false, 0, 0},
- { 848, 480, 60, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- { 0, 0, 0, false, 0, 0},
- }
-};
-
-VideoFormats::VideoFormats() {
- memcpy(mConfigs, mResolutionTable, sizeof(mConfigs));
-
- for (size_t i = 0; i < kNumResolutionTypes; ++i) {
- mResolutionEnabled[i] = 0;
- }
-
- setNativeResolution(RESOLUTION_CEA, 0); // default to 640x480 p60
-}
-
-void VideoFormats::setNativeResolution(ResolutionType type, size_t index) {
- CHECK_LT(type, kNumResolutionTypes);
- CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL));
-
- mNativeType = type;
- mNativeIndex = index;
-
- setResolutionEnabled(type, index);
-}
-
-void VideoFormats::getNativeResolution(
- ResolutionType *type, size_t *index) const {
- *type = mNativeType;
- *index = mNativeIndex;
-}
-
-void VideoFormats::disableAll() {
- for (size_t i = 0; i < kNumResolutionTypes; ++i) {
- mResolutionEnabled[i] = 0;
- for (size_t j = 0; j < 32; j++) {
- mConfigs[i][j].profile = mConfigs[i][j].level = 0;
- }
- }
-}
-
-void VideoFormats::enableAll() {
- for (size_t i = 0; i < kNumResolutionTypes; ++i) {
- mResolutionEnabled[i] = 0xffffffff;
- for (size_t j = 0; j < 32; j++) {
- mConfigs[i][j].profile = (1ul << PROFILE_CBP);
- mConfigs[i][j].level = (1ul << LEVEL_31);
- }
- }
-}
-
-void VideoFormats::enableResolutionUpto(
- ResolutionType type, size_t index,
- ProfileType profile, LevelType level) {
- size_t width, height, fps, score;
- bool interlaced;
- if (!GetConfiguration(type, index, &width, &height,
- &fps, &interlaced)) {
- ALOGE("Maximum resolution not found!");
- return;
- }
- score = width * height * fps * (!interlaced + 1);
- for (size_t i = 0; i < kNumResolutionTypes; ++i) {
- for (size_t j = 0; j < 32; j++) {
- if (GetConfiguration((ResolutionType)i, j,
- &width, &height, &fps, &interlaced)
- && score >= width * height * fps * (!interlaced + 1)) {
- setResolutionEnabled((ResolutionType)i, j);
- setProfileLevel((ResolutionType)i, j, profile, level);
- }
- }
- }
-}
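The heuristic above (and PickBestFormat further down) scores a mode as width * height * fps, doubled for progressive scan. For example, 1280x720p60 scores 1280 * 720 * 60 * 2 = 110,592,000, while 1920x1080i60 scores 1920 * 1080 * 60 * 1 = 124,416,000, so the interlaced 1080i mode still outranks 720p60 under this metric.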
-
-void VideoFormats::setResolutionEnabled(
- ResolutionType type, size_t index, bool enabled) {
- CHECK_LT(type, kNumResolutionTypes);
- CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL));
-
- if (enabled) {
- mResolutionEnabled[type] |= (1ul << index);
- mConfigs[type][index].profile = (1ul << PROFILE_CBP);
- mConfigs[type][index].level = (1ul << LEVEL_31);
- } else {
- mResolutionEnabled[type] &= ~(1ul << index);
- mConfigs[type][index].profile = 0;
- mConfigs[type][index].level = 0;
- }
-}
-
-void VideoFormats::setProfileLevel(
- ResolutionType type, size_t index,
- ProfileType profile, LevelType level) {
- CHECK_LT(type, kNumResolutionTypes);
- CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL));
-
- mConfigs[type][index].profile = (1ul << profile);
- mConfigs[type][index].level = (1ul << level);
-}
-
-void VideoFormats::getProfileLevel(
- ResolutionType type, size_t index,
- ProfileType *profile, LevelType *level) const{
- CHECK_LT(type, kNumResolutionTypes);
- CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL));
-
- int i, bestProfile = -1, bestLevel = -1;
-
- for (i = 0; i < kNumProfileTypes; ++i) {
- if (mConfigs[type][index].profile & (1ul << i)) {
- bestProfile = i;
- }
- }
-
- for (i = 0; i < kNumLevelTypes; ++i) {
- if (mConfigs[type][index].level & (1ul << i)) {
- bestLevel = i;
- }
- }
-
- if (bestProfile == -1 || bestLevel == -1) {
- ALOGE("Profile or level not set for resolution type %d, index %zu",
- type, index);
- bestProfile = PROFILE_CBP;
- bestLevel = LEVEL_31;
- }
-
- *profile = (ProfileType) bestProfile;
- *level = (LevelType) bestLevel;
-}
-
-bool VideoFormats::isResolutionEnabled(
- ResolutionType type, size_t index) const {
- CHECK_LT(type, kNumResolutionTypes);
- CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL));
-
- return mResolutionEnabled[type] & (1ul << index);
-}
-
-// static
-bool VideoFormats::GetConfiguration(
- ResolutionType type,
- size_t index,
- size_t *width, size_t *height, size_t *framesPerSecond,
- bool *interlaced) {
- CHECK_LT(type, kNumResolutionTypes);
-
- if (index >= 32) {
- return false;
- }
-
- const config_t *config = &mResolutionTable[type][index];
-
- if (config->width == 0) {
- return false;
- }
-
- if (width) {
- *width = config->width;
- }
-
- if (height) {
- *height = config->height;
- }
-
- if (framesPerSecond) {
- *framesPerSecond = config->framesPerSecond;
- }
-
- if (interlaced) {
- *interlaced = config->interlaced;
- }
-
- return true;
-}
-
-bool VideoFormats::parseH264Codec(const char *spec) {
- unsigned profile, level, res[3];
-
- if (sscanf(
- spec,
- "%02x %02x %08X %08X %08X",
- &profile,
- &level,
- &res[0],
- &res[1],
- &res[2]) != 5) {
- return false;
- }
-
- for (size_t i = 0; i < kNumResolutionTypes; ++i) {
- for (size_t j = 0; j < 32; ++j) {
- if (res[i] & (1ul << j)){
- mResolutionEnabled[i] |= (1ul << j);
- if (profile > mConfigs[i][j].profile) {
- // prefer higher profile (even if level is lower)
- mConfigs[i][j].profile = profile;
- mConfigs[i][j].level = level;
- } else if (profile == mConfigs[i][j].profile &&
- level > mConfigs[i][j].level) {
- mConfigs[i][j].level = level;
- }
- }
- }
- }
-
- return true;
-}
-
-// static
-bool VideoFormats::GetProfileLevel(
- ProfileType profile, LevelType level, unsigned *profileIdc,
- unsigned *levelIdc, unsigned *constraintSet) {
- CHECK_LT(profile, kNumProfileTypes);
- CHECK_LT(level, kNumLevelTypes);
-
- static const unsigned kProfileIDC[kNumProfileTypes] = {
- 66, // PROFILE_CBP
- 100, // PROFILE_CHP
- };
-
- static const unsigned kLevelIDC[kNumLevelTypes] = {
- 31, // LEVEL_31
- 32, // LEVEL_32
- 40, // LEVEL_40
- 41, // LEVEL_41
- 42, // LEVEL_42
- };
-
- static const unsigned kConstraintSet[kNumProfileTypes] = {
- 0xc0, // PROFILE_CBP
- 0x0c, // PROFILE_CHP
- };
-
- if (profileIdc) {
- *profileIdc = kProfileIDC[profile];
- }
-
- if (levelIdc) {
- *levelIdc = kLevelIDC[level];
- }
-
- if (constraintSet) {
- *constraintSet = kConstraintSet[profile];
- }
-
- return true;
-}
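For context on the tables above: profile_idc 66 with constraint byte 0xc0 (constraint_set0/1 flags) signals H.264 Constrained Baseline, and profile_idc 100 with 0x0c (constraint_set4/5 flags) signals Constrained High; level_idc values 31 through 42 correspond to levels 3.1 through 4.2.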
-
-bool VideoFormats::parseFormatSpec(const char *spec) {
- CHECK_EQ(kNumResolutionTypes, 3);
-
- disableAll();
-
- unsigned native, dummy;
- size_t size = strlen(spec);
- size_t offset = 0;
-
- if (sscanf(spec, "%02x %02x ", &native, &dummy) != 2) {
- return false;
- }
-
- offset += 6; // skip native and preferred-display-mode-supported
- CHECK_LE(offset + 58, size);
- while (offset < size) {
- parseH264Codec(spec + offset);
- offset += 60; // skip H.264-codec + ", "
- }
-
- mNativeIndex = native >> 3;
- mNativeType = (ResolutionType)(native & 7);
-
- bool success;
- if (mNativeType >= kNumResolutionTypes) {
- success = false;
- } else {
- success = GetConfiguration(
- mNativeType, mNativeIndex, NULL, NULL, NULL, NULL);
- }
-
- if (!success) {
- ALOGW("sink advertised an illegal native resolution, fortunately "
- "this value is ignored for the time being...");
- }
-
- return true;
-}
-
-AString VideoFormats::getFormatSpec(bool forM4Message) const {
- CHECK_EQ(kNumResolutionTypes, 3);
-
- // wfd_video_formats:
- // 1 byte "native"
- // 1 byte "preferred-display-mode-supported" 0 or 1
- // one or more avc codec structures
- // 1 byte profile
- // 1 byte level
- // 4 byte CEA mask
- // 4 byte VESA mask
- // 4 byte HH mask
- // 1 byte latency
- // 2 byte min-slice-size
- // 2 byte slice-enc-params
- // 1 byte framerate-control-support
- // max-hres (none or 2 byte)
- // max-vres (none or 2 byte)
-
- return AStringPrintf(
- "%02x 00 %02x %02x %08x %08x %08x 00 0000 0000 00 none none",
- forM4Message ? 0x00 : ((mNativeIndex << 3) | mNativeType),
- mConfigs[mNativeType][mNativeIndex].profile,
- mConfigs[mNativeType][mNativeIndex].level,
- mResolutionEnabled[0],
- mResolutionEnabled[1],
- mResolutionEnabled[2]);
-}
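As an illustration of the layout documented above: a source that calls enableAll() and keeps the default native mode (CEA index 0) would, for the non-M4 case, emit roughly

    00 00 01 01 ffffffff ffffffff ffffffff 00 0000 0000 00 none none

i.e. native byte 00, preferred-display-mode-supported 00, profile bitmask 01 (CBP), level bitmask 01 (LEVEL_31), the three all-ones CEA/VESA/HH masks, then the fixed latency/slice/frame-rate-control fields and no max-hres/max-vres.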
-
-// static
-bool VideoFormats::PickBestFormat(
- const VideoFormats &sinkSupported,
- const VideoFormats &sourceSupported,
- ResolutionType *chosenType,
- size_t *chosenIndex,
- ProfileType *chosenProfile,
- LevelType *chosenLevel) {
-#if 0
- // Support for the native format is a great idea, the spec includes
- // these features, but nobody supports it and the tests don't validate it.
-
- ResolutionType nativeType;
- size_t nativeIndex;
- sinkSupported.getNativeResolution(&nativeType, &nativeIndex);
- if (sinkSupported.isResolutionEnabled(nativeType, nativeIndex)) {
- if (sourceSupported.isResolutionEnabled(nativeType, nativeIndex)) {
- ALOGI("Choosing sink's native resolution");
- *chosenType = nativeType;
- *chosenIndex = nativeIndex;
- return true;
- }
- } else {
- ALOGW("Sink advertised native resolution that it doesn't "
- "actually support... ignoring");
- }
-
- sourceSupported.getNativeResolution(&nativeType, &nativeIndex);
- if (sourceSupported.isResolutionEnabled(nativeType, nativeIndex)) {
- if (sinkSupported.isResolutionEnabled(nativeType, nativeIndex)) {
- ALOGI("Choosing source's native resolution");
- *chosenType = nativeType;
- *chosenIndex = nativeIndex;
- return true;
- }
- } else {
- ALOGW("Source advertised native resolution that it doesn't "
- "actually support... ignoring");
- }
-#endif
-
- bool first = true;
- uint32_t bestScore = 0;
- size_t bestType = 0;
- size_t bestIndex = 0;
- for (size_t i = 0; i < kNumResolutionTypes; ++i) {
- for (size_t j = 0; j < 32; ++j) {
- size_t width, height, framesPerSecond;
- bool interlaced;
- if (!GetConfiguration(
- (ResolutionType)i,
- j,
- &width, &height, &framesPerSecond, &interlaced)) {
- break;
- }
-
- if (!sinkSupported.isResolutionEnabled((ResolutionType)i, j)
- || !sourceSupported.isResolutionEnabled(
- (ResolutionType)i, j)) {
- continue;
- }
-
- ALOGV("type %zu, index %zu, %zu x %zu %c%zu supported",
- i, j, width, height, interlaced ? 'i' : 'p', framesPerSecond);
-
- uint32_t score = width * height * framesPerSecond;
- if (!interlaced) {
- score *= 2;
- }
-
- if (first || score > bestScore) {
- bestScore = score;
- bestType = i;
- bestIndex = j;
-
- first = false;
- }
- }
- }
-
- if (first) {
- return false;
- }
-
- *chosenType = (ResolutionType)bestType;
- *chosenIndex = bestIndex;
-
- // Pick the best profile/level supported by both sink and source.
- ProfileType srcProfile, sinkProfile;
- LevelType srcLevel, sinkLevel;
- sourceSupported.getProfileLevel(
- (ResolutionType)bestType, bestIndex,
- &srcProfile, &srcLevel);
- sinkSupported.getProfileLevel(
- (ResolutionType)bestType, bestIndex,
- &sinkProfile, &sinkLevel);
- *chosenProfile = srcProfile < sinkProfile ? srcProfile : sinkProfile;
- *chosenLevel = srcLevel < sinkLevel ? srcLevel : sinkLevel;
-
- return true;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/wifi-display/VideoFormats.h b/media/libstagefright/wifi-display/VideoFormats.h
deleted file mode 100644
index fd38fd1..0000000
--- a/media/libstagefright/wifi-display/VideoFormats.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright 2013, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VIDEO_FORMATS_H_
-
-#define VIDEO_FORMATS_H_
-
-#include <media/stagefright/foundation/ABase.h>
-
-#include <stdint.h>
-
-namespace android {
-
-struct AString;
-
-// This class encapsulates the video resolution capabilities of a wfd source
-// or sink as outlined in the wfd specs. Currently three sets of resolutions
-// are specified, each of which supports up to 32 resolutions.
-// In addition to its capabilities each sink/source also publishes its
-// "native" resolution, presumably one that is preferred among all others
-// because it wouldn't require any scaling and directly corresponds to the
-// display capabilities/pixels.
-struct VideoFormats {
- VideoFormats();
-
- struct config_t {
- size_t width, height, framesPerSecond;
- bool interlaced;
- unsigned char profile, level;
- };
-
- enum ProfileType {
- PROFILE_CBP = 0,
- PROFILE_CHP,
- kNumProfileTypes,
- };
-
- enum LevelType {
- LEVEL_31 = 0,
- LEVEL_32,
- LEVEL_40,
- LEVEL_41,
- LEVEL_42,
- kNumLevelTypes,
- };
-
- enum ResolutionType {
- RESOLUTION_CEA,
- RESOLUTION_VESA,
- RESOLUTION_HH,
- kNumResolutionTypes,
- };
-
- void setNativeResolution(ResolutionType type, size_t index);
- void getNativeResolution(ResolutionType *type, size_t *index) const;
-
- void disableAll();
- void enableAll();
- void enableResolutionUpto(
- ResolutionType type, size_t index,
- ProfileType profile, LevelType level);
-
- void setResolutionEnabled(
- ResolutionType type, size_t index, bool enabled = true);
-
- bool isResolutionEnabled(ResolutionType type, size_t index) const;
-
- void setProfileLevel(
- ResolutionType type, size_t index,
- ProfileType profile, LevelType level);
-
- void getProfileLevel(
- ResolutionType type, size_t index,
- ProfileType *profile, LevelType *level) const;
-
- static bool GetConfiguration(
- ResolutionType type, size_t index,
- size_t *width, size_t *height, size_t *framesPerSecond,
- bool *interlaced);
-
- static bool GetProfileLevel(
- ProfileType profile, LevelType level,
- unsigned *profileIdc, unsigned *levelIdc,
- unsigned *constraintSet);
-
- bool parseFormatSpec(const char *spec);
- AString getFormatSpec(bool forM4Message = false) const;
-
- static bool PickBestFormat(
- const VideoFormats &sinkSupported,
- const VideoFormats &sourceSupported,
- ResolutionType *chosenType,
- size_t *chosenIndex,
- ProfileType *chosenProfile,
- LevelType *chosenLevel);
-
-private:
- bool parseH264Codec(const char *spec);
- ResolutionType mNativeType;
- size_t mNativeIndex;
-
- uint32_t mResolutionEnabled[kNumResolutionTypes];
- static const config_t mResolutionTable[kNumResolutionTypes][32];
- config_t mConfigs[kNumResolutionTypes][32];
-
- DISALLOW_EVIL_CONSTRUCTORS(VideoFormats);
-};
-
-} // namespace android
-
-#endif // VIDEO_FORMATS_H_
-
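A minimal, somewhat contrived usage sketch of the interface above (not taken from the tree; it simply round-trips a format spec so the snippet is self-contained):

    VideoFormats sinkFormats, sourceFormats;
    sourceFormats.enableAll();
    // Pretend the sink advertised exactly what a default source would.
    sinkFormats.parseFormatSpec(sourceFormats.getFormatSpec().c_str());

    VideoFormats::ResolutionType type;
    size_t index;
    VideoFormats::ProfileType profile;
    VideoFormats::LevelType level;
    if (VideoFormats::PickBestFormat(
            sinkFormats, sourceFormats, &type, &index, &profile, &level)) {
        size_t width, height, fps;
        bool interlaced;
        VideoFormats::GetConfiguration(type, index, &width, &height, &fps, &interlaced);
    }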
diff --git a/media/libstagefright/wifi-display/rtp/RTPBase.h b/media/libstagefright/wifi-display/rtp/RTPBase.h
deleted file mode 100644
index 194f1ee..0000000
--- a/media/libstagefright/wifi-display/rtp/RTPBase.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright 2013, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef RTP_BASE_H_
-
-#define RTP_BASE_H_
-
-namespace android {
-
-struct RTPBase {
- enum PacketizationMode {
- PACKETIZATION_TRANSPORT_STREAM,
- PACKETIZATION_H264,
- PACKETIZATION_AAC,
- PACKETIZATION_NONE,
- };
-
- enum TransportMode {
- TRANSPORT_UNDEFINED,
- TRANSPORT_NONE,
- TRANSPORT_UDP,
- TRANSPORT_TCP,
- TRANSPORT_TCP_INTERLEAVED,
- };
-
- // Really UDP _payload_ size
- const unsigned int kMaxUDPPacketSize = 1472; // 1472 good, 1473 bad on Android@Home
-
- static int32_t PickRandomRTPPort();
-};
-
-} // namespace android
-
-#endif // RTP_BASE_H_
-
-
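The 1472-byte cap above is the usual Ethernet arithmetic: a 1500-byte MTU minus the 20-byte IPv4 header and the 8-byte UDP header leaves 1472 bytes of UDP payload, so an RTP packet (12-byte header plus payload) built against this limit still fits in one unfragmented datagram.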
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.cpp b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
deleted file mode 100644
index ca9fdd2..0000000
--- a/media/libstagefright/wifi-display/rtp/RTPSender.cpp
+++ /dev/null
@@ -1,808 +0,0 @@
-/*
- * Copyright 2013, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "RTPSender"
-#include <utils/Log.h>
-
-#include "RTPSender.h"
-
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/ANetworkSession.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-
-#include "include/avc_utils.h"
-
-namespace android {
-
-RTPSender::RTPSender(
- const sp<ANetworkSession> &netSession,
- const sp<AMessage> ¬ify)
- : mNetSession(netSession),
- mNotify(notify),
- mRTPMode(TRANSPORT_UNDEFINED),
- mRTCPMode(TRANSPORT_UNDEFINED),
- mRTPSessionID(0),
- mRTCPSessionID(0),
- mRTPConnected(false),
- mRTCPConnected(false),
- mLastNTPTime(0),
- mLastRTPTime(0),
- mNumRTPSent(0),
- mNumRTPOctetsSent(0),
- mNumSRsSent(0),
- mRTPSeqNo(0),
- mHistorySize(0) {
-}
-
-RTPSender::~RTPSender() {
- if (mRTCPSessionID != 0) {
- mNetSession->destroySession(mRTCPSessionID);
- mRTCPSessionID = 0;
- }
-
- if (mRTPSessionID != 0) {
- mNetSession->destroySession(mRTPSessionID);
- mRTPSessionID = 0;
- }
-}
-
-// static
-int32_t RTPBase::PickRandomRTPPort() {
- // Pick an even integer in range [1024, 65534)
-
- static const size_t kRange = (65534 - 1024) / 2;
-
- return (int32_t)(((float)(kRange + 1) * rand()) / RAND_MAX) * 2 + 1024;
-}
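RTP conventionally takes an even port with RTCP on the next odd port (RFC 3550), which is why the function above only returns even numbers and initAsync() below binds RTCP to localRTPPort + 1. A hedged modern-C++ sketch of the same idea (PickEvenPort is a made-up name, not part of this class):

    #include <cstdint>
    #include <random>

    // Pick an even UDP port in [1024, 65534) so the matching RTCP socket
    // can take the odd port directly above it.
    static int32_t PickEvenPort() {
        static std::mt19937 rng{std::random_device{}()};
        std::uniform_int_distribution<int32_t> dist(0, (65534 - 1024) / 2 - 1);
        return 1024 + 2 * dist(rng);
    }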
-
-status_t RTPSender::initAsync(
- const char *remoteHost,
- int32_t remoteRTPPort,
- TransportMode rtpMode,
- int32_t remoteRTCPPort,
- TransportMode rtcpMode,
- int32_t *outLocalRTPPort) {
- if (mRTPMode != TRANSPORT_UNDEFINED
- || rtpMode == TRANSPORT_UNDEFINED
- || rtpMode == TRANSPORT_NONE
- || rtcpMode == TRANSPORT_UNDEFINED) {
- return INVALID_OPERATION;
- }
-
- CHECK_NE(rtpMode, TRANSPORT_TCP_INTERLEAVED);
- CHECK_NE(rtcpMode, TRANSPORT_TCP_INTERLEAVED);
-
- if ((rtcpMode == TRANSPORT_NONE && remoteRTCPPort >= 0)
- || (rtcpMode != TRANSPORT_NONE && remoteRTCPPort < 0)) {
- return INVALID_OPERATION;
- }
-
- sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, this);
-
- sp<AMessage> rtcpNotify;
- if (remoteRTCPPort >= 0) {
- rtcpNotify = new AMessage(kWhatRTCPNotify, this);
- }
-
- CHECK_EQ(mRTPSessionID, 0);
- CHECK_EQ(mRTCPSessionID, 0);
-
- int32_t localRTPPort;
-
- for (;;) {
- localRTPPort = PickRandomRTPPort();
-
- status_t err;
- if (rtpMode == TRANSPORT_UDP) {
- err = mNetSession->createUDPSession(
- localRTPPort,
- remoteHost,
- remoteRTPPort,
- rtpNotify,
- &mRTPSessionID);
- } else {
- CHECK_EQ(rtpMode, TRANSPORT_TCP);
- err = mNetSession->createTCPDatagramSession(
- localRTPPort,
- remoteHost,
- remoteRTPPort,
- rtpNotify,
- &mRTPSessionID);
- }
-
- if (err != OK) {
- continue;
- }
-
- if (remoteRTCPPort < 0) {
- break;
- }
-
- if (rtcpMode == TRANSPORT_UDP) {
- err = mNetSession->createUDPSession(
- localRTPPort + 1,
- remoteHost,
- remoteRTCPPort,
- rtcpNotify,
- &mRTCPSessionID);
- } else {
- CHECK_EQ(rtcpMode, TRANSPORT_TCP);
- err = mNetSession->createTCPDatagramSession(
- localRTPPort + 1,
- remoteHost,
- remoteRTCPPort,
- rtcpNotify,
- &mRTCPSessionID);
- }
-
- if (err == OK) {
- break;
- }
-
- mNetSession->destroySession(mRTPSessionID);
- mRTPSessionID = 0;
- }
-
- if (rtpMode == TRANSPORT_UDP) {
- mRTPConnected = true;
- }
-
- if (rtcpMode == TRANSPORT_UDP) {
- mRTCPConnected = true;
- }
-
- mRTPMode = rtpMode;
- mRTCPMode = rtcpMode;
- *outLocalRTPPort = localRTPPort;
-
- if (mRTPMode == TRANSPORT_UDP
- && (mRTCPMode == TRANSPORT_UDP || mRTCPMode == TRANSPORT_NONE)) {
- notifyInitDone(OK);
- }
-
- return OK;
-}
-
-status_t RTPSender::queueBuffer(
- const sp<ABuffer> &buffer, uint8_t packetType, PacketizationMode mode) {
- status_t err;
-
- switch (mode) {
- case PACKETIZATION_NONE:
- err = queueRawPacket(buffer, packetType);
- break;
-
- case PACKETIZATION_TRANSPORT_STREAM:
- err = queueTSPackets(buffer, packetType);
- break;
-
- case PACKETIZATION_H264:
- err = queueAVCBuffer(buffer, packetType);
- break;
-
- default:
- TRESPASS();
- }
-
- return err;
-}
-
-status_t RTPSender::queueRawPacket(
- const sp<ABuffer> &packet, uint8_t packetType) {
- CHECK_LE(packet->size(), kMaxUDPPacketSize - 12);
-
- int64_t timeUs;
- CHECK(packet->meta()->findInt64("timeUs", &timeUs));
-
- sp<ABuffer> udpPacket = new ABuffer(12 + packet->size());
-
- udpPacket->setInt32Data(mRTPSeqNo);
-
- uint8_t *rtp = udpPacket->data();
- rtp[0] = 0x80;
- rtp[1] = packetType;
-
- rtp[2] = (mRTPSeqNo >> 8) & 0xff;
- rtp[3] = mRTPSeqNo & 0xff;
- ++mRTPSeqNo;
-
- uint32_t rtpTime = (timeUs * 9) / 100ll;
-
- rtp[4] = rtpTime >> 24;
- rtp[5] = (rtpTime >> 16) & 0xff;
- rtp[6] = (rtpTime >> 8) & 0xff;
- rtp[7] = rtpTime & 0xff;
-
- rtp[8] = kSourceID >> 24;
- rtp[9] = (kSourceID >> 16) & 0xff;
- rtp[10] = (kSourceID >> 8) & 0xff;
- rtp[11] = kSourceID & 0xff;
-
- memcpy(&rtp[12], packet->data(), packet->size());
-
- return sendRTPPacket(
- udpPacket,
- true /* storeInHistory */,
- true /* timeValid */,
- ALooper::GetNowUs());
-}
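The 12 bytes written above are the fixed RTP header of RFC 3550: byte 0 is the version (0x80 = version 2, no padding/extension/CSRC), byte 1 the payload type, bytes 2-3 the sequence number, bytes 4-7 the timestamp and bytes 8-11 the SSRC (kSourceID). The conversion "(timeUs * 9) / 100" is simply the 90 kHz media clock: 1,000,000 us * 90,000 / 1,000,000 = 90,000 ticks, so multiplying microseconds by 9/100 yields 90 kHz timestamp units.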
-
-status_t RTPSender::queueTSPackets(
- const sp<ABuffer> &tsPackets, uint8_t packetType) {
- CHECK_EQ(0u, tsPackets->size() % 188);
-
- int64_t timeUs;
- CHECK(tsPackets->meta()->findInt64("timeUs", &timeUs));
-
- size_t srcOffset = 0;
- while (srcOffset < tsPackets->size()) {
- sp<ABuffer> udpPacket =
- new ABuffer(12 + kMaxNumTSPacketsPerRTPPacket * 188);
-
- udpPacket->setInt32Data(mRTPSeqNo);
-
- uint8_t *rtp = udpPacket->data();
- rtp[0] = 0x80;
- rtp[1] = packetType;
-
- rtp[2] = (mRTPSeqNo >> 8) & 0xff;
- rtp[3] = mRTPSeqNo & 0xff;
- ++mRTPSeqNo;
-
- int64_t nowUs = ALooper::GetNowUs();
- uint32_t rtpTime = (nowUs * 9) / 100ll;
-
- rtp[4] = rtpTime >> 24;
- rtp[5] = (rtpTime >> 16) & 0xff;
- rtp[6] = (rtpTime >> 8) & 0xff;
- rtp[7] = rtpTime & 0xff;
-
- rtp[8] = kSourceID >> 24;
- rtp[9] = (kSourceID >> 16) & 0xff;
- rtp[10] = (kSourceID >> 8) & 0xff;
- rtp[11] = kSourceID & 0xff;
-
- size_t numTSPackets = (tsPackets->size() - srcOffset) / 188;
- if (numTSPackets > kMaxNumTSPacketsPerRTPPacket) {
- numTSPackets = kMaxNumTSPacketsPerRTPPacket;
- }
-
- memcpy(&rtp[12], tsPackets->data() + srcOffset, numTSPackets * 188);
-
- udpPacket->setRange(0, 12 + numTSPackets * 188);
-
- srcOffset += numTSPackets * 188;
- bool isLastPacket = (srcOffset == tsPackets->size());
-
- status_t err = sendRTPPacket(
- udpPacket,
- true /* storeInHistory */,
- isLastPacket /* timeValid */,
- timeUs);
-
- if (err != OK) {
- return err;
- }
- }
-
- return OK;
-}
-
-status_t RTPSender::queueAVCBuffer(
- const sp<ABuffer> &accessUnit, uint8_t packetType) {
- int64_t timeUs;
- CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-
- uint32_t rtpTime = (timeUs * 9 / 100ll);
-
- List<sp<ABuffer> > packets;
-
- sp<ABuffer> out = new ABuffer(kMaxUDPPacketSize);
- size_t outBytesUsed = 12; // Placeholder for RTP header.
-
- const uint8_t *data = accessUnit->data();
- size_t size = accessUnit->size();
- const uint8_t *nalStart;
- size_t nalSize;
- while (getNextNALUnit(
- &data, &size, &nalStart, &nalSize,
- true /* startCodeFollows */) == OK) {
- size_t bytesNeeded = nalSize + 2;
- if (outBytesUsed == 12) {
- ++bytesNeeded;
- }
-
- if (outBytesUsed + bytesNeeded > out->capacity()) {
- bool emitSingleNALPacket = false;
-
- if (outBytesUsed == 12
- && outBytesUsed + nalSize <= out->capacity()) {
- // We haven't emitted anything into the current packet yet and
- // this NAL unit fits into a single-NAL-unit-packet while
- // it wouldn't have fit as part of a STAP-A packet.
-
- memcpy(out->data() + outBytesUsed, nalStart, nalSize);
- outBytesUsed += nalSize;
-
- emitSingleNALPacket = true;
- }
-
- if (outBytesUsed > 12) {
- out->setRange(0, outBytesUsed);
- packets.push_back(out);
- out = new ABuffer(kMaxUDPPacketSize);
- outBytesUsed = 12; // Placeholder for RTP header
- }
-
- if (emitSingleNALPacket) {
- continue;
- }
- }
-
- if (outBytesUsed + bytesNeeded <= out->capacity()) {
- uint8_t *dst = out->data() + outBytesUsed;
-
- if (outBytesUsed == 12) {
- *dst++ = 24; // STAP-A header
- }
-
- *dst++ = (nalSize >> 8) & 0xff;
- *dst++ = nalSize & 0xff;
- memcpy(dst, nalStart, nalSize);
-
- outBytesUsed += bytesNeeded;
- continue;
- }
-
- // This single NAL unit does not fit into a single RTP packet,
- // we need to emit an FU-A.
-
- CHECK_EQ(outBytesUsed, 12u);
-
- uint8_t nalType = nalStart[0] & 0x1f;
- uint8_t nri = (nalStart[0] >> 5) & 3;
-
- size_t srcOffset = 1;
- while (srcOffset < nalSize) {
- size_t copy = out->capacity() - outBytesUsed - 2;
- if (copy > nalSize - srcOffset) {
- copy = nalSize - srcOffset;
- }
-
- uint8_t *dst = out->data() + outBytesUsed;
- dst[0] = (nri << 5) | 28;
-
- dst[1] = nalType;
-
- if (srcOffset == 1) {
- dst[1] |= 0x80;
- }
-
- if (srcOffset + copy == nalSize) {
- dst[1] |= 0x40;
- }
-
- memcpy(&dst[2], nalStart + srcOffset, copy);
- srcOffset += copy;
-
- out->setRange(0, outBytesUsed + copy + 2);
-
- packets.push_back(out);
- out = new ABuffer(kMaxUDPPacketSize);
- outBytesUsed = 12; // Placeholder for RTP header
- }
- }
-
- if (outBytesUsed > 12) {
- out->setRange(0, outBytesUsed);
- packets.push_back(out);
- }
-
- while (!packets.empty()) {
- sp<ABuffer> out = *packets.begin();
- packets.erase(packets.begin());
-
- out->setInt32Data(mRTPSeqNo);
-
- bool last = packets.empty();
-
- uint8_t *dst = out->data();
-
- dst[0] = 0x80;
-
- dst[1] = packetType;
- if (last) {
- dst[1] |= 1 << 7; // M-bit
- }
-
- dst[2] = (mRTPSeqNo >> 8) & 0xff;
- dst[3] = mRTPSeqNo & 0xff;
- ++mRTPSeqNo;
-
- dst[4] = rtpTime >> 24;
- dst[5] = (rtpTime >> 16) & 0xff;
- dst[6] = (rtpTime >> 8) & 0xff;
- dst[7] = rtpTime & 0xff;
- dst[8] = kSourceID >> 24;
- dst[9] = (kSourceID >> 16) & 0xff;
- dst[10] = (kSourceID >> 8) & 0xff;
- dst[11] = kSourceID & 0xff;
-
- status_t err = sendRTPPacket(out, true /* storeInHistory */);
-
- if (err != OK) {
- return err;
- }
- }
-
- return OK;
-}
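The loop above implements two of the RFC 3984/6184 packetization modes: small NAL units are aggregated into STAP-A packets (type-24 header followed by 2-byte length prefixes), and a NAL unit too large for one datagram is split into FU-A fragments. For the FU-A case the two header bytes are derived from the original NAL header; as a worked example for an IDR slice (nal_unit_type 5, NRI 3):

    uint8_t nalHeader = 0x65;               // forbidden_zero=0, NRI=3, type=5 (IDR)
    uint8_t nri = (nalHeader >> 5) & 3;     // 3
    uint8_t nalType = nalHeader & 0x1f;     // 5
    uint8_t fuIndicator = (nri << 5) | 28;  // 0x7c, FU-A
    uint8_t fuHeaderFirst = 0x80 | nalType; // 0x85, S bit set on the first fragment
    uint8_t fuHeaderLast  = 0x40 | nalType; // 0x45, E bit set on the last fragment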
-
-status_t RTPSender::sendRTPPacket(
- const sp<ABuffer> &buffer, bool storeInHistory,
- bool timeValid, int64_t timeUs) {
- CHECK(mRTPConnected);
-
- status_t err = mNetSession->sendRequest(
- mRTPSessionID, buffer->data(), buffer->size(),
- timeValid, timeUs);
-
- if (err != OK) {
- return err;
- }
-
- mLastNTPTime = GetNowNTP();
- mLastRTPTime = U32_AT(buffer->data() + 4);
-
- ++mNumRTPSent;
- mNumRTPOctetsSent += buffer->size() - 12;
-
- if (storeInHistory) {
- if (mHistorySize == kMaxHistorySize) {
- mHistory.erase(mHistory.begin());
- } else {
- ++mHistorySize;
- }
- mHistory.push_back(buffer);
- }
-
- return OK;
-}
-
-// static
-uint64_t RTPSender::GetNowNTP() {
- struct timeval tv;
- gettimeofday(&tv, NULL /* timezone */);
-
- uint64_t nowUs = tv.tv_sec * 1000000ll + tv.tv_usec;
-
- nowUs += ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll;
-
- uint64_t hi = nowUs / 1000000ll;
- uint64_t lo = ((1ll << 32) * (nowUs % 1000000ll)) / 1000000ll;
-
- return (hi << 32) | lo;
-}
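The magic offset above is the gap between the Unix epoch (1970) and the NTP epoch (1900): 70 years plus 17 leap days, i.e. (70 * 365 + 17) * 86,400 = 2,208,988,800 seconds, expressed in microseconds. The result is then packed as a 32.32 fixed-point NTP timestamp: whole seconds in the high 32 bits, and the fractional second scaled by 2^32 in the low 32 bits.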
-
-void RTPSender::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatRTPNotify:
- case kWhatRTCPNotify:
- onNetNotify(msg->what() == kWhatRTPNotify, msg);
- break;
-
- default:
- TRESPASS();
- }
-}
-
-void RTPSender::onNetNotify(bool isRTP, const sp<AMessage> &msg) {
- int32_t reason;
- CHECK(msg->findInt32("reason", &reason));
-
- switch (reason) {
- case ANetworkSession::kWhatError:
- {
- int32_t sessionID;
- CHECK(msg->findInt32("sessionID", &sessionID));
-
- int32_t err;
- CHECK(msg->findInt32("err", &err));
-
- int32_t errorOccuredDuringSend;
- CHECK(msg->findInt32("send", &errorOccuredDuringSend));
-
- AString detail;
- CHECK(msg->findString("detail", &detail));
-
- ALOGE("An error occurred during %s in session %d "
- "(%d, '%s' (%s)).",
- errorOccuredDuringSend ? "send" : "receive",
- sessionID,
- err,
- detail.c_str(),
- strerror(-err));
-
- mNetSession->destroySession(sessionID);
-
- if (sessionID == mRTPSessionID) {
- mRTPSessionID = 0;
- } else if (sessionID == mRTCPSessionID) {
- mRTCPSessionID = 0;
- }
-
- if (!mRTPConnected
- || (mRTPMode != TRANSPORT_NONE && !mRTCPConnected)) {
- // We haven't completed initialization, attach the error
- // to the notification instead.
- notifyInitDone(err);
- break;
- }
-
- notifyError(err);
- break;
- }
-
- case ANetworkSession::kWhatDatagram:
- {
- sp<ABuffer> data;
- CHECK(msg->findBuffer("data", &data));
-
- if (isRTP) {
- ALOGW("Huh? Received data on RTP connection...");
- } else {
- onRTCPData(data);
- }
- break;
- }
-
- case ANetworkSession::kWhatConnected:
- {
- int32_t sessionID;
- CHECK(msg->findInt32("sessionID", &sessionID));
-
- if (isRTP) {
- CHECK_EQ(mRTPMode, TRANSPORT_TCP);
- CHECK_EQ(sessionID, mRTPSessionID);
- mRTPConnected = true;
- } else {
- CHECK_EQ(mRTCPMode, TRANSPORT_TCP);
- CHECK_EQ(sessionID, mRTCPSessionID);
- mRTCPConnected = true;
- }
-
- if (mRTPConnected
- && (mRTCPMode == TRANSPORT_NONE || mRTCPConnected)) {
- notifyInitDone(OK);
- }
- break;
- }
-
- case ANetworkSession::kWhatNetworkStall:
- {
- size_t numBytesQueued;
- CHECK(msg->findSize("numBytesQueued", &numBytesQueued));
-
- notifyNetworkStall(numBytesQueued);
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-status_t RTPSender::onRTCPData(const sp<ABuffer> &buffer) {
- const uint8_t *data = buffer->data();
- size_t size = buffer->size();
-
- while (size > 0) {
- if (size < 8) {
- // Too short to be a valid RTCP header
- return ERROR_MALFORMED;
- }
-
- if ((data[0] >> 6) != 2) {
- // Unsupported version.
- return ERROR_UNSUPPORTED;
- }
-
- if (data[0] & 0x20) {
- // Padding present.
-
- size_t paddingLength = data[size - 1];
-
- if (paddingLength + 12 > size) {
- // If we removed this much padding we'd end up with something
- // that's too short to be a valid RTCP header.
- return ERROR_MALFORMED;
- }
-
- size -= paddingLength;
- }
-
- size_t headerLength = 4 * (data[2] << 8 | data[3]) + 4;
-
- if (size < headerLength) {
- // Only received a partial packet?
- return ERROR_MALFORMED;
- }
-
- switch (data[1]) {
- case 200:
- case 201: // RR
- parseReceiverReport(data, headerLength);
- break;
-
- case 202: // SDES
- case 203:
- break;
-
- case 204: // APP
- parseAPP(data, headerLength);
- break;
-
- case 205: // TSFB (transport layer specific feedback)
- parseTSFB(data, headerLength);
- break;
-
- case 206: // PSFB (payload specific feedback)
- // hexdump(data, headerLength);
- break;
-
- default:
- {
- ALOGW("Unknown RTCP packet type %u of size %zu",
- (unsigned)data[1], headerLength);
- break;
- }
- }
-
- data += headerLength;
- size -= headerLength;
- }
-
- return OK;
-}
-
-status_t RTPSender::parseReceiverReport(
- const uint8_t *data, size_t /* size */) {
- float fractionLost = data[12] / 256.0f;
-
- ALOGI("lost %.2f %% of packets during report interval.",
- 100.0f * fractionLost);
-
- return OK;
-}
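Byte 12 of a receiver report is the 8-bit "fraction lost" field of the first report block (RFC 3550), the loss rate since the previous report scaled by 256; a value of 64, for example, logs as 25.00 % lost.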
-
-status_t RTPSender::parseTSFB(const uint8_t *data, size_t size) {
- if ((data[0] & 0x1f) != 1) {
- return ERROR_UNSUPPORTED; // We only support NACK for now.
- }
-
- uint32_t srcId = U32_AT(&data[8]);
- if (srcId != kSourceID) {
- return ERROR_MALFORMED;
- }
-
- for (size_t i = 12; i < size; i += 4) {
- uint16_t seqNo = U16_AT(&data[i]);
- uint16_t blp = U16_AT(&data[i + 2]);
-
- List<sp<ABuffer> >::iterator it = mHistory.begin();
- bool foundSeqNo = false;
- while (it != mHistory.end()) {
- const sp<ABuffer> &buffer = *it;
-
- uint16_t bufferSeqNo = buffer->int32Data() & 0xffff;
-
- bool retransmit = false;
- if (bufferSeqNo == seqNo) {
- retransmit = true;
- } else if (blp != 0) {
- for (size_t i = 0; i < 16; ++i) {
- if ((blp & (1 << i))
- && (bufferSeqNo == ((seqNo + i + 1) & 0xffff))) {
- blp &= ~(1 << i);
- retransmit = true;
- }
- }
- }
-
- if (retransmit) {
- ALOGV("retransmitting seqNo %d", bufferSeqNo);
-
- CHECK_EQ((status_t)OK,
- sendRTPPacket(buffer, false /* storeInHistory */));
-
- if (bufferSeqNo == seqNo) {
- foundSeqNo = true;
- }
-
- if (foundSeqNo && blp == 0) {
- break;
- }
- }
-
- ++it;
- }
-
- if (!foundSeqNo || blp != 0) {
- ALOGI("Some sequence numbers were no longer available for "
- "retransmission (seqNo = %d, foundSeqNo = %d, blp = 0x%04x)",
- seqNo, foundSeqNo, blp);
-
- if (!mHistory.empty()) {
- int32_t earliest = (*mHistory.begin())->int32Data() & 0xffff;
- int32_t latest = (*--mHistory.end())->int32Data() & 0xffff;
-
- ALOGI("have seq numbers from %d - %d", earliest, latest);
- }
- }
- }
-
- return OK;
-}
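The loop above walks Generic NACK FCI entries (RFC 4585): each 4-byte entry carries a packet ID (PID) plus a 16-bit bitmask (BLP) of additional losses following it. A small standalone sketch of the same expansion (ExpandNack is a made-up helper, not part of the class):

    #include <cstdint>
    #include <vector>

    // Expand one NACK entry into the sequence numbers the receiver
    // is asking to have retransmitted.
    static std::vector<uint16_t> ExpandNack(uint16_t pid, uint16_t blp) {
        std::vector<uint16_t> seqNos = { pid };
        for (int i = 0; i < 16; ++i) {
            if (blp & (1 << i)) {
                seqNos.push_back(static_cast<uint16_t>(pid + i + 1));
            }
        }
        return seqNos;
    }
    // ExpandNack(100, 0x0005) -> { 100, 101, 103 }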
-
-status_t RTPSender::parseAPP(const uint8_t *data, size_t size) {
- static const size_t late_offset = 8;
- static const char late_string[] = "late";
- static const size_t avgLatencyUs_offset = late_offset + sizeof(late_string) - 1;
- static const size_t maxLatencyUs_offset = avgLatencyUs_offset + sizeof(int64_t);
-
- if ((size >= (maxLatencyUs_offset + sizeof(int64_t)))
- && !memcmp(late_string, &data[late_offset], sizeof(late_string) - 1)) {
- int64_t avgLatencyUs = (int64_t)U64_AT(&data[avgLatencyUs_offset]);
- int64_t maxLatencyUs = (int64_t)U64_AT(&data[maxLatencyUs_offset]);
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatInformSender);
- notify->setInt64("avgLatencyUs", avgLatencyUs);
- notify->setInt64("maxLatencyUs", maxLatencyUs);
- notify->post();
- }
-
- return OK;
-}
-
-void RTPSender::notifyInitDone(status_t err) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatInitDone);
- notify->setInt32("err", err);
- notify->post();
-}
-
-void RTPSender::notifyError(status_t err) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatError);
- notify->setInt32("err", err);
- notify->post();
-}
-
-void RTPSender::notifyNetworkStall(size_t numBytesQueued) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatNetworkStall);
- notify->setSize("numBytesQueued", numBytesQueued);
- notify->post();
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.h b/media/libstagefright/wifi-display/rtp/RTPSender.h
deleted file mode 100644
index bedfd01..0000000
--- a/media/libstagefright/wifi-display/rtp/RTPSender.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2013, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef RTP_SENDER_H_
-
-#define RTP_SENDER_H_
-
-#include "RTPBase.h"
-
-#include <media/stagefright/foundation/AHandler.h>
-
-namespace android {
-
-struct ABuffer;
-struct ANetworkSession;
-
-// An object of this class facilitates sending of media data over an RTP
-// channel. The channel is established over a UDP or TCP connection depending
-// on which "TransportMode" was chosen. In addition, different RTP packetization
-// schemes are supported, such as "Transport Stream Packets over RTP",
-// or "AVC/H.264 encapsulation as specified in RFC 3984 (non-interleaved mode)"
-struct RTPSender : public RTPBase, public AHandler {
- enum {
- kWhatInitDone,
- kWhatError,
- kWhatNetworkStall,
- kWhatInformSender,
- };
- RTPSender(
- const sp<ANetworkSession> &netSession,
- const sp<AMessage> ¬ify);
-
- status_t initAsync(
- const char *remoteHost,
- int32_t remoteRTPPort,
- TransportMode rtpMode,
- int32_t remoteRTCPPort,
- TransportMode rtcpMode,
- int32_t *outLocalRTPPort);
-
- status_t queueBuffer(
- const sp<ABuffer> &buffer,
- uint8_t packetType,
- PacketizationMode mode);
-
-protected:
- virtual ~RTPSender();
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
-private:
- enum {
- kWhatRTPNotify,
- kWhatRTCPNotify,
- };
-
- const unsigned int kMaxNumTSPacketsPerRTPPacket = (kMaxUDPPacketSize - 12) / 188;
- const unsigned int kMaxHistorySize = 1024;
- const unsigned int kSourceID = 0xdeadbeef;
-
- sp<ANetworkSession> mNetSession;
- sp<AMessage> mNotify;
- TransportMode mRTPMode;
- TransportMode mRTCPMode;
- int32_t mRTPSessionID;
- int32_t mRTCPSessionID;
- bool mRTPConnected;
- bool mRTCPConnected;
-
- uint64_t mLastNTPTime;
- uint32_t mLastRTPTime;
- uint32_t mNumRTPSent;
- uint32_t mNumRTPOctetsSent;
- uint32_t mNumSRsSent;
-
- uint32_t mRTPSeqNo;
-
- List<sp<ABuffer> > mHistory;
- size_t mHistorySize;
-
- static uint64_t GetNowNTP();
-
- status_t queueRawPacket(const sp<ABuffer> &tsPackets, uint8_t packetType);
- status_t queueTSPackets(const sp<ABuffer> &tsPackets, uint8_t packetType);
- status_t queueAVCBuffer(const sp<ABuffer> &accessUnit, uint8_t packetType);
-
- status_t sendRTPPacket(
- const sp<ABuffer> &packet, bool storeInHistory,
- bool timeValid = false, int64_t timeUs = -1ll);
-
- void onNetNotify(bool isRTP, const sp<AMessage> &msg);
-
- status_t onRTCPData(const sp<ABuffer> &data);
- status_t parseReceiverReport(const uint8_t *data, size_t size);
- status_t parseTSFB(const uint8_t *data, size_t size);
- status_t parseAPP(const uint8_t *data, size_t size);
-
- void notifyInitDone(status_t err);
- void notifyError(status_t err);
- void notifyNetworkStall(size_t numBytesQueued);
-
- DISALLOW_EVIL_CONSTRUCTORS(RTPSender);
-};
-
-} // namespace android
-
-#endif // RTP_SENDER_H_
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
deleted file mode 100644
index 273af18..0000000
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ /dev/null
@@ -1,821 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "Converter"
-#include <utils/Log.h>
-
-#include "Converter.h"
-
-#include "MediaPuller.h"
-#include "include/avc_utils.h"
-
-#include <cutils/properties.h>
-#include <gui/Surface.h>
-#include <media/ICrypto.h>
-#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaCodec.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-
-#include <arpa/inet.h>
-
-#include <OMX_Video.h>
-
-namespace android {
-
-Converter::Converter(
- const sp<AMessage> ¬ify,
- const sp<ALooper> &codecLooper,
- const sp<AMessage> &outputFormat,
- uint32_t flags)
- : mNotify(notify),
- mCodecLooper(codecLooper),
- mOutputFormat(outputFormat),
- mFlags(flags),
- mIsVideo(false),
- mIsH264(false),
- mIsPCMAudio(false),
- mNeedToManuallyPrependSPSPPS(false),
- mDoMoreWorkPending(false)
-#if ENABLE_SILENCE_DETECTION
- ,mFirstSilentFrameUs(-1ll)
- ,mInSilentMode(false)
-#endif
- ,mPrevVideoBitrate(-1)
- ,mNumFramesToDrop(0)
- ,mEncodingSuspended(false)
- {
- AString mime;
- CHECK(mOutputFormat->findString("mime", &mime));
-
- if (!strncasecmp("video/", mime.c_str(), 6)) {
- mIsVideo = true;
-
- mIsH264 = !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_VIDEO_AVC);
- } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime.c_str())) {
- mIsPCMAudio = true;
- }
-}
-
-void Converter::releaseEncoder() {
- if (mEncoder == NULL) {
- return;
- }
-
- mEncoder->release();
- mEncoder.clear();
-
- mInputBufferQueue.clear();
- mEncoderInputBuffers.clear();
- mEncoderOutputBuffers.clear();
-}
-
-Converter::~Converter() {
- CHECK(mEncoder == NULL);
-}
-
-void Converter::shutdownAsync() {
- ALOGV("shutdown");
- (new AMessage(kWhatShutdown, this))->post();
-}
-
-status_t Converter::init() {
- status_t err = initEncoder();
-
- if (err != OK) {
- releaseEncoder();
- }
-
- return err;
-}
-
-sp<IGraphicBufferProducer> Converter::getGraphicBufferProducer() {
- CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
- return mGraphicBufferProducer;
-}
-
-size_t Converter::getInputBufferCount() const {
- return mEncoderInputBuffers.size();
-}
-
-sp<AMessage> Converter::getOutputFormat() const {
- return mOutputFormat;
-}
-
-bool Converter::needToManuallyPrependSPSPPS() const {
- return mNeedToManuallyPrependSPSPPS;
-}
-
-// static
-int32_t Converter::GetInt32Property(
- const char *propName, int32_t defaultValue) {
- char val[PROPERTY_VALUE_MAX];
- if (property_get(propName, val, NULL)) {
- char *end;
- unsigned long x = strtoul(val, &end, 10);
-
- if (*end == '\0' && end > val && x > 0) {
- return x;
- }
- }
-
- return defaultValue;
-}
-
-status_t Converter::initEncoder() {
- AString outputMIME;
- CHECK(mOutputFormat->findString("mime", &outputMIME));
-
- bool isAudio = !strncasecmp(outputMIME.c_str(), "audio/", 6);
-
- if (!mIsPCMAudio) {
- mEncoder = MediaCodec::CreateByType(
- mCodecLooper, outputMIME.c_str(), true /* encoder */);
-
- if (mEncoder == NULL) {
- return ERROR_UNSUPPORTED;
- }
- }
-
- if (mIsPCMAudio) {
- return OK;
- }
-
- int32_t audioBitrate = GetInt32Property("media.wfd.audio-bitrate", 128000);
- int32_t videoBitrate = GetInt32Property("media.wfd.video-bitrate", 5000000);
- mPrevVideoBitrate = videoBitrate;
-
- ALOGI("using audio bitrate of %d bps, video bitrate of %d bps",
- audioBitrate, videoBitrate);
-
- if (isAudio) {
- mOutputFormat->setInt32("bitrate", audioBitrate);
- } else {
- mOutputFormat->setInt32("bitrate", videoBitrate);
- mOutputFormat->setInt32("bitrate-mode", OMX_Video_ControlRateConstant);
- mOutputFormat->setInt32("frame-rate", 30);
- mOutputFormat->setInt32("i-frame-interval", 15); // Iframes every 15 secs
-
- // Configure encoder to use intra macroblock refresh mode
- mOutputFormat->setInt32("intra-refresh-mode", OMX_VIDEO_IntraRefreshCyclic);
-
- int width, height, mbs;
- if (!mOutputFormat->findInt32("width", &width)
- || !mOutputFormat->findInt32("height", &height)) {
- return ERROR_UNSUPPORTED;
- }
-
- // Update macroblocks in a cyclic fashion, with 10% of all MBs within a
- // frame updated at one time. It takes about 10 frames to completely
- // refresh a whole video frame. If the frame rate is 30, it takes about
- // 333 ms in the best case (if the next frame is not an IDR) to recover
- // from a lost/corrupted packet.
- mbs = (((width + 15) / 16) * ((height + 15) / 16) * 10) / 100;
- mOutputFormat->setInt32("intra-refresh-CIR-mbs", mbs);
- }
-
- ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
-
- mNeedToManuallyPrependSPSPPS = false;
-
- status_t err = NO_INIT;
-
- if (!isAudio) {
- sp<AMessage> tmp = mOutputFormat->dup();
- tmp->setInt32("prepend-sps-pps-to-idr-frames", 1);
-
- err = mEncoder->configure(
- tmp,
- NULL /* nativeWindow */,
- NULL /* crypto */,
- MediaCodec::CONFIGURE_FLAG_ENCODE);
-
- if (err == OK) {
- // Encoder supported prepending SPS/PPS, we don't need to emulate
- // it.
- mOutputFormat = tmp;
- } else {
- mNeedToManuallyPrependSPSPPS = true;
-
- ALOGI("We going to manually prepend SPS and PPS to IDR frames.");
- }
- }
-
- if (err != OK) {
- // We'll get here for audio or if we failed to configure the encoder
- // to automatically prepend SPS/PPS in the case of video.
-
- err = mEncoder->configure(
- mOutputFormat,
- NULL /* nativeWindow */,
- NULL /* crypto */,
- MediaCodec::CONFIGURE_FLAG_ENCODE);
- }
-
- if (err != OK) {
- return err;
- }
-
- if (mFlags & FLAG_USE_SURFACE_INPUT) {
- CHECK(mIsVideo);
-
- err = mEncoder->createInputSurface(&mGraphicBufferProducer);
-
- if (err != OK) {
- return err;
- }
- }
-
- err = mEncoder->start();
-
- if (err != OK) {
- return err;
- }
-
- err = mEncoder->getInputBuffers(&mEncoderInputBuffers);
-
- if (err != OK) {
- return err;
- }
-
- err = mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
-
- if (err != OK) {
- return err;
- }
-
- if (mFlags & FLAG_USE_SURFACE_INPUT) {
- scheduleDoMoreWork();
- }
-
- return OK;
-}
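To make the cyclic-intra-refresh sizing above concrete: a 1280x720 stream has ((1280 + 15) / 16) * ((720 + 15) / 16) = 80 * 45 = 3,600 macroblocks, so 10 % of them, 360 macroblocks, are intra-refreshed per frame and a full refresh completes after about 10 frames.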
-
-void Converter::notifyError(status_t err) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatError);
- notify->setInt32("err", err);
- notify->post();
-}
-
-// static
-bool Converter::IsSilence(const sp<ABuffer> &accessUnit) {
- const uint8_t *ptr = accessUnit->data();
- const uint8_t *end = ptr + accessUnit->size();
- while (ptr < end) {
- if (*ptr != 0) {
- return false;
- }
- ++ptr;
- }
-
- return true;
-}
-
-void Converter::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatMediaPullerNotify:
- {
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- if (!mIsPCMAudio && mEncoder == NULL) {
- ALOGV("got msg '%s' after encoder shutdown.",
- msg->debugString().c_str());
-
- if (what == MediaPuller::kWhatAccessUnit) {
- sp<ABuffer> accessUnit;
- CHECK(msg->findBuffer("accessUnit", &accessUnit));
-
- accessUnit->setMediaBufferBase(NULL);
- }
- break;
- }
-
- if (what == MediaPuller::kWhatEOS) {
- mInputBufferQueue.push_back(NULL);
-
- feedEncoderInputBuffers();
-
- scheduleDoMoreWork();
- } else {
- CHECK_EQ(what, MediaPuller::kWhatAccessUnit);
-
- sp<ABuffer> accessUnit;
- CHECK(msg->findBuffer("accessUnit", &accessUnit));
-
- if (mNumFramesToDrop > 0 || mEncodingSuspended) {
- if (mNumFramesToDrop > 0) {
- --mNumFramesToDrop;
- ALOGI("dropping frame.");
- }
-
- accessUnit->setMediaBufferBase(NULL);
- break;
- }
-
-#if 0
- MediaBuffer *mbuf =
- (MediaBuffer *)(accessUnit->getMediaBufferBase());
- if (mbuf != NULL) {
- ALOGI("queueing mbuf %p", mbuf);
- mbuf->release();
- }
-#endif
-
-#if ENABLE_SILENCE_DETECTION
- if (!mIsVideo) {
- if (IsSilence(accessUnit)) {
- if (mInSilentMode) {
- break;
- }
-
- int64_t nowUs = ALooper::GetNowUs();
-
- if (mFirstSilentFrameUs < 0ll) {
- mFirstSilentFrameUs = nowUs;
- } else if (nowUs >= mFirstSilentFrameUs + 10000000ll) {
- mInSilentMode = true;
- ALOGI("audio in silent mode now.");
- break;
- }
- } else {
- if (mInSilentMode) {
- ALOGI("audio no longer in silent mode.");
- }
- mInSilentMode = false;
- mFirstSilentFrameUs = -1ll;
- }
- }
-#endif
-
- mInputBufferQueue.push_back(accessUnit);
-
- feedEncoderInputBuffers();
-
- scheduleDoMoreWork();
- }
- break;
- }
-
- case kWhatEncoderActivity:
- {
-#if 0
- int64_t whenUs;
- if (msg->findInt64("whenUs", &whenUs)) {
- int64_t nowUs = ALooper::GetNowUs();
- ALOGI("[%s] kWhatEncoderActivity after %lld us",
- mIsVideo ? "video" : "audio", nowUs - whenUs);
- }
-#endif
-
- mDoMoreWorkPending = false;
-
- if (mEncoder == NULL) {
- break;
- }
-
- status_t err = doMoreWork();
-
- if (err != OK) {
- notifyError(err);
- } else {
- scheduleDoMoreWork();
- }
- break;
- }
-
- case kWhatRequestIDRFrame:
- {
- if (mEncoder == NULL) {
- break;
- }
-
- if (mIsVideo) {
- ALOGV("requesting IDR frame");
- mEncoder->requestIDRFrame();
- }
- break;
- }
-
- case kWhatShutdown:
- {
- ALOGI("shutting down %s encoder", mIsVideo ? "video" : "audio");
-
- releaseEncoder();
-
- AString mime;
- CHECK(mOutputFormat->findString("mime", &mime));
- ALOGI("encoder (%s) shut down.", mime.c_str());
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatShutdownCompleted);
- notify->post();
- break;
- }
-
- case kWhatDropAFrame:
- {
- ++mNumFramesToDrop;
- break;
- }
-
- case kWhatReleaseOutputBuffer:
- {
- if (mEncoder != NULL) {
- size_t bufferIndex;
- CHECK(msg->findInt32("bufferIndex", (int32_t*)&bufferIndex));
- CHECK(bufferIndex < mEncoderOutputBuffers.size());
- mEncoder->releaseOutputBuffer(bufferIndex);
- }
- break;
- }
-
- case kWhatSuspendEncoding:
- {
- int32_t suspend;
- CHECK(msg->findInt32("suspend", &suspend));
-
- mEncodingSuspended = suspend;
-
- if (mFlags & FLAG_USE_SURFACE_INPUT) {
- sp<AMessage> params = new AMessage;
- params->setInt32("drop-input-frames",suspend);
- mEncoder->setParameters(params);
- }
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void Converter::scheduleDoMoreWork() {
- if (mIsPCMAudio) {
- // There's no encoder involved in this case.
- return;
- }
-
- if (mDoMoreWorkPending) {
- return;
- }
-
- mDoMoreWorkPending = true;
-
-#if 1
- if (mEncoderActivityNotify == NULL) {
- mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, this);
- }
- mEncoder->requestActivityNotification(mEncoderActivityNotify->dup());
-#else
- sp<AMessage> notify = new AMessage(kWhatEncoderActivity, this);
- notify->setInt64("whenUs", ALooper::GetNowUs());
- mEncoder->requestActivityNotification(notify);
-#endif
-}
-
-status_t Converter::feedRawAudioInputBuffers() {
- // Split incoming PCM audio into buffers of 6 AUs of 80 audio frames each
- // and add a 4 byte header according to the wifi display specs.
-
- while (!mInputBufferQueue.empty()) {
- sp<ABuffer> buffer = *mInputBufferQueue.begin();
- mInputBufferQueue.erase(mInputBufferQueue.begin());
-
- int16_t *ptr = (int16_t *)buffer->data();
- int16_t *stop = (int16_t *)(buffer->data() + buffer->size());
- while (ptr < stop) {
- *ptr = htons(*ptr);
- ++ptr;
- }
-
- static const size_t kFrameSize = 2 * sizeof(int16_t); // stereo
- static const size_t kFramesPerAU = 80;
- static const size_t kNumAUsPerPESPacket = 6;
-
- if (mPartialAudioAU != NULL) {
- size_t bytesMissingForFullAU =
- kNumAUsPerPESPacket * kFramesPerAU * kFrameSize
- - mPartialAudioAU->size() + 4;
-
- size_t copy = buffer->size();
- if(copy > bytesMissingForFullAU) {
- copy = bytesMissingForFullAU;
- }
-
- memcpy(mPartialAudioAU->data() + mPartialAudioAU->size(),
- buffer->data(),
- copy);
-
- mPartialAudioAU->setRange(0, mPartialAudioAU->size() + copy);
-
- buffer->setRange(buffer->offset() + copy, buffer->size() - copy);
-
- int64_t timeUs;
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-
- int64_t copyUs = (int64_t)((copy / kFrameSize) * 1E6 / 48000.0);
- timeUs += copyUs;
- buffer->meta()->setInt64("timeUs", timeUs);
-
- if (bytesMissingForFullAU == copy) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatAccessUnit);
- notify->setBuffer("accessUnit", mPartialAudioAU);
- notify->post();
-
- mPartialAudioAU.clear();
- }
- }
-
- while (buffer->size() > 0) {
- sp<ABuffer> partialAudioAU =
- new ABuffer(
- 4
- + kNumAUsPerPESPacket * kFrameSize * kFramesPerAU);
-
- uint8_t *ptr = partialAudioAU->data();
- ptr[0] = 0xa0; // 10100000b
- ptr[1] = kNumAUsPerPESPacket;
- ptr[2] = 0; // reserved, audio _emphasis_flag = 0
-
- static const unsigned kQuantizationWordLength = 0; // 16-bit
- static const unsigned kAudioSamplingFrequency = 2; // 48Khz
- static const unsigned kNumberOfAudioChannels = 1; // stereo
-
- ptr[3] = (kQuantizationWordLength << 6)
- | (kAudioSamplingFrequency << 3)
- | kNumberOfAudioChannels;
-
- size_t copy = buffer->size();
- if (copy > partialAudioAU->size() - 4) {
- copy = partialAudioAU->size() - 4;
- }
-
- memcpy(&ptr[4], buffer->data(), copy);
-
- partialAudioAU->setRange(0, 4 + copy);
- buffer->setRange(buffer->offset() + copy, buffer->size() - copy);
-
- int64_t timeUs;
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-
- partialAudioAU->meta()->setInt64("timeUs", timeUs);
-
- int64_t copyUs = (int64_t)((copy / kFrameSize) * 1E6 / 48000.0);
- timeUs += copyUs;
- buffer->meta()->setInt64("timeUs", timeUs);
-
- if (copy == partialAudioAU->capacity() - 4) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatAccessUnit);
- notify->setBuffer("accessUnit", partialAudioAU);
- notify->post();
-
- partialAudioAU.clear();
- continue;
- }
-
- mPartialAudioAU = partialAudioAU;
- }
- }
-
- return OK;
-}
-
-status_t Converter::feedEncoderInputBuffers() {
- if (mIsPCMAudio) {
- return feedRawAudioInputBuffers();
- }
-
- while (!mInputBufferQueue.empty()
- && !mAvailEncoderInputIndices.empty()) {
- sp<ABuffer> buffer = *mInputBufferQueue.begin();
- mInputBufferQueue.erase(mInputBufferQueue.begin());
-
- size_t bufferIndex = *mAvailEncoderInputIndices.begin();
- mAvailEncoderInputIndices.erase(mAvailEncoderInputIndices.begin());
-
- int64_t timeUs = 0ll;
- uint32_t flags = 0;
-
- if (buffer != NULL) {
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-
- memcpy(mEncoderInputBuffers.itemAt(bufferIndex)->data(),
- buffer->data(),
- buffer->size());
-
- MediaBuffer *mediaBuffer =
- (MediaBuffer *)(buffer->getMediaBufferBase());
- if (mediaBuffer != NULL) {
- mEncoderInputBuffers.itemAt(bufferIndex)->setMediaBufferBase(
- mediaBuffer);
-
- buffer->setMediaBufferBase(NULL);
- }
- } else {
- flags = MediaCodec::BUFFER_FLAG_EOS;
- }
-
- status_t err = mEncoder->queueInputBuffer(
- bufferIndex, 0, (buffer == NULL) ? 0 : buffer->size(),
- timeUs, flags);
-
- if (err != OK) {
- return err;
- }
- }
-
- return OK;
-}
-
-sp<ABuffer> Converter::prependCSD(const sp<ABuffer> &accessUnit) const {
- CHECK(mCSD0 != NULL);
-
- sp<ABuffer> dup = new ABuffer(accessUnit->size() + mCSD0->size());
- memcpy(dup->data(), mCSD0->data(), mCSD0->size());
- memcpy(dup->data() + mCSD0->size(), accessUnit->data(), accessUnit->size());
-
- int64_t timeUs;
- CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-
- dup->meta()->setInt64("timeUs", timeUs);
-
- return dup;
-}
-
-status_t Converter::doMoreWork() {
- status_t err;
-
- if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
- for (;;) {
- size_t bufferIndex;
- err = mEncoder->dequeueInputBuffer(&bufferIndex);
-
- if (err != OK) {
- break;
- }
-
- mAvailEncoderInputIndices.push_back(bufferIndex);
- }
-
- feedEncoderInputBuffers();
- }
-
- for (;;) {
- size_t bufferIndex;
- size_t offset;
- size_t size;
- int64_t timeUs;
- uint32_t flags;
- native_handle_t* handle = NULL;
- err = mEncoder->dequeueOutputBuffer(
- &bufferIndex, &offset, &size, &timeUs, &flags);
-
- if (err != OK) {
- if (err == INFO_FORMAT_CHANGED) {
- continue;
- } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
- mEncoder->getOutputBuffers(&mEncoderOutputBuffers);
- continue;
- }
-
- if (err == -EAGAIN) {
- err = OK;
- }
- break;
- }
-
- if (flags & MediaCodec::BUFFER_FLAG_EOS) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatEOS);
- notify->post();
- } else {
-#if 0
- if (mIsVideo) {
- int32_t videoBitrate = GetInt32Property(
- "media.wfd.video-bitrate", 5000000);
-
- setVideoBitrate(videoBitrate);
- }
-#endif
-
- sp<ABuffer> buffer;
- sp<MediaCodecBuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
-
- if (outbuf->meta()->findPointer("handle", (void**)&handle) &&
- handle != NULL) {
- int32_t rangeLength, rangeOffset;
- CHECK(outbuf->meta()->findInt32("rangeOffset", &rangeOffset));
- CHECK(outbuf->meta()->findInt32("rangeLength", &rangeLength));
- outbuf->meta()->setPointer("handle", NULL);
-
- // MediaSender will post the following message when HDCP
- // is done, to release the output buffer back to encoder.
- sp<AMessage> notify(new AMessage(kWhatReleaseOutputBuffer, this));
- notify->setInt32("bufferIndex", bufferIndex);
-
- buffer = new ABuffer(
- rangeLength > (int32_t)size ? rangeLength : size);
- buffer->meta()->setPointer("handle", handle);
- buffer->meta()->setInt32("rangeOffset", rangeOffset);
- buffer->meta()->setInt32("rangeLength", rangeLength);
- buffer->meta()->setMessage("notify", notify);
- } else {
- buffer = new ABuffer(size);
- }
-
- buffer->meta()->setInt64("timeUs", timeUs);
-
- ALOGV("[%s] time %lld us (%.2f secs)",
- mIsVideo ? "video" : "audio", (long long)timeUs, timeUs / 1E6);
-
- memcpy(buffer->data(), outbuf->base() + offset, size);
-
- if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
- if (!handle) {
- if (mIsH264) {
- mCSD0 = buffer;
- }
- mOutputFormat->setBuffer("csd-0", buffer);
- }
- } else {
- if (mNeedToManuallyPrependSPSPPS
- && mIsH264
- && (mFlags & FLAG_PREPEND_CSD_IF_NECESSARY)
- && IsIDR(buffer)) {
- buffer = prependCSD(buffer);
- }
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatAccessUnit);
- notify->setBuffer("accessUnit", buffer);
- notify->post();
- }
- }
-
- if (!handle) {
- mEncoder->releaseOutputBuffer(bufferIndex);
- }
-
- if (flags & MediaCodec::BUFFER_FLAG_EOS) {
- break;
- }
- }
-
- return err;
-}
-
-void Converter::requestIDRFrame() {
- (new AMessage(kWhatRequestIDRFrame, this))->post();
-}
-
-void Converter::dropAFrame() {
- // Unsupported in surface input mode.
- CHECK(!(mFlags & FLAG_USE_SURFACE_INPUT));
-
- (new AMessage(kWhatDropAFrame, this))->post();
-}
-
-void Converter::suspendEncoding(bool suspend) {
- sp<AMessage> msg = new AMessage(kWhatSuspendEncoding, this);
- msg->setInt32("suspend", suspend);
- msg->post();
-}
-
-int32_t Converter::getVideoBitrate() const {
- return mPrevVideoBitrate;
-}
-
-void Converter::setVideoBitrate(int32_t bitRate) {
- if (mIsVideo && mEncoder != NULL && bitRate != mPrevVideoBitrate) {
- sp<AMessage> params = new AMessage;
- params->setInt32("video-bitrate", bitRate);
-
- mEncoder->setParameters(params);
-
- mPrevVideoBitrate = bitRate;
- }
-}
-
-} // namespace android
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
deleted file mode 100644
index ad95ab5..0000000
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CONVERTER_H_
-
-#define CONVERTER_H_
-
-#include <media/stagefright/foundation/AHandler.h>
-
-namespace android {
-
-struct ABuffer;
-class IGraphicBufferProducer;
-struct MediaCodec;
-class MediaCodecBuffer;
-
-#define ENABLE_SILENCE_DETECTION 0
-
-// Utility class that receives media access units and converts them into
- // media access units of a different format.
-// Right now this'll convert raw video into H.264 and raw audio into AAC.
-struct Converter : public AHandler {
- enum {
- kWhatAccessUnit,
- kWhatEOS,
- kWhatError,
- kWhatShutdownCompleted,
- };
-
- enum FlagBits {
- FLAG_USE_SURFACE_INPUT = 1,
- FLAG_PREPEND_CSD_IF_NECESSARY = 2,
- };
- Converter(const sp<AMessage> &notify,
- const sp<ALooper> &codecLooper,
- const sp<AMessage> &outputFormat,
- uint32_t flags = 0);
-
- status_t init();
-
- sp<IGraphicBufferProducer> getGraphicBufferProducer();
-
- size_t getInputBufferCount() const;
-
- sp<AMessage> getOutputFormat() const;
- bool needToManuallyPrependSPSPPS() const;
-
- void feedAccessUnit(const sp<ABuffer> &accessUnit);
- void signalEOS();
-
- void requestIDRFrame();
-
- void dropAFrame();
- void suspendEncoding(bool suspend);
-
- void shutdownAsync();
-
- int32_t getVideoBitrate() const;
- void setVideoBitrate(int32_t bitrate);
-
- static int32_t GetInt32Property(const char *propName, int32_t defaultValue);
-
- enum {
- // MUST not conflict with private enums below.
- kWhatMediaPullerNotify = 'pulN',
- };
-
-protected:
- virtual ~Converter();
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
-private:
- enum {
- kWhatDoMoreWork,
- kWhatRequestIDRFrame,
- kWhatSuspendEncoding,
- kWhatShutdown,
- kWhatEncoderActivity,
- kWhatDropAFrame,
- kWhatReleaseOutputBuffer,
- };
-
- sp<AMessage> mNotify;
- sp<ALooper> mCodecLooper;
- sp<AMessage> mOutputFormat;
- uint32_t mFlags;
- bool mIsVideo;
- bool mIsH264;
- bool mIsPCMAudio;
- bool mNeedToManuallyPrependSPSPPS;
-
- sp<MediaCodec> mEncoder;
- sp<AMessage> mEncoderActivityNotify;
-
- sp<IGraphicBufferProducer> mGraphicBufferProducer;
-
- Vector<sp<MediaCodecBuffer> > mEncoderInputBuffers;
- Vector<sp<MediaCodecBuffer> > mEncoderOutputBuffers;
-
- List<size_t> mAvailEncoderInputIndices;
-
- List<sp<ABuffer> > mInputBufferQueue;
-
- sp<ABuffer> mCSD0;
-
- bool mDoMoreWorkPending;
-
-#if ENABLE_SILENCE_DETECTION
- int64_t mFirstSilentFrameUs;
- bool mInSilentMode;
-#endif
-
- sp<ABuffer> mPartialAudioAU;
-
- int32_t mPrevVideoBitrate;
-
- int32_t mNumFramesToDrop;
- bool mEncodingSuspended;
-
- status_t initEncoder();
- void releaseEncoder();
-
- status_t feedEncoderInputBuffers();
-
- void scheduleDoMoreWork();
- status_t doMoreWork();
-
- void notifyError(status_t err);
-
- // Packetizes raw PCM audio data available in mInputBufferQueue
- // into a format suitable for transport stream inclusion and
- // notifies the observer.
- status_t feedRawAudioInputBuffers();
-
- static bool IsSilence(const sp<ABuffer> &accessUnit);
-
- sp<ABuffer> prependCSD(const sp<ABuffer> &accessUnit) const;
-
- DISALLOW_EVIL_CONSTRUCTORS(Converter);
-};
-
-} // namespace android
-
-#endif // CONVERTER_H_
diff --git a/media/libstagefright/wifi-display/source/MediaPuller.cpp b/media/libstagefright/wifi-display/source/MediaPuller.cpp
deleted file mode 100644
index ce07a4e..0000000
--- a/media/libstagefright/wifi-display/source/MediaPuller.cpp
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaPuller"
-#include <utils/Log.h>
-
-#include "MediaPuller.h"
-
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-MediaPuller::MediaPuller(
- const sp<MediaSource> &source, const sp<AMessage> &notify)
- : mSource(source),
- mNotify(notify),
- mPullGeneration(0),
- mIsAudio(false),
- mPaused(false) {
- sp<MetaData> meta = source->getFormat();
- const char *mime;
- CHECK(meta->findCString(kKeyMIMEType, &mime));
-
- mIsAudio = !strncasecmp(mime, "audio/", 6);
-}
-
-MediaPuller::~MediaPuller() {
-}
-
-status_t MediaPuller::postSynchronouslyAndReturnError(
- const sp<AMessage> &msg) {
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
-
- if (err != OK) {
- return err;
- }
-
- if (!response->findInt32("err", &err)) {
- err = OK;
- }
-
- return err;
-}
-
-status_t MediaPuller::start() {
- return postSynchronouslyAndReturnError(new AMessage(kWhatStart, this));
-}
-
-void MediaPuller::stopAsync(const sp<AMessage> &notify) {
- sp<AMessage> msg = new AMessage(kWhatStop, this);
- msg->setMessage("notify", notify);
- msg->post();
-}
-
-void MediaPuller::pause() {
- (new AMessage(kWhatPause, this))->post();
-}
-
-void MediaPuller::resume() {
- (new AMessage(kWhatResume, this))->post();
-}
-
-void MediaPuller::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatStart:
- {
- status_t err;
- if (mIsAudio) {
- // This atrocity causes AudioSource to deliver absolute
- // systemTime() based timestamps (off by 1 us).
- sp<MetaData> params = new MetaData;
- params->setInt64(kKeyTime, 1ll);
- err = mSource->start(params.get());
- } else {
- err = mSource->start();
- if (err != OK) {
- ALOGE("source failed to start w/ err %d", err);
- }
- }
-
- if (err == OK) {
- schedulePull();
- }
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
- break;
- }
-
- case kWhatStop:
- {
- sp<MetaData> meta = mSource->getFormat();
- const char *tmp;
- CHECK(meta->findCString(kKeyMIMEType, &tmp));
- AString mime = tmp;
-
- ALOGI("MediaPuller(%s) stopping.", mime.c_str());
- mSource->stop();
- ALOGI("MediaPuller(%s) stopped.", mime.c_str());
- ++mPullGeneration;
-
- sp<AMessage> notify;
- CHECK(msg->findMessage("notify", &notify));
- notify->post();
- break;
- }
-
- case kWhatPull:
- {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
-
- if (generation != mPullGeneration) {
- break;
- }
-
- MediaBuffer *mbuf;
- status_t err = mSource->read(&mbuf);
-
- if (mPaused) {
- if (err == OK) {
- mbuf->release();
- mbuf = NULL;
- }
-
- schedulePull();
- break;
- }
-
- if (err != OK) {
- if (err == ERROR_END_OF_STREAM) {
- ALOGI("stream ended.");
- } else {
- ALOGE("error %d reading stream.", err);
- }
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatEOS);
- notify->post();
- } else {
- int64_t timeUs;
- CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
-
- sp<ABuffer> accessUnit = new ABuffer(mbuf->range_length());
-
- memcpy(accessUnit->data(),
- (const uint8_t *)mbuf->data() + mbuf->range_offset(),
- mbuf->range_length());
-
- accessUnit->meta()->setInt64("timeUs", timeUs);
-
- if (mIsAudio) {
- mbuf->release();
- mbuf = NULL;
- } else {
- // video encoder will release MediaBuffer when done
- // with underlying data.
- accessUnit->setMediaBufferBase(mbuf);
- }
-
- sp<AMessage> notify = mNotify->dup();
-
- notify->setInt32("what", kWhatAccessUnit);
- notify->setBuffer("accessUnit", accessUnit);
- notify->post();
-
- if (mbuf != NULL) {
- ALOGV("posted mbuf %p", mbuf);
- }
-
- schedulePull();
- }
- break;
- }
-
- case kWhatPause:
- {
- mPaused = true;
- break;
- }
-
- case kWhatResume:
- {
- mPaused = false;
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void MediaPuller::schedulePull() {
- sp<AMessage> msg = new AMessage(kWhatPull, this);
- msg->setInt32("generation", mPullGeneration);
- msg->post();
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/wifi-display/source/MediaPuller.h b/media/libstagefright/wifi-display/source/MediaPuller.h
deleted file mode 100644
index 1291bb3..0000000
--- a/media/libstagefright/wifi-display/source/MediaPuller.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_PULLER_H_
-
-#define MEDIA_PULLER_H_
-
-#include <media/stagefright/foundation/AHandler.h>
-
-namespace android {
-
-struct MediaSource;
-
-struct MediaPuller : public AHandler {
- enum {
- kWhatEOS,
- kWhatAccessUnit
- };
-
- MediaPuller(const sp<MediaSource> &source, const sp<AMessage> &notify);
-
- status_t start();
- void stopAsync(const sp<AMessage> &notify);
-
- void pause();
- void resume();
-
-protected:
- virtual void onMessageReceived(const sp<AMessage> &msg);
- virtual ~MediaPuller();
-
-private:
- enum {
- kWhatStart,
- kWhatStop,
- kWhatPull,
- kWhatPause,
- kWhatResume,
- };
-
- sp<MediaSource> mSource;
- sp<AMessage> mNotify;
- int32_t mPullGeneration;
- bool mIsAudio;
- bool mPaused;
-
- status_t postSynchronouslyAndReturnError(const sp<AMessage> &msg);
- void schedulePull();
-
- DISALLOW_EVIL_CONSTRUCTORS(MediaPuller);
-};
-
-} // namespace android
-
-#endif // MEDIA_PULLER_H_
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
deleted file mode 100644
index f1ecca0..0000000
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ /dev/null
@@ -1,1112 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "PlaybackSession"
-#include <utils/Log.h>
-
-#include "PlaybackSession.h"
-
-#include "Converter.h"
-#include "MediaPuller.h"
-#include "RepeaterSource.h"
-#include "include/avc_utils.h"
-#include "WifiDisplaySource.h"
-
-#include <binder/IServiceManager.h>
-#include <cutils/properties.h>
-#include <media/IHDCP.h>
-#include <media/IMediaHTTPService.h>
-#include <media/stagefright/foundation/ABitReader.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/AudioSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/NuMediaExtractor.h>
-#include <media/stagefright/SurfaceMediaSource.h>
-#include <media/stagefright/Utils.h>
-
-#include <OMX_IVCommon.h>
-
-namespace android {
-
-struct WifiDisplaySource::PlaybackSession::Track : public AHandler {
- enum {
- kWhatStopped,
- };
-
- Track(const sp<AMessage> &notify,
- const sp<ALooper> &pullLooper,
- const sp<ALooper> &codecLooper,
- const sp<MediaPuller> &mediaPuller,
- const sp<Converter> &converter);
-
- Track(const sp<AMessage> &notify, const sp<AMessage> &format);
-
- void setRepeaterSource(const sp<RepeaterSource> &source);
-
- sp<AMessage> getFormat();
- bool isAudio() const;
-
- const sp<Converter> &converter() const;
- const sp<RepeaterSource> &repeaterSource() const;
-
- ssize_t mediaSenderTrackIndex() const;
- void setMediaSenderTrackIndex(size_t index);
-
- status_t start();
- void stopAsync();
-
- void pause();
- void resume();
-
- void queueAccessUnit(const sp<ABuffer> &accessUnit);
- sp<ABuffer> dequeueAccessUnit();
-
- bool hasOutputBuffer(int64_t *timeUs) const;
- void queueOutputBuffer(const sp<ABuffer> &accessUnit);
- sp<ABuffer> dequeueOutputBuffer();
-
-#if SUSPEND_VIDEO_IF_IDLE
- bool isSuspended() const;
-#endif
-
- size_t countQueuedOutputBuffers() const {
- return mQueuedOutputBuffers.size();
- }
-
- void requestIDRFrame();
-
-protected:
- virtual void onMessageReceived(const sp<AMessage> &msg);
- virtual ~Track();
-
-private:
- enum {
- kWhatMediaPullerStopped,
- };
-
- sp<AMessage> mNotify;
- sp<ALooper> mPullLooper;
- sp<ALooper> mCodecLooper;
- sp<MediaPuller> mMediaPuller;
- sp<Converter> mConverter;
- sp<AMessage> mFormat;
- bool mStarted;
- ssize_t mMediaSenderTrackIndex;
- bool mIsAudio;
- List<sp<ABuffer> > mQueuedAccessUnits;
- sp<RepeaterSource> mRepeaterSource;
- List<sp<ABuffer> > mQueuedOutputBuffers;
- int64_t mLastOutputBufferQueuedTimeUs;
-
- static bool IsAudioFormat(const sp<AMessage> &format);
-
- DISALLOW_EVIL_CONSTRUCTORS(Track);
-};
-
-WifiDisplaySource::PlaybackSession::Track::Track(
- const sp<AMessage> &notify,
- const sp<ALooper> &pullLooper,
- const sp<ALooper> &codecLooper,
- const sp<MediaPuller> &mediaPuller,
- const sp<Converter> &converter)
- : mNotify(notify),
- mPullLooper(pullLooper),
- mCodecLooper(codecLooper),
- mMediaPuller(mediaPuller),
- mConverter(converter),
- mStarted(false),
- mIsAudio(IsAudioFormat(mConverter->getOutputFormat())),
- mLastOutputBufferQueuedTimeUs(-1ll) {
-}
-
-WifiDisplaySource::PlaybackSession::Track::Track(
- const sp<AMessage> &notify, const sp<AMessage> &format)
- : mNotify(notify),
- mFormat(format),
- mStarted(false),
- mIsAudio(IsAudioFormat(format)),
- mLastOutputBufferQueuedTimeUs(-1ll) {
-}
-
-WifiDisplaySource::PlaybackSession::Track::~Track() {
- CHECK(!mStarted);
-}
-
-// static
-bool WifiDisplaySource::PlaybackSession::Track::IsAudioFormat(
- const sp<AMessage> &format) {
- AString mime;
- CHECK(format->findString("mime", &mime));
-
- return !strncasecmp(mime.c_str(), "audio/", 6);
-}
-
-sp<AMessage> WifiDisplaySource::PlaybackSession::Track::getFormat() {
- return mFormat != NULL ? mFormat : mConverter->getOutputFormat();
-}
-
-bool WifiDisplaySource::PlaybackSession::Track::isAudio() const {
- return mIsAudio;
-}
-
-const sp<Converter> &WifiDisplaySource::PlaybackSession::Track::converter() const {
- return mConverter;
-}
-
-const sp<RepeaterSource> &
-WifiDisplaySource::PlaybackSession::Track::repeaterSource() const {
- return mRepeaterSource;
-}
-
-ssize_t WifiDisplaySource::PlaybackSession::Track::mediaSenderTrackIndex() const {
- CHECK_GE(mMediaSenderTrackIndex, 0);
- return mMediaSenderTrackIndex;
-}
-
-void WifiDisplaySource::PlaybackSession::Track::setMediaSenderTrackIndex(
- size_t index) {
- mMediaSenderTrackIndex = index;
-}
-
-status_t WifiDisplaySource::PlaybackSession::Track::start() {
- ALOGV("Track::start isAudio=%d", mIsAudio);
-
- CHECK(!mStarted);
-
- status_t err = OK;
-
- if (mMediaPuller != NULL) {
- err = mMediaPuller->start();
- }
-
- if (err == OK) {
- mStarted = true;
- }
-
- return err;
-}
-
-void WifiDisplaySource::PlaybackSession::Track::stopAsync() {
- ALOGV("Track::stopAsync isAudio=%d", mIsAudio);
-
- if (mConverter != NULL) {
- mConverter->shutdownAsync();
- }
-
- sp<AMessage> msg = new AMessage(kWhatMediaPullerStopped, this);
-
- if (mStarted && mMediaPuller != NULL) {
- if (mRepeaterSource != NULL) {
- // Let's unblock MediaPuller's MediaSource::read().
- mRepeaterSource->wakeUp();
- }
-
- mMediaPuller->stopAsync(msg);
- } else {
- mStarted = false;
- msg->post();
- }
-}
-
-void WifiDisplaySource::PlaybackSession::Track::pause() {
- mMediaPuller->pause();
-}
-
-void WifiDisplaySource::PlaybackSession::Track::resume() {
- mMediaPuller->resume();
-}
-
-void WifiDisplaySource::PlaybackSession::Track::onMessageReceived(
- const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatMediaPullerStopped:
- {
- mConverter.clear();
-
- mStarted = false;
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatStopped);
- notify->post();
-
- ALOGI("kWhatStopped %s posted", mIsAudio ? "audio" : "video");
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void WifiDisplaySource::PlaybackSession::Track::queueAccessUnit(
- const sp<ABuffer> &accessUnit) {
- mQueuedAccessUnits.push_back(accessUnit);
-}
-
-sp<ABuffer> WifiDisplaySource::PlaybackSession::Track::dequeueAccessUnit() {
- if (mQueuedAccessUnits.empty()) {
- return NULL;
- }
-
- sp<ABuffer> accessUnit = *mQueuedAccessUnits.begin();
- CHECK(accessUnit != NULL);
-
- mQueuedAccessUnits.erase(mQueuedAccessUnits.begin());
-
- return accessUnit;
-}
-
-void WifiDisplaySource::PlaybackSession::Track::setRepeaterSource(
- const sp<RepeaterSource> &source) {
- mRepeaterSource = source;
-}
-
-void WifiDisplaySource::PlaybackSession::Track::requestIDRFrame() {
- if (mIsAudio) {
- return;
- }
-
- if (mRepeaterSource != NULL) {
- mRepeaterSource->wakeUp();
- }
-
- mConverter->requestIDRFrame();
-}
-
-bool WifiDisplaySource::PlaybackSession::Track::hasOutputBuffer(
- int64_t *timeUs) const {
- *timeUs = 0ll;
-
- if (mQueuedOutputBuffers.empty()) {
- return false;
- }
-
- const sp<ABuffer> &outputBuffer = *mQueuedOutputBuffers.begin();
-
- CHECK(outputBuffer->meta()->findInt64("timeUs", timeUs));
-
- return true;
-}
-
-void WifiDisplaySource::PlaybackSession::Track::queueOutputBuffer(
- const sp<ABuffer> &accessUnit) {
- mQueuedOutputBuffers.push_back(accessUnit);
- mLastOutputBufferQueuedTimeUs = ALooper::GetNowUs();
-}
-
-sp<ABuffer> WifiDisplaySource::PlaybackSession::Track::dequeueOutputBuffer() {
- CHECK(!mQueuedOutputBuffers.empty());
-
- sp<ABuffer> outputBuffer = *mQueuedOutputBuffers.begin();
- mQueuedOutputBuffers.erase(mQueuedOutputBuffers.begin());
-
- return outputBuffer;
-}
-
-#if SUSPEND_VIDEO_IF_IDLE
-bool WifiDisplaySource::PlaybackSession::Track::isSuspended() const {
- if (!mQueuedOutputBuffers.empty()) {
- return false;
- }
-
- if (mLastOutputBufferQueuedTimeUs < 0ll) {
- // We've never seen an output buffer queued, but tracks start
- // out live, not suspended.
- return false;
- }
-
- // If we've not seen new output data for 60ms or more, we consider
- // this track suspended for the time being.
- return (ALooper::GetNowUs() - mLastOutputBufferQueuedTimeUs) > 60000ll;
-}
-#endif
-
-////////////////////////////////////////////////////////////////////////////////
-
-WifiDisplaySource::PlaybackSession::PlaybackSession(
- const String16 &opPackageName,
- const sp<ANetworkSession> &netSession,
- const sp<AMessage> &notify,
- const in_addr &interfaceAddr,
- const sp<IHDCP> &hdcp,
- const char *path)
- : mOpPackageName(opPackageName),
- mNetSession(netSession),
- mNotify(notify),
- mInterfaceAddr(interfaceAddr),
- mHDCP(hdcp),
- mLocalRTPPort(-1),
- mWeAreDead(false),
- mPaused(false),
- mLastLifesignUs(),
- mVideoTrackIndex(-1),
- mPrevTimeUs(-1ll),
- mPullExtractorPending(false),
- mPullExtractorGeneration(0),
- mFirstSampleTimeRealUs(-1ll),
- mFirstSampleTimeUs(-1ll) {
- if (path != NULL) {
- mMediaPath.setTo(path);
- }
-}
-
-status_t WifiDisplaySource::PlaybackSession::init(
- const char *clientIP,
- int32_t clientRtp,
- RTPSender::TransportMode rtpMode,
- int32_t clientRtcp,
- RTPSender::TransportMode rtcpMode,
- bool enableAudio,
- bool usePCMAudio,
- bool enableVideo,
- VideoFormats::ResolutionType videoResolutionType,
- size_t videoResolutionIndex,
- VideoFormats::ProfileType videoProfileType,
- VideoFormats::LevelType videoLevelType) {
- sp<AMessage> notify = new AMessage(kWhatMediaSenderNotify, this);
- mMediaSender = new MediaSender(mNetSession, notify);
- looper()->registerHandler(mMediaSender);
-
- mMediaSender->setHDCP(mHDCP);
-
- status_t err = setupPacketizer(
- enableAudio,
- usePCMAudio,
- enableVideo,
- videoResolutionType,
- videoResolutionIndex,
- videoProfileType,
- videoLevelType);
-
- if (err == OK) {
- err = mMediaSender->initAsync(
- -1 /* trackIndex */,
- clientIP,
- clientRtp,
- rtpMode,
- clientRtcp,
- rtcpMode,
- &mLocalRTPPort);
- }
-
- if (err != OK) {
- mLocalRTPPort = -1;
-
- looper()->unregisterHandler(mMediaSender->id());
- mMediaSender.clear();
-
- return err;
- }
-
- updateLiveness();
-
- return OK;
-}
-
-WifiDisplaySource::PlaybackSession::~PlaybackSession() {
-}
-
-int32_t WifiDisplaySource::PlaybackSession::getRTPPort() const {
- return mLocalRTPPort;
-}
-
-int64_t WifiDisplaySource::PlaybackSession::getLastLifesignUs() const {
- return mLastLifesignUs;
-}
-
-void WifiDisplaySource::PlaybackSession::updateLiveness() {
- mLastLifesignUs = ALooper::GetNowUs();
-}
-
-status_t WifiDisplaySource::PlaybackSession::play() {
- updateLiveness();
-
- (new AMessage(kWhatResume, this))->post();
-
- return OK;
-}
-
-status_t WifiDisplaySource::PlaybackSession::onMediaSenderInitialized() {
- for (size_t i = 0; i < mTracks.size(); ++i) {
- CHECK_EQ((status_t)OK, mTracks.editValueAt(i)->start());
- }
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatSessionEstablished);
- notify->post();
-
- return OK;
-}
-
-status_t WifiDisplaySource::PlaybackSession::pause() {
- updateLiveness();
-
- (new AMessage(kWhatPause, this))->post();
-
- return OK;
-}
-
-void WifiDisplaySource::PlaybackSession::destroyAsync() {
- ALOGI("destroyAsync");
-
- for (size_t i = 0; i < mTracks.size(); ++i) {
- mTracks.valueAt(i)->stopAsync();
- }
-}
-
-void WifiDisplaySource::PlaybackSession::onMessageReceived(
- const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatConverterNotify:
- {
- if (mWeAreDead) {
- ALOGV("dropping msg '%s' because we're dead",
- msg->debugString().c_str());
-
- break;
- }
-
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
-
- if (what == Converter::kWhatAccessUnit) {
- sp<ABuffer> accessUnit;
- CHECK(msg->findBuffer("accessUnit", &accessUnit));
-
- const sp<Track> &track = mTracks.valueFor(trackIndex);
-
- status_t err = mMediaSender->queueAccessUnit(
- track->mediaSenderTrackIndex(),
- accessUnit);
-
- if (err != OK) {
- notifySessionDead();
- }
- break;
- } else if (what == Converter::kWhatEOS) {
- CHECK_EQ(what, Converter::kWhatEOS);
-
- ALOGI("output EOS on track %zu", trackIndex);
-
- ssize_t index = mTracks.indexOfKey(trackIndex);
- CHECK_GE(index, 0);
-
- const sp<Converter> &converter =
- mTracks.valueAt(index)->converter();
- looper()->unregisterHandler(converter->id());
-
- mTracks.removeItemsAt(index);
-
- if (mTracks.isEmpty()) {
- ALOGI("Reached EOS");
- }
- } else if (what != Converter::kWhatShutdownCompleted) {
- CHECK_EQ(what, Converter::kWhatError);
-
- status_t err;
- CHECK(msg->findInt32("err", &err));
-
- ALOGE("converter signaled error %d", err);
-
- notifySessionDead();
- }
- break;
- }
-
- case kWhatMediaSenderNotify:
- {
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- if (what == MediaSender::kWhatInitDone) {
- status_t err;
- CHECK(msg->findInt32("err", &err));
-
- if (err == OK) {
- onMediaSenderInitialized();
- } else {
- notifySessionDead();
- }
- } else if (what == MediaSender::kWhatError) {
- notifySessionDead();
- } else if (what == MediaSender::kWhatNetworkStall) {
- size_t numBytesQueued;
- CHECK(msg->findSize("numBytesQueued", &numBytesQueued));
-
- if (mVideoTrackIndex >= 0) {
- const sp<Track> &videoTrack =
- mTracks.valueFor(mVideoTrackIndex);
-
- sp<Converter> converter = videoTrack->converter();
- if (converter != NULL) {
- converter->dropAFrame();
- }
- }
- } else if (what == MediaSender::kWhatInformSender) {
- onSinkFeedback(msg);
- } else {
- TRESPASS();
- }
- break;
- }
-
- case kWhatTrackNotify:
- {
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
-
- if (what == Track::kWhatStopped) {
- ALOGI("Track %zu stopped", trackIndex);
-
- sp<Track> track = mTracks.valueFor(trackIndex);
- looper()->unregisterHandler(track->id());
- mTracks.removeItem(trackIndex);
- track.clear();
-
- if (!mTracks.isEmpty()) {
- ALOGI("not all tracks are stopped yet");
- break;
- }
-
- looper()->unregisterHandler(mMediaSender->id());
- mMediaSender.clear();
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatSessionDestroyed);
- notify->post();
- }
- break;
- }
-
- case kWhatPause:
- {
- if (mExtractor != NULL) {
- ++mPullExtractorGeneration;
- mFirstSampleTimeRealUs = -1ll;
- mFirstSampleTimeUs = -1ll;
- }
-
- if (mPaused) {
- break;
- }
-
- for (size_t i = 0; i < mTracks.size(); ++i) {
- mTracks.editValueAt(i)->pause();
- }
-
- mPaused = true;
- break;
- }
-
- case kWhatResume:
- {
- if (mExtractor != NULL) {
- schedulePullExtractor();
- }
-
- if (!mPaused) {
- break;
- }
-
- for (size_t i = 0; i < mTracks.size(); ++i) {
- mTracks.editValueAt(i)->resume();
- }
-
- mPaused = false;
- break;
- }
-
- case kWhatPullExtractorSample:
- {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
-
- if (generation != mPullExtractorGeneration) {
- break;
- }
-
- mPullExtractorPending = false;
-
- onPullExtractor();
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void WifiDisplaySource::PlaybackSession::onSinkFeedback(const sp<AMessage> &msg) {
- int64_t avgLatencyUs;
- CHECK(msg->findInt64("avgLatencyUs", &avgLatencyUs));
-
- int64_t maxLatencyUs;
- CHECK(msg->findInt64("maxLatencyUs", &maxLatencyUs));
-
- ALOGI("sink reports avg. latency of %lld ms (max %lld ms)",
- avgLatencyUs / 1000ll,
- maxLatencyUs / 1000ll);
-
- if (mVideoTrackIndex >= 0) {
- const sp<Track> &videoTrack = mTracks.valueFor(mVideoTrackIndex);
- sp<Converter> converter = videoTrack->converter();
-
- if (converter != NULL) {
- int32_t videoBitrate =
- Converter::GetInt32Property("media.wfd.video-bitrate", -1);
-
- char val[PROPERTY_VALUE_MAX];
- if (videoBitrate < 0
- && property_get("media.wfd.video-bitrate", val, NULL)
- && !strcasecmp("adaptive", val)) {
- videoBitrate = converter->getVideoBitrate();
-
- if (avgLatencyUs > 300000ll) {
- videoBitrate *= 0.6;
- } else if (avgLatencyUs < 100000ll) {
- videoBitrate *= 1.1;
- }
- }
-
- if (videoBitrate > 0) {
- if (videoBitrate < 500000) {
- videoBitrate = 500000;
- } else if (videoBitrate > 10000000) {
- videoBitrate = 10000000;
- }
-
- if (videoBitrate != converter->getVideoBitrate()) {
- ALOGI("setting video bitrate to %d bps", videoBitrate);
-
- converter->setVideoBitrate(videoBitrate);
- }
- }
- }
-
- sp<RepeaterSource> repeaterSource = videoTrack->repeaterSource();
- if (repeaterSource != NULL) {
- double rateHz =
- Converter::GetInt32Property(
- "media.wfd.video-framerate", -1);
-
- char val[PROPERTY_VALUE_MAX];
- if (rateHz < 0.0
- && property_get("media.wfd.video-framerate", val, NULL)
- && !strcasecmp("adaptive", val)) {
- rateHz = repeaterSource->getFrameRate();
-
- if (avgLatencyUs > 300000ll) {
- rateHz *= 0.9;
- } else if (avgLatencyUs < 200000ll) {
- rateHz *= 1.1;
- }
- }
-
- if (rateHz > 0) {
- if (rateHz < 5.0) {
- rateHz = 5.0;
- } else if (rateHz > 30.0) {
- rateHz = 30.0;
- }
-
- if (rateHz != repeaterSource->getFrameRate()) {
- ALOGI("setting frame rate to %.2f Hz", rateHz);
-
- repeaterSource->setFrameRate(rateHz);
- }
- }
- }
- }
-}
-
-status_t WifiDisplaySource::PlaybackSession::setupMediaPacketizer(
- bool enableAudio, bool enableVideo) {
- mExtractor = new NuMediaExtractor;
-
- status_t err = mExtractor->setDataSource(
- NULL /* httpService */, mMediaPath.c_str());
-
- if (err != OK) {
- return err;
- }
-
- size_t n = mExtractor->countTracks();
- bool haveAudio = false;
- bool haveVideo = false;
- for (size_t i = 0; i < n; ++i) {
- sp<AMessage> format;
- err = mExtractor->getTrackFormat(i, &format);
-
- if (err != OK) {
- continue;
- }
-
- AString mime;
- CHECK(format->findString("mime", &mime));
-
- bool isAudio = !strncasecmp(mime.c_str(), "audio/", 6);
- bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
-
- if (isAudio && enableAudio && !haveAudio) {
- haveAudio = true;
- } else if (isVideo && enableVideo && !haveVideo) {
- haveVideo = true;
- } else {
- continue;
- }
-
- err = mExtractor->selectTrack(i);
-
- size_t trackIndex = mTracks.size();
-
- sp<AMessage> notify = new AMessage(kWhatTrackNotify, this);
- notify->setSize("trackIndex", trackIndex);
-
- sp<Track> track = new Track(notify, format);
- looper()->registerHandler(track);
-
- mTracks.add(trackIndex, track);
-
- mExtractorTrackToInternalTrack.add(i, trackIndex);
-
- if (isVideo) {
- mVideoTrackIndex = trackIndex;
- }
-
- uint32_t flags = MediaSender::FLAG_MANUALLY_PREPEND_SPS_PPS;
-
- ssize_t mediaSenderTrackIndex =
- mMediaSender->addTrack(format, flags);
- CHECK_GE(mediaSenderTrackIndex, 0);
-
- track->setMediaSenderTrackIndex(mediaSenderTrackIndex);
-
- if ((haveAudio || !enableAudio) && (haveVideo || !enableVideo)) {
- break;
- }
- }
-
- return OK;
-}
-
-void WifiDisplaySource::PlaybackSession::schedulePullExtractor() {
- if (mPullExtractorPending) {
- return;
- }
-
- int64_t delayUs = 1000000; // default delay is 1 sec
- int64_t sampleTimeUs;
- status_t err = mExtractor->getSampleTime(&sampleTimeUs);
-
- if (err == OK) {
- int64_t nowUs = ALooper::GetNowUs();
-
- if (mFirstSampleTimeRealUs < 0ll) {
- mFirstSampleTimeRealUs = nowUs;
- mFirstSampleTimeUs = sampleTimeUs;
- }
-
- int64_t whenUs = sampleTimeUs - mFirstSampleTimeUs + mFirstSampleTimeRealUs;
- delayUs = whenUs - nowUs;
- } else {
- ALOGW("could not get sample time (%d)", err);
- }
-
- sp<AMessage> msg = new AMessage(kWhatPullExtractorSample, this);
- msg->setInt32("generation", mPullExtractorGeneration);
- msg->post(delayUs);
-
- mPullExtractorPending = true;
-}
-
-void WifiDisplaySource::PlaybackSession::onPullExtractor() {
- sp<ABuffer> accessUnit = new ABuffer(1024 * 1024);
- status_t err = mExtractor->readSampleData(accessUnit);
- if (err != OK) {
- // EOS.
- return;
- }
-
- int64_t timeUs;
- CHECK_EQ((status_t)OK, mExtractor->getSampleTime(&timeUs));
-
- accessUnit->meta()->setInt64(
- "timeUs", mFirstSampleTimeRealUs + timeUs - mFirstSampleTimeUs);
-
- size_t trackIndex;
- CHECK_EQ((status_t)OK, mExtractor->getSampleTrackIndex(&trackIndex));
-
- sp<AMessage> msg = new AMessage(kWhatConverterNotify, this);
-
- msg->setSize(
- "trackIndex", mExtractorTrackToInternalTrack.valueFor(trackIndex));
-
- msg->setInt32("what", Converter::kWhatAccessUnit);
- msg->setBuffer("accessUnit", accessUnit);
- msg->post();
-
- mExtractor->advance();
-
- schedulePullExtractor();
-}
-
-status_t WifiDisplaySource::PlaybackSession::setupPacketizer(
- bool enableAudio,
- bool usePCMAudio,
- bool enableVideo,
- VideoFormats::ResolutionType videoResolutionType,
- size_t videoResolutionIndex,
- VideoFormats::ProfileType videoProfileType,
- VideoFormats::LevelType videoLevelType) {
- CHECK(enableAudio || enableVideo);
-
- if (!mMediaPath.empty()) {
- return setupMediaPacketizer(enableAudio, enableVideo);
- }
-
- if (enableVideo) {
- status_t err = addVideoSource(
- videoResolutionType, videoResolutionIndex, videoProfileType,
- videoLevelType);
-
- if (err != OK) {
- return err;
- }
- }
-
- if (!enableAudio) {
- return OK;
- }
-
- return addAudioSource(usePCMAudio);
-}
-
-status_t WifiDisplaySource::PlaybackSession::addSource(
- bool isVideo, const sp<MediaSource> &source, bool isRepeaterSource,
- bool usePCMAudio, unsigned profileIdc, unsigned levelIdc,
- unsigned constraintSet, size_t *numInputBuffers) {
- CHECK(!usePCMAudio || !isVideo);
- CHECK(!isRepeaterSource || isVideo);
- CHECK(!profileIdc || isVideo);
- CHECK(!levelIdc || isVideo);
- CHECK(!constraintSet || isVideo);
-
- sp<ALooper> pullLooper = new ALooper;
- pullLooper->setName("pull_looper");
-
- pullLooper->start(
- false /* runOnCallingThread */,
- false /* canCallJava */,
- PRIORITY_AUDIO);
-
- sp<ALooper> codecLooper = new ALooper;
- codecLooper->setName("codec_looper");
-
- codecLooper->start(
- false /* runOnCallingThread */,
- false /* canCallJava */,
- PRIORITY_AUDIO);
-
- size_t trackIndex;
-
- sp<AMessage> notify;
-
- trackIndex = mTracks.size();
-
- sp<AMessage> format;
- status_t err = convertMetaDataToMessage(source->getFormat(), &format);
- CHECK_EQ(err, (status_t)OK);
-
- if (isVideo) {
- format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
- format->setInt32(
- "android._input-metadata-buffer-type", kMetadataBufferTypeANWBuffer);
- format->setInt32("android._store-metadata-in-buffers-output", (mHDCP != NULL)
- && (mHDCP->getCaps() & HDCPModule::HDCP_CAPS_ENCRYPT_NATIVE));
- format->setInt32(
- "color-format", OMX_COLOR_FormatAndroidOpaque);
- format->setInt32("profile-idc", profileIdc);
- format->setInt32("level-idc", levelIdc);
- format->setInt32("constraint-set", constraintSet);
- } else {
- if (usePCMAudio) {
- format->setInt32("pcm-encoding", kAudioEncodingPcm16bit);
- format->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
- } else {
- format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
- }
- }
-
- notify = new AMessage(kWhatConverterNotify, this);
- notify->setSize("trackIndex", trackIndex);
-
- sp<Converter> converter = new Converter(notify, codecLooper, format);
-
- looper()->registerHandler(converter);
-
- err = converter->init();
- if (err != OK) {
- ALOGE("%s converter returned err %d", isVideo ? "video" : "audio", err);
-
- looper()->unregisterHandler(converter->id());
- return err;
- }
-
- notify = new AMessage(Converter::kWhatMediaPullerNotify, converter);
- notify->setSize("trackIndex", trackIndex);
-
- sp<MediaPuller> puller = new MediaPuller(source, notify);
- pullLooper->registerHandler(puller);
-
- if (numInputBuffers != NULL) {
- *numInputBuffers = converter->getInputBufferCount();
- }
-
- notify = new AMessage(kWhatTrackNotify, this);
- notify->setSize("trackIndex", trackIndex);
-
- sp<Track> track = new Track(
- notify, pullLooper, codecLooper, puller, converter);
-
- if (isRepeaterSource) {
- track->setRepeaterSource(static_cast<RepeaterSource *>(source.get()));
- }
-
- looper()->registerHandler(track);
-
- mTracks.add(trackIndex, track);
-
- if (isVideo) {
- mVideoTrackIndex = trackIndex;
- }
-
- uint32_t flags = 0;
- if (converter->needToManuallyPrependSPSPPS()) {
- flags |= MediaSender::FLAG_MANUALLY_PREPEND_SPS_PPS;
- }
-
- ssize_t mediaSenderTrackIndex =
- mMediaSender->addTrack(converter->getOutputFormat(), flags);
- CHECK_GE(mediaSenderTrackIndex, 0);
-
- track->setMediaSenderTrackIndex(mediaSenderTrackIndex);
-
- return OK;
-}
-
-status_t WifiDisplaySource::PlaybackSession::addVideoSource(
- VideoFormats::ResolutionType videoResolutionType,
- size_t videoResolutionIndex,
- VideoFormats::ProfileType videoProfileType,
- VideoFormats::LevelType videoLevelType) {
- size_t width, height, framesPerSecond;
- bool interlaced;
- CHECK(VideoFormats::GetConfiguration(
- videoResolutionType,
- videoResolutionIndex,
- &width,
- &height,
- &framesPerSecond,
- &interlaced));
-
- unsigned profileIdc, levelIdc, constraintSet;
- CHECK(VideoFormats::GetProfileLevel(
- videoProfileType,
- videoLevelType,
- &profileIdc,
- &levelIdc,
- &constraintSet));
-
- sp<SurfaceMediaSource> source = new SurfaceMediaSource(width, height);
-
- source->setUseAbsoluteTimestamps();
-
- sp<RepeaterSource> videoSource =
- new RepeaterSource(source, framesPerSecond);
-
- size_t numInputBuffers;
- status_t err = addSource(
- true /* isVideo */, videoSource, true /* isRepeaterSource */,
- false /* usePCMAudio */, profileIdc, levelIdc, constraintSet,
- &numInputBuffers);
-
- if (err != OK) {
- return err;
- }
-
- err = source->setMaxAcquiredBufferCount(numInputBuffers);
- CHECK_EQ(err, (status_t)OK);
-
- mProducer = source->getProducer();
-
- return OK;
-}
-
-status_t WifiDisplaySource::PlaybackSession::addAudioSource(bool usePCMAudio) {
- sp<AudioSource> audioSource = new AudioSource(
- AUDIO_SOURCE_REMOTE_SUBMIX,
- mOpPackageName,
- 48000 /* sampleRate */,
- 2 /* channelCount */);
-
- if (audioSource->initCheck() == OK) {
- return addSource(
- false /* isVideo */, audioSource, false /* isRepeaterSource */,
- usePCMAudio, 0 /* profileIdc */, 0 /* levelIdc */,
- 0 /* constraintSet */, NULL /* numInputBuffers */);
- }
-
- ALOGW("Unable to instantiate audio source");
-
- return OK;
-}
-
-sp<IGraphicBufferProducer> WifiDisplaySource::PlaybackSession::getSurfaceTexture() {
- return mProducer;
-}
-
-void WifiDisplaySource::PlaybackSession::requestIDRFrame() {
- for (size_t i = 0; i < mTracks.size(); ++i) {
- const sp<Track> &track = mTracks.valueAt(i);
-
- track->requestIDRFrame();
- }
-}
-
-void WifiDisplaySource::PlaybackSession::notifySessionDead() {
- // Inform WifiDisplaySource of our premature death (wish).
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatSessionDead);
- notify->post();
-
- mWeAreDead = true;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
deleted file mode 100644
index f6673df..0000000
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef PLAYBACK_SESSION_H_
-
-#define PLAYBACK_SESSION_H_
-
-#include "MediaSender.h"
-#include "VideoFormats.h"
-#include "WifiDisplaySource.h"
-
-#include <utils/String16.h>
-
-namespace android {
-
-struct ABuffer;
-struct IHDCP;
-class IGraphicBufferProducer;
-struct MediaPuller;
-struct MediaSource;
-struct MediaSender;
-struct NuMediaExtractor;
-
-// Encapsulates the state of an RTP/RTCP session in the context of wifi
-// display.
-struct WifiDisplaySource::PlaybackSession : public AHandler {
- PlaybackSession(
- const String16 &opPackageName,
- const sp<ANetworkSession> &netSession,
- const sp<AMessage> &notify,
- const struct in_addr &interfaceAddr,
- const sp<IHDCP> &hdcp,
- const char *path = NULL);
-
- status_t init(
- const char *clientIP,
- int32_t clientRtp,
- RTPSender::TransportMode rtpMode,
- int32_t clientRtcp,
- RTPSender::TransportMode rtcpMode,
- bool enableAudio,
- bool usePCMAudio,
- bool enableVideo,
- VideoFormats::ResolutionType videoResolutionType,
- size_t videoResolutionIndex,
- VideoFormats::ProfileType videoProfileType,
- VideoFormats::LevelType videoLevelType);
-
- void destroyAsync();
-
- int32_t getRTPPort() const;
-
- int64_t getLastLifesignUs() const;
- void updateLiveness();
-
- status_t play();
- status_t finishPlay();
- status_t pause();
-
- sp<IGraphicBufferProducer> getSurfaceTexture();
-
- void requestIDRFrame();
-
- enum {
- kWhatSessionDead,
- kWhatBinaryData,
- kWhatSessionEstablished,
- kWhatSessionDestroyed,
- };
-
-protected:
- virtual void onMessageReceived(const sp<AMessage> &msg);
- virtual ~PlaybackSession();
-
-private:
- struct Track;
-
- enum {
- kWhatMediaPullerNotify,
- kWhatConverterNotify,
- kWhatTrackNotify,
- kWhatUpdateSurface,
- kWhatPause,
- kWhatResume,
- kWhatMediaSenderNotify,
- kWhatPullExtractorSample,
- };
-
- String16 mOpPackageName;
-
- sp<ANetworkSession> mNetSession;
- sp<AMessage> mNotify;
- in_addr mInterfaceAddr;
- sp<IHDCP> mHDCP;
- AString mMediaPath;
-
- sp<MediaSender> mMediaSender;
- int32_t mLocalRTPPort;
-
- bool mWeAreDead;
- bool mPaused;
-
- int64_t mLastLifesignUs;
-
- sp<IGraphicBufferProducer> mProducer;
-
- KeyedVector<size_t, sp<Track> > mTracks;
- ssize_t mVideoTrackIndex;
-
- int64_t mPrevTimeUs;
-
- sp<NuMediaExtractor> mExtractor;
- KeyedVector<size_t, size_t> mExtractorTrackToInternalTrack;
- bool mPullExtractorPending;
- int32_t mPullExtractorGeneration;
- int64_t mFirstSampleTimeRealUs;
- int64_t mFirstSampleTimeUs;
-
- status_t setupMediaPacketizer(bool enableAudio, bool enableVideo);
-
- status_t setupPacketizer(
- bool enableAudio,
- bool usePCMAudio,
- bool enableVideo,
- VideoFormats::ResolutionType videoResolutionType,
- size_t videoResolutionIndex,
- VideoFormats::ProfileType videoProfileType,
- VideoFormats::LevelType videoLevelType);
-
- status_t addSource(
- bool isVideo,
- const sp<MediaSource> &source,
- bool isRepeaterSource,
- bool usePCMAudio,
- unsigned profileIdc,
- unsigned levelIdc,
- unsigned constraintSet,
- size_t *numInputBuffers);
-
- status_t addVideoSource(
- VideoFormats::ResolutionType videoResolutionType,
- size_t videoResolutionIndex,
- VideoFormats::ProfileType videoProfileType,
- VideoFormats::LevelType videoLevelType);
-
- status_t addAudioSource(bool usePCMAudio);
-
- status_t onMediaSenderInitialized();
-
- void notifySessionDead();
-
- void schedulePullExtractor();
- void onPullExtractor();
-
- void onSinkFeedback(const sp<AMessage> &msg);
-
- DISALLOW_EVIL_CONSTRUCTORS(PlaybackSession);
-};
-
-} // namespace android
-
-#endif // PLAYBACK_SESSION_H_
-
diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.cpp b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
deleted file mode 100644
index af6b663..0000000
--- a/media/libstagefright/wifi-display/source/RepeaterSource.cpp
+++ /dev/null
@@ -1,219 +0,0 @@
-//#define LOG_NDEBUG 0
-#define LOG_TAG "RepeaterSource"
-#include <utils/Log.h>
-
-#include "RepeaterSource.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-RepeaterSource::RepeaterSource(const sp<MediaSource> &source, double rateHz)
- : mStarted(false),
- mSource(source),
- mRateHz(rateHz),
- mBuffer(NULL),
- mResult(OK),
- mLastBufferUpdateUs(-1ll),
- mStartTimeUs(-1ll),
- mFrameCount(0) {
-}
-
-RepeaterSource::~RepeaterSource() {
- CHECK(!mStarted);
-}
-
-double RepeaterSource::getFrameRate() const {
- return mRateHz;
-}
-
-void RepeaterSource::setFrameRate(double rateHz) {
- Mutex::Autolock autoLock(mLock);
-
- if (rateHz == mRateHz) {
- return;
- }
-
- if (mStartTimeUs >= 0ll) {
- int64_t nextTimeUs = mStartTimeUs + (mFrameCount * 1000000ll) / mRateHz;
- mStartTimeUs = nextTimeUs;
- mFrameCount = 0;
- }
- mRateHz = rateHz;
-}
-
-status_t RepeaterSource::start(MetaData *params) {
- CHECK(!mStarted);
-
- status_t err = mSource->start(params);
-
- if (err != OK) {
- return err;
- }
-
- mBuffer = NULL;
- mResult = OK;
- mStartTimeUs = -1ll;
- mFrameCount = 0;
-
- mLooper = new ALooper;
- mLooper->setName("repeater_looper");
- mLooper->start();
-
- mReflector = new AHandlerReflector<RepeaterSource>(this);
- mLooper->registerHandler(mReflector);
-
- postRead();
-
- mStarted = true;
-
- return OK;
-}
-
-status_t RepeaterSource::stop() {
- CHECK(mStarted);
-
- ALOGV("stopping");
-
- status_t err = mSource->stop();
-
- if (mLooper != NULL) {
- mLooper->stop();
- mLooper.clear();
-
- mReflector.clear();
- }
-
- if (mBuffer != NULL) {
- ALOGV("releasing mbuf %p", mBuffer);
- mBuffer->release();
- mBuffer = NULL;
- }
-
-
- ALOGV("stopped");
-
- mStarted = false;
-
- return err;
-}
-
-sp<MetaData> RepeaterSource::getFormat() {
- return mSource->getFormat();
-}
-
-status_t RepeaterSource::read(
- MediaBuffer **buffer, const ReadOptions *options) {
- int64_t seekTimeUs;
- ReadOptions::SeekMode seekMode;
- CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &seekMode));
-
- for (;;) {
- int64_t bufferTimeUs = -1ll;
-
- if (mStartTimeUs < 0ll) {
- Mutex::Autolock autoLock(mLock);
- while ((mLastBufferUpdateUs < 0ll || mBuffer == NULL)
- && mResult == OK) {
- mCondition.wait(mLock);
- }
-
- ALOGV("now resuming.");
- mStartTimeUs = ALooper::GetNowUs();
- bufferTimeUs = mStartTimeUs;
- } else {
- bufferTimeUs = mStartTimeUs + (mFrameCount * 1000000ll) / mRateHz;
-
- int64_t nowUs = ALooper::GetNowUs();
- int64_t delayUs = bufferTimeUs - nowUs;
-
- if (delayUs > 0ll) {
- usleep(delayUs);
- }
- }
-
- bool stale = false;
-
- {
- Mutex::Autolock autoLock(mLock);
- if (mResult != OK) {
- CHECK(mBuffer == NULL);
- return mResult;
- }
-
-#if SUSPEND_VIDEO_IF_IDLE
- int64_t nowUs = ALooper::GetNowUs();
- if (nowUs - mLastBufferUpdateUs > 1000000ll) {
- mLastBufferUpdateUs = -1ll;
- stale = true;
- } else
-#endif
- {
- mBuffer->add_ref();
- *buffer = mBuffer;
- (*buffer)->meta_data()->setInt64(kKeyTime, bufferTimeUs);
- ++mFrameCount;
- }
- }
-
- if (!stale) {
- break;
- }
-
- mStartTimeUs = -1ll;
- mFrameCount = 0;
- ALOGV("now dormant");
- }
-
- return OK;
-}
-
-void RepeaterSource::postRead() {
- (new AMessage(kWhatRead, mReflector))->post();
-}
-
-void RepeaterSource::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatRead:
- {
- MediaBuffer *buffer;
- status_t err = mSource->read(&buffer);
-
- ALOGV("read mbuf %p", buffer);
-
- Mutex::Autolock autoLock(mLock);
- if (mBuffer != NULL) {
- mBuffer->release();
- mBuffer = NULL;
- }
- mBuffer = buffer;
- mResult = err;
- mLastBufferUpdateUs = ALooper::GetNowUs();
-
- mCondition.broadcast();
-
- if (err == OK) {
- postRead();
- }
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void RepeaterSource::wakeUp() {
- ALOGV("wakeUp");
- Mutex::Autolock autoLock(mLock);
- if (mLastBufferUpdateUs < 0ll && mBuffer != NULL) {
- mLastBufferUpdateUs = ALooper::GetNowUs();
- mCondition.broadcast();
- }
-}
-
-} // namespace android
diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.h b/media/libstagefright/wifi-display/source/RepeaterSource.h
deleted file mode 100644
index 8d414fd..0000000
--- a/media/libstagefright/wifi-display/source/RepeaterSource.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#ifndef REPEATER_SOURCE_H_
-
-#define REPEATER_SOURCE_H_
-
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/foundation/AHandlerReflector.h>
-#include <media/stagefright/MediaSource.h>
-
-#define SUSPEND_VIDEO_IF_IDLE 0
-
-namespace android {
-
-// This MediaSource delivers frames at a constant rate by repeating buffers
-// if necessary.
-struct RepeaterSource : public MediaSource {
- RepeaterSource(const sp<MediaSource> &source, double rateHz);
-
- virtual status_t start(MetaData *params);
- virtual status_t stop();
- virtual sp<MetaData> getFormat();
-
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options);
-
- void onMessageReceived(const sp<AMessage> &msg);
-
- // If RepeaterSource is currently dormant, because SurfaceFlinger didn't
- // send updates in a while, this is its wakeup call.
- void wakeUp();
-
- double getFrameRate() const;
- void setFrameRate(double rateHz);
-
-protected:
- virtual ~RepeaterSource();
-
-private:
- enum {
- kWhatRead,
- };
-
- Mutex mLock;
- Condition mCondition;
-
- bool mStarted;
-
- sp<MediaSource> mSource;
- double mRateHz;
-
- sp<ALooper> mLooper;
- sp<AHandlerReflector<RepeaterSource> > mReflector;
-
- MediaBuffer *mBuffer;
- status_t mResult;
- int64_t mLastBufferUpdateUs;
-
- int64_t mStartTimeUs;
- int32_t mFrameCount;
-
- void postRead();
-
- DISALLOW_EVIL_CONSTRUCTORS(RepeaterSource);
-};
-
-} // namespace android
-
-#endif // REPEATER_SOURCE_H_
diff --git a/media/libstagefright/wifi-display/source/TSPacketizer.cpp b/media/libstagefright/wifi-display/source/TSPacketizer.cpp
deleted file mode 100644
index 865ba94..0000000
--- a/media/libstagefright/wifi-display/source/TSPacketizer.cpp
+++ /dev/null
@@ -1,1055 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "TSPacketizer"
-#include <utils/Log.h>
-
-#include "TSPacketizer.h"
-#include "include/avc_utils.h"
-
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-
-#include <arpa/inet.h>
-
-namespace android {
-
-struct TSPacketizer::Track : public RefBase {
- Track(const sp<AMessage> &format,
- unsigned PID, unsigned streamType, unsigned streamID);
-
- unsigned PID() const;
- unsigned streamType() const;
- unsigned streamID() const;
-
- // Returns the previous value.
- unsigned incrementContinuityCounter();
-
- bool isAudio() const;
- bool isVideo() const;
-
- bool isH264() const;
- bool isAAC() const;
- bool lacksADTSHeader() const;
- bool isPCMAudio() const;
-
- sp<ABuffer> prependCSD(const sp<ABuffer> &accessUnit) const;
- sp<ABuffer> prependADTSHeader(const sp<ABuffer> &accessUnit) const;
-
- size_t countDescriptors() const;
- sp<ABuffer> descriptorAt(size_t index) const;
-
- void finalize();
- void extractCSDIfNecessary();
-
-protected:
- virtual ~Track();
-
-private:
- sp<AMessage> mFormat;
-
- unsigned mPID;
- unsigned mStreamType;
- unsigned mStreamID;
- unsigned mContinuityCounter;
-
- AString mMIME;
- Vector<sp<ABuffer> > mCSD;
-
- Vector<sp<ABuffer> > mDescriptors;
-
- bool mAudioLacksATDSHeaders;
- bool mFinalized;
- bool mExtractedCSD;
-
- DISALLOW_EVIL_CONSTRUCTORS(Track);
-};
-
-TSPacketizer::Track::Track(
- const sp<AMessage> &format,
- unsigned PID, unsigned streamType, unsigned streamID)
- : mFormat(format),
- mPID(PID),
- mStreamType(streamType),
- mStreamID(streamID),
- mContinuityCounter(0),
- mAudioLacksATDSHeaders(false),
- mFinalized(false),
- mExtractedCSD(false) {
- CHECK(format->findString("mime", &mMIME));
-}
-
-void TSPacketizer::Track::extractCSDIfNecessary() {
- if (mExtractedCSD) {
- return;
- }
-
- if (!strcasecmp(mMIME.c_str(), MEDIA_MIMETYPE_VIDEO_AVC)
- || !strcasecmp(mMIME.c_str(), MEDIA_MIMETYPE_AUDIO_AAC)) {
- for (size_t i = 0;; ++i) {
- sp<ABuffer> csd;
- if (!mFormat->findBuffer(AStringPrintf("csd-%d", i).c_str(), &csd)) {
- break;
- }
-
- mCSD.push(csd);
- }
-
- if (!strcasecmp(mMIME.c_str(), MEDIA_MIMETYPE_AUDIO_AAC)) {
- int32_t isADTS;
- if (!mFormat->findInt32("is-adts", &isADTS) || isADTS == 0) {
- mAudioLacksATDSHeaders = true;
- }
- }
- }
-
- mExtractedCSD = true;
-}
-
-TSPacketizer::Track::~Track() {
-}
-
-unsigned TSPacketizer::Track::PID() const {
- return mPID;
-}
-
-unsigned TSPacketizer::Track::streamType() const {
- return mStreamType;
-}
-
-unsigned TSPacketizer::Track::streamID() const {
- return mStreamID;
-}
-
-unsigned TSPacketizer::Track::incrementContinuityCounter() {
- unsigned prevCounter = mContinuityCounter;
-
- if (++mContinuityCounter == 16) {
- mContinuityCounter = 0;
- }
-
- return prevCounter;
-}
-
-bool TSPacketizer::Track::isAudio() const {
- return !strncasecmp("audio/", mMIME.c_str(), 6);
-}
-
-bool TSPacketizer::Track::isVideo() const {
- return !strncasecmp("video/", mMIME.c_str(), 6);
-}
-
-bool TSPacketizer::Track::isH264() const {
- return !strcasecmp(mMIME.c_str(), MEDIA_MIMETYPE_VIDEO_AVC);
-}
-
-bool TSPacketizer::Track::isAAC() const {
- return !strcasecmp(mMIME.c_str(), MEDIA_MIMETYPE_AUDIO_AAC);
-}
-
-bool TSPacketizer::Track::isPCMAudio() const {
- return !strcasecmp(mMIME.c_str(), MEDIA_MIMETYPE_AUDIO_RAW);
-}
-
-bool TSPacketizer::Track::lacksADTSHeader() const {
- return mAudioLacksATDSHeaders;
-}
-
-sp<ABuffer> TSPacketizer::Track::prependCSD(
- const sp<ABuffer> &accessUnit) const {
- size_t size = 0;
- for (size_t i = 0; i < mCSD.size(); ++i) {
- size += mCSD.itemAt(i)->size();
- }
-
- sp<ABuffer> dup = new ABuffer(accessUnit->size() + size);
- size_t offset = 0;
- for (size_t i = 0; i < mCSD.size(); ++i) {
- const sp<ABuffer> &csd = mCSD.itemAt(i);
-
- memcpy(dup->data() + offset, csd->data(), csd->size());
- offset += csd->size();
- }
-
- memcpy(dup->data() + offset, accessUnit->data(), accessUnit->size());
-
- return dup;
-}
-
-sp<ABuffer> TSPacketizer::Track::prependADTSHeader(
- const sp<ABuffer> &accessUnit) const {
- CHECK_EQ(mCSD.size(), 1u);
-
- const uint8_t *codec_specific_data = mCSD.itemAt(0)->data();
-
- const uint32_t aac_frame_length = accessUnit->size() + 7;
-
- sp<ABuffer> dup = new ABuffer(aac_frame_length);
-
- unsigned profile = (codec_specific_data[0] >> 3) - 1;
-
- unsigned sampling_freq_index =
- ((codec_specific_data[0] & 7) << 1)
- | (codec_specific_data[1] >> 7);
-
- unsigned channel_configuration =
- (codec_specific_data[1] >> 3) & 0x0f;
-
- uint8_t *ptr = dup->data();
-
- *ptr++ = 0xff;
- *ptr++ = 0xf9; // b11111001, ID=1(MPEG-2), layer=0, protection_absent=1
-
- *ptr++ =
- profile << 6
- | sampling_freq_index << 2
- | ((channel_configuration >> 2) & 1); // private_bit=0
-
- // original_copy=0, home=0, copyright_id_bit=0, copyright_id_start=0
- *ptr++ =
- (channel_configuration & 3) << 6
- | aac_frame_length >> 11;
- *ptr++ = (aac_frame_length >> 3) & 0xff;
- *ptr++ = (aac_frame_length & 7) << 5;
-
- // adts_buffer_fullness=0, number_of_raw_data_blocks_in_frame=0
- *ptr++ = 0;
-
- memcpy(ptr, accessUnit->data(), accessUnit->size());
-
- return dup;
-}
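A worked example of the field extraction above, assuming a typical 2-byte AudioSpecificConfig for AAC-LC at 48 kHz stereo (the csd bytes are an assumed example, not taken from this tree):

//   csd = { 0x11, 0x90 }
//   profile               = (0x11 >> 3) - 1                 = 1   (AAC-LC)
//   sampling_freq_index   = ((0x11 & 7) << 1) | (0x90 >> 7) = 3   (48 kHz)
//   channel_configuration = (0x90 >> 3) & 0x0f              = 2   (stereo)
//
// These values land in ADTS header bytes 2 and 3; aac_frame_length
// (access unit size + 7) is spread across bytes 3..5.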
-
-size_t TSPacketizer::Track::countDescriptors() const {
- return mDescriptors.size();
-}
-
-sp<ABuffer> TSPacketizer::Track::descriptorAt(size_t index) const {
- CHECK_LT(index, mDescriptors.size());
- return mDescriptors.itemAt(index);
-}
-
-void TSPacketizer::Track::finalize() {
- if (mFinalized) {
- return;
- }
-
- if (isH264()) {
- {
- // AVC video descriptor (40)
-
- sp<ABuffer> descriptor = new ABuffer(6);
- uint8_t *data = descriptor->data();
- data[0] = 40; // descriptor_tag
- data[1] = 4; // descriptor_length
-
- if (mCSD.size() > 0) {
- CHECK_GE(mCSD.size(), 1u);
- const sp<ABuffer> &sps = mCSD.itemAt(0);
- CHECK(!memcmp("\x00\x00\x00\x01", sps->data(), 4));
- CHECK_GE(sps->size(), 7u);
- // profile_idc, constraint_set*, level_idc
- memcpy(&data[2], sps->data() + 4, 3);
- } else {
- int32_t profileIdc, levelIdc, constraintSet;
- CHECK(mFormat->findInt32("profile-idc", &profileIdc));
- CHECK(mFormat->findInt32("level-idc", &levelIdc));
- CHECK(mFormat->findInt32("constraint-set", &constraintSet));
- CHECK_GE(profileIdc, 0);
- CHECK_GE(levelIdc, 0);
- data[2] = profileIdc; // profile_idc
- data[3] = constraintSet; // constraint_set*
- data[4] = levelIdc; // level_idc
- }
-
- // AVC_still_present=0, AVC_24_hour_picture_flag=0, reserved
- data[5] = 0x3f;
-
- mDescriptors.push_back(descriptor);
- }
-
- {
- // AVC timing and HRD descriptor (42)
-
- sp<ABuffer> descriptor = new ABuffer(4);
- uint8_t *data = descriptor->data();
- data[0] = 42; // descriptor_tag
- data[1] = 2; // descriptor_length
-
- // hrd_management_valid_flag = 0
- // reserved = 111111b
- // picture_and_timing_info_present = 0
-
- data[2] = 0x7e;
-
- // fixed_frame_rate_flag = 0
- // temporal_poc_flag = 0
- // picture_to_display_conversion_flag = 0
- // reserved = 11111b
- data[3] = 0x1f;
-
- mDescriptors.push_back(descriptor);
- }
- } else if (isPCMAudio()) {
- // LPCM audio stream descriptor (0x83)
-
- int32_t channelCount;
- CHECK(mFormat->findInt32("channel-count", &channelCount));
- CHECK_EQ(channelCount, 2);
-
- int32_t sampleRate;
- CHECK(mFormat->findInt32("sample-rate", &sampleRate));
- CHECK(sampleRate == 44100 || sampleRate == 48000);
-
- sp<ABuffer> descriptor = new ABuffer(4);
- uint8_t *data = descriptor->data();
- data[0] = 0x83; // descriptor_tag
- data[1] = 2; // descriptor_length
-
- unsigned sampling_frequency = (sampleRate == 44100) ? 1 : 2;
-
- data[2] = (sampling_frequency << 5)
- | (3 /* reserved */ << 1)
- | 0 /* emphasis_flag */;
-
- data[3] =
- (1 /* number_of_channels = stereo */ << 5)
- | 0xf /* reserved */;
-
- mDescriptors.push_back(descriptor);
- }
-
- mFinalized = true;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-TSPacketizer::TSPacketizer(uint32_t flags)
- : mFlags(flags),
- mPATContinuityCounter(0),
- mPMTContinuityCounter(0) {
- initCrcTable();
-
- if (flags & (EMIT_HDCP20_DESCRIPTOR | EMIT_HDCP21_DESCRIPTOR)) {
- int32_t hdcpVersion;
- if (flags & EMIT_HDCP20_DESCRIPTOR) {
- CHECK(!(flags & EMIT_HDCP21_DESCRIPTOR));
- hdcpVersion = 0x20;
- } else {
- CHECK(!(flags & EMIT_HDCP20_DESCRIPTOR));
-
- // HDCP2.0 _and_ HDCP 2.1 specs say to set the version
- // inside the HDCP descriptor to 0x20!!!
- hdcpVersion = 0x20;
- }
-
- // HDCP descriptor
- sp<ABuffer> descriptor = new ABuffer(7);
- uint8_t *data = descriptor->data();
- data[0] = 0x05; // descriptor_tag
- data[1] = 5; // descriptor_length
- data[2] = 'H';
- data[3] = 'D';
- data[4] = 'C';
- data[5] = 'P';
- data[6] = hdcpVersion;
-
- mProgramInfoDescriptors.push_back(descriptor);
- }
-}
-
-TSPacketizer::~TSPacketizer() {
-}
-
-ssize_t TSPacketizer::addTrack(const sp<AMessage> &format) {
- AString mime;
- CHECK(format->findString("mime", &mime));
-
- unsigned PIDStart;
- bool isVideo = !strncasecmp("video/", mime.c_str(), 6);
- bool isAudio = !strncasecmp("audio/", mime.c_str(), 6);
-
- if (isVideo) {
- PIDStart = 0x1011;
- } else if (isAudio) {
- PIDStart = 0x1100;
- } else {
- return ERROR_UNSUPPORTED;
- }
-
- unsigned streamType;
- unsigned streamIDStart;
- unsigned streamIDStop;
-
- if (!strcasecmp(mime.c_str(), MEDIA_MIMETYPE_VIDEO_AVC)) {
- streamType = 0x1b;
- streamIDStart = 0xe0;
- streamIDStop = 0xef;
- } else if (!strcasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_AAC)) {
- streamType = 0x0f;
- streamIDStart = 0xc0;
- streamIDStop = 0xdf;
- } else if (!strcasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_RAW)) {
- streamType = 0x83;
- streamIDStart = 0xbd;
- streamIDStop = 0xbd;
- } else {
- return ERROR_UNSUPPORTED;
- }
-
- size_t numTracksOfThisType = 0;
- unsigned PID = PIDStart;
-
- for (size_t i = 0; i < mTracks.size(); ++i) {
- const sp<Track> &track = mTracks.itemAt(i);
-
- if (track->streamType() == streamType) {
- ++numTracksOfThisType;
- }
-
- if ((isAudio && track->isAudio()) || (isVideo && track->isVideo())) {
- ++PID;
- }
- }
-
- unsigned streamID = streamIDStart + numTracksOfThisType;
- if (streamID > streamIDStop) {
- return -ERANGE;
- }
-
- sp<Track> track = new Track(format, PID, streamType, streamID);
- return mTracks.add(track);
-}
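Following the allocation logic above, tracks map to PIDs and stream IDs as in this worked example (derived directly from the code, for a session registering one AVC video track and then one AAC audio track):

// addTrack("video/avc")      -> PID 0x1011, stream_type 0x1b, stream_id 0xe0
// addTrack("audio/mp4a-latm") -> PID 0x1100, stream_type 0x0f, stream_id 0xc0
// A second video track would get PID 0x1012 and stream_id 0xe1.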
-
-status_t TSPacketizer::extractCSDIfNecessary(size_t trackIndex) {
- if (trackIndex >= mTracks.size()) {
- return -ERANGE;
- }
-
- const sp<Track> &track = mTracks.itemAt(trackIndex);
- track->extractCSDIfNecessary();
-
- return OK;
-}
-
-status_t TSPacketizer::packetize(
- size_t trackIndex,
- const sp<ABuffer> &_accessUnit,
- sp<ABuffer> *packets,
- uint32_t flags,
- const uint8_t *PES_private_data, size_t PES_private_data_len,
- size_t numStuffingBytes) {
- sp<ABuffer> accessUnit = _accessUnit;
-
- int64_t timeUs;
- CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-
- packets->clear();
-
- if (trackIndex >= mTracks.size()) {
- return -ERANGE;
- }
-
- const sp<Track> &track = mTracks.itemAt(trackIndex);
-
- if (track->isH264() && (flags & PREPEND_SPS_PPS_TO_IDR_FRAMES)
- && IsIDR(accessUnit)) {
- // prepend codec specific data, i.e. SPS and PPS.
- accessUnit = track->prependCSD(accessUnit);
- } else if (track->isAAC() && track->lacksADTSHeader()) {
- CHECK(!(flags & IS_ENCRYPTED));
- accessUnit = track->prependADTSHeader(accessUnit);
- }
-
- // 0x47
- // transport_error_indicator = b0
- // payload_unit_start_indicator = b1
- // transport_priority = b0
- // PID
- // transport_scrambling_control = b00
- // adaptation_field_control = b??
- // continuity_counter = b????
- // -- payload follows
- // packet_startcode_prefix = 0x000001
- // stream_id
- // PES_packet_length = 0x????
- // reserved = b10
- // PES_scrambling_control = b00
- // PES_priority = b0
- // data_alignment_indicator = b1
- // copyright = b0
- // original_or_copy = b0
- // PTS_DTS_flags = b10 (PTS only)
- // ESCR_flag = b0
- // ES_rate_flag = b0
- // DSM_trick_mode_flag = b0
- // additional_copy_info_flag = b0
- // PES_CRC_flag = b0
- // PES_extension_flag = b0
- // PES_header_data_length = 0x05
- // reserved = b0010 (PTS)
- // PTS[32..30] = b???
- // reserved = b1
- // PTS[29..15] = b??? ???? ???? ???? (15 bits)
- // reserved = b1
- // PTS[14..0] = b??? ???? ???? ???? (15 bits)
- // reserved = b1
- // the first fragment of "buffer" follows
-
- // Each transport packet (except for the last one contributing to the PES
- // payload) must contain a multiple of 16 bytes of payload per HDCP spec.
- bool alignPayload =
- (mFlags & (EMIT_HDCP20_DESCRIPTOR | EMIT_HDCP21_DESCRIPTOR));
-
- /*
- a) The very first PES transport stream packet contains
-
- 4 bytes of TS header
- ... padding
- 14 bytes of static PES header
- PES_private_data_len + 1 bytes (only if PES_private_data_len > 0)
- numStuffingBytes bytes
-
- followed by the payload
-
- b) Subsequent PES transport stream packets contain
-
- 4 bytes of TS header
- ... padding
-
- followed by the payload
- */
-
- size_t PES_packet_length = accessUnit->size() + 8 + numStuffingBytes;
- if (PES_private_data_len > 0) {
- PES_packet_length += PES_private_data_len + 1;
- }
-
- size_t numTSPackets = 1;
-
- {
- // Make sure the PES header fits into a single TS packet:
- size_t PES_header_size = 14 + numStuffingBytes;
- if (PES_private_data_len > 0) {
- PES_header_size += PES_private_data_len + 1;
- }
-
- CHECK_LE(PES_header_size, 188u - 4u);
-
- size_t sizeAvailableForPayload = 188 - 4 - PES_header_size;
- size_t numBytesOfPayload = accessUnit->size();
-
- if (numBytesOfPayload > sizeAvailableForPayload) {
- numBytesOfPayload = sizeAvailableForPayload;
-
- if (alignPayload && numBytesOfPayload > 16) {
- numBytesOfPayload -= (numBytesOfPayload % 16);
- }
- }
-
- size_t numPaddingBytes = sizeAvailableForPayload - numBytesOfPayload;
- ALOGV("packet 1 contains %zd padding bytes and %zd bytes of payload",
- numPaddingBytes, numBytesOfPayload);
-
- size_t numBytesOfPayloadRemaining = accessUnit->size() - numBytesOfPayload;
-
-#if 0
- // The following hopefully illustrates the logic that led to the
- // more efficient computation in the #else block...
-
- while (numBytesOfPayloadRemaining > 0) {
- size_t sizeAvailableForPayload = 188 - 4;
-
- size_t numBytesOfPayload = numBytesOfPayloadRemaining;
-
- if (numBytesOfPayload > sizeAvailableForPayload) {
- numBytesOfPayload = sizeAvailableForPayload;
-
- if (alignPayload && numBytesOfPayload > 16) {
- numBytesOfPayload -= (numBytesOfPayload % 16);
- }
- }
-
- size_t numPaddingBytes = sizeAvailableForPayload - numBytesOfPayload;
- ALOGI("packet %zd contains %zd padding bytes and %zd bytes of payload",
- numTSPackets + 1, numPaddingBytes, numBytesOfPayload);
-
- numBytesOfPayloadRemaining -= numBytesOfPayload;
- ++numTSPackets;
- }
-#else
- // This is how many bytes of payload each subsequent TS packet
- // can contain at most.
- sizeAvailableForPayload = 188 - 4;
- size_t sizeAvailableForAlignedPayload = sizeAvailableForPayload;
- if (alignPayload) {
- // We're only going to use a subset of the available space
- // since we need to make each fragment a multiple of 16 in size.
- sizeAvailableForAlignedPayload -=
- (sizeAvailableForAlignedPayload % 16);
- }
-
- size_t numFullTSPackets =
- numBytesOfPayloadRemaining / sizeAvailableForAlignedPayload;
-
- numTSPackets += numFullTSPackets;
-
- numBytesOfPayloadRemaining -=
- numFullTSPackets * sizeAvailableForAlignedPayload;
-
- // numBytesOfPayloadRemaining < sizeAvailableForAlignedPayload
- if (numFullTSPackets == 0 && numBytesOfPayloadRemaining > 0) {
- // There wasn't enough payload left to form a full aligned payload,
- // the last packet doesn't have to be aligned.
- ++numTSPackets;
- } else if (numFullTSPackets > 0
- && numBytesOfPayloadRemaining
- + sizeAvailableForAlignedPayload > sizeAvailableForPayload) {
- // The last packet emitted had a full aligned payload and together
- // with the bytes remaining does exceed the unaligned payload
- // size, so we need another packet.
- ++numTSPackets;
- }
-#endif
- }
-
- if (flags & EMIT_PAT_AND_PMT) {
- numTSPackets += 2;
- }
-
- if (flags & EMIT_PCR) {
- ++numTSPackets;
- }
-
- sp<ABuffer> buffer = new ABuffer(numTSPackets * 188);
- uint8_t *packetDataStart = buffer->data();
-
- if (flags & EMIT_PAT_AND_PMT) {
- // Program Association Table (PAT):
- // 0x47
- // transport_error_indicator = b0
- // payload_unit_start_indicator = b1
- // transport_priority = b0
- // PID = b0000000000000 (13 bits)
- // transport_scrambling_control = b00
- // adaptation_field_control = b01 (no adaptation field, payload only)
- // continuity_counter = b????
- // skip = 0x00
- // --- payload follows
- // table_id = 0x00
- // section_syntax_indicator = b1
- // must_be_zero = b0
- // reserved = b11
- // section_length = 0x00d
- // transport_stream_id = 0x0000
- // reserved = b11
- // version_number = b00001
- // current_next_indicator = b1
- // section_number = 0x00
- // last_section_number = 0x00
- // one program follows:
- // program_number = 0x0001
- // reserved = b111
- // program_map_PID = kPID_PMT (13 bits!)
- // CRC = 0x????????
-
- if (++mPATContinuityCounter == 16) {
- mPATContinuityCounter = 0;
- }
-
- uint8_t *ptr = packetDataStart;
- *ptr++ = 0x47;
- *ptr++ = 0x40;
- *ptr++ = 0x00;
- *ptr++ = 0x10 | mPATContinuityCounter;
- *ptr++ = 0x00;
-
- uint8_t *crcDataStart = ptr;
- *ptr++ = 0x00;
- *ptr++ = 0xb0;
- *ptr++ = 0x0d;
- *ptr++ = 0x00;
- *ptr++ = 0x00;
- *ptr++ = 0xc3;
- *ptr++ = 0x00;
- *ptr++ = 0x00;
- *ptr++ = 0x00;
- *ptr++ = 0x01;
- *ptr++ = 0xe0 | (kPID_PMT >> 8);
- *ptr++ = kPID_PMT & 0xff;
-
- CHECK_EQ(ptr - crcDataStart, 12);
- uint32_t crc = htonl(crc32(crcDataStart, ptr - crcDataStart));
- memcpy(ptr, &crc, 4);
- ptr += 4;
-
- size_t sizeLeft = packetDataStart + 188 - ptr;
- memset(ptr, 0xff, sizeLeft);
-
- packetDataStart += 188;
-
- // Program Map (PMT):
- // 0x47
- // transport_error_indicator = b0
- // payload_unit_start_indicator = b1
- // transport_priority = b0
- // PID = kPID_PMT (13 bits)
- // transport_scrambling_control = b00
- // adaptation_field_control = b01 (no adaptation field, payload only)
- // continuity_counter = b????
- // skip = 0x00
- // -- payload follows
- // table_id = 0x02
- // section_syntax_indicator = b1
- // must_be_zero = b0
- // reserved = b11
- // section_length = 0x???
- // program_number = 0x0001
- // reserved = b11
- // version_number = b00001
- // current_next_indicator = b1
- // section_number = 0x00
- // last_section_number = 0x00
- // reserved = b111
- // PCR_PID = kPCR_PID (13 bits)
- // reserved = b1111
- // program_info_length = 0x???
- // program_info_descriptors follow
- // one or more elementary stream descriptions follow:
- // stream_type = 0x??
- // reserved = b111
- // elementary_PID = b? ???? ???? ???? (13 bits)
- // reserved = b1111
- // ES_info_length = 0x000
- // CRC = 0x????????
-
- if (++mPMTContinuityCounter == 16) {
- mPMTContinuityCounter = 0;
- }
-
- ptr = packetDataStart;
- *ptr++ = 0x47;
- *ptr++ = 0x40 | (kPID_PMT >> 8);
- *ptr++ = kPID_PMT & 0xff;
- *ptr++ = 0x10 | mPMTContinuityCounter;
- *ptr++ = 0x00;
-
- crcDataStart = ptr;
- *ptr++ = 0x02;
-
- *ptr++ = 0x00; // section_length to be filled in below.
- *ptr++ = 0x00;
-
- *ptr++ = 0x00;
- *ptr++ = 0x01;
- *ptr++ = 0xc3;
- *ptr++ = 0x00;
- *ptr++ = 0x00;
- *ptr++ = 0xe0 | (kPID_PCR >> 8);
- *ptr++ = kPID_PCR & 0xff;
-
- size_t program_info_length = 0;
- for (size_t i = 0; i < mProgramInfoDescriptors.size(); ++i) {
- program_info_length += mProgramInfoDescriptors.itemAt(i)->size();
- }
-
- CHECK_LT(program_info_length, 0x400u);
- *ptr++ = 0xf0 | (program_info_length >> 8);
- *ptr++ = (program_info_length & 0xff);
-
- for (size_t i = 0; i < mProgramInfoDescriptors.size(); ++i) {
- const sp<ABuffer> &desc = mProgramInfoDescriptors.itemAt(i);
- memcpy(ptr, desc->data(), desc->size());
- ptr += desc->size();
- }
-
- for (size_t i = 0; i < mTracks.size(); ++i) {
- const sp<Track> &track = mTracks.itemAt(i);
-
- // Make sure all the descriptors have been added.
- track->finalize();
-
- *ptr++ = track->streamType();
- *ptr++ = 0xe0 | (track->PID() >> 8);
- *ptr++ = track->PID() & 0xff;
-
- size_t ES_info_length = 0;
- for (size_t i = 0; i < track->countDescriptors(); ++i) {
- ES_info_length += track->descriptorAt(i)->size();
- }
- CHECK_LE(ES_info_length, 0xfffu);
-
- *ptr++ = 0xf0 | (ES_info_length >> 8);
- *ptr++ = (ES_info_length & 0xff);
-
- for (size_t i = 0; i < track->countDescriptors(); ++i) {
- const sp<ABuffer> &descriptor = track->descriptorAt(i);
- memcpy(ptr, descriptor->data(), descriptor->size());
- ptr += descriptor->size();
- }
- }
-
- size_t section_length = ptr - (crcDataStart + 3) + 4 /* CRC */;
-
- crcDataStart[1] = 0xb0 | (section_length >> 8);
- crcDataStart[2] = section_length & 0xff;
-
- crc = htonl(crc32(crcDataStart, ptr - crcDataStart));
- memcpy(ptr, &crc, 4);
- ptr += 4;
-
- sizeLeft = packetDataStart + 188 - ptr;
- memset(ptr, 0xff, sizeLeft);
-
- packetDataStart += 188;
- }
-
- if (flags & EMIT_PCR) {
- // PCR stream
- // 0x47
- // transport_error_indicator = b0
- // payload_unit_start_indicator = b1
- // transport_priority = b0
- // PID = kPCR_PID (13 bits)
- // transport_scrambling_control = b00
- // adaptation_field_control = b10 (adaptation field only, no payload)
- // continuity_counter = b0000 (does not increment)
- // adaptation_field_length = 183
- // discontinuity_indicator = b0
- // random_access_indicator = b0
- // elementary_stream_priority_indicator = b0
- // PCR_flag = b1
- // OPCR_flag = b0
- // splicing_point_flag = b0
- // transport_private_data_flag = b0
- // adaptation_field_extension_flag = b0
- // program_clock_reference_base = b?????????????????????????????????
- // reserved = b111111
- // program_clock_reference_extension = b?????????
-
- int64_t nowUs = ALooper::GetNowUs();
-
- uint64_t PCR = nowUs * 27; // PCR based on a 27MHz clock
- uint64_t PCR_base = PCR / 300;
- uint32_t PCR_ext = PCR % 300;
-
- uint8_t *ptr = packetDataStart;
- *ptr++ = 0x47;
- *ptr++ = 0x40 | (kPID_PCR >> 8);
- *ptr++ = kPID_PCR & 0xff;
- *ptr++ = 0x20;
- *ptr++ = 0xb7; // adaptation_field_length
- *ptr++ = 0x10;
- *ptr++ = (PCR_base >> 25) & 0xff;
- *ptr++ = (PCR_base >> 17) & 0xff;
- *ptr++ = (PCR_base >> 9) & 0xff;
- *ptr++ = ((PCR_base & 1) << 7) | 0x7e | ((PCR_ext >> 8) & 1);
- *ptr++ = (PCR_ext & 0xff);
-
- size_t sizeLeft = packetDataStart + 188 - ptr;
- memset(ptr, 0xff, sizeLeft);
-
- packetDataStart += 188;
- }
-
- uint64_t PTS = (timeUs * 9ll) / 100ll;
-
- if (PES_packet_length >= 65536) {
- // This really should only happen for video.
- CHECK(track->isVideo());
-
- // It's valid to set this to 0 for video according to the specs.
- PES_packet_length = 0;
- }
-
- size_t sizeAvailableForPayload = 188 - 4 - 14 - numStuffingBytes;
- if (PES_private_data_len > 0) {
- sizeAvailableForPayload -= PES_private_data_len + 1;
- }
-
- size_t copy = accessUnit->size();
-
- if (copy > sizeAvailableForPayload) {
- copy = sizeAvailableForPayload;
-
- if (alignPayload && copy > 16) {
- copy -= (copy % 16);
- }
- }
-
- size_t numPaddingBytes = sizeAvailableForPayload - copy;
-
- uint8_t *ptr = packetDataStart;
- *ptr++ = 0x47;
- *ptr++ = 0x40 | (track->PID() >> 8);
- *ptr++ = track->PID() & 0xff;
-
- *ptr++ = (numPaddingBytes > 0 ? 0x30 : 0x10)
- | track->incrementContinuityCounter();
-
- if (numPaddingBytes > 0) {
- *ptr++ = numPaddingBytes - 1;
- if (numPaddingBytes >= 2) {
- *ptr++ = 0x00;
- memset(ptr, 0xff, numPaddingBytes - 2);
- ptr += numPaddingBytes - 2;
- }
- }
-
- *ptr++ = 0x00;
- *ptr++ = 0x00;
- *ptr++ = 0x01;
- *ptr++ = track->streamID();
- *ptr++ = PES_packet_length >> 8;
- *ptr++ = PES_packet_length & 0xff;
- *ptr++ = 0x84;
- *ptr++ = (PES_private_data_len > 0) ? 0x81 : 0x80;
-
- size_t headerLength = 0x05 + numStuffingBytes;
- if (PES_private_data_len > 0) {
- headerLength += 1 + PES_private_data_len;
- }
-
- *ptr++ = headerLength;
-
- *ptr++ = 0x20 | (((PTS >> 30) & 7) << 1) | 1;
- *ptr++ = (PTS >> 22) & 0xff;
- *ptr++ = (((PTS >> 15) & 0x7f) << 1) | 1;
- *ptr++ = (PTS >> 7) & 0xff;
- *ptr++ = ((PTS & 0x7f) << 1) | 1;
-
- if (PES_private_data_len > 0) {
- *ptr++ = 0x8e; // PES_private_data_flag, reserved.
- memcpy(ptr, PES_private_data, PES_private_data_len);
- ptr += PES_private_data_len;
- }
-
- for (size_t i = 0; i < numStuffingBytes; ++i) {
- *ptr++ = 0xff;
- }
-
- memcpy(ptr, accessUnit->data(), copy);
- ptr += copy;
-
- CHECK_EQ(ptr, packetDataStart + 188);
- packetDataStart += 188;
-
- size_t offset = copy;
- while (offset < accessUnit->size()) {
- // for subsequent fragments of "buffer":
- // 0x47
- // transport_error_indicator = b0
- // payload_unit_start_indicator = b0
- // transport_priority = b0
- // PID = b0 0001 1110 ???? (13 bits) [0x1e0 + 1 + sourceIndex]
- // transport_scrambling_control = b00
- // adaptation_field_control = b??
- // continuity_counter = b????
- // the fragment of "buffer" follows.
-
- size_t sizeAvailableForPayload = 188 - 4;
-
- size_t copy = accessUnit->size() - offset;
-
- if (copy > sizeAvailableForPayload) {
- copy = sizeAvailableForPayload;
-
- if (alignPayload && copy > 16) {
- copy -= (copy % 16);
- }
- }
-
- size_t numPaddingBytes = sizeAvailableForPayload - copy;
-
- uint8_t *ptr = packetDataStart;
- *ptr++ = 0x47;
- *ptr++ = 0x00 | (track->PID() >> 8);
- *ptr++ = track->PID() & 0xff;
-
- *ptr++ = (numPaddingBytes > 0 ? 0x30 : 0x10)
- | track->incrementContinuityCounter();
-
- if (numPaddingBytes > 0) {
- *ptr++ = numPaddingBytes - 1;
- if (numPaddingBytes >= 2) {
- *ptr++ = 0x00;
- memset(ptr, 0xff, numPaddingBytes - 2);
- ptr += numPaddingBytes - 2;
- }
- }
-
- memcpy(ptr, accessUnit->data() + offset, copy);
- ptr += copy;
- CHECK_EQ(ptr, packetDataStart + 188);
-
- offset += copy;
- packetDataStart += 188;
- }
-
- CHECK(packetDataStart == buffer->data() + buffer->capacity());
-
- *packets = buffer;
-
- return OK;
-}
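As a concrete check of the timestamp arithmetic in packetize(), take timeUs = 1,000,000 (one second); the numbers below follow directly from the expressions above:

// PTS = 1,000,000 * 9 / 100 = 90,000 (90 kHz units)
//   packed PES header bytes: 0x21 0x00 0x05 0xBF 0x21
//     0x21 = 0x20 | (((PTS >> 30) & 7) << 1) | 1
//     0x00 = (PTS >> 22) & 0xff
//     0x05 = (((PTS >> 15) & 0x7f) << 1) | 1
//     0xBF = (PTS >> 7) & 0xff
//     0x21 = ((PTS & 0x7f) << 1) | 1
//
// PCR = 1,000,000 * 27 = 27,000,000 (27 MHz units)
//   PCR_base = PCR / 300 = 90,000,  PCR_ext = PCR % 300 = 0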
-
-void TSPacketizer::initCrcTable() {
- uint32_t poly = 0x04C11DB7;
-
- for (int i = 0; i < 256; i++) {
- uint32_t crc = i << 24;
- for (int j = 0; j < 8; j++) {
- crc = (crc << 1) ^ ((crc & 0x80000000) ? (poly) : 0);
- }
- mCrcTable[i] = crc;
- }
-}
-
-uint32_t TSPacketizer::crc32(const uint8_t *start, size_t size) const {
- uint32_t crc = 0xFFFFFFFF;
- const uint8_t *p;
-
- for (p = start; p < start + size; ++p) {
- crc = (crc << 8) ^ mCrcTable[((crc >> 24) ^ *p) & 0xFF];
- }
-
- return crc;
-}
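The two functions above implement the table-driven MPEG-2 CRC-32 used for PSI sections (polynomial 0x04C11DB7, initial value 0xFFFFFFFF, no bit reflection, no final XOR). A bit-at-a-time equivalent, shown only to make the algorithm explicit (a sketch, not code from this tree):

static uint32_t crc32_mpeg2_bitwise(const uint8_t *data, size_t size) {
    uint32_t crc = 0xFFFFFFFF;
    for (size_t i = 0; i < size; ++i) {
        crc ^= static_cast<uint32_t>(data[i]) << 24;
        for (int bit = 0; bit < 8; ++bit) {
            crc = (crc & 0x80000000) ? (crc << 1) ^ 0x04C11DB7 : (crc << 1);
        }
    }
    return crc;
}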
-
-sp<ABuffer> TSPacketizer::prependCSD(
- size_t trackIndex, const sp<ABuffer> &accessUnit) const {
- CHECK_LT(trackIndex, mTracks.size());
-
- const sp<Track> &track = mTracks.itemAt(trackIndex);
- CHECK(track->isH264() && IsIDR(accessUnit));
-
- int64_t timeUs;
- CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-
- sp<ABuffer> accessUnit2 = track->prependCSD(accessUnit);
-
- accessUnit2->meta()->setInt64("timeUs", timeUs);
-
- return accessUnit2;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/wifi-display/source/TSPacketizer.h b/media/libstagefright/wifi-display/source/TSPacketizer.h
deleted file mode 100644
index 0dcb179..0000000
--- a/media/libstagefright/wifi-display/source/TSPacketizer.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TS_PACKETIZER_H_
-
-#define TS_PACKETIZER_H_
-
-#include <media/stagefright/foundation/ABase.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-#include <utils/Vector.h>
-
-namespace android {
-
-struct ABuffer;
-struct AMessage;
-
-// Forms the packets of a transport stream given access units.
-// Emits metadata tables (PAT and PMT) and timestamp stream (PCR) based
-// on flags.
-struct TSPacketizer : public RefBase {
- enum {
- EMIT_HDCP20_DESCRIPTOR = 1,
- EMIT_HDCP21_DESCRIPTOR = 2,
- };
- explicit TSPacketizer(uint32_t flags);
-
- // Returns trackIndex or error.
- ssize_t addTrack(const sp<AMessage> &format);
-
- enum {
- EMIT_PAT_AND_PMT = 1,
- EMIT_PCR = 2,
- IS_ENCRYPTED = 4,
- PREPEND_SPS_PPS_TO_IDR_FRAMES = 8,
- };
- status_t packetize(
- size_t trackIndex, const sp<ABuffer> &accessUnit,
- sp<ABuffer> *packets,
- uint32_t flags,
- const uint8_t *PES_private_data, size_t PES_private_data_len,
- size_t numStuffingBytes = 0);
-
- status_t extractCSDIfNecessary(size_t trackIndex);
-
- // XXX to be removed once encoder config option takes care of this for
- // encrypted mode.
- sp<ABuffer> prependCSD(
- size_t trackIndex, const sp<ABuffer> &accessUnit) const;
-
-protected:
- virtual ~TSPacketizer();
-
-private:
- enum {
- kPID_PMT = 0x100,
- kPID_PCR = 0x1000,
- };
-
- struct Track;
-
- uint32_t mFlags;
- Vector<sp<Track> > mTracks;
-
- Vector<sp<ABuffer> > mProgramInfoDescriptors;
-
- unsigned mPATContinuityCounter;
- unsigned mPMTContinuityCounter;
-
- uint32_t mCrcTable[256];
-
- void initCrcTable();
- uint32_t crc32(const uint8_t *start, size_t size) const;
-
- DISALLOW_EVIL_CONSTRUCTORS(TSPacketizer);
-};
-
-} // namespace android
-
-#endif // TS_PACKETIZER_H_
-
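A typical call sequence for the interface above, sketched under the assumption that videoFormat and accessUnit come from an encoder (illustrative only; a real caller also attaches csd-0/csd-1 buffers so the PMT descriptors can be built):

sp<TSPacketizer> packetizer = new TSPacketizer(0 /* flags: no HDCP descriptor */);

sp<AMessage> videoFormat = new AMessage;
videoFormat->setString("mime", "video/avc");
ssize_t videoTrack = packetizer->addTrack(videoFormat);
CHECK_GE(videoTrack, 0);

sp<ABuffer> packets;
status_t err = packetizer->packetize(
        videoTrack, accessUnit /* one encoded frame with "timeUs" in meta() */,
        &packets,
        TSPacketizer::EMIT_PAT_AND_PMT | TSPacketizer::EMIT_PCR,
        NULL /* PES_private_data */, 0 /* PES_private_data_len */);
// On success, `packets` holds a whole number of 188-byte TS packets.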
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
deleted file mode 100644
index 4695e5d..0000000
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ /dev/null
@@ -1,1737 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "WifiDisplaySource"
-#include <utils/Log.h>
-
-#include "WifiDisplaySource.h"
-#include "PlaybackSession.h"
-#include "Parameters.h"
-#include "rtp/RTPSender.h"
-
-#include <binder/IServiceManager.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <media/IHDCP.h>
-#include <media/IMediaPlayerService.h>
-#include <media/IRemoteDisplayClient.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/ParsedMessage.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-
-#include <arpa/inet.h>
-#include <cutils/properties.h>
-
-#include <ctype.h>
-
-namespace android {
-
-// static
-const int64_t WifiDisplaySource::kReaperIntervalUs;
-const int64_t WifiDisplaySource::kTeardownTriggerTimeouSecs;
-const int64_t WifiDisplaySource::kPlaybackSessionTimeoutSecs;
-const int64_t WifiDisplaySource::kPlaybackSessionTimeoutUs;
-const AString WifiDisplaySource::sUserAgent = MakeUserAgent();
-
-WifiDisplaySource::WifiDisplaySource(
- const String16 &opPackageName,
- const sp<ANetworkSession> &netSession,
- const sp<IRemoteDisplayClient> &client,
- const char *path)
- : mOpPackageName(opPackageName),
- mState(INITIALIZED),
- mNetSession(netSession),
- mClient(client),
- mSessionID(0),
- mStopReplyID(NULL),
- mChosenRTPPort(-1),
- mUsingPCMAudio(false),
- mClientSessionID(0),
- mReaperPending(false),
- mNextCSeq(1),
- mUsingHDCP(false),
- mIsHDCP2_0(false),
- mHDCPPort(0),
- mHDCPInitializationComplete(false),
- mSetupTriggerDeferred(false),
- mPlaybackSessionEstablished(false) {
- if (path != NULL) {
- mMediaPath.setTo(path);
- }
-
- mSupportedSourceVideoFormats.disableAll();
-
- mSupportedSourceVideoFormats.setNativeResolution(
- VideoFormats::RESOLUTION_CEA, 5); // 1280x720 p30
-
- // Enable all resolutions up to 1280x720p30
- mSupportedSourceVideoFormats.enableResolutionUpto(
- VideoFormats::RESOLUTION_CEA, 5,
- VideoFormats::PROFILE_CHP, // Constrained High Profile
- VideoFormats::LEVEL_32); // Level 3.2
-}
-
-WifiDisplaySource::~WifiDisplaySource() {
-}
-
-static status_t PostAndAwaitResponse(
- const sp<AMessage> &msg, sp<AMessage> *response) {
- status_t err = msg->postAndAwaitResponse(response);
-
- if (err != OK) {
- return err;
- }
-
- if (response == NULL || !(*response)->findInt32("err", &err)) {
- err = OK;
- }
-
- return err;
-}
-
-status_t WifiDisplaySource::start(const char *iface) {
- CHECK_EQ(mState, INITIALIZED);
-
- sp<AMessage> msg = new AMessage(kWhatStart, this);
- msg->setString("iface", iface);
-
- sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
-}
-
-status_t WifiDisplaySource::stop() {
- sp<AMessage> msg = new AMessage(kWhatStop, this);
-
- sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
-}
-
-status_t WifiDisplaySource::pause() {
- sp<AMessage> msg = new AMessage(kWhatPause, this);
-
- sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
-}
-
-status_t WifiDisplaySource::resume() {
- sp<AMessage> msg = new AMessage(kWhatResume, this);
-
- sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
-}
-
-void WifiDisplaySource::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatStart:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- AString iface;
- CHECK(msg->findString("iface", &iface));
-
- status_t err = OK;
-
- ssize_t colonPos = iface.find(":");
-
- unsigned long port;
-
- if (colonPos >= 0) {
- const char *s = iface.c_str() + colonPos + 1;
-
- char *end;
- port = strtoul(s, &end, 10);
-
- if (end == s || *end != '\0' || port > 65535) {
- err = -EINVAL;
- } else {
- iface.erase(colonPos, iface.size() - colonPos);
- }
- } else {
- port = kWifiDisplayDefaultPort;
- }
-
- if (err == OK) {
- if (inet_aton(iface.c_str(), &mInterfaceAddr) != 0) {
- sp<AMessage> notify = new AMessage(kWhatRTSPNotify, this);
-
- err = mNetSession->createRTSPServer(
- mInterfaceAddr, port, notify, &mSessionID);
- } else {
- err = -EINVAL;
- }
- }
-
- mState = AWAITING_CLIENT_CONNECTION;
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatRTSPNotify:
- {
- int32_t reason;
- CHECK(msg->findInt32("reason", &reason));
-
- switch (reason) {
- case ANetworkSession::kWhatError:
- {
- int32_t sessionID;
- CHECK(msg->findInt32("sessionID", &sessionID));
-
- int32_t err;
- CHECK(msg->findInt32("err", &err));
-
- AString detail;
- CHECK(msg->findString("detail", &detail));
-
- ALOGE("An error occurred in session %d (%d, '%s/%s').",
- sessionID,
- err,
- detail.c_str(),
- strerror(-err));
-
- mNetSession->destroySession(sessionID);
-
- if (sessionID == mClientSessionID) {
- mClientSessionID = 0;
-
- mClient->onDisplayError(
- IRemoteDisplayClient::kDisplayErrorUnknown);
- }
- break;
- }
-
- case ANetworkSession::kWhatClientConnected:
- {
- int32_t sessionID;
- CHECK(msg->findInt32("sessionID", &sessionID));
-
- if (mClientSessionID > 0) {
- ALOGW("A client tried to connect, but we already "
- "have one.");
-
- mNetSession->destroySession(sessionID);
- break;
- }
-
- CHECK_EQ(mState, AWAITING_CLIENT_CONNECTION);
-
- CHECK(msg->findString("client-ip", &mClientInfo.mRemoteIP));
- CHECK(msg->findString("server-ip", &mClientInfo.mLocalIP));
-
- if (mClientInfo.mRemoteIP == mClientInfo.mLocalIP) {
- // Disallow connections from the local interface
- // for security reasons.
- mNetSession->destroySession(sessionID);
- break;
- }
-
- CHECK(msg->findInt32(
- "server-port", &mClientInfo.mLocalPort));
- mClientInfo.mPlaybackSessionID = -1;
-
- mClientSessionID = sessionID;
-
- ALOGI("We now have a client (%d) connected.", sessionID);
-
- mState = AWAITING_CLIENT_SETUP;
-
- status_t err = sendM1(sessionID);
- CHECK_EQ(err, (status_t)OK);
- break;
- }
-
- case ANetworkSession::kWhatData:
- {
- status_t err = onReceiveClientData(msg);
-
- if (err != OK) {
- mClient->onDisplayError(
- IRemoteDisplayClient::kDisplayErrorUnknown);
- }
-
-#if 0
- // testing only.
- char val[PROPERTY_VALUE_MAX];
- if (property_get("media.wfd.trigger", val, NULL)) {
- if (!strcasecmp(val, "pause") && mState == PLAYING) {
- mState = PLAYING_TO_PAUSED;
- sendTrigger(mClientSessionID, TRIGGER_PAUSE);
- } else if (!strcasecmp(val, "play")
- && mState == PAUSED) {
- mState = PAUSED_TO_PLAYING;
- sendTrigger(mClientSessionID, TRIGGER_PLAY);
- }
- }
-#endif
- break;
- }
-
- case ANetworkSession::kWhatNetworkStall:
- {
- break;
- }
-
- default:
- TRESPASS();
- }
- break;
- }
-
- case kWhatStop:
- {
- CHECK(msg->senderAwaitsResponse(&mStopReplyID));
-
- CHECK_LT(mState, AWAITING_CLIENT_TEARDOWN);
-
- if (mState >= AWAITING_CLIENT_PLAY) {
- // We have a session, i.e. a previous SETUP succeeded.
-
- status_t err = sendTrigger(
- mClientSessionID, TRIGGER_TEARDOWN);
-
- if (err == OK) {
- mState = AWAITING_CLIENT_TEARDOWN;
-
- (new AMessage(kWhatTeardownTriggerTimedOut, this))->post(
- kTeardownTriggerTimeouSecs * 1000000ll);
-
- break;
- }
-
- // fall through.
- }
-
- finishStop();
- break;
- }
-
- case kWhatPause:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- status_t err = OK;
-
- if (mState != PLAYING) {
- err = INVALID_OPERATION;
- } else {
- mState = PLAYING_TO_PAUSED;
- sendTrigger(mClientSessionID, TRIGGER_PAUSE);
- }
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatResume:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- status_t err = OK;
-
- if (mState != PAUSED) {
- err = INVALID_OPERATION;
- } else {
- mState = PAUSED_TO_PLAYING;
- sendTrigger(mClientSessionID, TRIGGER_PLAY);
- }
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatReapDeadClients:
- {
- mReaperPending = false;
-
- if (mClientSessionID == 0
- || mClientInfo.mPlaybackSession == NULL) {
- break;
- }
-
- if (mClientInfo.mPlaybackSession->getLastLifesignUs()
- + kPlaybackSessionTimeoutUs < ALooper::GetNowUs()) {
- ALOGI("playback session timed out, reaping.");
-
- mNetSession->destroySession(mClientSessionID);
- mClientSessionID = 0;
-
- mClient->onDisplayError(
- IRemoteDisplayClient::kDisplayErrorUnknown);
- } else {
- scheduleReaper();
- }
- break;
- }
-
- case kWhatPlaybackSessionNotify:
- {
- int32_t playbackSessionID;
- CHECK(msg->findInt32("playbackSessionID", &playbackSessionID));
-
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- if (what == PlaybackSession::kWhatSessionDead) {
- ALOGI("playback session wants to quit.");
-
- mClient->onDisplayError(
- IRemoteDisplayClient::kDisplayErrorUnknown);
- } else if (what == PlaybackSession::kWhatSessionEstablished) {
- mPlaybackSessionEstablished = true;
-
- if (mClient != NULL) {
- if (!mSinkSupportsVideo) {
- mClient->onDisplayConnected(
- NULL, // SurfaceTexture
- 0, // width,
- 0, // height,
- mUsingHDCP
- ? IRemoteDisplayClient::kDisplayFlagSecure
- : 0,
- 0);
- } else {
- size_t width, height;
-
- CHECK(VideoFormats::GetConfiguration(
- mChosenVideoResolutionType,
- mChosenVideoResolutionIndex,
- &width,
- &height,
- NULL /* framesPerSecond */,
- NULL /* interlaced */));
-
- mClient->onDisplayConnected(
- mClientInfo.mPlaybackSession
- ->getSurfaceTexture(),
- width,
- height,
- mUsingHDCP
- ? IRemoteDisplayClient::kDisplayFlagSecure
- : 0,
- playbackSessionID);
- }
- }
-
- finishPlay();
-
- if (mState == ABOUT_TO_PLAY) {
- mState = PLAYING;
- }
- } else if (what == PlaybackSession::kWhatSessionDestroyed) {
- disconnectClient2();
- } else {
- CHECK_EQ(what, PlaybackSession::kWhatBinaryData);
-
- int32_t channel;
- CHECK(msg->findInt32("channel", &channel));
-
- sp<ABuffer> data;
- CHECK(msg->findBuffer("data", &data));
-
- CHECK_LE(channel, 0xff);
- CHECK_LE(data->size(), 0xffffu);
-
- int32_t sessionID;
- CHECK(msg->findInt32("sessionID", &sessionID));
-
- char header[4];
- header[0] = '$';
- header[1] = channel;
- header[2] = data->size() >> 8;
- header[3] = data->size() & 0xff;
-
- mNetSession->sendRequest(
- sessionID, header, sizeof(header));
-
- mNetSession->sendRequest(
- sessionID, data->data(), data->size());
- }
- break;
- }
-
- case kWhatKeepAlive:
- {
- int32_t sessionID;
- CHECK(msg->findInt32("sessionID", &sessionID));
-
- if (mClientSessionID != sessionID) {
- // Obsolete event, client is already gone.
- break;
- }
-
- sendM16(sessionID);
- break;
- }
-
- case kWhatTeardownTriggerTimedOut:
- {
- if (mState == AWAITING_CLIENT_TEARDOWN) {
- ALOGI("TEARDOWN trigger timed out, forcing disconnection.");
-
- CHECK(mStopReplyID != NULL);
- finishStop();
- break;
- }
- break;
- }
-
- case kWhatHDCPNotify:
- {
- int32_t msgCode, ext1, ext2;
- CHECK(msg->findInt32("msg", &msgCode));
- CHECK(msg->findInt32("ext1", &ext1));
- CHECK(msg->findInt32("ext2", &ext2));
-
- ALOGI("Saw HDCP notification code %d, ext1 %d, ext2 %d",
- msgCode, ext1, ext2);
-
- switch (msgCode) {
- case HDCPModule::HDCP_INITIALIZATION_COMPLETE:
- {
- mHDCPInitializationComplete = true;
-
- if (mSetupTriggerDeferred) {
- mSetupTriggerDeferred = false;
-
- sendTrigger(mClientSessionID, TRIGGER_SETUP);
- }
- break;
- }
-
- case HDCPModule::HDCP_SHUTDOWN_COMPLETE:
- case HDCPModule::HDCP_SHUTDOWN_FAILED:
- {
- // Ugly hack to make sure that the call to
- // HDCPObserver::notify is completely handled before
- // we clear the HDCP instance and unload the shared
- // library :(
- (new AMessage(kWhatFinishStop2, this))->post(300000ll);
- break;
- }
-
- default:
- {
- ALOGE("HDCP failure, shutting down.");
-
- mClient->onDisplayError(
- IRemoteDisplayClient::kDisplayErrorUnknown);
- break;
- }
- }
- break;
- }
-
- case kWhatFinishStop2:
- {
- finishStop2();
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void WifiDisplaySource::registerResponseHandler(
- int32_t sessionID, int32_t cseq, HandleRTSPResponseFunc func) {
- ResponseID id;
- id.mSessionID = sessionID;
- id.mCSeq = cseq;
- mResponseHandlers.add(id, func);
-}
-
-status_t WifiDisplaySource::sendM1(int32_t sessionID) {
- AString request = "OPTIONS * RTSP/1.0\r\n";
- AppendCommonResponse(&request, mNextCSeq);
-
- request.append(
- "Require: org.wfa.wfd1.0\r\n"
- "\r\n");
-
- status_t err =
- mNetSession->sendRequest(sessionID, request.c_str(), request.size());
-
- if (err != OK) {
- return err;
- }
-
- registerResponseHandler(
- sessionID, mNextCSeq, &WifiDisplaySource::onReceiveM1Response);
-
- ++mNextCSeq;
-
- return OK;
-}
-
-status_t WifiDisplaySource::sendM3(int32_t sessionID) {
- AString body =
- "wfd_content_protection\r\n"
- "wfd_video_formats\r\n"
- "wfd_audio_codecs\r\n"
- "wfd_client_rtp_ports\r\n";
-
- AString request = "GET_PARAMETER rtsp://localhost/wfd1.0 RTSP/1.0\r\n";
- AppendCommonResponse(&request, mNextCSeq);
-
- request.append("Content-Type: text/parameters\r\n");
- request.append(AStringPrintf("Content-Length: %d\r\n", body.size()));
- request.append("\r\n");
- request.append(body);
-
- status_t err =
- mNetSession->sendRequest(sessionID, request.c_str(), request.size());
-
- if (err != OK) {
- return err;
- }
-
- registerResponseHandler(
- sessionID, mNextCSeq, &WifiDisplaySource::onReceiveM3Response);
-
- ++mNextCSeq;
-
- return OK;
-}
-
-status_t WifiDisplaySource::sendM4(int32_t sessionID) {
- CHECK_EQ(sessionID, mClientSessionID);
-
- AString body;
-
- if (mSinkSupportsVideo) {
- body.append("wfd_video_formats: ");
-
- VideoFormats chosenVideoFormat;
- chosenVideoFormat.disableAll();
- chosenVideoFormat.setNativeResolution(
- mChosenVideoResolutionType, mChosenVideoResolutionIndex);
- chosenVideoFormat.setProfileLevel(
- mChosenVideoResolutionType, mChosenVideoResolutionIndex,
- mChosenVideoProfile, mChosenVideoLevel);
-
- body.append(chosenVideoFormat.getFormatSpec(true /* forM4Message */));
- body.append("\r\n");
- }
-
- if (mSinkSupportsAudio) {
- body.append(
- AStringPrintf("wfd_audio_codecs: %s\r\n",
- (mUsingPCMAudio
- ? "LPCM 00000002 00" // 2 ch PCM 48kHz
- : "AAC 00000001 00"))); // 2 ch AAC 48kHz
- }
-
- body.append(
- AStringPrintf(
- "wfd_presentation_URL: rtsp://%s/wfd1.0/streamid=0 none\r\n",
- mClientInfo.mLocalIP.c_str()));
-
- body.append(
- AStringPrintf(
- "wfd_client_rtp_ports: %s\r\n", mWfdClientRtpPorts.c_str()));
-
- AString request = "SET_PARAMETER rtsp://localhost/wfd1.0 RTSP/1.0\r\n";
- AppendCommonResponse(&request, mNextCSeq);
-
- request.append("Content-Type: text/parameters\r\n");
- request.append(AStringPrintf("Content-Length: %d\r\n", body.size()));
- request.append("\r\n");
- request.append(body);
-
- status_t err =
- mNetSession->sendRequest(sessionID, request.c_str(), request.size());
-
- if (err != OK) {
- return err;
- }
-
- registerResponseHandler(
- sessionID, mNextCSeq, &WifiDisplaySource::onReceiveM4Response);
-
- ++mNextCSeq;
-
- return OK;
-}
-
-status_t WifiDisplaySource::sendTrigger(
- int32_t sessionID, TriggerType triggerType) {
- AString body = "wfd_trigger_method: ";
- switch (triggerType) {
- case TRIGGER_SETUP:
- body.append("SETUP");
- break;
- case TRIGGER_TEARDOWN:
- ALOGI("Sending TEARDOWN trigger.");
- body.append("TEARDOWN");
- break;
- case TRIGGER_PAUSE:
- body.append("PAUSE");
- break;
- case TRIGGER_PLAY:
- body.append("PLAY");
- break;
- default:
- TRESPASS();
- }
-
- body.append("\r\n");
-
- AString request = "SET_PARAMETER rtsp://localhost/wfd1.0 RTSP/1.0\r\n";
- AppendCommonResponse(&request, mNextCSeq);
-
- request.append("Content-Type: text/parameters\r\n");
- request.append(AStringPrintf("Content-Length: %d\r\n", body.size()));
- request.append("\r\n");
- request.append(body);
-
- status_t err =
- mNetSession->sendRequest(sessionID, request.c_str(), request.size());
-
- if (err != OK) {
- return err;
- }
-
- registerResponseHandler(
- sessionID, mNextCSeq, &WifiDisplaySource::onReceiveM5Response);
-
- ++mNextCSeq;
-
- return OK;
-}
-
-status_t WifiDisplaySource::sendM16(int32_t sessionID) {
- AString request = "GET_PARAMETER rtsp://localhost/wfd1.0 RTSP/1.0\r\n";
- AppendCommonResponse(&request, mNextCSeq);
-
- CHECK_EQ(sessionID, mClientSessionID);
- request.append(
- AStringPrintf("Session: %d\r\n", mClientInfo.mPlaybackSessionID));
- request.append("\r\n"); // Empty body
-
- status_t err =
- mNetSession->sendRequest(sessionID, request.c_str(), request.size());
-
- if (err != OK) {
- return err;
- }
-
- registerResponseHandler(
- sessionID, mNextCSeq, &WifiDisplaySource::onReceiveM16Response);
-
- ++mNextCSeq;
-
- scheduleKeepAlive(sessionID);
-
- return OK;
-}
-
-status_t WifiDisplaySource::onReceiveM1Response(
- int32_t /* sessionID */, const sp<ParsedMessage> &msg) {
- int32_t statusCode;
- if (!msg->getStatusCode(&statusCode)) {
- return ERROR_MALFORMED;
- }
-
- if (statusCode != 200) {
- return ERROR_UNSUPPORTED;
- }
-
- return OK;
-}
-
-// sink_audio_list := ("LPCM"|"AAC"|"AC3" HEXDIGIT*8 HEXDIGIT*2)
-// (", " sink_audio_list)*
-static void GetAudioModes(const char *s, const char *prefix, uint32_t *modes) {
- *modes = 0;
-
- size_t prefixLen = strlen(prefix);
-
- while (*s != '\0') {
- if (!strncmp(s, prefix, prefixLen) && s[prefixLen] == ' ') {
- unsigned latency;
- if (sscanf(&s[prefixLen + 1], "%08x %02x", modes, &latency) != 2) {
- *modes = 0;
- }
-
- return;
- }
-
- const char *commaPos = strchr(s, ',');
- if (commaPos != NULL) {
- s = commaPos + 1;
-
- while (isspace(*s)) {
- ++s;
- }
- } else {
- break;
- }
- }
-}
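For example (derived from the parsing code above), a sink reporting "AAC 00000001 00, LPCM 00000002 00" would yield:

// GetAudioModes(value, "AAC",  &m) -> m = 0x00000001 (latency 0x00)
// GetAudioModes(value, "LPCM", &m) -> m = 0x00000002
// onReceiveM3Response() then tests (m & 1) for AAC 2ch/48kHz and
// (m & 2) for LPCM 2ch/48kHz.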
-
-status_t WifiDisplaySource::onReceiveM3Response(
- int32_t sessionID, const sp<ParsedMessage> &msg) {
- int32_t statusCode;
- if (!msg->getStatusCode(&statusCode)) {
- return ERROR_MALFORMED;
- }
-
- if (statusCode != 200) {
- return ERROR_UNSUPPORTED;
- }
-
- sp<Parameters> params =
- Parameters::Parse(msg->getContent(), strlen(msg->getContent()));
-
- if (params == NULL) {
- return ERROR_MALFORMED;
- }
-
- AString value;
- if (!params->findParameter("wfd_client_rtp_ports", &value)) {
- ALOGE("Sink doesn't report its choice of wfd_client_rtp_ports.");
- return ERROR_MALFORMED;
- }
-
- unsigned port0 = 0, port1 = 0;
- if (sscanf(value.c_str(),
- "RTP/AVP/UDP;unicast %u %u mode=play",
- &port0,
- &port1) == 2
- || sscanf(value.c_str(),
- "RTP/AVP/TCP;unicast %u %u mode=play",
- &port0,
- &port1) == 2) {
- if (port0 == 0 || port0 > 65535 || port1 != 0) {
- ALOGE("Sink chose its wfd_client_rtp_ports poorly (%s)",
- value.c_str());
-
- return ERROR_MALFORMED;
- }
- } else if (strcmp(value.c_str(), "RTP/AVP/TCP;interleaved mode=play")) {
- ALOGE("Unsupported value for wfd_client_rtp_ports (%s)",
- value.c_str());
-
- return ERROR_UNSUPPORTED;
- }
-
- mWfdClientRtpPorts = value;
- mChosenRTPPort = port0;
-
- if (!params->findParameter("wfd_video_formats", &value)) {
- ALOGE("Sink doesn't report its choice of wfd_video_formats.");
- return ERROR_MALFORMED;
- }
-
- mSinkSupportsVideo = false;
-
- if (!(value == "none")) {
- mSinkSupportsVideo = true;
- if (!mSupportedSinkVideoFormats.parseFormatSpec(value.c_str())) {
- ALOGE("Failed to parse sink provided wfd_video_formats (%s)",
- value.c_str());
-
- return ERROR_MALFORMED;
- }
-
- if (!VideoFormats::PickBestFormat(
- mSupportedSinkVideoFormats,
- mSupportedSourceVideoFormats,
- &mChosenVideoResolutionType,
- &mChosenVideoResolutionIndex,
- &mChosenVideoProfile,
- &mChosenVideoLevel)) {
- ALOGE("Sink and source share no commonly supported video "
- "formats.");
-
- return ERROR_UNSUPPORTED;
- }
-
- size_t width, height, framesPerSecond;
- bool interlaced;
- CHECK(VideoFormats::GetConfiguration(
- mChosenVideoResolutionType,
- mChosenVideoResolutionIndex,
- &width,
- &height,
- &framesPerSecond,
- &interlaced));
-
- ALOGI("Picked video resolution %zu x %zu %c%zu",
- width, height, interlaced ? 'i' : 'p', framesPerSecond);
-
- ALOGI("Picked AVC profile %d, level %d",
- mChosenVideoProfile, mChosenVideoLevel);
- } else {
- ALOGI("Sink doesn't support video at all.");
- }
-
- if (!params->findParameter("wfd_audio_codecs", &value)) {
- ALOGE("Sink doesn't report its choice of wfd_audio_codecs.");
- return ERROR_MALFORMED;
- }
-
- mSinkSupportsAudio = false;
-
- if (!(value == "none")) {
- mSinkSupportsAudio = true;
-
- uint32_t modes;
- GetAudioModes(value.c_str(), "AAC", &modes);
-
- bool supportsAAC = (modes & 1) != 0; // AAC 2ch 48kHz
-
- GetAudioModes(value.c_str(), "LPCM", &modes);
-
- bool supportsPCM = (modes & 2) != 0; // LPCM 2ch 48kHz
-
- if (supportsPCM
- && property_get_bool("media.wfd.use-pcm-audio", false)) {
- ALOGI("Using PCM audio.");
- mUsingPCMAudio = true;
- } else if (supportsAAC) {
- ALOGI("Using AAC audio.");
- mUsingPCMAudio = false;
- } else if (supportsPCM) {
- ALOGI("Using PCM audio.");
- mUsingPCMAudio = true;
- } else {
- ALOGI("Sink doesn't support an audio format we do.");
- return ERROR_UNSUPPORTED;
- }
- } else {
- ALOGI("Sink doesn't support audio at all.");
- }
-
- if (!mSinkSupportsVideo && !mSinkSupportsAudio) {
- ALOGE("Sink supports neither video nor audio...");
- return ERROR_UNSUPPORTED;
- }
-
- mUsingHDCP = false;
- if (!params->findParameter("wfd_content_protection", &value)) {
- ALOGI("Sink doesn't appear to support content protection.");
- } else if (value == "none") {
- ALOGI("Sink does not support content protection.");
- } else {
- mUsingHDCP = true;
-
- bool isHDCP2_0 = false;
- if (value.startsWith("HDCP2.0 ")) {
- isHDCP2_0 = true;
- } else if (!value.startsWith("HDCP2.1 ")) {
- ALOGE("malformed wfd_content_protection: '%s'", value.c_str());
-
- return ERROR_MALFORMED;
- }
-
- int32_t hdcpPort;
- if (!ParsedMessage::GetInt32Attribute(
- value.c_str() + 8, "port", &hdcpPort)
- || hdcpPort < 1 || hdcpPort > 65535) {
- return ERROR_MALFORMED;
- }
-
- mIsHDCP2_0 = isHDCP2_0;
- mHDCPPort = hdcpPort;
-
- status_t err = makeHDCP();
- if (err != OK) {
- ALOGE("Unable to instantiate HDCP component. "
- "Not using HDCP after all.");
-
- mUsingHDCP = false;
- }
- }
-
- return sendM4(sessionID);
-}
-
-status_t WifiDisplaySource::onReceiveM4Response(
- int32_t sessionID, const sp<ParsedMessage> &msg) {
- int32_t statusCode;
- if (!msg->getStatusCode(&statusCode)) {
- return ERROR_MALFORMED;
- }
-
- if (statusCode != 200) {
- return ERROR_UNSUPPORTED;
- }
-
- if (mUsingHDCP && !mHDCPInitializationComplete) {
- ALOGI("Deferring SETUP trigger until HDCP initialization completes.");
-
- mSetupTriggerDeferred = true;
- return OK;
- }
-
- return sendTrigger(sessionID, TRIGGER_SETUP);
-}
-
-status_t WifiDisplaySource::onReceiveM5Response(
- int32_t /* sessionID */, const sp<ParsedMessage> &msg) {
- int32_t statusCode;
- if (!msg->getStatusCode(&statusCode)) {
- return ERROR_MALFORMED;
- }
-
- if (statusCode != 200) {
- return ERROR_UNSUPPORTED;
- }
-
- return OK;
-}
-
-status_t WifiDisplaySource::onReceiveM16Response(
- int32_t sessionID, const sp<ParsedMessage> & /* msg */) {
- // If only the response was required to include a "Session:" header...
-
- CHECK_EQ(sessionID, mClientSessionID);
-
- if (mClientInfo.mPlaybackSession != NULL) {
- mClientInfo.mPlaybackSession->updateLiveness();
- }
-
- return OK;
-}
-
-void WifiDisplaySource::scheduleReaper() {
- if (mReaperPending) {
- return;
- }
-
- mReaperPending = true;
- (new AMessage(kWhatReapDeadClients, this))->post(kReaperIntervalUs);
-}
-
-void WifiDisplaySource::scheduleKeepAlive(int32_t sessionID) {
- // We need to send updates at least 5 secs before the timeout is set to
- // expire, so make sure the timeout is greater than 5 secs to begin with.
- CHECK_GT(kPlaybackSessionTimeoutUs, 5000000ll);
-
- sp<AMessage> msg = new AMessage(kWhatKeepAlive, this);
- msg->setInt32("sessionID", sessionID);
- msg->post(kPlaybackSessionTimeoutUs - 5000000ll);
-}
-
-status_t WifiDisplaySource::onReceiveClientData(const sp<AMessage> &msg) {
- int32_t sessionID;
- CHECK(msg->findInt32("sessionID", &sessionID));
-
- sp<RefBase> obj;
- CHECK(msg->findObject("data", &obj));
-
- sp<ParsedMessage> data =
- static_cast<ParsedMessage *>(obj.get());
-
- ALOGV("session %d received '%s'",
- sessionID, data->debugString().c_str());
-
- AString method;
- AString uri;
- data->getRequestField(0, &method);
-
- int32_t cseq;
- if (!data->findInt32("cseq", &cseq)) {
- sendErrorResponse(sessionID, "400 Bad Request", -1 /* cseq */);
- return ERROR_MALFORMED;
- }
-
- if (method.startsWith("RTSP/")) {
- // This is a response.
-
- ResponseID id;
- id.mSessionID = sessionID;
- id.mCSeq = cseq;
-
- ssize_t index = mResponseHandlers.indexOfKey(id);
-
- if (index < 0) {
- ALOGW("Received unsolicited server response, cseq %d", cseq);
- return ERROR_MALFORMED;
- }
-
- HandleRTSPResponseFunc func = mResponseHandlers.valueAt(index);
- mResponseHandlers.removeItemsAt(index);
-
- status_t err = (this->*func)(sessionID, data);
-
- if (err != OK) {
- ALOGW("Response handler for session %d, cseq %d returned "
- "err %d (%s)",
- sessionID, cseq, err, strerror(-err));
-
- return err;
- }
-
- return OK;
- }
-
- AString version;
- data->getRequestField(2, &version);
- if (!(version == AString("RTSP/1.0"))) {
- sendErrorResponse(sessionID, "505 RTSP Version not supported", cseq);
- return ERROR_UNSUPPORTED;
- }
-
- status_t err;
- if (method == "OPTIONS") {
- err = onOptionsRequest(sessionID, cseq, data);
- } else if (method == "SETUP") {
- err = onSetupRequest(sessionID, cseq, data);
- } else if (method == "PLAY") {
- err = onPlayRequest(sessionID, cseq, data);
- } else if (method == "PAUSE") {
- err = onPauseRequest(sessionID, cseq, data);
- } else if (method == "TEARDOWN") {
- err = onTeardownRequest(sessionID, cseq, data);
- } else if (method == "GET_PARAMETER") {
- err = onGetParameterRequest(sessionID, cseq, data);
- } else if (method == "SET_PARAMETER") {
- err = onSetParameterRequest(sessionID, cseq, data);
- } else {
- sendErrorResponse(sessionID, "405 Method Not Allowed", cseq);
-
- err = ERROR_UNSUPPORTED;
- }
-
- return err;
-}
-
-status_t WifiDisplaySource::onOptionsRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data) {
- int32_t playbackSessionID;
- sp<PlaybackSession> playbackSession =
- findPlaybackSession(data, &playbackSessionID);
-
- if (playbackSession != NULL) {
- playbackSession->updateLiveness();
- }
-
- AString response = "RTSP/1.0 200 OK\r\n";
- AppendCommonResponse(&response, cseq);
-
- response.append(
- "Public: org.wfa.wfd1.0, SETUP, TEARDOWN, PLAY, PAUSE, "
- "GET_PARAMETER, SET_PARAMETER\r\n");
-
- response.append("\r\n");
-
- status_t err = mNetSession->sendRequest(sessionID, response.c_str());
-
- if (err == OK) {
- err = sendM3(sessionID);
- }
-
- return err;
-}
-
-status_t WifiDisplaySource::onSetupRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data) {
- CHECK_EQ(sessionID, mClientSessionID);
- if (mClientInfo.mPlaybackSessionID != -1) {
- // We only support a single playback session per client.
- // This is due to the reversed keep-alive design in the wfd specs...
- sendErrorResponse(sessionID, "400 Bad Request", cseq);
- return ERROR_MALFORMED;
- }
-
- AString transport;
- if (!data->findString("transport", &transport)) {
- sendErrorResponse(sessionID, "400 Bad Request", cseq);
- return ERROR_MALFORMED;
- }
-
- RTPSender::TransportMode rtpMode = RTPSender::TRANSPORT_UDP;
-
- int clientRtp, clientRtcp;
- if (transport.startsWith("RTP/AVP/TCP;")) {
- AString interleaved;
- if (ParsedMessage::GetAttribute(
- transport.c_str(), "interleaved", &interleaved)
- && sscanf(interleaved.c_str(), "%d-%d",
- &clientRtp, &clientRtcp) == 2) {
- rtpMode = RTPSender::TRANSPORT_TCP_INTERLEAVED;
- } else {
- bool badRequest = false;
-
- AString clientPort;
- if (!ParsedMessage::GetAttribute(
- transport.c_str(), "client_port", &clientPort)) {
- badRequest = true;
- } else if (sscanf(clientPort.c_str(), "%d-%d",
- &clientRtp, &clientRtcp) == 2) {
- } else if (sscanf(clientPort.c_str(), "%d", &clientRtp) == 1) {
- // No RTCP.
- clientRtcp = -1;
- } else {
- badRequest = true;
- }
-
- if (badRequest) {
- sendErrorResponse(sessionID, "400 Bad Request", cseq);
- return ERROR_MALFORMED;
- }
-
- rtpMode = RTPSender::TRANSPORT_TCP;
- }
- } else if (transport.startsWith("RTP/AVP;unicast;")
- || transport.startsWith("RTP/AVP/UDP;unicast;")) {
- bool badRequest = false;
-
- AString clientPort;
- if (!ParsedMessage::GetAttribute(
- transport.c_str(), "client_port", &clientPort)) {
- badRequest = true;
- } else if (sscanf(clientPort.c_str(), "%d-%d",
- &clientRtp, &clientRtcp) == 2) {
- } else if (sscanf(clientPort.c_str(), "%d", &clientRtp) == 1) {
- // No RTCP.
- clientRtcp = -1;
- } else {
- badRequest = true;
- }
-
- if (badRequest) {
- sendErrorResponse(sessionID, "400 Bad Request", cseq);
- return ERROR_MALFORMED;
- }
-#if 1
- // The older LG dongles doesn't specify client_port=xxx apparently.
- } else if (transport == "RTP/AVP/UDP;unicast") {
- clientRtp = 19000;
- clientRtcp = -1;
-#endif
- } else {
- sendErrorResponse(sessionID, "461 Unsupported Transport", cseq);
- return ERROR_UNSUPPORTED;
- }
-
- int32_t playbackSessionID = makeUniquePlaybackSessionID();
-
- sp<AMessage> notify = new AMessage(kWhatPlaybackSessionNotify, this);
- notify->setInt32("playbackSessionID", playbackSessionID);
- notify->setInt32("sessionID", sessionID);
-
- sp<PlaybackSession> playbackSession =
- new PlaybackSession(
- mOpPackageName, mNetSession, notify, mInterfaceAddr, mHDCP, mMediaPath.c_str());
-
- looper()->registerHandler(playbackSession);
-
- AString uri;
- data->getRequestField(1, &uri);
-
- if (strncasecmp("rtsp://", uri.c_str(), 7)) {
- sendErrorResponse(sessionID, "400 Bad Request", cseq);
- return ERROR_MALFORMED;
- }
-
- if (!(uri.startsWith("rtsp://") && uri.endsWith("/wfd1.0/streamid=0"))) {
- sendErrorResponse(sessionID, "404 Not found", cseq);
- return ERROR_MALFORMED;
- }
-
- RTPSender::TransportMode rtcpMode = RTPSender::TRANSPORT_UDP;
- if (clientRtcp < 0) {
- rtcpMode = RTPSender::TRANSPORT_NONE;
- }
-
- status_t err = playbackSession->init(
- mClientInfo.mRemoteIP.c_str(),
- clientRtp,
- rtpMode,
- clientRtcp,
- rtcpMode,
- mSinkSupportsAudio,
- mUsingPCMAudio,
- mSinkSupportsVideo,
- mChosenVideoResolutionType,
- mChosenVideoResolutionIndex,
- mChosenVideoProfile,
- mChosenVideoLevel);
-
- if (err != OK) {
- looper()->unregisterHandler(playbackSession->id());
- playbackSession.clear();
- }
-
- switch (err) {
- case OK:
- break;
- case -ENOENT:
- sendErrorResponse(sessionID, "404 Not Found", cseq);
- return err;
- default:
- sendErrorResponse(sessionID, "403 Forbidden", cseq);
- return err;
- }
-
- mClientInfo.mPlaybackSessionID = playbackSessionID;
- mClientInfo.mPlaybackSession = playbackSession;
-
- AString response = "RTSP/1.0 200 OK\r\n";
- AppendCommonResponse(&response, cseq, playbackSessionID);
-
- if (rtpMode == RTPSender::TRANSPORT_TCP_INTERLEAVED) {
- response.append(
- AStringPrintf(
- "Transport: RTP/AVP/TCP;interleaved=%d-%d;",
- clientRtp, clientRtcp));
- } else {
- int32_t serverRtp = playbackSession->getRTPPort();
-
- AString transportString = "UDP";
- if (rtpMode == RTPSender::TRANSPORT_TCP) {
- transportString = "TCP";
- }
-
- if (clientRtcp >= 0) {
- response.append(
- AStringPrintf(
- "Transport: RTP/AVP/%s;unicast;client_port=%d-%d;"
- "server_port=%d-%d\r\n",
- transportString.c_str(),
- clientRtp, clientRtcp, serverRtp, serverRtp + 1));
- } else {
- response.append(
- AStringPrintf(
- "Transport: RTP/AVP/%s;unicast;client_port=%d;"
- "server_port=%d\r\n",
- transportString.c_str(),
- clientRtp, serverRtp));
- }
- }
-
- response.append("\r\n");
-
- err = mNetSession->sendRequest(sessionID, response.c_str());
-
- if (err != OK) {
- return err;
- }
-
- mState = AWAITING_CLIENT_PLAY;
-
- scheduleReaper();
- scheduleKeepAlive(sessionID);
-
- return OK;
-}
-
-status_t WifiDisplaySource::onPlayRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data) {
- int32_t playbackSessionID;
- sp<PlaybackSession> playbackSession =
- findPlaybackSession(data, &playbackSessionID);
-
- if (playbackSession == NULL) {
- sendErrorResponse(sessionID, "454 Session Not Found", cseq);
- return ERROR_MALFORMED;
- }
-
- if (mState != AWAITING_CLIENT_PLAY
- && mState != PAUSED_TO_PLAYING
- && mState != PAUSED) {
- ALOGW("Received PLAY request but we're in state %d", mState);
-
- sendErrorResponse(
- sessionID, "455 Method Not Valid in This State", cseq);
-
- return INVALID_OPERATION;
- }
-
- ALOGI("Received PLAY request.");
- if (mPlaybackSessionEstablished) {
- finishPlay();
- } else {
- ALOGI("deferring PLAY request until session established.");
- }
-
- AString response = "RTSP/1.0 200 OK\r\n";
- AppendCommonResponse(&response, cseq, playbackSessionID);
- response.append("Range: npt=now-\r\n");
- response.append("\r\n");
-
- status_t err = mNetSession->sendRequest(sessionID, response.c_str());
-
- if (err != OK) {
- return err;
- }
-
- if (mState == PAUSED_TO_PLAYING || mPlaybackSessionEstablished) {
- mState = PLAYING;
- return OK;
- }
-
- CHECK_EQ(mState, AWAITING_CLIENT_PLAY);
- mState = ABOUT_TO_PLAY;
-
- return OK;
-}
-
-void WifiDisplaySource::finishPlay() {
- const sp<PlaybackSession> &playbackSession =
- mClientInfo.mPlaybackSession;
-
- status_t err = playbackSession->play();
- CHECK_EQ(err, (status_t)OK);
-}
-
-status_t WifiDisplaySource::onPauseRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data) {
- int32_t playbackSessionID;
- sp<PlaybackSession> playbackSession =
- findPlaybackSession(data, &playbackSessionID);
-
- if (playbackSession == NULL) {
- sendErrorResponse(sessionID, "454 Session Not Found", cseq);
- return ERROR_MALFORMED;
- }
-
- ALOGI("Received PAUSE request.");
-
- if (mState != PLAYING_TO_PAUSED && mState != PLAYING) {
- return INVALID_OPERATION;
- }
-
- status_t err = playbackSession->pause();
- CHECK_EQ(err, (status_t)OK);
-
- AString response = "RTSP/1.0 200 OK\r\n";
- AppendCommonResponse(&response, cseq, playbackSessionID);
- response.append("\r\n");
-
- err = mNetSession->sendRequest(sessionID, response.c_str());
-
- if (err != OK) {
- return err;
- }
-
- mState = PAUSED;
-
- return err;
-}
-
-status_t WifiDisplaySource::onTeardownRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data) {
- ALOGI("Received TEARDOWN request.");
-
- int32_t playbackSessionID;
- sp<PlaybackSession> playbackSession =
- findPlaybackSession(data, &playbackSessionID);
-
- if (playbackSession == NULL) {
- sendErrorResponse(sessionID, "454 Session Not Found", cseq);
- return ERROR_MALFORMED;
- }
-
- AString response = "RTSP/1.0 200 OK\r\n";
- AppendCommonResponse(&response, cseq, playbackSessionID);
- response.append("Connection: close\r\n");
- response.append("\r\n");
-
- mNetSession->sendRequest(sessionID, response.c_str());
-
- if (mState == AWAITING_CLIENT_TEARDOWN) {
- CHECK(mStopReplyID != NULL);
- finishStop();
- } else {
- mClient->onDisplayError(IRemoteDisplayClient::kDisplayErrorUnknown);
- }
-
- return OK;
-}
-
-void WifiDisplaySource::finishStop() {
- ALOGV("finishStop");
-
- mState = STOPPING;
-
- disconnectClientAsync();
-}
-
-void WifiDisplaySource::finishStopAfterDisconnectingClient() {
- ALOGV("finishStopAfterDisconnectingClient");
-
- if (mHDCP != NULL) {
- ALOGI("Initiating HDCP shutdown.");
- mHDCP->shutdownAsync();
- return;
- }
-
- finishStop2();
-}
-
-void WifiDisplaySource::finishStop2() {
- ALOGV("finishStop2");
-
- if (mHDCP != NULL) {
- mHDCP->setObserver(NULL);
- mHDCPObserver.clear();
- mHDCP.clear();
- }
-
- if (mSessionID != 0) {
- mNetSession->destroySession(mSessionID);
- mSessionID = 0;
- }
-
- ALOGI("We're stopped.");
- mState = STOPPED;
-
- status_t err = OK;
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(mStopReplyID);
-}
-
-status_t WifiDisplaySource::onGetParameterRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data) {
- int32_t playbackSessionID;
- sp<PlaybackSession> playbackSession =
- findPlaybackSession(data, &playbackSessionID);
-
- if (playbackSession == NULL) {
- sendErrorResponse(sessionID, "454 Session Not Found", cseq);
- return ERROR_MALFORMED;
- }
-
- playbackSession->updateLiveness();
-
- AString response = "RTSP/1.0 200 OK\r\n";
- AppendCommonResponse(&response, cseq, playbackSessionID);
- response.append("\r\n");
-
- status_t err = mNetSession->sendRequest(sessionID, response.c_str());
- return err;
-}
-
-status_t WifiDisplaySource::onSetParameterRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data) {
- int32_t playbackSessionID;
- sp<PlaybackSession> playbackSession =
- findPlaybackSession(data, &playbackSessionID);
-
- if (playbackSession == NULL) {
- sendErrorResponse(sessionID, "454 Session Not Found", cseq);
- return ERROR_MALFORMED;
- }
-
- if (strstr(data->getContent(), "wfd_idr_request\r\n")) {
- playbackSession->requestIDRFrame();
- }
-
- playbackSession->updateLiveness();
-
- AString response = "RTSP/1.0 200 OK\r\n";
- AppendCommonResponse(&response, cseq, playbackSessionID);
- response.append("\r\n");
-
- status_t err = mNetSession->sendRequest(sessionID, response.c_str());
- return err;
-}
-
-// static
-void WifiDisplaySource::AppendCommonResponse(
- AString *response, int32_t cseq, int32_t playbackSessionID) {
- time_t now = time(NULL);
- struct tm *now2 = gmtime(&now);
- char buf[128];
- strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S %z", now2);
-
- response->append("Date: ");
- response->append(buf);
- response->append("\r\n");
-
- response->append(AStringPrintf("Server: %s\r\n", sUserAgent.c_str()));
-
- if (cseq >= 0) {
- response->append(AStringPrintf("CSeq: %d\r\n", cseq));
- }
-
- if (playbackSessionID >= 0ll) {
- response->append(
- AStringPrintf(
- "Session: %d;timeout=%lld\r\n",
- playbackSessionID, kPlaybackSessionTimeoutSecs));
- }
-}
-
-void WifiDisplaySource::sendErrorResponse(
- int32_t sessionID,
- const char *errorDetail,
- int32_t cseq) {
- AString response;
- response.append("RTSP/1.0 ");
- response.append(errorDetail);
- response.append("\r\n");
-
- AppendCommonResponse(&response, cseq);
-
- response.append("\r\n");
-
- mNetSession->sendRequest(sessionID, response.c_str());
-}
-
-int32_t WifiDisplaySource::makeUniquePlaybackSessionID() const {
- return rand();
-}
-
-sp<WifiDisplaySource::PlaybackSession> WifiDisplaySource::findPlaybackSession(
- const sp<ParsedMessage> &data, int32_t *playbackSessionID) const {
- if (!data->findInt32("session", playbackSessionID)) {
- // XXX the older dongles do not always include a "Session:" header.
- *playbackSessionID = mClientInfo.mPlaybackSessionID;
- return mClientInfo.mPlaybackSession;
- }
-
- if (*playbackSessionID != mClientInfo.mPlaybackSessionID) {
- return NULL;
- }
-
- return mClientInfo.mPlaybackSession;
-}
-
-void WifiDisplaySource::disconnectClientAsync() {
- ALOGV("disconnectClient");
-
- if (mClientInfo.mPlaybackSession == NULL) {
- disconnectClient2();
- return;
- }
-
- if (mClientInfo.mPlaybackSession != NULL) {
- ALOGV("Destroying PlaybackSession");
- mClientInfo.mPlaybackSession->destroyAsync();
- }
-}
-
-void WifiDisplaySource::disconnectClient2() {
- ALOGV("disconnectClient2");
-
- if (mClientInfo.mPlaybackSession != NULL) {
- looper()->unregisterHandler(mClientInfo.mPlaybackSession->id());
- mClientInfo.mPlaybackSession.clear();
- }
-
- if (mClientSessionID != 0) {
- mNetSession->destroySession(mClientSessionID);
- mClientSessionID = 0;
- }
-
- mClient->onDisplayDisconnected();
-
- finishStopAfterDisconnectingClient();
-}
-
-struct WifiDisplaySource::HDCPObserver : public BnHDCPObserver {
- explicit HDCPObserver(const sp<AMessage> &notify);
-
- virtual void notify(
- int msg, int ext1, int ext2, const Parcel *obj);
-
-private:
- sp<AMessage> mNotify;
-
- DISALLOW_EVIL_CONSTRUCTORS(HDCPObserver);
-};
-
-WifiDisplaySource::HDCPObserver::HDCPObserver(
- const sp<AMessage> &notify)
- : mNotify(notify) {
-}
-
-void WifiDisplaySource::HDCPObserver::notify(
- int msg, int ext1, int ext2, const Parcel * /* obj */) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("msg", msg);
- notify->setInt32("ext1", ext1);
- notify->setInt32("ext2", ext2);
- notify->post();
-}
-
-status_t WifiDisplaySource::makeHDCP() {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("media.player"));
-
- sp<IMediaPlayerService> service =
- interface_cast<IMediaPlayerService>(binder);
-
- CHECK(service != NULL);
-
- mHDCP = service->makeHDCP(true /* createEncryptionModule */);
-
- if (mHDCP == NULL) {
- return ERROR_UNSUPPORTED;
- }
-
- sp<AMessage> notify = new AMessage(kWhatHDCPNotify, this);
- mHDCPObserver = new HDCPObserver(notify);
-
- status_t err = mHDCP->setObserver(mHDCPObserver);
-
- if (err != OK) {
- ALOGE("Failed to set HDCP observer.");
-
- mHDCPObserver.clear();
- mHDCP.clear();
-
- return err;
- }
-
- ALOGI("Initiating HDCP negotiation w/ host %s:%d",
- mClientInfo.mRemoteIP.c_str(), mHDCPPort);
-
- err = mHDCP->initAsync(mClientInfo.mRemoteIP.c_str(), mHDCPPort);
-
- if (err != OK) {
- return err;
- }
-
- return OK;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
deleted file mode 100644
index c25a675..0000000
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Copyright 2012, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef WIFI_DISPLAY_SOURCE_H_
-
-#define WIFI_DISPLAY_SOURCE_H_
-
-#include "VideoFormats.h"
-
-#include <media/stagefright/foundation/AHandler.h>
-#include <media/stagefright/foundation/ANetworkSession.h>
-
-#include <netinet/in.h>
-
-#include <utils/String16.h>
-
-namespace android {
-
-struct AReplyToken;
-struct IHDCP;
-class IRemoteDisplayClient;
-struct ParsedMessage;
-
-// Represents the RTSP server acting as a wifi display source.
-// Manages incoming connections, sets up Playback sessions as necessary.
-struct WifiDisplaySource : public AHandler {
- static const unsigned kWifiDisplayDefaultPort = 7236;
-
- WifiDisplaySource(
- const String16 &opPackageName,
- const sp<ANetworkSession> &netSession,
- const sp<IRemoteDisplayClient> &client,
- const char *path = NULL);
-
- status_t start(const char *iface);
- status_t stop();
-
- status_t pause();
- status_t resume();
-
-protected:
- virtual ~WifiDisplaySource();
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
-private:
- struct PlaybackSession;
- struct HDCPObserver;
-
- enum State {
- INITIALIZED,
- AWAITING_CLIENT_CONNECTION,
- AWAITING_CLIENT_SETUP,
- AWAITING_CLIENT_PLAY,
- ABOUT_TO_PLAY,
- PLAYING,
- PLAYING_TO_PAUSED,
- PAUSED,
- PAUSED_TO_PLAYING,
- AWAITING_CLIENT_TEARDOWN,
- STOPPING,
- STOPPED,
- };
-
- enum {
- kWhatStart,
- kWhatRTSPNotify,
- kWhatStop,
- kWhatPause,
- kWhatResume,
- kWhatReapDeadClients,
- kWhatPlaybackSessionNotify,
- kWhatKeepAlive,
- kWhatHDCPNotify,
- kWhatFinishStop2,
- kWhatTeardownTriggerTimedOut,
- };
-
- struct ResponseID {
- int32_t mSessionID;
- int32_t mCSeq;
-
- bool operator<(const ResponseID &other) const {
- return mSessionID < other.mSessionID
- || (mSessionID == other.mSessionID
- && mCSeq < other.mCSeq);
- }
- };
-
- typedef status_t (WifiDisplaySource::*HandleRTSPResponseFunc)(
- int32_t sessionID, const sp<ParsedMessage> &msg);
-
- static const int64_t kReaperIntervalUs = 1000000ll;
-
- // We request that the dongle send us a "TEARDOWN" in order to
- // perform an orderly shutdown. We're willing to wait up to 2 secs
- // for this message to arrive, after that we'll force a disconnect
- // instead.
- static const int64_t kTeardownTriggerTimeouSecs = 2;
-
- static const int64_t kPlaybackSessionTimeoutSecs = 30;
-
- static const int64_t kPlaybackSessionTimeoutUs =
- kPlaybackSessionTimeoutSecs * 1000000ll;
-
- static const AString sUserAgent;
-
- String16 mOpPackageName;
-
- State mState;
- VideoFormats mSupportedSourceVideoFormats;
- sp<ANetworkSession> mNetSession;
- sp<IRemoteDisplayClient> mClient;
- AString mMediaPath;
- struct in_addr mInterfaceAddr;
- int32_t mSessionID;
-
- sp<AReplyToken> mStopReplyID;
-
- AString mWfdClientRtpPorts;
- int32_t mChosenRTPPort; // extracted from "wfd_client_rtp_ports"
-
- bool mSinkSupportsVideo;
- VideoFormats mSupportedSinkVideoFormats;
-
- VideoFormats::ResolutionType mChosenVideoResolutionType;
- size_t mChosenVideoResolutionIndex;
- VideoFormats::ProfileType mChosenVideoProfile;
- VideoFormats::LevelType mChosenVideoLevel;
-
- bool mSinkSupportsAudio;
-
- bool mUsingPCMAudio;
- int32_t mClientSessionID;
-
- struct ClientInfo {
- AString mRemoteIP;
- AString mLocalIP;
- int32_t mLocalPort;
- int32_t mPlaybackSessionID;
- sp<PlaybackSession> mPlaybackSession;
- };
- ClientInfo mClientInfo;
-
- bool mReaperPending;
-
- int32_t mNextCSeq;
-
- KeyedVector<ResponseID, HandleRTSPResponseFunc> mResponseHandlers;
-
- // HDCP specific section >>>>
- bool mUsingHDCP;
- bool mIsHDCP2_0;
- int32_t mHDCPPort;
- sp<IHDCP> mHDCP;
- sp<HDCPObserver> mHDCPObserver;
-
- bool mHDCPInitializationComplete;
- bool mSetupTriggerDeferred;
-
- bool mPlaybackSessionEstablished;
-
- status_t makeHDCP();
- // <<<< HDCP specific section
-
- status_t sendM1(int32_t sessionID);
- status_t sendM3(int32_t sessionID);
- status_t sendM4(int32_t sessionID);
-
- enum TriggerType {
- TRIGGER_SETUP,
- TRIGGER_TEARDOWN,
- TRIGGER_PAUSE,
- TRIGGER_PLAY,
- };
-
- // M5
- status_t sendTrigger(int32_t sessionID, TriggerType triggerType);
-
- status_t sendM16(int32_t sessionID);
-
- status_t onReceiveM1Response(
- int32_t sessionID, const sp<ParsedMessage> &msg);
-
- status_t onReceiveM3Response(
- int32_t sessionID, const sp<ParsedMessage> &msg);
-
- status_t onReceiveM4Response(
- int32_t sessionID, const sp<ParsedMessage> &msg);
-
- status_t onReceiveM5Response(
- int32_t sessionID, const sp<ParsedMessage> &msg);
-
- status_t onReceiveM16Response(
- int32_t sessionID, const sp<ParsedMessage> &msg);
-
- void registerResponseHandler(
- int32_t sessionID, int32_t cseq, HandleRTSPResponseFunc func);
-
- status_t onReceiveClientData(const sp<AMessage> &msg);
-
- status_t onOptionsRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data);
-
- status_t onSetupRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data);
-
- status_t onPlayRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data);
-
- status_t onPauseRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data);
-
- status_t onTeardownRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data);
-
- status_t onGetParameterRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data);
-
- status_t onSetParameterRequest(
- int32_t sessionID,
- int32_t cseq,
- const sp<ParsedMessage> &data);
-
- void sendErrorResponse(
- int32_t sessionID,
- const char *errorDetail,
- int32_t cseq);
-
- static void AppendCommonResponse(
- AString *response, int32_t cseq, int32_t playbackSessionID = -1ll);
-
- void scheduleReaper();
- void scheduleKeepAlive(int32_t sessionID);
-
- int32_t makeUniquePlaybackSessionID() const;
-
- sp<PlaybackSession> findPlaybackSession(
- const sp<ParsedMessage> &data, int32_t *playbackSessionID) const;
-
- void finishStop();
- void disconnectClientAsync();
- void disconnectClient2();
- void finishStopAfterDisconnectingClient();
- void finishStop2();
-
- void finishPlay();
-
- DISALLOW_EVIL_CONSTRUCTORS(WifiDisplaySource);
-};
-
-} // namespace android
-
-#endif // WIFI_DISPLAY_SOURCE_H_
diff --git a/media/libstagefright/xmlparser/Android.bp b/media/libstagefright/xmlparser/Android.bp
index 3507284..a4fa342 100644
--- a/media/libstagefright/xmlparser/Android.bp
+++ b/media/libstagefright/xmlparser/Android.bp
@@ -15,10 +15,7 @@
shared_libs: [
"libexpat",
- "libutils",
"liblog",
- "libcutils",
- "libstagefright_foundation",
"libstagefright_omx_utils",
],
diff --git a/media/mtp/Android.bp b/media/mtp/Android.bp
index acea373..2cf9b82 100644
--- a/media/mtp/Android.bp
+++ b/media/mtp/Android.bp
@@ -49,7 +49,6 @@
shared_libs: [
"libasyncio",
"libbase",
- "libutils",
"liblog",
"libusbhost",
],
diff --git a/media/mtp/IMtpDatabase.h b/media/mtp/IMtpDatabase.h
new file mode 100644
index 0000000..1245092
--- /dev/null
+++ b/media/mtp/IMtpDatabase.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _I_MTP_DATABASE_H
+#define _I_MTP_DATABASE_H
+
+#include "MtpTypes.h"
+
+namespace android {
+
+class MtpDataPacket;
+class MtpProperty;
+class MtpObjectInfo;
+class MtpStringBuffer;
+
+class IMtpDatabase {
+public:
+ virtual ~IMtpDatabase() {}
+
+ // Called from SendObjectInfo to reserve a database entry for the incoming file.
+ virtual MtpObjectHandle beginSendObject(const char* path,
+ MtpObjectFormat format,
+ MtpObjectHandle parent,
+ MtpStorageID storage) = 0;
+
+ // Called to report success or failure of the SendObject file transfer.
+ virtual void endSendObject(MtpObjectHandle handle,
+ bool succeeded) = 0;
+
+ // Called to rescan a file, such as after an edit.
+ virtual void rescanFile(const char* path,
+ MtpObjectHandle handle,
+ MtpObjectFormat format) = 0;
+
+ virtual MtpObjectHandleList* getObjectList(MtpStorageID storageID,
+ MtpObjectFormat format,
+ MtpObjectHandle parent) = 0;
+
+ virtual int getNumObjects(MtpStorageID storageID,
+ MtpObjectFormat format,
+ MtpObjectHandle parent) = 0;
+
+ // callee should delete[] the results from these
+ // results can be NULL
+ virtual MtpObjectFormatList* getSupportedPlaybackFormats() = 0;
+ virtual MtpObjectFormatList* getSupportedCaptureFormats() = 0;
+ virtual MtpObjectPropertyList* getSupportedObjectProperties(MtpObjectFormat format) = 0;
+ virtual MtpDevicePropertyList* getSupportedDeviceProperties() = 0;
+
+ virtual MtpResponseCode getObjectPropertyValue(MtpObjectHandle handle,
+ MtpObjectProperty property,
+ MtpDataPacket& packet) = 0;
+
+ virtual MtpResponseCode setObjectPropertyValue(MtpObjectHandle handle,
+ MtpObjectProperty property,
+ MtpDataPacket& packet) = 0;
+
+ virtual MtpResponseCode getDevicePropertyValue(MtpDeviceProperty property,
+ MtpDataPacket& packet) = 0;
+
+ virtual MtpResponseCode setDevicePropertyValue(MtpDeviceProperty property,
+ MtpDataPacket& packet) = 0;
+
+ virtual MtpResponseCode resetDeviceProperty(MtpDeviceProperty property) = 0;
+
+ virtual MtpResponseCode getObjectPropertyList(MtpObjectHandle handle,
+ uint32_t format, uint32_t property,
+ int groupCode, int depth,
+ MtpDataPacket& packet) = 0;
+
+ virtual MtpResponseCode getObjectInfo(MtpObjectHandle handle,
+ MtpObjectInfo& info) = 0;
+
+ virtual void* getThumbnail(MtpObjectHandle handle, size_t& outThumbSize) = 0;
+
+ virtual MtpResponseCode getObjectFilePath(MtpObjectHandle handle,
+ MtpStringBuffer& outFilePath,
+ int64_t& outFileLength,
+ MtpObjectFormat& outFormat) = 0;
+
+ virtual MtpResponseCode beginDeleteObject(MtpObjectHandle handle) = 0;
+ virtual void endDeleteObject(MtpObjectHandle handle, bool succeeded) = 0;
+
+ virtual MtpObjectHandleList* getObjectReferences(MtpObjectHandle handle) = 0;
+
+ virtual MtpResponseCode setObjectReferences(MtpObjectHandle handle,
+ MtpObjectHandleList* references) = 0;
+
+ virtual MtpProperty* getObjectPropertyDesc(MtpObjectProperty property,
+ MtpObjectFormat format) = 0;
+
+ virtual MtpProperty* getDevicePropertyDesc(MtpDeviceProperty property) = 0;
+
+ virtual MtpResponseCode beginMoveObject(MtpObjectHandle handle, MtpObjectHandle newParent,
+ MtpStorageID newStorage) = 0;
+
+ virtual void endMoveObject(MtpObjectHandle oldParent, MtpObjectHandle newParent,
+ MtpStorageID oldStorage, MtpStorageID newStorage,
+ MtpObjectHandle handle, bool succeeded) = 0;
+
+ virtual MtpResponseCode beginCopyObject(MtpObjectHandle handle, MtpObjectHandle newParent,
+ MtpStorageID newStorage);
+ virtual void endCopyObject(MtpObjectHandle handle, bool succeeded);
+};
+
+}; // namespace android
+
+#endif // _I_MTP_DATABASE_H
diff --git a/media/mtp/IMtpHandle.h b/media/mtp/IMtpHandle.h
index c65bdd0..fd14b18 100644
--- a/media/mtp/IMtpHandle.h
+++ b/media/mtp/IMtpHandle.h
@@ -32,8 +32,7 @@
virtual int sendEvent(mtp_event me) = 0;
// Return 0 if operation is successful, or -1 else
- virtual int start() = 0;
- virtual int configure(bool ptp) = 0;
+ virtual int start(bool ptp) = 0;
virtual void close() = 0;
diff --git a/media/mtp/MtpDataPacket.cpp b/media/mtp/MtpDataPacket.cpp
index d1c71d7..992dc9a 100644
--- a/media/mtp/MtpDataPacket.cpp
+++ b/media/mtp/MtpDataPacket.cpp
@@ -19,6 +19,7 @@
#include "MtpDataPacket.h"
#include <algorithm>
+#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
@@ -129,7 +130,7 @@
delete result;
return NULL;
}
- result->push(value);
+ result->push_back(value);
}
return result;
}
@@ -145,7 +146,7 @@
delete result;
return NULL;
}
- result->push(value);
+ result->push_back(value);
}
return result;
}
@@ -161,7 +162,7 @@
delete result;
return NULL;
}
- result->push(value);
+ result->push_back(value);
}
return result;
}
@@ -177,7 +178,7 @@
delete result;
return NULL;
}
- result->push(value);
+ result->push_back(value);
}
return result;
}
@@ -193,7 +194,7 @@
delete result;
return NULL;
}
- result->push(value);
+ result->push_back(value);
}
return result;
}
@@ -209,7 +210,7 @@
delete result;
return NULL;
}
- result->push(value);
+ result->push_back(value);
}
return result;
}
@@ -225,7 +226,7 @@
delete result;
return NULL;
}
- result->push(value);
+ result->push_back(value);
}
return result;
}
@@ -241,7 +242,7 @@
delete result;
return NULL;
}
- result->push(value);
+ result->push_back(value);
}
return result;
}
diff --git a/media/mtp/MtpDatabase.h b/media/mtp/MtpDatabase.h
deleted file mode 100644
index f3f9720..0000000
--- a/media/mtp/MtpDatabase.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _MTP_DATABASE_H
-#define _MTP_DATABASE_H
-
-#include "MtpTypes.h"
-
-namespace android {
-
-class MtpDataPacket;
-class MtpProperty;
-class MtpObjectInfo;
-
-class MtpDatabase {
-public:
- virtual ~MtpDatabase() {}
-
- // called from SendObjectInfo to reserve a database entry for the incoming file
- virtual MtpObjectHandle beginSendObject(const char* path,
- MtpObjectFormat format,
- MtpObjectHandle parent,
- MtpStorageID storage,
- uint64_t size,
- time_t modified) = 0;
-
- // called to report success or failure of the SendObject file transfer
- // success should signal a notification of the new object's creation,
- // failure should remove the database entry created in beginSendObject
- virtual void endSendObject(const char* path,
- MtpObjectHandle handle,
- MtpObjectFormat format,
- bool succeeded) = 0;
-
- virtual void doScanDirectory(const char* path) = 0;
-
- virtual MtpObjectHandleList* getObjectList(MtpStorageID storageID,
- MtpObjectFormat format,
- MtpObjectHandle parent) = 0;
-
- virtual int getNumObjects(MtpStorageID storageID,
- MtpObjectFormat format,
- MtpObjectHandle parent) = 0;
-
- // callee should delete[] the results from these
- // results can be NULL
- virtual MtpObjectFormatList* getSupportedPlaybackFormats() = 0;
- virtual MtpObjectFormatList* getSupportedCaptureFormats() = 0;
- virtual MtpObjectPropertyList* getSupportedObjectProperties(MtpObjectFormat format) = 0;
- virtual MtpDevicePropertyList* getSupportedDeviceProperties() = 0;
-
- virtual MtpResponseCode getObjectPropertyValue(MtpObjectHandle handle,
- MtpObjectProperty property,
- MtpDataPacket& packet) = 0;
-
- virtual MtpResponseCode setObjectPropertyValue(MtpObjectHandle handle,
- MtpObjectProperty property,
- MtpDataPacket& packet) = 0;
-
- virtual MtpResponseCode getDevicePropertyValue(MtpDeviceProperty property,
- MtpDataPacket& packet) = 0;
-
- virtual MtpResponseCode setDevicePropertyValue(MtpDeviceProperty property,
- MtpDataPacket& packet) = 0;
-
- virtual MtpResponseCode resetDeviceProperty(MtpDeviceProperty property) = 0;
-
- virtual MtpResponseCode getObjectPropertyList(MtpObjectHandle handle,
- uint32_t format, uint32_t property,
- int groupCode, int depth,
- MtpDataPacket& packet) = 0;
-
- virtual MtpResponseCode getObjectInfo(MtpObjectHandle handle,
- MtpObjectInfo& info) = 0;
-
- virtual void* getThumbnail(MtpObjectHandle handle, size_t& outThumbSize) = 0;
-
- virtual MtpResponseCode getObjectFilePath(MtpObjectHandle handle,
- MtpString& outFilePath,
- int64_t& outFileLength,
- MtpObjectFormat& outFormat) = 0;
-
- virtual MtpResponseCode deleteFile(MtpObjectHandle handle) = 0;
-
- virtual MtpObjectHandleList* getObjectReferences(MtpObjectHandle handle) = 0;
-
- virtual MtpResponseCode setObjectReferences(MtpObjectHandle handle,
- MtpObjectHandleList* references) = 0;
-
- virtual MtpProperty* getObjectPropertyDesc(MtpObjectProperty property,
- MtpObjectFormat format) = 0;
-
- virtual MtpProperty* getDevicePropertyDesc(MtpDeviceProperty property) = 0;
-
- virtual MtpResponseCode moveObject(MtpObjectHandle handle, MtpObjectHandle newParent,
- MtpStorageID newStorage, MtpString& newPath) = 0;
-
- virtual void sessionStarted() = 0;
-
- virtual void sessionEnded() = 0;
-};
-
-}; // namespace android
-
-#endif // _MTP_DATABASE_H
diff --git a/media/mtp/MtpDebug.h b/media/mtp/MtpDebug.h
index 5b53e31..8d48273 100644
--- a/media/mtp/MtpDebug.h
+++ b/media/mtp/MtpDebug.h
@@ -18,10 +18,10 @@
#define _MTP_DEBUG_H
// #define LOG_NDEBUG 0
-#include <utils/Log.h>
-
#include "MtpTypes.h"
+#include <log/log.h>
+
namespace android {
class MtpDebug {
diff --git a/media/mtp/MtpDescriptors.cpp b/media/mtp/MtpDescriptors.cpp
index d9b6060..4a336c8 100644
--- a/media/mtp/MtpDescriptors.cpp
+++ b/media/mtp/MtpDescriptors.cpp
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <android-base/logging.h>
+#include <sys/types.h>
+
#include "MtpDescriptors.h"
namespace android {
@@ -257,4 +260,24 @@
.hs_descs = ptp_hs_descriptors,
};
+bool writeDescriptors(int fd, bool ptp) {
+ ssize_t ret = TEMP_FAILURE_RETRY(write(fd,
+ &(ptp ? ptp_desc_v2 : mtp_desc_v2), sizeof(desc_v2)));
+ if (ret < 0) {
+ PLOG(ERROR) << fd << "Switching to V1 descriptor format";
+ ret = TEMP_FAILURE_RETRY(write(fd,
+ &(ptp ? ptp_desc_v1 : mtp_desc_v1), sizeof(desc_v1)));
+ if (ret < 0) {
+ PLOG(ERROR) << fd << "Writing descriptors failed";
+ return false;
+ }
+ }
+ ret = TEMP_FAILURE_RETRY(write(fd, &mtp_strings, sizeof(mtp_strings)));
+ if (ret < 0) {
+ PLOG(ERROR) << fd << "Writing strings failed";
+ return false;
+ }
+ return true;
+}
+
}; // namespace android
diff --git a/media/mtp/MtpDescriptors.h b/media/mtp/MtpDescriptors.h
index cfc3930..d600a24 100644
--- a/media/mtp/MtpDescriptors.h
+++ b/media/mtp/MtpDescriptors.h
@@ -23,6 +23,16 @@
namespace android {
+constexpr char FFS_MTP_EP0[] = "/dev/usb-ffs/mtp/ep0";
+constexpr char FFS_MTP_EP_IN[] = "/dev/usb-ffs/mtp/ep1";
+constexpr char FFS_MTP_EP_OUT[] = "/dev/usb-ffs/mtp/ep2";
+constexpr char FFS_MTP_EP_INTR[] = "/dev/usb-ffs/mtp/ep3";
+
+constexpr char FFS_PTP_EP0[] = "/dev/usb-ffs/ptp/ep0";
+constexpr char FFS_PTP_EP_IN[] = "/dev/usb-ffs/ptp/ep1";
+constexpr char FFS_PTP_EP_OUT[] = "/dev/usb-ffs/ptp/ep2";
+constexpr char FFS_PTP_EP_INTR[] = "/dev/usb-ffs/ptp/ep3";
+
constexpr int MAX_PACKET_SIZE_FS = 64;
constexpr int MAX_PACKET_SIZE_HS = 512;
constexpr int MAX_PACKET_SIZE_SS = 1024;
@@ -91,6 +101,8 @@
extern const struct desc_v1 ptp_desc_v1;
extern const struct functionfs_strings mtp_strings;
+bool writeDescriptors(int fd, bool ptp);
+
}; // namespace android
#endif // MTP_DESCRIPTORS_H
diff --git a/media/mtp/MtpDevHandle.cpp b/media/mtp/MtpDevHandle.cpp
index 6aa57ac..e8bdf80 100644
--- a/media/mtp/MtpDevHandle.cpp
+++ b/media/mtp/MtpDevHandle.cpp
@@ -60,7 +60,7 @@
return ioctl(mFd, MTP_SEND_EVENT, reinterpret_cast<unsigned long>(&me));
}
-int MtpDevHandle::start() {
+int MtpDevHandle::start(bool /* ptp */) {
mFd.reset(TEMP_FAILURE_RETRY(open(mtp_dev_path, O_RDWR)));
if (mFd == -1) return -1;
return 0;
@@ -70,9 +70,4 @@
mFd.reset();
}
-int MtpDevHandle::configure(bool) {
- // Nothing to do, driver can configure itself
- return 0;
-}
-
} // namespace android
diff --git a/media/mtp/MtpDevHandle.h b/media/mtp/MtpDevHandle.h
index b0480ed..740ac85 100644
--- a/media/mtp/MtpDevHandle.h
+++ b/media/mtp/MtpDevHandle.h
@@ -36,10 +36,8 @@
int sendFile(mtp_file_range mfr);
int sendEvent(mtp_event me);
- int start();
+ int start(bool ptp);
void close();
-
- int configure(bool ptp);
};
} // namespace android
diff --git a/media/mtp/MtpDevice.cpp b/media/mtp/MtpDevice.cpp
index 0bf7854..993797a 100644
--- a/media/mtp/MtpDevice.cpp
+++ b/media/mtp/MtpDevice.cpp
@@ -262,7 +262,7 @@
MtpDeviceProperty propCode = (*mDeviceInfo->mDeviceProperties)[i];
MtpProperty* property = getDevicePropDesc(propCode);
if (property)
- mDeviceProperties.push(property);
+ mDeviceProperties.push_back(property);
}
}
}
@@ -327,7 +327,7 @@
}
bool MtpDevice::openSession() {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mSessionID = 0;
mTransactionID = 0;
@@ -353,7 +353,7 @@
}
MtpDeviceInfo* MtpDevice::getDeviceInfo() {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
if (!sendRequest(MTP_OPERATION_GET_DEVICE_INFO))
@@ -372,7 +372,7 @@
}
MtpStorageIDList* MtpDevice::getStorageIDs() {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
if (!sendRequest(MTP_OPERATION_GET_STORAGE_IDS))
@@ -387,7 +387,7 @@
}
MtpStorageInfo* MtpDevice::getStorageInfo(MtpStorageID storageID) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, storageID);
@@ -408,7 +408,7 @@
MtpObjectHandleList* MtpDevice::getObjectHandles(MtpStorageID storageID,
MtpObjectFormat format, MtpObjectHandle parent) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, storageID);
@@ -426,7 +426,7 @@
}
MtpObjectInfo* MtpDevice::getObjectInfo(MtpObjectHandle handle) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
// FIXME - we might want to add some caching here
@@ -448,7 +448,7 @@
}
void* MtpDevice::getThumbnail(MtpObjectHandle handle, int& outLength) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, handle);
@@ -463,7 +463,7 @@
}
MtpObjectHandle MtpDevice::sendObjectInfo(MtpObjectInfo* info) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
MtpObjectHandle parent = info->mParent;
@@ -517,7 +517,7 @@
}
bool MtpDevice::sendObject(MtpObjectHandle handle, int size, int srcFD) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
if (mLastSendObjectInfoTransactionID + 1 != mTransactionID ||
mLastSendObjectInfoObjectHandle != handle) {
@@ -537,7 +537,7 @@
}
bool MtpDevice::deleteObject(MtpObjectHandle handle) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, handle);
@@ -572,7 +572,7 @@
}
MtpObjectPropertyList* MtpDevice::getObjectPropsSupported(MtpObjectFormat format) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, format);
@@ -589,7 +589,7 @@
}
MtpProperty* MtpDevice::getDevicePropDesc(MtpDeviceProperty code) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, code);
@@ -609,7 +609,7 @@
}
MtpProperty* MtpDevice::getObjectPropDesc(MtpObjectProperty code, MtpObjectFormat format) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, code);
@@ -633,7 +633,7 @@
if (property == nullptr)
return false;
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, handle);
@@ -684,7 +684,7 @@
ReadObjectCallback callback,
const uint32_t* expectedLength,
void* clientData) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, handle);
@@ -806,7 +806,7 @@
uint32_t *writtenSize,
ReadObjectCallback callback,
void* clientData) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, handle);
@@ -828,7 +828,7 @@
uint32_t *writtenSize,
ReadObjectCallback callback,
void* clientData) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
mRequest.reset();
mRequest.setParameter(1, handle);
@@ -908,7 +908,7 @@
}
int MtpDevice::submitEventRequest() {
- if (mEventMutex.tryLock()) {
+ if (!mEventMutex.try_lock()) {
// An event is being reaped on another thread.
return -1;
}
@@ -916,7 +916,7 @@
// An event request was submitted, but no reapEventRequest called so far.
return -1;
}
- Mutex::Autolock autoLock(mEventMutexForInterrupt);
+ std::lock_guard<std::mutex> lg(mEventMutexForInterrupt);
mEventPacket.sendRequest(mRequestIntr);
const int currentHandle = ++mCurrentEventHandle;
mProcessingEvent = true;
@@ -925,7 +925,7 @@
}
int MtpDevice::reapEventRequest(int handle, uint32_t (*parameters)[3]) {
- Mutex::Autolock autoLock(mEventMutex);
+ std::lock_guard<std::mutex> lg(mEventMutex);
if (!mProcessingEvent || mCurrentEventHandle != handle || !parameters) {
return -1;
}
@@ -940,7 +940,7 @@
}
void MtpDevice::discardEventRequest(int handle) {
- Mutex::Autolock autoLock(mEventMutexForInterrupt);
+ std::lock_guard<std::mutex> lg(mEventMutexForInterrupt);
if (mCurrentEventHandle != handle) {
return;
}
diff --git a/media/mtp/MtpDevice.h b/media/mtp/MtpDevice.h
index a9a3e0e..8cf9e5e 100644
--- a/media/mtp/MtpDevice.h
+++ b/media/mtp/MtpDevice.h
@@ -23,7 +23,7 @@
#include "MtpResponsePacket.h"
#include "MtpTypes.h"
-#include <utils/threads.h>
+#include <mutex>
struct usb_device;
struct usb_request;
@@ -67,9 +67,9 @@
MtpObjectHandle mLastSendObjectInfoObjectHandle;
// to ensure only one MTP transaction at a time
- Mutex mMutex;
- Mutex mEventMutex;
- Mutex mEventMutexForInterrupt;
+ std::mutex mMutex;
+ std::mutex mEventMutex;
+ std::mutex mEventMutexForInterrupt;
// Remember the device's packet division mode.
UrbPacketDivisionMode mPacketDivisionMode;
diff --git a/media/mtp/MtpEventPacket.h b/media/mtp/MtpEventPacket.h
index 3f3b6a3..94d6ebf 100644
--- a/media/mtp/MtpEventPacket.h
+++ b/media/mtp/MtpEventPacket.h
@@ -20,6 +20,8 @@
#include "MtpPacket.h"
#include "mtp.h"
+#include <errno.h>
+
class IMtpHandle;
namespace android {
diff --git a/media/mtp/MtpFfsCompatHandle.cpp b/media/mtp/MtpFfsCompatHandle.cpp
index 3dd73f3..d2b342c 100644
--- a/media/mtp/MtpFfsCompatHandle.cpp
+++ b/media/mtp/MtpFfsCompatHandle.cpp
@@ -61,7 +61,8 @@
namespace android {
-MtpFfsCompatHandle::MtpFfsCompatHandle() :
+MtpFfsCompatHandle::MtpFfsCompatHandle(int controlFd) :
+ MtpFfsHandle(controlFd),
mMaxWrite(USB_FFS_MAX_WRITE),
mMaxRead(USB_FFS_MAX_READ) {}
@@ -108,10 +109,8 @@
return ret;
}
-int MtpFfsCompatHandle::start() {
- mLock.lock();
-
- if (!openEndpoints())
+int MtpFfsCompatHandle::start(bool ptp) {
+ if (!openEndpoints(ptp))
return -1;
for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
diff --git a/media/mtp/MtpFfsCompatHandle.h b/media/mtp/MtpFfsCompatHandle.h
index cd61482..5982d60 100644
--- a/media/mtp/MtpFfsCompatHandle.h
+++ b/media/mtp/MtpFfsCompatHandle.h
@@ -42,9 +42,9 @@
* Open ffs endpoints and allocate necessary kernel and user memory.
* Will sleep until endpoints are enabled, for up to 1 second.
*/
- int start() override;
+ int start(bool ptp) override;
- MtpFfsCompatHandle();
+ MtpFfsCompatHandle(int controlFd);
~MtpFfsCompatHandle();
};
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index 217e0c9..f25fc71 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -39,10 +39,6 @@
namespace {
-constexpr char FFS_MTP_EP_IN[] = "/dev/usb-ffs/mtp/ep1";
-constexpr char FFS_MTP_EP_OUT[] = "/dev/usb-ffs/mtp/ep2";
-constexpr char FFS_MTP_EP_INTR[] = "/dev/usb-ffs/mtp/ep3";
-
constexpr unsigned AIO_BUFS_MAX = 128;
constexpr unsigned AIO_BUF_LEN = 16384;
@@ -73,7 +69,9 @@
}
}
-MtpFfsHandle::MtpFfsHandle() {}
+MtpFfsHandle::MtpFfsHandle(int controlFd) {
+ mControl.reset(controlFd);
+}
MtpFfsHandle::~MtpFfsHandle() {}
@@ -83,27 +81,27 @@
mBulkOut.reset();
}
-bool MtpFfsHandle::openEndpoints() {
+bool MtpFfsHandle::openEndpoints(bool ptp) {
if (mBulkIn < 0) {
- mBulkIn.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_IN, O_RDWR)));
+ mBulkIn.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN, O_RDWR)));
if (mBulkIn < 0) {
- PLOG(ERROR) << FFS_MTP_EP_IN << ": cannot open bulk in ep";
+ PLOG(ERROR) << (ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN) << ": cannot open bulk in ep";
return false;
}
}
if (mBulkOut < 0) {
- mBulkOut.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_OUT, O_RDWR)));
+ mBulkOut.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT, O_RDWR)));
if (mBulkOut < 0) {
- PLOG(ERROR) << FFS_MTP_EP_OUT << ": cannot open bulk out ep";
+ PLOG(ERROR) << (ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT) << ": cannot open bulk out ep";
return false;
}
}
if (mIntr < 0) {
- mIntr.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_INTR, O_RDWR)));
+ mIntr.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR, O_RDWR)));
if (mIntr < 0) {
- PLOG(ERROR) << FFS_MTP_EP_INTR << ": cannot open intr ep";
+ PLOG(ERROR) << (ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR) << ": cannot open intr ep";
return false;
}
}
@@ -121,66 +119,53 @@
PLOG(ERROR) << "Failed to fadvise";
}
-bool MtpFfsHandle::initFunctionfs() {
- if (mControl < 0) { // might have already done this before
- mControl.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP0, O_RDWR)));
- if (mControl < 0) {
- PLOG(ERROR) << FFS_MTP_EP0 << ": cannot open control endpoint";
- return false;
- }
- if (!writeDescriptors()) {
- closeConfig();
- return false;
- }
- }
- return true;
-}
-
-bool MtpFfsHandle::writeDescriptors() {
- ssize_t ret = TEMP_FAILURE_RETRY(::write(mControl,
- &(mPtp ? ptp_desc_v2 : mtp_desc_v2), sizeof(desc_v2)));
- if (ret < 0) {
- PLOG(ERROR) << FFS_MTP_EP0 << "Switching to V1 descriptor format";
- ret = TEMP_FAILURE_RETRY(::write(mControl,
- &(mPtp ? ptp_desc_v1 : mtp_desc_v1), sizeof(desc_v1)));
- if (ret < 0) {
- PLOG(ERROR) << FFS_MTP_EP0 << "Writing descriptors failed";
- return false;
- }
- }
- ret = TEMP_FAILURE_RETRY(::write(mControl, &mtp_strings, sizeof(mtp_strings)));
- if (ret < 0) {
- PLOG(ERROR) << FFS_MTP_EP0 << "Writing strings failed";
- return false;
- }
- return true;
+bool MtpFfsHandle::writeDescriptors(bool ptp) {
+ return ::android::writeDescriptors(mControl, ptp);
}
void MtpFfsHandle::closeConfig() {
mControl.reset();
}
-int MtpFfsHandle::doAsync(void* data, size_t len, bool read) {
- struct io_event ioevs[1];
- if (len > AIO_BUF_LEN) {
- LOG(ERROR) << "Mtp read/write too large " << len;
- errno = EINVAL;
- return -1;
+int MtpFfsHandle::doAsync(void* data, size_t len, bool read, bool zero_packet) {
+ struct io_event ioevs[AIO_BUFS_MAX];
+ size_t total = 0;
+
+ while (total < len) {
+ size_t this_len = std::min(len - total, static_cast<size_t>(AIO_BUF_LEN * AIO_BUFS_MAX));
+ int num_bufs = this_len / AIO_BUF_LEN + (this_len % AIO_BUF_LEN == 0 ? 0 : 1);
+ for (int i = 0; i < num_bufs; i++) {
+ mIobuf[0].buf[i] = reinterpret_cast<unsigned char*>(data) + total + i * AIO_BUF_LEN;
+ }
+ int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, this_len, read);
+ if (ret < 0) return -1;
+ ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
+ if (ret < 0) return -1;
+ total += ret;
+ if (static_cast<size_t>(ret) < this_len) break;
}
- mIobuf[0].buf[0] = reinterpret_cast<unsigned char*>(data);
- if (iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, len, read) == -1)
- return -1;
- int ret = waitEvents(&mIobuf[0], 1, ioevs, nullptr);
- mIobuf[0].buf[0] = mIobuf[0].bufs.data();
- return ret;
+
+ int packet_size = getPacketSize(read ? mBulkOut : mBulkIn);
+ if (len % packet_size == 0 && zero_packet) {
+ int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, 0, read);
+ if (ret < 0) return -1;
+ ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
+ if (ret < 0) return -1;
+ }
+
+ for (unsigned i = 0; i < AIO_BUFS_MAX; i++) {
+ mIobuf[0].buf[i] = mIobuf[0].bufs.data() + i * AIO_BUF_LEN;
+ }
+ return total;
}
int MtpFfsHandle::read(void* data, size_t len) {
- return doAsync(data, len, true);
+ // Zero packets are handled by receiveFile()
+ return doAsync(data, len, true, false);
}
int MtpFfsHandle::write(const void* data, size_t len) {
- return doAsync(const_cast<void*>(data), len, false);
+ return doAsync(const_cast<void*>(data), len, false, true);
}
int MtpFfsHandle::handleEvent() {
@@ -197,11 +182,9 @@
switch (event->type) {
case FUNCTIONFS_BIND:
case FUNCTIONFS_ENABLE:
- case FUNCTIONFS_RESUME:
ret = 0;
errno = 0;
break;
- case FUNCTIONFS_SUSPEND:
case FUNCTIONFS_UNBIND:
case FUNCTIONFS_DISABLE:
errno = ESHUTDOWN;
@@ -211,6 +194,9 @@
if (handleControlRequest(&event->u.setup) == -1)
ret = -1;
break;
+ case FUNCTIONFS_SUSPEND:
+ case FUNCTIONFS_RESUME:
+ break;
default:
LOG(ERROR) << "Mtp Event " << event->type << " (unknown)";
}
@@ -277,10 +263,8 @@
return 0;
}
-int MtpFfsHandle::start() {
- mLock.lock();
-
- if (!openEndpoints())
+int MtpFfsHandle::start(bool ptp) {
+ if (!openEndpoints(ptp))
return -1;
for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
@@ -309,33 +293,10 @@
return 0;
}
-int MtpFfsHandle::configure(bool usePtp) {
- // Wait till previous server invocation has closed
- if (!mLock.try_lock_for(std::chrono::milliseconds(300))) {
- LOG(ERROR) << "MtpServer was unable to get configure lock";
- return -1;
- }
- int ret = 0;
-
- // If ptp is changed, the configuration must be rewritten
- if (mPtp != usePtp) {
- closeEndpoints();
- closeConfig();
- }
- mPtp = usePtp;
-
- if (!initFunctionfs()) {
- ret = -1;
- }
-
- mLock.unlock();
- return ret;
-}
-
void MtpFfsHandle::close() {
io_destroy(mCtx);
closeEndpoints();
- mLock.unlock();
+ closeConfig();
}
int MtpFfsHandle::waitEvents(struct io_buffer *buf, int min_events, struct io_event *events,
@@ -627,7 +588,8 @@
if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
sizeof(mtp_data_header), init_read_len, offset))
!= init_read_len) return -1;
- if (write(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len) == -1)
+ if (doAsync(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len,
+ false, false /* zlps are handled below */) == -1)
return -1;
file_length -= init_read_len;
offset += init_read_len;
diff --git a/media/mtp/MtpFfsHandle.h b/media/mtp/MtpFfsHandle.h
index 2347000..fe343f7 100644
--- a/media/mtp/MtpFfsHandle.h
+++ b/media/mtp/MtpFfsHandle.h
@@ -29,8 +29,6 @@
namespace android {
-constexpr char FFS_MTP_EP0[] = "/dev/usb-ffs/mtp/ep0";
-
constexpr int NUM_IO_BUFS = 2;
struct io_buffer {
@@ -42,31 +40,24 @@
};
template <class T> class MtpFfsHandleTest;
-template <class T> class MtpFfsHandleTest_testControl_Test;
class MtpFfsHandle : public IMtpHandle {
template <class T> friend class MtpFfsHandleTest;
- template <class T> friend class MtpFfsHandleTest_testControl_Test;
protected:
- bool initFunctionfs();
- bool writeDescriptors();
void closeConfig();
void closeEndpoints();
void advise(int fd);
int handleControlRequest(const struct usb_ctrlrequest *request);
- int doAsync(void* data, size_t len, bool read);
+ int doAsync(void* data, size_t len, bool read, bool zero_packet);
int handleEvent();
void cancelTransaction();
void doSendEvent(mtp_event me);
- bool openEndpoints();
+ bool openEndpoints(bool ptp);
static int getPacketSize(int ffs_fd);
- bool mPtp;
bool mCanceled;
- std::timed_mutex mLock; // protects configure() vs main loop
-
android::base::unique_fd mControl;
// "in" from the host's perspective => sink for mtp server
android::base::unique_fd mBulkIn;
@@ -99,12 +90,12 @@
int sendFile(mtp_file_range mfr) override;
int sendEvent(mtp_event me) override;
- int start() override;
+ int start(bool ptp) override;
void close() override;
- int configure(bool ptp) override;
+ bool writeDescriptors(bool ptp);
- MtpFfsHandle();
+ MtpFfsHandle(int controlFd);
~MtpFfsHandle();
};
diff --git a/media/mtp/MtpPacket.h b/media/mtp/MtpPacket.h
index d47c91d..9842b28 100644
--- a/media/mtp/MtpPacket.h
+++ b/media/mtp/MtpPacket.h
@@ -19,6 +19,7 @@
#include <android-base/macros.h>
+#include "MtpDebug.h"
#include "MtpTypes.h"
struct usb_device;
diff --git a/media/mtp/MtpProperty.cpp b/media/mtp/MtpProperty.cpp
index 039e4f5..5c02a0d 100644
--- a/media/mtp/MtpProperty.cpp
+++ b/media/mtp/MtpProperty.cpp
@@ -18,6 +18,10 @@
#include <inttypes.h>
#include <cutils/compiler.h>
+#include <iomanip>
+#include <sstream>
+#include <string>
+
#include "MtpDataPacket.h"
#include "MtpDebug.h"
#include "MtpProperty.h"
@@ -336,7 +340,7 @@
}
void MtpProperty::print() {
- MtpString buffer;
+ std::string buffer;
bool deviceProp = isDeviceProperty();
if (deviceProp)
ALOGI(" %s (%04X)", MtpDebug::getDevicePropCodeName(mCode), mCode);
@@ -346,11 +350,11 @@
ALOGI(" writeable %s", (mWriteable ? "true" : "false"));
buffer = " default value: ";
print(mDefaultValue, buffer);
- ALOGI("%s", (const char *)buffer);
+ ALOGI("%s", buffer.c_str());
if (deviceProp) {
buffer = " current value: ";
print(mCurrentValue, buffer);
- ALOGI("%s", (const char *)buffer);
+ ALOGI("%s", buffer.c_str());
}
switch (mFormFlag) {
case kFormNone:
@@ -363,7 +367,7 @@
buffer += ", ";
print(mStepSize, buffer);
buffer += ")";
- ALOGI("%s", (const char *)buffer);
+ ALOGI("%s", buffer.c_str());
break;
case kFormEnum:
buffer = " Enum { ";
@@ -372,7 +376,7 @@
buffer += " ";
}
buffer += "}";
- ALOGI("%s", (const char *)buffer);
+ ALOGI("%s", buffer.c_str());
break;
case kFormDateTime:
ALOGI(" DateTime\n");
@@ -383,42 +387,47 @@
}
}
-void MtpProperty::print(MtpPropertyValue& value, MtpString& buffer) {
+void MtpProperty::print(MtpPropertyValue& value, std::string& buffer) {
+ std::ostringstream s;
switch (mType) {
case MTP_TYPE_INT8:
- buffer.appendFormat("%d", value.u.i8);
+ buffer += std::to_string(value.u.i8);
break;
case MTP_TYPE_UINT8:
- buffer.appendFormat("%d", value.u.u8);
+ buffer += std::to_string(value.u.u8);
break;
case MTP_TYPE_INT16:
- buffer.appendFormat("%d", value.u.i16);
+ buffer += std::to_string(value.u.i16);
break;
case MTP_TYPE_UINT16:
- buffer.appendFormat("%d", value.u.u16);
+ buffer += std::to_string(value.u.u16);
break;
case MTP_TYPE_INT32:
- buffer.appendFormat("%d", value.u.i32);
+ buffer += std::to_string(value.u.i32);
break;
case MTP_TYPE_UINT32:
- buffer.appendFormat("%d", value.u.u32);
+ buffer += std::to_string(value.u.u32);
break;
case MTP_TYPE_INT64:
- buffer.appendFormat("%" PRId64, value.u.i64);
+ buffer += std::to_string(value.u.i64);
break;
case MTP_TYPE_UINT64:
- buffer.appendFormat("%" PRIu64, value.u.u64);
+ buffer += std::to_string(value.u.u64);
break;
case MTP_TYPE_INT128:
- buffer.appendFormat("%08X%08X%08X%08X", value.u.i128[0], value.u.i128[1],
- value.u.i128[2], value.u.i128[3]);
+ for (auto i : value.u.i128) {
+ s << std::hex << std::setfill('0') << std::uppercase << i;
+ }
+ buffer += s.str();
break;
case MTP_TYPE_UINT128:
- buffer.appendFormat("%08X%08X%08X%08X", value.u.u128[0], value.u.u128[1],
- value.u.u128[2], value.u.u128[3]);
+ for (auto i : value.u.u128) {
+ s << std::hex << std::setfill('0') << std::uppercase << i;
+ }
+ buffer += s.str();
break;
case MTP_TYPE_STR:
- buffer.appendFormat("%s", value.str);
+ buffer += value.str;
break;
default:
ALOGE("unsupported type for MtpProperty::print\n");
diff --git a/media/mtp/MtpProperty.h b/media/mtp/MtpProperty.h
index 03c08e1..bfd5f7f 100644
--- a/media/mtp/MtpProperty.h
+++ b/media/mtp/MtpProperty.h
@@ -19,6 +19,8 @@
#include "MtpTypes.h"
+#include <string>
+
namespace android {
class MtpDataPacket;
@@ -97,7 +99,6 @@
void setFormDateTime();
void print();
- void print(MtpPropertyValue& value, MtpString& buffer);
inline bool isDeviceProperty() const {
return ( ((mCode & 0xF000) == 0x5000)
@@ -110,6 +111,7 @@
MtpPropertyValue* readArrayValues(MtpDataPacket& packet, uint32_t& length);
void writeArrayValues(MtpDataPacket& packet,
MtpPropertyValue* values, uint32_t length);
+ void print(MtpPropertyValue& value, std::string& buffer);
};
}; // namespace android
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index bb0414d..ccddd6e 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <algorithm>
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <chrono>
@@ -31,7 +32,8 @@
#define LOG_TAG "MtpServer"
#include "MtpDebug.h"
-#include "MtpDatabase.h"
+#include "IMtpDatabase.h"
+#include "MtpDescriptors.h"
#include "MtpDevHandle.h"
#include "MtpFfsCompatHandle.h"
#include "MtpFfsHandle.h"
@@ -99,11 +101,11 @@
MTP_EVENT_DEVICE_PROP_CHANGED,
};
-MtpServer::MtpServer(MtpDatabase* database, bool ptp,
- const MtpString& deviceInfoManufacturer,
- const MtpString& deviceInfoModel,
- const MtpString& deviceInfoDeviceVersion,
- const MtpString& deviceInfoSerialNumber)
+MtpServer::MtpServer(IMtpDatabase* database, int controlFd, bool ptp,
+ const char *deviceInfoManufacturer,
+ const char *deviceInfoModel,
+ const char *deviceInfoDeviceVersion,
+ const char *deviceInfoSerialNumber)
: mDatabase(database),
mPtp(ptp),
mDeviceInfoManufacturer(deviceInfoManufacturer),
@@ -117,54 +119,38 @@
mSendObjectFileSize(0),
mSendObjectModifiedTime(0)
{
+ bool ffs_ok = access(FFS_MTP_EP0, W_OK) == 0;
+ if (ffs_ok) {
+ bool aio_compat = android::base::GetBoolProperty("sys.usb.ffs.aio_compat", false);
+ mHandle = aio_compat ? new MtpFfsCompatHandle(controlFd) : new MtpFfsHandle(controlFd);
+ } else {
+ mHandle = new MtpDevHandle();
+ }
}
MtpServer::~MtpServer() {
}
-IMtpHandle* MtpServer::sHandle = nullptr;
-
-int MtpServer::configure(bool usePtp) {
- bool ffs_ok = access(FFS_MTP_EP0, W_OK) == 0;
- if (sHandle == nullptr) {
- if (ffs_ok) {
- bool aio_compat = android::base::GetBoolProperty("sys.usb.ffs.aio_compat", false);
- sHandle = aio_compat ? new MtpFfsCompatHandle() : new MtpFfsHandle();
- } else {
- sHandle = new MtpDevHandle();
- }
- }
-
- int ret = sHandle->configure(usePtp);
- if (ret) ALOGE("Failed to configure MTP driver!");
- android::base::SetProperty("sys.usb.ffs.mtp.ready", "1");
- return ret;
-}
-
void MtpServer::addStorage(MtpStorage* storage) {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
- mStorages.push(storage);
+ mStorages.push_back(storage);
sendStoreAdded(storage->getStorageID());
}
void MtpServer::removeStorage(MtpStorage* storage) {
- Mutex::Autolock autoLock(mMutex);
-
- for (size_t i = 0; i < mStorages.size(); i++) {
- if (mStorages[i] == storage) {
- mStorages.removeAt(i);
- sendStoreRemoved(storage->getStorageID());
- break;
- }
+ std::lock_guard<std::mutex> lg(mMutex);
+ auto iter = std::find(mStorages.begin(), mStorages.end(), storage);
+ if (iter != mStorages.end()) {
+ sendStoreRemoved(storage->getStorageID());
+ mStorages.erase(iter);
}
}
MtpStorage* MtpServer::getStorage(MtpStorageID id) {
if (id == 0)
return mStorages[0];
- for (size_t i = 0; i < mStorages.size(); i++) {
- MtpStorage* storage = mStorages[i];
+ for (MtpStorage *storage : mStorages) {
if (storage->getStorageID() == id)
return storage;
}
@@ -178,19 +164,14 @@
}
void MtpServer::run() {
- if (!sHandle) {
- ALOGE("MtpServer was never configured!");
- return;
- }
-
- if (sHandle->start()) {
+ if (mHandle->start(mPtp)) {
ALOGE("Failed to start usb driver!");
- sHandle->close();
+ mHandle->close();
return;
}
while (1) {
- int ret = mRequest.read(sHandle);
+ int ret = mRequest.read(mHandle);
if (ret < 0) {
ALOGE("request read returned %d, errno: %d", ret, errno);
if (errno == ECANCELED) {
@@ -209,7 +190,7 @@
|| operation == MTP_OPERATION_SET_OBJECT_PROP_VALUE
|| operation == MTP_OPERATION_SET_DEVICE_PROP_VALUE);
if (dataIn) {
- int ret = mData.read(sHandle);
+ int ret = mData.read(mHandle);
if (ret < 0) {
ALOGE("data read returned %d, errno: %d", ret, errno);
if (errno == ECANCELED) {
@@ -228,7 +209,7 @@
mData.setOperationCode(operation);
mData.setTransactionID(transaction);
ALOGV("sending data:");
- ret = mData.write(sHandle);
+ ret = mData.write(mHandle);
if (ret < 0) {
ALOGE("request write returned %d, errno: %d", ret, errno);
if (errno == ECANCELED) {
@@ -241,7 +222,7 @@
mResponse.setTransactionID(transaction);
ALOGV("sending response %04X", mResponse.getResponseCode());
- ret = mResponse.write(sHandle);
+ ret = mResponse.write(mHandle);
const int savedErrno = errno;
if (ret < 0) {
ALOGE("request write returned %d, errno: %d", ret, errno);
@@ -265,10 +246,7 @@
}
mObjectEditList.clear();
- if (mSessionOpen)
- mDatabase->sessionEnded();
-
- sHandle->close();
+ mHandle->close();
}
void MtpServer::sendObjectAdded(MtpObjectHandle handle) {
@@ -301,15 +279,15 @@
mEvent.setEventCode(code);
mEvent.setTransactionID(mRequest.getTransactionID());
mEvent.setParameter(1, param1);
- if (mEvent.write(sHandle))
+ if (mEvent.write(mHandle))
ALOGE("Mtp send event failed: %s", strerror(errno));
}
}
-void MtpServer::addEditObject(MtpObjectHandle handle, MtpString& path,
+void MtpServer::addEditObject(MtpObjectHandle handle, MtpStringBuffer& path,
uint64_t size, MtpObjectFormat format, int fd) {
ObjectEdit* edit = new ObjectEdit(handle, path, size, format, fd);
- mObjectEditList.add(edit);
+ mObjectEditList.push_back(edit);
}
MtpServer::ObjectEdit* MtpServer::getEditObject(MtpObjectHandle handle) {
@@ -327,7 +305,7 @@
ObjectEdit* edit = mObjectEditList[i];
if (edit->mHandle == handle) {
delete edit;
- mObjectEditList.removeAt(i);
+ mObjectEditList.erase(mObjectEditList.begin() + i);
return;
}
}
@@ -335,12 +313,12 @@
}
void MtpServer::commitEdit(ObjectEdit* edit) {
- mDatabase->endSendObject((const char *)edit->mPath, edit->mHandle, edit->mFormat, true);
+ mDatabase->rescanFile((const char *)edit->mPath, edit->mHandle, edit->mFormat);
}
bool MtpServer::handleRequest() {
- Mutex::Autolock autoLock(mMutex);
+ std::lock_guard<std::mutex> lg(mMutex);
MtpOperationCode operation = mRequest.getOperationCode();
MtpResponseCode response;
@@ -348,9 +326,9 @@
mResponse.reset();
if (mSendObjectHandle != kInvalidObjectHandle && operation != MTP_OPERATION_SEND_OBJECT) {
- // FIXME - need to delete mSendObjectHandle from the database
- ALOGE("expected SendObject after SendObjectInfo");
mSendObjectHandle = kInvalidObjectHandle;
+ mSendObjectFormat = 0;
+ mSendObjectModifiedTime = 0;
}
int containertype = mRequest.getContainerType();
@@ -526,8 +504,6 @@
mSessionID = mRequest.getParameter(1);
mSessionOpen = true;
- mDatabase->sessionStarted();
-
return MTP_RESPONSE_OK;
}
@@ -536,7 +512,6 @@
return MTP_RESPONSE_SESSION_NOT_OPEN;
mSessionID = 0;
mSessionOpen = false;
- mDatabase->sessionEnded();
return MTP_RESPONSE_OK;
}
@@ -604,6 +579,8 @@
return MTP_RESPONSE_INVALID_STORAGE_ID;
MtpObjectHandleList* handles = mDatabase->getObjectList(storageID, format, parent);
+ if (handles == NULL)
+ return MTP_RESPONSE_INVALID_OBJECT_HANDLE;
mData.putAUInt32(handles);
delete handles;
return MTP_RESPONSE_OK;
@@ -792,7 +769,7 @@
if (mRequest.getParameterCount() < 1)
return MTP_RESPONSE_INVALID_PARAMETER;
MtpObjectHandle handle = mRequest.getParameter(1);
- MtpString pathBuf;
+ MtpStringBuffer pathBuf;
int64_t fileLength;
MtpObjectFormat format;
int result = mDatabase->getObjectFilePath(handle, pathBuf, fileLength, format);
@@ -813,7 +790,7 @@
mfr.transaction_id = mRequest.getTransactionID();
// then transfer the file
- int ret = sHandle->sendFile(mfr);
+ int ret = mHandle->sendFile(mfr);
if (ret < 0) {
ALOGE("Mtp send file got error %s", strerror(errno));
if (errno == ECANCELED) {
@@ -832,7 +809,7 @@
uint64_t finalsize = sstat.st_size;
ALOGV("Sent a file over MTP. Time: %f s, Size: %" PRIu64 ", Rate: %f bytes/s",
diff.count(), finalsize, ((double) finalsize) / diff.count());
- close(mfr.fd);
+ closeObjFd(mfr.fd, filePath);
return result;
}
@@ -846,7 +823,7 @@
// send data
mData.setOperationCode(mRequest.getOperationCode());
mData.setTransactionID(mRequest.getTransactionID());
- mData.writeData(sHandle, thumb, thumbSize);
+ mData.writeData(mHandle, thumb, thumbSize);
free(thumb);
return MTP_RESPONSE_OK;
} else {
@@ -878,7 +855,7 @@
// standard GetPartialObject
length = mRequest.getParameter(3);
}
- MtpString pathBuf;
+ MtpStringBuffer pathBuf;
int64_t fileLength;
MtpObjectFormat format;
int result = mDatabase->getObjectFilePath(handle, pathBuf, fileLength, format);
@@ -901,7 +878,7 @@
mResponse.setParameter(1, length);
// transfer the file
- int ret = sHandle->sendFile(mfr);
+ int ret = mHandle->sendFile(mfr);
ALOGV("MTP_SEND_FILE_WITH_HEADER returned %d\n", ret);
result = MTP_RESPONSE_OK;
if (ret < 0) {
@@ -910,12 +887,12 @@
else
result = MTP_RESPONSE_GENERAL_ERROR;
}
- close(mfr.fd);
+ closeObjFd(mfr.fd, filePath);
return result;
}
MtpResponseCode MtpServer::doSendObjectInfo() {
- MtpString path;
+ MtpStringBuffer path;
uint16_t temp16;
uint32_t temp32;
@@ -929,7 +906,7 @@
// special case the root
if (parent == MTP_PARENT_ROOT) {
- path = storage->getPath();
+ path.set(storage->getPath());
parent = 0;
} else {
int64_t length;
@@ -961,7 +938,7 @@
if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER; // sequence number
MtpStringBuffer name, created, modified;
if (!mData.getString(name)) return MTP_RESPONSE_INVALID_PARAMETER; // file name
- if (name.getCharCount() == 0) {
+ if (name.isEmpty()) {
ALOGE("empty name");
return MTP_RESPONSE_INVALID_PARAMETER;
}
@@ -975,8 +952,8 @@
modifiedTime = 0;
if (path[path.size() - 1] != '/')
- path += "/";
- path += (const char *)name;
+ path.append("/");
+ path.append(name);
// check space first
if (mSendObjectFileSize > storage->getFreeSpace())
@@ -991,26 +968,25 @@
}
ALOGD("path: %s parent: %d storageID: %08X", (const char*)path, parent, storageID);
- MtpObjectHandle handle = mDatabase->beginSendObject((const char*)path,
- format, parent, storageID, mSendObjectFileSize, modifiedTime);
+ MtpObjectHandle handle = mDatabase->beginSendObject((const char*)path, format,
+ parent, storageID);
if (handle == kInvalidObjectHandle) {
return MTP_RESPONSE_GENERAL_ERROR;
}
- if (format == MTP_FORMAT_ASSOCIATION) {
+ if (format == MTP_FORMAT_ASSOCIATION) {
int ret = makeFolder((const char *)path);
if (ret)
return MTP_RESPONSE_GENERAL_ERROR;
// SendObject does not get sent for directories, so call endSendObject here instead
- mDatabase->endSendObject(path, handle, MTP_FORMAT_ASSOCIATION, MTP_RESPONSE_OK);
- } else {
- mSendObjectFilePath = path;
- // save the handle for the SendObject call, which should follow
- mSendObjectHandle = handle;
- mSendObjectFormat = format;
- mSendObjectModifiedTime = modifiedTime;
+ mDatabase->endSendObject(handle, MTP_RESPONSE_OK);
}
+ mSendObjectFilePath = path;
+ // save the handle for the SendObject call, which should follow
+ mSendObjectHandle = handle;
+ mSendObjectFormat = format;
+ mSendObjectModifiedTime = modifiedTime;
mResponse.setParameter(1, storageID);
mResponse.setParameter(2, parent);
@@ -1030,10 +1006,10 @@
MtpObjectHandle parent = mRequest.getParameter(3);
if (!storage)
return MTP_RESPONSE_INVALID_STORAGE_ID;
- MtpString path;
+ MtpStringBuffer path;
MtpResponseCode result;
- MtpString fromPath;
+ MtpStringBuffer fromPath;
int64_t fileLength;
MtpObjectFormat format;
MtpObjectInfo info(objectHandle);
@@ -1046,7 +1022,7 @@
// special case the root
if (parent == 0) {
- path = storage->getPath();
+ path.set(storage->getPath());
} else {
int64_t parentLength;
MtpObjectFormat parentFormat;
@@ -1058,12 +1034,16 @@
}
if (path[path.size() - 1] != '/')
- path += "/";
- path += info.mName;
+ path.append("/");
+ path.append(info.mName);
+
+ result = mDatabase->beginMoveObject(objectHandle, parent, storageID);
+ if (result != MTP_RESPONSE_OK)
+ return result;
if (info.mStorageID == storageID) {
ALOGV("Moving file from %s to %s", (const char*)fromPath, (const char*)path);
- if (rename(fromPath, path)) {
+ if (renameTo(fromPath, path)) {
PLOG(ERROR) << "rename() failed from " << fromPath << " to " << path;
result = MTP_RESPONSE_GENERAL_ERROR;
}
@@ -1087,8 +1067,8 @@
}
// If the move failed, undo the database change
- if (result == MTP_RESPONSE_OK)
- result = mDatabase->moveObject(objectHandle, parent, storageID, path);
+ mDatabase->endMoveObject(info.mParent, parent, info.mStorageID, storageID, objectHandle,
+ result == MTP_RESPONSE_OK);
return result;
}
@@ -1105,9 +1085,9 @@
MtpObjectHandle parent = mRequest.getParameter(3);
if (!storage)
return MTP_RESPONSE_INVALID_STORAGE_ID;
- MtpString path;
+ MtpStringBuffer path;
- MtpString fromPath;
+ MtpStringBuffer fromPath;
int64_t fileLength;
MtpObjectFormat format;
MtpObjectInfo info(objectHandle);
@@ -1120,7 +1100,7 @@
// special case the root
if (parent == 0) {
- path = storage->getPath();
+ path.set(storage->getPath());
} else {
int64_t parentLength;
MtpObjectFormat parentFormat;
@@ -1136,11 +1116,10 @@
return MTP_RESPONSE_STORAGE_FULL;
if (path[path.size() - 1] != '/')
- path += "/";
- path += info.mName;
+ path.append("/");
+ path.append(info.mName);
- MtpObjectHandle handle = mDatabase->beginSendObject((const char*)path,
- format, parent, storageID, fileLength, info.mDateModified);
+ MtpObjectHandle handle = mDatabase->beginCopyObject(objectHandle, parent, storageID);
if (handle == kInvalidObjectHandle) {
return MTP_RESPONSE_GENERAL_ERROR;
}
@@ -1158,9 +1137,7 @@
}
}
- mDatabase->endSendObject(path, handle, format, result);
- if (format == MTP_FORMAT_ASSOCIATION)
- mDatabase->doScanDirectory(path);
+ mDatabase->endCopyObject(handle, result);
mResponse.setParameter(1, handle);
return result;
}
@@ -1183,13 +1160,22 @@
}
// read the header, and possibly some data
- ret = mData.read(sHandle);
+ ret = mData.read(mHandle);
if (ret < MTP_CONTAINER_HEADER_SIZE) {
result = MTP_RESPONSE_GENERAL_ERROR;
goto done;
}
initialData = ret - MTP_CONTAINER_HEADER_SIZE;
+ if (mSendObjectFormat == MTP_FORMAT_ASSOCIATION) {
+ if (initialData != 0)
+ ALOGE("Expected folder size to be 0!");
+ mSendObjectHandle = kInvalidObjectHandle;
+ mSendObjectFormat = 0;
+ mSendObjectModifiedTime = 0;
+ return result;
+ }
+
mtp_file_range mfr;
mfr.fd = open(mSendObjectFilePath, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
if (mfr.fd < 0) {
@@ -1222,7 +1208,7 @@
mfr.transaction_id = 0;
// transfer the file
- ret = sHandle->receiveFile(mfr, mfr.length == 0 &&
+ ret = mHandle->receiveFile(mfr, mfr.length == 0 &&
initialData == MTP_BUFFER_SIZE - MTP_CONTAINER_HEADER_SIZE);
if ((ret < 0) && (errno == ECANCELED)) {
isCanceled = true;
@@ -1240,7 +1226,7 @@
}
fstat(mfr.fd, &sstat);
- close(mfr.fd);
+ closeObjFd(mfr.fd, mSendObjectFilePath);
if (ret < 0) {
ALOGE("Mtp receive file got error %s", strerror(errno));
@@ -1255,8 +1241,7 @@
// reset so we don't attempt to send the data back
mData.reset();
- mDatabase->endSendObject(mSendObjectFilePath, mSendObjectHandle, mSendObjectFormat,
- result == MTP_RESPONSE_OK);
+ mDatabase->endSendObject(mSendObjectHandle, result == MTP_RESPONSE_OK);
mSendObjectHandle = kInvalidObjectHandle;
mSendObjectFormat = 0;
mSendObjectModifiedTime = 0;
@@ -1279,19 +1264,21 @@
// FIXME - support deleting all objects if handle is 0xFFFFFFFF
// FIXME - implement deleting objects by format
- MtpString filePath;
+ MtpStringBuffer filePath;
int64_t fileLength;
int result = mDatabase->getObjectFilePath(handle, filePath, fileLength, format);
- if (result == MTP_RESPONSE_OK) {
- ALOGV("deleting %s", (const char *)filePath);
- result = mDatabase->deleteFile(handle);
- // Don't delete the actual files unless the database deletion is allowed
- if (result == MTP_RESPONSE_OK) {
- deletePath((const char *)filePath);
- }
- }
+ if (result != MTP_RESPONSE_OK)
+ return result;
- return result;
+ // Don't delete the actual files unless the database deletion is allowed
+ result = mDatabase->beginDeleteObject(handle);
+ if (result != MTP_RESPONSE_OK)
+ return result;
+
+ bool success = deletePath((const char *)filePath);
+
+ mDatabase->endDeleteObject(handle, success);
+ return success ? result : MTP_RESPONSE_PARTIAL_DELETION;
}
MtpResponseCode MtpServer::doGetObjectPropDesc() {
@@ -1350,7 +1337,7 @@
ALOGV("receiving partial %s %" PRIu64 " %" PRIu32, filePath, offset, length);
// read the header, and possibly some data
- int ret = mData.read(sHandle);
+ int ret = mData.read(mHandle);
if (ret < MTP_CONTAINER_HEADER_SIZE)
return MTP_RESPONSE_GENERAL_ERROR;
int initialData = ret - MTP_CONTAINER_HEADER_SIZE;
@@ -1373,7 +1360,7 @@
mfr.transaction_id = 0;
// transfer the file
- ret = sHandle->receiveFile(mfr, mfr.length == 0 &&
+ ret = mHandle->receiveFile(mfr, mfr.length == 0 &&
initialData == MTP_BUFFER_SIZE - MTP_CONTAINER_HEADER_SIZE);
if ((ret < 0) && (errno == ECANCELED)) {
isCanceled = true;
@@ -1427,7 +1414,7 @@
return MTP_RESPONSE_GENERAL_ERROR;
}
- MtpString path;
+ MtpStringBuffer path;
int64_t fileLength;
MtpObjectFormat format;
int result = mDatabase->getObjectFilePath(handle, path, fileLength, format);
diff --git a/media/mtp/MtpServer.h b/media/mtp/MtpServer.h
index 0204b09..f6939d7 100644
--- a/media/mtp/MtpServer.h
+++ b/media/mtp/MtpServer.h
@@ -21,36 +21,36 @@
#include "MtpDataPacket.h"
#include "MtpResponsePacket.h"
#include "MtpEventPacket.h"
+#include "MtpStringBuffer.h"
#include "mtp.h"
#include "MtpUtils.h"
#include "IMtpHandle.h"
-#include <utils/threads.h>
-#include <queue>
#include <memory>
#include <mutex>
+#include <queue>
namespace android {
-class MtpDatabase;
+class IMtpDatabase;
class MtpStorage;
class MtpServer {
private:
- MtpDatabase* mDatabase;
+ IMtpDatabase* mDatabase;
// appear as a PTP device
bool mPtp;
// Manufacturer to report in DeviceInfo
- MtpString mDeviceInfoManufacturer;
+ MtpStringBuffer mDeviceInfoManufacturer;
// Model to report in DeviceInfo
- MtpString mDeviceInfoModel;
+ MtpStringBuffer mDeviceInfoModel;
// Device version to report in DeviceInfo
- MtpString mDeviceInfoDeviceVersion;
+ MtpStringBuffer mDeviceInfoDeviceVersion;
// Serial number to report in DeviceInfo
- MtpString mDeviceInfoSerialNumber;
+ MtpStringBuffer mDeviceInfoSerialNumber;
// current session ID
MtpSessionID mSessionID;
@@ -65,23 +65,23 @@
MtpStorageList mStorages;
- static IMtpHandle* sHandle;
+ IMtpHandle* mHandle;
// handle for new object, set by SendObjectInfo and used by SendObject
MtpObjectHandle mSendObjectHandle;
MtpObjectFormat mSendObjectFormat;
- MtpString mSendObjectFilePath;
+ MtpStringBuffer mSendObjectFilePath;
size_t mSendObjectFileSize;
time_t mSendObjectModifiedTime;
- Mutex mMutex;
+ std::mutex mMutex;
// represents an MTP object that is being edited using the android extensions
// for direct editing (BeginEditObject, SendPartialObject, TruncateObject and EndEditObject)
class ObjectEdit {
public:
MtpObjectHandle mHandle;
- MtpString mPath;
+ MtpStringBuffer mPath;
uint64_t mSize;
MtpObjectFormat mFormat;
int mFD;
@@ -95,14 +95,14 @@
close(mFD);
}
};
- Vector<ObjectEdit*> mObjectEditList;
+ std::vector<ObjectEdit*> mObjectEditList;
public:
- MtpServer(MtpDatabase* database, bool ptp,
- const MtpString& deviceInfoManufacturer,
- const MtpString& deviceInfoModel,
- const MtpString& deviceInfoDeviceVersion,
- const MtpString& deviceInfoSerialNumber);
+ MtpServer(IMtpDatabase* database, int controlFd, bool ptp,
+ const char *deviceInfoManufacturer,
+ const char *deviceInfoModel,
+ const char *deviceInfoDeviceVersion,
+ const char *deviceInfoSerialNumber);
virtual ~MtpServer();
MtpStorage* getStorage(MtpStorageID id);
@@ -111,7 +111,6 @@
void addStorage(MtpStorage* storage);
void removeStorage(MtpStorage* storage);
- static int configure(bool usePtp);
void run();
void sendObjectAdded(MtpObjectHandle handle);
@@ -123,7 +122,7 @@
void sendStoreRemoved(MtpStorageID id);
void sendEvent(MtpEventCode code, uint32_t param1);
- void addEditObject(MtpObjectHandle handle, MtpString& path,
+ void addEditObject(MtpObjectHandle handle, MtpStringBuffer& path,
uint64_t size, MtpObjectFormat format, int fd);
ObjectEdit* getEditObject(MtpObjectHandle handle);
void removeEditObject(MtpObjectHandle handle);
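
Editor's note — illustrative usage sketch, not part of this patch: the refactored constructor above takes an IMtpDatabase plus the FFS control fd and picks its own handle (MtpFfsHandle/MtpFfsCompatHandle, or MtpDevHandle when FFS_MTP_EP0 is not writable), replacing the old static configure() step. The database implementation, device strings, storage ID, and paths below are all placeholders.

    #include <fcntl.h>
    #include "IMtpDatabase.h"
    #include "MtpDescriptors.h"   // provides FFS_MTP_EP0
    #include "MtpServer.h"
    #include "MtpStorage.h"

    using namespace android;

    void runMtpSession(IMtpDatabase* database /* assumed caller-provided implementation */) {
        // With FFS available, the constructor wraps this fd; otherwise it ignores it
        // and falls back to MtpDevHandle (see the constructor diff above).
        int controlFd = open(FFS_MTP_EP0, O_RDWR);

        MtpServer server(database, controlFd, false /* ptp */,
                         "ExampleVendor", "ExampleModel", "1.0", "0123456789ABCDEF");

        // MtpStorage no longer takes a reserveSpace argument (see the MtpStorage diff below).
        MtpStorage storage(0x00010001 /* storage ID, illustrative */,
                           "/storage/emulated/0", "Internal shared storage",
                           false /* removable */, 0 /* maxFileSize, illustrative */);
        server.addStorage(&storage);

        server.run();   // blocks in the request loop; closes mHandle on exit
    }
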
diff --git a/media/mtp/MtpStorage.cpp b/media/mtp/MtpStorage.cpp
index d77ca72..a147325 100644
--- a/media/mtp/MtpStorage.cpp
+++ b/media/mtp/MtpStorage.cpp
@@ -17,7 +17,6 @@
#define LOG_TAG "MtpStorage"
#include "MtpDebug.h"
-#include "MtpDatabase.h"
#include "MtpStorage.h"
#include <sys/types.h>
@@ -33,14 +32,12 @@
namespace android {
MtpStorage::MtpStorage(MtpStorageID id, const char* filePath,
- const char* description, uint64_t reserveSpace,
- bool removable, uint64_t maxFileSize)
+ const char* description, bool removable, uint64_t maxFileSize)
: mStorageID(id),
mFilePath(filePath),
mDescription(description),
mMaxCapacity(0),
mMaxFileSize(maxFileSize),
- mReserveSpace(reserveSpace),
mRemovable(removable)
{
ALOGV("MtpStorage id: %d path: %s\n", id, filePath);
@@ -75,8 +72,7 @@
struct statfs stat;
if (statfs(getPath(), &stat))
return -1;
- uint64_t freeSpace = (uint64_t)stat.f_bavail * (uint64_t)stat.f_bsize;
- return (freeSpace > mReserveSpace ? freeSpace - mReserveSpace : 0);
+ return (uint64_t)stat.f_bavail * (uint64_t)stat.f_bsize;
}
const char* MtpStorage::getDescription() const {
diff --git a/media/mtp/MtpStorage.h b/media/mtp/MtpStorage.h
index e5a2e57..e9518dd 100644
--- a/media/mtp/MtpStorage.h
+++ b/media/mtp/MtpStorage.h
@@ -17,6 +17,7 @@
#ifndef _MTP_STORAGE_H
#define _MTP_STORAGE_H
+#include "MtpStringBuffer.h"
#include "MtpTypes.h"
#include "mtp.h"
@@ -28,17 +29,15 @@
private:
MtpStorageID mStorageID;
- MtpString mFilePath;
- MtpString mDescription;
+ MtpStringBuffer mFilePath;
+ MtpStringBuffer mDescription;
uint64_t mMaxCapacity;
uint64_t mMaxFileSize;
- // amount of free space to leave unallocated
- uint64_t mReserveSpace;
bool mRemovable;
public:
MtpStorage(MtpStorageID id, const char* filePath,
- const char* description, uint64_t reserveSpace,
+ const char* description,
bool removable, uint64_t maxFileSize);
virtual ~MtpStorage();
diff --git a/media/mtp/MtpStringBuffer.cpp b/media/mtp/MtpStringBuffer.cpp
index df04694..cd379bf 100644
--- a/media/mtp/MtpStringBuffer.cpp
+++ b/media/mtp/MtpStringBuffer.cpp
@@ -16,168 +16,97 @@
#define LOG_TAG "MtpStringBuffer"
-#include <string.h>
+#include <codecvt>
+#include <locale>
+#include <string>
+#include <vector>
#include "MtpDataPacket.h"
#include "MtpStringBuffer.h"
-namespace android {
+namespace {
-MtpStringBuffer::MtpStringBuffer()
- : mCharCount(0),
- mByteCount(1)
-{
- mBuffer[0] = 0;
+std::wstring_convert<std::codecvt_utf8_utf16<char16_t>,char16_t> gConvert;
+
+static std::string utf16ToUtf8(std::u16string input_str) {
+ return gConvert.to_bytes(input_str);
}
+static std::u16string utf8ToUtf16(std::string input_str) {
+ return gConvert.from_bytes(input_str);
+}
+
+} // namespace
+
+namespace android {
+
MtpStringBuffer::MtpStringBuffer(const char* src)
- : mCharCount(0),
- mByteCount(1)
{
set(src);
}
MtpStringBuffer::MtpStringBuffer(const uint16_t* src)
- : mCharCount(0),
- mByteCount(1)
{
set(src);
}
MtpStringBuffer::MtpStringBuffer(const MtpStringBuffer& src)
- : mCharCount(src.mCharCount),
- mByteCount(src.mByteCount)
{
- memcpy(mBuffer, src.mBuffer, mByteCount);
-}
-
-
-MtpStringBuffer::~MtpStringBuffer() {
+ mString = src.mString;
}
void MtpStringBuffer::set(const char* src) {
- // count the characters
- int count = 0;
- char ch;
- char* dest = (char*)mBuffer;
-
- while ((ch = *src++) != 0 && count < MTP_STRING_MAX_CHARACTER_NUMBER) {
- if ((ch & 0x80) == 0) {
- // single byte character
- *dest++ = ch;
- } else if ((ch & 0xE0) == 0xC0) {
- // two byte character
- char ch1 = *src++;
- if (! ch1) {
- // last character was truncated, so ignore last byte
- break;
- }
-
- *dest++ = ch;
- *dest++ = ch1;
- } else if ((ch & 0xF0) == 0xE0) {
- // 3 byte char
- char ch1 = *src++;
- if (! ch1) {
- // last character was truncated, so ignore last byte
- break;
- }
- char ch2 = *src++;
- if (! ch2) {
- // last character was truncated, so ignore last byte
- break;
- }
-
- *dest++ = ch;
- *dest++ = ch1;
- *dest++ = ch2;
- }
- count++;
- }
-
- *dest++ = 0;
- mByteCount = dest - (char*)mBuffer;
- mCharCount = count;
+ mString = std::string(src);
}
void MtpStringBuffer::set(const uint16_t* src) {
- int count = 0;
- uint16_t ch;
- uint8_t* dest = mBuffer;
-
- while ((ch = *src++) != 0 && count < MTP_STRING_MAX_CHARACTER_NUMBER) {
- if (ch >= 0x0800) {
- *dest++ = (uint8_t)(0xE0 | (ch >> 12));
- *dest++ = (uint8_t)(0x80 | ((ch >> 6) & 0x3F));
- *dest++ = (uint8_t)(0x80 | (ch & 0x3F));
- } else if (ch >= 0x80) {
- *dest++ = (uint8_t)(0xC0 | (ch >> 6));
- *dest++ = (uint8_t)(0x80 | (ch & 0x3F));
- } else {
- *dest++ = ch;
- }
- count++;
- }
- *dest++ = 0;
- mCharCount = count;
- mByteCount = dest - mBuffer;
+ mString = utf16ToUtf8(std::u16string((const char16_t*)src));
}
bool MtpStringBuffer::readFromPacket(MtpDataPacket* packet) {
uint8_t count;
if (!packet->getUInt8(count))
return false;
+ if (count == 0)
+ return true;
- uint8_t* dest = mBuffer;
+ std::vector<char16_t> buffer(count);
for (int i = 0; i < count; i++) {
uint16_t ch;
-
if (!packet->getUInt16(ch))
return false;
- if (ch >= 0x0800) {
- *dest++ = (uint8_t)(0xE0 | (ch >> 12));
- *dest++ = (uint8_t)(0x80 | ((ch >> 6) & 0x3F));
- *dest++ = (uint8_t)(0x80 | (ch & 0x3F));
- } else if (ch >= 0x80) {
- *dest++ = (uint8_t)(0xC0 | (ch >> 6));
- *dest++ = (uint8_t)(0x80 | (ch & 0x3F));
- } else {
- *dest++ = ch;
- }
+ buffer[i] = ch;
}
- *dest++ = 0;
- mCharCount = count;
- mByteCount = dest - mBuffer;
+ if (buffer[count-1] != '\0') {
+ ALOGE("Mtp string not null terminated\n");
+ return false;
+ }
+ mString = utf16ToUtf8(std::u16string(buffer.data()));
return true;
}
void MtpStringBuffer::writeToPacket(MtpDataPacket* packet) const {
- int count = mCharCount;
- const uint8_t* src = mBuffer;
- packet->putUInt8(count > 0 ? count + 1 : 0);
+ std::u16string src16 = utf8ToUtf16(mString);
+ int count = src16.length();
- // expand utf8 to 16 bit chars
- for (int i = 0; i < count; i++) {
- uint16_t ch;
- uint16_t ch1 = *src++;
- if ((ch1 & 0x80) == 0) {
- // single byte character
- ch = ch1;
- } else if ((ch1 & 0xE0) == 0xC0) {
- // two byte character
- uint16_t ch2 = *src++;
- ch = ((ch1 & 0x1F) << 6) | (ch2 & 0x3F);
- } else {
- // three byte character
- uint16_t ch2 = *src++;
- uint16_t ch3 = *src++;
- ch = ((ch1 & 0x0F) << 12) | ((ch2 & 0x3F) << 6) | (ch3 & 0x3F);
+ if (count == 0) {
+ packet->putUInt8(0);
+ return;
+ }
+ packet->putUInt8(std::min(count + 1, MTP_STRING_MAX_CHARACTER_NUMBER));
+
+ int i = 0;
+ for (char16_t &c : src16) {
+ if (i == MTP_STRING_MAX_CHARACTER_NUMBER - 1) {
+ // Leave a slot for null termination.
+ ALOGI("Mtp truncating long string\n");
+ break;
}
- packet->putUInt16(ch);
+ packet->putUInt16(c);
+ i++;
}
// only terminate with zero if string is not empty
- if (count > 0)
- packet->putUInt16(0);
+ packet->putUInt16(0);
}
} // namespace android
diff --git a/media/mtp/MtpStringBuffer.h b/media/mtp/MtpStringBuffer.h
index bcf2a48..4cec58a 100644
--- a/media/mtp/MtpStringBuffer.h
+++ b/media/mtp/MtpStringBuffer.h
@@ -17,7 +17,9 @@
#ifndef _MTP_STRING_BUFFER_H
#define _MTP_STRING_BUFFER_H
+#include <log/log.h>
#include <stdint.h>
+#include <string>
// Max Character number of a MTP String
#define MTP_STRING_MAX_CHARACTER_NUMBER 255
@@ -30,31 +32,39 @@
class MtpStringBuffer {
private:
- // mBuffer contains string in UTF8 format
- // maximum 3 bytes/character, with 1 extra for zero termination
- uint8_t mBuffer[MTP_STRING_MAX_CHARACTER_NUMBER * 3 + 1];
- int mCharCount;
- int mByteCount;
+ std::string mString;
public:
- MtpStringBuffer();
+ MtpStringBuffer() {};
+ ~MtpStringBuffer() {};
+
explicit MtpStringBuffer(const char* src);
explicit MtpStringBuffer(const uint16_t* src);
MtpStringBuffer(const MtpStringBuffer& src);
- virtual ~MtpStringBuffer();
void set(const char* src);
void set(const uint16_t* src);
+ inline void append(const char* other);
+ inline void append(MtpStringBuffer &other);
+
bool readFromPacket(MtpDataPacket* packet);
void writeToPacket(MtpDataPacket* packet) const;
- inline int getCharCount() const { return mCharCount; }
- inline int getByteCount() const { return mByteCount; }
+ inline bool isEmpty() const { return mString.empty(); }
+ inline int size() const { return mString.length(); }
- inline operator const char*() const { return (const char *)mBuffer; }
+ inline operator const char*() const { return mString.c_str(); }
};
+inline void MtpStringBuffer::append(const char* other) {
+ mString += other;
+}
+
+inline void MtpStringBuffer::append(MtpStringBuffer &other) {
+ mString += other.mString;
+}
+
}; // namespace android
#endif // _MTP_STRING_BUFFER_H
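
Editor's note — illustrative usage sketch, not part of this patch: with MtpStringBuffer now backed by std::string, the char/byte-count accessors are gone and callers use append()/isEmpty()/size() plus the existing const char* conversion, as the MtpServer rewrite above does. A minimal sketch mirroring the path construction in doSendObjectInfo():

    #include "MtpStringBuffer.h"

    using namespace android;

    void buildObjectPath(MtpStringBuffer& storageRoot, MtpStringBuffer& name) {
        MtpStringBuffer path;
        path.set(storageRoot);                  // operator const char*() feeds set(const char*)
        if (path.isEmpty())
            return;
        if (path[path.size() - 1] != '/')       // same check the server code performs
            path.append("/");
        path.append(name);
        ALOGV("full path: %s", (const char*)path);
    }
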
diff --git a/media/mtp/MtpTypes.h b/media/mtp/MtpTypes.h
index c749c66..e6ac23c 100644
--- a/media/mtp/MtpTypes.h
+++ b/media/mtp/MtpTypes.h
@@ -18,8 +18,7 @@
#define _MTP_TYPES_H
#include <stdint.h>
-#include "utils/String8.h"
-#include "utils/Vector.h"
+#include <vector>
namespace android {
@@ -51,18 +50,18 @@
class MtpDevice;
class MtpProperty;
-typedef Vector<MtpStorage *> MtpStorageList;
-typedef Vector<MtpDevice*> MtpDeviceList;
-typedef Vector<MtpProperty*> MtpPropertyList;
+typedef std::vector<MtpStorage *> MtpStorageList;
+typedef std::vector<MtpDevice*> MtpDeviceList;
+typedef std::vector<MtpProperty*> MtpPropertyList;
-typedef Vector<uint8_t> UInt8List;
-typedef Vector<uint16_t> UInt16List;
-typedef Vector<uint32_t> UInt32List;
-typedef Vector<uint64_t> UInt64List;
-typedef Vector<int8_t> Int8List;
-typedef Vector<int16_t> Int16List;
-typedef Vector<int32_t> Int32List;
-typedef Vector<int64_t> Int64List;
+typedef std::vector<uint8_t> UInt8List;
+typedef std::vector<uint16_t> UInt16List;
+typedef std::vector<uint32_t> UInt32List;
+typedef std::vector<uint64_t> UInt64List;
+typedef std::vector<int8_t> Int8List;
+typedef std::vector<int16_t> Int16List;
+typedef std::vector<int32_t> Int32List;
+typedef std::vector<int64_t> Int64List;
typedef UInt16List MtpObjectPropertyList;
typedef UInt16List MtpDevicePropertyList;
@@ -71,8 +70,6 @@
typedef UInt16List MtpObjectPropertyList;
typedef UInt32List MtpStorageIDList;
-typedef String8 MtpString;
-
enum UrbPacketDivisionMode {
// First packet only contains a header.
FIRST_PACKET_ONLY_HEADER,
diff --git a/media/mtp/MtpUtils.cpp b/media/mtp/MtpUtils.cpp
index 3f5648b..8564576 100644
--- a/media/mtp/MtpUtils.cpp
+++ b/media/mtp/MtpUtils.cpp
@@ -36,6 +36,13 @@
constexpr unsigned long FILE_COPY_SIZE = 262144;
+static void access_ok(const char *path) {
+ if (access(path, F_OK) == -1) {
+ // Ignore. Failure could be common in cases of delete where
+ // the metadata was updated through other paths.
+ }
+}
+
/*
DateTime strings follow a compatible subset of the definition found in ISO 8601, and
take the form of a Unicode string formatted as: "YYYYMMDDThhmmss.s". In this
@@ -101,6 +108,7 @@
} else {
chown((const char *)path, getuid(), FILE_GROUP);
}
+ access_ok(path);
return ret;
}
@@ -181,6 +189,7 @@
LOG(DEBUG) << "Copied a file with MTP. Time: " << diff.count() << " s, Size: " << length <<
", Rate: " << ((double) length) / diff.count() << " bytes/s";
chown(toPath, getuid(), FILE_GROUP);
+ access_ok(toPath);
return ret == -1 ? -1 : 0;
}
@@ -204,29 +213,55 @@
if (name[0] == '.' && (name[1] == 0 || (name[1] == '.' && name[2] == 0))) {
continue;
}
- pathStr.append(name);
+ string childPath = pathStr + name;
+ int success;
if (entry->d_type == DT_DIR) {
- deleteRecursive(pathStr.c_str());
- rmdir(pathStr.c_str());
+ deleteRecursive(childPath.c_str());
+ success = rmdir(childPath.c_str());
} else {
- unlink(pathStr.c_str());
+ success = unlink(childPath.c_str());
}
+ access_ok(childPath.c_str());
+ if (success == -1)
+ PLOG(ERROR) << "Deleting path " << childPath << " failed";
}
closedir(dir);
}
-void deletePath(const char* path) {
+bool deletePath(const char* path) {
struct stat statbuf;
+ int success;
if (stat(path, &statbuf) == 0) {
if (S_ISDIR(statbuf.st_mode)) {
+ // rmdir will fail if the directory is non empty, so
+ // there is no need to keep errors from deleteRecursive
deleteRecursive(path);
- rmdir(path);
+ success = rmdir(path);
} else {
- unlink(path);
+ success = unlink(path);
}
} else {
- PLOG(ERROR) << "deletePath stat failed for " << path;;
+ PLOG(ERROR) << "deletePath stat failed for " << path;
+ return false;
}
+ if (success == -1)
+ PLOG(ERROR) << "Deleting path " << path << " failed";
+ access_ok(path);
+ return success == 0;
+}
+
+int renameTo(const char *oldPath, const char *newPath) {
+ int ret = rename(oldPath, newPath);
+ access_ok(oldPath);
+ access_ok(newPath);
+ return ret;
+}
+
+// Calls access(2) on the path to update underlying filesystems,
+// then closes the fd.
+void closeObjFd(int fd, const char *path) {
+ close(fd);
+ access_ok(path);
}
} // namespace android
diff --git a/media/mtp/MtpUtils.h b/media/mtp/MtpUtils.h
index b7c72f5..21f5df0 100644
--- a/media/mtp/MtpUtils.h
+++ b/media/mtp/MtpUtils.h
@@ -33,9 +33,10 @@
int makeFolder(const char *path);
int copyRecursive(const char *fromPath, const char *toPath);
int copyFile(const char *fromPath, const char *toPath);
-void deleteRecursive(const char* path);
-void deletePath(const char* path);
+bool deletePath(const char* path);
+int renameTo(const char *oldPath, const char *newPath);
+void closeObjFd(int fd, const char *path);
}; // namespace android
#endif // _MTP_UTILS_H
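
Editor's note — illustrative usage sketch, not part of this patch: the MtpUtils changes above make deletePath() report success, add renameTo(), and route fd closes through closeObjFd(), all of which call access(2) so underlying filesystems observe the update (per the access_ok() comment). The file paths below are placeholders.

    #include <fcntl.h>
    #include "MtpUtils.h"

    using namespace android;

    void mtpUtilsExample() {
        // renameTo() wraps rename(2) and then touches both paths with access(2).
        if (renameTo("/sdcard/DCIM/a.jpg", "/sdcard/DCIM/b.jpg") != 0) {
            // handle failure; the move did not take effect
        }

        // deletePath() now returns whether the filesystem delete succeeded, which
        // doDeleteObject() uses to pick MTP_RESPONSE_OK vs MTP_RESPONSE_PARTIAL_DELETION.
        bool deleted = deletePath("/sdcard/DCIM/b.jpg");
        (void)deleted;

        // closeObjFd() replaces plain close() after object transfers.
        int fd = open("/sdcard/DCIM/c.jpg", O_RDWR | O_CREAT, 0660);
        if (fd >= 0) {
            // ... write object data ...
            closeObjFd(fd, "/sdcard/DCIM/c.jpg");
        }
    }
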
diff --git a/media/mtp/tests/MtpFfsHandle_test.cpp b/media/mtp/tests/MtpFfsHandle_test.cpp
index 9c916b7..c9c9e62 100644
--- a/media/mtp/tests/MtpFfsHandle_test.cpp
+++ b/media/mtp/tests/MtpFfsHandle_test.cpp
@@ -23,7 +23,7 @@
#include <random>
#include <string>
#include <unistd.h>
-#include <utils/Log.h>
+#include <log/log.h>
#include "MtpDescriptors.h"
#include "MtpFfsHandle.h"
@@ -64,7 +64,7 @@
MtpFfsHandleTest() {
int fd[2];
- handle = std::make_unique<T>();
+ handle = std::make_unique<T>(-1);
EXPECT_EQ(pipe(fd), 0);
control.reset(fd[0]);
@@ -84,7 +84,7 @@
intr.reset(fd[0]);
handle->mIntr.reset(fd[1]);
- EXPECT_EQ(handle->start(), 0);
+ EXPECT_EQ(handle->start(false), 0);
}
~MtpFfsHandleTest() {
@@ -95,8 +95,8 @@
typedef ::testing::Types<MtpFfsHandle, MtpFfsCompatHandle> mtpHandles;
TYPED_TEST_CASE(MtpFfsHandleTest, mtpHandles);
-TYPED_TEST(MtpFfsHandleTest, testControl) {
- EXPECT_TRUE(this->handle->writeDescriptors());
+TYPED_TEST(MtpFfsHandleTest, testMtpControl) {
+ EXPECT_TRUE(this->handle->writeDescriptors(false));
struct desc_v2 desc;
struct functionfs_strings strings;
EXPECT_EQ(read(this->control, &desc, sizeof(desc)), (long)sizeof(desc));
@@ -105,6 +105,16 @@
EXPECT_TRUE(std::memcmp(&strings, &mtp_strings, sizeof(strings)) == 0);
}
+TYPED_TEST(MtpFfsHandleTest, testPtpControl) {
+ EXPECT_TRUE(this->handle->writeDescriptors(true));
+ struct desc_v2 desc;
+ struct functionfs_strings strings;
+ EXPECT_EQ(read(this->control, &desc, sizeof(desc)), (long)sizeof(desc));
+ EXPECT_EQ(read(this->control, &strings, sizeof(strings)), (long)sizeof(strings));
+ EXPECT_TRUE(std::memcmp(&desc, &ptp_desc_v2, sizeof(desc)) == 0);
+ EXPECT_TRUE(std::memcmp(&strings, &mtp_strings, sizeof(strings)) == 0);
+}
+
TYPED_TEST(MtpFfsHandleTest, testRead) {
EXPECT_EQ(write(this->bulk_out, dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
char buf[TEST_PACKET_SIZE + 1];
@@ -113,6 +123,21 @@
EXPECT_STREQ(buf, dummyDataStr.c_str());
}
+TYPED_TEST(MtpFfsHandleTest, testReadLarge) {
+ std::stringstream ss;
+ int size = TEST_PACKET_SIZE * MED_MULT;
+ char buf[size + 1];
+ buf[size] = '\0';
+
+ for (int i = 0; i < MED_MULT; i++)
+ ss << dummyDataStr;
+
+ EXPECT_EQ(write(this->bulk_out, ss.str().c_str(), size), size);
+ EXPECT_EQ(this->handle->read(buf, size), size);
+
+ EXPECT_STREQ(buf, ss.str().c_str());
+}
+
TYPED_TEST(MtpFfsHandleTest, testWrite) {
char buf[TEST_PACKET_SIZE + 1];
buf[TEST_PACKET_SIZE] = '\0';
@@ -121,6 +146,21 @@
EXPECT_STREQ(buf, dummyDataStr.c_str());
}
+TYPED_TEST(MtpFfsHandleTest, testWriteLarge) {
+ std::stringstream ss;
+ int size = TEST_PACKET_SIZE * MED_MULT;
+ char buf[size + 1];
+ buf[size] = '\0';
+
+ for (int i = 0; i < MED_MULT; i++)
+ ss << dummyDataStr;
+
+ EXPECT_EQ(this->handle->write(ss.str().c_str(), size), size);
+ EXPECT_EQ(read(this->bulk_in, buf, size), size);
+
+ EXPECT_STREQ(buf, ss.str().c_str());
+}
+
TYPED_TEST(MtpFfsHandleTest, testReceiveFileEmpty) {
std::stringstream ss;
mtp_file_range mfr;
diff --git a/media/mtp/tests/PosixAsyncIO_test.cpp b/media/mtp/tests/PosixAsyncIO_test.cpp
index 63b9a35..9e337aa 100644
--- a/media/mtp/tests/PosixAsyncIO_test.cpp
+++ b/media/mtp/tests/PosixAsyncIO_test.cpp
@@ -20,7 +20,7 @@
#include <gtest/gtest.h>
#include <string>
#include <unistd.h>
-#include <utils/Log.h>
+#include <log/log.h>
#include "PosixAsyncIO.h"
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 0d48de1..4a36681 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -37,6 +37,7 @@
srcs: [
"NdkMediaCodec.cpp",
"NdkMediaCrypto.cpp",
+ "NdkMediaDataSource.cpp",
"NdkMediaExtractor.cpp",
"NdkMediaFormat.cpp",
"NdkMediaMuxer.cpp",
@@ -67,9 +68,10 @@
shared_libs: [
"libbinder",
"libmedia",
+ "libmedia_omx",
"libmedia_jni",
"libmediadrm",
- "libskia",
+ "libmediaextractor",
"libstagefright",
"libstagefright_foundation",
"liblog",
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 87b649a..20b1667 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -37,8 +37,8 @@
mTimestamp(timestamp), mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
}
-// Can only be called by free() with mLock hold
AImage::~AImage() {
+ Mutex::Autolock _l(mLock);
if (!mIsClosed) {
LOG_ALWAYS_FATAL(
"Error: AImage %p is deleted before returning buffer to AImageReader!", this);
@@ -53,7 +53,6 @@
void
AImage::close(int releaseFenceFd) {
- lockReader();
Mutex::Autolock _l(mLock);
if (mIsClosed) {
return;
@@ -71,7 +70,6 @@
mBuffer = nullptr;
mLockedBuffer = nullptr;
mIsClosed = true;
- unlockReader();
}
void
@@ -80,7 +78,6 @@
ALOGE("Cannot free AImage before close!");
return;
}
- Mutex::Autolock _l(mLock);
delete this;
}
@@ -622,7 +619,9 @@
void AImage_deleteAsync(AImage* image, int releaseFenceFd) {
ALOGV("%s", __FUNCTION__);
if (image != nullptr) {
+ image->lockReader();
image->close(releaseFenceFd);
+ image->unlockReader();
if (!image->isClosed()) {
LOG_ALWAYS_FATAL("Image close failed!");
}
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index e90783d..be635ff 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -349,8 +349,8 @@
for (auto it = mAcquiredImages.begin();
it != mAcquiredImages.end(); it++) {
AImage* image = *it;
+ Mutex::Autolock _l(image->mLock);
releaseImageLocked(image, /*releaseFenceFd*/-1);
- image->close();
}
// Delete Buffer Items
@@ -502,6 +502,8 @@
mBufferItemConsumer->releaseBuffer(*buffer, bufferFence);
returnBufferItemLocked(buffer);
image->mBuffer = nullptr;
+ image->mLockedBuffer = nullptr;
+ image->mIsClosed = true;
bool found = false;
// cleanup acquired image list
@@ -655,7 +657,7 @@
EXPORT
media_status_t AImageReader_getWindow(AImageReader* reader, /*out*/ANativeWindow** window) {
- ALOGE("%s", __FUNCTION__);
+ ALOGV("%s", __FUNCTION__);
if (reader == nullptr || window == nullptr) {
ALOGE("%s: invalid argument. reader %p, window %p",
__FUNCTION__, reader, window);
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index 989b937..bed8a21 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -85,7 +85,7 @@
// Called by AImageReader_acquireXXX to acquire a Buffer and setup AImage.
media_status_t acquireImageLocked(/*out*/AImage** image, /*out*/int* fenceFd);
- // Called by AImage to close image
+ // Called by AImage/~AImageReader to close image. Caller is responsible for grabbing AImage::mLock
void releaseImageLocked(AImage* image, int releaseFenceFd);
static int getBufferWidth(BufferItem* buffer);
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 128edba..6b20bca 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -52,6 +52,7 @@
enum {
kWhatActivityNotify,
+ kWhatAsyncNotify,
kWhatRequestActivityNotifications,
kWhatStopActivityNotifications,
};
@@ -88,6 +89,11 @@
bool mRequestedActivityNotification;
OnCodecEvent mCallback;
void *mCallbackUserData;
+
+ sp<AMessage> mAsyncNotify;
+ mutable Mutex mAsyncCallbackLock;
+ AMediaCodecOnAsyncNotifyCallback mAsyncCallback;
+ void *mAsyncCallbackUserData;
};
CodecHandler::CodecHandler(AMediaCodec *codec) {
@@ -128,6 +134,147 @@
break;
}
+ case kWhatAsyncNotify:
+ {
+ int32_t cbID;
+ if (!msg->findInt32("callbackID", &cbID)) {
+ ALOGE("kWhatAsyncNotify: callbackID is expected.");
+ break;
+ }
+
+ ALOGV("kWhatAsyncNotify: cbID = %d", cbID);
+
+ switch (cbID) {
+ case MediaCodec::CB_INPUT_AVAILABLE:
+ {
+ int32_t index;
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_INPUT_AVAILABLE: index is expected.");
+ break;
+ }
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncInputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncInputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_AVAILABLE:
+ {
+ int32_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ int32_t flags;
+
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: index is expected.");
+ break;
+ }
+ if (!msg->findSize("offset", &offset)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: offset is expected.");
+ break;
+ }
+ if (!msg->findSize("size", &size)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: size is expected.");
+ break;
+ }
+ if (!msg->findInt64("timeUs", &timeUs)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: timeUs is expected.");
+ break;
+ }
+ if (!msg->findInt32("flags", &flags)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: flags is expected.");
+ break;
+ }
+
+ AMediaCodecBufferInfo bufferInfo = {
+ (int32_t)offset,
+ (int32_t)size,
+ timeUs,
+ (uint32_t)flags};
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncOutputAvailable != NULL) {
+ mCodec->mAsyncCallback.onAsyncOutputAvailable(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ index,
+ &bufferInfo);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_FORMAT_CHANGED:
+ {
+ sp<AMessage> format;
+ if (!msg->findMessage("format", &format)) {
+ ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
+ break;
+ }
+
+ AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&format);
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncFormatChanged != NULL) {
+ mCodec->mAsyncCallback.onAsyncFormatChanged(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ aMediaFormat);
+ }
+
+ break;
+ }
+
+ case MediaCodec::CB_ERROR:
+ {
+ status_t err;
+ int32_t actionCode;
+ AString detail;
+ if (!msg->findInt32("err", &err)) {
+ ALOGE("CB_ERROR: err is expected.");
+ break;
+ }
+ if (!msg->findInt32("action", &actionCode)) {
+ ALOGE("CB_ERROR: action is expected.");
+ break;
+ }
+ msg->findString("detail", &detail);
+ ALOGE("Decoder reported error(0x%x), actionCode(%d), detail(%s)",
+ err, actionCode, detail.c_str());
+
+ Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+ if (mCodec->mAsyncCallbackUserData != NULL
+ || mCodec->mAsyncCallback.onAsyncError != NULL) {
+ mCodec->mAsyncCallback.onAsyncError(
+ mCodec,
+ mCodec->mAsyncCallbackUserData,
+ translate_error(err),
+ actionCode,
+ detail.c_str());
+ }
+
+ break;
+ }
+
+ default:
+ {
+ ALOGE("kWhatAsyncNotify: callbackID(%d) is unexpected.", cbID);
+ break;
+ }
+ }
+ break;
+ }
+
case kWhatStopActivityNotifications:
{
sp<AReplyToken> replyID;
@@ -162,7 +309,7 @@
size_t res = mData->mLooper->start(
false, // runOnCallingThread
true, // canCallJava XXX
- PRIORITY_FOREGROUND);
+ PRIORITY_AUDIO);
if (res != OK) {
ALOGE("Failed to start the looper");
AMediaCodec_delete(mData);
@@ -183,6 +330,9 @@
mData->mRequestedActivityNotification = false;
mData->mCallback = NULL;
+ mData->mAsyncCallback = {};
+ mData->mAsyncCallbackUserData = NULL;
+
return mData;
}
@@ -222,6 +372,32 @@
}
EXPORT
+media_status_t AMediaCodec_getName(
+ AMediaCodec *mData,
+ char** out_name) {
+ if (out_name == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ AString compName;
+ status_t err = mData->mCodec->getName(&compName);
+ if (err != OK) {
+ return translate_error(err);
+ }
+ *out_name = strdup(compName.c_str());
+ return AMEDIA_OK;
+}
+
+EXPORT
+void AMediaCodec_releaseName(
+ AMediaCodec * /* mData */,
+ char* name) {
+ if (name != NULL) {
+ free(name);
+ }
+}
+
+EXPORT
media_status_t AMediaCodec_configure(
AMediaCodec *mData,
const AMediaFormat* format,
@@ -236,8 +412,40 @@
surface = (Surface*) window;
}
- return translate_error(mData->mCodec->configure(nativeFormat, surface,
- crypto ? crypto->mCrypto : NULL, flags));
+ status_t err = mData->mCodec->configure(nativeFormat, surface,
+ crypto ? crypto->mCrypto : NULL, flags);
+ if (err != OK) {
+ ALOGE("configure: err(%d), failed with format: %s",
+ err, nativeFormat->debugString(0).c_str());
+ }
+ return translate_error(err);
+}
+
+EXPORT
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec *mData,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata) {
+ if (mData->mAsyncNotify == NULL && userdata != NULL) {
+ mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
+ status_t err = mData->mCodec->setCallback(mData->mAsyncNotify);
+ if (err != OK) {
+ ALOGE("setAsyncNotifyCallback: err(%d), failed to set async callback", err);
+ return translate_error(err);
+ }
+ }
+
+ Mutex::Autolock _l(mData->mAsyncCallbackLock);
+ mData->mAsyncCallback = callback;
+ mData->mAsyncCallbackUserData = userdata;
+
+ return AMEDIA_OK;
+}
+
+
+EXPORT
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec *mData) {
+ return translate_error(mData->mCodec->releaseCrypto());
}
EXPORT
@@ -282,6 +490,19 @@
EXPORT
uint8_t* AMediaCodec_getInputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getInputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getInputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -304,6 +525,19 @@
EXPORT
uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+ if (mData->mAsyncNotify != NULL) {
+ // Asynchronous mode
+ sp<MediaCodecBuffer> abuf;
+ if (mData->mCodec->getOutputBuffer(idx, &abuf) != 0) {
+ return NULL;
+ }
+
+ if (out_size != NULL) {
+ *out_size = abuf->capacity();
+ }
+ return abuf->data();
+ }
+
android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getOutputBuffers(&abufs) == 0) {
size_t n = abufs.size();
@@ -367,6 +601,20 @@
}
EXPORT
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec *mData) {
+ sp<AMessage> format;
+ mData->mCodec->getInputFormat(&format);
+ return AMediaFormat_fromMsg(&format);
+}
+
+EXPORT
+AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec *mData, size_t index) {
+ sp<AMessage> format;
+ mData->mCodec->getOutputFormat(index, &format);
+ return AMediaFormat_fromMsg(&format);
+}
+
+EXPORT
media_status_t AMediaCodec_releaseOutputBuffer(AMediaCodec *mData, size_t idx, bool render) {
if (render) {
return translate_error(mData->mCodec->renderOutputBufferAndRelease(idx));
@@ -535,6 +783,16 @@
return translate_error(err);
}
+EXPORT
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_RECOVERABLE);
+}
+
+EXPORT
+bool AMediaCodecActionCode_isTransient(int32_t actionCode) {
+ return (actionCode == ACTION_CODE_TRANSIENT);
+}
+
EXPORT
void AMediaCodecCryptoInfo_setPattern(AMediaCodecCryptoInfo *info,
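
Editor's note — illustrative usage sketch, not part of this patch: the NdkMediaCodec additions above introduce an asynchronous notification path (kWhatAsyncNotify) plus AMediaCodec_getName/releaseName and the action-code helpers. A condensed sketch of registering the callback struct; the callback bodies and the choice of userdata are illustrative, and note the patch only installs the MediaCodec callback when userdata is non-null.

    #include <media/NdkMediaCodec.h>
    #include <media/NdkMediaError.h>

    static void onInput(AMediaCodec* codec, void* userdata, int32_t index) {
        // fill and queue the buffer obtained via AMediaCodec_getInputBuffer(codec, index, ...)
    }
    static void onOutput(AMediaCodec* codec, void* userdata, int32_t index,
                         AMediaCodecBufferInfo* info) {
        AMediaCodec_releaseOutputBuffer(codec, index, false /* render */);
    }
    static void onFormat(AMediaCodec* codec, void* userdata, AMediaFormat* format) {
        // inspect the new output format
    }
    static void onError(AMediaCodec* codec, void* userdata, media_status_t err,
                        int32_t actionCode, const char* detail) {
        if (AMediaCodecActionCode_isRecoverable(actionCode) ||
            AMediaCodecActionCode_isTransient(actionCode)) {
            // retry paths; otherwise tear the codec down
        }
    }

    void useAsyncCodec(AMediaCodec* codec) {
        char* name = nullptr;
        if (AMediaCodec_getName(codec, &name) == AMEDIA_OK) {
            AMediaCodec_releaseName(codec, name);
        }

        AMediaCodecOnAsyncNotifyCallback cb;
        cb.onAsyncInputAvailable = onInput;
        cb.onAsyncOutputAvailable = onOutput;
        cb.onAsyncFormatChanged = onFormat;
        cb.onAsyncError = onError;
        AMediaCodec_setAsyncNotifyCallback(codec, cb, codec /* userdata, illustrative */);
        // configure + start as usual; callbacks arrive on the codec's handler thread.
    }
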
diff --git a/media/ndk/NdkMediaDataSource.cpp b/media/ndk/NdkMediaDataSource.cpp
new file mode 100644
index 0000000..9d00e5e
--- /dev/null
+++ b/media/ndk/NdkMediaDataSource.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkMediaDataSource"
+
+#include "NdkMediaDataSourcePriv.h"
+
+#include <inttypes.h>
+#include <jni.h>
+#include <unistd.h>
+
+#include <binder/IServiceManager.h>
+#include <cutils/properties.h>
+#include <utils/Log.h>
+#include <utils/StrongPointer.h>
+#include <media/NdkMediaError.h>
+#include <media/NdkMediaDataSource.h>
+#include <media/stagefright/InterfaceUtils.h>
+
+#include "../../libstagefright/include/HTTPBase.h"
+#include "../../libstagefright/include/NuCachedSource2.h"
+
+using namespace android;
+
+struct AMediaDataSource {
+ void *userdata;
+ AMediaDataSourceReadAt readAt;
+ AMediaDataSourceGetSize getSize;
+ AMediaDataSourceClose close;
+};
+
+NdkDataSource::NdkDataSource(AMediaDataSource *dataSource)
+ : mDataSource(AMediaDataSource_new()) {
+ AMediaDataSource_setReadAt(mDataSource, dataSource->readAt);
+ AMediaDataSource_setGetSize(mDataSource, dataSource->getSize);
+ AMediaDataSource_setClose(mDataSource, dataSource->close);
+ AMediaDataSource_setUserdata(mDataSource, dataSource->userdata);
+}
+
+NdkDataSource::~NdkDataSource() {
+ AMediaDataSource_delete(mDataSource);
+}
+
+status_t NdkDataSource::initCheck() const {
+ return OK;
+}
+
+ssize_t NdkDataSource::readAt(off64_t offset, void *data, size_t size) {
+ Mutex::Autolock l(mLock);
+ if (mDataSource->getSize == NULL || mDataSource->userdata == NULL) {
+ return -1;
+ }
+ return mDataSource->readAt(mDataSource->userdata, offset, data, size);
+}
+
+status_t NdkDataSource::getSize(off64_t *size) {
+ Mutex::Autolock l(mLock);
+ if (mDataSource->getSize == NULL || mDataSource->userdata == NULL) {
+ return NO_INIT;
+ }
+ if (size != NULL) {
+ *size = mDataSource->getSize(mDataSource->userdata);
+ }
+ return OK;
+}
+
+String8 NdkDataSource::toString() {
+ return String8::format("NdkDataSource(pid %d, uid %d)", getpid(), getuid());
+}
+
+String8 NdkDataSource::getMIMEType() const {
+ return String8("application/octet-stream");
+}
+
+void NdkDataSource::close() {
+ if (mDataSource->close != NULL && mDataSource->userdata != NULL) {
+ mDataSource->close(mDataSource->userdata);
+ }
+}
+
+extern "C" {
+
+EXPORT
+AMediaDataSource* AMediaDataSource_new() {
+ AMediaDataSource *mSource = new AMediaDataSource();
+ mSource->userdata = NULL;
+ mSource->readAt = NULL;
+ mSource->getSize = NULL;
+ mSource->close = NULL;
+ return mSource;
+}
+
+EXPORT
+void AMediaDataSource_delete(AMediaDataSource *mSource) {
+ ALOGV("dtor");
+ if (mSource != NULL) {
+ delete mSource;
+ }
+}
+
+EXPORT
+void AMediaDataSource_setUserdata(AMediaDataSource *mSource, void *userdata) {
+ mSource->userdata = userdata;
+}
+
+EXPORT
+void AMediaDataSource_setReadAt(AMediaDataSource *mSource, AMediaDataSourceReadAt readAt) {
+ mSource->readAt = readAt;
+}
+
+EXPORT
+void AMediaDataSource_setGetSize(AMediaDataSource *mSource, AMediaDataSourceGetSize getSize) {
+ mSource->getSize = getSize;
+}
+
+EXPORT
+void AMediaDataSource_setClose(AMediaDataSource *mSource, AMediaDataSourceClose close) {
+ mSource->close = close;
+}
+
+} // extern "C"
+
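
Editor's note — illustrative usage sketch, not part of this patch: the new NdkMediaDataSource.cpp above exposes a C-callback data source that NdkDataSource adapts to the framework DataSource; since the wrapper copies the callbacks into its own handle, the caller's AMediaDataSource can be deleted after hookup. The FILE-backed callbacks below are illustrative stand-ins.

    #include <cstdio>
    #include <media/NdkMediaDataSource.h>
    #include <media/NdkMediaExtractor.h>

    static ssize_t fileReadAt(void* userdata, off64_t offset, void* buffer, size_t size) {
        FILE* f = static_cast<FILE*>(userdata);
        if (fseeko(f, offset, SEEK_SET) != 0) return -1;
        size_t n = fread(buffer, 1, size, f);
        return n > 0 ? static_cast<ssize_t>(n) : -1;
    }
    static ssize_t fileGetSize(void* userdata) {
        FILE* f = static_cast<FILE*>(userdata);
        fseeko(f, 0, SEEK_END);
        return static_cast<ssize_t>(ftello(f));
    }
    static void fileClose(void* userdata) {
        fclose(static_cast<FILE*>(userdata));
    }

    media_status_t openWithCustomSource(AMediaExtractor* ex, const char* path) {
        FILE* f = fopen(path, "rb");
        if (f == nullptr) return AMEDIA_ERROR_IO;

        AMediaDataSource* src = AMediaDataSource_new();
        AMediaDataSource_setUserdata(src, f);
        AMediaDataSource_setReadAt(src, fileReadAt);
        AMediaDataSource_setGetSize(src, fileGetSize);
        AMediaDataSource_setClose(src, fileClose);

        // AMediaExtractor_setDataSourceCustom is added further down in this patch.
        media_status_t err = AMediaExtractor_setDataSourceCustom(ex, src);
        AMediaDataSource_delete(src);   // NdkDataSource holds its own copy of the callbacks
        return err;
    }
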
diff --git a/media/ndk/NdkMediaDataSourcePriv.h b/media/ndk/NdkMediaDataSourcePriv.h
new file mode 100644
index 0000000..ea9c865
--- /dev/null
+++ b/media/ndk/NdkMediaDataSourcePriv.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+
+#ifndef _NDK_MEDIA_DATASOURCE_PRIV_H
+#define _NDK_MEDIA_DATASOURCE_PRIV_H
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+#include <media/DataSource.h>
+#include <media/NdkMediaDataSource.h>
+#include <utils/Mutex.h>
+#include <utils/String8.h>
+
+using namespace android;
+
+struct NdkDataSource : public DataSource {
+
+ NdkDataSource(AMediaDataSource *);
+
+ virtual status_t initCheck() const;
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+ virtual status_t getSize(off64_t *);
+ virtual String8 toString();
+ virtual String8 getMIMEType() const;
+ virtual void close();
+
+protected:
+ virtual ~NdkDataSource();
+
+private:
+
+ Mutex mLock;
+ AMediaDataSource *mDataSource;
+
+};
+
+#endif // _NDK_MEDIA_DATASOURCE_PRIV_H
+
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index eecc858..6d10f1c 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -224,7 +224,7 @@
static bool findId(AMediaDrm *mObj, const AMediaDrmByteArray &id, List<idvec_t>::iterator &iter) {
for (iter = mObj->mIds.begin(); iter != mObj->mIds.end(); ++iter) {
- if (iter->array() == id.ptr && iter->size() == id.length) {
+ if (id.length == iter->size() && memcmp(iter->array(), id.ptr, iter->size()) == 0) {
return true;
}
}
@@ -240,7 +240,7 @@
return AMEDIA_ERROR_INVALID_PARAMETER;
}
Vector<uint8_t> session;
- status_t status = mObj->mDrm->openSession(session);
+ status_t status = mObj->mDrm->openSession(DrmPlugin::kSecurityLevelMax, session);
if (status == OK) {
mObj->mIds.push_front(session);
List<idvec_t>::iterator iter = mObj->mIds.begin();
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index e677d00..b5e60a4 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -20,6 +20,7 @@
#include <media/NdkMediaError.h>
#include <media/NdkMediaExtractor.h>
+#include "NdkMediaDataSourcePriv.h"
#include "NdkMediaFormatPriv.h"
@@ -42,7 +43,12 @@
static media_status_t translate_error(status_t err) {
if (err == OK) {
return AMEDIA_OK;
+ } else if (err == ERROR_END_OF_STREAM) {
+ return AMEDIA_ERROR_END_OF_STREAM;
+ } else if (err == ERROR_IO) {
+ return AMEDIA_ERROR_IO;
}
+
ALOGE("sf error code: %d", err);
return AMEDIA_ERROR_UNKNOWN;
}
@@ -50,7 +56,6 @@
struct AMediaExtractor {
sp<NuMediaExtractor> mImpl;
sp<ABuffer> mPsshBuf;
-
};
extern "C" {
@@ -121,6 +126,18 @@
}
EXPORT
+media_status_t AMediaExtractor_setDataSourceCustom(AMediaExtractor* mData, AMediaDataSource *src) {
+ return translate_error(mData->mImpl->setDataSource(new NdkDataSource(src)));
+}
+
+EXPORT
+AMediaFormat* AMediaExtractor_getFileFormat(AMediaExtractor *mData) {
+ sp<AMessage> format;
+ mData->mImpl->getFileFormat(&format);
+ return AMediaFormat_fromMsg(&format);
+}
+
+EXPORT
size_t AMediaExtractor_getTrackCount(AMediaExtractor *mData) {
return mData->mImpl->countTracks();
}
@@ -182,6 +199,16 @@
}
EXPORT
+ssize_t AMediaExtractor_getSampleSize(AMediaExtractor *mData) {
+ size_t sampleSize;
+ status_t err = mData->mImpl->getSampleSize(&sampleSize);
+ if (err != OK) {
+ return -1;
+ }
+ return sampleSize;
+}
+
+EXPORT
uint32_t AMediaExtractor_getSampleFlags(AMediaExtractor *mData) {
int sampleFlags = 0;
sp<MetaData> meta;
@@ -379,6 +406,80 @@
(size_t*) crypteddata);
}
+EXPORT
+int64_t AMediaExtractor_getCachedDuration(AMediaExtractor *ex) {
+ bool eos;
+ int64_t durationUs;
+ if (ex->mImpl->getCachedDuration(&durationUs, &eos)) {
+ return durationUs;
+ }
+ return -1;
+}
+
+EXPORT
+media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex, AMediaFormat *fmt) {
+ if (fmt == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ sp<MetaData> sampleMeta;
+ status_t err = ex->mImpl->getSampleMeta(&sampleMeta);
+ if (err != OK) {
+ return translate_error(err);
+ }
+
+ sp<AMessage> meta;
+ AMediaFormat_getFormat(fmt, &meta);
+ meta->clear();
+
+ int32_t layerId;
+ if (sampleMeta->findInt32(kKeyTemporalLayerId, &layerId)) {
+ meta->setInt32(AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID, layerId);
+ }
+
+ size_t trackIndex;
+ err = ex->mImpl->getSampleTrackIndex(&trackIndex);
+ if (err == OK) {
+ meta->setInt32(AMEDIAFORMAT_KEY_TRACK_INDEX, trackIndex);
+ sp<AMessage> trackFormat;
+ AString mime;
+ err = ex->mImpl->getTrackFormat(trackIndex, &trackFormat);
+ if (err == OK
+ && trackFormat != NULL
+ && trackFormat->findString(AMEDIAFORMAT_KEY_MIME, &mime)) {
+ meta->setString(AMEDIAFORMAT_KEY_MIME, mime);
+ }
+ }
+
+ int64_t durationUs;
+ if (sampleMeta->findInt64(kKeyDuration, &durationUs)) {
+ meta->setInt64(AMEDIAFORMAT_KEY_DURATION, durationUs);
+ }
+
+ uint32_t dataType; // unused
+ const void *seiData;
+ size_t seiLength;
+ if (sampleMeta->findData(kKeySEI, &dataType, &seiData, &seiLength)) {
+ sp<ABuffer> sei = ABuffer::CreateAsCopy(seiData, seiLength);
+ meta->setBuffer(AMEDIAFORMAT_KEY_SEI, sei);
+ }
+
+ const void *mpegUserDataPointer;
+ size_t mpegUserDataLength;
+ if (sampleMeta->findData(
+ kKeyMpegUserData, &dataType, &mpegUserDataPointer, &mpegUserDataLength)) {
+ sp<ABuffer> mpegUserData = ABuffer::CreateAsCopy(mpegUserDataPointer, mpegUserDataLength);
+ meta->setBuffer(AMEDIAFORMAT_KEY_MPEG_USER_DATA, mpegUserData);
+ }
+
+ return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AMediaExtractor_disconnect(AMediaExtractor * ex) {
+ ex->mImpl->disconnect();
+ return AMEDIA_OK;
+}
} // extern "C"
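
Editor's note — illustrative usage sketch, not part of this patch: the extractor additions above (getFileFormat, getSampleSize, getCachedDuration, getSampleFormat, disconnect) can be combined into a simple read loop. Error handling is condensed and the buffer size is arbitrary.

    #include <cinttypes>
    #include <cstdio>
    #include <media/NdkMediaExtractor.h>
    #include <media/NdkMediaFormat.h>

    void dumpSamples(AMediaExtractor* ex) {
        AMediaFormat* fileFormat = AMediaExtractor_getFileFormat(ex);
        printf("container: %s\n", AMediaFormat_toString(fileFormat));
        AMediaFormat_delete(fileFormat);

        AMediaFormat* sampleFormat = AMediaFormat_new();
        uint8_t buffer[64 * 1024];

        while (true) {
            ssize_t size = AMediaExtractor_getSampleSize(ex);   // -1 when there is no current sample
            if (size < 0 || static_cast<size_t>(size) > sizeof(buffer)) break;

            ssize_t read = AMediaExtractor_readSampleData(ex, buffer, sizeof(buffer));
            if (read < 0) break;

            if (AMediaExtractor_getSampleFormat(ex, sampleFormat) == AMEDIA_OK) {
                int32_t track = -1;
                AMediaFormat_getInt32(sampleFormat, AMEDIAFORMAT_KEY_TRACK_INDEX, &track);
                printf("sample of %zd bytes on track %d, cached %" PRId64 " us\n",
                       read, track, AMediaExtractor_getCachedDuration(ex));
            }
            if (!AMediaExtractor_advance(ex)) break;
        }
        AMediaFormat_delete(sampleFormat);
    }
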
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index ee27520..f32b83e 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -25,7 +25,6 @@
#include <utils/StrongPointer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MetaData.h>
#include <android_runtime/AndroidRuntime.h>
#include <android_util_Binder.h>
@@ -125,6 +124,14 @@
ret.appendFormat("double(%f)", val);
break;
}
+ case AMessage::kTypeRect:
+ {
+ int32_t left, top, right, bottom;
+ f->findRect(name, &left, &top, &right, &bottom);
+ ret.appendFormat("Rect(%" PRId32 ", %" PRId32 ", %" PRId32 ", %" PRId32 ")",
+ left, top, right, bottom);
+ break;
+ }
case AMessage::kTypeString:
{
AString val;
@@ -165,11 +172,22 @@
}
EXPORT
+bool AMediaFormat_getDouble(AMediaFormat* format, const char *name, double *out) {
+ return format->mFormat->findDouble(name, out);
+}
+
+EXPORT
bool AMediaFormat_getSize(AMediaFormat* format, const char *name, size_t *out) {
return format->mFormat->findSize(name, out);
}
EXPORT
+bool AMediaFormat_getRect(AMediaFormat* format, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) {
+ return format->mFormat->findRect(name, left, top, right, bottom);
+}
+
+EXPORT
bool AMediaFormat_getBuffer(AMediaFormat* format, const char *name, void** data, size_t *outsize) {
sp<ABuffer> buf;
if (format->mFormat->findBuffer(name, &buf)) {
@@ -216,6 +234,22 @@
}
EXPORT
+void AMediaFormat_setDouble(AMediaFormat* format, const char* name, double value) {
+ format->mFormat->setDouble(name, value);
+}
+
+EXPORT
+void AMediaFormat_setSize(AMediaFormat* format, const char* name, size_t value) {
+ format->mFormat->setSize(name, value);
+}
+
+EXPORT
+void AMediaFormat_setRect(AMediaFormat* format, const char *name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom) {
+ format->mFormat->setRect(name, left, top, right, bottom);
+}
+
+EXPORT
void AMediaFormat_setString(AMediaFormat* format, const char* name, const char* value) {
// AMessage::setString() makes a copy of the string
format->mFormat->setString(name, value, strlen(value));
@@ -233,30 +267,72 @@
}
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR = "aac-drc-cut-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR = "aac-drc-boost-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION = "aac-drc-heavy-compression";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL = "aac-target-ref-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL = "aac-encoded-target-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT = "aac-max-output-channel_count";
EXPORT const char* AMEDIAFORMAT_KEY_AAC_PROFILE = "aac-profile";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE = "aac-sbr-mode";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID = "audio-session-id";
+EXPORT const char* AMEDIAFORMAT_KEY_BITRATE_MODE = "bitrate-mode";
EXPORT const char* AMEDIAFORMAT_KEY_BIT_RATE = "bitrate";
+EXPORT const char* AMEDIAFORMAT_KEY_CAPTURE_RATE = "capture-rate";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT = "channel-count";
EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_MASK = "channel-mask";
EXPORT const char* AMEDIAFORMAT_KEY_COLOR_FORMAT = "color-format";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_RANGE = "color-range";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_STANDARD = "color-standard";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER = "color-transfer";
+EXPORT const char* AMEDIAFORMAT_KEY_COMPLEXITY = "complexity";
+EXPORT const char* AMEDIAFORMAT_KEY_CSD = "csd";
+EXPORT const char* AMEDIAFORMAT_KEY_CSD_0 = "csd-0";
+EXPORT const char* AMEDIAFORMAT_KEY_CSD_1 = "csd-1";
+EXPORT const char* AMEDIAFORMAT_KEY_CSD_2 = "csd-2";
+EXPORT const char* AMEDIAFORMAT_KEY_DISPLAY_CROP = "crop";
+EXPORT const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT = "display-height";
+EXPORT const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH = "display-width";
EXPORT const char* AMEDIAFORMAT_KEY_DURATION = "durationUs";
EXPORT const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL = "flac-compression-level";
EXPORT const char* AMEDIAFORMAT_KEY_FRAME_RATE = "frame-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLUMNS = "grid-cols";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
+EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
+EXPORT const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD = "intra-refresh-period";
EXPORT const char* AMEDIAFORMAT_KEY_IS_ADTS = "is-adts";
EXPORT const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT = "is-autoselect";
EXPORT const char* AMEDIAFORMAT_KEY_IS_DEFAULT = "is-default";
EXPORT const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE = "is-forced-subtitle";
EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
+EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
+EXPORT const char* AMEDIAFORMAT_KEY_LEVEL = "level";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_WIDTH = "max-width";
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA = "mpeg-user-data";
+EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
+EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
+EXPORT const char* AMEDIAFORMAT_KEY_PROFILE = "profile";
EXPORT const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP = "push-blank-buffers-on-shutdown";
EXPORT const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER = "repeat-previous-frame-after";
+EXPORT const char* AMEDIAFORMAT_KEY_ROTATION = "rotation-degrees";
EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_RATE = "sample-rate";
-EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
+EXPORT const char* AMEDIAFORMAT_KEY_SEI = "sei";
+EXPORT const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT = "slice-height";
EXPORT const char* AMEDIAFORMAT_KEY_STRIDE = "stride";
+EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID = "temporal-layer-id";
+EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING = "ts-schema";
+EXPORT const char* AMEDIAFORMAT_KEY_TILE_HEIGHT = "tile-height";
+EXPORT const char* AMEDIAFORMAT_KEY_TILE_WIDTH = "tile-width";
+EXPORT const char* AMEDIAFORMAT_KEY_TIME_US = "timeUs";
+EXPORT const char* AMEDIAFORMAT_KEY_TRACK_ID = "track-id";
+EXPORT const char* AMEDIAFORMAT_KEY_TRACK_INDEX = "track-index";
+EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
} // extern "C"
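
A short sketch of the new rect/double accessors implemented above; the key choices and values are illustrative only:

    #include <media/NdkMediaFormat.h>

    void rectRoundTrip() {
        AMediaFormat *fmt = AMediaFormat_new();
        AMediaFormat_setRect(fmt, AMEDIAFORMAT_KEY_DISPLAY_CROP, 0, 0, 1919, 1079);
        AMediaFormat_setDouble(fmt, AMEDIAFORMAT_KEY_CAPTURE_RATE, 29.97);

        int32_t left, top, right, bottom;
        if (AMediaFormat_getRect(fmt, AMEDIAFORMAT_KEY_DISPLAY_CROP,
                                 &left, &top, &right, &bottom)) {
            // The rectangle round-trips through the underlying AMessage kTypeRect entry.
        }
        double captureRate = 0.0;
        AMediaFormat_getDouble(fmt, AMEDIAFORMAT_KEY_CAPTURE_RATE, &captureRate);
        AMediaFormat_delete(fmt);
    }
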
diff --git a/media/ndk/NdkMediaMuxer.cpp b/media/ndk/NdkMediaMuxer.cpp
index 80a4391..dffc4d7 100644
--- a/media/ndk/NdkMediaMuxer.cpp
+++ b/media/ndk/NdkMediaMuxer.cpp
@@ -27,7 +27,6 @@
#include <utils/StrongPointer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaMuxer.h>
#include <media/IMediaHTTPService.h>
#include <android_runtime/AndroidRuntime.h>
diff --git a/media/ndk/OWNERS b/media/ndk/OWNERS
new file mode 100644
index 0000000..11e8340
--- /dev/null
+++ b/media/ndk/OWNERS
@@ -0,0 +1,5 @@
+marcone@google.com
+# For AImage/AImageReader
+etalvala@google.com
+yinchiayeh@google.com
+zhijunhe@google.com
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 99cf5d5..19df760 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -72,14 +72,15 @@
AIMAGE_FORMAT_RGBA_8888 = 0x1,
/**
- * 32 bits RGBX format, 8 bits for each of the four channels.
+ * 32 bits RGBX format, 8 bits for each of the four channels. The values
+ * of the alpha channel bits are ignored (image is assumed to be opaque).
*
* <p>
* Corresponding formats:
* <ul>
* <li>AHardwareBuffer: AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM</li>
* <li>Vulkan: VK_FORMAT_R8G8B8A8_UNORM</li>
- * <li>OpenGL ES: GL_RGBA8</li>
+ * <li>OpenGL ES: GL_RGB8</li>
* </ul>
* </p>
*
@@ -717,7 +718,7 @@
#if __ANDROID_API__ >= 26
-/*
+/**
* Return the image back to the system and delete the AImage object from memory asynchronously.
*
* <p>Similar to {@link AImage_delete}, do NOT use the image pointer after this method returns.
@@ -746,8 +747,9 @@
* AHardwareBuffer_acquire} to acquire an extra reference, and call {@link AHardwareBuffer_release}
* once it has finished using it in order to properly deallocate the underlying memory managed by
* {@link AHardwareBuffer}. If the caller has acquired extra reference on an {@link AHardwareBuffer}
- * returned from this function, it must also listen to {@link onBufferFreed} callback to be
- * notified when the buffer is no longer used by {@link AImageReader}.</p>
+ * returned from this function, it must also register a listener using the function
+ * {@link AImageReader_setBufferRemovedListener} to be notified when the buffer is no longer used
+ * by {@link AImageReader}.</p>
*
* @param image the {@link AImage} of interest.
* @param outBuffer The memory area pointed to by buffer will contain the acquired AHardwareBuffer
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index a8667c9..571410b 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -256,7 +256,7 @@
/**
- * The definition of {@link AImageReader} new image available callback.
+ * Signature of the callback which is called when a new image is available from {@link AImageReader}.
*
* @param context The optional application context provided by user in
* {@link AImageReader_setImageListener}.
@@ -265,11 +265,11 @@
typedef void (*AImageReader_ImageCallback)(void* context, AImageReader* reader);
typedef struct AImageReader_ImageListener {
- /// optional application context.
+ /// Optional application context passed as the first parameter of the callback.
void* context;
/**
- * This callback is called when there is a new image available for in the image reader's queue.
+ * This callback is called when there is a new image available in the image reader's queue.
*
* <p>The callback happens on one dedicated thread per {@link AImageReader} instance. It is okay
* to use AImageReader_* and AImage_* methods within the callback. Note that it is possible that
@@ -285,11 +285,11 @@
/**
* Set the onImageAvailable listener of this image reader.
*
- * <p>Note that calling this method will replace previously registered listeners.</p>
+ * Calling this method will replace previously registered listeners.
*
* @param reader The image reader of interest.
- * @param listener the {@link AImageReader_ImageListener} to be registered. Set this to NULL if
- * application no longer needs to listen to new images.
+ * @param listener The {@link AImageReader_ImageListener} to be registered. Set this to NULL if
+ * the application no longer needs to listen to new images.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -305,9 +305,9 @@
/**
* AImageReader constructor similar to {@link AImageReader_new} that takes an additional parameter
* for the consumer usage. All other parameters and the return values are identical to those passed
- * to {@line AImageReader_new}.
+ * to {@link AImageReader_new}.
*
- * <p>If the {@code format} is {@link AIMAGE_FORMAT_PRIVATE}, the created {@link AImageReader}
+ * <p>If the \c format is {@link AIMAGE_FORMAT_PRIVATE}, the created {@link AImageReader}
* will produce images whose contents are not directly accessible by the application. The application can
* still acquire images from this {@link AImageReader} and access {@link AHardwareBuffer} via
* {@link AImage_getHardwareBuffer()}. The {@link AHardwareBuffer} gained this way can then
@@ -322,7 +322,7 @@
* AImageReader}s using other format such as {@link AIMAGE_FORMAT_YUV_420_888}.</p>
*
* <p>Note that not all format and usage flag combination is supported by the {@link AImageReader},
- * especially if {@code format} is {@link AIMAGE_FORMAT_PRIVATE}, {@code usage} must not include either
+ * especially if \c format is {@link AIMAGE_FORMAT_PRIVATE}, \c usage must not include either
* {@link AHARDWAREBUFFER_USAGE_READ_RARELY} or {@link AHARDWAREBUFFER_USAGE_READ_OFTEN}</p>
*
* @param width The default width in pixels of the Images that this reader will produce.
@@ -367,7 +367,7 @@
int32_t width, int32_t height, int32_t format, uint64_t usage, int32_t maxImages,
/*out*/ AImageReader** reader);
-/*
+/**
* Acquire the next {@link AImage} from the image reader's queue asynchronously.
*
* <p>AImageReader acquire method similar to {@link AImageReader_acquireNextImage} that takes an
@@ -377,7 +377,7 @@
* @param acquireFenceFd A sync fence fd defined in {@link sync.h}, which is used to signal when the
* buffer is ready to consume. When synchronization fence is not needed, fence will be set
* to -1 and the {@link AImage} returned is ready for use immediately. Otherwise, user shall
- * use syscalls such as {@code poll()}, {@code epoll()}, {@code select()} to wait for the
+ * use syscalls such as \c poll(), \c epoll(), \c select() to wait for the
* fence fd to change status before attempting to access the {@link AImage} returned.
*
* @see sync.h
@@ -386,7 +386,7 @@
media_status_t AImageReader_acquireNextImageAsync(
AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd);
-/*
+/**
* Acquire the latest {@link AImage} from the image reader's queue asynchronously, dropping older
* images.
*
@@ -397,7 +397,7 @@
* @param acquireFenceFd A sync fence fd defined in {@link sync.h}, which is used to signal when the
* buffer is ready to consume. When synchronization fence is not needed, fence will be set
* to -1 and the {@link AImage} returned is ready for use immediately. Otherwise, user shall
- * use syscalls such as {@code poll()}, {@code epoll()}, {@code select()} to wait for the
+ * use syscalls such as \c poll(), \c epoll(), \c select() to wait for the
* fence fd to change status before attempting to access the {@link AImage} returned.
*
* @see sync.h
@@ -406,7 +406,7 @@
media_status_t AImageReader_acquireLatestImageAsync(
AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd);
/**
- * The definition of {@link AImageReader} buffer removed callback.
+ * Signature of the callback which is called when {@link AImageReader} is about to remove a buffer.
*
* @param context The optional application context provided by user in
* {@link AImageReader_setBufferRemovedListener}.
@@ -418,7 +418,7 @@
AHardwareBuffer* buffer);
typedef struct AImageReader_BufferRemovedListener {
- /// optional application context.
+ /// Optional application context passed as the first parameter of the callback.
void* context;
/**
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index 33cd7af..c49582d 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -62,11 +62,63 @@
typedef struct AMediaCodecCryptoInfo AMediaCodecCryptoInfo;
enum {
+ AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG = 2,
AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM = 4,
+ AMEDIACODEC_BUFFER_FLAG_PARTIAL_FRAME = 8,
+
AMEDIACODEC_CONFIGURE_FLAG_ENCODE = 1,
AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED = -3,
AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED = -2,
- AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1
+ AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1,
+};
+
+/**
+ * Called when an input buffer becomes available.
+ * The specified index is the index of the available input buffer.
+ */
+typedef void (*AMediaCodecOnAsyncInputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index);
+/**
+ * Called when an output buffer becomes available.
+ * The specified index is the index of the available output buffer.
+ * The specified bufferInfo contains information regarding the available output buffer.
+ */
+typedef void (*AMediaCodecOnAsyncOutputAvailable)(
+ AMediaCodec *codec,
+ void *userdata,
+ int32_t index,
+ AMediaCodecBufferInfo *bufferInfo);
+/**
+ * Called when the output format has changed.
+ * The specified format contains the new output format.
+ */
+typedef void (*AMediaCodecOnAsyncFormatChanged)(
+ AMediaCodec *codec,
+ void *userdata,
+ AMediaFormat *format);
+/**
+ * Called when the MediaCodec encounters an error.
+ * The specified actionCode indicates the possible actions that the client can take,
+ * and it can be checked by calling AMediaCodecActionCode_isRecoverable or
+ * AMediaCodecActionCode_isTransient. If both AMediaCodecActionCode_isRecoverable()
+ * and AMediaCodecActionCode_isTransient() return false, then the codec error is fatal
+ * and the codec must be deleted.
+ * The specified detail may contain more detailed messages about this error.
+ */
+typedef void (*AMediaCodecOnAsyncError)(
+ AMediaCodec *codec,
+ void *userdata,
+ media_status_t error,
+ int32_t actionCode,
+ const char *detail);
+
+struct AMediaCodecOnAsyncNotifyCallback {
+ AMediaCodecOnAsyncInputAvailable onAsyncInputAvailable;
+ AMediaCodecOnAsyncOutputAvailable onAsyncOutputAvailable;
+ AMediaCodecOnAsyncFormatChanged onAsyncFormatChanged;
+ AMediaCodecOnAsyncError onAsyncError;
};
#if __ANDROID_API__ >= 21
@@ -189,6 +241,12 @@
AMediaFormat* AMediaCodec_getOutputFormat(AMediaCodec*);
/**
+ * Get format of the buffer. The specified buffer index must have been previously obtained from
+ * dequeueOutputBuffer.
+ */
+AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec*, size_t index);
+
+/**
* If you are done with a buffer, use this call to return the buffer to
* the codec. If you previously specified a surface when configuring this
* video decoder you can optionally render the buffer.
@@ -292,6 +350,71 @@
#endif /* __ANDROID_API__ >= 26 */
+#if __ANDROID_API__ >= 28
+
+/**
+ * Get the component name. If the codec was created by createDecoderByType
+ * or createEncoderByType, which component is chosen is not known beforehand.
+ * The caller must call AMediaCodec_releaseName to free the returned pointer.
+ */
+media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name);
+
+/**
+ * Free the memory pointed to by name, which is returned by AMediaCodec_getName.
+ */
+void AMediaCodec_releaseName(AMediaCodec*, char* name);
+
+/**
+ * Set an asynchronous callback for actionable AMediaCodec events.
+ * When asynchronous callback is enabled, the client should not call
+ * AMediaCodec_getInputBuffers(), AMediaCodec_getOutputBuffers(),
+ * AMediaCodec_dequeueInputBuffer() or AMediaCodec_dequeueOutputBuffer().
+ *
+ * Also, AMediaCodec_flush() behaves differently in asynchronous mode.
+ * After calling AMediaCodec_flush(), you must call AMediaCodec_start() to
+ * "resume" receiving input buffers, even if an input surface was created.
+ *
+ * Refer to the definition of AMediaCodecOnAsyncNotifyCallback for how each
+ * callback function is called and what is passed to it.
+ * The specified userdata is the pointer passed to each of those callback
+ * functions when they are called.
+ *
+ * All callbacks are fired on one NDK internal thread.
+ * AMediaCodec_setAsyncNotifyCallback should not be called on the callback thread.
+ * No heavy-duty work should be performed on the callback thread.
+ */
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+ AMediaCodec*,
+ AMediaCodecOnAsyncNotifyCallback callback,
+ void *userdata);
+
+/**
+ * Release the crypto if applicable.
+ */
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec*);
+
+/**
+ * Call this after AMediaCodec_configure() returns successfully to get the input
+ * format accepted by the codec. Do this to determine what optional configuration
+ * parameters were supported by the codec.
+ */
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*);
+
+/**
+ * Returns true if the codec cannot proceed further, but can be recovered by stopping,
+ * configuring, and starting again.
+ */
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode);
+
+/**
+ * Returns true if the codec error is a transient issue, perhaps due to
+ * resource constraints, and the method (or encoding/decoding) may be
+ * retried at a later time.
+ */
+bool AMediaCodecActionCode_isTransient(int32_t actionCode);
+
+#endif /* __ANDROID_API__ >= 28 */
+
typedef enum {
AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
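
A hedged sketch of wiring up the asynchronous-notification API declared above; the codec setup, the decoderState pointer, and the work done inside each callback are assumptions, not part of this change:

    #include <media/NdkMediaCodec.h>

    static void onAsyncInput(AMediaCodec *codec, void *userdata, int32_t index) {
        // Fill the input buffer for |index| and queue it; keep this short, since all
        // callbacks share one NDK-internal thread.
    }
    static void onAsyncOutput(AMediaCodec *codec, void *userdata, int32_t index,
                              AMediaCodecBufferInfo *bufferInfo) {
        // Consume or render, then AMediaCodec_releaseOutputBuffer(codec, index, ...).
    }
    static void onAsyncFormat(AMediaCodec *codec, void *userdata, AMediaFormat *format) {
        // React to the new output format.
    }
    static void onAsyncError(AMediaCodec *codec, void *userdata, media_status_t error,
                             int32_t actionCode, const char *detail) {
        if (!AMediaCodecActionCode_isRecoverable(actionCode) &&
            !AMediaCodecActionCode_isTransient(actionCode)) {
            // Fatal: the codec must be deleted.
        }
    }

    static media_status_t enableAsyncMode(AMediaCodec *codec, void *decoderState) {
        AMediaCodecOnAsyncNotifyCallback cb;
        cb.onAsyncInputAvailable = onAsyncInput;
        cb.onAsyncOutputAvailable = onAsyncOutput;
        cb.onAsyncFormatChanged = onAsyncFormat;
        cb.onAsyncError = onAsyncError;
        media_status_t err = AMediaCodec_setAsyncNotifyCallback(codec, cb, decoderState);
        if (err != AMEDIA_OK) {
            return err;
        }
        return AMediaCodec_start(codec);   // buffers now arrive via the callbacks
    }
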
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
new file mode 100644
index 0000000..9e2e351
--- /dev/null
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+ * This file defines an NDK API.
+ * Do not remove methods.
+ * Do not change method signatures.
+ * Do not change the value of constants.
+ * Do not change the size of any of the classes defined in here.
+ * Do not reference types that are not part of the NDK.
+ * Do not #include files that aren't part of the NDK.
+ */
+
+#ifndef _NDK_MEDIA_DATASOURCE_H
+#define _NDK_MEDIA_DATASOURCE_H
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+#include <media/NdkMediaError.h>
+
+__BEGIN_DECLS
+
+struct AMediaDataSource;
+typedef struct AMediaDataSource AMediaDataSource;
+
+#if __ANDROID_API__ >= 28
+
+/*
+ * AMediaDataSource's callbacks will be invoked on an implementation-defined thread
+ * or thread pool. No guarantees are provided about which thread(s) will be used for
+ * callbacks. However, it is guaranteed that AMediaDataSource's callbacks will only
+ * ever be invoked by a single thread at a time.
+ *
+ * There will be a thread synchronization point between each call to ensure that
+ * modifications to the state of your AMediaDataSource are visible to future
+ * calls. This means you don't need to do your own synchronization unless you're
+ * modifying the AMediaDataSource from another thread while it's being used by the
+ * framework.
+ */
+
+/**
+ * Called to request data from the given |offset|.
+ *
+ * Implementations should write up to |size| bytes into
+ * |buffer|, and return the number of bytes written.
+ *
+ * Return 0 if size is zero (thus no bytes are read).
+ *
+ * Return -1 to indicate that end of stream is reached.
+ */
+typedef ssize_t (*AMediaDataSourceReadAt)(
+ void *userdata, off64_t offset, void * buffer, size_t size);
+
+/**
+ * Called to get the size of the data source.
+ *
+ * Return the size of data source in bytes, or -1 if the size is unknown.
+ */
+typedef ssize_t (*AMediaDataSourceGetSize)(void *userdata);
+
+/**
+ * Called to close the data source and release associated resources.
+ * The NDK media framework guarantees that after |close| is called
+ * no future callbacks will be invoked on the data source.
+ */
+typedef void (*AMediaDataSourceClose)(void *userdata);
+
+/**
+ * Create a new media data source. Returns NULL if memory allocation
+ * for the new data source object fails.
+ */
+AMediaDataSource* AMediaDataSource_new();
+
+/**
+ * Delete a previously created media data source.
+ */
+void AMediaDataSource_delete(AMediaDataSource*);
+
+/**
+ * Set a user-provided opaque handle. This opaque handle is passed as
+ * the first argument to the data source callbacks.
+ */
+void AMediaDataSource_setUserdata(
+ AMediaDataSource*, void *userdata);
+
+/**
+ * Set a custom callback for supplying random access media data to the
+ * NDK media framework.
+ *
+ * Implement this if your app has special requirements for the way media
+ * data is obtained, or if you need a callback when data is read by the
+ * NDK media framework.
+ *
+ * Please refer to the definition of AMediaDataSourceReadAt for
+ * additional details.
+ */
+void AMediaDataSource_setReadAt(
+ AMediaDataSource*,
+ AMediaDataSourceReadAt);
+
+/**
+ * Set a custom callback for supplying the size of the data source to the
+ * NDK media framework.
+ *
+ * Please refer to the definition of AMediaDataSourceGetSize for
+ * additional details.
+ */
+void AMediaDataSource_setGetSize(
+ AMediaDataSource*,
+ AMediaDataSourceGetSize);
+
+/**
+ * Set a custom callback to be notified by the NDK media framework
+ * when the data source is closed.
+ *
+ * Please refer to the definition of AMediaDataSourceClose for
+ * additional details.
+ */
+void AMediaDataSource_setClose(
+ AMediaDataSource*,
+ AMediaDataSourceClose);
+
+#endif /*__ANDROID_API__ >= 28 */
+
+__END_DECLS
+
+#endif // _NDK_MEDIA_DATASOURCE_H
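
A minimal sketch of putting a plain FILE* behind this interface; the stdio-backed helpers and the attachFileSource() wrapper are assumptions, and a production data source would add locking and fuller error handling:

    #include <stdio.h>
    #include <media/NdkMediaDataSource.h>
    #include <media/NdkMediaExtractor.h>

    static ssize_t fileReadAt(void *userdata, off64_t offset, void *buffer, size_t size) {
        FILE *fp = (FILE *) userdata;
        if (size == 0) return 0;
        if (fseeko(fp, (off_t) offset, SEEK_SET) != 0) return -1;
        size_t n = fread(buffer, 1, size, fp);
        return (n > 0) ? (ssize_t) n : -1;   // -1 signals end of stream
    }

    static ssize_t fileGetSize(void *userdata) {
        FILE *fp = (FILE *) userdata;
        if (fseeko(fp, 0, SEEK_END) != 0) return -1;
        return (ssize_t) ftello(fp);
    }

    static void fileClose(void *userdata) {
        fclose((FILE *) userdata);   // no further callbacks arrive after this
    }

    static media_status_t attachFileSource(AMediaExtractor *ex, FILE *fp,
                                           AMediaDataSource **outSrc) {
        AMediaDataSource *src = AMediaDataSource_new();
        AMediaDataSource_setUserdata(src, fp);
        AMediaDataSource_setReadAt(src, fileReadAt);
        AMediaDataSource_setGetSize(src, fileGetSize);
        AMediaDataSource_setClose(src, fileClose);
        *outSrc = src;   // keep the data source alive until the extractor is deleted
        return AMediaExtractor_setDataSourceCustom(ex, src);
    }
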
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index 7afb681..75f4605 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -43,6 +43,17 @@
typedef enum {
AMEDIA_OK = 0,
+ /**
+ * This indicates that a required resource could not be allocated.
+ */
+ AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE = 1100,
+
+ /**
+ * This indicates the resource manager reclaimed the media resource used by the codec.
+ * With this error, the codec must be released, as it has moved to a terminal state.
+ */
+ AMEDIACODEC_ERROR_RECLAIMED = 1101,
+
AMEDIA_ERROR_BASE = -10000,
AMEDIA_ERROR_UNKNOWN = AMEDIA_ERROR_BASE,
AMEDIA_ERROR_MALFORMED = AMEDIA_ERROR_BASE - 1,
@@ -50,6 +61,8 @@
AMEDIA_ERROR_INVALID_OBJECT = AMEDIA_ERROR_BASE - 3,
AMEDIA_ERROR_INVALID_PARAMETER = AMEDIA_ERROR_BASE - 4,
AMEDIA_ERROR_INVALID_OPERATION = AMEDIA_ERROR_BASE - 5,
+ AMEDIA_ERROR_END_OF_STREAM = AMEDIA_ERROR_BASE - 6,
+ AMEDIA_ERROR_IO = AMEDIA_ERROR_BASE - 7,
AMEDIA_DRM_ERROR_BASE = -20000,
AMEDIA_DRM_NOT_PROVISIONED = AMEDIA_DRM_ERROR_BASE - 1,
diff --git a/media/ndk/include/media/NdkMediaExtractor.h b/media/ndk/include/media/NdkMediaExtractor.h
index ad8f9da..3452cc9 100644
--- a/media/ndk/include/media/NdkMediaExtractor.h
+++ b/media/ndk/include/media/NdkMediaExtractor.h
@@ -40,6 +40,7 @@
#include <sys/types.h>
#include "NdkMediaCodec.h"
+#include "NdkMediaDataSource.h"
#include "NdkMediaFormat.h"
#include "NdkMediaCrypto.h"
@@ -72,6 +73,15 @@
media_status_t AMediaExtractor_setDataSource(AMediaExtractor*, const char *location);
// TODO support headers
+#if __ANDROID_API__ >= 28
+
+/**
+ * Set the custom data source implementation from which the extractor will read.
+ */
+media_status_t AMediaExtractor_setDataSourceCustom(AMediaExtractor*, AMediaDataSource *src);
+
+#endif /* __ANDROID_API__ >= 28 */
+
/**
* Return the number of tracks in the previously specified media file
*/
@@ -160,12 +170,60 @@
AMediaCodecCryptoInfo *AMediaExtractor_getSampleCryptoInfo(AMediaExtractor *);
-
enum {
AMEDIAEXTRACTOR_SAMPLE_FLAG_SYNC = 1,
AMEDIAEXTRACTOR_SAMPLE_FLAG_ENCRYPTED = 2,
};
+#if __ANDROID_API__ >= 28
+
+/**
+ * Returns the format of the extractor. The caller must free the returned format
+ * using AMediaFormat_delete(format).
+ *
+ * This function will always return a format; however, the format could be empty
+ * (no key-value pairs) if the media container does not provide format information.
+ */
+AMediaFormat* AMediaExtractor_getFileFormat(AMediaExtractor*);
+
+/**
+ * Returns the size of the current sample in bytes, or -1 when no samples are
+ * available (end of stream). This API can be used in conjunction with
+ * AMediaExtractor_readSampleData:
+ *
+ * ssize_t sampleSize = AMediaExtractor_getSampleSize(ex);
+ * uint8_t *buf = new uint8_t[sampleSize];
+ * AMediaExtractor_readSampleData(ex, buf, sampleSize);
+ *
+ */
+ssize_t AMediaExtractor_getSampleSize(AMediaExtractor*);
+
+/**
+ * Returns the duration of cached media samples downloaded from a network data source
+ * (AMediaExtractor_setDataSource with a "http(s)" URI) in microseconds.
+ *
+ * This information is calculated using total bitrate; if total bitrate is not in the
+ * media container it is calculated using total duration and file size.
+ *
+ * Returns -1 when the extractor is not reading from a network data source, or when the
+ * cached duration cannot be calculated (bitrate, duration, and file size information
+ * not available).
+ */
+int64_t AMediaExtractor_getCachedDuration(AMediaExtractor *);
+
+/**
+ * Read the current sample's metadata format into |fmt|. Examples of sample metadata are
+ * SEI (supplemental enhancement information) and MPEG user data, both of which can embed
+ * closed-caption data.
+ *
+ * Returns AMEDIA_OK on success or AMEDIA_ERROR_* to indicate the failure reason.
+ * Existing key-value pairs in |fmt| are removed if this API returns AMEDIA_OK.
+ * The contents of |fmt| are undefined if this API returns AMEDIA_ERROR_*.
+ */
+media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex, AMediaFormat *fmt);
+
+#endif /* __ANDROID_API__ >= 28 */
+
#endif /* __ANDROID_API__ >= 21 */
__END_DECLS
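
A brief sketch combining two of the new extractor queries above, assuming an AMediaExtractor *ex whose data source has already been set:

    AMediaFormat *fileFmt = AMediaExtractor_getFileFormat(ex);
    const char *containerMime = NULL;
    if (AMediaFormat_getString(fileFmt, AMEDIAFORMAT_KEY_MIME, &containerMime)) {
        // Container MIME type of the media file; the string is owned by fileFmt.
    }
    AMediaFormat_delete(fileFmt);

    int64_t cachedUs = AMediaExtractor_getCachedDuration(ex);
    if (cachedUs >= 0) {
        // Network source: roughly cachedUs microseconds are buffered ahead of playback.
    }
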
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 6c01d35..f510dff 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -60,6 +60,7 @@
bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out);
bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out);
bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out);
+bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out);
/**
* The returned data is owned by the format and remains valid as long as the named entry
* is part of the format.
@@ -89,33 +90,86 @@
/**
* XXX should these be ints/enums that we look up in a table as needed?
*/
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
+extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE;
+extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID;
+extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE;
extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
+extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
+extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE;
+extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD;
+extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER;
+extern const char* AMEDIAFORMAT_KEY_COMPLEXITY;
+extern const char* AMEDIAFORMAT_KEY_CSD;
+extern const char* AMEDIAFORMAT_KEY_CSD_0;
+extern const char* AMEDIAFORMAT_KEY_CSD_1;
+extern const char* AMEDIAFORMAT_KEY_CSD_2;
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP;
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_WIDTH;
extern const char* AMEDIAFORMAT_KEY_DURATION;
extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
+extern const char* AMEDIAFORMAT_KEY_GRID_COLUMNS;
+extern const char* AMEDIAFORMAT_KEY_GRID_ROWS;
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
extern const char* AMEDIAFORMAT_KEY_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD;
extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
+extern const char* AMEDIAFORMAT_KEY_LATENCY;
+extern const char* AMEDIAFORMAT_KEY_LEVEL;
extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
extern const char* AMEDIAFORMAT_KEY_MIME;
+extern const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA;
+extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE;
+extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING;
+extern const char* AMEDIAFORMAT_KEY_PRIORITY;
+extern const char* AMEDIAFORMAT_KEY_PROFILE;
extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
+extern const char* AMEDIAFORMAT_KEY_ROTATION;
extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_SEI;
+extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT;
extern const char* AMEDIAFORMAT_KEY_STRIDE;
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID;
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING;
+extern const char* AMEDIAFORMAT_KEY_TILE_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_TILE_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_TIME_US;
+extern const char* AMEDIAFORMAT_KEY_TRACK_ID;
+extern const char* AMEDIAFORMAT_KEY_TRACK_INDEX;
+extern const char* AMEDIAFORMAT_KEY_WIDTH;
#endif /* __ANDROID_API__ >= 21 */
+#if __ANDROID_API__ >= 28
+bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out);
+bool AMediaFormat_getRect(AMediaFormat*, const char *name,
+ int32_t *left, int32_t *top, int32_t *right, int32_t *bottom);
+
+void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value);
+void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value);
+void AMediaFormat_setRect(AMediaFormat*, const char* name,
+ int32_t left, int32_t top, int32_t right, int32_t bottom);
+#endif /* __ANDROID_API__ >= 28 */
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index d7ad370..fb56694 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -26,30 +26,63 @@
AImage_getPlaneRowStride; # introduced=24
AImage_getTimestamp; # introduced=24
AImage_getWidth; # introduced=24
+ AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
+ AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+ AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
+ AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
+ AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
AMEDIAFORMAT_KEY_BIT_RATE; # var
+ AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+ AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
+ AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
+ AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+ AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
AMEDIAFORMAT_KEY_DURATION; # var
AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
AMEDIAFORMAT_KEY_FRAME_RATE; # var
+ AMEDIAFORMAT_KEY_GRID_COLUMNS; # var introduced=28
+ AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
+ AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
AMEDIAFORMAT_KEY_HEIGHT; # var
+ AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
AMEDIAFORMAT_KEY_IS_ADTS; # var
AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
AMEDIAFORMAT_KEY_IS_DEFAULT; # var
AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
AMEDIAFORMAT_KEY_LANGUAGE; # var
+ AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
+ AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
AMEDIAFORMAT_KEY_MAX_WIDTH; # var
AMEDIAFORMAT_KEY_MIME; # var
+ AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
+ AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
+ AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
+ AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+ AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+ AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
AMEDIAFORMAT_KEY_STRIDE; # var
+ AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
+ AMEDIAFORMAT_KEY_TILE_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_TILE_WIDTH; # var introduced=28
+ AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
AMEDIAFORMAT_KEY_WIDTH; # var
+ AMediaCodecActionCode_isRecoverable; # introduced=28
+ AMediaCodecActionCode_isTransient; # introduced=28
AMediaCodecCryptoInfo_delete;
AMediaCodecCryptoInfo_getClearBytes;
AMediaCodecCryptoInfo_getEncryptedBytes;
@@ -68,12 +101,17 @@
AMediaCodec_dequeueOutputBuffer;
AMediaCodec_flush;
AMediaCodec_getInputBuffer;
+ AMediaCodec_getInputFormat; # introduced=28
+ AMediaCodec_getName; # introduced=28
AMediaCodec_getOutputBuffer;
AMediaCodec_getOutputFormat;
AMediaCodec_queueInputBuffer;
AMediaCodec_queueSecureInputBuffer;
+ AMediaCodec_releaseCrypto; # introduced=28
+ AMediaCodec_releaseName; # introduced=28
AMediaCodec_releaseOutputBuffer;
AMediaCodec_releaseOutputBufferAtTime;
+ AMediaCodec_setAsyncNotifyCallback; # introduced=28
AMediaCodec_setOutputSurface; # introduced=24
AMediaCodec_setParameters; # introduced=26
AMediaCodec_setInputSurface; # introduced=26
@@ -86,6 +124,12 @@
AMediaCrypto_isCryptoSchemeSupported;
AMediaCrypto_new;
AMediaCrypto_requiresSecureDecoderComponent;
+ AMediaDataSource_delete; # introduced=28
+ AMediaDataSource_new; # introduced=28
+ AMediaDataSource_setClose; # introduced=28
+ AMediaDataSource_setGetSize; # introduced=28
+ AMediaDataSource_setReadAt; # introduced=28
+ AMediaDataSource_setUserdata; # introduced=28
AMediaDrm_closeSession;
AMediaDrm_createByUUID;
AMediaDrm_decrypt;
@@ -111,9 +155,13 @@
AMediaDrm_verify;
AMediaExtractor_advance;
AMediaExtractor_delete;
+ AMediaExtractor_getCachedDuration; # introduced=28
+ AMediaExtractor_getFileFormat; # introduced=28
AMediaExtractor_getPsshInfo;
AMediaExtractor_getSampleCryptoInfo;
AMediaExtractor_getSampleFlags;
+ AMediaExtractor_getSampleFormat; # introduced=28
+ AMediaExtractor_getSampleSize; # introduced=28
AMediaExtractor_getSampleTime;
AMediaExtractor_getSampleTrackIndex;
AMediaExtractor_getTrackCount;
@@ -123,20 +171,26 @@
AMediaExtractor_seekTo;
AMediaExtractor_selectTrack;
AMediaExtractor_setDataSource;
+ AMediaExtractor_setDataSourceCustom; # introduced=28
AMediaExtractor_setDataSourceFd;
AMediaExtractor_unselectTrack;
AMediaFormat_delete;
AMediaFormat_getBuffer;
+ AMediaFormat_getDouble; # introduced=28
AMediaFormat_getFloat;
AMediaFormat_getInt32;
AMediaFormat_getInt64;
+ AMediaFormat_getRect; # introduced=28
AMediaFormat_getSize;
AMediaFormat_getString;
AMediaFormat_new;
AMediaFormat_setBuffer;
+ AMediaFormat_setDouble; # introduced=28
AMediaFormat_setFloat;
AMediaFormat_setInt32;
AMediaFormat_setInt64;
+ AMediaFormat_setRect; # introduced=28
+ AMediaFormat_setSize; # introduced=28
AMediaFormat_setString;
AMediaFormat_toString;
AMediaMuxer_addTrack;
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 72917dd..d6dae5b 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-cc_library_shared {
+cc_library {
name: "libmediautils",
srcs: [
@@ -24,7 +24,6 @@
],
shared_libs: [
"libbinder",
- "libcutils",
"liblog",
"libutils",
"libmemunreachable",
diff --git a/media/utils/ISchedulingPolicyService.cpp b/media/utils/ISchedulingPolicyService.cpp
index 22fbc97..b210404 100644
--- a/media/utils/ISchedulingPolicyService.cpp
+++ b/media/utils/ISchedulingPolicyService.cpp
@@ -25,6 +25,7 @@
// Keep in sync with frameworks/base/core/java/android/os/ISchedulingPolicyService.aidl
enum {
REQUEST_PRIORITY_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
+ REQUEST_CPUSET_BOOST,
};
// ----------------------------------------------------------------------
@@ -60,6 +61,23 @@
}
return reply.readInt32();
}
+
+ virtual int requestCpusetBoost(bool enable, const sp<IInterface>& client)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(ISchedulingPolicyService::getInterfaceDescriptor());
+ data.writeInt32(enable);
+ data.writeStrongBinder(IInterface::asBinder(client));
+ status_t status = remote()->transact(REQUEST_CPUSET_BOOST, data, &reply, 0);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ // fail on exception: force binder reconnection
+ if (reply.readExceptionCode() != 0) {
+ return DEAD_OBJECT;
+ }
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(SchedulingPolicyService, "android.os.ISchedulingPolicyService");
@@ -71,6 +89,7 @@
{
switch (code) {
case REQUEST_PRIORITY_TRANSACTION:
+ case REQUEST_CPUSET_BOOST:
// Not reached
return NO_ERROR;
break;
diff --git a/media/utils/ISchedulingPolicyService.h b/media/utils/ISchedulingPolicyService.h
index 1015677..e4f7c0d 100644
--- a/media/utils/ISchedulingPolicyService.h
+++ b/media/utils/ISchedulingPolicyService.h
@@ -29,6 +29,7 @@
virtual int requestPriority(/*pid_t*/int32_t pid, /*pid_t*/int32_t tid,
int32_t prio, bool isForApp, bool asynchronous) = 0;
+ virtual int requestCpusetBoost(bool enable, const sp<IInterface>& client) = 0;
};
class BnSchedulingPolicyService : public BnInterface<ISchedulingPolicyService>
diff --git a/media/utils/OWNERS b/media/utils/OWNERS
new file mode 100644
index 0000000..f9cb567
--- /dev/null
+++ b/media/utils/OWNERS
@@ -0,0 +1 @@
+gkasten@google.com
diff --git a/media/utils/SchedulingPolicyService.cpp b/media/utils/SchedulingPolicyService.cpp
index d7055ef..4e9792f 100644
--- a/media/utils/SchedulingPolicyService.cpp
+++ b/media/utils/SchedulingPolicyService.cpp
@@ -59,4 +59,31 @@
return ret;
}
+int requestCpusetBoost(bool enable, const sp<IInterface> &client)
+{
+ int ret;
+ sMutex.lock();
+ sp<ISchedulingPolicyService> sps = sSchedulingPolicyService;
+ sMutex.unlock();
+ if (sps == 0) {
+ sp<IBinder> binder = defaultServiceManager()->checkService(_scheduling_policy);
+ if (binder == 0) {
+ return DEAD_OBJECT;
+ }
+ sps = interface_cast<ISchedulingPolicyService>(binder);
+ sMutex.lock();
+ sSchedulingPolicyService = sps;
+ sMutex.unlock();
+ }
+ ret = sps->requestCpusetBoost(enable, client);
+ if (ret != DEAD_OBJECT) {
+ return ret;
+ }
+ ALOGW("SchedulingPolicyService died");
+ sMutex.lock();
+ sSchedulingPolicyService.clear();
+ sMutex.unlock();
+ return ret;
+}
+
} // namespace android
diff --git a/media/utils/include/mediautils/SchedulingPolicyService.h b/media/utils/include/mediautils/SchedulingPolicyService.h
index 47d8734..a33539f 100644
--- a/media/utils/include/mediautils/SchedulingPolicyService.h
+++ b/media/utils/include/mediautils/SchedulingPolicyService.h
@@ -17,8 +17,11 @@
#ifndef _ANDROID_SCHEDULING_POLICY_SERVICE_H
#define _ANDROID_SCHEDULING_POLICY_SERVICE_H
+#include <utils/RefBase.h>
+
namespace android {
+class IInterface;
// Request elevated priority for thread tid, whose thread group leader must be pid.
// The priority parameter is currently restricted to either 1 or 2.
// The asynchronous parameter should be 'true' to return immediately,
@@ -26,6 +29,14 @@
// The default value 'false' means to return after request has been enqueued and executed.
int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool isForApp, bool asynchronous = false);
+// Request to move the media.codec process between SP_FOREGROUND and SP_TOP_APP.
+// When 'enable' is 'true', the server will attempt to move the media.codec process
+// from the SP_FOREGROUND cpuset into the SP_TOP_APP cpuset. A valid 'client' must be
+// provided for the server to receive death notifications. When 'enable' is 'false',
+// the server will attempt to move the media.codec process back to the original
+// cpuset; 'client' is ignored in this case.
+int requestCpusetBoost(bool enable, const sp<IInterface> &client);
+
} // namespace android
#endif // _ANDROID_SCHEDULING_POLICY_SERVICE_H
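
A hedged usage sketch for the new cpuset-boost request; the surrounding service code and the mClient binder interface are assumptions, not part of this change:

    // Bracket a latency-critical burst of codec work (e.g. in the media.codec process).
    int err = requestCpusetBoost(true /*enable*/, mClient);
    if (err != OK) {
        // The boost is best-effort; continue without it.
    }
    // ... perform the burst ...
    requestCpusetBoost(false /*enable*/, mClient);   // 'client' is ignored when disabling
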
diff --git a/packages/MediaComponents/Android.mk b/packages/MediaComponents/Android.mk
new file mode 100644
index 0000000..def9dc5
--- /dev/null
+++ b/packages/MediaComponents/Android.mk
@@ -0,0 +1,72 @@
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This package is excluded from build for now since APIs using this apk became hidden.
+#
+#LOCAL_PATH := $(call my-dir)
+#ifneq ($(TARGET_BUILD_PDK),true) # Build MediaComponents only if this is not a PDK build. MediaComponents won't
+## build in PDK builds because frameworks/base/core/java is not available but
+## IMediaSession2.aidl and IMediaController2.aidl are using classes from
+## frameworks/base/core/java.
+#
+#include $(CLEAR_VARS)
+#
+#LOCAL_PACKAGE_NAME := MediaComponents
+#LOCAL_MODULE_OWNER := google
+#
+## TODO: create a separate key for this package.
+#LOCAL_CERTIFICATE := platform
+#
+## TODO: Use System SDK once public APIs are approved
+## LOCAL_SDK_VERSION := system_current
+#LOCAL_PRIVATE_PLATFORM_APIS := true
+#
+#LOCAL_SRC_FILES := \
+# $(call all-java-files-under, src) \
+# $(call all-Iaidl-files-under, src)
+#
+#LOCAL_PROGUARD_FLAG_FILES := proguard.cfg
+#
+#LOCAL_MULTILIB := first
+#
+#LOCAL_JAVA_LIBRARIES += android-support-annotations
+#
+## To embed native libraries in package, uncomment the lines below.
+##LOCAL_MODULE_TAGS := samples
+##LOCAL_JNI_SHARED_LIBRARIES := \
+## libaacextractor \
+## libamrextractor \
+## libflacextractor \
+## libmidiextractor \
+## libmkvextractor \
+## libmp3extractor \
+## libmp4extractor \
+## libmpeg2extractor \
+## liboggextractor \
+## libwavextractor \
+#
+## TODO: Remove dependency with other support libraries.
+#LOCAL_STATIC_ANDROID_LIBRARIES += \
+# android-support-v4 \
+# android-support-v7-appcompat \
+# android-support-v7-palette
+#LOCAL_USE_AAPT2 := true
+#
+#include $(BUILD_PACKAGE)
+#
+#endif # ifneq ($(TARGET_BUILD_PDK),true)
+#
+#include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/packages/MediaComponents/AndroidManifest.xml b/packages/MediaComponents/AndroidManifest.xml
new file mode 100644
index 0000000..50fdca1
--- /dev/null
+++ b/packages/MediaComponents/AndroidManifest.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8"?>
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.media.update"
+ android:versionCode="1"
+ android:versionName="1.0" >
+
+ <application
+ android:label="Media Components Update"
+ android:multiArch="true"
+ android:allowBackup="false"
+ android:hasCode="false"
+ android:extractNativeLibs="false">
+ </application>
+
+</manifest>
diff --git a/packages/MediaComponents/proguard.cfg b/packages/MediaComponents/proguard.cfg
new file mode 100644
index 0000000..d7bf730
--- /dev/null
+++ b/packages/MediaComponents/proguard.cfg
@@ -0,0 +1,20 @@
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Keep entry point for updatable Java classes
+-keep public class com.android.media.update.ApiFactory {
+ public static com.android.media.update.ApiFactory initialize(android.content.pm.ApplicationInfo);
+}
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_audiotrack_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_audiotrack_dark.png
new file mode 100644
index 0000000..17fd51f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_audiotrack_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_audiotrack_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_audiotrack_light.png
new file mode 100644
index 0000000..d7c8252
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_audiotrack_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_dialog_close_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_dialog_close_dark.png
new file mode 100644
index 0000000..928ddea
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_dialog_close_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_dialog_close_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_dialog_close_light.png
new file mode 100644
index 0000000..1a9cd75
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_dialog_close_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_media_pause_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_media_pause_dark.png
new file mode 100644
index 0000000..7192ad4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_media_pause_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_media_pause_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_media_pause_light.png
new file mode 100644
index 0000000..bb707ea
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_media_pause_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_media_play_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_media_play_dark.png
new file mode 100644
index 0000000..0c32d00
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_media_play_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_media_play_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_media_play_light.png
new file mode 100644
index 0000000..5345ee3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_media_play_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_media_stop_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_media_stop_dark.png
new file mode 100644
index 0000000..801d341
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_media_stop_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_media_stop_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_media_stop_light.png
new file mode 100644
index 0000000..9d6b65d
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_media_stop_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_media_subtitle_disabled.png b/packages/MediaComponents/res/drawable-hdpi/ic_media_subtitle_disabled.png
new file mode 100644
index 0000000..0354f61
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_media_subtitle_disabled.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_media_subtitle_enabled.png b/packages/MediaComponents/res/drawable-hdpi/ic_media_subtitle_enabled.png
new file mode 100644
index 0000000..5f8febe
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_media_subtitle_enabled.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disabled_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disabled_dark.png
new file mode 100644
index 0000000..8ad305d
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disabled_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disabled_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disabled_light.png
new file mode 100644
index 0000000..887fde4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disabled_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disconnected_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disconnected_dark.png
new file mode 100644
index 0000000..5739df7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disconnected_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disconnected_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disconnected_light.png
new file mode 100644
index 0000000..58c344a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_disconnected_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_grey.png b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_grey.png
new file mode 100644
index 0000000..1a03420
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_mr_button_grey.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_dark.png
new file mode 100755
index 0000000..723e455
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_group_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_group_dark.png
new file mode 100755
index 0000000..40c25a3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_group_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_group_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_group_light.png
new file mode 100755
index 0000000..afdb9c1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_group_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_light.png
new file mode 100755
index 0000000..846c109
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_speaker_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_tv_dark.png b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_tv_dark.png
new file mode 100755
index 0000000..33bf484
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_tv_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_tv_light.png b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_tv_light.png
new file mode 100755
index 0000000..c911b5c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-hdpi/ic_vol_type_tv_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_audiotrack_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_audiotrack_dark.png
new file mode 100644
index 0000000..e94ed50
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_audiotrack_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_audiotrack_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_audiotrack_light.png
new file mode 100644
index 0000000..2cf7e0c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_audiotrack_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_dialog_close_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_dialog_close_dark.png
new file mode 100644
index 0000000..66558a8
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_dialog_close_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_dialog_close_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_dialog_close_light.png
new file mode 100644
index 0000000..40a1a84
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_dialog_close_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_media_pause_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_media_pause_dark.png
new file mode 100644
index 0000000..f49aed7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_media_pause_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_media_pause_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_media_pause_light.png
new file mode 100644
index 0000000..74068ea
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_media_pause_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_media_play_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_media_play_dark.png
new file mode 100644
index 0000000..9cc777c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_media_play_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_media_play_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_media_play_light.png
new file mode 100644
index 0000000..f208795
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_media_play_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_media_stop_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_media_stop_dark.png
new file mode 100644
index 0000000..3ad2c9c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_media_stop_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_media_stop_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_media_stop_light.png
new file mode 100644
index 0000000..b002ab7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_media_stop_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_media_subtitle_disabled.png b/packages/MediaComponents/res/drawable-mdpi/ic_media_subtitle_disabled.png
new file mode 100644
index 0000000..0354f61
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_media_subtitle_disabled.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_media_subtitle_enabled.png b/packages/MediaComponents/res/drawable-mdpi/ic_media_subtitle_enabled.png
new file mode 100644
index 0000000..5f8febe
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_media_subtitle_enabled.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disabled_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disabled_dark.png
new file mode 100644
index 0000000..4446ea4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disabled_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disabled_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disabled_light.png
new file mode 100644
index 0000000..4d790c6
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disabled_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disconnected_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disconnected_dark.png
new file mode 100644
index 0000000..c401dc0
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disconnected_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disconnected_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disconnected_light.png
new file mode 100644
index 0000000..e24d586
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_disconnected_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_grey.png b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_grey.png
new file mode 100644
index 0000000..ccbb772
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_mr_button_grey.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_dark.png
new file mode 100755
index 0000000..7cc9845
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_group_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_group_dark.png
new file mode 100755
index 0000000..22617e1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_group_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_group_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_group_light.png
new file mode 100755
index 0000000..cefef3c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_group_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_light.png
new file mode 100755
index 0000000..9a0047c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_speaker_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_tv_dark.png b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_tv_dark.png
new file mode 100755
index 0000000..ca5d6a2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_tv_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_tv_light.png b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_tv_light.png
new file mode 100755
index 0000000..8134310
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-mdpi/ic_vol_type_tv_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_audiotrack_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_audiotrack_dark.png
new file mode 100644
index 0000000..b5c899f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_audiotrack_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_audiotrack_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_audiotrack_light.png
new file mode 100644
index 0000000..4778e00
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_audiotrack_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_dialog_close_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_dialog_close_dark.png
new file mode 100644
index 0000000..f992fc5
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_dialog_close_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_dialog_close_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_dialog_close_light.png
new file mode 100644
index 0000000..d3884e6
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_dialog_close_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_media_pause_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_media_pause_dark.png
new file mode 100644
index 0000000..660ac65
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_media_pause_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_media_pause_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_media_pause_light.png
new file mode 100644
index 0000000..792104f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_media_pause_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_media_play_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_media_play_dark.png
new file mode 100644
index 0000000..be5c062
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_media_play_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_media_play_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_media_play_light.png
new file mode 100644
index 0000000..d12d495
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_media_play_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_media_stop_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_media_stop_dark.png
new file mode 100644
index 0000000..5239336
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_media_stop_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_media_stop_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_media_stop_light.png
new file mode 100644
index 0000000..5bc5a6c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_media_stop_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_00_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_00_dark.png
new file mode 100644
index 0000000..f6dd214
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_00_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_00_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_00_light.png
new file mode 100644
index 0000000..6b7bdcd
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_00_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_01_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_01_dark.png
new file mode 100644
index 0000000..c7fe576
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_01_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_01_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_01_light.png
new file mode 100644
index 0000000..0a5d6aa
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_01_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_02_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_02_dark.png
new file mode 100644
index 0000000..0aadfa3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_02_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_02_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_02_light.png
new file mode 100644
index 0000000..125fe0b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_02_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_03_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_03_dark.png
new file mode 100644
index 0000000..05c48a7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_03_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_03_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_03_light.png
new file mode 100644
index 0000000..741e911
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_03_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_04_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_04_dark.png
new file mode 100644
index 0000000..ae4218a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_04_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_04_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_04_light.png
new file mode 100644
index 0000000..8b30fab
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_04_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_05_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_05_dark.png
new file mode 100644
index 0000000..d7aa903
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_05_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_05_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_05_light.png
new file mode 100644
index 0000000..f7e2f29
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_05_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_06_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_06_dark.png
new file mode 100644
index 0000000..e7871e2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_06_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_06_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_06_light.png
new file mode 100644
index 0000000..8c57f63
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_06_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_07_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_07_dark.png
new file mode 100644
index 0000000..0041b01
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_07_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_07_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_07_light.png
new file mode 100644
index 0000000..6dbb694
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_07_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_08_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_08_dark.png
new file mode 100644
index 0000000..08e1013
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_08_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_08_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_08_light.png
new file mode 100644
index 0000000..5c352c3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_08_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_09_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_09_dark.png
new file mode 100644
index 0000000..70532e9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_09_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_09_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_09_light.png
new file mode 100644
index 0000000..9c6ba30
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_09_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_10_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_10_dark.png
new file mode 100644
index 0000000..9ba3b5f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_10_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_10_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_10_light.png
new file mode 100644
index 0000000..bd4bb22
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_10_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_11_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_11_dark.png
new file mode 100644
index 0000000..2156127
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_11_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_11_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_11_light.png
new file mode 100644
index 0000000..b417a9f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_11_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_12_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_12_dark.png
new file mode 100644
index 0000000..9bf633e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_12_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_12_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_12_light.png
new file mode 100644
index 0000000..ba51811
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_12_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_13_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_13_dark.png
new file mode 100644
index 0000000..756a53c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_13_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_13_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_13_light.png
new file mode 100644
index 0000000..4705dca
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_13_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_14_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_14_dark.png
new file mode 100644
index 0000000..50e4ea3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_14_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_14_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_14_light.png
new file mode 100644
index 0000000..bc6724f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_14_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_15_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_15_dark.png
new file mode 100644
index 0000000..9e3b410
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_15_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_15_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_15_light.png
new file mode 100644
index 0000000..2f18abd
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_15_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_16_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_16_dark.png
new file mode 100644
index 0000000..de81133
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_16_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_16_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_16_light.png
new file mode 100644
index 0000000..b80b191
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_16_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_17_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_17_dark.png
new file mode 100644
index 0000000..48aba3d
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_17_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_17_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_17_light.png
new file mode 100644
index 0000000..ca34d5b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_17_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_18_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_18_dark.png
new file mode 100644
index 0000000..e9957b3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_18_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_18_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_18_light.png
new file mode 100644
index 0000000..a5d384f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_18_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_19_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_19_dark.png
new file mode 100644
index 0000000..ddc6297
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_19_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_19_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_19_light.png
new file mode 100644
index 0000000..28ab684
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_19_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_20_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_20_dark.png
new file mode 100644
index 0000000..51e7f75
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_20_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_20_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_20_light.png
new file mode 100644
index 0000000..4aa3ca3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_20_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_21_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_21_dark.png
new file mode 100644
index 0000000..9caecde
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_21_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_21_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_21_light.png
new file mode 100644
index 0000000..1b8d0b6
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_21_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_22_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_22_dark.png
new file mode 100644
index 0000000..400be3c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_22_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_22_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_22_light.png
new file mode 100644
index 0000000..c14f1bf
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_22_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_23_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_23_dark.png
new file mode 100644
index 0000000..4e18b46
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_23_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_23_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_23_light.png
new file mode 100644
index 0000000..c4c2c00
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_23_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_24_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_24_dark.png
new file mode 100644
index 0000000..98fae44
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_24_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_24_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_24_light.png
new file mode 100644
index 0000000..d64c289
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_24_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_25_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_25_dark.png
new file mode 100644
index 0000000..91f9327
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_25_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_25_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_25_light.png
new file mode 100644
index 0000000..f5e1f69
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_25_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_26_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_26_dark.png
new file mode 100644
index 0000000..3e6fafd
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_26_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_26_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_26_light.png
new file mode 100644
index 0000000..ae2bd87
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_26_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_27_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_27_dark.png
new file mode 100644
index 0000000..f73a1f8
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_27_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_27_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_27_light.png
new file mode 100644
index 0000000..78c1069
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_27_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_28_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_28_dark.png
new file mode 100644
index 0000000..562b803
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_28_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_28_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_28_light.png
new file mode 100644
index 0000000..ddfba02
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_28_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_29_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_29_dark.png
new file mode 100644
index 0000000..257f2d2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_29_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_29_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_29_light.png
new file mode 100644
index 0000000..38f5478
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_29_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_30_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_30_dark.png
new file mode 100644
index 0000000..f995af0
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_30_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_30_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_30_light.png
new file mode 100644
index 0000000..c50b7f0
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connected_30_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_00_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_00_dark.png
new file mode 100644
index 0000000..f6dd214
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_00_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_00_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_00_light.png
new file mode 100644
index 0000000..6b7bdcd
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_00_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_01_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_01_dark.png
new file mode 100644
index 0000000..c7fe576
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_01_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_01_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_01_light.png
new file mode 100644
index 0000000..0a5d6aa
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_01_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_02_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_02_dark.png
new file mode 100644
index 0000000..0aadfa3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_02_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_02_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_02_light.png
new file mode 100644
index 0000000..125fe0b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_02_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_03_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_03_dark.png
new file mode 100644
index 0000000..05c48a7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_03_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_03_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_03_light.png
new file mode 100644
index 0000000..741e911
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_03_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_04_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_04_dark.png
new file mode 100644
index 0000000..ae4218a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_04_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_04_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_04_light.png
new file mode 100644
index 0000000..8b30fab
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_04_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_05_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_05_dark.png
new file mode 100644
index 0000000..d7aa903
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_05_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_05_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_05_light.png
new file mode 100644
index 0000000..f7e2f29
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_05_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_06_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_06_dark.png
new file mode 100644
index 0000000..e7871e2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_06_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_06_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_06_light.png
new file mode 100644
index 0000000..8c57f63
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_06_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_07_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_07_dark.png
new file mode 100644
index 0000000..0041b01
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_07_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_07_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_07_light.png
new file mode 100644
index 0000000..6dbb694
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_07_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_08_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_08_dark.png
new file mode 100644
index 0000000..08e1013
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_08_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_08_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_08_light.png
new file mode 100644
index 0000000..5c352c3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_08_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_09_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_09_dark.png
new file mode 100644
index 0000000..70532e9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_09_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_09_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_09_light.png
new file mode 100644
index 0000000..9c6ba30
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_09_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_10_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_10_dark.png
new file mode 100644
index 0000000..9ba3b5f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_10_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_10_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_10_light.png
new file mode 100644
index 0000000..bd4bb22
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_10_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_11_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_11_dark.png
new file mode 100644
index 0000000..f3570f4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_11_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_11_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_11_light.png
new file mode 100644
index 0000000..65a403e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_11_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_12_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_12_dark.png
new file mode 100644
index 0000000..f644bfd
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_12_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_12_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_12_light.png
new file mode 100644
index 0000000..c7d6048
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_12_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_13_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_13_dark.png
new file mode 100644
index 0000000..6e0d558
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_13_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_13_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_13_light.png
new file mode 100644
index 0000000..f3bc48d
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_13_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_14_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_14_dark.png
new file mode 100644
index 0000000..14d8f8e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_14_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_14_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_14_light.png
new file mode 100644
index 0000000..98b90e5
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_14_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_15_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_15_dark.png
new file mode 100644
index 0000000..83234a7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_15_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_15_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_15_light.png
new file mode 100644
index 0000000..47d452f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_15_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_16_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_16_dark.png
new file mode 100644
index 0000000..b81cf5a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_16_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_16_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_16_light.png
new file mode 100644
index 0000000..20d08b4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_16_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_17_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_17_dark.png
new file mode 100644
index 0000000..6feb3f1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_17_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_17_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_17_light.png
new file mode 100644
index 0000000..e6ae8b3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_17_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_18_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_18_dark.png
new file mode 100644
index 0000000..0b0fc08
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_18_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_18_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_18_light.png
new file mode 100644
index 0000000..c2a16ac
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_18_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_19_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_19_dark.png
new file mode 100644
index 0000000..a3598cc
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_19_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_19_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_19_light.png
new file mode 100644
index 0000000..846d16d
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_19_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_20_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_20_dark.png
new file mode 100644
index 0000000..2070455
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_20_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_20_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_20_light.png
new file mode 100644
index 0000000..ae6db13
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_20_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_21_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_21_dark.png
new file mode 100644
index 0000000..7f3828a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_21_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_21_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_21_light.png
new file mode 100644
index 0000000..aaccc73
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_21_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_22_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_22_dark.png
new file mode 100644
index 0000000..5c8ced9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_22_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_22_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_22_light.png
new file mode 100644
index 0000000..ad01b9e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_22_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_23_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_23_dark.png
new file mode 100644
index 0000000..ce31dd3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_23_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_23_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_23_light.png
new file mode 100644
index 0000000..9ef78e4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_23_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_24_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_24_dark.png
new file mode 100644
index 0000000..a7c2cdb
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_24_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_24_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_24_light.png
new file mode 100644
index 0000000..e7c5bea
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_24_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_25_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_25_dark.png
new file mode 100644
index 0000000..ecad0d4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_25_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_25_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_25_light.png
new file mode 100644
index 0000000..5fa5923
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_25_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_26_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_26_dark.png
new file mode 100644
index 0000000..f687e25
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_26_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_26_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_26_light.png
new file mode 100644
index 0000000..9c06db8
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_26_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_27_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_27_dark.png
new file mode 100644
index 0000000..90225ba
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_27_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_27_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_27_light.png
new file mode 100644
index 0000000..19697de
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_27_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_28_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_28_dark.png
new file mode 100644
index 0000000..d37ec21
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_28_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_28_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_28_light.png
new file mode 100644
index 0000000..21840bf
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_28_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_29_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_29_dark.png
new file mode 100644
index 0000000..5445e3a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_29_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_29_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_29_light.png
new file mode 100644
index 0000000..2337c65
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_29_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_30_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_30_dark.png
new file mode 100644
index 0000000..f6dd214
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_30_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_30_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_30_light.png
new file mode 100644
index 0000000..6b7bdcd
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_connecting_30_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disabled_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disabled_dark.png
new file mode 100644
index 0000000..c4dc132
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disabled_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disabled_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disabled_light.png
new file mode 100644
index 0000000..b14617c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disabled_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disconnected_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disconnected_dark.png
new file mode 100644
index 0000000..bb30773
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disconnected_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disconnected_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disconnected_light.png
new file mode 100644
index 0000000..a05d7d7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_disconnected_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_grey.png b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_grey.png
new file mode 100644
index 0000000..2238d58
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_mr_button_grey.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_dark.png
new file mode 100755
index 0000000..e40349d
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_group_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_group_dark.png
new file mode 100755
index 0000000..f67c463
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_group_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_group_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_group_light.png
new file mode 100755
index 0000000..7fcebf5
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_group_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_light.png
new file mode 100755
index 0000000..ea32a7a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_speaker_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_tv_dark.png b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_tv_dark.png
new file mode 100755
index 0000000..d62ca37
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_tv_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_tv_light.png b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_tv_light.png
new file mode 100755
index 0000000..3131256
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xhdpi/ic_vol_type_tv_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_audiotrack_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_audiotrack_dark.png
new file mode 100644
index 0000000..f131e1b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_audiotrack_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_audiotrack_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_audiotrack_light.png
new file mode 100644
index 0000000..e5946a2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_audiotrack_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_dialog_close_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_dialog_close_dark.png
new file mode 100644
index 0000000..b85e87f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_dialog_close_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_dialog_close_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_dialog_close_light.png
new file mode 100644
index 0000000..51b4401
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_dialog_close_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_media_pause_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_pause_dark.png
new file mode 100644
index 0000000..3ea7e03
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_pause_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_media_pause_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_pause_light.png
new file mode 100644
index 0000000..dc63538
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_pause_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_media_play_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_play_dark.png
new file mode 100644
index 0000000..2745c3a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_play_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_media_play_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_play_light.png
new file mode 100644
index 0000000..eda3ba5
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_play_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_media_stop_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_stop_dark.png
new file mode 100644
index 0000000..035ca18
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_stop_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_media_stop_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_stop_light.png
new file mode 100644
index 0000000..eac183d
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_media_stop_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_00_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_00_dark.png
new file mode 100644
index 0000000..0db679e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_00_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_00_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_00_light.png
new file mode 100644
index 0000000..51c6051
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_00_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_01_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_01_dark.png
new file mode 100644
index 0000000..c083914
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_01_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_01_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_01_light.png
new file mode 100644
index 0000000..c3c3caf
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_01_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_02_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_02_dark.png
new file mode 100644
index 0000000..fc444cf
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_02_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_02_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_02_light.png
new file mode 100644
index 0000000..abd6377
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_02_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_03_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_03_dark.png
new file mode 100644
index 0000000..6dbd1da
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_03_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_03_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_03_light.png
new file mode 100644
index 0000000..d2e7108
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_03_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_04_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_04_dark.png
new file mode 100644
index 0000000..d9f596b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_04_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_04_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_04_light.png
new file mode 100644
index 0000000..4f32e1a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_04_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_05_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_05_dark.png
new file mode 100644
index 0000000..c568e04
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_05_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_05_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_05_light.png
new file mode 100644
index 0000000..ed20dd9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_05_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_06_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_06_dark.png
new file mode 100644
index 0000000..bbe39e7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_06_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_06_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_06_light.png
new file mode 100644
index 0000000..1edc15f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_06_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_07_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_07_dark.png
new file mode 100644
index 0000000..78aebaf
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_07_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_07_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_07_light.png
new file mode 100644
index 0000000..b5a6a4f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_07_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_08_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_08_dark.png
new file mode 100644
index 0000000..44b91ce
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_08_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_08_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_08_light.png
new file mode 100644
index 0000000..85f66f9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_08_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_09_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_09_dark.png
new file mode 100644
index 0000000..51ea34b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_09_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_09_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_09_light.png
new file mode 100644
index 0000000..952de04
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_09_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_10_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_10_dark.png
new file mode 100644
index 0000000..8b1aa21
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_10_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_10_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_10_light.png
new file mode 100644
index 0000000..534bcc0
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_10_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_11_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_11_dark.png
new file mode 100644
index 0000000..f666b35
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_11_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_11_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_11_light.png
new file mode 100644
index 0000000..145a8fb
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_11_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_12_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_12_dark.png
new file mode 100644
index 0000000..edeb132
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_12_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_12_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_12_light.png
new file mode 100644
index 0000000..9da2b60
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_12_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_13_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_13_dark.png
new file mode 100644
index 0000000..ab80aa9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_13_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_13_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_13_light.png
new file mode 100644
index 0000000..115efe4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_13_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_14_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_14_dark.png
new file mode 100644
index 0000000..8c0cc31
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_14_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_14_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_14_light.png
new file mode 100644
index 0000000..e6ae6fc
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_14_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_15_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_15_dark.png
new file mode 100644
index 0000000..b8816c9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_15_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_15_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_15_light.png
new file mode 100644
index 0000000..bd42931
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_15_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_16_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_16_dark.png
new file mode 100644
index 0000000..10d5b7f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_16_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_16_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_16_light.png
new file mode 100644
index 0000000..303a0fe
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_16_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_17_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_17_dark.png
new file mode 100644
index 0000000..3c2a655
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_17_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_17_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_17_light.png
new file mode 100644
index 0000000..90debc2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_17_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_18_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_18_dark.png
new file mode 100644
index 0000000..d3e78a7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_18_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_18_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_18_light.png
new file mode 100644
index 0000000..3a3f991
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_18_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_19_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_19_dark.png
new file mode 100644
index 0000000..63fad9e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_19_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_19_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_19_light.png
new file mode 100644
index 0000000..d6dd8d4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_19_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_20_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_20_dark.png
new file mode 100644
index 0000000..890fd5f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_20_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_20_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_20_light.png
new file mode 100644
index 0000000..6b0b5c1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_20_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_21_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_21_dark.png
new file mode 100644
index 0000000..9ce1ef1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_21_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_21_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_21_light.png
new file mode 100644
index 0000000..81710d4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_21_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_22_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_22_dark.png
new file mode 100644
index 0000000..861c080
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_22_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_22_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_22_light.png
new file mode 100644
index 0000000..1c4aa21
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_22_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_23_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_23_dark.png
new file mode 100644
index 0000000..59a6b30
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_23_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_23_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_23_light.png
new file mode 100644
index 0000000..c6e8fe0
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_23_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_24_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_24_dark.png
new file mode 100644
index 0000000..57b840e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_24_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_24_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_24_light.png
new file mode 100644
index 0000000..bf24050
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_24_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_25_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_25_dark.png
new file mode 100644
index 0000000..01c18c1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_25_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_25_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_25_light.png
new file mode 100644
index 0000000..be9753e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_25_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_26_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_26_dark.png
new file mode 100644
index 0000000..3f291b1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_26_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_26_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_26_light.png
new file mode 100644
index 0000000..dc1c619
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_26_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_27_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_27_dark.png
new file mode 100644
index 0000000..6504a70
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_27_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_27_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_27_light.png
new file mode 100644
index 0000000..a7e0a60
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_27_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_28_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_28_dark.png
new file mode 100644
index 0000000..57b1f3e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_28_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_28_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_28_light.png
new file mode 100644
index 0000000..5c551ec
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_28_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_29_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_29_dark.png
new file mode 100644
index 0000000..238667e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_29_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_29_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_29_light.png
new file mode 100644
index 0000000..ffb8183
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_29_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_30_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_30_dark.png
new file mode 100644
index 0000000..4893f18
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_30_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_30_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_30_light.png
new file mode 100644
index 0000000..ac5e156
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connected_30_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_00_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_00_dark.png
new file mode 100644
index 0000000..0db679e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_00_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_00_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_00_light.png
new file mode 100644
index 0000000..51c6051
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_00_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_01_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_01_dark.png
new file mode 100644
index 0000000..c083914
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_01_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_01_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_01_light.png
new file mode 100644
index 0000000..c3c3caf
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_01_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_02_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_02_dark.png
new file mode 100644
index 0000000..fc444cf
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_02_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_02_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_02_light.png
new file mode 100644
index 0000000..abd6377
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_02_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_03_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_03_dark.png
new file mode 100644
index 0000000..6dbd1da
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_03_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_03_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_03_light.png
new file mode 100644
index 0000000..d2e7108
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_03_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_04_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_04_dark.png
new file mode 100644
index 0000000..d9f596b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_04_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_04_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_04_light.png
new file mode 100644
index 0000000..4f32e1a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_04_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_05_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_05_dark.png
new file mode 100644
index 0000000..c568e04
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_05_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_05_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_05_light.png
new file mode 100644
index 0000000..ed20dd9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_05_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_06_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_06_dark.png
new file mode 100644
index 0000000..bbe39e7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_06_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_06_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_06_light.png
new file mode 100644
index 0000000..1edc15f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_06_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_07_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_07_dark.png
new file mode 100644
index 0000000..78aebaf
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_07_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_07_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_07_light.png
new file mode 100644
index 0000000..b5a6a4f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_07_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_08_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_08_dark.png
new file mode 100644
index 0000000..44b91ce
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_08_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_08_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_08_light.png
new file mode 100644
index 0000000..85f66f9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_08_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_09_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_09_dark.png
new file mode 100644
index 0000000..51ea34b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_09_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_09_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_09_light.png
new file mode 100644
index 0000000..952de04
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_09_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_10_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_10_dark.png
new file mode 100644
index 0000000..8b1aa21
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_10_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_10_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_10_light.png
new file mode 100644
index 0000000..534bcc0
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_10_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_11_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_11_dark.png
new file mode 100644
index 0000000..1fffa01
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_11_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_11_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_11_light.png
new file mode 100644
index 0000000..0ff7e57
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_11_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_12_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_12_dark.png
new file mode 100644
index 0000000..06ac4dc
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_12_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_12_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_12_light.png
new file mode 100644
index 0000000..42a86f5
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_12_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_13_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_13_dark.png
new file mode 100644
index 0000000..0301090
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_13_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_13_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_13_light.png
new file mode 100644
index 0000000..4396f0e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_13_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_14_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_14_dark.png
new file mode 100644
index 0000000..e19001b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_14_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_14_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_14_light.png
new file mode 100644
index 0000000..2271581
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_14_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_15_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_15_dark.png
new file mode 100644
index 0000000..5e96208
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_15_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_15_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_15_light.png
new file mode 100644
index 0000000..0f69500
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_15_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_16_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_16_dark.png
new file mode 100644
index 0000000..07e1bd6
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_16_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_16_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_16_light.png
new file mode 100644
index 0000000..cde8f19
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_16_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_17_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_17_dark.png
new file mode 100644
index 0000000..b632e95
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_17_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_17_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_17_light.png
new file mode 100644
index 0000000..11d5d2e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_17_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_18_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_18_dark.png
new file mode 100644
index 0000000..660d527
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_18_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_18_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_18_light.png
new file mode 100644
index 0000000..2761ae1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_18_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_19_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_19_dark.png
new file mode 100644
index 0000000..0aa3f84
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_19_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_19_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_19_light.png
new file mode 100644
index 0000000..27d166f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_19_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_20_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_20_dark.png
new file mode 100644
index 0000000..ebe527e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_20_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_20_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_20_light.png
new file mode 100644
index 0000000..aeb2a8e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_20_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_21_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_21_dark.png
new file mode 100644
index 0000000..7337af5
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_21_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_21_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_21_light.png
new file mode 100644
index 0000000..f3f31ef
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_21_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_22_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_22_dark.png
new file mode 100644
index 0000000..20d9f57
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_22_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_22_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_22_light.png
new file mode 100644
index 0000000..bf8eb77
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_22_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_23_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_23_dark.png
new file mode 100644
index 0000000..56a0e14
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_23_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_23_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_23_light.png
new file mode 100644
index 0000000..67425e1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_23_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_24_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_24_dark.png
new file mode 100644
index 0000000..7c76e19
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_24_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_24_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_24_light.png
new file mode 100644
index 0000000..e02f1ed
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_24_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_25_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_25_dark.png
new file mode 100644
index 0000000..f5fdcdd
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_25_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_25_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_25_light.png
new file mode 100644
index 0000000..8ce9b819
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_25_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_26_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_26_dark.png
new file mode 100644
index 0000000..a29e443
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_26_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_26_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_26_light.png
new file mode 100644
index 0000000..349ca89
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_26_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_27_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_27_dark.png
new file mode 100644
index 0000000..0fc75d5
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_27_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_27_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_27_light.png
new file mode 100644
index 0000000..5cbd27c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_27_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_28_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_28_dark.png
new file mode 100644
index 0000000..0ebb0ac
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_28_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_28_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_28_light.png
new file mode 100644
index 0000000..5b514aa
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_28_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_29_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_29_dark.png
new file mode 100644
index 0000000..8e7fe5c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_29_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_29_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_29_light.png
new file mode 100644
index 0000000..efb2c10
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_29_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_30_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_30_dark.png
new file mode 100644
index 0000000..0db679e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_30_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_30_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_30_light.png
new file mode 100644
index 0000000..51c6051
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_connecting_30_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disabled_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disabled_dark.png
new file mode 100644
index 0000000..fdb2121
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disabled_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disabled_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disabled_light.png
new file mode 100644
index 0000000..9ce7e3a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disabled_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disconnected_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disconnected_dark.png
new file mode 100644
index 0000000..e8601ce
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disconnected_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disconnected_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disconnected_light.png
new file mode 100644
index 0000000..34928d7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_disconnected_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_grey.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_grey.png
new file mode 100644
index 0000000..792fd77
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_mr_button_grey.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_dark.png
new file mode 100755
index 0000000..f171a8c
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_group_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_group_dark.png
new file mode 100755
index 0000000..c8cb6ca
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_group_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_group_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_group_light.png
new file mode 100755
index 0000000..9c8863d
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_group_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_light.png
new file mode 100755
index 0000000..9335038
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_speaker_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_tv_dark.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_tv_dark.png
new file mode 100755
index 0000000..a6a4858
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_tv_dark.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_tv_light.png b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_tv_light.png
new file mode 100755
index 0000000..4ca6787
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxhdpi/ic_vol_type_tv_light.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_00.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_00.png
new file mode 100644
index 0000000..b2305d2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_00.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_01.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_01.png
new file mode 100644
index 0000000..59395d4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_01.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_02.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_02.png
new file mode 100644
index 0000000..70a7282
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_02.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_03.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_03.png
new file mode 100644
index 0000000..b3f0f53
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_03.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_04.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_04.png
new file mode 100644
index 0000000..66a80d9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_04.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_05.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_05.png
new file mode 100644
index 0000000..8ec3939
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_05.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_06.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_06.png
new file mode 100644
index 0000000..0f02536
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_06.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_07.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_07.png
new file mode 100644
index 0000000..ba228f4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_07.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_08.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_08.png
new file mode 100644
index 0000000..304277e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_08.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_09.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_09.png
new file mode 100644
index 0000000..f865bfb
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_09.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_10.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_10.png
new file mode 100644
index 0000000..17c5d6b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_10.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_11.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_11.png
new file mode 100644
index 0000000..a2f4ad5
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_11.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_12.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_12.png
new file mode 100644
index 0000000..c230648
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_12.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_13.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_13.png
new file mode 100644
index 0000000..b99324e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_13.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_14.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_14.png
new file mode 100644
index 0000000..c8618f0
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_14.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_15.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_15.png
new file mode 100644
index 0000000..4a0d770
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_collapse_15.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_00.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_00.png
new file mode 100644
index 0000000..4a0d770
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_00.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_01.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_01.png
new file mode 100644
index 0000000..4db4e50
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_01.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_02.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_02.png
new file mode 100644
index 0000000..82b5f03
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_02.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_03.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_03.png
new file mode 100644
index 0000000..b05c758
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_03.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_04.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_04.png
new file mode 100644
index 0000000..fa5c7fa
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_04.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_05.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_05.png
new file mode 100644
index 0000000..2c287e4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_05.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_06.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_06.png
new file mode 100644
index 0000000..eb7d0cf
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_06.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_07.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_07.png
new file mode 100644
index 0000000..95fa72b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_07.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_08.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_08.png
new file mode 100644
index 0000000..5650eea
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_08.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_09.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_09.png
new file mode 100644
index 0000000..6f44355
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_09.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_10.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_10.png
new file mode 100644
index 0000000..4e877c3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_10.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_11.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_11.png
new file mode 100644
index 0000000..7927f0a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_11.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_12.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_12.png
new file mode 100644
index 0000000..71b19bb
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_12.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_13.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_13.png
new file mode 100644
index 0000000..bf5921e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_13.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_14.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_14.png
new file mode 100644
index 0000000..14b76b1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_14.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_15.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_15.png
new file mode 100644
index 0000000..b2305d2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_group_expand_15.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable-xxxhdpi/ic_mr_button_grey.png b/packages/MediaComponents/res/drawable-xxxhdpi/ic_mr_button_grey.png
new file mode 100644
index 0000000..04a9525
--- /dev/null
+++ b/packages/MediaComponents/res/drawable-xxxhdpi/ic_mr_button_grey.png
Binary files differ
diff --git a/packages/MediaComponents/res/drawable/custom_progress.xml b/packages/MediaComponents/res/drawable/custom_progress.xml
new file mode 100644
index 0000000..9731a6e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/custom_progress.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<layer-list xmlns:android="http://schemas.android.com/apk/res/android" >
+ <item android:id="@android:id/background">
+ <shape android:shape="rectangle" >
+ <solid android:color="#26000000" />
+ </shape>
+ </item>
+ <item android:id="@android:id/secondaryProgress">
+ <clip>
+ <shape android:shape="rectangle" >
+ <solid android:color="#5Cffffff" />
+ </shape>
+ </clip>
+ </item>
+ <item android:id="@android:id/progress">
+ <clip>
+ <shape android:shape="rectangle" >
+ <solid android:color="#ffffff" />
+ </shape>
+ </clip>
+ </item>
+</layer-list>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/custom_progress_thumb.xml b/packages/MediaComponents/res/drawable/custom_progress_thumb.xml
new file mode 100644
index 0000000..1a35970
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/custom_progress_thumb.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<shape xmlns:android="http://schemas.android.com/apk/res/android"
+ android:shape="oval" >
+ <solid android:color="#ffffff" />
+ <size
+ android:height="@dimen/mcv2_custom_progress_thumb_size"
+ android:width="@dimen/mcv2_custom_progress_thumb_size" />
+</shape>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/ic_arrow_back.xml b/packages/MediaComponents/res/drawable/ic_arrow_back.xml
new file mode 100644
index 0000000..2b5f71e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_arrow_back.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M20,11H7.83l5.59,-5.59L12,4l-8,8 8,8 1.41,-1.41L7.83,13H20v-2z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_aspect_ratio.xml b/packages/MediaComponents/res/drawable/ic_aspect_ratio.xml
new file mode 100644
index 0000000..c6228e6
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_aspect_ratio.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M19,12h-2v3h-3v2h5v-5zM7,9h3L10,7L5,7v5h2L7,9zM21,3L3,3c-1.1,0 -2,0.9 -2,2v14c0,1.1 0.9,2 2,2h18c1.1,0 2,-0.9 2,-2L23,5c0,-1.1 -0.9,-2 -2,-2zM21,19.01L3,19.01L3,4.99h18v14.02z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_audiotrack.xml b/packages/MediaComponents/res/drawable/ic_audiotrack.xml
new file mode 100644
index 0000000..27c12b5
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_audiotrack.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M12,3v9.28c-0.47,-0.17 -0.97,-0.28 -1.5,-0.28C8.01,12 6,14.01 6,16.5S8.01,21 10.5,21c2.31,0 4.2,-1.75 4.45,-4H15V6h4V3h-7z"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_check.xml b/packages/MediaComponents/res/drawable/ic_check.xml
new file mode 100644
index 0000000..32f720b
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_check.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24"
+ android:viewportHeight="24">
+
+ <path
+ android:pathData="M0 0h24v24H0z" />
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M9 16.2L4.8 12l-1.4 1.4L9 19 21 7l-1.4-1.4L9 16.2z" />
+</vector>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/ic_chevron_left.xml b/packages/MediaComponents/res/drawable/ic_chevron_left.xml
new file mode 100644
index 0000000..8336d17
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_chevron_left.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M15.41,7.41L14,6l-6,6 6,6 1.41,-1.41L10.83,12z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_chevron_right.xml b/packages/MediaComponents/res/drawable/ic_chevron_right.xml
new file mode 100644
index 0000000..fb2ce09
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_chevron_right.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M10,6L8.59,7.41 13.17,12l-4.58,4.59L10,18l6,-6z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_default_album_image.xml b/packages/MediaComponents/res/drawable/ic_default_album_image.xml
new file mode 100644
index 0000000..1cee643
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_default_album_image.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="512dp"
+ android:height="512dp"
+ android:viewportWidth="512"
+ android:viewportHeight="512">
+
+ <path
+ android:fillColor="#616161"
+ android:pathData="M 0 0 H 512 V 512 H 0 V 0 Z" />
+ <path
+ android:fillColor="#525252"
+ android:pathData="M256,151v123.14c-6.88-4.02-14.82-6.48-23.33-6.48 c-25.78,0-46.67,20.88-46.67,46.67c0,25.78,20.88,46.67,46.67,46.67s46.67-20.88,46.67-46.67V197.67H326V151H256z" />
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_forward_30.xml b/packages/MediaComponents/res/drawable/ic_forward_30.xml
new file mode 100644
index 0000000..7efdf16
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_forward_30.xml
@@ -0,0 +1,12 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="40dp"
+ android:height="40dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <group>
+ <clip-path android:pathData="M24,24H0V0h24v24z M 0,0" />
+ <path
+ android:pathData="M9.6 13.5h.4c.2 0 .4,-.1.5,-.2s.2,-.2.2,-.4v-.2s-.1,-.1,-.1,-.2,-.1,-.1,-.2,-.1h-.5s-.1.1,-.2.1,-.1.1,-.1.2v.2h-1c0,-.2 0,-.3.1,-.5s.2,-.3.3,-.4.3,-.2.4,-.2.4,-.1.5,-.1c.2 0 .4 0 .6.1s.3.1.5.2.2.2.3.4.1.3.1.5v.3s-.1.2,-.1.3,-.1.2,-.2.2,-.2.1,-.3.2c.2.1.4.2.5.4s.2.4.2.6c0 .2 0 .4,-.1.5s-.2.3,-.3.4,-.3.2,-.5.2,-.4.1,-.6.1c-.2 0,-.4 0,-.5,-.1s-.3,-.1,-.5,-.2,-.2,-.2,-.3,-.4,-.1,-.4,-.1,-.6h.8v.2s.1.1.1.2.1.1.2.1h.5s.1,-.1.2,-.1.1,-.1.1,-.2v-.5s-.1,-.1,-.1,-.2,-.1,-.1,-.2,-.1h-.6v-.7zm5.7.7c0 .3 0 .6,-.1.8l-.3.6s-.3.3,-.5.3,-.4.1,-.6.1,-.4 0,-.6,-.1,-.3,-.2,-.5,-.3,-.2,-.3,-.3,-.6,-.1,-.5,-.1,-.8v-.7c0,-.3 0,-.6.1,-.8l.3,-.6s.3,-.3.5,-.3.4,-.1.6,-.1.4 0 .6.1.3.2.5.3.2.3.3.6.1.5.1.8v.7zm-.9,-.8v-.5s-.1,-.2,-.1,-.3,-.1,-.1,-.2,-.2,-.2,-.1,-.3,-.1,-.2 0,-.3.1l-.2.2s-.1.2,-.1.3v2s.1.2.1.3.1.1.2.2.2.1.3.1.2 0 .3,-.1l.2,-.2s.1,-.2.1,-.3v-1.5zM4 13c0 4.4 3.6 8 8 8s8,-3.6 8,-8h-2c0 3.3,-2.7 6,-6 6s-6,-2.7,-6,-6 2.7,-6 6,-6v4l5,-5,-5,-5v4c-4.4 0,-8 3.6,-8 8z"
+ android:fillColor="#FFFFFF"/>
+ </group>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_fullscreen.xml b/packages/MediaComponents/res/drawable/ic_fullscreen.xml
new file mode 100644
index 0000000..4b4f6bc
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_fullscreen.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M7,14L5,14v5h5v-2L7,17v-3zM5,10h2L7,7h3L10,5L5,5v5zM17,17h-3v2h5v-5h-2v3zM14,5v2h3v3h2L19,5h-5z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_fullscreen_exit.xml b/packages/MediaComponents/res/drawable/ic_fullscreen_exit.xml
new file mode 100644
index 0000000..bc204e2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_fullscreen_exit.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M5,16h3v3h2v-5L5,14v2zM8,8L5,8v2h5L10,5L8,5v3zM14,19h2v-3h3v-2h-5v5zM16,8L16,5h-2v5h5L19,8h-3z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_help.xml b/packages/MediaComponents/res/drawable/ic_help.xml
new file mode 100644
index 0000000..4d1d75d
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_help.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="utf-8"?>
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24"
+ android:viewportHeight="24">
+
+ <path
+ android:pathData="M0 0h24v24H0z" />
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm1
+17h-2v-2h2v2zm2.07-7.75l-.9 .92 C13.45 12.9 13 13.5 13 15h-2v-.5c0-1.1 .45 -2.1
+1.17-2.83l1.24-1.26c.37-.36 .59 -.86 .59 -1.41 0-1.1-.9-2-2-2s-2 .9-2 2H8c0-2.21
+1.79-4 4-4s4 1.79 4 4c0 .88-.36 1.68-.93 2.25z" />
+</vector>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/ic_high_quality.xml b/packages/MediaComponents/res/drawable/ic_high_quality.xml
new file mode 100644
index 0000000..f76e22f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_high_quality.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="34dp"
+ android:height="34dp"
+ android:viewportWidth="24"
+ android:viewportHeight="24">
+
+ <path
+ android:pathData="M0 0h24v24H0z" />
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M19 4H5c-1.11 0-2 0.9-2 2v12c0 1.1 0.89 2 2 2h14c1.1 0 2-0.9 2-2V6c0-1.1-0.9-2-2-2zm-8 11H9.5v-2h-2v2H6V9h1.5v2.5h2V9H11v6zm7-1c0 0.55-0.45 1-1 1h-0.75v1.5h-1.5V15H14c-0.55 0-1-0.45-1-1v-4c0-0.55 0.45 -1 1-1h3c0.55 0 1 0.45 1 1v4zm-3.5-0.5h2v-3h-2v3z" />
+</vector>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/ic_launch.xml b/packages/MediaComponents/res/drawable/ic_launch.xml
new file mode 100644
index 0000000..f7af6aa
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_launch.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M19,19H5V5h7V3H5c-1.11,0 -2,0.9 -2,2v14c0,1.1 0.89,2 2,2h14c1.1,0 2,-0.9 2,-2v-7h-2v7zM14,3v2h3.59l-9.83,9.83 1.41,1.41L19,6.41V10h2V3h-7z"/>
+</vector>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/ic_mute.xml b/packages/MediaComponents/res/drawable/ic_mute.xml
new file mode 100644
index 0000000..560aaec
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_mute.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M16.5,12c0,-1.77 -1.02,-3.29 -2.5,-4.03v2.21l2.45,2.45c0.03,-0.2 0.05,-0.41 0.05,-0.63zM19,12c0,0.94 -0.2,1.82 -0.54,2.64l1.51,1.51C20.63,14.91 21,13.5 21,12c0,-4.28 -2.99,-7.86 -7,-8.77v2.06c2.89,0.86 5,3.54 5,6.71zM4.27,3L3,4.27 7.73,9L3,9v6h4l5,5v-6.73l4.25,4.25c-0.67,0.52 -1.42,0.93 -2.25,1.18v2.06c1.38,-0.31 2.63,-0.95 3.69,-1.81L19.73,21 21,19.73l-9,-9L4.27,3zM12,4L9.91,6.09 12,8.18L12,4z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_pause_circle_filled.xml b/packages/MediaComponents/res/drawable/ic_pause_circle_filled.xml
new file mode 100644
index 0000000..73be228
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_pause_circle_filled.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="40dp"
+ android:height="40dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M12,2C6.48,2 2,6.48 2,12s4.48,10 10,10 10,-4.48 10,-10S17.52,2 12,2zM11,16L9,16L9,8h2v8zM15,16h-2L13,8h2v8z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_play_circle_filled.xml b/packages/MediaComponents/res/drawable/ic_play_circle_filled.xml
new file mode 100644
index 0000000..9d39def
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_play_circle_filled.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="40dp"
+ android:height="40dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M12,2C6.48,2 2,6.48 2,12s4.48,10 10,10 10,-4.48 10,-10S17.52,2 12,2zM10,16.5v-9l6,4.5 -6,4.5z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_replay_circle_filled.xml b/packages/MediaComponents/res/drawable/ic_replay_circle_filled.xml
new file mode 100644
index 0000000..a56d5d9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_replay_circle_filled.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="utf-8"?>
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="40dp"
+ android:height="40dp"
+ android:viewportWidth="24"
+ android:viewportHeight="24">
+
+ <path
+ android:pathData="M0,0h24v24H0V0z" />
+ <path
+ android:fillColor="#FFFFFF"
+ android:fillType="evenOdd"
+ android:pathData="M12,2C6.48,2,2,6.48,2,12c0,5.52,4.48,10,10,10c5.52,0,10-4.48,10-10
+C22,6.48,17.52,2,12,2z
+M18,12c0,3.31-2.69,6-6,6c-3.31,0-6-2.69-6-6h2c0,2.21,1.79,4,4,4s4-1.79,4-4s-1.79-4-4-4v3L8,7l4-4v3
+C15.31,6,18,8.69,18,12z" />
+</vector>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/ic_rewind_10.xml b/packages/MediaComponents/res/drawable/ic_rewind_10.xml
new file mode 100644
index 0000000..ae586b4
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_rewind_10.xml
@@ -0,0 +1,12 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="40dp"
+ android:height="40dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <group>
+ <clip-path android:pathData="M0,0h24v24H0V0z M 0,0" />
+ <path
+ android:pathData="M12 5V1L7 6l5 5V7c3.3 0 6 2.7 6 6s-2.7 6,-6 6,-6,-2.7,-6,-6H4c0 4.4 3.6 8 8 8s8,-3.6 8,-8,-3.6,-8,-8,-8zm-1.1 11H10v-3.3L9 13v-.7l1.8,-.6h.1V16zm4.3,-1.8c0 .3 0 .6,-.1.8l-.3.6s-.3.3,-.5.3,-.4.1,-.6.1,-.4 0,-.6,-.1,-.3,-.2,-.5,-.3,-.2,-.3,-.3,-.6,-.1,-.5,-.1,-.8v-.7c0,-.3 0,-.6.1,-.8l.3,-.6s.3,-.3.5,-.3.4,-.1.6,-.1.4 0 .6.1c.2.1.3.2.5.3s.2.3.3.6.1.5.1.8v.7zm-.9,-.8v-.5s-.1,-.2,-.1,-.3,-.1,-.1,-.2,-.2,-.2,-.1,-.3,-.1,-.2 0,-.3.1l-.2.2s-.1.2,-.1.3v2s.1.2.1.3.1.1.2.2.2.1.3.1.2 0 .3,-.1l.2,-.2s.1,-.2.1,-.3v-1.5z"
+ android:fillColor="#FFFFFF"/>
+ </group>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_sd.xml b/packages/MediaComponents/res/drawable/ic_sd.xml
new file mode 100644
index 0000000..decb6d2
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_sd.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="utf-8"?>
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24"
+ android:viewportHeight="24">
+
+ <path
+ android:pathData="M0,0h24v24H0V0z" />
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M19,3H5C3.89,3,3,3.9,3,5v14c0,1.1,0.89,2,2,2h14c1.1,0,2-0.9,2-2V5C21,3.9,20.1,3,19,3z
+M13,9h4c0.55,0,1,0.45,1,1v4 c0,0.55-0.45,1-1,1h-4V9z
+M9.5,13.5v-1H7c-0.55,0-1-0.45-1-1V10c0-0.55,0.45-1,1-1h3c0.55,0,1,0.45,1,1v1H9.5v-0.5h-2v1H10
+c0.55,0,1,0.45,1,1V14c0,0.55-0.45,1-1,1H7c-0.55,0-1-0.45-1-1v-1h1.5v0.5H9.5z
+M14.5,13.5h2v-3h-2V13.5z" />
+</vector>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/ic_settings.xml b/packages/MediaComponents/res/drawable/ic_settings.xml
new file mode 100644
index 0000000..a59ecc1
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_settings.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M19.43,12.98c0.04,-0.32 0.07,-0.64 0.07,-0.98s-0.03,-0.66 -0.07,-0.98l2.11,-1.65c0.19,-0.15 0.24,-0.42 0.12,-0.64l-2,-3.46c-0.12,-0.22 -0.39,-0.3 -0.61,-0.22l-2.49,1c-0.52,-0.4 -1.08,-0.73 -1.69,-0.98l-0.38,-2.65C14.46,2.18 14.25,2 14,2h-4c-0.25,0 -0.46,0.18 -0.49,0.42l-0.38,2.65c-0.61,0.25 -1.17,0.59 -1.69,0.98l-2.49,-1c-0.23,-0.09 -0.49,0 -0.61,0.22l-2,3.46c-0.13,0.22 -0.07,0.49 0.12,0.64l2.11,1.65c-0.04,0.32 -0.07,0.65 -0.07,0.98s0.03,0.66 0.07,0.98l-2.11,1.65c-0.19,0.15 -0.24,0.42 -0.12,0.64l2,3.46c0.12,0.22 0.39,0.3 0.61,0.22l2.49,-1c0.52,0.4 1.08,0.73 1.69,0.98l0.38,2.65c0.03,0.24 0.24,0.42 0.49,0.42h4c0.25,0 0.46,-0.18 0.49,-0.42l0.38,-2.65c0.61,-0.25 1.17,-0.59 1.69,-0.98l2.49,1c0.23,0.09 0.49,0 0.61,-0.22l2,-3.46c0.12,-0.22 0.07,-0.49 -0.12,-0.64l-2.11,-1.65zM12,15.5c-1.93,0 -3.5,-1.57 -3.5,-3.5s1.57,-3.5 3.5,-3.5 3.5,1.57 3.5,3.5 -1.57,3.5 -3.5,3.5z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_skip_next.xml b/packages/MediaComponents/res/drawable/ic_skip_next.xml
new file mode 100644
index 0000000..b1f2812
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_skip_next.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="40dp"
+ android:height="40dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M6,18l8.5,-6L6,6v12zM16,6v12h2V6h-2z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_skip_previous.xml b/packages/MediaComponents/res/drawable/ic_skip_previous.xml
new file mode 100644
index 0000000..81da314
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_skip_previous.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="40dp"
+ android:height="40dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M6,6h2v12L6,18zM9.5,12l8.5,6L18,6z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/ic_subtitle_off.xml b/packages/MediaComponents/res/drawable/ic_subtitle_off.xml
new file mode 100644
index 0000000..c0a727a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_subtitle_off.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24"
+ android:viewportHeight="24">
+
+ <path
+ android:pathData="M0,0h24v24H0V0z" />
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M19.5,5.5v13h-15v-13H19.5z M19,4H5C3.89,4,3,4.9,3,6v12c0,1.1,0.89,2,2,2h14c1.1,0,2-0.9,2-2V6C21,4.9,20.1,4,19,4L19,4z" />
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M11,11H9.5v-0.5h-2v3h2V13H11v1c0,0.55-0.45,1-1,1H7c-0.55,0-1-0.45-1-1v-4c0-0.55,0.45-1,1-1h3c0.55,0,1,0.45,1,1V11z" />
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M18,11h-1.5v-0.5h-2v3h2V13H18v1c0,0.55-0.45,1-1,1h-3c-0.55,0-1-0.45-1-1v-4c0-0.55,0.45-1,1-1h3c0.55,0,1,0.45,1,1V11z" />
+</vector>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/ic_subtitle_on.xml b/packages/MediaComponents/res/drawable/ic_subtitle_on.xml
new file mode 100644
index 0000000..7c91c06
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_subtitle_on.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24"
+ android:viewportHeight="24">
+
+ <path
+ android:pathData="M0 0h24v24H0z" />
+ <path
+ android:fillColor="#FFFFFF"
+ android:pathData="M19 4H5c-1.11 0-2 0.9-2 2v12c0 1.1 0.89 2 2 2h14c1.1 0 2-0.9 2-2V6c0-1.1-0.9-2-2-2zm-8 7H9.5v-0.5h-2v3h2V13H11v1c0 0.55-0.45 1-1 1H7c-0.55 0-1-0.45-1-1v-4c0-0.55 0.45 -1 1-1h3c0.55 0 1 0.45 1 1v1zm7 0h-1.5v-0.5h-2v3h2V13H18v1c0 0.55-0.45 1-1 1h-3c-0.55 0-1-0.45-1-1v-4c0-0.55 0.45 -1 1-1h3c0.55 0 1 0.45 1 1v1z" />
+</vector>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/ic_unmute.xml b/packages/MediaComponents/res/drawable/ic_unmute.xml
new file mode 100644
index 0000000..9dfb2b9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/ic_unmute.xml
@@ -0,0 +1,9 @@
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+ android:width="24dp"
+ android:height="24dp"
+ android:viewportWidth="24.0"
+ android:viewportHeight="24.0">
+ <path
+ android:pathData="M3,9v6h4l5,5L12,4L7,9L3,9zM16.5,12c0,-1.77 -1.02,-3.29 -2.5,-4.03v8.05c1.48,-0.73 2.5,-2.25 2.5,-4.02zM14,3.23v2.06c2.89,0.86 5,3.54 5,6.71s-2.11,5.85 -5,6.71v2.06c4.01,-0.91 7,-4.49 7,-8.77s-2.99,-7.86 -7,-8.77z"
+ android:fillColor="#FFFFFF"/>
+</vector>
diff --git a/packages/MediaComponents/res/drawable/mr_button_connected_dark.xml b/packages/MediaComponents/res/drawable/mr_button_connected_dark.xml
new file mode 100644
index 0000000..110ff13
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_button_connected_dark.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<animation-list
+ xmlns:android="http://schemas.android.com/apk/res/android"
+ android:oneshot="true">
+ <item android:drawable="@drawable/ic_mr_button_connected_00_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_01_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_02_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_03_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_04_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_05_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_06_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_07_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_08_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_09_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_10_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_11_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_12_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_13_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_14_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_15_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_16_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_17_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_18_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_19_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_20_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_21_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_22_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_23_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_24_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_25_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_26_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_27_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_28_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_29_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_30_dark" android:duration="42" />
+</animation-list>
diff --git a/packages/MediaComponents/res/drawable/mr_button_connected_light.xml b/packages/MediaComponents/res/drawable/mr_button_connected_light.xml
new file mode 100644
index 0000000..bcfc7fe
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_button_connected_light.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<animation-list
+ xmlns:android="http://schemas.android.com/apk/res/android"
+ android:oneshot="true">
+ <item android:drawable="@drawable/ic_mr_button_connected_00_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_01_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_02_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_03_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_04_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_05_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_06_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_07_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_08_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_09_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_10_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_11_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_12_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_13_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_14_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_15_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_16_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_17_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_18_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_19_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_20_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_21_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_22_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_23_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_24_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_25_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_26_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_27_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_28_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_29_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connected_30_light" android:duration="42" />
+</animation-list>
diff --git a/packages/MediaComponents/res/drawable/mr_button_connecting_dark.xml b/packages/MediaComponents/res/drawable/mr_button_connecting_dark.xml
new file mode 100644
index 0000000..55af7b3
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_button_connecting_dark.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<animation-list
+ xmlns:android="http://schemas.android.com/apk/res/android"
+ android:oneshot="false">
+ <item android:drawable="@drawable/ic_mr_button_connecting_00_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_01_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_02_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_03_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_04_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_05_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_06_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_07_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_08_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_09_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_10_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_11_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_12_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_13_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_14_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_15_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_16_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_17_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_18_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_19_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_20_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_21_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_22_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_23_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_24_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_25_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_26_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_27_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_28_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_29_dark" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_30_dark" android:duration="42" />
+</animation-list>
diff --git a/packages/MediaComponents/res/drawable/mr_button_connecting_light.xml b/packages/MediaComponents/res/drawable/mr_button_connecting_light.xml
new file mode 100644
index 0000000..93b4170
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_button_connecting_light.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<animation-list
+ xmlns:android="http://schemas.android.com/apk/res/android"
+ android:oneshot="false">
+ <item android:drawable="@drawable/ic_mr_button_connecting_00_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_01_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_02_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_03_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_04_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_05_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_06_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_07_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_08_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_09_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_10_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_11_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_12_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_13_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_14_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_15_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_16_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_17_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_18_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_19_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_20_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_21_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_22_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_23_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_24_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_25_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_26_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_27_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_28_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_29_light" android:duration="42" />
+ <item android:drawable="@drawable/ic_mr_button_connecting_30_light" android:duration="42" />
+</animation-list>
diff --git a/packages/MediaComponents/res/drawable/mr_button_dark.xml b/packages/MediaComponents/res/drawable/mr_button_dark.xml
new file mode 100644
index 0000000..8f1dfaa
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_button_dark.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item android:state_checked="true" android:state_enabled="true"
+ android:drawable="@drawable/mr_button_connected_dark" />
+ <item android:state_checkable="true" android:state_enabled="true"
+ android:drawable="@drawable/mr_button_connecting_dark" />
+ <item android:state_enabled="true"
+ android:drawable="@drawable/ic_mr_button_disconnected_dark" />
+ <item android:drawable="@drawable/ic_mr_button_disabled_dark" />
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_button_light.xml b/packages/MediaComponents/res/drawable/mr_button_light.xml
new file mode 100644
index 0000000..1d3d84e
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_button_light.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item android:state_checked="true" android:state_enabled="true"
+ android:drawable="@drawable/mr_button_connected_light" />
+ <item android:state_checkable="true" android:state_enabled="true"
+ android:drawable="@drawable/mr_button_connecting_light" />
+ <item android:state_enabled="true"
+ android:drawable="@drawable/ic_mr_button_disconnected_light" />
+ <item android:drawable="@drawable/ic_mr_button_disabled_light" />
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_dialog_close_dark.xml b/packages/MediaComponents/res/drawable/mr_dialog_close_dark.xml
new file mode 100644
index 0000000..288c8c7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_dialog_close_dark.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item android:drawable="@drawable/ic_dialog_close_dark" />
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_dialog_close_light.xml b/packages/MediaComponents/res/drawable/mr_dialog_close_light.xml
new file mode 100644
index 0000000..cd50e0f
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_dialog_close_light.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item>
+ <bitmap
+ android:src="@drawable/ic_dialog_close_light"
+ android:alpha="0.87" />
+ </item>
+
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_dialog_material_background_dark.xml b/packages/MediaComponents/res/drawable/mr_dialog_material_background_dark.xml
new file mode 100644
index 0000000..ebc7eca
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_dialog_material_background_dark.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- This is a copy of @drawable/abc_dialog_material_background_dark, minus the inset,
+     which adds unnecessary padding. -->
+<shape xmlns:android="http://schemas.android.com/apk/res/android"
+ android:shape="rectangle">
+ <corners android:radius="2dp" />
+ <solid android:color="@color/background_floating_material_dark" />
+</shape>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/mr_dialog_material_background_light.xml b/packages/MediaComponents/res/drawable/mr_dialog_material_background_light.xml
new file mode 100644
index 0000000..c1b235a
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_dialog_material_background_light.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- This is a copy of @drawable/abc_dialog_material_background_light, minus the inset,
+     which adds unnecessary padding. -->
+<shape xmlns:android="http://schemas.android.com/apk/res/android"
+ android:shape="rectangle">
+ <corners android:radius="2dp" />
+ <solid android:color="@color/background_floating_material_light" />
+</shape>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/drawable/mr_group_collapse.xml b/packages/MediaComponents/res/drawable/mr_group_collapse.xml
new file mode 100644
index 0000000..8f72bc8
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_group_collapse.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<animation-list xmlns:android="http://schemas.android.com/apk/res/android"
+ android:oneshot="true">
+ <item android:drawable="@drawable/ic_group_collapse_00" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_01" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_02" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_03" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_04" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_05" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_06" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_07" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_08" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_09" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_10" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_11" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_12" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_13" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_14" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_collapse_15" android:duration="13" />
+</animation-list>
diff --git a/packages/MediaComponents/res/drawable/mr_group_expand.xml b/packages/MediaComponents/res/drawable/mr_group_expand.xml
new file mode 100644
index 0000000..6b3fdb6
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_group_expand.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<animation-list xmlns:android="http://schemas.android.com/apk/res/android"
+ android:oneshot="true">
+ <item android:drawable="@drawable/ic_group_expand_00" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_01" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_02" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_03" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_04" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_05" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_06" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_07" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_08" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_09" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_10" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_11" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_12" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_13" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_14" android:duration="13" />
+ <item android:drawable="@drawable/ic_group_expand_15" android:duration="13" />
+</animation-list>
diff --git a/packages/MediaComponents/res/drawable/mr_media_pause_dark.xml b/packages/MediaComponents/res/drawable/mr_media_pause_dark.xml
new file mode 100644
index 0000000..86218a7
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_media_pause_dark.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item android:drawable="@drawable/ic_media_pause_dark" />
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_media_pause_light.xml b/packages/MediaComponents/res/drawable/mr_media_pause_light.xml
new file mode 100644
index 0000000..2dd1f02
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_media_pause_light.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item>
+ <bitmap
+ android:src="@drawable/ic_media_pause_light"
+ android:alpha="0.87" />
+ </item>
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_media_play_dark.xml b/packages/MediaComponents/res/drawable/mr_media_play_dark.xml
new file mode 100644
index 0000000..9d45a33
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_media_play_dark.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item android:drawable="@drawable/ic_media_play_dark" />
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_media_play_light.xml b/packages/MediaComponents/res/drawable/mr_media_play_light.xml
new file mode 100644
index 0000000..f1fb7a6
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_media_play_light.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item>
+ <bitmap
+ android:src="@drawable/ic_media_play_light"
+ android:alpha="0.87" />
+ </item>
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_media_stop_dark.xml b/packages/MediaComponents/res/drawable/mr_media_stop_dark.xml
new file mode 100644
index 0000000..3e108a9
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_media_stop_dark.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item android:drawable="@drawable/ic_media_stop_dark" />
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_media_stop_light.xml b/packages/MediaComponents/res/drawable/mr_media_stop_light.xml
new file mode 100644
index 0000000..b2c6ce8
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_media_stop_light.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item>
+ <bitmap
+ android:src="@drawable/ic_media_stop_light"
+ android:alpha="0.87" />
+ </item>
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_vol_type_audiotrack_dark.xml b/packages/MediaComponents/res/drawable/mr_vol_type_audiotrack_dark.xml
new file mode 100644
index 0000000..44f4fd6
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_vol_type_audiotrack_dark.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item android:drawable="@drawable/ic_audiotrack_dark" />
+</selector>
diff --git a/packages/MediaComponents/res/drawable/mr_vol_type_audiotrack_light.xml b/packages/MediaComponents/res/drawable/mr_vol_type_audiotrack_light.xml
new file mode 100644
index 0000000..5c9dbc0
--- /dev/null
+++ b/packages/MediaComponents/res/drawable/mr_vol_type_audiotrack_light.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<selector xmlns:android="http://schemas.android.com/apk/res/android">
+ <item>
+ <bitmap
+ android:src="@drawable/ic_audiotrack_light"
+ android:alpha="0.87" />
+ </item>
+</selector>
diff --git a/packages/MediaComponents/res/interpolator/mr_fast_out_slow_in.xml b/packages/MediaComponents/res/interpolator/mr_fast_out_slow_in.xml
new file mode 100644
index 0000000..6b6a171
--- /dev/null
+++ b/packages/MediaComponents/res/interpolator/mr_fast_out_slow_in.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ ~ Copyright 2018 The Android Open Source Project
+ ~
+ ~ Licensed under the Apache License, Version 2.0 (the "License");
+ ~ you may not use this file except in compliance with the License.
+ ~ You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License
+ -->
+
+<pathInterpolator xmlns:android="http://schemas.android.com/apk/res/android"
+ android:controlX1="0.4"
+ android:controlY1="0"
+ android:controlX2="0.2"
+ android:controlY2="1"/>
diff --git a/packages/MediaComponents/res/interpolator/mr_linear_out_slow_in.xml b/packages/MediaComponents/res/interpolator/mr_linear_out_slow_in.xml
new file mode 100644
index 0000000..20bf298
--- /dev/null
+++ b/packages/MediaComponents/res/interpolator/mr_linear_out_slow_in.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ ~ Copyright 2018 The Android Open Source Project
+ ~
+ ~ Licensed under the Apache License, Version 2.0 (the "License");
+ ~ you may not use this file except in compliance with the License.
+ ~ You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License
+ -->
+
+<pathInterpolator xmlns:android="http://schemas.android.com/apk/res/android"
+ android:controlX1="0"
+ android:controlY1="0"
+ android:controlX2="0.2"
+ android:controlY2="1"/>
diff --git a/packages/MediaComponents/res/layout/embedded_music.xml b/packages/MediaComponents/res/layout/embedded_music.xml
new file mode 100644
index 0000000..3e4d365
--- /dev/null
+++ b/packages/MediaComponents/res/layout/embedded_music.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:orientation="vertical">
+
+ <View
+ android:layout_width="match_parent"
+ android:layout_height="0dp"
+ android:layout_weight="0.25"/>
+
+ <ImageView
+ android:id="@+id/album"
+ android:layout_width="match_parent"
+ android:layout_height="0dp"
+ android:layout_weight="0.5"
+ android:scaleType="fitCenter"
+ android:src="@drawable/ic_default_album_image" />
+
+ <View
+ android:layout_width="match_parent"
+ android:layout_height="0dp"
+ android:layout_weight="0.25"/>
+</LinearLayout>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/layout/embedded_settings_list_item.xml b/packages/MediaComponents/res/layout/embedded_settings_list_item.xml
new file mode 100644
index 0000000..1156dca
--- /dev/null
+++ b/packages/MediaComponents/res/layout/embedded_settings_list_item.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_embedded_settings_height"
+ android:orientation="horizontal"
+ android:background="@color/black_opacity_70">
+
+ <LinearLayout
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_embedded_settings_height"
+ android:gravity="center"
+ android:orientation="horizontal">
+
+ <ImageView
+ android:id="@+id/icon"
+ android:layout_width="@dimen/mcv2_embedded_settings_icon_size"
+ android:layout_height="@dimen/mcv2_embedded_settings_icon_size"
+ android:layout_margin="8dp"
+ android:gravity="center" />
+ </LinearLayout>
+
+ <RelativeLayout
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_embedded_settings_height"
+ android:gravity="center"
+ android:orientation="vertical">
+
+ <TextView
+ android:id="@+id/main_text"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_embedded_settings_text_height"
+ android:gravity="center"
+ android:paddingLeft="2dp"
+ android:textColor="@color/white"
+ android:textSize="@dimen/mcv2_embedded_settings_main_text_size"/>
+
+ <TextView
+ android:id="@+id/sub_text"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_embedded_settings_text_height"
+ android:layout_below="@id/main_text"
+ android:gravity="center"
+ android:paddingLeft="2dp"
+ android:textColor="@color/white_opacity_70"
+ android:textSize="@dimen/mcv2_embedded_settings_sub_text_size"/>
+ </RelativeLayout>
+</LinearLayout>
+
diff --git a/packages/MediaComponents/res/layout/embedded_sub_settings_list_item.xml b/packages/MediaComponents/res/layout/embedded_sub_settings_list_item.xml
new file mode 100644
index 0000000..5947a72
--- /dev/null
+++ b/packages/MediaComponents/res/layout/embedded_sub_settings_list_item.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_embedded_settings_height"
+ android:orientation="horizontal"
+ android:background="@color/black_opacity_70">
+
+ <LinearLayout
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_embedded_settings_height"
+ android:gravity="center"
+ android:orientation="horizontal">
+
+ <ImageView
+ android:id="@+id/check"
+ android:layout_width="@dimen/mcv2_embedded_settings_icon_size"
+ android:layout_height="@dimen/mcv2_embedded_settings_icon_size"
+ android:layout_margin="8dp"
+ android:gravity="center"
+ android:src="@drawable/ic_check"/>
+ </LinearLayout>
+
+ <RelativeLayout
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_embedded_settings_height"
+ android:gravity="center"
+ android:orientation="vertical">
+
+ <TextView
+ android:id="@+id/text"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_embedded_settings_text_height"
+ android:gravity="center"
+ android:paddingLeft="2dp"
+ android:textColor="@color/white"
+ android:textSize="@dimen/mcv2_embedded_settings_main_text_size"/>
+ </RelativeLayout>
+</LinearLayout>
diff --git a/packages/MediaComponents/res/layout/embedded_transport_controls.xml b/packages/MediaComponents/res/layout/embedded_transport_controls.xml
new file mode 100644
index 0000000..a3a5957
--- /dev/null
+++ b/packages/MediaComponents/res/layout/embedded_transport_controls.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:gravity="center"
+ android:orientation="horizontal"
+ android:paddingLeft="@dimen/mcv2_transport_controls_padding"
+ android:paddingRight="@dimen/mcv2_transport_controls_padding"
+ android:visibility="visible">
+
+ <ImageButton android:id="@+id/prev" style="@style/EmbeddedTransportControlsButton.Previous" />
+ <ImageButton android:id="@+id/rew" style="@style/EmbeddedTransportControlsButton.Rew" />
+ <ImageButton android:id="@+id/pause" style="@style/EmbeddedTransportControlsButton.Pause" />
+ <ImageButton android:id="@+id/ffwd" style="@style/EmbeddedTransportControlsButton.Ffwd" />
+ <ImageButton android:id="@+id/next" style="@style/EmbeddedTransportControlsButton.Next" />
+</LinearLayout>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/layout/full_landscape_music.xml b/packages/MediaComponents/res/layout/full_landscape_music.xml
new file mode 100644
index 0000000..8ce7058
--- /dev/null
+++ b/packages/MediaComponents/res/layout/full_landscape_music.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:background="#B300FF00"
+ android:orientation="horizontal">
+
+ <LinearLayout
+ android:id="@+id/music_image"
+ style="@style/FullMusicLandscape.Image">
+
+ <ImageView
+ android:id="@+id/album"
+ android:layout_width="@dimen/mcv2_full_album_image_landscape_size"
+ android:layout_height="@dimen/mcv2_full_album_image_landscape_size"
+ android:src="@drawable/ic_default_album_image"/>
+ </LinearLayout>
+
+ <LinearLayout
+ android:id="@+id/music_text"
+ style="@style/FullMusicLandscape.Text">
+
+ <TextView
+ android:id="@+id/title"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:text="@string/mcv2_music_title_unknown_text"
+ android:textSize="20sp"
+ android:textStyle="bold"
+ android:textColor="#FFFFFF" />
+ <TextView
+ android:id="@+id/artist"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:text="@string/mcv2_music_artist_unknown_text"
+ android:textSize="16sp"
+ android:textColor="#BBBBBB" />
+ </LinearLayout>
+</LinearLayout>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/layout/full_portrait_music.xml b/packages/MediaComponents/res/layout/full_portrait_music.xml
new file mode 100644
index 0000000..75f1bb3
--- /dev/null
+++ b/packages/MediaComponents/res/layout/full_portrait_music.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:orientation="vertical">
+
+ <LinearLayout
+ android:id="@+id/music_image"
+ style="@style/FullMusicPortrait.Image">
+
+ <ImageView
+ android:id="@+id/album"
+ android:layout_width="@dimen/mcv2_full_album_image_portrait_size"
+ android:layout_height="@dimen/mcv2_full_album_image_portrait_size"
+ android:src="@drawable/ic_default_album_image"/>
+ </LinearLayout>
+
+ <LinearLayout
+ android:id="@+id/music_text"
+ style="@style/FullMusicPortrait.Text">
+
+ <TextView
+ android:id="@+id/title"
+ android:layout_width="@dimen/mcv2_full_album_image_portrait_size"
+ android:layout_height="wrap_content"
+ android:text="@string/mcv2_music_title_unknown_text"
+ android:textSize="20sp"
+ android:textStyle="bold"
+ android:textColor="#FFFFFF" />
+ <TextView
+ android:id="@+id/artist"
+ android:layout_width="@dimen/mcv2_full_album_image_portrait_size"
+ android:layout_height="wrap_content"
+ android:text="@string/mcv2_music_artist_unknown_text"
+ android:textSize="16sp"
+ android:textColor="#BBBBBB" />
+ </LinearLayout>
+</LinearLayout>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/layout/full_settings_list_item.xml b/packages/MediaComponents/res/layout/full_settings_list_item.xml
new file mode 100644
index 0000000..f92ea5e
--- /dev/null
+++ b/packages/MediaComponents/res/layout/full_settings_list_item.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_full_settings_height"
+ android:orientation="horizontal"
+ android:background="@color/black_opacity_70">
+
+ <LinearLayout
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_full_settings_height"
+ android:gravity="center"
+ android:orientation="horizontal">
+
+ <ImageView
+ android:id="@+id/icon"
+ android:layout_width="@dimen/mcv2_full_settings_icon_size"
+ android:layout_height="@dimen/mcv2_full_settings_icon_size"
+ android:layout_margin="8dp"
+ android:gravity="center"/>
+ </LinearLayout>
+
+ <RelativeLayout
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_full_settings_height"
+ android:gravity="center"
+ android:orientation="vertical">
+
+ <TextView
+ android:id="@+id/main_text"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_full_settings_text_height"
+ android:paddingLeft="2dp"
+ android:gravity="center"
+ android:textColor="@color/white"
+ android:textSize="@dimen/mcv2_full_settings_main_text_size"/>
+
+ <TextView
+ android:id="@+id/sub_text"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_full_settings_text_height"
+ android:layout_below="@id/main_text"
+ android:gravity="center"
+ android:paddingLeft="2dp"
+ android:textColor="@color/white_opacity_70"
+ android:textSize="@dimen/mcv2_full_settings_sub_text_size"/>
+ </RelativeLayout>
+</LinearLayout>
diff --git a/packages/MediaComponents/res/layout/full_sub_settings_list_item.xml b/packages/MediaComponents/res/layout/full_sub_settings_list_item.xml
new file mode 100644
index 0000000..49128d0
--- /dev/null
+++ b/packages/MediaComponents/res/layout/full_sub_settings_list_item.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_full_settings_height"
+ android:orientation="horizontal"
+ android:background="@color/black_opacity_70">
+
+ <LinearLayout
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_full_settings_height"
+ android:gravity="center"
+ android:orientation="horizontal">
+
+ <ImageView
+ android:id="@+id/check"
+ android:layout_width="@dimen/mcv2_full_settings_icon_size"
+ android:layout_height="@dimen/mcv2_full_settings_icon_size"
+ android:layout_margin="8dp"
+ android:gravity="center"
+ android:src="@drawable/ic_check"/>
+ </LinearLayout>
+
+ <RelativeLayout
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_full_settings_height"
+ android:gravity="center"
+ android:orientation="vertical">
+
+ <TextView
+ android:id="@+id/text"
+ android:layout_width="wrap_content"
+ android:layout_height="@dimen/mcv2_full_settings_text_height"
+ android:gravity="center"
+ android:paddingLeft="2dp"
+ android:textColor="@color/white"
+ android:textSize="@dimen/mcv2_full_settings_main_text_size"/>
+ </RelativeLayout>
+</LinearLayout>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/layout/full_transport_controls.xml b/packages/MediaComponents/res/layout/full_transport_controls.xml
new file mode 100644
index 0000000..0914785
--- /dev/null
+++ b/packages/MediaComponents/res/layout/full_transport_controls.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:gravity="center"
+ android:orientation="horizontal"
+ android:paddingLeft="@dimen/mcv2_transport_controls_padding"
+ android:paddingRight="@dimen/mcv2_transport_controls_padding"
+ android:visibility="visible">
+
+ <ImageButton android:id="@+id/prev" style="@style/FullTransportControlsButton.Previous" />
+ <ImageButton android:id="@+id/rew" style="@style/FullTransportControlsButton.Rew" />
+ <ImageButton android:id="@+id/pause" style="@style/FullTransportControlsButton.Pause" />
+ <ImageButton android:id="@+id/ffwd" style="@style/FullTransportControlsButton.Ffwd" />
+ <ImageButton android:id="@+id/next" style="@style/FullTransportControlsButton.Next" />
+</LinearLayout>
diff --git a/packages/MediaComponents/res/layout/media_controller.xml b/packages/MediaComponents/res/layout/media_controller.xml
new file mode 100644
index 0000000..4658f04
--- /dev/null
+++ b/packages/MediaComponents/res/layout/media_controller.xml
@@ -0,0 +1,265 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2017 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:orientation="vertical"
+ android:layoutDirection="ltr">
+
+ <RelativeLayout
+ android:id="@+id/title_bar"
+ android:background="@layout/title_bar_gradient"
+ style="@style/TitleBar">
+
+ <LinearLayout
+ android:id="@+id/title_bar_left"
+ android:gravity="center"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_alignParentLeft="true"
+ android:layout_centerVertical="true"
+ android:orientation="horizontal">
+
+ <ImageButton
+ android:id="@+id/back"
+ android:clickable="true"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_alignParentStart="true"
+ android:layout_centerVertical="true"
+ android:paddingLeft="5dip"
+ android:visibility="visible"
+ style="@style/TitleBarButton.Back"/>
+
+ <TextView
+ android:id="@+id/title_text"
+ android:ellipsize="end"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_toRightOf="@id/back"
+ android:layout_centerVertical="true"
+ android:maxLines="1"
+ android:paddingLeft="5dip"
+ android:paddingRight="5dip"
+ android:textSize="15sp"
+ android:textColor="#FFFFFFFF"/>
+ </LinearLayout>
+
+ <LinearLayout
+ android:id="@+id/title_bar_right"
+ android:gravity="center"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_alignParentRight="true"
+ android:layout_centerVertical="true"
+ android:orientation="horizontal">
+
+ <LinearLayout
+ android:id="@+id/ad_external_link"
+ android:clickable="true"
+ android:gravity="center"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_alignParentRight="true"
+ android:layout_centerVertical="true"
+ android:paddingLeft="5dip"
+ android:paddingRight="10dip"
+ android:orientation="horizontal"
+ android:visibility="gone">
+
+ <TextView
+ android:id="@+id/ad_text"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_centerVertical="true"
+ android:paddingRight="5dip"
+ android:text="@string/MediaControlView2_ad_text"
+ android:textSize="10sp"
+ android:textColor="#FFFFFFFF" />
+
+ <ImageButton
+ android:id="@+id/ad_launch"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_centerVertical="true"
+ style="@style/TitleBarButton.Launch" />
+ </LinearLayout>
+
+ <view class="com.android.support.mediarouter.app.MediaRouteButton"
+ android:id="@+id/cast"
+ android:layout_centerVertical="true"
+ android:visibility="gone"
+ android:contentDescription="@string/mr_button_content_description"
+ style="@style/TitleBarButton" />
+ </LinearLayout>
+
+ </RelativeLayout>
+
+ <LinearLayout
+ android:id="@+id/center_view"
+ android:layout_width="match_parent"
+ android:layout_height="0dp"
+ android:layout_weight="1"
+ android:gravity="center"
+ android:orientation="vertical">
+ </LinearLayout>
+
+ <LinearLayout
+ android:id="@+id/minimal_extra_view"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:gravity="right">
+
+ <ImageButton
+ android:id="@+id/fullscreen"
+ android:gravity="right"
+ style="@style/BottomBarButton.FullScreen" />
+ </LinearLayout>
+
+ <RelativeLayout
+ android:layout_width="match_parent"
+ android:layout_height="@dimen/mcv2_custom_progress_thumb_size">
+
+ <SeekBar
+ android:id="@+id/progress"
+ android:layout_width="match_parent"
+ android:layout_height="@dimen/mcv2_custom_progress_thumb_size"
+ android:contentDescription="@string/mcv2_seek_bar_desc"
+ android:padding="0dp"
+ android:maxHeight="@dimen/mcv2_custom_progress_max_size"
+ android:minHeight="@dimen/mcv2_custom_progress_max_size"
+ android:elevation="10dp"/>
+
+ <View
+ android:id="@+id/progress_buffer"
+ android:layout_width="match_parent"
+ android:layout_height="@dimen/mcv2_buffer_view_height"
+ android:layout_alignParentBottom="true"
+ android:background="@color/bottom_bar_background"
+ android:elevation="0dp"/>
+ </RelativeLayout>
+
+ <RelativeLayout
+ android:id="@+id/bottom_bar"
+ android:layout_width="match_parent"
+ android:layout_height="44dp"
+ android:orientation="horizontal"
+ android:background="@color/bottom_bar_background">
+
+ <LinearLayout
+ android:id="@+id/bottom_bar_left"
+ android:layout_width="wrap_content"
+ android:layout_height="match_parent"
+ android:layout_alignParentStart="true"
+ android:layout_centerVertical="true">
+
+ <TextView
+ android:id="@+id/ad_skip_time"
+ android:gravity="center"
+ android:layout_width="wrap_content"
+ android:layout_height="match_parent"
+ android:layout_marginLeft="4dp"
+ android:textSize="12sp"
+ android:textColor="#FFFFFF"
+ android:visibility="gone" />
+ </LinearLayout>
+
+ <LinearLayout
+ android:id="@+id/time"
+ android:layout_width="wrap_content"
+ android:layout_height="match_parent"
+ android:layout_toRightOf="@id/bottom_bar_left"
+ android:paddingLeft="10dp"
+ android:paddingRight="10dp"
+ android:gravity="center" >
+
+ <TextView
+ android:id="@+id/time_current"
+ style="@style/TimeText.Current"/>
+ <TextView
+ android:id="@+id/time_interpunct"
+ style="@style/TimeText.Interpunct"/>
+ <TextView
+ android:id="@+id/time_end"
+ style="@style/TimeText.End"/>
+ </LinearLayout>
+
+ <LinearLayout
+ android:id="@+id/bottom_bar_right"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_alignParentEnd="true"
+ android:gravity="right">
+
+ <LinearLayout
+ android:id="@+id/basic_controls"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:gravity="center_vertical"
+ android:orientation="horizontal" >
+
+ <TextView
+ android:id="@+id/ad_remaining"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:gravity="center"
+ android:textSize="12sp"
+ android:textColor="#FFFFFF"
+ android:visibility="gone" />
+
+ <ImageButton
+ android:id="@+id/mute"
+ style="@style/BottomBarButton.Mute" />
+ <ImageButton
+ android:id="@+id/subtitle"
+ android:scaleType="fitCenter"
+ android:visibility="gone"
+ style="@style/BottomBarButton.CC" />
+ <ImageButton
+ android:id="@+id/fullscreen"
+ style="@style/BottomBarButton.FullScreen"/>
+ <ImageButton
+ android:id="@+id/overflow_right"
+ style="@style/BottomBarButton.OverflowRight"/>
+ </LinearLayout>
+
+ <LinearLayout
+ android:id="@+id/extra_controls"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:visibility="gone"
+ android:orientation="horizontal"
+ android:gravity="center_vertical">
+
+ <LinearLayout
+ android:id="@+id/custom_buttons"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content" />
+
+ <ImageButton
+ android:id="@+id/video_quality"
+ style="@style/BottomBarButton.VideoQuality" />
+ <ImageButton
+ android:id="@+id/settings"
+ style="@style/BottomBarButton.Settings" />
+ <ImageButton
+ android:id="@+id/overflow_left"
+ style="@style/BottomBarButton.OverflowLeft"/>
+ </LinearLayout>
+ </LinearLayout>
+ </RelativeLayout>
+</LinearLayout>
diff --git a/packages/MediaComponents/res/layout/minimal_transport_controls.xml b/packages/MediaComponents/res/layout/minimal_transport_controls.xml
new file mode 100644
index 0000000..800c80b
--- /dev/null
+++ b/packages/MediaComponents/res/layout/minimal_transport_controls.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="match_parent"
+ android:layout_height="match_parent"
+ android:gravity="center"
+ android:orientation="horizontal">
+
+ <ImageButton android:id="@+id/pause" style="@style/MinimalTransportControlsButton" />
+</LinearLayout>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/layout/mr_chooser_dialog.xml b/packages/MediaComponents/res/layout/mr_chooser_dialog.xml
new file mode 100644
index 0000000..ee89e16
--- /dev/null
+++ b/packages/MediaComponents/res/layout/mr_chooser_dialog.xml
@@ -0,0 +1,55 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:orientation="vertical">
+ <TextView android:id="@+id/mr_chooser_title"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:paddingLeft="24dp"
+ android:paddingRight="24dp"
+ android:paddingTop="24dp"
+ android:text="@string/mr_chooser_title"
+ android:singleLine="true"
+ android:ellipsize="end"
+ android:textAppearance="@style/TextAppearance.MediaRouter.Title" />
+ <ListView android:id="@+id/mr_chooser_list"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:divider="@android:color/transparent"
+ android:dividerHeight="0dp" />
+ <LinearLayout android:id="@android:id/empty"
+ android:layout_width="fill_parent"
+ android:layout_height="240dp"
+ android:orientation="vertical"
+ android:paddingTop="90dp"
+ android:paddingLeft="16dp"
+ android:paddingRight="16dp"
+ android:visibility="gone">
+ <TextView android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_gravity="center"
+ android:text="@string/mr_chooser_searching"
+ android:textAppearance="@style/TextAppearance.MediaRouter.SecondaryText" />
+ <ProgressBar android:layout_width="150dp"
+ android:layout_height="wrap_content"
+ android:layout_gravity="center"
+ android:indeterminate="true"
+ style="?android:attr/progressBarStyleHorizontal" />
+ </LinearLayout>
+</LinearLayout>
diff --git a/packages/MediaComponents/res/layout/mr_chooser_list_item.xml b/packages/MediaComponents/res/layout/mr_chooser_list_item.xml
new file mode 100644
index 0000000..958879b
--- /dev/null
+++ b/packages/MediaComponents/res/layout/mr_chooser_list_item.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:minHeight="56dp"
+ android:paddingLeft="24dp"
+ android:paddingRight="24dp"
+ android:orientation="horizontal"
+ android:gravity="center_vertical" >
+
+ <ImageView android:id="@+id/mr_chooser_route_icon"
+ android:layout_width="24dp"
+ android:layout_height="24dp"
+ android:layout_marginRight="24dp" />
+
+ <LinearLayout android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:layout_marginBottom="1dp"
+ android:orientation="vertical" >
+
+ <TextView android:id="@+id/mr_chooser_route_name"
+ android:layout_width="fill_parent"
+ android:layout_height="32dp"
+ android:singleLine="true"
+ android:ellipsize="marquee"
+ android:textAppearance="@style/TextAppearance.MediaRouter.PrimaryText" />
+
+ <TextView android:id="@+id/mr_chooser_route_desc"
+ android:layout_width="fill_parent"
+ android:layout_height="24dp"
+ android:singleLine="true"
+ android:ellipsize="marquee"
+ android:textAppearance="@style/TextAppearance.MediaRouter.SecondaryText" />
+ </LinearLayout>
+
+</LinearLayout>
diff --git a/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml b/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
new file mode 100644
index 0000000..b304471
--- /dev/null
+++ b/packages/MediaComponents/res/layout/mr_controller_material_dialog_b.xml
@@ -0,0 +1,206 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:id="@+id/mr_expandable_area"
+ android:layout_width="fill_parent"
+ android:layout_height="fill_parent">
+ <LinearLayout android:id="@+id/mr_dialog_area"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:layout_gravity="center"
+ android:orientation="vertical"
+ android:background="?android:attr/colorBackgroundFloating">
+ <LinearLayout android:id="@+id/mr_title_bar"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:paddingLeft="24dp"
+ android:paddingRight="12dp"
+ android:orientation="horizontal" >
+ <TextView android:id="@+id/mr_name"
+ android:layout_width="0dp"
+ android:layout_height="72dp"
+ android:layout_weight="1"
+ android:gravity="center_vertical"
+ android:singleLine="true"
+ android:ellipsize="end"
+ android:textAppearance="@style/TextAppearance.MediaRouter.Title" />
+ <ImageButton android:id="@+id/mr_close"
+ android:layout_width="48dp"
+ android:layout_height="48dp"
+ android:layout_gravity="center_vertical"
+ android:contentDescription="@string/mr_controller_close_description"
+ android:src="?attr/mediaRouteCloseDrawable"
+ android:background="?android:attr/selectableItemBackgroundBorderless" />
+ </LinearLayout>
+ <FrameLayout android:id="@+id/mr_custom_control"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:visibility="gone" />
+ <FrameLayout android:id="@+id/mr_default_control"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content">
+ <ImageView android:id="@+id/mr_art"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:adjustViewBounds="true"
+ android:scaleType="fitXY"
+ android:background="?android:attr/colorPrimary"
+ android:layout_gravity="top"
+ android:contentDescription="@string/mr_controller_album_art"
+ android:visibility="gone" />
+ <LinearLayout android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:orientation="vertical"
+ android:layout_gravity="bottom"
+ android:splitMotionEvents="false">
+ <LinearLayout android:id="@+id/mr_media_main_control"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:orientation="vertical"
+ android:paddingTop="16dp"
+ android:paddingBottom="16dp"
+ android:layout_gravity="bottom"
+ android:theme="?attr/mediaRouteControlPanelThemeOverlay">
+ <RelativeLayout
+ android:id="@+id/mr_playback_control"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:orientation="horizontal"
+ android:paddingLeft="24dp"
+ android:paddingRight="12dp" >
+ <ImageButton android:id="@+id/mr_control_playback_ctrl"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_marginLeft="12dp"
+ android:layout_alignParentRight="true"
+ android:layout_centerVertical="true"
+ android:contentDescription="@string/mr_controller_play"
+ android:background="?android:attr/selectableItemBackgroundBorderless"
+ android:visibility="gone" />
+ <LinearLayout android:id="@+id/mr_control_title_container"
+ android:orientation="vertical"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:layout_toLeftOf="@id/mr_control_playback_ctrl"
+ android:layout_alignParentLeft="true"
+ android:layout_centerVertical="true">
+ <TextView android:id="@+id/mr_control_title"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:textAppearance="@style/TextAppearance.MediaRouter.PrimaryText"
+ android:singleLine="true" />
+ <TextView android:id="@+id/mr_control_subtitle"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:textAppearance="@style/TextAppearance.MediaRouter.SecondaryText"
+ android:singleLine="true" />
+ </LinearLayout>
+ </RelativeLayout>
+ <View android:id="@+id/mr_control_divider"
+ android:layout_width="fill_parent"
+ android:layout_height="8dp"
+ android:visibility="gone" />
+ <LinearLayout
+ android:id="@+id/mr_volume_control"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:minHeight="48dp"
+ android:gravity="center_vertical"
+ android:paddingLeft="24dp"
+ android:paddingRight="12dp"
+ android:splitMotionEvents="false">
+ <ImageView
+ android:layout_width="24dp"
+ android:layout_height="24dp"
+ android:src="?attr/mediaRouteAudioTrackDrawable"
+ android:gravity="center"
+ android:scaleType="center"/>
+ <!-- Since dialog's top layout mr_expandable_area is clickable, it propagates pressed state
+ to its non-clickable children. Specify android:clickable="true" to prevent volume slider
+ from having false pressed state. -->
+ <com.android.support.mediarouter.app.MediaRouteVolumeSlider
+ android:id="@+id/mr_volume_slider"
+ android:layout_width="0dp"
+ android:layout_height="wrap_content"
+ android:minHeight="48dp"
+ android:maxHeight="48dp"
+ android:layout_weight="1"
+ android:clickable="true"
+ android:contentDescription="@string/mr_controller_volume_slider" />
+ <com.android.support.mediarouter.app.MediaRouteExpandCollapseButton
+ android:id="@+id/mr_group_expand_collapse"
+ android:layout_width="48dp"
+ android:layout_height="48dp"
+ android:padding="12dp"
+ android:background="?android:attr/selectableItemBackgroundBorderless"
+ android:visibility="gone"/>
+ </LinearLayout>
+ </LinearLayout>
+ <com.android.support.mediarouter.app.OverlayListView
+ android:id="@+id/mr_volume_group_list"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:paddingTop="@dimen/mr_controller_volume_group_list_padding_top"
+ android:scrollbarStyle="outsideOverlay"
+ android:clipToPadding="false"
+ android:visibility="gone"
+ android:splitMotionEvents="false"
+ android:theme="?attr/mediaRouteControlPanelThemeOverlay" />
+ </LinearLayout>
+ </FrameLayout>
+ <ScrollView
+ android:id="@+id/buttonPanel"
+ style="?attr/buttonBarStyle"
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:fillViewport="true"
+ android:scrollIndicators="top|bottom">
+ <android.support.v7.widget.ButtonBarLayout
+ android:layout_width="match_parent"
+ android:layout_height="wrap_content"
+ android:gravity="bottom"
+ android:layoutDirection="locale"
+ android:orientation="horizontal"
+ android:paddingBottom="4dp"
+ android:paddingLeft="12dp"
+ android:paddingRight="12dp"
+ android:paddingTop="4dp">
+ <Button
+ android:id="@android:id/button3"
+ style="?android:attr/buttonBarNeutralButtonStyle"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"/>
+ <android.support.v4.widget.Space
+ android:id="@+id/spacer"
+ android:layout_width="0dp"
+ android:layout_height="0dp"
+ android:layout_weight="1"
+ android:visibility="invisible"/>
+ <Button
+ android:id="@android:id/button2"
+ style="?android:attr/buttonBarNegativeButtonStyle"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"/>
+ <Button
+ android:id="@android:id/button1"
+ style="?android:attr/buttonBarPositiveButtonStyle"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"/>
+ </android.support.v7.widget.ButtonBarLayout>
+ </ScrollView>
+ </LinearLayout>
+</FrameLayout>
diff --git a/packages/MediaComponents/res/layout/mr_controller_volume_item.xml b/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
new file mode 100644
index 0000000..a89058b
--- /dev/null
+++ b/packages/MediaComponents/res/layout/mr_controller_volume_item.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content">
+ <LinearLayout android:id="@+id/volume_item_container"
+ android:layout_width="fill_parent"
+ android:layout_height="@dimen/mr_controller_volume_group_list_item_height"
+ android:paddingLeft="24dp"
+ android:paddingRight="60dp"
+ android:paddingBottom="8dp"
+ android:orientation="vertical" >
+ <TextView android:id="@+id/mr_name"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content"
+ android:textAppearance="@style/TextAppearance.MediaRouter.SecondaryText"
+ android:singleLine="true" />
+ <LinearLayout android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:orientation="horizontal"
+ android:gravity="center_vertical">
+ <ImageView android:id="@+id/mr_volume_item_icon"
+ android:layout_width="@dimen/mr_controller_volume_group_list_item_icon_size"
+ android:layout_height="@dimen/mr_controller_volume_group_list_item_icon_size"
+ android:layout_marginTop="8dp"
+ android:layout_marginBottom="8dp"
+ android:scaleType="fitCenter"
+ android:src="?attr/mediaRouteAudioTrackDrawable" />
+        <com.android.support.mediarouter.app.MediaRouteVolumeSlider
+ android:id="@+id/mr_volume_slider"
+ android:layout_width="fill_parent"
+ android:layout_height="40dp"
+ android:minHeight="40dp"
+ android:maxHeight="40dp"
+ android:contentDescription="@string/mr_controller_volume_slider" />
+ </LinearLayout>
+ </LinearLayout>
+</LinearLayout>
diff --git a/packages/MediaComponents/res/layout/settings_list.xml b/packages/MediaComponents/res/layout/settings_list.xml
new file mode 100644
index 0000000..ea30538
--- /dev/null
+++ b/packages/MediaComponents/res/layout/settings_list.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<ListView xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="@dimen/mcv2_embedded_settings_width"
+ android:layout_height="@dimen/mcv2_embedded_settings_height"
+ android:divider="@null"
+ android:dividerHeight="0dp">
+</ListView>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/layout/title_bar_gradient.xml b/packages/MediaComponents/res/layout/title_bar_gradient.xml
new file mode 100644
index 0000000..ab1fc6e
--- /dev/null
+++ b/packages/MediaComponents/res/layout/title_bar_gradient.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<shape xmlns:android="http://schemas.android.com/apk/res/android"
+ android:shape="rectangle">
+ <gradient
+ android:startColor="@color/title_bar_gradient_start"
+ android:endColor="@color/title_bar_gradient_end"
+ android:angle="-270" />
+</shape>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/values-af/strings.xml b/packages/MediaComponents/res/values-af/strings.xml
new file mode 100644
index 0000000..47230ad
--- /dev/null
+++ b/packages/MediaComponents/res/values-af/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Stelsel"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Toestelle"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast-knoppie"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Uitsaai-knoppie. Ontkoppel"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Uitsaai-knoppie. Koppel tans"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Uitsaai-knoppie. Gekoppel"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Saai uit na"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Vind tans toestelle"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Ontkoppel"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Hou op uitsaai"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Maak toe"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Speel"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Laat wag"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stop"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Vou uit"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Vou in"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albumkunswerk"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Volumeglyer"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Geen media is gekies nie"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Geen inligting beskikbaar nie"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Saai tans skerm uit"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-am/strings.xml b/packages/MediaComponents/res/values-am/strings.xml
new file mode 100644
index 0000000..39a1903
--- /dev/null
+++ b/packages/MediaComponents/res/values-am/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"ስርዓት"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"መሣሪያዎች"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"የCast አዝራር"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast አዝራር። ግንኙነት ተቋርጧል"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast አዝራር በማገናኘት ላይ"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast አዝራር። ተገናኝቷል"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Cast አድርግ ወደ"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"መሣሪያዎችን በማግኘት ላይ"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"ግንኙነት አቋርጥ"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Cast ማድረግ አቁም"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"ዝጋ"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"አጫውት"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"ለአፍታ አቁም"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"አቁም"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"አስፋ"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"ሰብስብ"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"የአልበም ስነ-ጥበብ"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"ተንሸራታች የድምፅ መቆጣጠሪያ"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"ምንም ማህደረመረጃ አልተመረጠም"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"ምንም መረጃ አይገኝም"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"ማያ ገጽን በመውሰድ ላይ"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ar/strings.xml b/packages/MediaComponents/res/values-ar/strings.xml
new file mode 100644
index 0000000..f8fb97d
--- /dev/null
+++ b/packages/MediaComponents/res/values-ar/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"النظام"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"الأجهزة"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"زر الإرسال"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"زر الإرسال. تم قطع الاتصال"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"زر الإرسال. جارٍ الاتصال"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"زر الإرسال. تم الاتصال"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"إرسال إلى"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"جارٍ البحث عن أجهزة"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"قطع الاتصال"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"إيقاف الإرسال"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"إغلاق"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"تشغيل"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"إيقاف مؤقت"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"إيقاف"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"توسيع"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"تصغير"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"صورة الألبوم"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"شريط تمرير مستوى الصوت"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"لم يتم اختيار أي وسائط"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"لا تتوفر أي معلومات"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"جارٍ إرسال الشاشة"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-az/strings.xml b/packages/MediaComponents/res/values-az/strings.xml
new file mode 100644
index 0000000..a3c60ab
--- /dev/null
+++ b/packages/MediaComponents/res/values-az/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Cihazlar"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Yayım düyməsi"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Yayım düyməsi. Bağlantı kəsildi"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Yayım düyməsi. Qoşulur"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Yayım düyməsi. Qoşuldu"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Bura yayımlayın"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Cihazlar axtarılır"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Bağlantını kəsin"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Yayımı dayandırın"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Qapadın"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Oynadın"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Durdurun"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Dayandırın"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Genişləndirin"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Yığcamlaşdırın"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albom incəsənəti"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Səs hərmi diyircəyi"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Heç bir media seçilməyib"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Əlçatan məlumat yoxdur"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Ekran yayımlanır"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-b+sr+Latn/strings.xml b/packages/MediaComponents/res/values-b+sr+Latn/strings.xml
new file mode 100644
index 0000000..e25bd6e
--- /dev/null
+++ b/packages/MediaComponents/res/values-b+sr+Latn/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Uređaji"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Dugme Prebaci"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Dugme Prebaci. Veza je prekinuta"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Dugme Prebaci. Povezuje se"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Dugme Prebaci. Povezan je"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Prebacuj na"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Pronalaženje uređaja"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Prekini vezu"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Zaustavi prebacivanje"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Zatvori"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Pusti"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pauziraj"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Zaustavi"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Proširi"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Skupi"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Omot albuma"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Klizač za jačinu zvuka"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nema izabranih medija"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nisu dostupne nikakve informacije"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Prebacuje se ekran"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-be/strings.xml b/packages/MediaComponents/res/values-be/strings.xml
new file mode 100644
index 0000000..ac391c1
--- /dev/null
+++ b/packages/MediaComponents/res/values-be/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Сістэма"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Прылады"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Кнопка трансляцыі"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Кнопка трансляцыі. Адключана"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Кнопка трансляцыі. Ідзе падключэнне"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Кнопка трансляцыі. Падключана"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Трансліраваць на"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Пошук прылад"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Адлучыць"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Спыніць трансляцыю"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Закрыць"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Прайграць"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Прыпыніць"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Спыніць"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Разгарнуць"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Згарнуць"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Вокладка альбома"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Паўзунок гучнасці"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Медыяфайл не выбраны"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Інфармацыя адсутнічае"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Экран трансляцыі"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-bg/strings.xml b/packages/MediaComponents/res/values-bg/strings.xml
new file mode 100644
index 0000000..76712d4
--- /dev/null
+++ b/packages/MediaComponents/res/values-bg/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Система"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Устройства"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Бутон за предаване"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Бутон за предаване. Връзката е прекратена"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Бутон за предаване. Свързва се"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Бутон за предаване. Установена е връзка"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Предаване към"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Търсят се устройства"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Прекратяване на връзката"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Спиране на предаването"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Затваряне"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Пускане"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Поставяне на пауза"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Спиране"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Разгъване"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Свиване"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Обложка на албума"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Плъзгач за силата на звука"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Няма избрана мултимедия"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Няма налична информация"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Екранът се предава"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-bn/strings.xml b/packages/MediaComponents/res/values-bn/strings.xml
new file mode 100644
index 0000000..1bf5932
--- /dev/null
+++ b/packages/MediaComponents/res/values-bn/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"সিস্টেম"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"ডিভাইসগুলি"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"কাস্ট করার বোতাম"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"কাস্ট করার বোতাম৷ সংযোগ বিচ্ছিন্ন হয়েছে"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"কাস্ট করার বোতাম৷ সংযোগ করা হচ্ছে"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"কাস্ট করার বোতাম৷ সংযুক্ত হয়েছে"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"এতে কাস্ট করুন"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"ডিভাইসগুলিকে খোঁজা হচ্ছে"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"সংযোগ বিচ্ছিন্ন করুন"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"কাস্ট করা বন্ধ করুন"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"বন্ধ করুন"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"চালান"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"বিরাম দিন"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"থামান"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"বড় করুন"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"সঙ্কুচিত করুন"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"অ্যালবাম শৈলি"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"ভলিউম স্লাইডার"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"কোনো মিডিয়া নির্বাচন করা হয়নি"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"কোনো তথ্য উপলব্ধ নেই"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"স্ক্রীন কাস্ট করা হচ্ছে"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-bs/strings.xml b/packages/MediaComponents/res/values-bs/strings.xml
new file mode 100644
index 0000000..711c742
--- /dev/null
+++ b/packages/MediaComponents/res/values-bs/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Uređaji"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Dugme za emitiranje"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Dugme za emitiranje. Veza je prekinuta"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Dugme za emitiranje. Povezivanje"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Dugme za emitiranje. Povezano"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Emitiranje na"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Traženje uređaja"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Prekini vezu"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Zaustavi prebacivanje"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Zatvori"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Reproduciraj"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pauziraj"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Zaustavi"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Proširi"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Skupi"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Omot albuma"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Klizač za jačinu zvuka"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nijedan medij nije odabran"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nema dostupnih informacija"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Prebacuje se ekran"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ca/strings.xml b/packages/MediaComponents/res/values-ca/strings.xml
new file mode 100644
index 0000000..bf85acf
--- /dev/null
+++ b/packages/MediaComponents/res/values-ca/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Dispositius"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Botó d\'emetre"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Botó Emet. Desconnectat."</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Botó Emet. S\'està connectant."</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Botó Emet. Connectat."</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Emet a"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"S\'estan cercant dispositius"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Desconnecta"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Atura l\'emissió"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Tanca"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Reprodueix"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Posa en pausa"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Atura"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Desplega"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Replega"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Imatge de l\'àlbum"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Control lliscant de volum"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"No s\'ha seleccionat cap fitxer multimèdia"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"No hi ha informació disponible"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Emissió de pantalla"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-cs/strings.xml b/packages/MediaComponents/res/values-cs/strings.xml
new file mode 100644
index 0000000..09a8920
--- /dev/null
+++ b/packages/MediaComponents/res/values-cs/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Systém"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Zařízení"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Tlačítko odesílání"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Tlačítko odesílání. Odpojeno"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Tlačítko odesílání. Připojování"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Tlačítko odesílání. Připojeno"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Odesílat do"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Hledání zařízení"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Odpojit"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Zastavit odesílání"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Zavřít"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Přehrát"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pozastavit"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Zastavit"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Rozbalit"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Sbalit"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Obal alba"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Posuvník hlasitosti"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nebyla vybrána žádná média"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nejsou k dispozici žádné informace"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Odesílání obsahu obrazovky"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-da/strings.xml b/packages/MediaComponents/res/values-da/strings.xml
new file mode 100644
index 0000000..8e7a790
--- /dev/null
+++ b/packages/MediaComponents/res/values-da/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Enheder"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast-knap"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast-knap. Forbindelsen er afbrudt"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast-knap. Opretter forbindelse"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast-knap. Tilsluttet"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Cast til"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Finder enheder"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Afbryd"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Stop med at caste"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Luk"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Afspil"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Sæt på pause"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stop"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Udvid"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Skjul"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albumgrafik"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Lydstyrkeskyder"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Ingen medier er markeret"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Der er ingen tilgængelige oplysninger"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Skærmen castes"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-de/strings.xml b/packages/MediaComponents/res/values-de/strings.xml
new file mode 100644
index 0000000..26bf57c
--- /dev/null
+++ b/packages/MediaComponents/res/values-de/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Geräte"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast-Symbol"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Streaming-Schaltfläche. Nicht verbunden"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Streaming-Schaltfläche. Verbindung wird hergestellt"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Streaming-Schaltfläche. Verbunden"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Streamen auf"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Geräte werden gesucht."</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Verbindung trennen"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Streaming beenden"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Schließen"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Wiedergeben"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pausieren"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Beenden"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Maximieren"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Minimieren"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albumcover"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Schieberegler für die Lautstärke"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Keine Medien ausgewählt"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Keine Informationen verfügbar"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Bildschirm wird gestreamt."</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-el/strings.xml b/packages/MediaComponents/res/values-el/strings.xml
new file mode 100644
index 0000000..d82f69b
--- /dev/null
+++ b/packages/MediaComponents/res/values-el/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Σύστημα"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Συσκευές"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Κουμπί Cast"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Κουμπί μετάδοσης. Αποσυνδέθηκε"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Κουμπί μετάδοση. Σύνδεση σε εξέλιξη"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Κουμπί μετάδοσης. Συνδέθηκε"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Μετάδοση σε"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Εύρεση συσκευών"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Αποσύνδεση"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Διακοπή μετάδοσης"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Κλείσιμο"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Αναπαραγωγή"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Παύση"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Διακοπή"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Ανάπτυξη"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Σύμπτυξη"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Εξώφυλλο άλμπουμ"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Ρυθμιστικό έντασης ήχου"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Δεν έχουν επιλεγεί μέσα"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Δεν υπάρχουν διαθέσιμες πληροφορίες"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Μετάδοση οθόνης"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-en-rAU/strings.xml b/packages/MediaComponents/res/values-en-rAU/strings.xml
new file mode 100644
index 0000000..dd3f219
--- /dev/null
+++ b/packages/MediaComponents/res/values-en-rAU/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Devices"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast button"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast button. Disconnected"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast button. Connecting"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast button. Connected"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Cast to"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Finding devices"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Disconnect"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Stop casting"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Close"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Play"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pause"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stop"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Expand"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Collapse"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Album art"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Volume slider"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"No media selected"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"No info available"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Casting screen"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-en-rCA/strings.xml b/packages/MediaComponents/res/values-en-rCA/strings.xml
new file mode 100644
index 0000000..dd3f219
--- /dev/null
+++ b/packages/MediaComponents/res/values-en-rCA/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Devices"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast button"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast button. Disconnected"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast button. Connecting"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast button. Connected"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Cast to"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Finding devices"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Disconnect"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Stop casting"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Close"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Play"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pause"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stop"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Expand"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Collapse"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Album art"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Volume slider"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"No media selected"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"No info available"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Casting screen"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-en-rGB/strings.xml b/packages/MediaComponents/res/values-en-rGB/strings.xml
new file mode 100644
index 0000000..dd3f219
--- /dev/null
+++ b/packages/MediaComponents/res/values-en-rGB/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Devices"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast button"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast button. Disconnected"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast button. Connecting"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast button. Connected"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Cast to"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Finding devices"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Disconnect"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Stop casting"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Close"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Play"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pause"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stop"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Expand"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Collapse"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Album art"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Volume slider"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"No media selected"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"No info available"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Casting screen"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-en-rIN/strings.xml b/packages/MediaComponents/res/values-en-rIN/strings.xml
new file mode 100644
index 0000000..dd3f219
--- /dev/null
+++ b/packages/MediaComponents/res/values-en-rIN/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Devices"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast button"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast button. Disconnected"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast button. Connecting"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast button. Connected"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Cast to"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Finding devices"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Disconnect"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Stop casting"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Close"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Play"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pause"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stop"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Expand"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Collapse"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Album art"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Volume slider"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"No media selected"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"No info available"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Casting screen"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-en-rXC/strings.xml b/packages/MediaComponents/res/values-en-rXC/strings.xml
new file mode 100644
index 0000000..a87007e
--- /dev/null
+++ b/packages/MediaComponents/res/values-en-rXC/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Devices"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast button"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast button. Disconnected"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast button. Connecting"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast button. Connected"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Cast to"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Finding devices"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Disconnect"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Stop casting"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Close"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Play"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pause"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stop"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Expand"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Collapse"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Album art"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Volume slider"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"No media selected"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"No info available"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Casting screen"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-es-rUS/strings.xml b/packages/MediaComponents/res/values-es-rUS/strings.xml
new file mode 100644
index 0000000..441ead1
--- /dev/null
+++ b/packages/MediaComponents/res/values-es-rUS/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Dispositivos"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Botón para transmitir"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Botón para transmitir (desconectado)"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Botón para transmitir (conectando)"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Botón para transmitir (conectado)"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Transmitir a"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Buscando dispositivos"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Desconectar"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Detener la transmisión"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Cerrar"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Reproducir"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pausar"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Detener"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Mostrar"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Ocultar"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Imagen del álbum"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Control deslizante del volumen"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"No se seleccionó ningún contenido multimedia"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Sin información disponible"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Transmitiendo pantalla"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-es/strings.xml b/packages/MediaComponents/res/values-es/strings.xml
new file mode 100644
index 0000000..ff43008
--- /dev/null
+++ b/packages/MediaComponents/res/values-es/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Dispositivos"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Botón de enviar"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Botón de enviar. Desconectado"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Botón de enviar. Conectando"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Botón de enviar. Conectado"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Enviar a"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Buscando dispositivos"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Desconectar"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Detener envío de contenido"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Cerrar"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Reproducir"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pausa"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Detener"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Mostrar"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Ocultar"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Portada del álbum"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Control deslizante de volumen"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"No se ha seleccionado ningún medio"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"No hay información disponible"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Enviando pantalla"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-et/strings.xml b/packages/MediaComponents/res/values-et/strings.xml
new file mode 100644
index 0000000..453235b
--- /dev/null
+++ b/packages/MediaComponents/res/values-et/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Süsteem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Seadmed"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Ülekandenupp"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Ülekandenupp. Ühendus on katkestatud"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Ülekandenupp. Ühendamine"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Ülekandenupp. Ühendatud"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Ülekandmine seadmesse"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Seadmete otsimine"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Katkesta ühendus"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Peata ülekandmine"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Sulgemine"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Esitamine"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Peatamine"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Peata"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Laiendamine"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Ahendamine"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albumi kujundus"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Helitugevuse liugur"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Meediat pole valitud"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Teave puudub"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Ekraanikuva ülekandmine"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-eu/strings.xml b/packages/MediaComponents/res/values-eu/strings.xml
new file mode 100644
index 0000000..dba19e4
--- /dev/null
+++ b/packages/MediaComponents/res/values-eu/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Gailuak"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Igorri botoia"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Igortzeko botoia. Deskonektatuta"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Igortzeko botoia. Konektatzen"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Igortzeko botoia. Konektatuta"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Igorri hona:"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Gailuak bilatzen"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Deskonektatu"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Utzi igortzeari"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Itxi"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Erreproduzitu"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pausatu"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Gelditu"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Zabaldu"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Tolestu"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albumaren azala"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Bolumenaren graduatzailea"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Ez da hautatu multimedia-edukirik"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Ez dago informaziorik"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Pantaila igortzen"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-fa/strings.xml b/packages/MediaComponents/res/values-fa/strings.xml
new file mode 100644
index 0000000..4c6c779
--- /dev/null
+++ b/packages/MediaComponents/res/values-fa/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"سیستم"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"دستگاهها"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"دکمه ارسال محتوا"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"دکمه فرستادن. ارتباط قطع شد"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"دکمه فرستادن. درحال مرتبطسازی"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"دکمه فرستادن. مرتبط شد"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"ارسال محتوا به"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"پیدا کردن دستگاهها"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"قطع ارتباط"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"توقف ارسال محتوا"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"بستن"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"پخش"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"مکث"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"توقف"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"بزرگ کردن"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"کوچک کردن"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"عکس روی جلد آلبوم"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"لغزنده میزان صدا"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"رسانه انتخاب نشده است"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"اطلاعات در دسترس نیست"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"درحال فرستادن صفحه"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-fi/strings.xml b/packages/MediaComponents/res/values-fi/strings.xml
new file mode 100644
index 0000000..d683435
--- /dev/null
+++ b/packages/MediaComponents/res/values-fi/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Järjestelmä"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Laitteet"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast-painike"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast-painike. Yhteys katkaistu"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast-painike. Yhdistetään"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast-painike. Yhdistetty"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Suoratoiston kohde"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Etsitään laitteita"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Katkaise yhteys"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Lopeta suoratoisto"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Sulje"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Toista"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Keskeytä"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Pysäytä"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Laajenna"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Tiivistä"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albumin kansikuva"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Äänenvoimakkuuden liukusäädin"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Ei valittua mediaa."</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Tietoja ei ole saatavilla"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Suoratoistetaan näyttöä"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-fr-rCA/strings.xml b/packages/MediaComponents/res/values-fr-rCA/strings.xml
new file mode 100644
index 0000000..c4f984b
--- /dev/null
+++ b/packages/MediaComponents/res/values-fr-rCA/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Système"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Appareils"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Bouton Diffuser"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Bouton Diffuser. Déconnecté"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Bouton Diffuser. Connexion en cours…"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Bouton Diffuser. Connecté"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Diffuser sur"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Recherche d\'appareils"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Se déconnecter"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Arrêter la diffusion"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Fermer"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Lire"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Interrompre"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Arrêter"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Développer"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Réduire"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Image de l\'album"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Curseur de réglage du volume"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Aucun média sélectionné"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Aucune information disponible"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Diffusion de l\'écran en cours"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-fr/strings.xml b/packages/MediaComponents/res/values-fr/strings.xml
new file mode 100644
index 0000000..12c312f
--- /dev/null
+++ b/packages/MediaComponents/res/values-fr/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Système"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Appareils"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Icône Cast"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Icône Cast. Déconnecté"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Icône Cast. Connexion…"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Icône Cast. Connecté"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Caster sur"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Recherche d\'appareils…"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Déconnecter"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Arrêter la diffusion"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Fermer"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Lecture"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pause"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Arrêter"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Développer"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Réduire"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Image de l\'album"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Curseur de volume"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Aucun média sélectionné"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Aucune information disponible"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Diffusion de l\'écran en cours…"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-gl/strings.xml b/packages/MediaComponents/res/values-gl/strings.xml
new file mode 100644
index 0000000..1b2c354
--- /dev/null
+++ b/packages/MediaComponents/res/values-gl/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Dispositivos"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Botón de emitir"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Botón de emitir. Desconectado"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Botón de emitir. Conectando"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Botón de emitir. Conectado"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Emitir a"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Buscando dispositivos"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Desconectar"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Deter emisión"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Pechar"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Reproduce"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pausa"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Deter"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Ampliar"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Contraer"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Portada do álbum"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Control desprazable do volume"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Non se seleccionaron recursos"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Non hai información dispoñible"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Emisión de pantalla"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-gu/strings.xml b/packages/MediaComponents/res/values-gu/strings.xml
new file mode 100644
index 0000000..2cd5f3f
--- /dev/null
+++ b/packages/MediaComponents/res/values-gu/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"સિસ્ટમ"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"ઉપકરણો"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"કાસ્ટ કરો બટન"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"કાસ્ટ કરો બટન. ડિસ્કનેક્ટ કર્યું"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"કાસ્ટ કરો બટન. કનેક્ટ થઈ રહ્યું છે"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"કાસ્ટ કરો બટન. કનેક્ટ થયું"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"આના પર કાસ્ટ કરો"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"ઉપકરણો શોધી રહ્યાં છીએ"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"ડિસ્કનેક્ટ કરો"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"કાસ્ટ કરવાનું રોકો"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"બંધ કરો"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"ચલાવો"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"થોભાવો"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"રોકો"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"વિસ્તૃત કરો"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"સંકુચિત કરો"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"આલ્બમ કલા"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"વૉલ્યુમ સ્લાઇડર"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"કોઈ મીડિયા પસંદ કરેલ નથી"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"કોઈ માહિતી ઉપલબ્ધ નથી"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"સ્ક્રીનને કાસ્ટ કરી રહ્યાં છે"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-hi/strings.xml b/packages/MediaComponents/res/values-hi/strings.xml
new file mode 100644
index 0000000..9552a59
--- /dev/null
+++ b/packages/MediaComponents/res/values-hi/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"सिस्टम"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"डिवाइस"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"कास्ट करें बटन"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"कास्ट करें बटन. डिसकनेक्ट है"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"कास्ट करें बटन. कनेक्ट हो रहा है"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"कास्ट करें बटन. कनेक्ट है"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"इस पर कास्ट करें"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"डिवाइस ढूंढ रहा है"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"डिसकनेक्ट करें"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"कास्ट करना बंद करें"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"बंद करें"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"चलाएं"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"रोकें"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"बंद करें"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"विस्तार करें"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"छोटा करें"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"एल्बम आर्ट"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"वॉल्यूम स्लाइडर"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"कोई मीडिया चयनित नहीं है"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"कोई जानकारी मौजूद नहीं है"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"स्क्रीन कास्ट हो रही है"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-hr/strings.xml b/packages/MediaComponents/res/values-hr/strings.xml
new file mode 100644
index 0000000..3c43ee7
--- /dev/null
+++ b/packages/MediaComponents/res/values-hr/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sustav"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Uređaji"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Gumb za emitiranje"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Gumb za emitiranje. Veza prekinuta"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Gumb za emitiranje. Povezivanje"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Gumb za emitiranje. Povezan"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Emitiranje na"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Traženje uređaja"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Prekini vezu"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Zaustavi emitiranje"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Zatvaranje"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Reprodukcija"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pauziranje"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Zaustavi"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Proširivanje"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Sažimanje"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Naslovnica albuma"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Klizač za glasnoću"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nije odabran nijedan medij"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Informacije nisu dostupne"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Emitiranje zaslona"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-hu/strings.xml b/packages/MediaComponents/res/values-hu/strings.xml
new file mode 100644
index 0000000..a36bdfe
--- /dev/null
+++ b/packages/MediaComponents/res/values-hu/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Rendszer"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Eszközök"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Átküldés gomb"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Átküldés gomb. Kapcsolat bontva"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Átküldés gomb. Csatlakozás"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Átküldés gomb. Csatlakoztatva"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Átküldés ide"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Eszközök keresése"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Leválasztás"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Átküldés leállítása"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Bezárás"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Lejátszás"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Szüneteltetés"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Leállítás"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Kibontás"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Összecsukás"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Lemezborító"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Hangerőszabályzó"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nincs média kiválasztva"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nincs információ"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Képernyőtartalom átküldése"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-hy/strings.xml b/packages/MediaComponents/res/values-hy/strings.xml
new file mode 100644
index 0000000..8ec82b7
--- /dev/null
+++ b/packages/MediaComponents/res/values-hy/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Համակարգ"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Սարքեր"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Հեռարձակման կոճակ"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Հեռարձակման կոճակ: Սարքն անջատված է"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Հեռարձակման կոճակ: Սարքը կապակցվում է"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Հեռարձակման կոճակ: Սարքը կապակցված է"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Ընտրեք սարքը"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Սարքերի որոնում"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Անջատել"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Դադարեցնել հեռարձակումը"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Փակել"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Նվագարկել"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Դադար"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Դադարեցնել"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Ընդարձակել"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Կոծկել"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Ալբոմի շապիկ"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Ձայնի ուժգնության կարգավորիչ"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Մեդիա ֆայլեր չեն ընտրվել"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Տեղեկությունները հասանելի չեն"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Էկրանը հեռարձակվում է"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-in/strings.xml b/packages/MediaComponents/res/values-in/strings.xml
new file mode 100644
index 0000000..6b2752e
--- /dev/null
+++ b/packages/MediaComponents/res/values-in/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Perangkat"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Tombol Cast"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Tombol Cast. Terputus"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Tombol Cast. Menghubungkan"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Tombol Cast. Terhubung"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Transmisikan ke"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Mencari perangkat"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Putuskan sambungan"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Hentikan cast"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Tutup"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Putar"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Jeda"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Berhenti"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Luaskan"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Ciutkan"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Sampul album"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Bilah geser volume"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Tidak ada media yang dipilih"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Tidak ada info yang tersedia"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Transmisi layar"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-is/strings.xml b/packages/MediaComponents/res/values-is/strings.xml
new file mode 100644
index 0000000..6a35ea6
--- /dev/null
+++ b/packages/MediaComponents/res/values-is/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Kerfi"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Tæki"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Útsendingarhnappur"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Útsendingarhnappur. Aftengt"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Útsendingarhnappur. Tengist"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Útsendingarhnappur. Tengt"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Senda út í"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Leitað að tækjum"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Aftengjast"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Stöðva útsendingu"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Loka"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Spila"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Hlé"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stöðva"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Stækka"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Minnka"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Plötuumslag"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Hljóðstyrkssleði"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Enginn miðill valinn"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Engar upplýsingar í boði"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Skjár sendur út"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-it/strings.xml b/packages/MediaComponents/res/values-it/strings.xml
new file mode 100644
index 0000000..716e3ac
--- /dev/null
+++ b/packages/MediaComponents/res/values-it/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Dispositivi"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Pulsante Trasmetti"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Pulsante Trasmetti. Disconnesso"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Pulsante Trasmetti. Connessione in corso"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Pulsante Trasmetti. Connesso"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Trasmetti a"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Ricerca di dispositivi in corso"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Scollega"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Interrompi trasmissione"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Chiudi"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Riproduci"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pausa"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Interrompi"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Espandi"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Comprimi"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Copertina"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Dispositivo di scorrimento del volume"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nessun contenuto multimediale selezionato"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nessuna informazione disponibile"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Trasmissione dello schermo in corso"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-iw/strings.xml b/packages/MediaComponents/res/values-iw/strings.xml
new file mode 100644
index 0000000..252b0ce
--- /dev/null
+++ b/packages/MediaComponents/res/values-iw/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"מערכת"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"מכשירים"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"לחצן הפעלת Cast"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"לחצן הפעלת Cast. מנותק"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"לחצן הפעלת Cast. מתחבר"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"לחצן הפעלת Cast. מחובר"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"העברה אל"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"מחפש מכשירים"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"נתק"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"הפסק את ההעברה"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"סגור"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"הפעל"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"השהה"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"הפסק"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"הרחב"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"כווץ"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"עטיפת אלבום"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"מחוון עוצמה"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"לא נבחרה מדיה"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"אין מידע זמין"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"העברת מסך מתבצעת"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ja/strings.xml b/packages/MediaComponents/res/values-ja/strings.xml
new file mode 100644
index 0000000..a149727
--- /dev/null
+++ b/packages/MediaComponents/res/values-ja/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"システム"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"端末"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"キャストアイコン"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"キャスト アイコン。接続解除済み"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"キャスト アイコン。接続中"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"キャスト アイコン。接続済み"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"キャストするデバイス"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"端末を検索しています"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"接続を解除"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"キャストを停止"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"閉じる"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"再生"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"一時停止"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"停止"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"展開"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"折りたたむ"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"アルバムアート"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"音量スライダー"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"メディアが選択されていません"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"情報がありません"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"画面をキャストしています"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ka/strings.xml b/packages/MediaComponents/res/values-ka/strings.xml
new file mode 100644
index 0000000..3da081a
--- /dev/null
+++ b/packages/MediaComponents/res/values-ka/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"სისტემა"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"მოწყობილობები"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"ტრანსლირების ღილაკი"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"ტრანსლირების ღილაკი. გათიშული"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"ტრანსლირების ღილაკი. მიმდინარეობს დაკავშირება"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"ტრანსლირების ღილაკი. დაკავშირებული"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"ტრანსლირება:"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"მოწყობილობების მოძიება..."</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"კავშირის გაწყვეტა"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"ტრანსლირების შეწყვეტა"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"დახურვა"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"დაკვრა"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"პაუზა"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"შეწყვეტა"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"გაშლა"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"ჩაკეცვა"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ალბომის გარეკანი"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"ხმის სლაიდერი"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"მედია არჩეული არ არის"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"ინფორმაცია არ არის ხელმისაწვდომი"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"მიმდინარეობს ეკრანის გადაცემა"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-kk/strings.xml b/packages/MediaComponents/res/values-kk/strings.xml
new file mode 100644
index 0000000..94dcbb3
--- /dev/null
+++ b/packages/MediaComponents/res/values-kk/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Жүйе"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Құрылғылар"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Трансляциялау түймесі"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"\"Трансляциялау\" түймесі. Ажыратулы"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"\"Трансляциялау\" түймесі. Қосылуда"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"\"Трансляциялау\" түймесі. Қосылды"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Келесіге трансляциялау"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Құрылғыларды табу"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Ажырату"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Трансляциялауды тоқтату"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Жабу"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Ойнату"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Кідірту"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Тоқтату"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Жаю"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Жию"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Альбом шебері"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Дыбыс деңгейінің жүгірткісі"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Ешбір тасушы таңдалмаған"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Қол жетімді ақпарат жоқ"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Экранды трансляциялау"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-km/strings.xml b/packages/MediaComponents/res/values-km/strings.xml
new file mode 100644
index 0000000..e44780e
--- /dev/null
+++ b/packages/MediaComponents/res/values-km/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"ប្រព័ន្ធ"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"ឧបករណ៍"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"ប៊ូតុងខាស"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"ខាសប៊ូតុង៖ បានកាត់ផ្តាច់"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"ខាសប៊ូតុង៖ កំពុងភ្ជាប់"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"ខាសប៊ូតុង៖ បានភ្ជាប់ហើយ"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"បញ្ជូនទៅ"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"កំពុងស្វែងរកឧបករណ៍"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"ផ្ដាច់"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"ឈប់ភ្ជាប់"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"បិទ"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"ចាក់"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"ផ្អាក"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"ឈប់"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"ពង្រីក"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"បង្រួម"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ស្នាដៃសិល្បៈអាល់ប៊ុម"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"របារកម្រិតសំឡេង"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"គ្មានការជ្រើសមេឌៀទេ"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"មិនមានព័ត៌មានទេ"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"កំពុងខាសអេក្រង់"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-kn/strings.xml b/packages/MediaComponents/res/values-kn/strings.xml
new file mode 100644
index 0000000..4237fdd
--- /dev/null
+++ b/packages/MediaComponents/res/values-kn/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"ಸಿಸ್ಟಂ"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"ಸಾಧನಗಳು"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"ಬಿತ್ತರಿಸು ಬಟನ್"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"ಬಿತ್ತರಿಸು ಬಟನ್. ಸಂಪರ್ಕ ಕಡಿತಗೊಳಿಸಲಾಗಿದೆ"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"ಬಿತ್ತರಿಸು ಬಟನ್. ಸಂಪರ್ಕಿಸಲಾಗುತ್ತಿದೆ"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"ಬಿತ್ತರಿಸು ಬಟನ್. ಸಂಪರ್ಕಿತಗೊಂಡಿದೆ"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"ಇದಕ್ಕೆ ಬಿತ್ತರಿಸಿ"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"ಸಾಧನಗಳನ್ನು ಹುಡುಕಲಾಗುತ್ತಿದೆ"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"ಸಂಪರ್ಕ ಕಡಿತಗೊಳಿಸು"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"ಬಿತ್ತರಿಸುವಿಕೆ ನಿಲ್ಲಿಸಿ"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"ಮುಚ್ಚು"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"ಪ್ಲೇ ಮಾಡಿ"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"ವಿರಾಮ"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"ನಿಲ್ಲಿಸಿ"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"ವಿಸ್ತರಿಸು"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"ಸಂಕುಚಿಸು"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ಆಲ್ಬಮ್ ಕಲೆ"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"ವಾಲ್ಯೂಮ್ ಸ್ಲೈಡರ್"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"ಯಾವುದೇ ಮಾಧ್ಯಮ ಆಯ್ಕೆಮಾಡಲಾಗಿಲ್ಲ"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"ಯಾವುದೇ ಮಾಹಿತಿ ಲಭ್ಯವಿಲ್ಲ"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"ಪರದೆಯನ್ನು ಬಿತ್ತರಿಸಲಾಗುತ್ತಿದೆ"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ko/strings.xml b/packages/MediaComponents/res/values-ko/strings.xml
new file mode 100644
index 0000000..be893a9
--- /dev/null
+++ b/packages/MediaComponents/res/values-ko/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"시스템"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"기기"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"전송 버튼"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"전송 버튼. 연결 해제됨"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"전송 버튼. 연결 중"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"전송 버튼. 연결됨"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"전송할 기기"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"기기를 찾는 중"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"연결 해제"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"전송 중지"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"닫기"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"재생"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"일시중지"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"중지"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"펼치기"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"접기"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"앨범아트"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"볼륨 슬라이더"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"선택한 미디어 없음"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"정보가 없습니다."</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"화면 전송 중"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ky/strings.xml b/packages/MediaComponents/res/values-ky/strings.xml
new file mode 100644
index 0000000..57813af
--- /dev/null
+++ b/packages/MediaComponents/res/values-ky/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Тутум"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Түзмөктөр"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Тышкы экранга чыгаруу баскычы"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Тышкы экранга чыгаруу баскычы. Түзмөк ажырап турат."</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Тышкы экранга чыгаруу баскычы. Түзмөк туташууда"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Тышкы экранга чыгаруу баскычы. Түзмөк туташып турат"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Төмөнкүгө чыгаруу"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Түзмөктөр изделүүдө"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Ажыратуу"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Тышк экранга чыгарну токтотуу"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Жабуу"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Ойнотуу"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Тындыруу"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Токтотуу"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Жайып көрсөтүү"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Жыйыштыруу"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Альбом мукабасы"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Үндү катуулатуучу сыдырма"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Бир да медиа файл тандалган жок"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Эч маалымат жок"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Тышкы экранга чыгарылууда"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-land/dimens.xml b/packages/MediaComponents/res/values-land/dimens.xml
new file mode 100644
index 0000000..29f1e1d
--- /dev/null
+++ b/packages/MediaComponents/res/values-land/dimens.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources>
+ <!-- MediaRouteController's volume group list -->
+ <eat-comment />
+ <!-- Maximum height of volume group list. -->
+ <dimen name="mr_controller_volume_group_list_max_height">132dp</dimen>
+ <!-- Height of volume group item. -->
+ <dimen name="mr_controller_volume_group_list_item_height">61dp</dimen>
+ <!-- Size of an item's icon. -->
+ <dimen name="mr_controller_volume_group_list_item_icon_size">18dp</dimen>
+</resources>
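
(A hedged sketch, not part of the patch: the values-land/dimens.xml entries above override the default values/dimens.xml when the device is in landscape, capping the MediaRouteController volume group list at 132dp with 61dp rows and 18dp icons. The snippet below only illustrates how such dimension resources are typically consumed from Java; the VolumeGroupListMetrics class is hypothetical, and R.dimen.* assumes the generated R class of the MediaComponents package — it is not the actual MediaRouteControllerDialog code.)

    import android.content.Context;
    import android.content.res.Resources;

    // Hypothetical helper: resolves the volume-group-list dimensions defined in the
    // dimens.xml resources added by this patch. In landscape, the values-land
    // qualifiers above win, so the 132dp/61dp/18dp values are returned (in pixels).
    final class VolumeGroupListMetrics {
        final int maxHeightPx;
        final int itemHeightPx;
        final int itemIconSizePx;

        VolumeGroupListMetrics(Context context) {
            Resources res = context.getResources();
            maxHeightPx = res.getDimensionPixelSize(
                    R.dimen.mr_controller_volume_group_list_max_height);
            itemHeightPx = res.getDimensionPixelSize(
                    R.dimen.mr_controller_volume_group_list_item_height);
            itemIconSizePx = res.getDimensionPixelSize(
                    R.dimen.mr_controller_volume_group_list_item_icon_size);
        }
    }
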
diff --git a/packages/MediaComponents/res/values-lo/strings.xml b/packages/MediaComponents/res/values-lo/strings.xml
new file mode 100644
index 0000000..91737db
--- /dev/null
+++ b/packages/MediaComponents/res/values-lo/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"ລະບົບ"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"ອຸປະກອນ"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"ປຸ່ມຄາສທ໌"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"ປຸ່ມສົ່ງສັນຍານ. ຕັດການເຊື່ອມຕໍ່ແລ້ວ"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"ປຸ່ມສົ່ງສັນຍານ. ກຳລັງເຊື່ອມຕໍ່"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"ປຸ່ມສົ່ງສັນຍານ. ເຊື່ອມຕໍ່ແລ້ວ"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"ສົ່ງສັນຍານຫາ"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"ກຳລັງຊອກຫາອຸປະກອນ"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"ຕັດການເຊື່ອມຕໍ່"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"ຢຸດການສົ່ງສັນຍານ"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"ປິດ"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"ຫຼິ້ນ"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"ຢຸດຊົ່ວຄາວ"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"ຢຸດ"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"ຂະຫຍາຍ"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"ຫຍໍ້ລົງ"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ໜ້າປົກອະລະບໍ້າ"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"ຕົວປັບລະດັບສຽງ"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"ບໍ່ໄດ້ເລືອກມີເດຍໃດໄວ້"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"ບໍ່ມີຂໍ້ມູນ"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"ການສົ່ງພາບໜ້າຈໍ"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-lt/strings.xml b/packages/MediaComponents/res/values-lt/strings.xml
new file mode 100644
index 0000000..ff036d1
--- /dev/null
+++ b/packages/MediaComponents/res/values-lt/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Įrenginiai"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Perdavimo mygtukas"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Perdavimo mygtukas. Atsijungta"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Perdavimo mygtukas. Prisijungiama"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Perdavimo mygtukas. Prisijungta"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Perduoti į"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Randami įrenginiai"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Atjungti"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Sustabdyti perdavimą"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Uždaryti"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Leisti"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pristabdyti"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Sustabdyti"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Išskleisti"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Sutraukti"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albumo viršelis"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Garsumo šliaužiklis"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nepasirinkta jokia medija"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Informacija nepasiekiama"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Perduodamas ekranas"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-lv/strings.xml b/packages/MediaComponents/res/values-lv/strings.xml
new file mode 100644
index 0000000..454063e
--- /dev/null
+++ b/packages/MediaComponents/res/values-lv/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistēma"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Ierīces"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Apraides poga"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Apraides poga. Savienojums pārtraukts"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Apraides poga. Notiek savienojuma izveide"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Apraides poga. Savienojums izveidots"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Apraidīšana uz ierīci"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Notiek ierīču meklēšana"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Atvienot"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Apturēt apraidi"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Aizvērt"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Atskaņot"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Apturēt"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Apturēt"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Izvērst"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Sakļaut"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albuma vāciņš"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Skaļuma slīdnis"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nav atlasīti multivides faili"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nav pieejama informācija"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Notiek ekrāna apraide"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-mk/strings.xml b/packages/MediaComponents/res/values-mk/strings.xml
new file mode 100644
index 0000000..12dee36
--- /dev/null
+++ b/packages/MediaComponents/res/values-mk/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Систем"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Уреди"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Копчето за Cast"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Копче за Cast. Исклучено"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Копче за Cast. Се поврзува"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Копче за Cast. Поврзано"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Емитувај на"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Се бараат уреди"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Исклучи"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Сопри го емитувањето"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Затвори"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Репродуцирај"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Паузирај"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Сопри"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Прошири"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Собери"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Корица на албум"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Лизгач за јачина на звук"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Не се избрани медиуми"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Нема достапни информации"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Екранот се емитува"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ml/strings.xml b/packages/MediaComponents/res/values-ml/strings.xml
new file mode 100644
index 0000000..2d914b9
--- /dev/null
+++ b/packages/MediaComponents/res/values-ml/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"സിസ്റ്റം"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"ഉപകരണങ്ങൾ"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"ടാപ്പുചെയ്യുക"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"കാസ്റ്റ് ബട്ടൺ. വിച്ഛേദിച്ചു"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"കാസ്റ്റ് ബട്ടൺ. കണക്റ്റുചെയ്യുന്നു"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"കാസ്റ്റ് ബട്ടൺ. കണക്റ്റുചെയ്തു"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"ഇതിലേക്ക് കാസ്റ്റുചെയ്യുക"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"ഉപകരണങ്ങൾ കണ്ടെത്തുന്നു"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"വിച്ഛേദിക്കുക"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"കാസ്റ്റുചെയ്യൽ നിർത്തുക"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"അവസാനിപ്പിക്കുക"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"പ്ലേ ചെയ്യുക"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"തൽക്കാലം നിർത്തൂ"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"നിര്ത്തുക"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"വികസിപ്പിക്കുക"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"ചുരുക്കുക"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ആൽബം ആർട്ട്"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"വോളിയം സ്ലൈഡർ"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"മീഡിയയൊന്നും തിരഞ്ഞെടുത്തിട്ടില്ല"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"വിവരങ്ങളൊന്നും ലഭ്യമല്ല"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"സ്ക്രീൻ കാസ്റ്റുചെയ്യുന്നു"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-mn/strings.xml b/packages/MediaComponents/res/values-mn/strings.xml
new file mode 100644
index 0000000..ef87c92
--- /dev/null
+++ b/packages/MediaComponents/res/values-mn/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Систем"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Төхөөрөмжүүд"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Дамжуулах товчлуур"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Дамжуулах товчлуур. Салсан"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Дамжуулах товчлуур. Холбож байна"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Дамжуулах товчлуур. Холбогдсон"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Дамжуулах"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Төхөөрөмж хайж байна"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Салгах"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Дамжуулахыг зогсоох"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Хаах"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Тоглуулах"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Түр зогсоох"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Зогсоох"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Дэлгэх"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Хураах"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Цомгийн зураг"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Дууны түвшин тааруулагч"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Ямар ч медиа сонгоогүй"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Мэдээлэл байхгүй байна"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Дэлгэцийг дамжуулж байна"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-mr/strings.xml b/packages/MediaComponents/res/values-mr/strings.xml
new file mode 100644
index 0000000..2ffbebb
--- /dev/null
+++ b/packages/MediaComponents/res/values-mr/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"सिस्टम"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"डिव्हाइसेस"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"कास्ट बटण"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"कास्ट बटण. डिस्कनेक्ट केले"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"कास्ट बटण. कनेक्ट करत आहे"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"कास्ट बटण. कनेक्ट केले"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"यावर कास्ट करा"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"डिव्हाइसेस शोधत आहे"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"डिस्कनेक्ट करा"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"कास्ट करणे थांबवा"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"बंद करा"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"प्ले करा"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"विराम"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"थांबा"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"विस्तृत करा"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"संकुचित करा"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"अल्बम कला"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"व्हॉल्यूम स्लायडर"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"मीडिया निवडला नाही"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"कोणतीही माहिती उपलब्ध नाही"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"स्क्रीन कास्ट करत आहे"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ms/strings.xml b/packages/MediaComponents/res/values-ms/strings.xml
new file mode 100644
index 0000000..085e480
--- /dev/null
+++ b/packages/MediaComponents/res/values-ms/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Peranti"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Butang Hantar"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Butang hantar. Sambungan diputuskan"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Butang hantar. Menyambung"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Butang hantar. Disambungkan"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Hantar ke"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Mencari peranti"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Putuskan sambungan"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Berhenti menghantar"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Tutup"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Main"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Jeda"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Berhenti"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Kembangkan"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Runtuhkan"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Seni album"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Peluncur kelantangan"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Tiada media dipilih"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Maklumat tidak tersedia"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Menghantar skrin"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-my/strings.xml b/packages/MediaComponents/res/values-my/strings.xml
new file mode 100644
index 0000000..083d805
--- /dev/null
+++ b/packages/MediaComponents/res/values-my/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"စနစ်"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"စက်ပစ္စည်းများ"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"ကာစ်တ်လုပ်ရန် ခလုတ်"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"ကာစ်ခလုတ်။ ချိတ်ဆက်မထားပါ"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"ကာစ်ခလုတ်။ ချိတ်ဆက်နေသည်"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"ကာစ်ခလုတ်။ ချိတ်ဆက်ထားသည်"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"ကာစ်လုပ်ရန် စက်"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"စက်ပစ္စည်းများ ရှာဖွေခြင်း"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"ဆက်သွယ်မှု ဖြတ်ရန်"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"ကာစ်လုပ်ခြင်း ရပ်ရန်"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"ပိတ်ရန်"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"ဖွင့်ရန်"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"ခဏရပ်ရန်"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"ရပ်ရန်"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"ဖြန့်ချရန်၃"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"ခေါက်သိမ်းရန်..."</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"အယ်လ်ဘမ်ပုံ"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"အသံအတိုးအကျယ်ချိန်သည့် ဆလိုက်ဒါ"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"မည်သည့်မီဒီမှ မရွေးချယ်ထားပါ"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"အချက်အလက် မရရှိနိုင်ပါ"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"တည်းဖြတ်ရေး မျက်နှာပြင်"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-nb/strings.xml b/packages/MediaComponents/res/values-nb/strings.xml
new file mode 100644
index 0000000..4f764c9
--- /dev/null
+++ b/packages/MediaComponents/res/values-nb/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Enheter"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast-ikonet"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast-knappen. Frakoblet"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast-knappen. Kobler til"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast-knappen. Tilkoblet"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Cast til"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Søker etter enheter"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Koble fra"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Stopp castingen"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Lukk"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Spill av"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Sett på pause"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stopp"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Utvid"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Skjul"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albumgrafikk"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Glidebryter for volum"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Du har ikke valgt noen medier"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Ingen informasjon er tilgjengelig"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Caster skjermen"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ne/strings.xml b/packages/MediaComponents/res/values-ne/strings.xml
new file mode 100644
index 0000000..d6c2e1a
--- /dev/null
+++ b/packages/MediaComponents/res/values-ne/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"प्रणाली"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"उपकरणहरू"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast बटन"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast बटन। जडान विच्छेद भयो"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast बटन। जडान हुँदै"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast बटन। जडान भयो"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"यसमा Cast गर्नुहोस्"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"यन्त्रहरू पत्ता लगाउँदै"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"विच्छेद गर्नुहोस्"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"casting रोक्नुहोस्"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"बन्द गर्नुहोस्"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"बजाउनुहोस्"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"रोक्नुहोस्"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"रोक्नुहोस्"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"विस्तार गर्नुहोस्"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"संक्षिप्त पार्नुहोस्"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"एल्बम आर्ट"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"भोल्युमको स्लाइडर"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"कुनै मिडिया चयन भएको छैन"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"जानकारी उपलब्ध छैन"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"स्क्रिन cast गर्दै"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-nl/strings.xml b/packages/MediaComponents/res/values-nl/strings.xml
new file mode 100644
index 0000000..05df62d
--- /dev/null
+++ b/packages/MediaComponents/res/values-nl/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Systeem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Apparaten"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast-icoon"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast-icoon. Verbinding verbroken"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast-icoon. Verbinding maken"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast-icoon. Verbonden"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Casten naar"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Apparaten zoeken"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Loskoppelen"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Casten stoppen"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Sluiten"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Afspelen"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Onderbreken"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Stoppen"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Uitvouwen"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Samenvouwen"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albumhoes"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Volumeschuifregelaar"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Geen media geselecteerd"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Geen informatie beschikbaar"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Scherm casten"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-pa/strings.xml b/packages/MediaComponents/res/values-pa/strings.xml
new file mode 100644
index 0000000..1b5df71
--- /dev/null
+++ b/packages/MediaComponents/res/values-pa/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"ਸਿਸਟਮ"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"ਡਿਵਾਈਸਾਂ"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"ਕਾਸਟ ਬਟਨ"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"ਕਾਸਟ ਬਟਨ। ਡਿਸਕਨੈਕਟ ਕੀਤਾ ਗਿਆ"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"ਕਾਸਟ ਬਟਨ। ਕਨੈਕਟ ਕੀਤਾ ਜਾ ਰਿਹਾ ਹੈ"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"ਕਾਸਟ ਬਟਨ। ਕਨੈਕਟ ਕੀਤਾ ਗਿਆ"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"ਏਥੇ ਕਾਸਟ ਕਰੋ"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"ਡੀਵਾਈਸਾਂ ਨੂੰ ਲੱਭਿਆ ਜਾ ਰਿਹਾ ਹੈ"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"ਡਿਸਕਨੈਕਟ ਕਰੋ"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"ਕਾਸਟ ਕਰਨਾ ਬੰਦ ਕਰੋ"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"ਬੰਦ ਕਰੋ"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"ਪਲੇ ਕਰੋ"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"ਰੋਕੋ"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"ਬੰਦ ਕਰੋ"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"ਵਿਸਤਾਰ ਕਰੋ"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"ਬੰਦ ਕਰੋ"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ਐਲਬਮ ਆਰਟ"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"ਵੌਲਯੂਮ ਸਲਾਈਡਰ"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"ਕੋਈ ਵੀ ਮੀਡੀਆ ਨਹੀਂ ਚੁਣਿਆ ਗਿਆ"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"ਕੋਈ ਜਾਣਕਾਰੀ ਉਪਲਬਧ ਨਹੀਂ"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"ਸਕ੍ਰੀਨ ਜੋੜ ਰਿਹਾ ਹੈ"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-pl/strings.xml b/packages/MediaComponents/res/values-pl/strings.xml
new file mode 100644
index 0000000..c792a6d
--- /dev/null
+++ b/packages/MediaComponents/res/values-pl/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Urządzenia"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Przycisk Cast"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Przycisk Prześlij ekran. Rozłączono"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Przycisk Prześlij ekran. Łączę"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Przycisk Prześlij ekran. Połączono"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Przesyłaj na"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Znajdowanie urządzeń"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Odłącz"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Zatrzymaj przesyłanie"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Zamknij"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Odtwórz"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Wstrzymaj"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Zatrzymaj"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Rozwiń"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Zwiń"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Okładka albumu"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Suwak głośności"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nie wybrano multimediów"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Brak informacji"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Przesyłam ekran"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-pt-rBR/strings.xml b/packages/MediaComponents/res/values-pt-rBR/strings.xml
new file mode 100644
index 0000000..43c619d
--- /dev/null
+++ b/packages/MediaComponents/res/values-pt-rBR/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Dispositivos"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Botão Transmitir"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Botão \"Transmitir\". Desconectado"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Botão \"Transmitir\". Conectando"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Botão \"Transmitir\". Conectado"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Transmitir para"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Localizando dispositivos"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Desconectar"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Interromper transmissão"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Fechar"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Reproduzir"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pausar"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Parar"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Expandir"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Recolher"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Arte do álbum"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Controle deslizante de volume"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nenhuma mídia selecionada"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nenhuma informação disponível"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Transmitindo a tela"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-pt-rPT/strings.xml b/packages/MediaComponents/res/values-pt-rPT/strings.xml
new file mode 100644
index 0000000..3f0a61d
--- /dev/null
+++ b/packages/MediaComponents/res/values-pt-rPT/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Dispositivos"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Botão Transmitir"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Botão Transmitir. Desligado"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Botão Transmitir. A ligar..."</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Botão Transmitir. Ligado"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Transmitir para"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"A localizar dispositivos"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Desassociar"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Parar a transmissão"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Fechar"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Reproduzir"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Interromper"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Parar"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Expandir"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Reduzir"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Imagem do álbum"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Controlo de deslize do volume"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nenhum suporte multimédia selecionado"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nenhuma informação disponível"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"A transmitir o ecrã"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-pt/strings.xml b/packages/MediaComponents/res/values-pt/strings.xml
new file mode 100644
index 0000000..43c619d
--- /dev/null
+++ b/packages/MediaComponents/res/values-pt/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistema"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Dispositivos"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Botão Transmitir"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Botão \"Transmitir\". Desconectado"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Botão \"Transmitir\". Conectando"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Botão \"Transmitir\". Conectado"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Transmitir para"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Localizando dispositivos"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Desconectar"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Interromper transmissão"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Fechar"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Reproduzir"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pausar"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Parar"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Expandir"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Recolher"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Arte do álbum"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Controle deslizante de volume"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nenhuma mídia selecionada"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nenhuma informação disponível"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Transmitindo a tela"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ro/strings.xml b/packages/MediaComponents/res/values-ro/strings.xml
new file mode 100644
index 0000000..6ebb2f6
--- /dev/null
+++ b/packages/MediaComponents/res/values-ro/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Dispozitive"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Butonul de proiecție"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Butonul de proiecție. Deconectat"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Butonul de proiecție. Se conectează"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Butonul de proiecție. Conectat"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Proiectați pe"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Se caută dispozitive"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Deconectați-vă"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Nu mai proiectați"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Închideți"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Redați"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Întrerupeți"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Opriți"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Extindeți"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Restrângeți"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Grafica albumului"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Glisor pentru volum"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Niciun fișier media selectat"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nu sunt disponibile informații"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Se proiectează ecranul"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ru/strings.xml b/packages/MediaComponents/res/values-ru/strings.xml
new file mode 100644
index 0000000..7c462d2
--- /dev/null
+++ b/packages/MediaComponents/res/values-ru/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Система"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Устройства"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Кнопка трансляции"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Кнопка трансляции. Устройство отключено."</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Кнопка трансляции. Устройство подключается."</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Кнопка трансляции. Устройство подключено."</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Выберите устройство"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Поиск устройств…"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Отключить"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Прекратить трансляцию"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Закрыть"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Воспроизвести"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Приостановить"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Остановить"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Развернуть"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Свернуть"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Обложка"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Регулятор громкости"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Медиафайл не выбран"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Данных нет"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Подключение к удаленному монитору"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-si/strings.xml b/packages/MediaComponents/res/values-si/strings.xml
new file mode 100644
index 0000000..a55ce50
--- /dev/null
+++ b/packages/MediaComponents/res/values-si/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"පද්ධතිය"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"උපාංග"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"විකාශ බොත්තම"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"විකාශ බොත්තම. විසන්ධි කරන ලදී"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"විකාශ බොත්තම සම්බන්ධ කරමින්"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"විකාශ බොත්තම සම්බන්ධ කරන ලදී"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"විකාශය"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"උපාංග සෙවීම"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"විසන්ධි කරන්න"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"විකාශ කිරීම නතර කරන්න"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"වසන්න"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"ධාවනය කරන්න"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"විරාම ගන්වන්න"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"නතර කරන්න"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"දිග හරින්න"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"හකුළන්න"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ඇල්බම කලාව"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"හඬ පරිමා ස්ලයිඩරය"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"මාධ්යය තෝරා නැත"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"ලබා ගත හැකි තොරතුරු නොමැත"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"විකාශ තිරය"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-sk/strings.xml b/packages/MediaComponents/res/values-sk/strings.xml
new file mode 100644
index 0000000..a58aa11
--- /dev/null
+++ b/packages/MediaComponents/res/values-sk/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Systém"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Zariadenia"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Tlačidlo prenosu"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Tlačidlo prenosu. Odpojené"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Tlačidlo prenosu. Pripája sa"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Tlačidlo prenosu. Pripojené"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Prenos do"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Hľadajú sa zariadenia"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Odpojiť"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Zastaviť prenášanie"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Zavrieť"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Prehrať"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pozastaviť"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Zastaviť"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Rozbaliť"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Zbaliť"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Obrázok albumu"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Posúvač hlasitosti"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nie sú vybrané žiadne médiá"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nie sú k dispozícii žiadne informácie"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Prenáša sa obrazovka"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-sl/strings.xml b/packages/MediaComponents/res/values-sl/strings.xml
new file mode 100644
index 0000000..8ca4ce4
--- /dev/null
+++ b/packages/MediaComponents/res/values-sl/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Naprave"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Gumb za predvajanje"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Gumb za predvajanje. Povezava je prekinjena."</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Gumb za predvajanje. Vzpostavljanje povezave."</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Gumb za predvajanje. Povezava je vzpostavljena."</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Predvajanje prek:"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Iskanje naprav"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Prekini povezavo"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Ustavi predvajanje"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Zapri"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Predvajanje"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Zaustavi"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Ustavi"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Razširi"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Strni"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Naslovnica albuma"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Drsnik za glasnost"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Ni izbrane predstavnosti"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Podatki niso na voljo"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Predvajanje zaslona"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-sq/strings.xml b/packages/MediaComponents/res/values-sq/strings.xml
new file mode 100644
index 0000000..816e110
--- /dev/null
+++ b/packages/MediaComponents/res/values-sq/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistemi"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Pajisjet"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Butoni i transmetimit"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Butoni i transmetimit. Je i shkëputur"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Butoni i transmetimit. Po lidhet"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Butoni i transmetimit. Je i lidhur"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Transmeto te"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Po kërkon pajisje"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Shkëpute"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Ndalo transmetimin"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Mbyll"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Luaj"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pauzë"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Ndalo"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Zgjeroje"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Palose"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Kopertina e albumit"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Rrëshqitësi i volumit"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Nuk u zgjodh asnjë media"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Nuk jepet asnjë informacion"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Po transmeton ekranin"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-sr/strings.xml b/packages/MediaComponents/res/values-sr/strings.xml
new file mode 100644
index 0000000..caabad5
--- /dev/null
+++ b/packages/MediaComponents/res/values-sr/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Систем"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Уређаји"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Дугме Пребаци"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Дугме Пребаци. Веза је прекинута"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Дугме Пребаци. Повезује се"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Дугме Пребаци. Повезан је"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Пребацуј на"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Проналажење уређаја"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Прекини везу"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Заустави пребацивање"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Затвори"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Пусти"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Паузирај"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Заустави"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Прошири"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Скупи"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Омот албума"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Клизач за јачину звука"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Нема изабраних медија"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Нису доступне никакве информације"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Пребацује се екран"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-sv/strings.xml b/packages/MediaComponents/res/values-sv/strings.xml
new file mode 100644
index 0000000..ca7d3e0
--- /dev/null
+++ b/packages/MediaComponents/res/values-sv/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Enheter"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Cast-knappen"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Cast-knappen. Frånkopplad"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Cast-knappen. Ansluter"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Cast-knappen. Ansluten"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Casta till"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Letar efter enheter"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Koppla från"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Sluta casta"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Stäng"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Spela upp"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Pausa"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Avbryt"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Utöka"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Komprimera"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Skivomslag"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Volymreglage"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Inga media har valts"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Det finns ingen information"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Skärmen castas"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-sw/strings.xml b/packages/MediaComponents/res/values-sw/strings.xml
new file mode 100644
index 0000000..9562cb1
--- /dev/null
+++ b/packages/MediaComponents/res/values-sw/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Mfumo"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Vifaa"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Kitufe cha kutuma"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Kitufe cha kutuma. Kimeondolewa"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Kitufe cha kutuma. Kinaunganisha"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Kitufe cha kutuma. Kimeunganishwa"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Tuma kwenye"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Inatafuta vifaa"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Ondoa"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Acha kutuma"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Funga"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Cheza"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Sitisha"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Simamisha"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Panua"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Kunja"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Sanaa ya albamu"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Kitelezi cha sauti"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Hakuna maudhui yaliyochaguliwa"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Hakuna maelezo yaliyopatikana"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Inatuma skrini"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-sw600dp/dimens.xml b/packages/MediaComponents/res/values-sw600dp/dimens.xml
new file mode 100644
index 0000000..4042348
--- /dev/null
+++ b/packages/MediaComponents/res/values-sw600dp/dimens.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources>
+ <!-- The platform's desired fixed width for a dialog along the major axis
+ (the screen is in landscape). This may be either a fraction or a dimension.-->
+ <item type="dimen" name="mr_dialog_fixed_width_major">60%</item>
+ <!-- The platform's desired fixed width for a dialog along the minor axis
+ (the screen is in portrait). This may be either a fraction or a dimension.-->
+ <item type="dimen" name="mr_dialog_fixed_width_minor">90%</item>
+</resources>
diff --git a/packages/MediaComponents/res/values-sw720dp/dimens.xml b/packages/MediaComponents/res/values-sw720dp/dimens.xml
new file mode 100644
index 0000000..634ab8d
--- /dev/null
+++ b/packages/MediaComponents/res/values-sw720dp/dimens.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources>
+ <!-- The platform's desired fixed width for a dialog along the major axis
+ (the screen is in landscape). This may be either a fraction or a dimension.-->
+ <item type="dimen" name="mr_dialog_fixed_width_major">50%</item>
+ <!-- The platform's desired fixed width for a dialog along the minor axis
+ (the screen is in portrait). This may be either a fraction or a dimension.-->
+ <item type="dimen" name="mr_dialog_fixed_width_minor">70%</item>
+</resources>
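The two dimens.xml overlays above declare the dialog's fixed width as a fraction of the screen (60%/90% at sw600dp, 50%/70% at sw720dp), and the in-file comments note the value may also be an absolute dimension. The sketch below shows one way such a resource can be resolved at runtime; the resource names come from the diff, but the DialogWidthResolver class and its usage are illustrative assumptions, not the MediaComponents implementation.

import android.content.Context;
import android.util.DisplayMetrics;
import android.util.TypedValue;
import android.view.ViewGroup;

// Illustrative helper (assumption, not part of MediaComponents): resolves a dimen
// resource that may be declared either as a fraction (e.g. 60%) or as an absolute
// dimension (e.g. 320dp) into a pixel width for the dialog.
final class DialogWidthResolver {
    private DialogWidthResolver() {}

    static int resolveWidth(Context context, int dimenResId) {
        DisplayMetrics metrics = context.getResources().getDisplayMetrics();
        TypedValue value = new TypedValue();
        context.getResources().getValue(dimenResId, value, true /* resolveRefs */);

        if (value.type == TypedValue.TYPE_FRACTION) {
            // A fraction such as 60% is taken relative to the current screen width.
            return (int) value.getFraction(metrics.widthPixels, metrics.widthPixels);
        }
        if (value.type == TypedValue.TYPE_DIMENSION) {
            // An absolute dimension (dp, px, ...) converts directly to pixels.
            return (int) value.getDimension(metrics);
        }
        // Anything else: let the dialog size itself.
        return ViewGroup.LayoutParams.WRAP_CONTENT;
    }
}

With the sw600dp overlay applied in portrait, resolveWidth(context, R.dimen.mr_dialog_fixed_width_minor) would yield roughly 90% of the screen width, matching the comment in the resource file.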
diff --git a/packages/MediaComponents/res/values-ta/strings.xml b/packages/MediaComponents/res/values-ta/strings.xml
new file mode 100644
index 0000000..e1978f3
--- /dev/null
+++ b/packages/MediaComponents/res/values-ta/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"சிஸ்டம்"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"சாதனங்கள்"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"திரையிடு பட்டன்"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"அனுப்புதல் பொத்தான். துண்டிக்கப்பட்டது"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"அனுப்புதல் பொத்தான். இணைக்கிறது"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"அனுப்புதல் பொத்தான். இணைக்கப்பட்டது"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"இதற்கு அனுப்பு"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"சாதனங்களைத் தேடுகிறது"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"தொடர்பைத் துண்டி"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"அனுப்புவதை நிறுத்து"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"மூடும்"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"இயக்கும்"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"இடைநிறுத்தும்"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"நிறுத்துவதற்கான பொத்தான்"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"விரிவாக்கு"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"சுருக்கு"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ஆல்பம் ஆர்ட்"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"ஒலியளவு ஸ்லைடர்"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"மீடியா எதுவும் தேர்ந்தெடுக்கப்படவில்லை"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"தகவல் எதுவுமில்லை"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"திரையை அனுப்புகிறீர்கள்"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-te/strings.xml b/packages/MediaComponents/res/values-te/strings.xml
new file mode 100644
index 0000000..7d312e3
--- /dev/null
+++ b/packages/MediaComponents/res/values-te/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"సిస్టమ్"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"పరికరాలు"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"ప్రసారం చేయి బటన్"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"ప్రసార బటన్. డిస్కనెక్ట్ చేయబడింది"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"ప్రసార బటన్. కనెక్ట్ చేస్తోంది"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"ప్రసార బటన్. కనెక్ట్ చేయబడింది"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"దీనికి ప్రసారం చేయండి"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"పరికరాలను కనుగొంటోంది"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"డిస్కనెక్ట్ చేయి"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"ప్రసారాన్ని ఆపివేయి"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"మూసివేస్తుంది"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"ప్లే చేస్తుంది"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"పాజ్ చేస్తుంది"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"ఆపివేయి"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"విస్తరింపజేస్తుంది"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"కుదిస్తుంది"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ఆల్బమ్ ఆర్ట్"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"వాల్యూమ్ స్లయిడర్"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"మీడియా ఏదీ ఎంచుకోబడలేదు"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"సమాచారం అందుబాటులో లేదు"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"స్క్రీన్ను ప్రసారం చేస్తోంది"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-th/strings.xml b/packages/MediaComponents/res/values-th/strings.xml
new file mode 100644
index 0000000..cfa8ae5
--- /dev/null
+++ b/packages/MediaComponents/res/values-th/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"ระบบ"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"อุปกรณ์"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"ปุ่ม \"แคสต์\""</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"ปุ่ม \"แคสต์\" ยกเลิกการเชื่อมต่อ"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"ปุ่ม \"แคสต์\" กำลังเชื่อมต่อ"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"ปุ่ม \"แคสต์\" เชื่อมต่อแล้ว"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"แคสต์ไปยัง"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"กำลังค้นหาอุปกรณ์"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"ยกเลิกการเชื่อมต่อ"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"หยุดแคสต์"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"ปิด"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"เล่น"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"หยุดชั่วคราว"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"หยุด"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"ขยาย"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"ยุบ"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"ปกอัลบั้ม"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"แถบเลื่อนปรับระดับเสียง"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"ไม่ได้เลือกสื่อไว้"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"ไม่มีข้อมูล"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"กำลังแคสต์หน้าจอ"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-tl/strings.xml b/packages/MediaComponents/res/values-tl/strings.xml
new file mode 100644
index 0000000..a8be3d0
--- /dev/null
+++ b/packages/MediaComponents/res/values-tl/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"System"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Mga Device"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Button na I-cast"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Button na I-cast. Nadiskonekta"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Button na I-cast. Kumokonekta"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Button na I-cast. Nakakonekta"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"I-cast sa"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Naghahanap ng mga device"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Idiskonekta"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Ihinto ang pag-cast"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Isara"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"I-play"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"I-pause"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Ihinto"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Palawakin"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"I-collapse"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Album art"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Slider ng volume"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Walang piniling media"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Walang available na impormasyon"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Kina-cast ang screen"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-tr/strings.xml b/packages/MediaComponents/res/values-tr/strings.xml
new file mode 100644
index 0000000..05f6392
--- /dev/null
+++ b/packages/MediaComponents/res/values-tr/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Sistem"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Cihazlar"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Yayınla düğmesi"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Yayınla düğmesi. Bağlantı kesildi"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Yayınla düğmesi. Bağlanıyor"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Yayınla düğmesi. Bağlandı"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Şuraya yayınla:"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Cihazlar bulunuyor"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Bağlantıyı kes"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Yayını durdur"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Kapat"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Oynat"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Duraklat"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Durdur"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Genişlet"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Daralt"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albüm kapağı"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Ses düzeyi kaydırma çubuğu"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Medya seçilmedi"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Bilgi yok"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Ekran yayınlanıyor"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-uk/strings.xml b/packages/MediaComponents/res/values-uk/strings.xml
new file mode 100644
index 0000000..33d365e
--- /dev/null
+++ b/packages/MediaComponents/res/values-uk/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Система"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Пристрої"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Кнопка трансляції"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Кнопка трансляції. Від’єднано"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Кнопка трансляції. Під’єднання"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Кнопка трансляції. Під’єднано"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Транслювати на"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Пошук пристроїв"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Відключити"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Припинити трансляцію"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Закрити"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Відтворити"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Призупинити"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Припинити"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Розгорнути"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Згорнути"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Обкладинка альбому"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Повзунок гучності"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Медіа-файл не вибрано"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Немає даних"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Трансляція екрана"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-ur/strings.xml b/packages/MediaComponents/res/values-ur/strings.xml
new file mode 100644
index 0000000..632c598
--- /dev/null
+++ b/packages/MediaComponents/res/values-ur/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"سسٹم"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"آلات"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"کاسٹ کرنے کا بٹن"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"کاسٹ کرنے کا بٹن۔ غیر منسلک ہے"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"کاسٹ کرنے کا بٹن۔ منسلک ہو رہا ہے"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"کاسٹ کرنے کا بٹن۔ منسلک ہے"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"اس میں کاسٹ کریں"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"آلات تلاش ہو رہے ہیں"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"غیر منسلک کریں"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"کاسٹ کرنا بند کریں"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"بند کریں"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"چلائیں"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"موقوف کریں"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"روکیں"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"پھیلائیں"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"سکیڑیں"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"البم آرٹ"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"والیوم سلائیڈر"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"کوئی میڈیا منتخب نہیں ہے"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"کوئی معلومات دستیاب نہیں"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"سکرین کاسٹ ہو رہی ہے"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-uz/strings.xml b/packages/MediaComponents/res/values-uz/strings.xml
new file mode 100644
index 0000000..10a0817
--- /dev/null
+++ b/packages/MediaComponents/res/values-uz/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Tizim"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Qurilmalar"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Translatsiya tugmasi"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Translatsiya tugmasi. Uzildi"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Translatsiya tugmasi. Ulanmoqda"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Translatsiya tugmasi. Ulandi"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Quyidagiga translatsiya qilish:"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Qurilmalarni topish"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Ulanishni uzish"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Translatsiyani to‘xtatish"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Yopish"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Boshlash"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"To‘xtatib turish"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"To‘xtatish"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Yoyish"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Yig‘ish"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Albom muqovasi"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Tovush balandligi slayderi"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Multimedia tanlamagan"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Hech qanday ma’lumot yo‘q"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Ekranni translatsiya qilish"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-vi/strings.xml b/packages/MediaComponents/res/values-vi/strings.xml
new file mode 100644
index 0000000..7098cca
--- /dev/null
+++ b/packages/MediaComponents/res/values-vi/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Hệ thống"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Thiết bị"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Nút truyền"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Nút truyền. Đã ngắt kết nối"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Nút truyền. Đang kết nối"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Nút truyền. Đã kết nối"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Truyền tới"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Đang tìm thiết bị"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Ngắt kết nối"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Dừng truyền"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Đóng"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Phát"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Tạm dừng"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Dừng"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Mở rộng"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Thu gọn"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Ảnh bìa album"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Thanh trượt âm lượng"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Không có phương tiện nào được chọn"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Không có thông tin nào"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Đang truyền màn hình"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-zh-rCN/strings.xml b/packages/MediaComponents/res/values-zh-rCN/strings.xml
new file mode 100644
index 0000000..1e22d01
--- /dev/null
+++ b/packages/MediaComponents/res/values-zh-rCN/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"系统"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"设备"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"投射按钮"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"投射按钮。已断开连接"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"投射按钮。正在连接"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"投射按钮。已连接"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"投射到"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"正在查找设备"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"断开连接"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"停止投射"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"关闭"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"播放"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"暂停"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"停止"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"展开"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"收起"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"专辑封面"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"音量滑块"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"未选择任何媒体"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"没有任何相关信息"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"正在投射屏幕"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-zh-rHK/strings.xml b/packages/MediaComponents/res/values-zh-rHK/strings.xml
new file mode 100644
index 0000000..156e5c2
--- /dev/null
+++ b/packages/MediaComponents/res/values-zh-rHK/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"系統"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"裝置"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"投放按鈕"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"投放按鈕。已解除連接"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"投放按鈕。正在連接"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"投放按鈕。已連接"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"投放至"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"正在尋找裝置"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"中斷連線"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"停止投放"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"關閉"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"播放"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"暫停"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"停止"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"展開"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"收合"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"專輯封面"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"音量滑桿"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"尚未選擇媒體"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"沒有詳細資料"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"正在投放螢幕"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-zh-rTW/strings.xml b/packages/MediaComponents/res/values-zh-rTW/strings.xml
new file mode 100644
index 0000000..6cafde1
--- /dev/null
+++ b/packages/MediaComponents/res/values-zh-rTW/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"系統"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"裝置"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"投放按鈕"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"投放按鈕;已中斷連線"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"投放按鈕;連線中"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"投放按鈕;已連線"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"投放到"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"正在尋找裝置"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"中斷連線"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"停止投放"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"關閉"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"播放"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"暫停"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"停止"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"展開"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"收合"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"專輯封面"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"音量滑桿"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"未選取任何媒體"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"沒有可用的資訊"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"正在投放螢幕"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values-zu/strings.xml b/packages/MediaComponents/res/values-zu/strings.xml
new file mode 100644
index 0000000..e107c43
--- /dev/null
+++ b/packages/MediaComponents/res/values-zu/strings.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+ <string name="mr_system_route_name" msgid="5441529851481176817">"Isistimu"</string>
+ <string name="mr_user_route_category_name" msgid="7498112907524977311">"Amadivayisi"</string>
+ <string name="mr_button_content_description" msgid="3698378085901466129">"Inkinobho ye-Cast"</string>
+ <string name="mr_cast_button_disconnected" msgid="816305490427819240">"Inkinobho yokusakaza. Kunqanyuliwe"</string>
+ <string name="mr_cast_button_connecting" msgid="2187642765091873834">"Inkinobho yokusakaza. Kuyaxhunywa"</string>
+ <string name="mr_cast_button_connected" msgid="5088427771788648085">"Inkinobho yokusakaza. Kuxhunyiwe"</string>
+ <string name="mr_chooser_title" msgid="414301941546135990">"Sakaza ku-"</string>
+ <string name="mr_chooser_searching" msgid="6349900579507521956">"Ithola amadivayisi"</string>
+ <string name="mr_controller_disconnect" msgid="1227264889412989580">"Nqamula"</string>
+ <string name="mr_controller_stop_casting" msgid="8857886794086583226">"Misa ukusakaza"</string>
+ <string name="mr_controller_close_description" msgid="7333862312480583260">"Vala"</string>
+ <string name="mr_controller_play" msgid="683634565969987458">"Dlala"</string>
+ <string name="mr_controller_pause" msgid="5451884435510905406">"Misa isikhashana"</string>
+ <string name="mr_controller_stop" msgid="735874641921425123">"Misa"</string>
+ <string name="mr_controller_expand_group" msgid="8062427022744266907">"Nweba"</string>
+ <string name="mr_controller_collapse_group" msgid="7924809056904240926">"Goqa"</string>
+ <string name="mr_controller_album_art" msgid="6422801843540543585">"Ubuciko be-albhamu"</string>
+ <string name="mr_controller_volume_slider" msgid="2361785992211841709">"Isilayida sevolumu"</string>
+ <string name="mr_controller_no_media_selected" msgid="6547130360349182381">"Ayikho imidiya ekhethiwe"</string>
+ <string name="mr_controller_no_info_available" msgid="5585418471741142924">"Alukho ulwazi olutholakalayo"</string>
+ <string name="mr_controller_casting_screen" msgid="4868457957151124867">"Isikrini sokusakaza"</string>
+</resources>
diff --git a/packages/MediaComponents/res/values/arrays.xml b/packages/MediaComponents/res/values/arrays.xml
new file mode 100644
index 0000000..1187320
--- /dev/null
+++ b/packages/MediaComponents/res/values/arrays.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources>
+ <integer-array name="speed_multiplied_by_100">
+ <item>25</item>
+ <item>50</item>
+ <item>75</item>
+ <item>100</item>
+ <item>125</item>
+ <item>150</item>
+ <item>200</item>
+ </integer-array>
+</resources>
\ No newline at end of file
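Note on the array above: speed_multiplied_by_100 stores playback speeds as integers scaled by 100 (25 = 0.25x through 200 = 2x), matching the MediaControlView2_playback_speeds labels added later in this patch. A minimal sketch of mapping a selected entry back to a float rate; the resource name comes from this patch, while context, selectedIndex, and player (a MediaPlayer, API 23+) are assumed for illustration:

    // Hypothetical helper, not part of this patch: convert a selected
    // speed_multiplied_by_100 entry into a float playback rate.
    int[] speeds = context.getResources().getIntArray(R.array.speed_multiplied_by_100);
    float rate = speeds[selectedIndex] / 100f;  // e.g. 125 -> 1.25f
    player.setPlaybackParams(player.getPlaybackParams().setSpeed(rate));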
diff --git a/packages/MediaComponents/res/values/attrs.xml b/packages/MediaComponents/res/values/attrs.xml
new file mode 100644
index 0000000..e37285b
--- /dev/null
+++ b/packages/MediaComponents/res/values/attrs.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources>
+ <declare-styleable name="MediaRouteButton">
+        <!-- This drawable is a state list: the "checked" state indicates
+             active media routing, the "checkable" state indicates connecting,
+             and non-checked / non-checkable indicates that media is playing
+             to the local device only. -->
+ <attr name="externalRouteEnabledDrawable" format="reference" />
+ <!-- Tint to apply to the media route button -->
+ <attr name="mediaRouteButtonTint" format="color" />
+
+ <attr name="android:minWidth" />
+ <attr name="android:minHeight" />
+ </declare-styleable>
+
+ <attr name="mediaRouteButtonStyle" format="reference" />
+ <attr name="mediaRouteCloseDrawable" format="reference" />
+ <attr name="mediaRoutePlayDrawable" format="reference" />
+ <attr name="mediaRoutePauseDrawable" format="reference" />
+ <attr name="mediaRouteStopDrawable" format="reference" />
+ <attr name="mediaRouteAudioTrackDrawable" format="reference" />
+ <attr name="mediaRouteDefaultIconDrawable" format="reference" />
+ <attr name="mediaRouteTvIconDrawable" format="reference" />
+ <attr name="mediaRouteSpeakerIconDrawable" format="reference" />
+ <attr name="mediaRouteSpeakerGroupIconDrawable" format="reference" />
+ <attr name="mediaRouteControlPanelThemeOverlay" format="reference" />
+
+ <attr name="mediaRouteTheme" format="reference" />
+</resources>
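The MediaRouteButton styleable above would typically be consumed in a view constructor via obtainStyledAttributes. A minimal sketch under that assumption (this is not the actual MediaComponents implementation, and the usual android.* imports are assumed); note how android:minWidth surfaces as the generated MediaRouteButton_android_minWidth index:

    // Hypothetical constructor body, for illustration only.
    TypedArray a = context.obtainStyledAttributes(
            attrs, R.styleable.MediaRouteButton, R.attr.mediaRouteButtonStyle, 0);
    Drawable indicator =
            a.getDrawable(R.styleable.MediaRouteButton_externalRouteEnabledDrawable);
    ColorStateList tint =
            a.getColorStateList(R.styleable.MediaRouteButton_mediaRouteButtonTint);
    int minWidth =
            a.getDimensionPixelSize(R.styleable.MediaRouteButton_android_minWidth, 0);
    a.recycle();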
diff --git a/packages/MediaComponents/res/values/colors.xml b/packages/MediaComponents/res/values/colors.xml
new file mode 100644
index 0000000..e7bc299
--- /dev/null
+++ b/packages/MediaComponents/res/values/colors.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources>
+ <color name="gray">#808080</color>
+ <color name="white">#ffffff</color>
+ <color name="white_opacity_70">#B3ffffff</color>
+ <color name="black_opacity_70">#B3000000</color>
+ <color name="title_bar_gradient_start">#50000000</color>
+ <color name="title_bar_gradient_end">#00000000</color>
+ <color name="bottom_bar_background">#40202020</color>
+</resources>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/values/dimens.xml b/packages/MediaComponents/res/values/dimens.xml
new file mode 100644
index 0000000..2d7b022
--- /dev/null
+++ b/packages/MediaComponents/res/values/dimens.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources>
+ <!-- Dialog size -->
+ <eat-comment />
+ <!-- The platform's desired fixed width for a dialog along the major axis
+ (the screen is in landscape). This may be either a fraction or a dimension.-->
+ <dimen name="mr_dialog_fixed_width_major">320dp</dimen>
+ <!-- The platform's desired fixed width for a dialog along the minor axis
+ (the screen is in portrait). This may be either a fraction or a dimension.-->
+ <dimen name="mr_dialog_fixed_width_minor">320dp</dimen>
+
+ <!-- MediaRouteController's volume group list -->
+ <eat-comment />
+ <!-- Maximum height of volume group list. -->
+ <dimen name="mr_controller_volume_group_list_max_height">288dp</dimen>
+ <!-- Height of volume group item. -->
+ <dimen name="mr_controller_volume_group_list_item_height">68dp</dimen>
+ <!-- Size of an item's icon. -->
+ <dimen name="mr_controller_volume_group_list_item_icon_size">24dp</dimen>
+
+ <dimen name="mr_controller_volume_group_list_padding_top">16dp</dimen>
+ <!-- Group list expand/collapse animation duration. -->
+ <integer name="mr_controller_volume_group_list_animation_duration_ms">400</integer>
+ <!-- Group list fade in animation duration. -->
+ <integer name="mr_controller_volume_group_list_fade_in_duration_ms">400</integer>
+ <!-- Group list fade out animation duration. -->
+ <integer name="mr_controller_volume_group_list_fade_out_duration_ms">200</integer>
+
+ <dimen name="mcv2_embedded_settings_width">150dp</dimen>
+ <dimen name="mcv2_embedded_settings_height">36dp</dimen>
+ <dimen name="mcv2_embedded_settings_icon_size">20dp</dimen>
+ <dimen name="mcv2_embedded_settings_text_height">18dp</dimen>
+ <dimen name="mcv2_embedded_settings_main_text_size">12sp</dimen>
+ <dimen name="mcv2_embedded_settings_sub_text_size">10sp</dimen>
+ <dimen name="mcv2_full_settings_width">225dp</dimen>
+ <dimen name="mcv2_full_settings_height">54dp</dimen>
+ <dimen name="mcv2_full_settings_icon_size">30dp</dimen>
+ <dimen name="mcv2_full_settings_text_height">27dp</dimen>
+ <dimen name="mcv2_full_settings_main_text_size">16sp</dimen>
+ <dimen name="mcv2_full_settings_sub_text_size">13sp</dimen>
+ <dimen name="mcv2_settings_offset">8dp</dimen>
+
+ <dimen name="mcv2_transport_controls_padding">4dp</dimen>
+ <dimen name="mcv2_pause_icon_size">36dp</dimen>
+ <dimen name="mcv2_full_icon_size">28dp</dimen>
+ <dimen name="mcv2_embedded_icon_size">24dp</dimen>
+ <dimen name="mcv2_minimal_icon_size">24dp</dimen>
+ <dimen name="mcv2_icon_margin">10dp</dimen>
+
+ <dimen name="mcv2_full_album_image_portrait_size">232dp</dimen>
+ <dimen name="mcv2_full_album_image_landscape_size">176dp</dimen>
+
+ <dimen name="mcv2_custom_progress_max_size">2dp</dimen>
+ <dimen name="mcv2_custom_progress_thumb_size">12dp</dimen>
+ <dimen name="mcv2_buffer_view_height">5dp</dimen>
+ <!-- TODO: adjust bottom bar view -->
+</resources>
diff --git a/packages/MediaComponents/res/values/strings.xml b/packages/MediaComponents/res/values/strings.xml
new file mode 100644
index 0000000..2597a3b
--- /dev/null
+++ b/packages/MediaComponents/res/values/strings.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+
+ <!-- Name for the default system route prior to Jellybean. [CHAR LIMIT=30] -->
+ <string name="mr_system_route_name">System</string>
+
+ <!-- Name for the user route category created when publishing routes to the system in Jellybean and above. [CHAR LIMIT=30] -->
+ <string name="mr_user_route_category_name">Devices</string>
+
+ <!-- String to be shown as a tooltip of MediaRouteButton
+ Cast is the standard android verb for sending content to a remote device. [CHAR LIMIT=50] -->
+ <string name="mr_button_content_description">Cast button</string>
+
+ <!-- Content description of a MediaRouteButton for accessibility support when no remote device is connected.
+ Cast is the standard android verb for sending content to a remote device. [CHAR LIMIT=NONE] -->
+ <string name="mr_cast_button_disconnected">Cast button. Disconnected</string>
+
+ <!-- Content description of a MediaRouteButton for accessibility support while connecting to a remote device.
+ Cast is the standard android verb for sending content to a remote device. [CHAR LIMIT=NONE] -->
+ <string name="mr_cast_button_connecting">Cast button. Connecting</string>
+
+ <!-- Content description of a MediaRouteButton for accessibility support when a remote device is connected.
+ Cast is the standard android verb for sending content to a remote device. [CHAR LIMIT=NONE] -->
+ <string name="mr_cast_button_connected">Cast button. Connected</string>
+
+ <!-- Title of the media route chooser dialog. [CHAR LIMIT=30] -->
+ <string name="mr_chooser_title">Cast to</string>
+
+ <!-- Placeholder text to show when no devices have been found. [CHAR LIMIT=50] -->
+ <string name="mr_chooser_searching">Finding devices</string>
+
+ <!-- Button to disconnect from a media route. [CHAR LIMIT=30] -->
+ <string name="mr_controller_disconnect">Disconnect</string>
+
+ <!-- Button to stop playback and disconnect from a media route. [CHAR LIMIT=30] -->
+ <string name="mr_controller_stop_casting">Stop casting</string>
+
+ <!-- Content description for accessibility (not shown on the screen): dialog close button. [CHAR LIMIT=NONE] -->
+ <string name="mr_controller_close_description">Close</string>
+
+ <!-- Content description for accessibility (not shown on the screen): media play button. [CHAR LIMIT=NONE] -->
+ <string name="mr_controller_play">Play</string>
+
+ <!-- Content description for accessibility (not shown on the screen): media pause button. [CHAR LIMIT=NONE] -->
+ <string name="mr_controller_pause">Pause</string>
+
+ <!-- Content description for accessibility (not shown on the screen): media stop button. [CHAR LIMIT=NONE] -->
+ <string name="mr_controller_stop">Stop</string>
+
+ <!-- Content description for accessibility (not shown on the screen): group expand button. Pressing button shows group members of a selected route group. [CHAR LIMIT=NONE] -->
+ <string name="mr_controller_expand_group">Expand</string>
+
+ <!-- Content description for accessibility (not shown on the screen): group collapse button. Pressing button hides group members of a selected route group. [CHAR LIMIT=NONE] -->
+ <string name="mr_controller_collapse_group">Collapse</string>
+
+ <!-- Content description for accessibility (not shown on the screen): album art button. Clicking on the album art takes user to a predefined activity per media app. [CHAR LIMIT=50] -->
+ <string name="mr_controller_album_art">Album art</string>
+
+ <!-- Content description for accessibility (not shown on the screen): volume slider. [CHAR LIMIT=NONE] -->
+ <string name="mr_controller_volume_slider">Volume slider</string>
+
+ <!-- Placeholder text to show when no media have been selected for playback. [CHAR LIMIT=50] -->
+ <string name="mr_controller_no_media_selected">No media selected</string>
+
+ <!-- Placeholder text to show when no title/description have been found for a given song/video. [CHAR LIMIT=50] -->
+ <string name="mr_controller_no_info_available">No info available</string>
+
+ <!-- Placeholder text indicating that the user is currently casting screen. [CHAR LIMIT=50] -->
+ <string name="mr_controller_casting_screen">Casting screen</string>
+
+ <!-- Text for error alert when a video container is not valid for progressive download/playback. -->
+ <string name="VideoView2_error_text_invalid_progressive_playback">This video isn\'t valid for streaming to this device.</string>
+ <!-- Text for error alert when a video cannot be played. It can be used by any app. -->
+ <string name="VideoView2_error_text_unknown">Can\'t play this video.</string>
+ <!-- Button to close error alert when a video cannot be played. -->
+ <string name="VideoView2_error_button">OK</string>
+
+ <!-- Text for displaying ad skip wait time. -->
+ <string name="MediaControlView2_ad_skip_wait_time">
+ You can skip Ad in <xliff:g id="wait_time" example="5">%1$d</xliff:g>s
+ </string>
+ <!-- Text for displaying ad total remaining time. -->
+ <string name="MediaControlView2_ad_remaining_time">
+ Ad · <xliff:g id="remaining_time" example="1:15">%1$s</xliff:g> remaining
+ </string>
+ <!-- Placeholder text indicating that the user can press the button to go to an external website. -->
+ <string name="MediaControlView2_ad_text">Visit Advertiser</string>
+ <string name="MediaControlView2_subtitle_text">Closed caption</string>
+ <string name="MediaControlView2_subtitle_off_text">Off</string>
+ <string name="MediaControlView2_audio_track_text">Audio track</string>
+ <string name="MediaControlView2_audio_track_none_text">None</string>
+ <string name="MediaControlView2_video_quality_text">Video quality</string>
+ <string name="MediaControlView2_video_quality_auto_text">Auto</string>
+ <string name="MediaControlView2_help_text">Help & feedback</string>
+ <string name="MediaControlView2_playback_speed_text">Playback speed</string>
+ <string-array name="MediaControlView2_playback_speeds">
+ <item>0.25x</item>
+ <item>0.5x</item>
+ <item>0.75x</item>
+ <item>Normal</item>
+ <item>1.25x</item>
+ <item>1.5x</item>
+ <item>2x</item>
+ </string-array>
+ <!-- Placeholder text for displaying time. Used to calculate which size layout to use. -->
+ <string name="MediaControlView2_time_placeholder">00:00:00</string>
+
+ <!-- Text for displaying subtitle track number. -->
+ <string name="MediaControlView2_subtitle_track_number_text">
+ Track <xliff:g id="track_number" example="1">%1$s</xliff:g>
+ </string>
+ <!-- Text for displaying audio track number. -->
+ <string name="MediaControlView2_audio_track_number_text">
+ Track <xliff:g id="audio_number" example="1">%1$s</xliff:g>
+ </string>
+ <!-- Text for displaying unknown song title. -->
+ <string name="mcv2_music_title_unknown_text">Song title unknown</string>
+ <!-- Text for displaying unknown artist name. -->
+ <string name="mcv2_music_artist_unknown_text">Artist unknown</string>
+
+ <!--Content Descriptions -->
+ <string name="mcv2_back_button_desc">Back</string>
+ <string name="mcv2_overflow_left_button_desc">See more buttons</string>
+ <string name="mcv2_overflow_right_button_desc">Back to previous button list</string>
+ <string name="mcv2_seek_bar_desc">Playback progress</string>
+ <string name="mcv2_settings_button_desc">Settings</string>
+ <string name="mcv2_video_quality_button_desc">Video Quality Selection</string>
+ <string name="mcv2_cc_is_on">Subtitle is on. Click to hide it.</string>
+ <string name="mcv2_cc_is_off">Subtitle is off. Click to show it.</string>
+ <string name="mcv2_replay_button_desc">Replay</string>
+ <string name="mcv2_play_button_desc">Play</string>
+ <string name="mcv2_pause_button_desc">Pause</string>
+ <string name="mcv2_previous_button_desc">Previous media</string>
+ <string name="mcv2_next_button_desc">Next media</string>
+ <string name="mcv2_rewind_button_desc">Rewind by 10 seconds</string>
+ <string name="mcv2_ffwd_button_desc">Go forward by 30 seconds</string>
+ <string name="mcv2_launch_button_desc">Launch Link</string>
+ <string name="mcv2_muted_button_desc">Muted. Click to unmute</string>
+ <string name="mcv2_unmuted_button_desc">Click to Mute</string>
+ <string name="mcv2_full_screen_button_desc">Full screen</string>
+</resources>
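The ad strings above carry xliff placeholders (%1$d for the wait time, %1$s for the remaining time). A minimal usage sketch, assuming they are resolved with the standard formatted getString overload; res and the formatted values are illustrative:

    // Hypothetical usage of the ad strings defined in this file.
    android.content.res.Resources res = context.getResources();
    String skip = res.getString(R.string.MediaControlView2_ad_skip_wait_time, 5);
    // -> roughly "You can skip Ad in 5s" (leading whitespace is collapsed)
    String left = res.getString(R.string.MediaControlView2_ad_remaining_time, "1:15");
    // -> roughly "Ad · 1:15 remaining"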
diff --git a/packages/MediaComponents/res/values/style.xml b/packages/MediaComponents/res/values/style.xml
new file mode 100644
index 0000000..5b9a8ee
--- /dev/null
+++ b/packages/MediaComponents/res/values/style.xml
@@ -0,0 +1,221 @@
+<?xml version="1.0" encoding="utf-8"?>
+<resources>
+ <style name="FullTransportControlsButton">
+ <item name="android:background">@null</item>
+ <item name="android:layout_margin">@dimen/mcv2_icon_margin</item>
+ <item name="android:scaleType">fitXY</item>
+ </style>
+
+ <style name="FullTransportControlsButton.Previous">
+ <item name="android:src">@drawable/ic_skip_previous</item>
+ <item name="android:layout_width">@dimen/mcv2_full_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_full_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_previous_button_desc</item>
+ </style>
+
+ <style name="FullTransportControlsButton.Next">
+ <item name="android:src">@drawable/ic_skip_next</item>
+ <item name="android:layout_width">@dimen/mcv2_full_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_full_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_next_button_desc</item>
+ </style>
+
+ <style name="FullTransportControlsButton.Pause">
+ <item name="android:src">@drawable/ic_pause_circle_filled</item>
+ <item name="android:layout_width">@dimen/mcv2_pause_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_pause_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_pause_button_desc</item>
+ </style>
+
+ <style name="FullTransportControlsButton.Ffwd">
+ <item name="android:src">@drawable/ic_forward_30</item>
+ <item name="android:layout_width">@dimen/mcv2_full_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_full_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_ffwd_button_desc</item>
+ </style>
+
+ <style name="FullTransportControlsButton.Rew">
+ <item name="android:src">@drawable/ic_rewind_10</item>
+ <item name="android:layout_width">@dimen/mcv2_full_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_full_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_rewind_button_desc</item>
+ </style>
+
+ <style name="EmbeddedTransportControlsButton">
+ <item name="android:background">@null</item>
+ <item name="android:layout_margin">@dimen/mcv2_icon_margin</item>
+ <item name="android:scaleType">fitXY</item>
+ </style>
+
+ <style name="EmbeddedTransportControlsButton.Previous">
+ <item name="android:src">@drawable/ic_skip_previous</item>
+ <item name="android:layout_width">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_previous_button_desc</item>
+ </style>
+
+ <style name="EmbeddedTransportControlsButton.Next">
+ <item name="android:src">@drawable/ic_skip_next</item>
+ <item name="android:layout_width">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_next_button_desc</item>
+ </style>
+
+ <style name="EmbeddedTransportControlsButton.Pause">
+ <item name="android:src">@drawable/ic_pause_circle_filled</item>
+ <item name="android:layout_width">@dimen/mcv2_pause_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_pause_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_pause_button_desc</item>
+ </style>
+
+ <style name="EmbeddedTransportControlsButton.Ffwd">
+ <item name="android:src">@drawable/ic_forward_30</item>
+ <item name="android:layout_width">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_ffwd_button_desc</item>
+ </style>
+
+ <style name="EmbeddedTransportControlsButton.Rew">
+ <item name="android:src">@drawable/ic_rewind_10</item>
+ <item name="android:layout_width">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:contentDescription">@string/mcv2_rewind_button_desc</item>
+ </style>
+
+ <style name="MinimalTransportControlsButton">
+ <item name="android:background">@null</item>
+ <item name="android:layout_width">@dimen/mcv2_pause_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_pause_icon_size</item>
+ <item name="android:layout_margin">@dimen/mcv2_icon_margin</item>
+ <item name="android:scaleType">fitXY</item>
+ <item name="android:src">@drawable/ic_pause_circle_filled</item>
+ <item name="android:contentDescription">@string/mcv2_pause_button_desc</item>
+ </style>
+
+ <style name="TitleBar">
+ <item name="android:layout_width">match_parent</item>
+ <item name="android:layout_height">46dp</item>
+ <item name="android:paddingStart">5dp</item>
+ <item name="android:paddingEnd">5dp</item>
+ </style>
+
+ <style name="TitleBarButton">
+ <item name="android:background">@null</item>
+ <item name="android:layout_width">36dp</item>
+ <item name="android:layout_height">36dp</item>
+ </style>
+
+ <style name="TitleBarButton.Back">
+ <item name="android:src">@drawable/ic_arrow_back</item>
+ <item name="android:contentDescription">@string/mcv2_back_button_desc</item>
+ </style>
+
+ <style name="TitleBarButton.Launch">
+ <item name="android:src">@drawable/ic_launch</item>
+ <item name="android:contentDescription">@string/mcv2_launch_button_desc</item>
+ </style>
+
+ <style name="TimeText">
+ <item name="android:layout_width">wrap_content</item>
+ <item name="android:layout_height">wrap_content</item>
+ <item name="android:paddingStart">4dp</item>
+ <item name="android:paddingEnd">4dp</item>
+ <item name="android:textStyle">bold</item>
+ <item name="android:textSize">14sp</item>
+ <item name="android:gravity">center</item>
+ </style>
+
+ <style name="TimeText.Current">
+ <item name="android:textColor">@color/white</item>
+ <item name="android:text">@string/MediaControlView2_time_placeholder</item>
+ </style>
+
+ <style name="TimeText.Interpunct">
+ <item name="android:textColor">@color/white_opacity_70</item>
+ <item name="android:text">·</item>
+ </style>
+
+ <style name="TimeText.End">
+ <item name="android:textColor">@color/white_opacity_70</item>
+ <item name="android:text">@string/MediaControlView2_time_placeholder</item>
+ </style>
+
+ <style name="BottomBarButton">
+ <item name="android:background">@null</item>
+ <item name="android:layout_width">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:layout_height">@dimen/mcv2_embedded_icon_size</item>
+ <item name="android:layout_margin">@dimen/mcv2_icon_margin</item>
+ <item name="android:gravity">center_horizontal</item>
+ <item name="android:scaleType">fitXY</item>
+ </style>
+
+ <style name="BottomBarButton.CC">
+ <item name="android:src">@drawable/ic_subtitle_off</item>
+ <item name="android:contentDescription">@string/mcv2_cc_is_off</item>
+ </style>
+
+ <style name="BottomBarButton.FullScreen">
+ <item name="android:src">@drawable/ic_fullscreen</item>
+ <item name="android:contentDescription">@string/mcv2_full_screen_button_desc</item>
+ </style>
+
+ <style name="BottomBarButton.OverflowRight">
+ <item name="android:src">@drawable/ic_chevron_right</item>
+ <item name="android:contentDescription">@string/mcv2_overflow_right_button_desc</item>
+ </style>
+
+ <style name="BottomBarButton.OverflowLeft">
+ <item name="android:src">@drawable/ic_chevron_left</item>
+ <item name="android:contentDescription">@string/mcv2_overflow_left_button_desc</item>
+ </style>
+
+ <style name="BottomBarButton.Settings">
+ <item name="android:src">@drawable/ic_settings</item>
+ <item name="android:contentDescription">@string/mcv2_settings_button_desc</item>
+ </style>
+
+ <style name="BottomBarButton.Mute">
+ <item name="android:src">@drawable/ic_unmute</item>
+ <item name="android:contentDescription">@string/mcv2_unmuted_button_desc</item>
+ </style>
+
+ <style name="BottomBarButton.VideoQuality">
+ <item name="android:src">@drawable/ic_high_quality</item>
+ <item name="android:contentDescription">@string/mcv2_video_quality_button_desc</item>
+ </style>
+
+ <style name="FullMusicPortrait">
+ <item name="android:layout_height">0dp</item>
+ </style>
+
+ <style name="FullMusicPortrait.Image">
+ <item name="android:layout_width">match_parent</item>
+ <item name="android:layout_weight">0.6</item>
+ <item name="android:gravity">center</item>
+ </style>
+
+ <style name="FullMusicPortrait.Text">
+ <item name="android:layout_width">match_parent</item>
+ <item name="android:layout_weight">0.4</item>
+ <item name="android:gravity">top|center</item>
+ <item name="android:orientation">vertical</item>
+ </style>
+
+ <style name="FullMusicLandscape">
+ <item name="android:layout_width">0dp</item>
+ </style>
+
+ <style name="FullMusicLandscape.Image">
+ <item name="android:layout_height">match_parent</item>
+ <item name="android:layout_weight">0.35</item>
+ <item name="android:gravity">center|right</item>
+ </style>
+
+ <style name="FullMusicLandscape.Text">
+ <item name="android:layout_height">match_parent</item>
+ <item name="android:layout_weight">0.65</item>
+ <item name="android:layout_marginLeft">24dp</item>
+ <item name="android:gravity">center|left</item>
+ <item name="android:orientation">vertical</item>
+ </style>
+</resources>
diff --git a/packages/MediaComponents/res/values/styles.xml b/packages/MediaComponents/res/values/styles.xml
new file mode 100644
index 0000000..bde6900
--- /dev/null
+++ b/packages/MediaComponents/res/values/styles.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources>
+ <style name="Widget.MediaRouter.MediaRouteButton"
+ parent="Widget.AppCompat.ActionButton">
+ <item name="externalRouteEnabledDrawable">@drawable/mr_button_dark</item>
+ </style>
+
+ <style name="Widget.MediaRouter.Light.MediaRouteButton"
+ parent="Widget.AppCompat.Light.ActionButton">
+ <item name="externalRouteEnabledDrawable">@drawable/mr_button_light</item>
+ </style>
+
+ <style name="TextAppearance.MediaRouter.Title" parent="TextAppearance.AppCompat.Title" />
+
+ <style name="TextAppearance.MediaRouter.PrimaryText" parent="TextAppearance.AppCompat.Subhead" />
+
+ <style name="TextAppearance.MediaRouter.SecondaryText" parent="TextAppearance.AppCompat.Body1" />
+</resources>
diff --git a/packages/MediaComponents/res/values/symbols.xml b/packages/MediaComponents/res/values/symbols.xml
new file mode 100644
index 0000000..ee0e8c6
--- /dev/null
+++ b/packages/MediaComponents/res/values/symbols.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+/* Copyright 2017, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+-->
+<resources>
+ <!--java-symbol type="id" name="cc" />
+ <java-symbol type="id" name="ffwd" />
+ <java-symbol type="id" name="mediacontroller_progress" />
+ <java-symbol type="id" name="next" />
+ <java-symbol type="id" name="pause" />
+ <java-symbol type="id" name="prev" />
+ <java-symbol type="id" name="rew" />
+ <java-symbol type="id" name="time" />
+ <java-symbol type="id" name="time_current" /-->
+</resources>
\ No newline at end of file
diff --git a/packages/MediaComponents/res/values/themes.xml b/packages/MediaComponents/res/values/themes.xml
new file mode 100644
index 0000000..d9a754b
--- /dev/null
+++ b/packages/MediaComponents/res/values/themes.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<resources>
+
+ <style name="Theme.MediaRouter" parent="android:Theme.Material">
+ <item name="android:windowNoTitle">true</item>
+ <item name="mediaRouteButtonStyle">@style/Widget.MediaRouter.MediaRouteButton</item>
+
+ <item name="mediaRouteCloseDrawable">@drawable/mr_dialog_close_dark</item>
+ <item name="mediaRoutePlayDrawable">@drawable/mr_media_play_dark</item>
+ <item name="mediaRoutePauseDrawable">@drawable/mr_media_pause_dark</item>
+ <item name="mediaRouteStopDrawable">@drawable/mr_media_stop_dark</item>
+ <item name="mediaRouteAudioTrackDrawable">@drawable/mr_vol_type_audiotrack_dark</item>
+ <item name="mediaRouteDefaultIconDrawable">@drawable/ic_mr_button_disconnected_dark</item>
+ <item name="mediaRouteTvIconDrawable">@drawable/ic_vol_type_tv_dark</item>
+ <item name="mediaRouteSpeakerIconDrawable">@drawable/ic_vol_type_speaker_dark</item>
+ <item name="mediaRouteSpeakerGroupIconDrawable">@drawable/ic_vol_type_speaker_group_dark</item>
+
+ <item name="mediaRouteControlPanelThemeOverlay">@null</item>
+ </style>
+
+ <style name="Theme.MediaRouter.LightControlPanel">
+ <item name="mediaRouteControlPanelThemeOverlay">@style/ThemeOverlay.MediaRouter.Light</item>
+ </style>
+
+ <style name="Theme.MediaRouter.Light" parent="android:Theme.Material.Light">
+ <item name="android:windowNoTitle">true</item>
+ <item name="mediaRouteButtonStyle">@style/Widget.MediaRouter.Light.MediaRouteButton</item>
+
+ <item name="mediaRouteCloseDrawable">@drawable/mr_dialog_close_light</item>
+ <item name="mediaRoutePlayDrawable">@drawable/mr_media_play_light</item>
+ <item name="mediaRoutePauseDrawable">@drawable/mr_media_pause_light</item>
+ <item name="mediaRouteStopDrawable">@drawable/mr_media_stop_light</item>
+ <item name="mediaRouteAudioTrackDrawable">@drawable/mr_vol_type_audiotrack_light</item>
+ <item name="mediaRouteDefaultIconDrawable">@drawable/ic_mr_button_grey</item>
+ <item name="mediaRouteTvIconDrawable">@drawable/ic_vol_type_tv_light</item>
+ <item name="mediaRouteSpeakerIconDrawable">@drawable/ic_vol_type_speaker_light</item>
+ <item name="mediaRouteSpeakerGroupIconDrawable">@drawable/ic_vol_type_speaker_group_light</item>
+
+ <item name="mediaRouteControlPanelThemeOverlay">@null</item>
+ </style>
+
+ <style name="Theme.MediaRouter.Light.DarkControlPanel">
+ <item name="mediaRouteControlPanelThemeOverlay">@style/ThemeOverlay.MediaRouter.Dark</item>
+ </style>
+
+ <style name="ThemeOverlay.MediaRouter.Dark" parent="android:Theme.Material">
+ <item name="mediaRoutePlayDrawable">@drawable/mr_media_play_dark</item>
+ <item name="mediaRoutePauseDrawable">@drawable/mr_media_pause_dark</item>
+ <item name="mediaRouteStopDrawable">@drawable/mr_media_stop_dark</item>
+ <item name="mediaRouteAudioTrackDrawable">@drawable/mr_vol_type_audiotrack_dark</item>
+    </style>
+
+    <style name="ThemeOverlay.MediaRouter.Light" parent="android:Theme.Material.Light">
+ <item name="mediaRoutePlayDrawable">@drawable/mr_media_play_light</item>
+ <item name="mediaRoutePauseDrawable">@drawable/mr_media_pause_light</item>
+ <item name="mediaRouteStopDrawable">@drawable/mr_media_stop_light</item>
+ <item name="mediaRouteAudioTrackDrawable">@drawable/mr_vol_type_audiotrack_light</item>
+ </style>
+
+</resources>
diff --git a/packages/MediaComponents/runcts.sh b/packages/MediaComponents/runcts.sh
new file mode 100644
index 0000000..61b1a1e
--- /dev/null
+++ b/packages/MediaComponents/runcts.sh
@@ -0,0 +1,224 @@
+#!/bin/bash
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Usage '. runcts.sh'
+
+function _runtest_cts_mediacomponent_usage() {
+ echo 'runtest-cts-MediaComponents [option]: Build, flash device,'
+  echo '    and run the subset of CtsMediaTestCases that MediaComponents covers.'
+  echo '    *Warning* This bypasses CTS setup (e.g. downloading media content from the server).'
+  echo '      For running CTS the official way, use atest or cts-tradefed.'
+  echo '    -h|--help: This help'
+  echo '    --skip: Skip build and flash. Just re-run the tests.'
+  echo '    --min: Only rebuild the tests and the updatable library.'
+  echo '    --test: Only rebuild the tests.'
+  echo '    -s [device_id]: Specify a device name to run tests against.'
+  echo '      You can define ${ADBHOST} instead.'
+  echo '    -r [count]: Repeat the tests for the given count. It will stop when a run fails.'
+  echo '    --ignore: Keep repeating the tests even when a run fails.'
+  echo '    -t [test]: Only run the specified test. Can be either a class or a method.'
+}
+
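+# Example invocations (the device id and repeat count below are only illustrative):
+#   runtest-cts-MediaComponents                   # build, flash, and run the default test set
+#   runtest-cts-MediaComponents -s emulator-5554 -r 3 -t android.media.cts.MediaSession2Test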
+function runtest-cts-MediaComponents() {
+ # Edit here if you want to support other tests.
+  # List the libs and apks in media_api that the tests need, and place the test target last.
+ local TEST_PACKAGE_DIR=("frameworks/av/packages/MediaComponents/test")
+ local TEST_PACKAGE=("android.media.cts")
+ local BUILD_TARGETS=("MediaComponents" "CtsMediaTestCases")
+  # Don't include MediaComponents -- if we simply install it, the system server
+  # wouldn't use the installed one.
+ local INSTALL_TARGETS=("CtsMediaTestCases")
+ local TEST_RUNNER="android.support.test.runner.AndroidJUnitRunner"
+ local DEPENDENCIES=("mockito-target-minus-junit4" "android-support-test" "compatibility-device-util")
+ local DEFAULT_TEST_TARGET=""
+ DEFAULT_TEST_TARGET+="android.media.cts.MediaBrowser2Test"
+ DEFAULT_TEST_TARGET+=",android.media.cts.MediaController2Test"
+ DEFAULT_TEST_TARGET+=",android.media.cts.MediaMetadata2Test"
+ DEFAULT_TEST_TARGET+=",android.media.cts.MediaSession2Test"
+ DEFAULT_TEST_TARGET+=",android.media.cts.MediaSession2_PermissionTest"
+ DEFAULT_TEST_TARGET+=",android.media.cts.MediaSessionManager_MediaSession2Test"
+ DEFAULT_TEST_TARGET+=",android.media.cts.SessionToken2Test"
+ if [[ -z "${ANDROID_BUILD_TOP}" ]]; then
+ echo "Needs to lunch a target first"
+ return
+ fi
+
+ local old_path=${OLDPWD}
+ while true; do
+ local OPTION_SKIP="false"
+ local OPTION_MIN="false"
+ local OPTION_TEST="false"
+ local OPTION_REPEAT_COUNT="1"
+ local OPTION_IGNORE="false"
+ local OPTION_TEST_TARGET="${DEFAULT_TEST_TARGET}"
+ local adbhost_local
+ while (( "$#" )); do
+ case "${1}" in
+ -h|--help)
+ _runtest_cts_mediacomponent_usage
+ return
+ ;;
+ --skip)
+ OPTION_SKIP="true"
+ ;;
+ --min)
+ OPTION_MIN="true"
+ ;;
+ --test)
+ OPTION_TEST="true"
+ ;;
+ -s)
+ shift
+ adbhost_local=${1}
+ ;;
+ -r)
+ shift
+ OPTION_REPEAT_COUNT="${1}"
+ ;;
+ --ignore)
+ OPTION_IGNORE="true"
+ ;;
+ -t)
+ shift
+ OPTION_TEST_TARGET="${1}"
+ esac
+ shift
+ done
+
+ # Build adb command.
+ local adb
+ if [[ -z "${adbhost_local}" ]]; then
+ adbhost_local=${ADBHOST}
+ fi
+ if [[ -z "${adbhost_local}" ]]; then
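+      # 'adb devices' prints a header line, so exactly one attached device yields two lines.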
+ local device_count=$(adb devices | sed '/^[[:space:]]*$/d' | wc -l)
+ if [[ "${device_count}" != "2" ]]; then
+        echo "Expected exactly one connected device. Specify one with -s." && break
+ fi
+ adb="adb"
+ else
+ adb="adb -s ${adbhost_local}"
+ fi
+
+ local target_dir="${ANDROID_BUILD_TOP}/${TEST_PACKAGE_DIR}"
+ #local TEST_PACKAGE=$(sed -n 's/^.*\bpackage\b="\([a-z0-9\.]*\)".*$/\1/p' ${target_dir}/AndroidManifest.xml)
+
+ if [[ "${OPTION_SKIP}" != "true" ]]; then
+ # Build dependencies if needed.
+ local dependency
+ local build_dependency=""
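+      # Treat a dependency as already built if its *_intermediates directory exists under ${OUT}
+      # (or, for out/ paths, if the file itself exists in the tree).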
+ for dependency in ${DEPENDENCIES[@]}; do
+ if [[ "${dependency}" == "out/"* ]]; then
+ if [[ ! -f ${ANDROID_BUILD_TOP}/${dependency} ]]; then
+ build_dependency="true"
+ break
+ fi
+ else
+ if [[ "$(find ${OUT} -name ${dependency}_intermediates | wc -l)" == "0" ]]; then
+ build_dependency="true"
+ break
+ fi
+ fi
+ done
+ if [[ "${build_dependency}" == "true" ]]; then
+ echo "Building dependencies. Will only print stderr."
+ m ${DEPENDENCIES[@]} -j > /dev/null
+ fi
+
+ # Build test apk and required apk.
+ local build_targets
+ if [[ "${OPTION_TEST}" == "true" ]]; then
+ build_targets="${INSTALL_TARGETS[@]}"
+ elif [[ "${OPTION_MIN}" == "true" ]]; then
+ build_targets="${BUILD_TARGETS[@]}"
+ else
+ build_targets="${BUILD_TARGETS[@]} droid"
+ fi
+ m ${build_targets} -j || break
+
+ if [[ "${OPTION_TEST}" != "true" ]]; then
+ # Flash only when needed
+ local device_build_type="$(${adb} shell getprop ro.build.type)"
+ if [[ "${device_build_type}" == "user" ]]; then
+ # User build. Cannot adb sync
+ ${adb} reboot bootloader
+ fastboot flashall
+ else
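+        # Non-user build: disable dm-verity and remount so 'adb sync' can push to /system.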
+ ${adb} root
+ local device_verity_mode="$(${adb} shell getprop ro.boot.veritymode)"
+ if [[ "${device_verity_mode}" != "disabled" ]]; then
+ ${adb} disable-verity
+ ${adb} reboot
+ ${adb} wait-for-device || break
+ ${adb} root
+ fi
+ ${adb} remount
+ ${adb} shell stop
+ ${adb} shell setprop log.tag.MediaSessionService DEBUG
+ ${adb} sync
+ ${adb} shell start
+ fi
+ ${adb} wait-for-device || break
+ # Ensure package manager is loaded.
+ # TODO(jaewan): Find better way to wait
+ sleep 15
+ fi
+
+ # Install apks
+ local install_failed="false"
+ for target in ${INSTALL_TARGETS[@]}; do
+ local apk_path=$(find ${OUT}/system ${OUT}/data -name ${target}.apk)
+ local apk_num=$(find ${OUT}/system ${OUT}/data -name ${target}.apk | wc -l)
+ if [[ "${apk_num}" != "1" ]]; then
+ echo "Cannot locate a ${target}.apk. Found ${apk_num} apks" && break
+ fi
+ echo "Installing ${target}.apk. path=${apk_path}"
+ ${adb} install -r ${apk_path}
+ if [[ "${?}" != "0" ]]; then
+ install_failed="true"
+ break
+ fi
+ done
+ if [[ "${install_failed}" == "true" ]]; then
+      echo "Failed to install. Tests won't run."
+ break
+ fi
+ fi
+
+ local test_target=""
+ if [[ -n "${OPTION_TEST_TARGET}" ]]; then
+ test_target="-e class ${OPTION_TEST_TARGET}"
+ fi
+
+ local i
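+    # Note: 'tempfile' appears to be Debian-specific; 'mktemp' would be the portable equivalent.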
+ local tmpfile=$(tempfile)
+ for ((i=1; i <= ${OPTION_REPEAT_COUNT}; i++)); do
+ echo "Run test ${i}/${OPTION_REPEAT_COUNT}"
+ ${adb} shell am instrument ${test_target} -w ${TEST_PACKAGE}/${TEST_RUNNER} >& ${tmpfile}
+ cat ${tmpfile}
+ if [[ "${OPTION_IGNORE}" != "true" ]]; then
+ if [[ -n "$(grep ${tmpfile} -e 'FAILURE\|crashed')" ]]; then
+ # am instrument doesn't return error code so need to grep result message instead
+ break
+ fi
+ fi
+ done
+ rm ${tmpfile}
+ break
+ done
+}
+
+echo "Following functions are added to your environment:"
+_runtest_cts_mediacomponent_usage
diff --git a/packages/MediaComponents/src/com/android/media/IMediaController2.aidl b/packages/MediaComponents/src/com/android/media/IMediaController2.aidl
new file mode 100644
index 0000000..0488b70
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/IMediaController2.aidl
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import android.app.PendingIntent;
+import android.os.Bundle;
+import android.os.ResultReceiver;
+
+import com.android.media.IMediaSession2;
+
+/**
+ * Interface from MediaSession2 to MediaController2.
+ * <p>
+ * Keep this interface oneway. Otherwise a malicious app may implement a fake version of this
+ * and hold calls from the session to freeze the session owner(s).
+ */
+// TODO(jaewan): (Post P) Handle when the playlist becomes too huge.
+// Note that ParcelledSliceList isn't a good idea for the purpose. (see: b/37493677)
+oneway interface IMediaController2 {
+ void onPlayerStateChanged(int state);
+ void onPositionChanged(long eventTimeMs, long positionMs);
+ void onPlaybackSpeedChanged(float speed);
+ void onBufferedPositionChanged(long bufferedPositionMs);
+ void onPlaylistChanged(in List<Bundle> playlist, in Bundle metadata);
+ void onPlaylistMetadataChanged(in Bundle metadata);
+ void onPlaybackInfoChanged(in Bundle playbackInfo);
+ void onRepeatModeChanged(int repeatMode);
+ void onShuffleModeChanged(int shuffleMode);
+ void onError(int errorCode, in Bundle extras);
+
+ void onConnected(IMediaSession2 sessionBinder, in Bundle commandGroup,
+ int playerState, long positionEventTimeMs, long positionMs, float playbackSpeed,
+ long bufferedPositionMs, in Bundle playbackInfo, int repeatMode, int shuffleMode,
+ in List<Bundle> playlist, in PendingIntent sessionActivity);
+ void onDisconnected();
+
+ void onCustomLayoutChanged(in List<Bundle> commandButtonlist);
+ void onAllowedCommandsChanged(in Bundle commands);
+
+ void onCustomCommand(in Bundle command, in Bundle args, in ResultReceiver receiver);
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+    // Browser specific
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ void onGetLibraryRootDone(in Bundle rootHints, String rootMediaId, in Bundle rootExtra);
+ void onGetItemDone(String mediaId, in Bundle result);
+ void onChildrenChanged(String rootMediaId, int itemCount, in Bundle extras);
+ void onGetChildrenDone(String parentId, int page, int pageSize, in List<Bundle> result,
+ in Bundle extras);
+ void onSearchResultChanged(String query, int itemCount, in Bundle extras);
+ void onGetSearchResultDone(String query, int page, int pageSize, in List<Bundle> result,
+ in Bundle extras);
+}
diff --git a/packages/MediaComponents/src/com/android/media/IMediaSession2.aidl b/packages/MediaComponents/src/com/android/media/IMediaSession2.aidl
new file mode 100644
index 0000000..664467d
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/IMediaSession2.aidl
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import android.os.Bundle;
+import android.os.ResultReceiver;
+import android.net.Uri;
+
+import com.android.media.IMediaController2;
+
+/**
+ * Interface from MediaController2 to MediaSession2.
+ * <p>
+ * Keep this interface oneway. Otherwise a malicious app may implement a fake version of this
+ * and hold calls from the session to freeze the session owner(s).
+ */
+// TODO(jaewan): (Post P) Handle when the playlist becomes too huge.
+// Note that ParcelledSliceList isn't a good idea for the purpose. (see: b/37493677)
+oneway interface IMediaSession2 {
+ // TODO(jaewan): add onCommand() to send private command
+
+    // TODO(jaewan): (Post P) We may consider adding another binder just for the connection,
+    //               so other methods aren't exposed to a controller whose connection wasn't
+    //               accepted. But this is enough for now because it's the same as the existing
+    //               MediaBrowser and MediaBrowserService.
+ void connect(IMediaController2 caller, String callingPackage);
+ void release(IMediaController2 caller);
+
+ void setVolumeTo(IMediaController2 caller, int value, int flags);
+ void adjustVolume(IMediaController2 caller, int direction, int flags);
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ // send command
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ void sendTransportControlCommand(IMediaController2 caller,
+ int commandCode, in Bundle args);
+ void sendCustomCommand(IMediaController2 caller, in Bundle command, in Bundle args,
+ in ResultReceiver receiver);
+
+ void prepareFromUri(IMediaController2 caller, in Uri uri, in Bundle extras);
+ void prepareFromSearch(IMediaController2 caller, String query, in Bundle extras);
+ void prepareFromMediaId(IMediaController2 caller, String mediaId, in Bundle extras);
+ void playFromUri(IMediaController2 caller, in Uri uri, in Bundle extras);
+ void playFromSearch(IMediaController2 caller, String query, in Bundle extras);
+ void playFromMediaId(IMediaController2 caller, String mediaId, in Bundle extras);
+ void setRating(IMediaController2 caller, String mediaId, in Bundle rating);
+
+ void setPlaylist(IMediaController2 caller, in List<Bundle> playlist, in Bundle metadata);
+ void updatePlaylistMetadata(IMediaController2 caller, in Bundle metadata);
+ void addPlaylistItem(IMediaController2 caller, int index, in Bundle mediaItem);
+ void removePlaylistItem(IMediaController2 caller, in Bundle mediaItem);
+ void replacePlaylistItem(IMediaController2 caller, int index, in Bundle mediaItem);
+ void skipToPlaylistItem(IMediaController2 caller, in Bundle mediaItem);
+ void skipToPreviousItem(IMediaController2 caller);
+ void skipToNextItem(IMediaController2 caller);
+ void setRepeatMode(IMediaController2 caller, int repeatMode);
+ void setShuffleMode(IMediaController2 caller, int shuffleMode);
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ // library service specific
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ void getLibraryRoot(IMediaController2 caller, in Bundle rootHints);
+ void getItem(IMediaController2 caller, String mediaId);
+ void getChildren(IMediaController2 caller, String parentId, int page, int pageSize,
+ in Bundle extras);
+ void search(IMediaController2 caller, String query, in Bundle extras);
+ void getSearchResult(IMediaController2 caller, String query, int page, int pageSize,
+ in Bundle extras);
+ void subscribe(IMediaController2 caller, String parentId, in Bundle extras);
+ void unsubscribe(IMediaController2 caller, String parentId);
+}
diff --git a/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java b/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
new file mode 100644
index 0000000..c909099
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaBrowser2Impl.java
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import android.content.Context;
+import android.media.MediaBrowser2;
+import android.media.MediaBrowser2.BrowserCallback;
+import android.media.MediaController2;
+import android.media.MediaItem2;
+import android.media.SessionToken2;
+import android.media.update.MediaBrowser2Provider;
+import android.os.Bundle;
+import android.os.RemoteException;
+import android.text.TextUtils;
+import android.util.Log;
+
+import java.util.List;
+import java.util.concurrent.Executor;
+
+public class MediaBrowser2Impl extends MediaController2Impl implements MediaBrowser2Provider {
+ private final String TAG = "MediaBrowser2";
+ private final boolean DEBUG = true; // TODO(jaewan): change.
+
+ private final MediaBrowser2 mInstance;
+ private final MediaBrowser2.BrowserCallback mCallback;
+
+ public MediaBrowser2Impl(Context context, MediaBrowser2 instance, SessionToken2 token,
+ Executor executor, BrowserCallback callback) {
+ super(context, instance, token, executor, callback);
+ mInstance = instance;
+ mCallback = callback;
+ }
+
+ @Override MediaBrowser2 getInstance() {
+ return (MediaBrowser2) super.getInstance();
+ }
+
+ @Override
+ public void getLibraryRoot_impl(Bundle rootHints) {
+ final IMediaSession2 binder = getSessionBinder();
+ if (binder != null) {
+ try {
+ binder.getLibraryRoot(getControllerStub(), rootHints);
+ } catch (RemoteException e) {
+ // TODO(jaewan): Handle disconnect.
+ if (DEBUG) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void subscribe_impl(String parentId, Bundle extras) {
+ if (parentId == null) {
+ throw new IllegalArgumentException("parentId shouldn't be null");
+ }
+
+ final IMediaSession2 binder = getSessionBinder();
+ if (binder != null) {
+ try {
+ binder.subscribe(getControllerStub(), parentId, extras);
+ } catch (RemoteException e) {
+ // TODO(jaewan): Handle disconnect.
+ if (DEBUG) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void unsubscribe_impl(String parentId) {
+ if (parentId == null) {
+ throw new IllegalArgumentException("parentId shouldn't be null");
+ }
+
+ final IMediaSession2 binder = getSessionBinder();
+ if (binder != null) {
+ try {
+ binder.unsubscribe(getControllerStub(), parentId);
+ } catch (RemoteException e) {
+ // TODO(jaewan): Handle disconnect.
+ if (DEBUG) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void getItem_impl(String mediaId) {
+ if (mediaId == null) {
+ throw new IllegalArgumentException("mediaId shouldn't be null");
+ }
+
+ final IMediaSession2 binder = getSessionBinder();
+ if (binder != null) {
+ try {
+ binder.getItem(getControllerStub(), mediaId);
+ } catch (RemoteException e) {
+ // TODO(jaewan): Handle disconnect.
+ if (DEBUG) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void getChildren_impl(String parentId, int page, int pageSize, Bundle extras) {
+ if (parentId == null) {
+ throw new IllegalArgumentException("parentId shouldn't be null");
+ }
+ if (page < 1 || pageSize < 1) {
+ throw new IllegalArgumentException("Neither page nor pageSize should be less than 1");
+ }
+
+ final IMediaSession2 binder = getSessionBinder();
+ if (binder != null) {
+ try {
+ binder.getChildren(getControllerStub(), parentId, page, pageSize, extras);
+ } catch (RemoteException e) {
+ // TODO(jaewan): Handle disconnect.
+ if (DEBUG) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void search_impl(String query, Bundle extras) {
+ if (TextUtils.isEmpty(query)) {
+ throw new IllegalArgumentException("query shouldn't be empty");
+ }
+ final IMediaSession2 binder = getSessionBinder();
+ if (binder != null) {
+ try {
+ binder.search(getControllerStub(), query, extras);
+ } catch (RemoteException e) {
+ // TODO(jaewan): Handle disconnect.
+ if (DEBUG) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void getSearchResult_impl(String query, int page, int pageSize, Bundle extras) {
+ if (TextUtils.isEmpty(query)) {
+ throw new IllegalArgumentException("query shouldn't be empty");
+ }
+ if (page < 1 || pageSize < 1) {
+ throw new IllegalArgumentException("Neither page nor pageSize should be less than 1");
+ }
+ final IMediaSession2 binder = getSessionBinder();
+ if (binder != null) {
+ try {
+ binder.getSearchResult(getControllerStub(), query, page, pageSize, extras);
+ } catch (RemoteException e) {
+ // TODO(jaewan): Handle disconnect.
+ if (DEBUG) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ public void onGetLibraryRootDone(
+ final Bundle rootHints, final String rootMediaId, final Bundle rootExtra) {
+ getCallbackExecutor().execute(() -> {
+ mCallback.onGetLibraryRootDone(getInstance(), rootHints, rootMediaId, rootExtra);
+ });
+ }
+
+ public void onGetItemDone(String mediaId, MediaItem2 item) {
+ getCallbackExecutor().execute(() -> {
+ mCallback.onGetItemDone(getInstance(), mediaId, item);
+ });
+ }
+
+ public void onGetChildrenDone(String parentId, int page, int pageSize, List<MediaItem2> result,
+ Bundle extras) {
+ getCallbackExecutor().execute(() -> {
+ mCallback.onGetChildrenDone(getInstance(), parentId, page, pageSize, result, extras);
+ });
+ }
+
+ public void onSearchResultChanged(String query, int itemCount, Bundle extras) {
+ getCallbackExecutor().execute(() -> {
+ mCallback.onSearchResultChanged(getInstance(), query, itemCount, extras);
+ });
+ }
+
+ public void onGetSearchResultDone(String query, int page, int pageSize, List<MediaItem2> result,
+ Bundle extras) {
+ getCallbackExecutor().execute(() -> {
+ mCallback.onGetSearchResultDone(getInstance(), query, page, pageSize, result, extras);
+ });
+ }
+
+ public void onChildrenChanged(final String parentId, int itemCount, final Bundle extras) {
+ getCallbackExecutor().execute(() -> {
+ mCallback.onChildrenChanged(getInstance(), parentId, itemCount, extras);
+ });
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/MediaController2Impl.java b/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
new file mode 100644
index 0000000..249365a
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaController2Impl.java
@@ -0,0 +1,1143 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import static android.media.SessionCommand2.COMMAND_CODE_SET_VOLUME;
+import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_ADD_ITEM;
+import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_REMOVE_ITEM;
+import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_REPLACE_ITEM;
+import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_SET_LIST;
+import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_SET_LIST_METADATA;
+import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_SET_REPEAT_MODE;
+import static android.media.SessionCommand2.COMMAND_CODE_PLAYLIST_SET_SHUFFLE_MODE;
+import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PLAY_FROM_MEDIA_ID;
+import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PLAY_FROM_SEARCH;
+import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PLAY_FROM_URI;
+import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_MEDIA_ID;
+import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_SEARCH;
+import static android.media.SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_URI;
+
+import android.app.PendingIntent;
+import android.content.ComponentName;
+import android.content.Context;
+import android.content.Intent;
+import android.content.ServiceConnection;
+import android.media.AudioAttributes;
+import android.media.MediaController2;
+import android.media.MediaController2.ControllerCallback;
+import android.media.MediaController2.PlaybackInfo;
+import android.media.MediaItem2;
+import android.media.MediaMetadata2;
+import android.media.MediaPlaylistAgent.RepeatMode;
+import android.media.MediaPlaylistAgent.ShuffleMode;
+import android.media.SessionCommand2;
+import android.media.MediaSession2.CommandButton;
+import android.media.SessionCommandGroup2;
+import android.media.MediaSessionService2;
+import android.media.Rating2;
+import android.media.SessionToken2;
+import android.media.update.MediaController2Provider;
+import android.net.Uri;
+import android.os.Bundle;
+import android.os.IBinder;
+import android.os.Process;
+import android.os.RemoteException;
+import android.os.ResultReceiver;
+import android.os.UserHandle;
+import android.support.annotation.GuardedBy;
+import android.text.TextUtils;
+import android.util.Log;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Executor;
+
+public class MediaController2Impl implements MediaController2Provider {
+ private static final String TAG = "MediaController2";
+ private static final boolean DEBUG = true; // TODO(jaewan): Change
+
+ private final MediaController2 mInstance;
+ private final Context mContext;
+ private final Object mLock = new Object();
+
+ private final MediaController2Stub mControllerStub;
+ private final SessionToken2 mToken;
+ private final ControllerCallback mCallback;
+ private final Executor mCallbackExecutor;
+ private final IBinder.DeathRecipient mDeathRecipient;
+
+ @GuardedBy("mLock")
+ private SessionServiceConnection mServiceConnection;
+ @GuardedBy("mLock")
+ private boolean mIsReleased;
+ @GuardedBy("mLock")
+ private List<MediaItem2> mPlaylist;
+ @GuardedBy("mLock")
+ private MediaMetadata2 mPlaylistMetadata;
+ @GuardedBy("mLock")
+ private @RepeatMode int mRepeatMode;
+ @GuardedBy("mLock")
+ private @ShuffleMode int mShuffleMode;
+ @GuardedBy("mLock")
+ private int mPlayerState;
+ @GuardedBy("mLock")
+ private long mPositionEventTimeMs;
+ @GuardedBy("mLock")
+ private long mPositionMs;
+ @GuardedBy("mLock")
+ private float mPlaybackSpeed;
+ @GuardedBy("mLock")
+ private long mBufferedPositionMs;
+ @GuardedBy("mLock")
+ private PlaybackInfo mPlaybackInfo;
+ @GuardedBy("mLock")
+ private PendingIntent mSessionActivity;
+ @GuardedBy("mLock")
+ private SessionCommandGroup2 mAllowedCommands;
+
+    // Assign this only while holding the lock, but read it without the lock to prevent
+    // potential deadlock.
+    // The -Binder postfix is added to explicitly show that calls on it may cross processes.
+    // Technically -Interface is more correct, but it may be misread as an interface (vs. a
+    // class), so keep this postfix until we find a better one.
+ @GuardedBy("mLock")
+ private volatile IMediaSession2 mSessionBinder;
+
+    // TODO(jaewan): Require a session-activeness-changed listener, because the controller can
+    //               be available when the session's player is null.
+ public MediaController2Impl(Context context, MediaController2 instance, SessionToken2 token,
+ Executor executor, ControllerCallback callback) {
+ mInstance = instance;
+ if (context == null) {
+ throw new IllegalArgumentException("context shouldn't be null");
+ }
+ if (token == null) {
+ throw new IllegalArgumentException("token shouldn't be null");
+ }
+ if (callback == null) {
+ throw new IllegalArgumentException("callback shouldn't be null");
+ }
+ if (executor == null) {
+ throw new IllegalArgumentException("executor shouldn't be null");
+ }
+ mContext = context;
+ mControllerStub = new MediaController2Stub(this);
+ mToken = token;
+ mCallback = callback;
+ mCallbackExecutor = executor;
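+        // If the session process dies, close this controller so the callback receives
+        // onDisconnected().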
+ mDeathRecipient = () -> {
+ mInstance.close();
+ };
+
+ mSessionBinder = null;
+ }
+
+ @Override
+ public void initialize() {
+ // TODO(jaewan): More sanity checks.
+ if (mToken.getType() == SessionToken2.TYPE_SESSION) {
+ // Session
+ mServiceConnection = null;
+ connectToSession(SessionToken2Impl.from(mToken).getSessionBinder());
+ } else {
+ // Session service
+ if (Process.myUid() == Process.SYSTEM_UID) {
+                // It's the system server (MediaSessionService) that wants to monitor the session.
+                // Don't bind if we can avoid it.
+                IMediaSession2 binder = SessionToken2Impl.from(mToken).getSessionBinder();
+                if (binder != null) {
+                    // Use the binder in the session token instead of binding on our own.
+                    // Otherwise the server would hold the binding to the service *forever* and
+                    // the service would never stop.
+ mServiceConnection = null;
+ connectToSession(SessionToken2Impl.from(mToken).getSessionBinder());
+ return;
+ } else if (DEBUG) {
+                    // Should happen only when the system server wants to dispatch media key
+                    // events to a dead service.
+                    Log.d(TAG, "System server binds to a session service. Should unbind"
+                            + " immediately after use.");
+ }
+ }
+ mServiceConnection = new SessionServiceConnection();
+ connectToService();
+ }
+ }
+
+ private void connectToService() {
+        // Service. Needs to get a fresh binder whenever a connection is needed.
+ SessionToken2Impl impl = SessionToken2Impl.from(mToken);
+ final Intent intent = new Intent(MediaSessionService2.SERVICE_INTERFACE);
+ intent.setClassName(mToken.getPackageName(), impl.getServiceName());
+
+        // Use bindService() instead of startForegroundService() to start the session service for
+        // three reasons.
+        // 1. Prevent the session service owner's stopSelf() from destroying the service.
+        //    With startForegroundService(), the service's call to stopSelf() would trigger an
+        //    immediate onDestroy() call on the main thread even while onConnect() is running on
+        //    another thread.
+        // 2. Minimize the APIs developers need to care about.
+        //    With bindService(), developers only need to handle Service.onBind(), whereas
+        //    startForegroundService() would also require handling Service.onStartCommand().
+        // 3. Future support for UI-less playback.
+        //    If a service wants to keep running, it should be either a foreground service or a
+        //    bound service. There have been requests for this feature from system apps, and
+        //    bindService() is a better fit for it.
+ boolean result;
+ if (Process.myUid() == Process.SYSTEM_UID) {
+            // Use bindServiceAsUser() when binding from a system service to avoid the following
+            // warning:
+            // ContextImpl: Calling a method in the system process without a qualified user
+ result = mContext.bindServiceAsUser(intent, mServiceConnection, Context.BIND_AUTO_CREATE,
+ UserHandle.getUserHandleForUid(mToken.getUid()));
+ } else {
+ result = mContext.bindService(intent, mServiceConnection, Context.BIND_AUTO_CREATE);
+ }
+ if (!result) {
+ Log.w(TAG, "bind to " + mToken + " failed");
+ } else if (DEBUG) {
+ Log.d(TAG, "bind to " + mToken + " success");
+ }
+ }
+
+ private void connectToSession(IMediaSession2 sessionBinder) {
+ try {
+ sessionBinder.connect(mControllerStub, mContext.getPackageName());
+ } catch (RemoteException e) {
+ Log.w(TAG, "Failed to call connection request. Framework will retry"
+ + " automatically");
+ }
+ }
+
+ @Override
+ public void close_impl() {
+ if (DEBUG) {
+ Log.d(TAG, "release from " + mToken);
+ }
+ final IMediaSession2 binder;
+ synchronized (mLock) {
+ if (mIsReleased) {
+                // Prevent re-entrance from ControllerCallback.onDisconnected().
+ return;
+ }
+ mIsReleased = true;
+ if (mServiceConnection != null) {
+ mContext.unbindService(mServiceConnection);
+ mServiceConnection = null;
+ }
+ binder = mSessionBinder;
+ mSessionBinder = null;
+ mControllerStub.destroy();
+ }
+ if (binder != null) {
+ try {
+ binder.asBinder().unlinkToDeath(mDeathRecipient, 0);
+ binder.release(mControllerStub);
+ } catch (RemoteException e) {
+ // No-op.
+ }
+ }
+ mCallbackExecutor.execute(() -> {
+ mCallback.onDisconnected(mInstance);
+ });
+ }
+
+ IMediaSession2 getSessionBinder() {
+ return mSessionBinder;
+ }
+
+ MediaController2Stub getControllerStub() {
+ return mControllerStub;
+ }
+
+ Executor getCallbackExecutor() {
+ return mCallbackExecutor;
+ }
+
+ Context getContext() {
+ return mContext;
+ }
+
+ MediaController2 getInstance() {
+ return mInstance;
+ }
+
+ // Returns session binder if the controller can send the command.
+ IMediaSession2 getSessionBinderIfAble(int commandCode) {
+ synchronized (mLock) {
+ if (!mAllowedCommands.hasCommand(commandCode)) {
+                // Cannot send because the controller isn't allowed to.
+ Log.w(TAG, "Controller isn't allowed to call command, commandCode="
+ + commandCode);
+ return null;
+ }
+ }
+        // TODO(jaewan): Should we do this with the lock held?
+ final IMediaSession2 binder = mSessionBinder;
+ if (binder == null) {
+ // Cannot send because disconnected.
+ Log.w(TAG, "Session is disconnected");
+ }
+ return binder;
+ }
+
+ // Returns session binder if the controller can send the command.
+ IMediaSession2 getSessionBinderIfAble(SessionCommand2 command) {
+ synchronized (mLock) {
+ if (!mAllowedCommands.hasCommand(command)) {
+ Log.w(TAG, "Controller isn't allowed to call command, command=" + command);
+ return null;
+ }
+ }
+        // TODO(jaewan): Should we do this with the lock held?
+ final IMediaSession2 binder = mSessionBinder;
+ if (binder == null) {
+ // Cannot send because disconnected.
+ Log.w(TAG, "Session is disconnected");
+ }
+ return binder;
+ }
+
+ @Override
+ public SessionToken2 getSessionToken_impl() {
+ return mToken;
+ }
+
+ @Override
+ public boolean isConnected_impl() {
+ final IMediaSession2 binder = mSessionBinder;
+ return binder != null;
+ }
+
+ @Override
+ public void play_impl() {
+ sendTransportControlCommand(SessionCommand2.COMMAND_CODE_PLAYBACK_PLAY);
+ }
+
+ @Override
+ public void pause_impl() {
+ sendTransportControlCommand(SessionCommand2.COMMAND_CODE_PLAYBACK_PAUSE);
+ }
+
+ @Override
+ public void stop_impl() {
+ sendTransportControlCommand(SessionCommand2.COMMAND_CODE_PLAYBACK_STOP);
+ }
+
+ @Override
+ public void skipToPlaylistItem_impl(MediaItem2 item) {
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ final IMediaSession2 binder = mSessionBinder;
+ if (binder != null) {
+ try {
+ binder.skipToPlaylistItem(mControllerStub, item.toBundle());
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void skipToPreviousItem_impl() {
+ final IMediaSession2 binder = mSessionBinder;
+ if (binder != null) {
+ try {
+ binder.skipToPreviousItem(mControllerStub);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void skipToNextItem_impl() {
+ final IMediaSession2 binder = mSessionBinder;
+ if (binder != null) {
+ try {
+ binder.skipToNextItem(mControllerStub);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ private void sendTransportControlCommand(int commandCode) {
+ sendTransportControlCommand(commandCode, null);
+ }
+
+ private void sendTransportControlCommand(int commandCode, Bundle args) {
+ final IMediaSession2 binder = mSessionBinder;
+ if (binder != null) {
+ try {
+ binder.sendTransportControlCommand(mControllerStub, commandCode, args);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public PendingIntent getSessionActivity_impl() {
+ return mSessionActivity;
+ }
+
+ @Override
+ public void setVolumeTo_impl(int value, int flags) {
+ // TODO(hdmoon): sanity check
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_SET_VOLUME);
+ if (binder != null) {
+ try {
+ binder.setVolumeTo(mControllerStub, value, flags);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void adjustVolume_impl(int direction, int flags) {
+ // TODO(hdmoon): sanity check
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_SET_VOLUME);
+ if (binder != null) {
+ try {
+ binder.adjustVolume(mControllerStub, direction, flags);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void prepareFromUri_impl(Uri uri, Bundle extras) {
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_SESSION_PREPARE_FROM_URI);
+ if (uri == null) {
+ throw new IllegalArgumentException("uri shouldn't be null");
+ }
+ if (binder != null) {
+ try {
+ binder.prepareFromUri(mControllerStub, uri, extras);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ // TODO(jaewan): Handle.
+ }
+ }
+
+ @Override
+ public void prepareFromSearch_impl(String query, Bundle extras) {
+ final IMediaSession2 binder = getSessionBinderIfAble(
+ COMMAND_CODE_SESSION_PREPARE_FROM_SEARCH);
+ if (TextUtils.isEmpty(query)) {
+ throw new IllegalArgumentException("query shouldn't be empty");
+ }
+ if (binder != null) {
+ try {
+ binder.prepareFromSearch(mControllerStub, query, extras);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ // TODO(jaewan): Handle.
+ }
+ }
+
+ @Override
+ public void prepareFromMediaId_impl(String mediaId, Bundle extras) {
+ final IMediaSession2 binder = getSessionBinderIfAble(
+ COMMAND_CODE_SESSION_PREPARE_FROM_MEDIA_ID);
+ if (mediaId == null) {
+ throw new IllegalArgumentException("mediaId shouldn't be null");
+ }
+ if (binder != null) {
+ try {
+ binder.prepareFromMediaId(mControllerStub, mediaId, extras);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ // TODO(jaewan): Handle.
+ }
+ }
+
+ @Override
+ public void playFromUri_impl(Uri uri, Bundle extras) {
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_SESSION_PLAY_FROM_URI);
+ if (uri == null) {
+ throw new IllegalArgumentException("uri shouldn't be null");
+ }
+ if (binder != null) {
+ try {
+ binder.playFromUri(mControllerStub, uri, extras);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ // TODO(jaewan): Handle.
+ }
+ }
+
+ @Override
+ public void playFromSearch_impl(String query, Bundle extras) {
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_SESSION_PLAY_FROM_SEARCH);
+ if (TextUtils.isEmpty(query)) {
+ throw new IllegalArgumentException("query shouldn't be empty");
+ }
+ if (binder != null) {
+ try {
+ binder.playFromSearch(mControllerStub, query, extras);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ // TODO(jaewan): Handle.
+ }
+ }
+
+ @Override
+ public void playFromMediaId_impl(String mediaId, Bundle extras) {
+ final IMediaSession2 binder = getSessionBinderIfAble(
+ COMMAND_CODE_SESSION_PLAY_FROM_MEDIA_ID);
+ if (mediaId == null) {
+ throw new IllegalArgumentException("mediaId shouldn't be null");
+ }
+ if (binder != null) {
+ try {
+ binder.playFromMediaId(mControllerStub, mediaId, extras);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ // TODO(jaewan): Handle.
+ }
+ }
+
+ @Override
+ public void setRating_impl(String mediaId, Rating2 rating) {
+ if (mediaId == null) {
+ throw new IllegalArgumentException("mediaId shouldn't be null");
+ }
+ if (rating == null) {
+ throw new IllegalArgumentException("rating shouldn't be null");
+ }
+
+ final IMediaSession2 binder = mSessionBinder;
+ if (binder != null) {
+ try {
+ binder.setRating(mControllerStub, mediaId, rating.toBundle());
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ // TODO(jaewan): Handle.
+ }
+ }
+
+ @Override
+ public void sendCustomCommand_impl(SessionCommand2 command, Bundle args, ResultReceiver cb) {
+ if (command == null) {
+ throw new IllegalArgumentException("command shouldn't be null");
+ }
+ final IMediaSession2 binder = getSessionBinderIfAble(command);
+ if (binder != null) {
+ try {
+ binder.sendCustomCommand(mControllerStub, command.toBundle(), args, cb);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public List<MediaItem2> getPlaylist_impl() {
+ synchronized (mLock) {
+ return mPlaylist;
+ }
+ }
+
+ @Override
+ public void setPlaylist_impl(List<MediaItem2> list, MediaMetadata2 metadata) {
+ if (list == null) {
+ throw new IllegalArgumentException("list shouldn't be null");
+ }
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_PLAYLIST_SET_LIST);
+ if (binder != null) {
+ List<Bundle> bundleList = new ArrayList<>();
+ for (int i = 0; i < list.size(); i++) {
+ bundleList.add(list.get(i).toBundle());
+ }
+ Bundle metadataBundle = (metadata == null) ? null : metadata.toBundle();
+ try {
+ binder.setPlaylist(mControllerStub, bundleList, metadataBundle);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public MediaMetadata2 getPlaylistMetadata_impl() {
+ synchronized (mLock) {
+ return mPlaylistMetadata;
+ }
+ }
+
+ @Override
+ public void updatePlaylistMetadata_impl(MediaMetadata2 metadata) {
+ final IMediaSession2 binder = getSessionBinderIfAble(
+ COMMAND_CODE_PLAYLIST_SET_LIST_METADATA);
+ if (binder != null) {
+ Bundle metadataBundle = (metadata == null) ? null : metadata.toBundle();
+ try {
+ binder.updatePlaylistMetadata(mControllerStub, metadataBundle);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void prepare_impl() {
+ sendTransportControlCommand(SessionCommand2.COMMAND_CODE_PLAYBACK_PREPARE);
+ }
+
+ @Override
+ public void fastForward_impl() {
+ // TODO(jaewan): Implement this. Note that fast forward isn't a transport command anymore
+ //sendTransportControlCommand(MediaSession2.COMMAND_CODE_SESSION_FAST_FORWARD);
+ }
+
+ @Override
+ public void rewind_impl() {
+ // TODO(jaewan): Implement this. Note that rewind isn't a transport command anymore
+ //sendTransportControlCommand(MediaSession2.COMMAND_CODE_SESSION_REWIND);
+ }
+
+ @Override
+ public void seekTo_impl(long pos) {
+ if (pos < 0) {
+ throw new IllegalArgumentException("position shouldn't be negative");
+ }
+ Bundle args = new Bundle();
+ args.putLong(MediaSession2Stub.ARGUMENT_KEY_POSITION, pos);
+ sendTransportControlCommand(SessionCommand2.COMMAND_CODE_PLAYBACK_SEEK_TO, args);
+ }
+
+ @Override
+ public void addPlaylistItem_impl(int index, MediaItem2 item) {
+ if (index < 0) {
+ throw new IllegalArgumentException("index shouldn't be negative");
+ }
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_PLAYLIST_ADD_ITEM);
+ if (binder != null) {
+ try {
+ binder.addPlaylistItem(mControllerStub, index, item.toBundle());
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void removePlaylistItem_impl(MediaItem2 item) {
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_PLAYLIST_REMOVE_ITEM);
+ if (binder != null) {
+ try {
+ binder.removePlaylistItem(mControllerStub, item.toBundle());
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void replacePlaylistItem_impl(int index, MediaItem2 item) {
+ if (index < 0) {
+ throw new IllegalArgumentException("index shouldn't be negative");
+ }
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_PLAYLIST_REPLACE_ITEM);
+ if (binder != null) {
+ try {
+ binder.replacePlaylistItem(mControllerStub, index, item.toBundle());
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public int getShuffleMode_impl() {
+ return mShuffleMode;
+ }
+
+ @Override
+ public void setShuffleMode_impl(int shuffleMode) {
+ final IMediaSession2 binder = getSessionBinderIfAble(
+ COMMAND_CODE_PLAYLIST_SET_SHUFFLE_MODE);
+ if (binder != null) {
+ try {
+ binder.setShuffleMode(mControllerStub, shuffleMode);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public int getRepeatMode_impl() {
+ return mRepeatMode;
+ }
+
+ @Override
+ public void setRepeatMode_impl(int repeatMode) {
+ final IMediaSession2 binder = getSessionBinderIfAble(COMMAND_CODE_PLAYLIST_SET_REPEAT_MODE);
+ if (binder != null) {
+ try {
+ binder.setRepeatMode(mControllerStub, repeatMode);
+ } catch (RemoteException e) {
+ Log.w(TAG, "Cannot connect to the service or the session is gone", e);
+ }
+ } else {
+ Log.w(TAG, "Session isn't active", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public PlaybackInfo getPlaybackInfo_impl() {
+ synchronized (mLock) {
+ return mPlaybackInfo;
+ }
+ }
+
+ @Override
+ public int getPlayerState_impl() {
+ synchronized (mLock) {
+ return mPlayerState;
+ }
+ }
+
+ @Override
+ public long getCurrentPosition_impl() {
+ synchronized (mLock) {
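+            // Extrapolate from the last reported position. For example, if the session reported
+            // 10,000 ms two seconds ago at 1.5x speed, the expected position is 13,000 ms.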
+ long timeDiff = System.currentTimeMillis() - mPositionEventTimeMs;
+ long expectedPosition = mPositionMs + (long) (mPlaybackSpeed * timeDiff);
+ return Math.max(0, expectedPosition);
+ }
+ }
+
+ @Override
+ public float getPlaybackSpeed_impl() {
+ synchronized (mLock) {
+ return mPlaybackSpeed;
+ }
+ }
+
+ @Override
+ public long getBufferedPosition_impl() {
+ synchronized (mLock) {
+ return mBufferedPositionMs;
+ }
+ }
+
+ @Override
+ public MediaItem2 getCurrentMediaItem_impl() {
+ // TODO(jaewan): Implement
+ return null;
+ }
+
+ void pushPlayerStateChanges(final int state) {
+ synchronized (mLock) {
+ mPlayerState = state;
+ }
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ mCallback.onPlayerStateChanged(mInstance, state);
+ });
+ }
+
+ // TODO(jaewan): Rename to seek completed
+ void pushPositionChanges(final long eventTimeMs, final long positionMs) {
+ synchronized (mLock) {
+ mPositionEventTimeMs = eventTimeMs;
+ mPositionMs = positionMs;
+ }
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ mCallback.onSeekCompleted(mInstance, positionMs);
+ });
+ }
+
+ void pushPlaybackSpeedChanges(final float speed) {
+ synchronized (mLock) {
+ mPlaybackSpeed = speed;
+ }
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ mCallback.onPlaybackSpeedChanged(mInstance, speed);
+ });
+ }
+
+ void pushBufferedPositionChanges(final long bufferedPositionMs) {
+ synchronized (mLock) {
+ mBufferedPositionMs = bufferedPositionMs;
+ }
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ // TODO(jaewan): Fix this -- it's now buffered state
+ //mCallback.onBufferedPositionChanged(mInstance, bufferedPositionMs);
+ });
+ }
+
+ void pushPlaybackInfoChanges(final PlaybackInfo info) {
+ synchronized (mLock) {
+ mPlaybackInfo = info;
+ }
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ mCallback.onPlaybackInfoChanged(mInstance, info);
+ });
+ }
+
+ void pushPlaylistChanges(final List<MediaItem2> playlist, final MediaMetadata2 metadata) {
+ synchronized (mLock) {
+ mPlaylist = playlist;
+ mPlaylistMetadata = metadata;
+ }
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ mCallback.onPlaylistChanged(mInstance, playlist, metadata);
+ });
+ }
+
+ void pushPlaylistMetadataChanges(MediaMetadata2 metadata) {
+ synchronized (mLock) {
+ mPlaylistMetadata = metadata;
+ }
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ mCallback.onPlaylistMetadataChanged(mInstance, metadata);
+ });
+ }
+
+ void pushShuffleModeChanges(int shuffleMode) {
+ synchronized (mLock) {
+ mShuffleMode = shuffleMode;
+ }
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ mCallback.onShuffleModeChanged(mInstance, shuffleMode);
+ });
+ }
+
+ void pushRepeatModeChanges(int repeatMode) {
+ synchronized (mLock) {
+ mRepeatMode = repeatMode;
+ }
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ mCallback.onRepeatModeChanged(mInstance, repeatMode);
+ });
+ }
+
+ void pushError(int errorCode, Bundle extras) {
+ mCallbackExecutor.execute(() -> {
+ if (!mInstance.isConnected()) {
+ return;
+ }
+ mCallback.onError(mInstance, errorCode, extras);
+ });
+ }
+
+ // Should be used without a lock to prevent potential deadlock.
+ void onConnectedNotLocked(IMediaSession2 sessionBinder,
+ final SessionCommandGroup2 allowedCommands,
+ final int playerState,
+ final long positionEventTimeMs,
+ final long positionMs,
+ final float playbackSpeed,
+ final long bufferedPositionMs,
+ final PlaybackInfo info,
+ final int repeatMode,
+ final int shuffleMode,
+ final List<MediaItem2> playlist,
+ final PendingIntent sessionActivity) {
+ if (DEBUG) {
+ Log.d(TAG, "onConnectedNotLocked sessionBinder=" + sessionBinder
+ + ", allowedCommands=" + allowedCommands);
+ }
+ boolean close = false;
+ try {
+ if (sessionBinder == null || allowedCommands == null) {
+ // Connection rejected.
+ close = true;
+ return;
+ }
+ synchronized (mLock) {
+ if (mIsReleased) {
+ return;
+ }
+ if (mSessionBinder != null) {
+                    Log.e(TAG, "Cannot be notified about the connection result more than once."
+                            + " Probably a bug or a malicious app.");
+ close = true;
+ return;
+ }
+ mAllowedCommands = allowedCommands;
+ mPlayerState = playerState;
+ mPositionEventTimeMs = positionEventTimeMs;
+ mPositionMs = positionMs;
+ mPlaybackSpeed = playbackSpeed;
+ mBufferedPositionMs = bufferedPositionMs;
+ mPlaybackInfo = info;
+ mRepeatMode = repeatMode;
+ mShuffleMode = shuffleMode;
+ mPlaylist = playlist;
+ mSessionActivity = sessionActivity;
+ mSessionBinder = sessionBinder;
+ try {
+                    // The implementation for the local binder is a no-op,
+                    // so this can be called without worrying about deadlock.
+ mSessionBinder.asBinder().linkToDeath(mDeathRecipient, 0);
+ } catch (RemoteException e) {
+ if (DEBUG) {
+ Log.d(TAG, "Session died too early.", e);
+ }
+ close = true;
+ return;
+ }
+ }
+            // TODO(jaewan): Keep the commands to prevent illegal API calls.
+            mCallbackExecutor.execute(() -> {
+                // Note: We could trigger ControllerCallbacks with the initial values here,
+                // but it's hard to define the order of the controller callbacks, so for now
+                // only notify that the controller is connected.
+ mCallback.onConnected(mInstance, allowedCommands);
+ });
+ } finally {
+ if (close) {
+ // Trick to call release() without holding the lock, to prevent potential deadlock
+ // with the developer's custom lock within the ControllerCallback.onDisconnected().
+ mInstance.close();
+ }
+ }
+ }
+
+ void onCustomCommand(final SessionCommand2 command, final Bundle args,
+ final ResultReceiver receiver) {
+ if (DEBUG) {
+ Log.d(TAG, "onCustomCommand cmd=" + command);
+ }
+ mCallbackExecutor.execute(() -> {
+ // TODO(jaewan): Double check if the controller exists.
+ mCallback.onCustomCommand(mInstance, command, args, receiver);
+ });
+ }
+
+ void onAllowedCommandsChanged(final SessionCommandGroup2 commands) {
+ mCallbackExecutor.execute(() -> {
+ mCallback.onAllowedCommandsChanged(mInstance, commands);
+ });
+ }
+
+ void onCustomLayoutChanged(final List<CommandButton> layout) {
+ mCallbackExecutor.execute(() -> {
+ mCallback.onCustomLayoutChanged(mInstance, layout);
+ });
+ }
+
+ // This will be called on the main thread.
+ private class SessionServiceConnection implements ServiceConnection {
+ @Override
+ public void onServiceConnected(ComponentName name, IBinder service) {
+            // Note that this is always called on the main thread.
+ if (DEBUG) {
+ Log.d(TAG, "onServiceConnected " + name + " " + this);
+ }
+ // Sanity check
+ if (!mToken.getPackageName().equals(name.getPackageName())) {
+ Log.wtf(TAG, name + " was connected, but expected pkg="
+ + mToken.getPackageName() + " with id=" + mToken.getId());
+ return;
+ }
+ final IMediaSession2 sessionBinder = IMediaSession2.Stub.asInterface(service);
+ connectToSession(sessionBinder);
+ }
+
+ @Override
+ public void onServiceDisconnected(ComponentName name) {
+            // Temporary loss of the binding because the service crashed. The system will
+            // automatically rebind, so this is a no-op.
+            // TODO(jaewan): Really? Either disconnect cleanly or
+ if (DEBUG) {
+ Log.w(TAG, "Session service " + name + " is disconnected.");
+ }
+ }
+
+ @Override
+ public void onBindingDied(ComponentName name) {
+            // Permanent loss of the binding because the service package was updated or removed.
+            // This SessionServiceRecord will be removed accordingly, but forget the session
+            // binder here to be sure.
+ mInstance.close();
+ }
+ }
+
+ public static final class PlaybackInfoImpl implements PlaybackInfoProvider {
+
+ private static final String KEY_PLAYBACK_TYPE =
+ "android.media.playbackinfo_impl.playback_type";
+ private static final String KEY_CONTROL_TYPE =
+ "android.media.playbackinfo_impl.control_type";
+ private static final String KEY_MAX_VOLUME =
+ "android.media.playbackinfo_impl.max_volume";
+ private static final String KEY_CURRENT_VOLUME =
+ "android.media.playbackinfo_impl.current_volume";
+ private static final String KEY_AUDIO_ATTRIBUTES =
+ "android.media.playbackinfo_impl.audio_attrs";
+
+ private final PlaybackInfo mInstance;
+
+ private final int mPlaybackType;
+ private final int mControlType;
+ private final int mMaxVolume;
+ private final int mCurrentVolume;
+ private final AudioAttributes mAudioAttrs;
+
+ private PlaybackInfoImpl(int playbackType, AudioAttributes attrs, int controlType,
+ int max, int current) {
+ mPlaybackType = playbackType;
+ mAudioAttrs = attrs;
+ mControlType = controlType;
+ mMaxVolume = max;
+ mCurrentVolume = current;
+ mInstance = new PlaybackInfo(this);
+ }
+
+ @Override
+ public int getPlaybackType_impl() {
+ return mPlaybackType;
+ }
+
+ @Override
+ public AudioAttributes getAudioAttributes_impl() {
+ return mAudioAttrs;
+ }
+
+ @Override
+ public int getControlType_impl() {
+ return mControlType;
+ }
+
+ @Override
+ public int getMaxVolume_impl() {
+ return mMaxVolume;
+ }
+
+ @Override
+ public int getCurrentVolume_impl() {
+ return mCurrentVolume;
+ }
+
+ PlaybackInfo getInstance() {
+ return mInstance;
+ }
+
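+ // PlaybackInfo crosses the process boundary as a Bundle; toBundle() and fromBundle()
+ // below must stay in sync on the KEY_* constants above.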
+ Bundle toBundle() {
+ Bundle bundle = new Bundle();
+ bundle.putInt(KEY_PLAYBACK_TYPE, mPlaybackType);
+ bundle.putInt(KEY_CONTROL_TYPE, mControlType);
+ bundle.putInt(KEY_MAX_VOLUME, mMaxVolume);
+ bundle.putInt(KEY_CURRENT_VOLUME, mCurrentVolume);
+ bundle.putParcelable(KEY_AUDIO_ATTRIBUTES, mAudioAttrs);
+ return bundle;
+ }
+
+ static PlaybackInfo createPlaybackInfo(int playbackType, AudioAttributes attrs,
+ int controlType, int max, int current) {
+ return new PlaybackInfoImpl(playbackType, attrs, controlType, max, current)
+ .getInstance();
+ }
+
+ static PlaybackInfo fromBundle(Bundle bundle) {
+ if (bundle == null) {
+ return null;
+ }
+ final int volumeType = bundle.getInt(KEY_PLAYBACK_TYPE);
+ final int volumeControl = bundle.getInt(KEY_CONTROL_TYPE);
+ final int maxVolume = bundle.getInt(KEY_MAX_VOLUME);
+ final int currentVolume = bundle.getInt(KEY_CURRENT_VOLUME);
+ final AudioAttributes attrs = bundle.getParcelable(KEY_AUDIO_ATTRIBUTES);
+
+ return createPlaybackInfo(volumeType, attrs, volumeControl, maxVolume, currentVolume);
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/MediaController2Stub.java b/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
new file mode 100644
index 0000000..2cfc5df
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaController2Stub.java
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import android.app.PendingIntent;
+import android.content.Context;
+import android.media.MediaController2;
+import android.media.MediaItem2;
+import android.media.MediaMetadata2;
+import android.media.SessionCommand2;
+import android.media.MediaSession2.CommandButton;
+import android.media.SessionCommandGroup2;
+import android.os.Bundle;
+import android.os.ResultReceiver;
+import android.text.TextUtils;
+import android.util.Log;
+
+import com.android.media.MediaController2Impl.PlaybackInfoImpl;
+import com.android.media.MediaSession2Impl.CommandButtonImpl;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.List;
+
+public class MediaController2Stub extends IMediaController2.Stub {
+ private static final String TAG = "MediaController2Stub";
+ private static final boolean DEBUG = true; // TODO(jaewan): Change
+
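+ // The controller is held weakly so that this binder stub does not keep
+ // MediaController2Impl alive after MediaController2.close(); see destroy() and
+ // getController().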
+ private final WeakReference<MediaController2Impl> mController;
+
+ MediaController2Stub(MediaController2Impl controller) {
+ mController = new WeakReference<>(controller);
+ }
+
+ private MediaController2Impl getController() throws IllegalStateException {
+ final MediaController2Impl controller = mController.get();
+ if (controller == null) {
+ throw new IllegalStateException("Controller is released");
+ }
+ return controller;
+ }
+
+ private MediaBrowser2Impl getBrowser() throws IllegalStateException {
+ final MediaController2Impl controller = getController();
+ if (controller instanceof MediaBrowser2Impl) {
+ return (MediaBrowser2Impl) controller;
+ }
+ return null;
+ }
+
+ public void destroy() {
+ mController.clear();
+ }
+
+ @Override
+ public void onPlayerStateChanged(int state) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ controller.pushPlayerStateChanges(state);
+ }
+
+ @Override
+ public void onPositionChanged(long eventTimeMs, long positionMs) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (eventTimeMs < 0) {
+ Log.w(TAG, "onPositionChanged(): Ignoring negative eventTimeMs");
+ return;
+ }
+ if (positionMs < 0) {
+ Log.w(TAG, "onPositionChanged(): Ignoring negative positionMs");
+ return;
+ }
+ controller.pushPositionChanges(eventTimeMs, positionMs);
+ }
+
+ @Override
+ public void onPlaybackSpeedChanged(float speed) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ controller.pushPlaybackSpeedChanges(speed);
+ }
+
+ @Override
+ public void onBufferedPositionChanged(long bufferedPositionMs) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (bufferedPositionMs < 0) {
+ Log.w(TAG, "onBufferedPositionChanged(): Ignoring negative bufferedPositionMs");
+ return;
+ }
+ controller.pushBufferedPositionChanges(bufferedPositionMs);
+ }
+
+ @Override
+ public void onPlaylistChanged(List<Bundle> playlistBundle, Bundle metadataBundle) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (playlistBundle == null) {
+ Log.w(TAG, "onPlaylistChanged(): Ignoring null playlist from " + controller);
+ return;
+ }
+ List<MediaItem2> playlist = new ArrayList<>();
+ for (Bundle bundle : playlistBundle) {
+ MediaItem2 item = MediaItem2.fromBundle(bundle);
+ if (item == null) {
+ Log.w(TAG, "onPlaylistChanged(): Ignoring null item in playlist");
+ } else {
+ playlist.add(item);
+ }
+ }
+ MediaMetadata2 metadata = MediaMetadata2.fromBundle(metadataBundle);
+ controller.pushPlaylistChanges(playlist, metadata);
+ }
+
+ @Override
+ public void onPlaylistMetadataChanged(Bundle metadataBundle) throws RuntimeException {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ MediaMetadata2 metadata = MediaMetadata2.fromBundle(metadataBundle);
+ controller.pushPlaylistMetadataChanges(metadata);
+ }
+
+ @Override
+ public void onRepeatModeChanged(int repeatMode) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ controller.pushRepeatModeChanges(repeatMode);
+ }
+
+ @Override
+ public void onPlaybackInfoChanged(Bundle playbackInfo) throws RuntimeException {
+ if (DEBUG) {
+ Log.d(TAG, "onPlaybackInfoChanged");
+ }
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ MediaController2.PlaybackInfo info = PlaybackInfoImpl.fromBundle(playbackInfo);
+ if (info == null) {
+ Log.w(TAG, "onPlaybackInfoChanged(): Ignoring null playbackInfo");
+ return;
+ }
+ controller.pushPlaybackInfoChanges(info);
+ }
+
+ @Override
+ public void onShuffleModeChanged(int shuffleMode) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ controller.pushShuffleModeChanges(shuffleMode);
+ }
+
+ @Override
+ public void onError(int errorCode, Bundle extras) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ controller.pushError(errorCode, extras);
+ }
+
+ @Override
+ public void onConnected(IMediaSession2 sessionBinder, Bundle commandGroup,
+ int playerState, long positionEventTimeMs, long positionMs, float playbackSpeed,
+ long bufferedPositionMs, Bundle playbackInfo, int shuffleMode, int repeatMode,
+ List<Bundle> itemBundleList, PendingIntent sessionActivity) {
+ final MediaController2Impl controller = mController.get();
+ if (controller == null) {
+ if (DEBUG) {
+ Log.d(TAG, "onConnected after MediaController2.close()");
+ }
+ return;
+ }
+ final Context context = controller.getContext();
+ List<MediaItem2> itemList = null;
+ if (itemBundleList != null) {
+ itemList = new ArrayList<>();
+ for (int i = 0; i < itemBundleList.size(); i++) {
+ MediaItem2 item = MediaItem2.fromBundle(itemBundleList.get(i));
+ if (item != null) {
+ itemList.add(item);
+ }
+ }
+ }
+ controller.onConnectedNotLocked(sessionBinder,
+ SessionCommandGroup2.fromBundle(commandGroup),
+ playerState, positionEventTimeMs, positionMs, playbackSpeed, bufferedPositionMs,
+ PlaybackInfoImpl.fromBundle(playbackInfo), repeatMode, shuffleMode,
+ itemList, sessionActivity);
+ }
+
+ @Override
+ public void onDisconnected() {
+ final MediaController2Impl controller = mController.get();
+ if (controller == null) {
+ if (DEBUG) {
+ Log.d(TAG, "onDisconnected after MediaController2.close()");
+ }
+ return;
+ }
+ controller.getInstance().close();
+ }
+
+ @Override
+ public void onCustomLayoutChanged(List<Bundle> commandButtonlist) {
+ if (commandButtonlist == null) {
+ Log.w(TAG, "onCustomLayoutChanged(): Ignoring null commandButtonlist");
+ return;
+ }
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (controller == null) {
+ // TODO(jaewan): Revisit here. Could be a bug
+ return;
+ }
+ List<CommandButton> layout = new ArrayList<>();
+ for (int i = 0; i < commandButtonlist.size(); i++) {
+ CommandButton button = CommandButtonImpl.fromBundle(commandButtonlist.get(i));
+ if (button != null) {
+ layout.add(button);
+ }
+ }
+ controller.onCustomLayoutChanged(layout);
+ }
+
+ @Override
+ public void onAllowedCommandsChanged(Bundle commandsBundle) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (controller == null) {
+ // TODO(jaewan): Revisit here. Could be a bug
+ return;
+ }
+ SessionCommandGroup2 commands = SessionCommandGroup2.fromBundle(commandsBundle);
+ if (commands == null) {
+ Log.w(TAG, "onAllowedCommandsChanged(): Ignoring null commands");
+ return;
+ }
+ controller.onAllowedCommandsChanged(commands);
+ }
+
+ @Override
+ public void onCustomCommand(Bundle commandBundle, Bundle args, ResultReceiver receiver) {
+ final MediaController2Impl controller;
+ try {
+ controller = getController();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ SessionCommand2 command = SessionCommand2.fromBundle(commandBundle);
+ if (command == null) {
+ Log.w(TAG, "onCustomCommand(): Ignoring null command");
+ return;
+ }
+ controller.onCustomCommand(command, args, receiver);
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ // MediaBrowser specific
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ @Override
+ public void onGetLibraryRootDone(Bundle rootHints, String rootMediaId, Bundle rootExtra)
+ throws RuntimeException {
+ final MediaBrowser2Impl browser;
+ try {
+ browser = getBrowser();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (browser == null) {
+ // TODO(jaewan): Revisit here. Could be a bug
+ return;
+ }
+ browser.onGetLibraryRootDone(rootHints, rootMediaId, rootExtra);
+ }
+
+
+ @Override
+ public void onGetItemDone(String mediaId, Bundle itemBundle) throws RuntimeException {
+ if (mediaId == null) {
+ Log.w(TAG, "onGetItemDone(): Ignoring null mediaId");
+ return;
+ }
+ final MediaBrowser2Impl browser;
+ try {
+ browser = getBrowser();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (browser == null) {
+ // TODO(jaewan): Revisit here. Could be a bug
+ return;
+ }
+ browser.onGetItemDone(mediaId, MediaItem2.fromBundle(itemBundle));
+ }
+
+ @Override
+ public void onGetChildrenDone(String parentId, int page, int pageSize,
+ List<Bundle> itemBundleList, Bundle extras) throws RuntimeException {
+ if (parentId == null) {
+ Log.w(TAG, "onGetChildrenDone(): Ignoring null parentId");
+ return;
+ }
+ final MediaBrowser2Impl browser;
+ try {
+ browser = getBrowser();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (browser == null) {
+ // TODO(jaewan): Revisit here. Could be a bug
+ return;
+ }
+
+ List<MediaItem2> result = null;
+ if (itemBundleList != null) {
+ result = new ArrayList<>();
+ for (Bundle bundle : itemBundleList) {
+ result.add(MediaItem2.fromBundle(bundle));
+ }
+ }
+ browser.onGetChildrenDone(parentId, page, pageSize, result, extras);
+ }
+
+ @Override
+ public void onSearchResultChanged(String query, int itemCount, Bundle extras)
+ throws RuntimeException {
+ if (TextUtils.isEmpty(query)) {
+ Log.w(TAG, "onSearchResultChanged(): Ignoring empty query");
+ return;
+ }
+ final MediaBrowser2Impl browser;
+ try {
+ browser = getBrowser();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (browser == null) {
+ // TODO(jaewan): Revisit here. Could be a bug
+ return;
+ }
+ browser.onSearchResultChanged(query, itemCount, extras);
+ }
+
+ @Override
+ public void onGetSearchResultDone(String query, int page, int pageSize,
+ List<Bundle> itemBundleList, Bundle extras) throws RuntimeException {
+ if (TextUtils.isEmpty(query)) {
+ Log.w(TAG, "onGetSearchResultDone(): Ignoring empty query");
+ return;
+ }
+ final MediaBrowser2Impl browser;
+ try {
+ browser = getBrowser();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (browser == null) {
+ // TODO(jaewan): Revisit here. Could be a bug
+ return;
+ }
+
+ List<MediaItem2> result = null;
+ if (itemBundleList != null) {
+ result = new ArrayList<>();
+ for (Bundle bundle : itemBundleList) {
+ result.add(MediaItem2.fromBundle(bundle));
+ }
+ }
+ browser.onGetSearchResultDone(query, page, pageSize, result, extras);
+ }
+
+ @Override
+ public void onChildrenChanged(String parentId, int itemCount, Bundle extras) {
+ if (parentId == null) {
+ Log.w(TAG, "onChildrenChanged(): Ignoring null parentId");
+ return;
+ }
+ final MediaBrowser2Impl browser;
+ try {
+ browser = getBrowser();
+ } catch (IllegalStateException e) {
+ Log.w(TAG, "Don't fail silently here. Highly likely a bug");
+ return;
+ }
+ if (browser == null) {
+ // TODO(jaewan): Revisit here. Could be a bug
+ return;
+ }
+ browser.onChildrenChanged(parentId, itemCount, extras);
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/MediaItem2Impl.java b/packages/MediaComponents/src/com/android/media/MediaItem2Impl.java
new file mode 100644
index 0000000..910a0f1
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaItem2Impl.java
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import static android.media.MediaItem2.FLAG_BROWSABLE;
+import static android.media.MediaItem2.FLAG_PLAYABLE;
+
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.media.DataSourceDesc;
+import android.media.MediaItem2;
+import android.media.MediaItem2.Builder;
+import android.media.MediaItem2.Flags;
+import android.media.MediaMetadata2;
+import android.media.update.MediaItem2Provider;
+import android.os.Bundle;
+import android.text.TextUtils;
+
+import java.util.UUID;
+
+public class MediaItem2Impl implements MediaItem2Provider {
+ private static final String KEY_ID = "android.media.mediaitem2.id";
+ private static final String KEY_FLAGS = "android.media.mediaitem2.flags";
+ private static final String KEY_METADATA = "android.media.mediaitem2.metadata";
+ private static final String KEY_UUID = "android.media.mediaitem2.uuid";
+
+ private final MediaItem2 mInstance;
+ private final String mId;
+ private final int mFlags;
+ private final UUID mUUID;
+ private MediaMetadata2 mMetadata;
+ private DataSourceDesc mDataSourceDesc;
+
+ // From the public API
+ public MediaItem2Impl(@NonNull String mediaId, @Nullable DataSourceDesc dsd,
+ @Nullable MediaMetadata2 metadata, @Flags int flags) {
+ this(mediaId, dsd, metadata, flags, null);
+ }
+
+ private MediaItem2Impl(@NonNull String mediaId, @Nullable DataSourceDesc dsd,
+ @Nullable MediaMetadata2 metadata, @Flags int flags, @Nullable UUID uuid) {
+ if (mediaId == null) {
+ throw new IllegalArgumentException("mediaId shouldn't be null");
+ }
+ if (metadata != null && !TextUtils.equals(mediaId, metadata.getMediaId())) {
+ throw new IllegalArgumentException("metadata's id should be matched with the mediaid");
+ }
+
+ mId = mediaId;
+ mDataSourceDesc = dsd;
+ mMetadata = metadata;
+ mFlags = flags;
+ mUUID = (uuid == null) ? UUID.randomUUID() : uuid;
+
+ mInstance = new MediaItem2(this);
+ }
+
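+ // Equality is based on the per-item UUID rather than the media id, so two items created
+ // separately with the same media id remain distinct; the UUID survives a bundle
+ // round-trip via toBundle_impl()/fromBundle().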
+ @Override
+ public boolean equals_impl(Object obj) {
+ if (!(obj instanceof MediaItem2)) {
+ return false;
+ }
+ MediaItem2 other = (MediaItem2) obj;
+ return mUUID.equals(((MediaItem2Impl) other.getProvider()).mUUID);
+ }
+
+ /**
+ * Return this object as a bundle to share between processes.
+ *
+ * @return a new bundle instance
+ */
+ public Bundle toBundle_impl() {
+ Bundle bundle = new Bundle();
+ bundle.putString(KEY_ID, mId);
+ bundle.putInt(KEY_FLAGS, mFlags);
+ if (mMetadata != null) {
+ bundle.putBundle(KEY_METADATA, mMetadata.toBundle());
+ }
+ bundle.putString(KEY_UUID, mUUID.toString());
+ return bundle;
+ }
+
+ /**
+ * Create a MediaItem2 from the {@link Bundle}.
+ *
+ * @param bundle The bundle which was published by {@link MediaItem2#toBundle()}.
+ * @return The newly created MediaItem2
+ */
+ public static MediaItem2 fromBundle_impl(@NonNull Bundle bundle) {
+ if (bundle == null) {
+ return null;
+ }
+ final String uuidString = bundle.getString(KEY_UUID);
+ return fromBundle(bundle, UUID.fromString(uuidString));
+ }
+
+ /**
+ * Create a MediaItem2 from the {@link Bundle} with the specified {@link UUID}.
+ * The {@link UUID} can be null to create a new item with a randomly generated UUID.
+ *
+ * @param bundle The bundle which was published by {@link MediaItem2#toBundle()}.
+ * @param uuid A {@link UUID} to assign to the created item, or null to generate a new one.
+ * @return The newly created MediaItem2
+ */
+ static MediaItem2 fromBundle(@NonNull Bundle bundle, @Nullable UUID uuid) {
+ if (bundle == null) {
+ return null;
+ }
+ final String id = bundle.getString(KEY_ID);
+ final Bundle metadataBundle = bundle.getBundle(KEY_METADATA);
+ final MediaMetadata2 metadata = MediaMetadata2.fromBundle(metadataBundle);
+ final int flags = bundle.getInt(KEY_FLAGS);
+ return new MediaItem2Impl(id, null, metadata, flags, uuid).getInstance();
+ }
+
+ private MediaItem2 getInstance() {
+ return mInstance;
+ }
+
+ @Override
+ public String toString_impl() {
+ final StringBuilder sb = new StringBuilder("MediaItem2{");
+ sb.append("mFlags=").append(mFlags);
+ sb.append(", mMetadata=").append(mMetadata);
+ sb.append('}');
+ return sb.toString();
+ }
+
+ @Override
+ public @Flags int getFlags_impl() {
+ return mFlags;
+ }
+
+ @Override
+ public boolean isBrowsable_impl() {
+ return (mFlags & FLAG_BROWSABLE) != 0;
+ }
+
+ @Override
+ public boolean isPlayable_impl() {
+ return (mFlags & FLAG_PLAYABLE) != 0;
+ }
+
+ @Override
+ public void setMetadata_impl(@Nullable MediaMetadata2 metadata) {
+ if (metadata != null && !TextUtils.equals(mId, metadata.getMediaId())) {
+ throw new IllegalArgumentException("metadata's id should be matched with the mediaId");
+ }
+ mMetadata = metadata;
+ }
+
+ @Override
+ public @Nullable MediaMetadata2 getMetadata_impl() {
+ return mMetadata;
+ }
+
+ @Override
+ public @NonNull String getMediaId_impl() {
+ return mId;
+ }
+
+ @Override
+ public @Nullable DataSourceDesc getDataSourceDesc_impl() {
+ return mDataSourceDesc;
+ }
+
+ public static class BuilderImpl implements MediaItem2Provider.BuilderProvider {
+ private Builder mInstance;
+ private @Flags int mFlags;
+ private String mMediaId;
+ private MediaMetadata2 mMetadata;
+ private DataSourceDesc mDataSourceDesc;
+
+ public BuilderImpl(Builder instance, int flags) {
+ mInstance = instance;
+ mFlags = flags;
+ }
+
+ @Override
+ public Builder setMediaId_impl(@Nullable String mediaId) {
+ mMediaId = mediaId;
+ return mInstance;
+ }
+
+ @Override
+ public Builder setMetadata_impl(@Nullable MediaMetadata2 metadata) {
+ mMetadata = metadata;
+ return mInstance;
+ }
+
+ @Override
+ public Builder setDataSourceDesc_impl(@Nullable DataSourceDesc dataSourceDesc) {
+ mDataSourceDesc = dataSourceDesc;
+ return mInstance;
+ }
+
+ @Override
+ public MediaItem2 build_impl() {
+ String id = (mMetadata != null)
+ ? mMetadata.getString(MediaMetadata2.METADATA_KEY_MEDIA_ID) : null;
+ if (id == null) {
+ // TODO(jaewan): Double check if it's sufficient (e.g. use UUID instead?)
+ id = (mMediaId != null) ? mMediaId : toString();
+ }
+ return new MediaItem2Impl(id, mDataSourceDesc, mMetadata, mFlags).getInstance();
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/MediaLibraryService2Impl.java b/packages/MediaComponents/src/com/android/media/MediaLibraryService2Impl.java
new file mode 100644
index 0000000..cf34cd4
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaLibraryService2Impl.java
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import android.app.PendingIntent;
+import android.content.Context;
+import android.media.MediaLibraryService2;
+import android.media.MediaLibraryService2.LibraryRoot;
+import android.media.MediaLibraryService2.MediaLibrarySession;
+import android.media.MediaLibraryService2.MediaLibrarySession.Builder;
+import android.media.MediaLibraryService2.MediaLibrarySession.MediaLibrarySessionCallback;
+import android.media.MediaPlayerBase;
+import android.media.MediaPlaylistAgent;
+import android.media.MediaSession2;
+import android.media.MediaSession2.ControllerInfo;
+import android.media.MediaSessionService2;
+import android.media.SessionToken2;
+import android.media.VolumeProvider2;
+import android.media.update.MediaLibraryService2Provider;
+import android.os.Bundle;
+import android.text.TextUtils;
+
+import com.android.media.MediaSession2Impl.BuilderBaseImpl;
+
+import java.util.concurrent.Executor;
+
+public class MediaLibraryService2Impl extends MediaSessionService2Impl implements
+ MediaLibraryService2Provider {
+ private final MediaSessionService2 mInstance;
+ private MediaLibrarySession mLibrarySession;
+
+ public MediaLibraryService2Impl(MediaLibraryService2 instance) {
+ super(instance);
+ mInstance = instance;
+ }
+
+ @Override
+ public void onCreate_impl() {
+ super.onCreate_impl();
+
+ // Effectively final
+ MediaSession2 session = getSession();
+ if (!(session instanceof MediaLibrarySession)) {
+ throw new RuntimeException("Expected MediaLibrarySession, but returned MediaSession2");
+ }
+ mLibrarySession = (MediaLibrarySession) getSession();
+ }
+
+ @Override
+ int getSessionType() {
+ return SessionToken2.TYPE_LIBRARY_SERVICE;
+ }
+
+ public static class MediaLibrarySessionImpl extends MediaSession2Impl
+ implements MediaLibrarySessionProvider {
+ public MediaLibrarySessionImpl(Context context,
+ MediaPlayerBase player, String id, MediaPlaylistAgent playlistAgent,
+ VolumeProvider2 volumeProvider,
+ PendingIntent sessionActivity, Executor callbackExecutor,
+ MediaLibrarySessionCallback callback) {
+ super(context, player, id, playlistAgent, volumeProvider, sessionActivity,
+ callbackExecutor, callback);
+ // Don't put any extra initialization here. Here's the reason.
+ // The system service will recognize this session inside the super constructor and would
+ // connect to this session assuming that initialization is finished. However, if any
+ // initialization logic is here, calls from the server would fail.
+ // see: MediaSession2Stub#connect()
+ }
+
+ @Override
+ MediaLibrarySession createInstance() {
+ return new MediaLibrarySession(this);
+ }
+
+ @Override
+ MediaLibrarySession getInstance() {
+ return (MediaLibrarySession) super.getInstance();
+ }
+
+ @Override
+ MediaLibrarySessionCallback getCallback() {
+ return (MediaLibrarySessionCallback) super.getCallback();
+ }
+
+ @Override
+ public void notifyChildrenChanged_impl(ControllerInfo controller, String parentId,
+ int itemCount, Bundle extras) {
+ if (controller == null) {
+ throw new IllegalArgumentException("controller shouldn't be null");
+ }
+ if (parentId == null) {
+ throw new IllegalArgumentException("parentId shouldn't be null");
+ }
+ getSessionStub().notifyChildrenChangedNotLocked(controller, parentId, itemCount,
+ extras);
+ }
+
+ @Override
+ public void notifyChildrenChanged_impl(String parentId, int itemCount, Bundle extras) {
+ if (parentId == null) {
+ throw new IllegalArgumentException("parentId shouldn't be null");
+ }
+ getSessionStub().notifyChildrenChangedNotLocked(parentId, itemCount, extras);
+ }
+
+ @Override
+ public void notifySearchResultChanged_impl(ControllerInfo controller, String query,
+ int itemCount, Bundle extras) {
+ ensureCallingThread();
+ if (controller == null) {
+ throw new IllegalArgumentException("controller shouldn't be null");
+ }
+ if (TextUtils.isEmpty(query)) {
+ throw new IllegalArgumentException("query shouldn't be empty");
+ }
+ getSessionStub().notifySearchResultChanged(controller, query, itemCount, extras);
+ }
+ }
+
+ public static class BuilderImpl
+ extends BuilderBaseImpl<MediaLibrarySession, MediaLibrarySessionCallback> {
+ public BuilderImpl(MediaLibraryService2 service, Builder instance,
+ Executor callbackExecutor, MediaLibrarySessionCallback callback) {
+ super(service);
+ setSessionCallback_impl(callbackExecutor, callback);
+ }
+
+ @Override
+ public MediaLibrarySession build_impl() {
+ return new MediaLibrarySessionImpl(mContext, mPlayer, mId, mPlaylistAgent,
+ mVolumeProvider, mSessionActivity, mCallbackExecutor, mCallback).getInstance();
+ }
+ }
+
+ public static final class LibraryRootImpl implements LibraryRootProvider {
+ private final LibraryRoot mInstance;
+ private final String mRootId;
+ private final Bundle mExtras;
+
+ public LibraryRootImpl(LibraryRoot instance, String rootId, Bundle extras) {
+ if (rootId == null) {
+ throw new IllegalArgumentException("rootId shouldn't be null.");
+ }
+ mInstance = instance;
+ mRootId = rootId;
+ mExtras = extras;
+ }
+
+ @Override
+ public String getRootId_impl() {
+ return mRootId;
+ }
+
+ @Override
+ public Bundle getExtras_impl() {
+ return mExtras;
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/MediaMetadata2Impl.java b/packages/MediaComponents/src/com/android/media/MediaMetadata2Impl.java
new file mode 100644
index 0000000..cf1c532
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaMetadata2Impl.java
@@ -0,0 +1,373 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import static android.media.MediaMetadata2.*;
+
+import android.annotation.Nullable;
+import android.graphics.Bitmap;
+import android.media.MediaMetadata2;
+import android.media.MediaMetadata2.BitmapKey;
+import android.media.MediaMetadata2.Builder;
+import android.media.MediaMetadata2.LongKey;
+import android.media.MediaMetadata2.RatingKey;
+import android.media.MediaMetadata2.TextKey;
+import android.media.Rating2;
+import android.media.update.MediaMetadata2Provider;
+import android.os.Bundle;
+import android.util.ArrayMap;
+import android.util.Log;
+
+import java.util.Set;
+
+public class MediaMetadata2Impl implements MediaMetadata2Provider {
+ private static final String TAG = "MediaMetadata2";
+
+ static final int METADATA_TYPE_LONG = 0;
+ static final int METADATA_TYPE_TEXT = 1;
+ static final int METADATA_TYPE_BITMAP = 2;
+ static final int METADATA_TYPE_RATING = 3;
+ static final int METADATA_TYPE_FLOAT = 4;
+ static final ArrayMap<String, Integer> METADATA_KEYS_TYPE;
+
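+ // Maps each well-known metadata key to its expected value type; BuilderImpl consults
+ // this map to reject put*() calls that use a key with a mismatched type.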
+ static {
+ METADATA_KEYS_TYPE = new ArrayMap<String, Integer>();
+ METADATA_KEYS_TYPE.put(METADATA_KEY_TITLE, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_ARTIST, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_DURATION, METADATA_TYPE_LONG);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_ALBUM, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_AUTHOR, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_WRITER, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_COMPOSER, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_COMPILATION, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_DATE, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_YEAR, METADATA_TYPE_LONG);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_GENRE, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_TRACK_NUMBER, METADATA_TYPE_LONG);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_NUM_TRACKS, METADATA_TYPE_LONG);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_DISC_NUMBER, METADATA_TYPE_LONG);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_ALBUM_ARTIST, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_ART, METADATA_TYPE_BITMAP);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_ART_URI, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_ALBUM_ART, METADATA_TYPE_BITMAP);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_ALBUM_ART_URI, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_USER_RATING, METADATA_TYPE_RATING);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_RATING, METADATA_TYPE_RATING);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_TITLE, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_SUBTITLE, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_DESCRIPTION, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_ICON, METADATA_TYPE_BITMAP);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_ICON_URI, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_MEDIA_ID, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_BT_FOLDER_TYPE, METADATA_TYPE_LONG);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_MEDIA_URI, METADATA_TYPE_TEXT);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_ADVERTISEMENT, METADATA_TYPE_LONG);
+ METADATA_KEYS_TYPE.put(METADATA_KEY_DOWNLOAD_STATUS, METADATA_TYPE_LONG);
+ }
+
+ private static final @TextKey
+ String[] PREFERRED_DESCRIPTION_ORDER = {
+ METADATA_KEY_TITLE,
+ METADATA_KEY_ARTIST,
+ METADATA_KEY_ALBUM,
+ METADATA_KEY_ALBUM_ARTIST,
+ METADATA_KEY_WRITER,
+ METADATA_KEY_AUTHOR,
+ METADATA_KEY_COMPOSER
+ };
+
+ private static final @BitmapKey
+ String[] PREFERRED_BITMAP_ORDER = {
+ METADATA_KEY_DISPLAY_ICON,
+ METADATA_KEY_ART,
+ METADATA_KEY_ALBUM_ART
+ };
+
+ private static final @TextKey
+ String[] PREFERRED_URI_ORDER = {
+ METADATA_KEY_DISPLAY_ICON_URI,
+ METADATA_KEY_ART_URI,
+ METADATA_KEY_ALBUM_ART_URI
+ };
+
+ private final MediaMetadata2 mInstance;
+ private final Bundle mBundle;
+
+ public MediaMetadata2Impl(Bundle bundle) {
+ mInstance = new MediaMetadata2(this);
+ mBundle = bundle;
+ }
+
+ public MediaMetadata2 getInstance() {
+ return mInstance;
+ }
+
+ @Override
+ public boolean containsKey_impl(String key) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ return mBundle.containsKey(key);
+ }
+
+ @Override
+ public CharSequence getText_impl(@TextKey String key) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ return mBundle.getCharSequence(key);
+ }
+
+ @Override
+ public @Nullable String getMediaId_impl() {
+ return mInstance.getString(METADATA_KEY_MEDIA_ID);
+ }
+
+ @Override
+ public String getString_impl(@TextKey String key) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ CharSequence text = mBundle.getCharSequence(key);
+ if (text != null) {
+ return text.toString();
+ }
+ return null;
+ }
+
+ @Override
+ public long getLong_impl(@LongKey String key) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ return mBundle.getLong(key, 0);
+ }
+
+ @Override
+ public Rating2 getRating_impl(@RatingKey String key) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ // TODO(jaewan): Add backward compatibility
+ Rating2 rating = null;
+ try {
+ rating = Rating2.fromBundle(mBundle.getBundle(key));
+ } catch (Exception e) {
+ // ignore, value was not a rating
+ Log.w(TAG, "Failed to retrieve a key as Rating.", e);
+ }
+ return rating;
+ }
+
+ @Override
+ public float getFloat_impl(@FloatKey String key) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ return mBundle.getFloat(key);
+ }
+
+ @Override
+ public Bitmap getBitmap_impl(@BitmapKey String key) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ Bitmap bmp = null;
+ try {
+ bmp = mBundle.getParcelable(key);
+ } catch (Exception e) {
+ // ignore, value was not a bitmap
+ Log.w(TAG, "Failed to retrieve a key as Bitmap.", e);
+ }
+ return bmp;
+ }
+
+ @Override
+ public Bundle getExtras_impl() {
+ try {
+ return mBundle.getBundle(METADATA_KEY_EXTRAS);
+ } catch (Exception e) {
+ // ignore, value was not a bundle
+ Log.w(TAG, "Failed to retrieve an extra");
+ }
+ return null;
+ }
+
+ @Override
+ public int size_impl() {
+ return mBundle.size();
+ }
+
+ @Override
+ public Set<String> keySet_impl() {
+ return mBundle.keySet();
+ }
+
+ @Override
+ public Bundle toBundle_impl() {
+ return mBundle;
+ }
+
+ public static MediaMetadata2 fromBundle_impl(Bundle bundle) {
+ return (bundle == null) ? null : new MediaMetadata2Impl(bundle).getInstance();
+ }
+
+ public static final class BuilderImpl implements MediaMetadata2Provider.BuilderProvider {
+ private final MediaMetadata2.Builder mInstance;
+ private final Bundle mBundle;
+
+ public BuilderImpl(MediaMetadata2.Builder instance) {
+ mInstance = instance;
+ mBundle = new Bundle();
+ }
+
+ public BuilderImpl(MediaMetadata2.Builder instance, MediaMetadata2 source) {
+ if (source == null) {
+ throw new IllegalArgumentException("source shouldn't be null");
+ }
+ mInstance = instance;
+ mBundle = new Bundle(source.toBundle());
+ }
+
+ public BuilderImpl(int maxBitmapSize) {
+ mInstance = new MediaMetadata2.Builder(this);
+ mBundle = new Bundle();
+
+ for (String key : mBundle.keySet()) {
+ Object value = mBundle.get(key);
+ if (value instanceof Bitmap) {
+ Bitmap bmp = (Bitmap) value;
+ if (bmp.getHeight() > maxBitmapSize || bmp.getWidth() > maxBitmapSize) {
+ mInstance.putBitmap(key, scaleBitmap(bmp, maxBitmapSize));
+ }
+ }
+ }
+ }
+
+ @Override
+ public Builder putText_impl(@TextKey String key, CharSequence value) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ if (METADATA_KEYS_TYPE.containsKey(key)) {
+ if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_TEXT) {
+ throw new IllegalArgumentException("The " + key
+ + " key cannot be used to put a CharSequence");
+ }
+ }
+ mBundle.putCharSequence(key, value);
+ return mInstance;
+ }
+
+ @Override
+ public Builder putString_impl(@TextKey String key, String value) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ if (METADATA_KEYS_TYPE.containsKey(key)) {
+ if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_TEXT) {
+ throw new IllegalArgumentException("The " + key
+ + " key cannot be used to put a String");
+ }
+ }
+ mBundle.putCharSequence(key, value);
+ return mInstance;
+ }
+
+ @Override
+ public Builder putLong_impl(@LongKey String key, long value) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ if (METADATA_KEYS_TYPE.containsKey(key)) {
+ if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_LONG) {
+ throw new IllegalArgumentException("The " + key
+ + " key cannot be used to put a long");
+ }
+ }
+ mBundle.putLong(key, value);
+ return mInstance;
+ }
+
+ @Override
+ public Builder putRating_impl(@RatingKey String key, Rating2 value) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ if (METADATA_KEYS_TYPE.containsKey(key)) {
+ if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_RATING) {
+ throw new IllegalArgumentException("The " + key
+ + " key cannot be used to put a Rating");
+ }
+ }
+ mBundle.putBundle(key, value.toBundle());
+ return mInstance;
+ }
+
+ @Override
+ public Builder putBitmap_impl(@BitmapKey String key, Bitmap value) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ if (METADATA_KEYS_TYPE.containsKey(key)) {
+ if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_BITMAP) {
+ throw new IllegalArgumentException("The " + key
+ + " key cannot be used to put a Bitmap");
+ }
+ }
+ mBundle.putParcelable(key, value);
+ return mInstance;
+ }
+
+ @Override
+ public Builder putFloat_impl(@FloatKey String key, float value) {
+ if (key == null) {
+ throw new IllegalArgumentException("key shouldn't be null");
+ }
+ if (METADATA_KEYS_TYPE.containsKey(key)) {
+ if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_FLOAT) {
+ throw new IllegalArgumentException("The " + key
+ + " key cannot be used to put a float");
+ }
+ }
+ mBundle.putFloat(key, value);
+ return mInstance;
+ }
+
+ @Override
+ public Builder setExtras_impl(Bundle bundle) {
+ mBundle.putBundle(METADATA_KEY_EXTRAS, bundle);
+ return mInstance;
+ }
+
+ @Override
+ public MediaMetadata2 build_impl() {
+ return new MediaMetadata2Impl(mBundle).getInstance();
+ }
+
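+ // Scale the bitmap down so that neither dimension exceeds maxSize, preserving the
+ // original aspect ratio.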
+ private Bitmap scaleBitmap(Bitmap bmp, int maxSize) {
+ float maxSizeF = maxSize;
+ float widthScale = maxSizeF / bmp.getWidth();
+ float heightScale = maxSizeF / bmp.getHeight();
+ float scale = Math.min(widthScale, heightScale);
+ int height = (int) (bmp.getHeight() * scale);
+ int width = (int) (bmp.getWidth() * scale);
+ return Bitmap.createScaledBitmap(bmp, width, height, true);
+ }
+ }
+}
+
diff --git a/packages/MediaComponents/src/com/android/media/MediaPlaylistAgentImpl.java b/packages/MediaComponents/src/com/android/media/MediaPlaylistAgentImpl.java
new file mode 100644
index 0000000..dfd4e1a
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaPlaylistAgentImpl.java
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import android.annotation.CallbackExecutor;
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.media.DataSourceDesc;
+import android.media.MediaItem2;
+import android.media.MediaMetadata2;
+import android.media.MediaPlaylistAgent;
+import android.media.MediaPlaylistAgent.PlaylistEventCallback;
+import android.media.update.MediaPlaylistAgentProvider;
+import android.util.ArrayMap;
+import android.util.Log;
+
+import com.android.internal.annotations.GuardedBy;
+
+import java.util.List;
+import java.util.concurrent.Executor;
+
+public class MediaPlaylistAgentImpl implements MediaPlaylistAgentProvider {
+ private static final String TAG = "MediaPlaylistAgent";
+
+ private final MediaPlaylistAgent mInstance;
+
+ private final Object mLock = new Object();
+ @GuardedBy("mLock")
+ private final ArrayMap<PlaylistEventCallback, Executor> mCallbacks = new ArrayMap<>();
+
+ public MediaPlaylistAgentImpl(MediaPlaylistAgent instance) {
+ mInstance = instance;
+ }
+
+ @Override
+ final public void registerPlaylistEventCallback_impl(
+ @NonNull @CallbackExecutor Executor executor, @NonNull PlaylistEventCallback callback) {
+ if (executor == null) {
+ throw new IllegalArgumentException("executor shouldn't be null");
+ }
+ if (callback == null) {
+ throw new IllegalArgumentException("callback shouldn't be null");
+ }
+
+ synchronized (mLock) {
+ if (mCallbacks.get(callback) != null) {
+ Log.w(TAG, "callback is already added. Ignoring.");
+ return;
+ }
+ mCallbacks.put(callback, executor);
+ }
+ }
+
+ @Override
+ final public void unregisterPlaylistEventCallback_impl(
+ @NonNull PlaylistEventCallback callback) {
+ if (callback == null) {
+ throw new IllegalArgumentException("callback shouldn't be null");
+ }
+ synchronized (mLock) {
+ mCallbacks.remove(callback);
+ }
+ }
+
+ @Override
+ final public void notifyPlaylistChanged_impl() {
+ ArrayMap<PlaylistEventCallback, Executor> callbacks = getCallbacks();
+ List<MediaItem2> playlist = mInstance.getPlaylist();
+ MediaMetadata2 metadata = mInstance.getPlaylistMetadata();
+ for (int i = 0; i < callbacks.size(); i++) {
+ final PlaylistEventCallback callback = callbacks.keyAt(i);
+ final Executor executor = callbacks.valueAt(i);
+ executor.execute(() -> callback.onPlaylistChanged(
+ mInstance, playlist, metadata));
+ }
+ }
+
+ @Override
+ final public void notifyPlaylistMetadataChanged_impl() {
+ ArrayMap<PlaylistEventCallback, Executor> callbacks = getCallbacks();
+ for (int i = 0; i < callbacks.size(); i++) {
+ final PlaylistEventCallback callback = callbacks.keyAt(i);
+ final Executor executor = callbacks.valueAt(i);
+ executor.execute(() -> callback.onPlaylistMetadataChanged(
+ mInstance, mInstance.getPlaylistMetadata()));
+ }
+ }
+
+ @Override
+ final public void notifyShuffleModeChanged_impl() {
+ ArrayMap<PlaylistEventCallback, Executor> callbacks = getCallbacks();
+ for (int i = 0; i < callbacks.size(); i++) {
+ final PlaylistEventCallback callback = callbacks.keyAt(i);
+ final Executor executor = callbacks.valueAt(i);
+ executor.execute(() -> callback.onShuffleModeChanged(
+ mInstance, mInstance.getShuffleMode()));
+ }
+ }
+
+ @Override
+ final public void notifyRepeatModeChanged_impl() {
+ ArrayMap<PlaylistEventCallback, Executor> callbacks = getCallbacks();
+ for (int i = 0; i < callbacks.size(); i++) {
+ final PlaylistEventCallback callback = callbacks.keyAt(i);
+ final Executor executor = callbacks.valueAt(i);
+ executor.execute(() -> callback.onRepeatModeChanged(
+ mInstance, mInstance.getRepeatMode()));
+ }
+ }
+
+ @Override
+ public @Nullable List<MediaItem2> getPlaylist_impl() {
+ // empty implementation
+ return null;
+ }
+
+ @Override
+ public void setPlaylist_impl(@NonNull List<MediaItem2> list,
+ @Nullable MediaMetadata2 metadata) {
+ // empty implementation
+ }
+
+ @Override
+ public @Nullable MediaMetadata2 getPlaylistMetadata_impl() {
+ // empty implementation
+ return null;
+ }
+
+ @Override
+ public void updatePlaylistMetadata_impl(@Nullable MediaMetadata2 metadata) {
+ // empty implementation
+ }
+
+ @Override
+ public void addPlaylistItem_impl(int index, @NonNull MediaItem2 item) {
+ // empty implementation
+ }
+
+ @Override
+ public void removePlaylistItem_impl(@NonNull MediaItem2 item) {
+ // empty implementation
+ }
+
+ @Override
+ public void replacePlaylistItem_impl(int index, @NonNull MediaItem2 item) {
+ // empty implementation
+ }
+
+ @Override
+ public void skipToPlaylistItem_impl(@NonNull MediaItem2 item) {
+ // empty implementation
+ }
+
+ @Override
+ public void skipToPreviousItem_impl() {
+ // empty implementation
+ }
+
+ @Override
+ public void skipToNextItem_impl() {
+ // empty implementation
+ }
+
+ @Override
+ public int getRepeatMode_impl() {
+ return MediaPlaylistAgent.REPEAT_MODE_NONE;
+ }
+
+ @Override
+ public void setRepeatMode_impl(int repeatMode) {
+ // empty implementation
+ }
+
+ @Override
+ public int getShuffleMode_impl() {
+ // empty implementation
+ return MediaPlaylistAgent.SHUFFLE_MODE_NONE;
+ }
+
+ @Override
+ public void setShuffleMode_impl(int shuffleMode) {
+ // empty implementation
+ }
+
+ @Override
+ public @Nullable MediaItem2 getMediaItem_impl(@NonNull DataSourceDesc dsd) {
+ if (dsd == null) {
+ throw new IllegalArgumentException("dsd shouldn't be null");
+ }
+ List<MediaItem2> itemList = mInstance.getPlaylist();
+ if (itemList == null) {
+ return null;
+ }
+ for (int i = 0; i < itemList.size(); i++) {
+ MediaItem2 item = itemList.get(i);
+ if (item != null && item.getDataSourceDesc() == dsd) {
+ return item;
+ }
+ }
+ return null;
+ }
+
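+ // Snapshot the registered callbacks under mLock so that the notify*_impl() methods
+ // above can dispatch to each callback's executor without holding the lock.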
+ private ArrayMap<PlaylistEventCallback, Executor> getCallbacks() {
+ ArrayMap<PlaylistEventCallback, Executor> callbacks = new ArrayMap<>();
+ synchronized (mLock) {
+ callbacks.putAll(mCallbacks);
+ }
+ return callbacks;
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
new file mode 100644
index 0000000..4ec6042
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaSession2Impl.java
@@ -0,0 +1,1555 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import static android.media.SessionCommand2.COMMAND_CODE_CUSTOM;
+import static android.media.SessionToken2.TYPE_LIBRARY_SERVICE;
+import static android.media.SessionToken2.TYPE_SESSION;
+import static android.media.SessionToken2.TYPE_SESSION_SERVICE;
+
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.app.PendingIntent;
+import android.content.Context;
+import android.content.Intent;
+import android.content.pm.PackageManager;
+import android.content.pm.ResolveInfo;
+import android.media.AudioAttributes;
+import android.media.AudioFocusRequest;
+import android.media.AudioManager;
+import android.media.DataSourceDesc;
+import android.media.MediaController2;
+import android.media.MediaController2.PlaybackInfo;
+import android.media.MediaItem2;
+import android.media.MediaLibraryService2;
+import android.media.MediaMetadata2;
+import android.media.MediaPlayerBase;
+import android.media.MediaPlayerBase.PlayerEventCallback;
+import android.media.MediaPlayerBase.PlayerState;
+import android.media.MediaPlaylistAgent;
+import android.media.MediaPlaylistAgent.PlaylistEventCallback;
+import android.media.MediaSession2;
+import android.media.MediaSession2.Builder;
+import android.media.SessionCommand2;
+import android.media.MediaSession2.CommandButton;
+import android.media.SessionCommandGroup2;
+import android.media.MediaSession2.ControllerInfo;
+import android.media.MediaSession2.OnDataSourceMissingHelper;
+import android.media.MediaSession2.SessionCallback;
+import android.media.MediaSessionService2;
+import android.media.SessionToken2;
+import android.media.VolumeProvider2;
+import android.media.session.MediaSessionManager;
+import android.media.update.MediaSession2Provider;
+import android.os.Bundle;
+import android.os.IBinder;
+import android.os.Parcelable;
+import android.os.Process;
+import android.os.ResultReceiver;
+import android.support.annotation.GuardedBy;
+import android.text.TextUtils;
+import android.util.Log;
+
+import java.lang.ref.WeakReference;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.concurrent.Executor;
+
+public class MediaSession2Impl implements MediaSession2Provider {
+ private static final String TAG = "MediaSession2";
+ private static final boolean DEBUG = true; // Log.isLoggable(TAG, Log.DEBUG);
+
+ private final Object mLock = new Object();
+
+ private final MediaSession2 mInstance;
+ private final Context mContext;
+ private final String mId;
+ private final Executor mCallbackExecutor;
+ private final SessionCallback mCallback;
+ private final MediaSession2Stub mSessionStub;
+ private final SessionToken2 mSessionToken;
+ private final AudioManager mAudioManager;
+ private final PendingIntent mSessionActivity;
+ private final PlayerEventCallback mPlayerEventCallback;
+ private final PlaylistEventCallback mPlaylistEventCallback;
+
+ // mPlayer is set to null when the session is closed, and we shouldn't throw an exception
+ // nor always log when mPlayer is accessed while it is null. Here's the reason.
+ // When a MediaSession2 is closed, there could be a pending operation in the session callback
+ // executor that may want to access the player. Here's the sample code snippet for that.
+ //
+ // public void onFoo() {
+ // if (mPlayer == null) return; // first check
+ // mSessionCallbackExecutor.executor(() -> {
+ // // Error. Session may be closed and mPlayer can be null here.
+ // mPlayer.foo();
+ // });
+ // }
+ //
+ // By adding this protective code, we also protect APIs from being called after close().
+ //
+ // TODO(jaewan): Should we put volatile here?
+ @GuardedBy("mLock")
+ private MediaPlayerBase mPlayer;
+ @GuardedBy("mLock")
+ private MediaPlaylistAgent mPlaylistAgent;
+ @GuardedBy("mLock")
+ private SessionPlaylistAgent mSessionPlaylistAgent;
+ @GuardedBy("mLock")
+ private VolumeProvider2 mVolumeProvider;
+ @GuardedBy("mLock")
+ private PlaybackInfo mPlaybackInfo;
+ @GuardedBy("mLock")
+ private OnDataSourceMissingHelper mDsmHelper;
+
+ /**
+ * Can only be called by {@link Builder#build()}.
+ * @param context
+ * @param player
+ * @param id
+ * @param playlistAgent
+ * @param volumeProvider
+ * @param sessionActivity
+ * @param callbackExecutor
+ * @param callback
+ */
+ public MediaSession2Impl(Context context, MediaPlayerBase player, String id,
+ MediaPlaylistAgent playlistAgent, VolumeProvider2 volumeProvider,
+ PendingIntent sessionActivity,
+ Executor callbackExecutor, SessionCallback callback) {
+ // TODO(jaewan): Keep other params.
+ mInstance = createInstance();
+
+ // Argument checks are done by builder already.
+ // Initialize finals first.
+ mContext = context;
+ mId = id;
+ mCallback = callback;
+ mCallbackExecutor = callbackExecutor;
+ mSessionActivity = sessionActivity;
+ mSessionStub = new MediaSession2Stub(this);
+ mAudioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
+ mPlayerEventCallback = new MyPlayerEventCallback(this);
+ mPlaylistEventCallback = new MyPlaylistEventCallback(this);
+
+ // Infer type from the id and package name.
+ String libraryService = getServiceName(context, MediaLibraryService2.SERVICE_INTERFACE, id);
+ String sessionService = getServiceName(context, MediaSessionService2.SERVICE_INTERFACE, id);
+ if (sessionService != null && libraryService != null) {
+ throw new IllegalArgumentException("Ambiguous session type. Multiple"
+ + " session services define the same id=" + id);
+ } else if (libraryService != null) {
+ mSessionToken = new SessionToken2Impl(Process.myUid(), TYPE_LIBRARY_SERVICE,
+ mContext.getPackageName(), libraryService, id, mSessionStub).getInstance();
+ } else if (sessionService != null) {
+ mSessionToken = new SessionToken2Impl(Process.myUid(), TYPE_SESSION_SERVICE,
+ mContext.getPackageName(), sessionService, id, mSessionStub).getInstance();
+ } else {
+ mSessionToken = new SessionToken2Impl(Process.myUid(), TYPE_SESSION,
+ mContext.getPackageName(), null, id, mSessionStub).getInstance();
+ }
+
+ updatePlayer(player, playlistAgent, volumeProvider);
+
+ // Ask the server to do the sanity check and register the session.
+ // The sanity check that makes the session ID unique 'per package' cannot be done here;
+ // only the server can know whether the package has another process with another session
+ // using the same id. Note that 'ID is unique per package' is important for a controller
+ // to distinguish a session in another package.
+ MediaSessionManager manager =
+ (MediaSessionManager) mContext.getSystemService(Context.MEDIA_SESSION_SERVICE);
+ if (!manager.createSession2(mSessionToken)) {
+ throw new IllegalStateException("Session with the same id is already used by"
+ + " another process. Use MediaController2 instead.");
+ }
+ }
+
+ MediaSession2 createInstance() {
+ return new MediaSession2(this);
+ }
+
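+ // Resolve the service in this package that declares the given session id for the given
+ // action; throws if more than one service claims the same id.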
+ private static String getServiceName(Context context, String serviceAction, String id) {
+ PackageManager manager = context.getPackageManager();
+ Intent serviceIntent = new Intent(serviceAction);
+ serviceIntent.setPackage(context.getPackageName());
+ List<ResolveInfo> services = manager.queryIntentServices(serviceIntent,
+ PackageManager.GET_META_DATA);
+ String serviceName = null;
+ if (services != null) {
+ for (int i = 0; i < services.size(); i++) {
+ String serviceId = SessionToken2Impl.getSessionId(services.get(i));
+ if (serviceId != null && TextUtils.equals(id, serviceId)) {
+ if (services.get(i).serviceInfo == null) {
+ continue;
+ }
+ if (serviceName != null) {
+ throw new IllegalArgumentException("Ambiguous session type. Multiple"
+ + " session services define the same id=" + id);
+ }
+ serviceName = services.get(i).serviceInfo.name;
+ }
+ }
+ }
+ return serviceName;
+ }
+
+ @Override
+ public void updatePlayer_impl(@NonNull MediaPlayerBase player, MediaPlaylistAgent playlistAgent,
+ VolumeProvider2 volumeProvider) throws IllegalArgumentException {
+ ensureCallingThread();
+ if (player == null) {
+ throw new IllegalArgumentException("player shouldn't be null");
+ }
+ updatePlayer(player, playlistAgent, volumeProvider);
+ }
+
+ private void updatePlayer(MediaPlayerBase player, MediaPlaylistAgent agent,
+ VolumeProvider2 volumeProvider) {
+ final MediaPlayerBase oldPlayer;
+ final MediaPlaylistAgent oldAgent;
+ final PlaybackInfo info = createPlaybackInfo(volumeProvider, player.getAudioAttributes());
+ synchronized (mLock) {
+ oldPlayer = mPlayer;
+ oldAgent = mPlaylistAgent;
+ mPlayer = player;
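+ // No playlist agent was given; fall back to a default SessionPlaylistAgent wrapping the player.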
+ if (agent == null) {
+ mSessionPlaylistAgent = new SessionPlaylistAgent(this, mPlayer);
+ if (mDsmHelper != null) {
+ mSessionPlaylistAgent.setOnDataSourceMissingHelper(mDsmHelper);
+ }
+ agent = mSessionPlaylistAgent;
+ }
+ mPlaylistAgent = agent;
+ mVolumeProvider = volumeProvider;
+ mPlaybackInfo = info;
+ }
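+ // Re-register the event callbacks only when the player or the agent has actually changed.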
+ if (player != oldPlayer) {
+ player.registerPlayerEventCallback(mCallbackExecutor, mPlayerEventCallback);
+ if (oldPlayer != null) {
+ // Warning: A poorly implemented player may ignore this.
+ oldPlayer.unregisterPlayerEventCallback(mPlayerEventCallback);
+ }
+ }
+ if (agent != oldAgent) {
+ agent.registerPlaylistEventCallback(mCallbackExecutor, mPlaylistEventCallback);
+ if (oldAgent != null) {
+ // Warning: A poorly implemented player may ignore this.
+ oldAgent.unregisterPlaylistEventCallback(mPlaylistEventCallback);
+ }
+ }
+
+ if (oldPlayer != null) {
+ mSessionStub.notifyPlaybackInfoChanged(info);
+ notifyPlayerUpdatedNotLocked(oldPlayer);
+ }
+ // TODO(jaewan): Repeat the same thing for the playlist agent.
+ }
+
+ private PlaybackInfo createPlaybackInfo(VolumeProvider2 volumeProvider, AudioAttributes attrs) {
+ PlaybackInfo info;
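+ // Without a volume provider, playback is local and volume info is taken from the audio stream.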
+ if (volumeProvider == null) {
+ int stream;
+ if (attrs == null) {
+ stream = AudioManager.STREAM_MUSIC;
+ } else {
+ stream = attrs.getVolumeControlStream();
+ if (stream == AudioManager.USE_DEFAULT_STREAM_TYPE) {
+ // This may happen if the AudioAttributes doesn't have a usage set.
+ // Fall back to STREAM_MUSIC because AudioManager cannot be queried for the
+ // volume level of USE_DEFAULT_STREAM_TYPE.
+ stream = AudioManager.STREAM_MUSIC;
+ }
+ }
+ info = MediaController2Impl.PlaybackInfoImpl.createPlaybackInfo(
+ PlaybackInfo.PLAYBACK_TYPE_LOCAL,
+ attrs,
+ mAudioManager.isVolumeFixed()
+ ? VolumeProvider2.VOLUME_CONTROL_FIXED
+ : VolumeProvider2.VOLUME_CONTROL_ABSOLUTE,
+ mAudioManager.getStreamMaxVolume(stream),
+ mAudioManager.getStreamVolume(stream));
+ } else {
+ info = MediaController2Impl.PlaybackInfoImpl.createPlaybackInfo(
+ PlaybackInfo.PLAYBACK_TYPE_REMOTE /* playbackType */,
+ attrs,
+ volumeProvider.getControlType(),
+ volumeProvider.getMaxVolume(),
+ volumeProvider.getCurrentVolume());
+ }
+ return info;
+ }
+
+ @Override
+ public void close_impl() {
+ // Stop the system service from listening to this session first.
+ MediaSessionManager manager =
+ (MediaSessionManager) mContext.getSystemService(Context.MEDIA_SESSION_SERVICE);
+ manager.destroySession2(mSessionToken);
+
+ if (mSessionStub != null) {
+ if (DEBUG) {
+ Log.d(TAG, "session is now unavailable, id=" + mId);
+ }
+ // Invalidate previously published session stub.
+ mSessionStub.destroyNotLocked();
+ }
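+ // Detach the player and the agent under the lock, then unregister their callbacks outside of it.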
+ final MediaPlayerBase player;
+ final MediaPlaylistAgent agent;
+ synchronized (mLock) {
+ player = mPlayer;
+ mPlayer = null;
+ agent = mPlaylistAgent;
+ mPlaylistAgent = null;
+ mSessionPlaylistAgent = null;
+ }
+ if (player != null) {
+ player.unregisterPlayerEventCallback(mPlayerEventCallback);
+ }
+ if (agent != null) {
+ agent.unregisterPlaylistEventCallback(mPlaylistEventCallback);
+ }
+ }
+
+ @Override
+ public MediaPlayerBase getPlayer_impl() {
+ return getPlayer();
+ }
+
+ @Override
+ public MediaPlaylistAgent getPlaylistAgent_impl() {
+ return mPlaylistAgent;
+ }
+
+ @Override
+ public VolumeProvider2 getVolumeProvider_impl() {
+ return mVolumeProvider;
+ }
+
+ @Override
+ public SessionToken2 getToken_impl() {
+ return mSessionToken;
+ }
+
+ @Override
+ public List<ControllerInfo> getConnectedControllers_impl() {
+ return mSessionStub.getControllers();
+ }
+
+ @Override
+ public void setAudioFocusRequest_impl(AudioFocusRequest afr) {
+ // TODO: Implement.
+ }
+
+ @Override
+ public void play_impl() {
+ ensureCallingThread();
+ final MediaPlayerBase player = mPlayer;
+ if (player != null) {
+ player.play();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void pause_impl() {
+ ensureCallingThread();
+ final MediaPlayerBase player = mPlayer;
+ if (player != null) {
+ player.pause();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void stop_impl() {
+ ensureCallingThread();
+ final MediaPlayerBase player = mPlayer;
+ if (player != null) {
+ player.reset();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void skipToPlaylistItem_impl(@NonNull MediaItem2 item) {
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.skipToPlaylistItem(item);
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void skipToPreviousItem_impl() {
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.skipToPreviousItem();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void skipToNextItem_impl() {
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.skipToNextItem();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void setCustomLayout_impl(@NonNull ControllerInfo controller,
+ @NonNull List<CommandButton> layout) {
+ ensureCallingThread();
+ if (controller == null) {
+ throw new IllegalArgumentException("controller shouldn't be null");
+ }
+ if (layout == null) {
+ throw new IllegalArgumentException("layout shouldn't be null");
+ }
+ mSessionStub.notifyCustomLayoutNotLocked(controller, layout);
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////
+ // TODO(jaewan): Implement the following
+ //////////////////////////////////////////////////////////////////////////////////////
+
+ @Override
+ public void setAllowedCommands_impl(@NonNull ControllerInfo controller,
+ @NonNull SessionCommandGroup2 commands) {
+ if (controller == null) {
+ throw new IllegalArgumentException("controller shouldn't be null");
+ }
+ if (commands == null) {
+ throw new IllegalArgumentException("commands shouldn't be null");
+ }
+ mSessionStub.setAllowedCommands(controller, commands);
+ }
+
+ @Override
+ public void sendCustomCommand_impl(@NonNull ControllerInfo controller,
+ @NonNull SessionCommand2 command, Bundle args, ResultReceiver receiver) {
+ if (controller == null) {
+ throw new IllegalArgumentException("controller shouldn't be null");
+ }
+ if (command == null) {
+ throw new IllegalArgumentException("command shouldn't be null");
+ }
+ mSessionStub.sendCustomCommand(controller, command, args, receiver);
+ }
+
+ @Override
+ public void sendCustomCommand_impl(@NonNull SessionCommand2 command, Bundle args) {
+ if (command == null) {
+ throw new IllegalArgumentException("command shouldn't be null");
+ }
+ mSessionStub.sendCustomCommand(command, args);
+ }
+
+ @Override
+ public void setPlaylist_impl(@NonNull List<MediaItem2> list, MediaMetadata2 metadata) {
+ if (list == null) {
+ throw new IllegalArgumentException("list shouldn't be null");
+ }
+ ensureCallingThread();
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.setPlaylist(list, metadata);
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void updatePlaylistMetadata_impl(MediaMetadata2 metadata) {
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.updatePlaylistMetadata(metadata);
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void addPlaylistItem_impl(int index, @NonNull MediaItem2 item) {
+ if (index < 0) {
+ throw new IllegalArgumentException("index shouldn't be negative");
+ }
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.addPlaylistItem(index, item);
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void removePlaylistItem_impl(@NonNull MediaItem2 item) {
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.removePlaylistItem(item);
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void replacePlaylistItem_impl(int index, @NonNull MediaItem2 item) {
+ if (index < 0) {
+ throw new IllegalArgumentException("index shouldn't be negative");
+ }
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.replacePlaylistItem(index, item);
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public List<MediaItem2> getPlaylist_impl() {
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ return agent.getPlaylist();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ return null;
+ }
+
+ @Override
+ public MediaMetadata2 getPlaylistMetadata_impl() {
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ return agent.getPlaylistMetadata();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ return null;
+ }
+
+ @Override
+ public MediaItem2 getCurrentPlaylistItem_impl() {
+ // TODO(jaewan): Implement
+ return null;
+ }
+
+ @Override
+ public int getRepeatMode_impl() {
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ return agent.getRepeatMode();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ return MediaPlaylistAgent.REPEAT_MODE_NONE;
+ }
+
+ @Override
+ public void setRepeatMode_impl(int repeatMode) {
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.setRepeatMode(repeatMode);
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public int getShuffleMode_impl() {
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ return agent.getShuffleMode();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ return MediaPlaylistAgent.SHUFFLE_MODE_NONE;
+ }
+
+ @Override
+ public void setShuffleMode_impl(int shuffleMode) {
+ final MediaPlaylistAgent agent = mPlaylistAgent;
+ if (agent != null) {
+ agent.setShuffleMode(shuffleMode);
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void prepare_impl() {
+ ensureCallingThread();
+ final MediaPlayerBase player = mPlayer;
+ if (player != null) {
+ player.prepare();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public void seekTo_impl(long pos) {
+ ensureCallingThread();
+ final MediaPlayerBase player = mPlayer;
+ if (player != null) {
+ player.seekTo(pos);
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ }
+
+ @Override
+ public @PlayerState int getPlayerState_impl() {
+ final MediaPlayerBase player = mPlayer;
+ if (player != null) {
+ return player.getPlayerState();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ return MediaPlayerBase.PLAYER_STATE_ERROR;
+ }
+
+ @Override
+ public long getCurrentPosition_impl() {
+ final MediaPlayerBase player = mPlayer;
+ if (player != null) {
+ return player.getCurrentPosition();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ return MediaPlayerBase.UNKNOWN_TIME;
+ }
+
+ @Override
+ public long getBufferedPosition_impl() {
+ final MediaPlayerBase player = mPlayer;
+ if (player != null) {
+ return player.getBufferedPosition();
+ } else if (DEBUG) {
+ Log.d(TAG, "API calls after the close()", new IllegalStateException());
+ }
+ return MediaPlayerBase.UNKNOWN_TIME;
+ }
+
+ @Override
+ public void notifyError_impl(int errorCode, Bundle extras) {
+ mSessionStub.notifyError(errorCode, extras);
+ }
+
+ @Override
+ public void setOnDataSourceMissingHelper_impl(@NonNull OnDataSourceMissingHelper helper) {
+ if (helper == null) {
+ throw new IllegalArgumentException("helper shouldn't be null");
+ }
+ synchronized (mLock) {
+ mDsmHelper = helper;
+ if (mSessionPlaylistAgent != null) {
+ mSessionPlaylistAgent.setOnDataSourceMissingHelper(helper);
+ }
+ }
+ }
+
+ @Override
+ public void clearOnDataSourceMissingHelper_impl() {
+ synchronized (mLock) {
+ mDsmHelper = null;
+ if (mSessionPlaylistAgent != null) {
+ mSessionPlaylistAgent.clearOnDataSourceMissingHelper();
+ }
+ }
+ }
+
+ ///////////////////////////////////////////////////
+ // Protected or private methods
+ ///////////////////////////////////////////////////
+
+ // Enforces that developers call all the methods on the initially given thread,
+ // because calls from the MediaController2 will also be run on that thread.
+ // TODO(jaewan): Should we allow calls from multiple threads?
+ // I prefer this way because allowing multiple threads may cause tricky issues like
+ // b/63446360. If {@link #setPlayer()} with {@code null} can be called from
+ // another thread, transport controls can still be called after that.
+ // That's basically the developer's mistake, but they cannot understand what's
+ // happening behind the scenes until we tell them so.
+ // If enforcing the calling thread doesn't look good, we can alternatively pick
+ // 1. Allow calls from random threads for all methods.
+ // 2. Allow calls from random threads for all methods, except for
+ // {@link #setPlayer()}.
+ void ensureCallingThread() {
+ // TODO(jaewan): Uncomment or remove
+ /*
+ if (mHandler.getLooper() != Looper.myLooper()) {
+ throw new IllegalStateException("Run this on the given thread");
+ }*/
+ }
+
+ private void notifyPlaylistChangedOnExecutor(MediaPlaylistAgent playlistAgent,
+ List<MediaItem2> list, MediaMetadata2 metadata) {
+ if (playlistAgent != mPlaylistAgent) {
+ // Ignore calls from the old agent.
+ return;
+ }
+ mCallback.onPlaylistChanged(mInstance, playlistAgent, list, metadata);
+ mSessionStub.notifyPlaylistChangedNotLocked(list, metadata);
+ }
+
+ private void notifyPlaylistMetadataChangedOnExecutor(MediaPlaylistAgent playlistAgent,
+ MediaMetadata2 metadata) {
+ if (playlistAgent != mPlaylistAgent) {
+ // Ignore calls from the old agent.
+ return;
+ }
+ mCallback.onPlaylistMetadataChanged(mInstance, playlistAgent, metadata);
+ mSessionStub.notifyPlaylistMetadataChangedNotLocked(metadata);
+ }
+
+ private void notifyRepeatModeChangedOnExecutor(MediaPlaylistAgent playlistAgent,
+ int repeatMode) {
+ if (playlistAgent != mPlaylistAgent) {
+ // Ignore calls from the old agent.
+ return;
+ }
+ mCallback.onRepeatModeChanged(mInstance, playlistAgent, repeatMode);
+ mSessionStub.notifyRepeatModeChangedNotLocked(repeatMode);
+ }
+
+ private void notifyShuffleModeChangedOnExecutor(MediaPlaylistAgent playlistAgent,
+ int shuffleMode) {
+ if (playlistAgent != mPlaylistAgent) {
+ // Ignore calls from the old agent.
+ return;
+ }
+ mCallback.onShuffleModeChanged(mInstance, playlistAgent, shuffleMode);
+ mSessionStub.notifyShuffleModeChangedNotLocked(shuffleMode);
+ }
+
+ private void notifyPlayerUpdatedNotLocked(MediaPlayerBase oldPlayer) {
+ final MediaPlayerBase player = mPlayer;
+ // TODO(jaewan): (Can be post-P) Find a better way than player.getPlayerState().
+ // In theory, Session.getXXX() may not be the same as Player.getXXX(),
+ // so we should notify the values from session.getXXX() instead of
+ // player.getXXX().
+ // Notify the controllers as well.
+ final int state = player.getPlayerState();
+ if (state != oldPlayer.getPlayerState()) {
+ mSessionStub.notifyPlayerStateChangedNotLocked(state);
+ }
+
+ final long currentTimeMs = System.currentTimeMillis();
+ final long position = player.getCurrentPosition();
+ if (position != oldPlayer.getCurrentPosition()) {
+ mSessionStub.notifyPositionChangedNotLocked(currentTimeMs, position);
+ }
+
+ final float speed = player.getPlaybackSpeed();
+ if (speed != oldPlayer.getPlaybackSpeed()) {
+ mSessionStub.notifyPlaybackSpeedChangedNotLocked(speed);
+ }
+
+ final long bufferedPosition = player.getBufferedPosition();
+ if (bufferedPosition != oldPlayer.getBufferedPosition()) {
+ mSessionStub.notifyBufferedPositionChangedNotLocked(bufferedPosition);
+ }
+ }
+
+ Context getContext() {
+ return mContext;
+ }
+
+ MediaSession2 getInstance() {
+ return mInstance;
+ }
+
+ MediaPlayerBase getPlayer() {
+ return mPlayer;
+ }
+
+ MediaPlaylistAgent getPlaylistAgent() {
+ return mPlaylistAgent;
+ }
+
+ Executor getCallbackExecutor() {
+ return mCallbackExecutor;
+ }
+
+ SessionCallback getCallback() {
+ return mCallback;
+ }
+
+ MediaSession2Stub getSessionStub() {
+ return mSessionStub;
+ }
+
+ VolumeProvider2 getVolumeProvider() {
+ return mVolumeProvider;
+ }
+
+ PlaybackInfo getPlaybackInfo() {
+ synchronized (mLock) {
+ return mPlaybackInfo;
+ }
+ }
+
+ PendingIntent getSessionActivity() {
+ return mSessionActivity;
+ }
+
+ private static class MyPlayerEventCallback extends PlayerEventCallback {
+ private final WeakReference<MediaSession2Impl> mSession;
+
+ private MyPlayerEventCallback(MediaSession2Impl session) {
+ mSession = new WeakReference<>(session);
+ }
+
+ @Override
+ public void onCurrentDataSourceChanged(MediaPlayerBase mpb, DataSourceDesc dsd) {
+ MediaSession2Impl session = getSession();
+ if (session == null || dsd == null) {
+ return;
+ }
+ session.getCallbackExecutor().execute(() -> {
+ MediaItem2 item = getMediaItem(session, dsd);
+ if (item == null) {
+ return;
+ }
+ session.getCallback().onCurrentMediaItemChanged(session.getInstance(), mpb, item);
+ // TODO (jaewan): Notify controllers through appropriate callback. (b/74505936)
+ });
+ }
+
+ @Override
+ public void onMediaPrepared(MediaPlayerBase mpb, DataSourceDesc dsd) {
+ MediaSession2Impl session = getSession();
+ if (session == null || dsd == null) {
+ return;
+ }
+ session.getCallbackExecutor().execute(() -> {
+ MediaItem2 item = getMediaItem(session, dsd);
+ if (item == null) {
+ return;
+ }
+ session.getCallback().onMediaPrepared(session.getInstance(), mpb, item);
+ // TODO (jaewan): Notify controllers through appropriate callback. (b/74505936)
+ });
+ }
+
+ @Override
+ public void onPlayerStateChanged(MediaPlayerBase mpb, int state) {
+ MediaSession2Impl session = getSession();
+ if (session == null) {
+ return;
+ }
+ session.getCallbackExecutor().execute(() -> {
+ session.getCallback().onPlayerStateChanged(session.getInstance(), mpb, state);
+ session.getSessionStub().notifyPlayerStateChangedNotLocked(state);
+ });
+ }
+
+ @Override
+ public void onBufferingStateChanged(MediaPlayerBase mpb, DataSourceDesc dsd, int state) {
+ MediaSession2Impl session = getSession();
+ if (session == null || dsd == null) {
+ return;
+ }
+ session.getCallbackExecutor().execute(() -> {
+ MediaItem2 item = getMediaItem(session, dsd);
+ if (item == null) {
+ return;
+ }
+ session.getCallback().onBufferingStateChanged(
+ session.getInstance(), mpb, item, state);
+ // TODO (jaewan): Notify controllers through appropriate callback. (b/74505936)
+ });
+ }
+
+ private MediaSession2Impl getSession() {
+ final MediaSession2Impl session = mSession.get();
+ if (session == null && DEBUG) {
+ Log.d(TAG, "Session is closed", new IllegalStateException());
+ }
+ return session;
+ }
+
+ private MediaItem2 getMediaItem(MediaSession2Impl session, DataSourceDesc dsd) {
+ MediaPlaylistAgent agent = session.getPlaylistAgent();
+ if (agent == null) {
+ if (DEBUG) {
+ Log.d(TAG, "Session is closed", new IllegalStateException());
+ }
+ return null;
+ }
+ MediaItem2 item = agent.getMediaItem(dsd);
+ if (item == null) {
+ if (DEBUG) {
+ Log.d(TAG, "Could not find matching item for dsd=" + dsd,
+ new NoSuchElementException());
+ }
+ }
+ return item;
+ }
+ }
+
+ private static class MyPlaylistEventCallback extends PlaylistEventCallback {
+ private final WeakReference<MediaSession2Impl> mSession;
+
+ private MyPlaylistEventCallback(MediaSession2Impl session) {
+ mSession = new WeakReference<>(session);
+ }
+
+ @Override
+ public void onPlaylistChanged(MediaPlaylistAgent playlistAgent, List<MediaItem2> list,
+ MediaMetadata2 metadata) {
+ final MediaSession2Impl session = mSession.get();
+ if (session == null) {
+ return;
+ }
+ session.notifyPlaylistChangedOnExecutor(playlistAgent, list, metadata);
+ }
+
+ @Override
+ public void onPlaylistMetadataChanged(MediaPlaylistAgent playlistAgent,
+ MediaMetadata2 metadata) {
+ final MediaSession2Impl session = mSession.get();
+ if (session == null) {
+ return;
+ }
+ session.notifyPlaylistMetadataChangedOnExecutor(playlistAgent, metadata);
+ }
+
+ @Override
+ public void onRepeatModeChanged(MediaPlaylistAgent playlistAgent, int repeatMode) {
+ final MediaSession2Impl session = mSession.get();
+ if (session == null) {
+ return;
+ }
+ session.notifyRepeatModeChangedOnExecutor(playlistAgent, repeatMode);
+ }
+
+ @Override
+ public void onShuffleModeChanged(MediaPlaylistAgent playlistAgent, int shuffleMode) {
+ final MediaSession2Impl session = mSession.get();
+ if (session == null) {
+ return;
+ }
+ session.notifyShuffleModeChangedOnExecutor(playlistAgent, shuffleMode);
+ }
+ }
+
+ public static final class CommandImpl implements CommandProvider {
+ private static final String KEY_COMMAND_CODE
+ = "android.media.media_session2.command.command_code";
+ private static final String KEY_COMMAND_CUSTOM_COMMAND
+ = "android.media.media_session2.command.custom_command";
+ private static final String KEY_COMMAND_EXTRAS
+ = "android.media.media_session2.command.extras";
+
+ private final SessionCommand2 mInstance;
+ private final int mCommandCode;
+ // Non-null if this is a custom command.
+ private final String mCustomCommand;
+ private final Bundle mExtras;
+
+ public CommandImpl(SessionCommand2 instance, int commandCode) {
+ mInstance = instance;
+ mCommandCode = commandCode;
+ mCustomCommand = null;
+ mExtras = null;
+ }
+
+ public CommandImpl(SessionCommand2 instance, @NonNull String action,
+ @Nullable Bundle extras) {
+ if (action == null) {
+ throw new IllegalArgumentException("action shouldn't be null");
+ }
+ mInstance = instance;
+ mCommandCode = COMMAND_CODE_CUSTOM;
+ mCustomCommand = action;
+ mExtras = extras;
+ }
+
+ @Override
+ public int getCommandCode_impl() {
+ return mCommandCode;
+ }
+
+ @Override
+ public @Nullable String getCustomCommand_impl() {
+ return mCustomCommand;
+ }
+
+ @Override
+ public @Nullable Bundle getExtras_impl() {
+ return mExtras;
+ }
+
+ /**
+ * @return a new Bundle instance from the Command
+ */
+ @Override
+ public Bundle toBundle_impl() {
+ Bundle bundle = new Bundle();
+ bundle.putInt(KEY_COMMAND_CODE, mCommandCode);
+ bundle.putString(KEY_COMMAND_CUSTOM_COMMAND, mCustomCommand);
+ bundle.putBundle(KEY_COMMAND_EXTRAS, mExtras);
+ return bundle;
+ }
+
+ /**
+ * @return a new Command instance from the Bundle
+ */
+ public static SessionCommand2 fromBundle_impl(@NonNull Bundle command) {
+ if (command == null) {
+ throw new IllegalArgumentException("command shouldn't be null");
+ }
+ int code = command.getInt(KEY_COMMAND_CODE);
+ if (code != COMMAND_CODE_CUSTOM) {
+ return new SessionCommand2(code);
+ } else {
+ String customCommand = command.getString(KEY_COMMAND_CUSTOM_COMMAND);
+ if (customCommand == null) {
+ return null;
+ }
+ return new SessionCommand2(customCommand, command.getBundle(KEY_COMMAND_EXTRAS));
+ }
+ }
+
+ @Override
+ public boolean equals_impl(Object obj) {
+ if (!(obj instanceof CommandImpl)) {
+ return false;
+ }
+ CommandImpl other = (CommandImpl) obj;
+ // TODO(jaewan): Compare Commands with the generated UUID, as we're doing for the MI2.
+ return mCommandCode == other.mCommandCode
+ && TextUtils.equals(mCustomCommand, other.mCustomCommand);
+ }
+
+ @Override
+ public int hashCode_impl() {
+ final int prime = 31;
+ return ((mCustomCommand != null)
+ ? mCustomCommand.hashCode() : 0) * prime + mCommandCode;
+ }
+ }
+
+ /**
+ * Represent set of {@link SessionCommand2}.
+ */
+ public static class CommandGroupImpl implements CommandGroupProvider {
+ private static final String KEY_COMMANDS =
+ "android.media.mediasession2.commandgroup.commands";
+
+ // Prefix for all command codes
+ private static final String PREFIX_COMMAND_CODE = "COMMAND_CODE_";
+
+ // Prefix for command codes that will be sent directly to the MediaPlayerBase
+ private static final String PREFIX_COMMAND_CODE_PLAYBACK = "COMMAND_CODE_PLAYBACK_";
+
+ // Prefix for command codes that will be sent directly to the MediaPlaylistAgent
+ private static final String PREFIX_COMMAND_CODE_PLAYLIST = "COMMAND_CODE_PLAYLIST_";
+
+ private Set<SessionCommand2> mCommands = new HashSet<>();
+ private final SessionCommandGroup2 mInstance;
+
+ public CommandGroupImpl(SessionCommandGroup2 instance, Object other) {
+ mInstance = instance;
+ if (other instanceof CommandGroupImpl) {
+ mCommands.addAll(((CommandGroupImpl) other).mCommands);
+ }
+ }
+
+ public CommandGroupImpl() {
+ mInstance = new SessionCommandGroup2(this);
+ }
+
+ @Override
+ public void addCommand_impl(@NonNull SessionCommand2 command) {
+ if (command == null) {
+ throw new IllegalArgumentException("command shouldn't be null");
+ }
+ mCommands.add(command);
+ }
+
+ @Override
+ public void addAllPredefinedCommands_impl() {
+ addCommandsWithPrefix(PREFIX_COMMAND_CODE);
+ }
+
+ void addAllPlaybackCommands() {
+ addCommandsWithPrefix(PREFIX_COMMAND_CODE_PLAYBACK);
+ }
+
+ void addAllPlaylistCommands() {
+ addCommandsWithPrefix(PREFIX_COMMAND_CODE_PLAYLIST);
+ }
+
+ private void addCommandsWithPrefix(String prefix) {
+ // TODO(jaewan): (Can be post-P): Don't use reflection for this purpose.
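+ // Reflect over the public COMMAND_CODE_* constants in MediaSession2 and add one command per matching code.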
+ final Field[] fields = MediaSession2.class.getFields();
+ if (fields != null) {
+ for (int i = 0; i < fields.length; i++) {
+ if (fields[i].getName().startsWith(prefix)) {
+ try {
+ mCommands.add(new SessionCommand2(fields[i].getInt(null)));
+ } catch (IllegalAccessException e) {
+ Log.w(TAG, "Unexpected " + fields[i] + " in MediaSession2");
+ }
+ }
+ }
+ }
+ }
+
+ @Override
+ public void removeCommand_impl(@NonNull SessionCommand2 command) {
+ if (command == null) {
+ throw new IllegalArgumentException("command shouldn't be null");
+ }
+ mCommands.remove(command);
+ }
+
+ @Override
+ public boolean hasCommand_impl(@NonNull SessionCommand2 command) {
+ if (command == null) {
+ throw new IllegalArgumentException("command shouldn't be null");
+ }
+ return mCommands.contains(command);
+ }
+
+ @Override
+ public boolean hasCommand_impl(int code) {
+ if (code == COMMAND_CODE_CUSTOM) {
+ throw new IllegalArgumentException("Use hasCommand(Command) for custom command");
+ }
+ for (SessionCommand2 command : mCommands) {
+ if (command.getCommandCode() == code) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public Set<SessionCommand2> getCommands_impl() {
+ return getCommands();
+ }
+
+ public Set<SessionCommand2> getCommands() {
+ return Collections.unmodifiableSet(mCommands);
+ }
+
+ /**
+ * @return new bundle from the CommandGroup
+ * @hide
+ */
+ @Override
+ public Bundle toBundle_impl() {
+ ArrayList<Bundle> list = new ArrayList<>();
+ for (SessionCommand2 command : mCommands) {
+ list.add(command.toBundle());
+ }
+ Bundle bundle = new Bundle();
+ bundle.putParcelableArrayList(KEY_COMMANDS, list);
+ return bundle;
+ }
+
+ /**
+ * @return new instance of CommandGroup from the bundle
+ * @hide
+ */
+ public static @Nullable SessionCommandGroup2 fromBundle_impl(Bundle commands) {
+ if (commands == null) {
+ return null;
+ }
+ List<Parcelable> list = commands.getParcelableArrayList(KEY_COMMANDS);
+ if (list == null) {
+ return null;
+ }
+ SessionCommandGroup2 commandGroup = new SessionCommandGroup2();
+ for (int i = 0; i < list.size(); i++) {
+ Parcelable parcelable = list.get(i);
+ if (!(parcelable instanceof Bundle)) {
+ continue;
+ }
+ Bundle commandBundle = (Bundle) parcelable;
+ SessionCommand2 command = SessionCommand2.fromBundle(commandBundle);
+ if (command != null) {
+ commandGroup.addCommand(command);
+ }
+ }
+ return commandGroup;
+ }
+ }
+
+ public static class ControllerInfoImpl implements ControllerInfoProvider {
+ private final ControllerInfo mInstance;
+ private final int mUid;
+ private final String mPackageName;
+ private final boolean mIsTrusted;
+ private final IMediaController2 mControllerBinder;
+
+ public ControllerInfoImpl(Context context, ControllerInfo instance, int uid,
+ int pid, @NonNull String packageName, @NonNull IMediaController2 callback) {
+ if (TextUtils.isEmpty(packageName)) {
+ throw new IllegalArgumentException("packageName shouldn't be empty");
+ }
+ if (callback == null) {
+ throw new IllegalArgumentException("callback shouldn't be null");
+ }
+
+ mInstance = instance;
+ mUid = uid;
+ mPackageName = packageName;
+ mControllerBinder = callback;
+ MediaSessionManager manager =
+ (MediaSessionManager) context.getSystemService(Context.MEDIA_SESSION_SERVICE);
+ // Ask the server whether the controller is trusted.
+ // The app cannot know this because apps cannot query the enabled notification
+ // listeners of another package, but the system server can.
+ mIsTrusted = manager.isTrustedForMediaControl(
+ new MediaSessionManager.RemoteUserInfo(packageName, pid, uid));
+ }
+
+ @Override
+ public String getPackageName_impl() {
+ return mPackageName;
+ }
+
+ @Override
+ public int getUid_impl() {
+ return mUid;
+ }
+
+ @Override
+ public boolean isTrusted_impl() {
+ return mIsTrusted;
+ }
+
+ @Override
+ public int hashCode_impl() {
+ return mControllerBinder.hashCode();
+ }
+
+ @Override
+ public boolean equals_impl(Object obj) {
+ if (!(obj instanceof ControllerInfo)) {
+ return false;
+ }
+ return equals(((ControllerInfo) obj).getProvider());
+ }
+
+ @Override
+ public String toString_impl() {
+ return "ControllerInfo {pkg=" + mPackageName + ", uid=" + mUid + ", trusted="
+ + mIsTrusted + "}";
+ }
+
+ @Override
+ public int hashCode() {
+ return mControllerBinder.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof ControllerInfoImpl)) {
+ return false;
+ }
+ ControllerInfoImpl other = (ControllerInfoImpl) obj;
+ return mControllerBinder.asBinder().equals(other.mControllerBinder.asBinder());
+ }
+
+ ControllerInfo getInstance() {
+ return mInstance;
+ }
+
+ IBinder getId() {
+ return mControllerBinder.asBinder();
+ }
+
+ IMediaController2 getControllerBinder() {
+ return mControllerBinder;
+ }
+
+ static ControllerInfoImpl from(ControllerInfo controller) {
+ return (ControllerInfoImpl) controller.getProvider();
+ }
+ }
+
+ public static class CommandButtonImpl implements CommandButtonProvider {
+ private static final String KEY_COMMAND
+ = "android.media.media_session2.command_button.command";
+ private static final String KEY_ICON_RES_ID
+ = "android.media.media_session2.command_button.icon_res_id";
+ private static final String KEY_DISPLAY_NAME
+ = "android.media.media_session2.command_button.display_name";
+ private static final String KEY_EXTRAS
+ = "android.media.media_session2.command_button.extras";
+ private static final String KEY_ENABLED
+ = "android.media.media_session2.command_button.enabled";
+
+ private final CommandButton mInstance;
+ private SessionCommand2 mCommand;
+ private int mIconResId;
+ private String mDisplayName;
+ private Bundle mExtras;
+ private boolean mEnabled;
+
+ public CommandButtonImpl(@Nullable SessionCommand2 command, int iconResId,
+ @Nullable String displayName, Bundle extras, boolean enabled) {
+ mCommand = command;
+ mIconResId = iconResId;
+ mDisplayName = displayName;
+ mExtras = extras;
+ mEnabled = enabled;
+ mInstance = new CommandButton(this);
+ }
+
+ @Override
+ public @Nullable SessionCommand2 getCommand_impl() {
+ return mCommand;
+ }
+
+ @Override
+ public int getIconResId_impl() {
+ return mIconResId;
+ }
+
+ @Override
+ public @Nullable String getDisplayName_impl() {
+ return mDisplayName;
+ }
+
+ @Override
+ public @Nullable Bundle getExtras_impl() {
+ return mExtras;
+ }
+
+ @Override
+ public boolean isEnabled_impl() {
+ return mEnabled;
+ }
+
+ @NonNull Bundle toBundle() {
+ Bundle bundle = new Bundle();
+ bundle.putBundle(KEY_COMMAND, mCommand.toBundle());
+ bundle.putInt(KEY_ICON_RES_ID, mIconResId);
+ bundle.putString(KEY_DISPLAY_NAME, mDisplayName);
+ bundle.putBundle(KEY_EXTRAS, mExtras);
+ bundle.putBoolean(KEY_ENABLED, mEnabled);
+ return bundle;
+ }
+
+ static @Nullable CommandButton fromBundle(Bundle bundle) {
+ if (bundle == null) {
+ return null;
+ }
+ CommandButton.Builder builder = new CommandButton.Builder();
+ builder.setCommand(SessionCommand2.fromBundle(bundle.getBundle(KEY_COMMAND)));
+ builder.setIconResId(bundle.getInt(KEY_ICON_RES_ID, 0));
+ builder.setDisplayName(bundle.getString(KEY_DISPLAY_NAME));
+ builder.setExtras(bundle.getBundle(KEY_EXTRAS));
+ builder.setEnabled(bundle.getBoolean(KEY_ENABLED));
+ try {
+ return builder.build();
+ } catch (IllegalStateException e) {
+ // Malformed or version mismatch. Return null for now.
+ return null;
+ }
+ }
+
+ /**
+ * Builder for {@link CommandButton}.
+ */
+ public static class BuilderImpl implements CommandButtonProvider.BuilderProvider {
+ private final CommandButton.Builder mInstance;
+ private SessionCommand2 mCommand;
+ private int mIconResId;
+ private String mDisplayName;
+ private Bundle mExtras;
+ private boolean mEnabled;
+
+ public BuilderImpl(CommandButton.Builder instance) {
+ mInstance = instance;
+ mEnabled = true;
+ }
+
+ @Override
+ public CommandButton.Builder setCommand_impl(SessionCommand2 command) {
+ mCommand = command;
+ return mInstance;
+ }
+
+ @Override
+ public CommandButton.Builder setIconResId_impl(int resId) {
+ mIconResId = resId;
+ return mInstance;
+ }
+
+ @Override
+ public CommandButton.Builder setDisplayName_impl(String displayName) {
+ mDisplayName = displayName;
+ return mInstance;
+ }
+
+ @Override
+ public CommandButton.Builder setEnabled_impl(boolean enabled) {
+ mEnabled = enabled;
+ return mInstance;
+ }
+
+ @Override
+ public CommandButton.Builder setExtras_impl(Bundle extras) {
+ mExtras = extras;
+ return mInstance;
+ }
+
+ @Override
+ public CommandButton build_impl() {
+ if (mEnabled && mCommand == null) {
+ throw new IllegalStateException("Enabled button needs Command"
+ + " for controller to invoke the command");
+ }
+ if (mCommand != null && mCommand.getCommandCode() == COMMAND_CODE_CUSTOM
+ && (mIconResId == 0 || TextUtils.isEmpty(mDisplayName))) {
+ throw new IllegalStateException("Custom commands needs icon and"
+ + " and name to display");
+ }
+ return new CommandButtonImpl(mCommand, mIconResId, mDisplayName, mExtras, mEnabled)
+ .mInstance;
+ }
+ }
+ }
+
+ public static abstract class BuilderBaseImpl<T extends MediaSession2, C extends SessionCallback>
+ implements BuilderBaseProvider<T, C> {
+ final Context mContext;
+ MediaPlayerBase mPlayer;
+ String mId;
+ Executor mCallbackExecutor;
+ C mCallback;
+ MediaPlaylistAgent mPlaylistAgent;
+ VolumeProvider2 mVolumeProvider;
+ PendingIntent mSessionActivity;
+
+ /**
+ * Constructor.
+ *
+ * @param context a context
+ * @throws IllegalArgumentException if the context is {@code null}.
+ */
+ // TODO(jaewan): Also need executor
+ public BuilderBaseImpl(@NonNull Context context) {
+ if (context == null) {
+ throw new IllegalArgumentException("context shouldn't be null");
+ }
+ mContext = context;
+ // Ensure non-null
+ mId = "";
+ }
+
+ @Override
+ public void setPlayer_impl(@NonNull MediaPlayerBase player) {
+ if (player == null) {
+ throw new IllegalArgumentException("player shouldn't be null");
+ }
+ mPlayer = player;
+ }
+
+ @Override
+ public void setPlaylistAgent_impl(@NonNull MediaPlaylistAgent playlistAgent) {
+ if (playlistAgent == null) {
+ throw new IllegalArgumentException("playlistAgent shouldn't be null");
+ }
+ mPlaylistAgent = playlistAgent;
+ }
+
+ @Override
+ public void setVolumeProvider_impl(VolumeProvider2 volumeProvider) {
+ mVolumeProvider = volumeProvider;
+ }
+
+ @Override
+ public void setSessionActivity_impl(PendingIntent pi) {
+ mSessionActivity = pi;
+ }
+
+ @Override
+ public void setId_impl(@NonNull String id) {
+ if (id == null) {
+ throw new IllegalArgumentException("id shouldn't be null");
+ }
+ mId = id;
+ }
+
+ @Override
+ public void setSessionCallback_impl(@NonNull Executor executor, @NonNull C callback) {
+ if (executor == null) {
+ throw new IllegalArgumentException("executor shouldn't be null");
+ }
+ if (callback == null) {
+ throw new IllegalArgumentException("callback shouldn't be null");
+ }
+ mCallbackExecutor = executor;
+ mCallback = callback;
+ }
+
+ @Override
+ public abstract T build_impl();
+ }
+
+ public static class BuilderImpl extends BuilderBaseImpl<MediaSession2, SessionCallback> {
+ public BuilderImpl(Context context, Builder instance) {
+ super(context);
+ }
+
+ @Override
+ public MediaSession2 build_impl() {
+ if (mCallbackExecutor == null) {
+ mCallbackExecutor = mContext.getMainExecutor();
+ }
+ if (mCallback == null) {
+ mCallback = new SessionCallback() {};
+ }
+ return new MediaSession2Impl(mContext, mPlayer, mId, mPlaylistAgent,
+ mVolumeProvider, mSessionActivity, mCallbackExecutor, mCallback).getInstance();
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java b/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
new file mode 100644
index 0000000..ec657d7
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaSession2Stub.java
@@ -0,0 +1,1113 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import android.app.PendingIntent;
+import android.content.Context;
+import android.media.MediaController2;
+import android.media.MediaItem2;
+import android.media.MediaLibraryService2.LibraryRoot;
+import android.media.MediaMetadata2;
+import android.media.SessionCommand2;
+import android.media.MediaSession2.CommandButton;
+import android.media.SessionCommandGroup2;
+import android.media.MediaSession2.ControllerInfo;
+import android.media.Rating2;
+import android.media.VolumeProvider2;
+import android.net.Uri;
+import android.os.Binder;
+import android.os.Bundle;
+import android.os.DeadObjectException;
+import android.os.IBinder;
+import android.os.RemoteException;
+import android.os.ResultReceiver;
+import android.support.annotation.GuardedBy;
+import android.support.annotation.NonNull;
+import android.text.TextUtils;
+import android.util.ArrayMap;
+import android.util.Log;
+import android.util.SparseArray;
+
+import com.android.media.MediaLibraryService2Impl.MediaLibrarySessionImpl;
+import com.android.media.MediaSession2Impl.CommandButtonImpl;
+import com.android.media.MediaSession2Impl.CommandGroupImpl;
+import com.android.media.MediaSession2Impl.ControllerInfoImpl;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+public class MediaSession2Stub extends IMediaSession2.Stub {
+
+ static final String ARGUMENT_KEY_POSITION = "android.media.media_session2.key_position";
+ static final String ARGUMENT_KEY_ITEM_INDEX = "android.media.media_session2.key_item_index";
+ static final String ARGUMENT_KEY_PLAYLIST_PARAMS =
+ "android.media.media_session2.key_playlist_params";
+
+ private static final String TAG = "MediaSession2Stub";
+ private static final boolean DEBUG = true; // TODO(jaewan): Rename.
+
+ private static final SparseArray<SessionCommand2> sCommandsForOnCommandRequest =
+ new SparseArray<>();
+
+ private final Object mLock = new Object();
+ private final WeakReference<MediaSession2Impl> mSession;
+
+ @GuardedBy("mLock")
+ private final ArrayMap<IBinder, ControllerInfo> mControllers = new ArrayMap<>();
+ @GuardedBy("mLock")
+ private final Set<IBinder> mConnectingControllers = new HashSet<>();
+ @GuardedBy("mLock")
+ private final ArrayMap<ControllerInfo, SessionCommandGroup2> mAllowedCommandGroupMap =
+ new ArrayMap<>();
+ @GuardedBy("mLock")
+ private final ArrayMap<ControllerInfo, Set<String>> mSubscriptions = new ArrayMap<>();
+
+ public MediaSession2Stub(MediaSession2Impl session) {
+ mSession = new WeakReference<>(session);
+
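+ // Build (once) the set of playback/playlist commands that are checked with onCommandRequest() before running.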
+ synchronized (sCommandsForOnCommandRequest) {
+ if (sCommandsForOnCommandRequest.size() == 0) {
+ CommandGroupImpl group = new CommandGroupImpl();
+ group.addAllPlaybackCommands();
+ group.addAllPlaylistCommands();
+ Set<SessionCommand2> commands = group.getCommands();
+ for (SessionCommand2 command : commands) {
+ sCommandsForOnCommandRequest.append(command.getCommandCode(), command);
+ }
+ }
+ }
+ }
+
+ public void destroyNotLocked() {
+ final List<ControllerInfo> list;
+ synchronized (mLock) {
+ mSession.clear();
+ list = getControllers();
+ mControllers.clear();
+ }
+ for (int i = 0; i < list.size(); i++) {
+ IMediaController2 controllerBinder =
+ ((ControllerInfoImpl) list.get(i).getProvider()).getControllerBinder();
+ try {
+ // Should be called without holding a lock to prevent potential deadlock.
+ controllerBinder.onDisconnected();
+ } catch (RemoteException e) {
+ // Controller is gone. Should be fine because we're destroying.
+ }
+ }
+ }
+
+ private MediaSession2Impl getSession() {
+ final MediaSession2Impl session = mSession.get();
+ if (session == null && DEBUG) {
+ Log.d(TAG, "Session is closed", new IllegalStateException());
+ }
+ return session;
+ }
+
+ private MediaLibrarySessionImpl getLibrarySession() throws IllegalStateException {
+ final MediaSession2Impl session = getSession();
+ if (!(session instanceof MediaLibrarySessionImpl)) {
+ throw new IllegalStateException("Session isn't a library session");
+ }
+ return (MediaLibrarySessionImpl) session;
+ }
+
+ // Get the controller if the command from the caller to the session can be handled.
+ private ControllerInfo getControllerIfAble(IMediaController2 caller) {
+ synchronized (mLock) {
+ final ControllerInfo controllerInfo = mControllers.get(caller.asBinder());
+ if (controllerInfo == null && DEBUG) {
+ Log.d(TAG, "Controller is disconnected", new IllegalStateException());
+ }
+ return controllerInfo;
+ }
+ }
+
+ // Get the controller if the command from the caller to the session can be handled.
+ private ControllerInfo getControllerIfAble(IMediaController2 caller, int commandCode) {
+ synchronized (mLock) {
+ final ControllerInfo controllerInfo = getControllerIfAble(caller);
+ if (controllerInfo == null) {
+ return null;
+ }
+ SessionCommandGroup2 allowedCommands = mAllowedCommandGroupMap.get(controllerInfo);
+ if (allowedCommands == null) {
+ Log.w(TAG, "Controller with null allowed commands. Ignoring",
+ new IllegalStateException());
+ return null;
+ }
+ if (!allowedCommands.hasCommand(commandCode)) {
+ if (DEBUG) {
+ Log.d(TAG, "Controller isn't allowed for command " + commandCode);
+ }
+ return null;
+ }
+ return controllerInfo;
+ }
+ }
+
+ // Get the controller if the command from the caller to the session can be handled.
+ private ControllerInfo getControllerIfAble(IMediaController2 caller, SessionCommand2 command) {
+ synchronized (mLock) {
+ final ControllerInfo controllerInfo = getControllerIfAble(caller);
+ if (controllerInfo == null) {
+ return null;
+ }
+ SessionCommandGroup2 allowedCommands = mAllowedCommandGroupMap.get(controllerInfo);
+ if (allowedCommands == null) {
+ Log.w(TAG, "Controller with null allowed commands. Ignoring",
+ new IllegalStateException());
+ return null;
+ }
+ if (!allowedCommands.hasCommand(command)) {
+ if (DEBUG) {
+ Log.d(TAG, "Controller isn't allowed for command " + command);
+ }
+ return null;
+ }
+ return controllerInfo;
+ }
+ }
+
+ // Return the binder if the session can send a command to the controller.
+ private IMediaController2 getControllerBinderIfAble(ControllerInfo controller) {
+ if (getSession() == null) {
+ // getSession() already logged if session is closed.
+ return null;
+ }
+ final ControllerInfoImpl impl = ControllerInfoImpl.from(controller);
+ synchronized (mLock) {
+ if (mControllers.get(impl.getId()) != null
+ || mConnectingControllers.contains(impl.getId())) {
+ return impl.getControllerBinder();
+ }
+ if (DEBUG) {
+ Log.d(TAG, controller + " isn't connected nor connecting",
+ new IllegalArgumentException());
+ }
+ return null;
+ }
+ }
+
+ // Return the binder if the session can send a command to the controller.
+ private IMediaController2 getControllerBinderIfAble(ControllerInfo controller,
+ int commandCode) {
+ synchronized (mLock) {
+ SessionCommandGroup2 allowedCommands = mAllowedCommandGroupMap.get(controller);
+ if (allowedCommands == null) {
+ Log.w(TAG, "Controller with null allowed commands. Ignoring");
+ return null;
+ }
+ if (!allowedCommands.hasCommand(commandCode)) {
+ if (DEBUG) {
+ Log.d(TAG, "Controller isn't allowed for command " + commandCode);
+ }
+ return null;
+ }
+ return getControllerBinderIfAble(controller);
+ }
+ }
+
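+ // Run the given runnable on the callback executor, re-checking the caller's permission and asking
+ // SessionCallback#onCommandRequest() first for predefined commands.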
+ private void onCommand(@NonNull IMediaController2 caller, int commandCode,
+ @NonNull SessionRunnable runnable) {
+ final MediaSession2Impl session = getSession();
+ final ControllerInfo controller = getControllerIfAble(caller, commandCode);
+ if (session == null || controller == null) {
+ return;
+ }
+ session.getCallbackExecutor().execute(() -> {
+ if (getControllerIfAble(caller, commandCode) == null) {
+ return;
+ }
+ SessionCommand2 command = sCommandsForOnCommandRequest.get(commandCode);
+ if (command != null) {
+ boolean accepted = session.getCallback().onCommandRequest(session.getInstance(),
+ controller, command);
+ if (!accepted) {
+ // Don't run rejected command.
+ if (DEBUG) {
+ Log.d(TAG, "Command (code=" + commandCode + ") from "
+ + controller + " was rejected by " + session);
+ }
+ return;
+ }
+ }
+ runnable.run(session, controller);
+ });
+ }
+
+ private void onBrowserCommand(@NonNull IMediaController2 caller,
+ @NonNull LibrarySessionRunnable runnable) {
+ final MediaLibrarySessionImpl session = getLibrarySession();
+ // TODO(jaewan): Consider command code
+ final ControllerInfo controller = getControllerIfAble(caller);
+ if (session == null || controller == null) {
+ return;
+ }
+ session.getCallbackExecutor().execute(() -> {
+ // TODO(jaewan): Consider command code
+ if (getControllerIfAble(caller) == null) {
+ return;
+ }
+ runnable.run(session, controller);
+ });
+ }
+
+
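+ // Notify every connected controller that is allowed the given command code.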
+ private void notifyAll(int commandCode, @NonNull NotifyRunnable runnable) {
+ List<ControllerInfo> controllers = getControllers();
+ for (int i = 0; i < controllers.size(); i++) {
+ notifyInternal(controllers.get(i),
+ getControllerBinderIfAble(controllers.get(i), commandCode), runnable);
+ }
+ }
+
+ private void notifyAll(@NonNull NotifyRunnable runnable) {
+ List<ControllerInfo> controllers = getControllers();
+ for (int i = 0; i < controllers.size(); i++) {
+ notifyInternal(controllers.get(i),
+ getControllerBinderIfAble(controllers.get(i)), runnable);
+ }
+ }
+
+ private void notify(@NonNull ControllerInfo controller, @NonNull NotifyRunnable runnable) {
+ notifyInternal(controller, getControllerBinderIfAble(controller), runnable);
+ }
+
+ private void notify(@NonNull ControllerInfo controller, int commandCode,
+ @NonNull NotifyRunnable runnable) {
+ notifyInternal(controller, getControllerBinderIfAble(controller, commandCode), runnable);
+ }
+
+ // Do not call this API directly. Use notify() instead.
+ private void notifyInternal(@NonNull ControllerInfo controller,
+ @NonNull IMediaController2 iController, @NonNull NotifyRunnable runnable) {
+ if (controller == null || iController == null) {
+ return;
+ }
+ try {
+ runnable.run(controller, iController);
+ } catch (DeadObjectException e) {
+ if (DEBUG) {
+ Log.d(TAG, controller.toString() + " is gone", e);
+ }
+ onControllerClosed(iController);
+ } catch (RemoteException e) {
+ // Currently it's TransactionTooLargeException or DeadSystemException.
+ // We'd better leave a log for those cases because
+ // - TransactionTooLargeException means that we may need to fix our code
+ // (e.g. add pagination or a special way to deliver Bitmaps).
+ // - DeadSystemException means that errors around it can be ignored.
+ Log.w(TAG, "Exception in " + controller.toString(), e);
+ }
+ }
+
+ private void onControllerClosed(IMediaController2 iController) {
+ ControllerInfo controller;
+ synchronized (mLock) {
+ controller = mControllers.remove(iController.asBinder());
+ if (DEBUG) {
+ Log.d(TAG, "releasing " + controller);
+ }
+ mSubscriptions.remove(controller);
+ }
+ final MediaSession2Impl session = getSession();
+ if (session == null || controller == null) {
+ return;
+ }
+ session.getCallbackExecutor().execute(() -> {
+ session.getCallback().onDisconnected(session.getInstance(), controller);
+ });
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ // AIDL methods for session overrides
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ @Override
+ public void connect(final IMediaController2 caller, final String callingPackage)
+ throws RuntimeException {
+ final MediaSession2Impl session = getSession();
+ if (session == null) {
+ return;
+ }
+ final Context context = session.getContext();
+ final ControllerInfo controllerInfo = new ControllerInfo(context,
+ Binder.getCallingUid(), Binder.getCallingPid(), callingPackage, caller);
+ session.getCallbackExecutor().execute(() -> {
+ if (getSession() == null) {
+ return;
+ }
+ synchronized (mLock) {
+ // Keep track of connecting controllers.
+ // This lets sessions call APIs in onConnect() (e.g. setCustomLayout())
+ // instead of leaving those calls pending.
+ mConnectingControllers.add(ControllerInfoImpl.from(controllerInfo).getId());
+ }
+ SessionCommandGroup2 allowedCommands = session.getCallback().onConnect(
+ session.getInstance(), controllerInfo);
+ // Don't reject a connection request from a trusted app.
+ // Otherwise the server would fail to retrieve the session's information when
+ // dispatching media keys to it.
+ boolean accept = allowedCommands != null || controllerInfo.isTrusted();
+ if (accept) {
+ ControllerInfoImpl controllerImpl = ControllerInfoImpl.from(controllerInfo);
+ if (DEBUG) {
+ Log.d(TAG, "Accepting connection, controllerInfo=" + controllerInfo
+ + " allowedCommands=" + allowedCommands);
+ }
+ if (allowedCommands == null) {
+ // For trusted apps, send non-null allowed commands to keep connection.
+ allowedCommands = new SessionCommandGroup2();
+ }
+ synchronized (mLock) {
+ mConnectingControllers.remove(controllerImpl.getId());
+ mControllers.put(controllerImpl.getId(), controllerInfo);
+ mAllowedCommandGroupMap.put(controllerInfo, allowedCommands);
+ }
+ // If the connection is accepted, notify the current state to the controller.
+ // This is needed because we cannot make synchronous calls between the
+ // session and the controller.
+ // Note: We're doing this after the onConnectionChanged(), but there's no guarantee
+ // that the events here are notified after the onConnected(), because
+ // IMediaController2 is oneway (i.e. an async call) and the Stub will
+ // use a thread pool for incoming calls.
+ final int playerState = session.getInstance().getPlayerState();
+ final long positionEventTimeMs = System.currentTimeMillis();
+ final long positionMs = session.getInstance().getCurrentPosition();
+ final float playbackSpeed = session.getInstance().getPlaybackSpeed();
+ final long bufferedPositionMs = session.getInstance().getBufferedPosition();
+ final Bundle playbackInfoBundle = ((MediaController2Impl.PlaybackInfoImpl)
+ session.getPlaybackInfo().getProvider()).toBundle();
+ final int repeatMode = session.getInstance().getRepeatMode();
+ final int shuffleMode = session.getInstance().getShuffleMode();
+ final PendingIntent sessionActivity = session.getSessionActivity();
+ final List<MediaItem2> playlist =
+ allowedCommands.hasCommand(SessionCommand2.COMMAND_CODE_PLAYLIST_GET_LIST)
+ ? session.getInstance().getPlaylist() : null;
+ final List<Bundle> playlistBundle;
+ if (playlist != null) {
+ playlistBundle = new ArrayList<>();
+ // TODO(jaewan): Find a way to avoid concurrent modification exception.
+ for (int i = 0; i < playlist.size(); i++) {
+ final MediaItem2 item = playlist.get(i);
+ if (item != null) {
+ final Bundle itemBundle = item.toBundle();
+ if (itemBundle != null) {
+ playlistBundle.add(itemBundle);
+ }
+ }
+ }
+ } else {
+ playlistBundle = null;
+ }
+
+ // Double check if session is still there, because close() can be called in another
+ // thread.
+ if (getSession() == null) {
+ return;
+ }
+ try {
+ caller.onConnected(MediaSession2Stub.this, allowedCommands.toBundle(),
+ playerState, positionEventTimeMs, positionMs, playbackSpeed,
+ bufferedPositionMs, playbackInfoBundle, repeatMode, shuffleMode,
+ playlistBundle, sessionActivity);
+ } catch (RemoteException e) {
+ // Controller may have died prematurely.
+ // TODO(jaewan): Handle here.
+ }
+ } else {
+ synchronized (mLock) {
+ mConnectingControllers.remove(ControllerInfoImpl.from(controllerInfo).getId());
+ }
+ if (DEBUG) {
+ Log.d(TAG, "Rejecting connection, controllerInfo=" + controllerInfo);
+ }
+ try {
+ caller.onDisconnected();
+ } catch (RemoteException e) {
+ // Controller may have died prematurely.
+ // Not an issue because we'll ignore it anyway.
+ }
+ }
+ });
+ }
+
+ @Override
+ public void release(final IMediaController2 caller) throws RemoteException {
+ onControllerClosed(caller);
+ }
+
+ @Override
+ public void setVolumeTo(final IMediaController2 caller, final int value, final int flags)
+ throws RuntimeException {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_SET_VOLUME,
+ (session, controller) -> {
+ VolumeProvider2 volumeProvider = session.getVolumeProvider();
+ if (volumeProvider == null) {
+ // TODO(jaewan): Set local stream volume
+ } else {
+ volumeProvider.onSetVolumeTo(value);
+ }
+ });
+ }
+
+ @Override
+ public void adjustVolume(IMediaController2 caller, int direction, int flags)
+ throws RuntimeException {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_SET_VOLUME,
+ (session, controller) -> {
+ VolumeProvider2 volumeProvider = session.getVolumeProvider();
+ if (volumeProvider == null) {
+ // TODO(jaewan): Adjust local stream volume
+ } else {
+ volumeProvider.onAdjustVolume(direction);
+ }
+ });
+ }
+
+ @Override
+ public void sendTransportControlCommand(IMediaController2 caller,
+ int commandCode, Bundle args) throws RuntimeException {
+ onCommand(caller, commandCode, (session, controller) -> {
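+ // Translate the transport control command code into the corresponding session API call.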
+ switch (commandCode) {
+ case SessionCommand2.COMMAND_CODE_PLAYBACK_PLAY:
+ session.getInstance().play();
+ break;
+ case SessionCommand2.COMMAND_CODE_PLAYBACK_PAUSE:
+ session.getInstance().pause();
+ break;
+ case SessionCommand2.COMMAND_CODE_PLAYBACK_STOP:
+ session.getInstance().stop();
+ break;
+ case SessionCommand2.COMMAND_CODE_PLAYBACK_PREPARE:
+ session.getInstance().prepare();
+ break;
+ case SessionCommand2.COMMAND_CODE_PLAYBACK_SEEK_TO:
+ session.getInstance().seekTo(args.getLong(ARGUMENT_KEY_POSITION));
+ break;
+ default:
+ // TODO(jaewan): Resend unknown (new) commands through the custom command.
+ }
+ });
+ }
+
+ @Override
+ public void sendCustomCommand(final IMediaController2 caller, final Bundle commandBundle,
+ final Bundle args, final ResultReceiver receiver) {
+ final MediaSession2Impl session = getSession();
+ if (session == null) {
+ return;
+ }
+ final SessionCommand2 command = SessionCommand2.fromBundle(commandBundle);
+ if (command == null) {
+ Log.w(TAG, "sendCustomCommand(): Ignoring null command from "
+ + getControllerIfAble(caller));
+ return;
+ }
+ final ControllerInfo controller = getControllerIfAble(caller, command);
+ if (controller == null) {
+ return;
+ }
+ session.getCallbackExecutor().execute(() -> {
+ if (getControllerIfAble(caller, command) == null) {
+ return;
+ }
+ session.getCallback().onCustomCommand(session.getInstance(),
+ controller, command, args, receiver);
+ });
+ }
+
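+ // The prepareFrom*/playFrom* methods below validate their arguments and then forward the
+ // request to the matching SessionCallback method via onCommand() with the corresponding
+ // COMMAND_CODE_SESSION_* code.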
+ @Override
+ public void prepareFromUri(final IMediaController2 caller, final Uri uri,
+ final Bundle extras) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_URI,
+ (session, controller) -> {
+ if (uri == null) {
+ Log.w(TAG, "prepareFromUri(): Ignoring null uri from " + controller);
+ return;
+ }
+ session.getCallback().onPrepareFromUri(session.getInstance(), controller, uri,
+ extras);
+ });
+ }
+
+ @Override
+ public void prepareFromSearch(final IMediaController2 caller, final String query,
+ final Bundle extras) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_SEARCH,
+ (session, controller) -> {
+ if (TextUtils.isEmpty(query)) {
+ Log.w(TAG, "prepareFromSearch(): Ignoring empty query from " + controller);
+ return;
+ }
+ session.getCallback().onPrepareFromSearch(session.getInstance(),
+ controller, query, extras);
+ });
+ }
+
+ @Override
+ public void prepareFromMediaId(final IMediaController2 caller, final String mediaId,
+ final Bundle extras) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_SESSION_PREPARE_FROM_MEDIA_ID,
+ (session, controller) -> {
+ if (mediaId == null) {
+ Log.w(TAG, "prepareFromMediaId(): Ignoring null mediaId from " + controller);
+ return;
+ }
+ session.getCallback().onPrepareFromMediaId(session.getInstance(),
+ controller, mediaId, extras);
+ });
+ }
+
+ @Override
+ public void playFromUri(final IMediaController2 caller, final Uri uri,
+ final Bundle extras) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_SESSION_PLAY_FROM_URI,
+ (session, controller) -> {
+ if (uri == null) {
+ Log.w(TAG, "playFromUri(): Ignoring null uri from " + controller);
+ return;
+ }
+ session.getCallback().onPlayFromUri(session.getInstance(), controller, uri,
+ extras);
+ });
+ }
+
+ @Override
+ public void playFromSearch(final IMediaController2 caller, final String query,
+ final Bundle extras) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_SESSION_PLAY_FROM_SEARCH,
+ (session, controller) -> {
+ if (TextUtils.isEmpty(query)) {
+ Log.w(TAG, "playFromSearch(): Ignoring empty query from " + controller);
+ return;
+ }
+ session.getCallback().onPlayFromSearch(session.getInstance(),
+ controller, query, extras);
+ });
+ }
+
+ @Override
+ public void playFromMediaId(final IMediaController2 caller, final String mediaId,
+ final Bundle extras) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_SESSION_PLAY_FROM_MEDIA_ID,
+ (session, controller) -> {
+ if (mediaId == null) {
+ Log.w(TAG, "playFromMediaId(): Ignoring null mediaId from " + controller);
+ return;
+ }
+ session.getCallback().onPlayFromMediaId(session.getInstance(), controller,
+ mediaId, extras);
+ });
+ }
+
+ @Override
+ public void setRating(final IMediaController2 caller, final String mediaId,
+ final Bundle ratingBundle) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_SESSION_SET_RATING,
+ (session, controller) -> {
+ if (mediaId == null) {
+ Log.w(TAG, "setRating(): Ignoring null mediaId from " + controller);
+ return;
+ }
+ if (ratingBundle == null) {
+ Log.w(TAG, "setRating(): Ignoring null ratingBundle from " + controller);
+ return;
+ }
+ Rating2 rating = Rating2.fromBundle(ratingBundle);
+ if (rating == null) {
+ Log.w(TAG, "setRating(): Ignoring invalid ratingBundle from " + controller);
+ return;
+ }
+ session.getCallback().onSetRating(session.getInstance(), controller, mediaId,
+ rating);
+ });
+ }
+
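+ // Rebuilds the playlist from the parceled bundles. Passing null as the UUID to
+ // MediaItem2Impl.fromBundle() recreates each item's UUID, and items that fail to unbundle
+ // are skipped rather than failing the whole call.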
+ @Override
+ public void setPlaylist(final IMediaController2 caller, final List<Bundle> playlist,
+ final Bundle metadata) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_SET_LIST, (session, controller) -> {
+ if (playlist == null) {
+ Log.w(TAG, "setPlaylist(): Ignoring null playlist from " + controller);
+ return;
+ }
+ List<MediaItem2> list = new ArrayList<>();
+ for (int i = 0; i < playlist.size(); i++) {
+ // Recreates UUID in the playlist
+ MediaItem2 item = MediaItem2Impl.fromBundle(playlist.get(i), null);
+ if (item != null) {
+ list.add(item);
+ }
+ }
+ session.getInstance().setPlaylist(list, MediaMetadata2.fromBundle(metadata));
+ });
+ }
+
+ @Override
+ public void updatePlaylistMetadata(final IMediaController2 caller, final Bundle metadata) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_SET_LIST_METADATA,
+ (session, controller) -> {
+ session.getInstance().updatePlaylistMetadata(MediaMetadata2.fromBundle(metadata));
+ });
+ }
+
+ @Override
+ public void addPlaylistItem(IMediaController2 caller, int index, Bundle mediaItem) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_ADD_ITEM,
+ (session, controller) -> {
+ // Recreate the UUID of the incoming media item so that a controller can reuse the
+ // same item multiple times for addPlaylistItem.
+ session.getInstance().addPlaylistItem(index,
+ MediaItem2Impl.fromBundle(mediaItem, null));
+ });
+ }
+
+ @Override
+ public void removePlaylistItem(IMediaController2 caller, Bundle mediaItem) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_REMOVE_ITEM,
+ (session, controller) -> {
+ MediaItem2 item = MediaItem2.fromBundle(mediaItem);
+ // Note: MediaItem2 has hidden UUID to identify it across the processes.
+ session.getInstance().removePlaylistItem(item);
+ });
+ }
+
+ @Override
+ public void replacePlaylistItem(IMediaController2 caller, int index, Bundle mediaItem) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_REPLACE_ITEM,
+ (session, controller) -> {
+ // Recreate the UUID of the incoming media item so that a controller can reuse the
+ // same item multiple times for replacePlaylistItem.
+ session.getInstance().replacePlaylistItem(index,
+ MediaItem2Impl.fromBundle(mediaItem, null));
+ });
+ }
+
+ @Override
+ public void skipToPlaylistItem(IMediaController2 caller, Bundle mediaItem) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_SKIP_TO_PLAYLIST_ITEM,
+ (session, controller) -> {
+ if (mediaItem == null) {
+ Log.w(TAG, "skipToPlaylistItem(): Ignoring null mediaItem from "
+ + controller);
+ return;
+ }
+ // Note: MediaItem2 has hidden UUID to identify it across the processes.
+ session.getInstance().skipToPlaylistItem(MediaItem2.fromBundle(mediaItem));
+ });
+ }
+
+ @Override
+ public void skipToPreviousItem(IMediaController2 caller) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_SKIP_PREV_ITEM,
+ (session, controller) -> {
+ session.getInstance().skipToPreviousItem();
+ });
+ }
+
+ @Override
+ public void skipToNextItem(IMediaController2 caller) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_SKIP_NEXT_ITEM,
+ (session, controller) -> {
+ session.getInstance().skipToNextItem();
+ });
+ }
+
+ @Override
+ public void setRepeatMode(IMediaController2 caller, int repeatMode) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_SET_REPEAT_MODE,
+ (session, controller) -> {
+ session.getInstance().setRepeatMode(repeatMode);
+ });
+ }
+
+ @Override
+ public void setShuffleMode(IMediaController2 caller, int shuffleMode) {
+ onCommand(caller, SessionCommand2.COMMAND_CODE_PLAYLIST_SET_SHUFFLE_MODE,
+ (session, controller) -> {
+ session.getInstance().setShuffleMode(shuffleMode);
+ });
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ // AIDL methods for LibrarySession overrides
+ //////////////////////////////////////////////////////////////////////////////////////////////
+
+ @Override
+ public void getLibraryRoot(final IMediaController2 caller, final Bundle rootHints)
+ throws RuntimeException {
+ onBrowserCommand(caller, (session, controller) -> {
+ final LibraryRoot root = session.getCallback().onGetLibraryRoot(session.getInstance(),
+ controller, rootHints);
+ notify(controller, (unused, iController) -> {
+ iController.onGetLibraryRootDone(rootHints,
+ root == null ? null : root.getRootId(),
+ root == null ? null : root.getExtras());
+ });
+ });
+ }
+
+ @Override
+ public void getItem(final IMediaController2 caller, final String mediaId)
+ throws RuntimeException {
+ onBrowserCommand(caller, (session, controller) -> {
+ if (mediaId == null) {
+ if (DEBUG) {
+ Log.d(TAG, "mediaId shouldn't be null");
+ }
+ return;
+ }
+ final MediaItem2 result = session.getCallback().onGetItem(session.getInstance(),
+ controller, mediaId);
+ notify(controller, (unused, iController) -> {
+ iController.onGetItemDone(mediaId, result == null ? null : result.toBundle());
+ });
+ });
+ }
+
+ @Override
+ public void getChildren(final IMediaController2 caller, final String parentId,
+ final int page, final int pageSize, final Bundle extras) throws RuntimeException {
+ onBrowserCommand(caller, (session, controller) -> {
+ if (parentId == null) {
+ if (DEBUG) {
+ Log.d(TAG, "parentId shouldn't be null");
+ }
+ return;
+ }
+ if (page < 1 || pageSize < 1) {
+ if (DEBUG) {
+ Log.d(TAG, "Neither page nor pageSize should be less than 1");
+ }
+ return;
+ }
+ List<MediaItem2> result = session.getCallback().onGetChildren(session.getInstance(),
+ controller, parentId, page, pageSize, extras);
+ if (result != null && result.size() > pageSize) {
+ throw new IllegalArgumentException("onGetChildren() shouldn't return media items "
+ + "more than pageSize. result.size()=" + result.size() + " pageSize="
+ + pageSize);
+ }
+ final List<Bundle> bundleList;
+ if (result != null) {
+ bundleList = new ArrayList<>();
+ for (MediaItem2 item : result) {
+ bundleList.add(item == null ? null : item.toBundle());
+ }
+ } else {
+ bundleList = null;
+ }
+ notify(controller, (unused, iController) -> {
+ iController.onGetChildrenDone(parentId, page, pageSize, bundleList, extras);
+ });
+ });
+ }
+
+ @Override
+ public void search(IMediaController2 caller, String query, Bundle extras) {
+ onBrowserCommand(caller, (session, controller) -> {
+ if (TextUtils.isEmpty(query)) {
+ Log.w(TAG, "search(): Ignoring empty query from " + controller);
+ return;
+ }
+ session.getCallback().onSearch(session.getInstance(), controller, query, extras);
+ });
+ }
+
+ @Override
+ public void getSearchResult(final IMediaController2 caller, final String query,
+ final int page, final int pageSize, final Bundle extras) {
+ onBrowserCommand(caller, (session, controller) -> {
+ if (TextUtils.isEmpty(query)) {
+ Log.w(TAG, "getSearchResult(): Ignoring empty query from " + controller);
+ return;
+ }
+ if (page < 1 || pageSize < 1) {
+ Log.w(TAG, "getSearchResult(): Ignoring negative page / pageSize."
+ + " page=" + page + " pageSize=" + pageSize + " from " + controller);
+ return;
+ }
+ List<MediaItem2> result = session.getCallback().onGetSearchResult(session.getInstance(),
+ controller, query, page, pageSize, extras);
+ if (result != null && result.size() > pageSize) {
+ throw new IllegalArgumentException("onGetSearchResult() shouldn't return media "
+ + "items more than pageSize. result.size()=" + result.size() + " pageSize="
+ + pageSize);
+ }
+ final List<Bundle> bundleList;
+ if (result != null) {
+ bundleList = new ArrayList<>();
+ for (MediaItem2 item : result) {
+ bundleList.add(item == null ? null : item.toBundle());
+ }
+ } else {
+ bundleList = null;
+ }
+ notify(controller, (unused, iController) -> {
+ iController.onGetSearchResultDone(query, page, pageSize, bundleList, extras);
+ });
+ });
+ }
+
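+ // subscribe()/unsubscribe() below track which parent IDs each browser is interested in
+ // (mSubscriptions), which notifyChildrenChangedNotLocked() later consults via isSubscribed().
+ // Note that unsubscribe() currently drops all of a controller's subscriptions, not just the
+ // given parentId.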
+ @Override
+ public void subscribe(final IMediaController2 caller, final String parentId,
+ final Bundle option) {
+ onBrowserCommand(caller, (session, controller) -> {
+ if (parentId == null) {
+ Log.w(TAG, "subscribe(): Ignoring null parentId from " + controller);
+ return;
+ }
+ session.getCallback().onSubscribe(session.getInstance(),
+ controller, parentId, option);
+ synchronized (mLock) {
+ Set<String> subscription = mSubscriptions.get(controller);
+ if (subscription == null) {
+ subscription = new HashSet<>();
+ mSubscriptions.put(controller, subscription);
+ }
+ subscription.add(parentId);
+ }
+ });
+ }
+
+ @Override
+ public void unsubscribe(final IMediaController2 caller, final String parentId) {
+ onBrowserCommand(caller, (session, controller) -> {
+ if (parentId == null) {
+ Log.w(TAG, "unsubscribe(): Ignoring null parentId from " + controller);
+ return;
+ }
+ session.getCallback().onUnsubscribe(session.getInstance(), controller, parentId);
+ synchronized (mLock) {
+ mSubscriptions.remove(controller);
+ }
+ });
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ // APIs for MediaSession2Impl
+ //////////////////////////////////////////////////////////////////////////////////////////////
+
+ // TODO(jaewan): (Can be Post-P) Need a way to get controller with permissions
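+ // Returns a snapshot copy of the currently connected controllers, so callers can iterate it
+ // without holding mLock.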
+ public List<ControllerInfo> getControllers() {
+ ArrayList<ControllerInfo> controllers = new ArrayList<>();
+ synchronized (mLock) {
+ for (int i = 0; i < mControllers.size(); i++) {
+ controllers.add(mControllers.valueAt(i));
+ }
+ }
+ return controllers;
+ }
+
+ // Should be used without a lock to prevent potential deadlock.
+ public void notifyPlayerStateChangedNotLocked(int state) {
+ notifyAll((controller, iController) -> {
+ iController.onPlayerStateChanged(state);
+ });
+ }
+
+ // TODO(jaewan): Rename
+ public void notifyPositionChangedNotLocked(long eventTimeMs, long positionMs) {
+ notifyAll((controller, iController) -> {
+ iController.onPositionChanged(eventTimeMs, positionMs);
+ });
+ }
+
+ public void notifyPlaybackSpeedChangedNotLocked(float speed) {
+ notifyAll((controller, iController) -> {
+ iController.onPlaybackSpeedChanged(speed);
+ });
+ }
+
+ public void notifyBufferedPositionChangedNotLocked(long bufferedPositionMs) {
+ notifyAll((controller, iController) -> {
+ iController.onBufferedPositionChanged(bufferedPositionMs);
+ });
+ }
+
+ public void notifyCustomLayoutNotLocked(ControllerInfo controller, List<CommandButton> layout) {
+ notify(controller, (unused, iController) -> {
+ List<Bundle> layoutBundles = new ArrayList<>();
+ for (int i = 0; i < layout.size(); i++) {
+ Bundle bundle = ((CommandButtonImpl) layout.get(i).getProvider()).toBundle();
+ if (bundle != null) {
+ layoutBundles.add(bundle);
+ }
+ }
+ iController.onCustomLayoutChanged(layoutBundles);
+ });
+ }
+
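+ // Converts the playlist to bundles once up front, then notifies each controller according to
+ // its allowed commands: controllers that may get the full list receive onPlaylistChanged(),
+ // while controllers limited to metadata only receive onPlaylistMetadataChanged().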
+ public void notifyPlaylistChangedNotLocked(List<MediaItem2> playlist, MediaMetadata2 metadata) {
+ final List<Bundle> bundleList;
+ if (playlist != null) {
+ bundleList = new ArrayList<>();
+ for (int i = 0; i < playlist.size(); i++) {
+ if (playlist.get(i) != null) {
+ Bundle bundle = playlist.get(i).toBundle();
+ if (bundle != null) {
+ bundleList.add(bundle);
+ }
+ }
+ }
+ } else {
+ bundleList = null;
+ }
+ final Bundle metadataBundle = (metadata == null) ? null : metadata.toBundle();
+ notifyAll((controller, iController) -> {
+ if (getControllerBinderIfAble(controller,
+ SessionCommand2.COMMAND_CODE_PLAYLIST_GET_LIST) != null) {
+ iController.onPlaylistChanged(bundleList, metadataBundle);
+ } else if (getControllerBinderIfAble(controller,
+ SessionCommand2.COMMAND_CODE_PLAYLIST_GET_LIST_METADATA) != null) {
+ iController.onPlaylistMetadataChanged(metadataBundle);
+ }
+ });
+ }
+
+ public void notifyPlaylistMetadataChangedNotLocked(MediaMetadata2 metadata) {
+ final Bundle metadataBundle = (metadata == null) ? null : metadata.toBundle();
+ notifyAll(SessionCommand2.COMMAND_CODE_PLAYLIST_GET_LIST_METADATA,
+ (unused, iController) -> {
+ iController.onPlaylistMetadataChanged(metadataBundle);
+ });
+ }
+
+ public void notifyRepeatModeChangedNotLocked(int repeatMode) {
+ notifyAll((unused, iController) -> {
+ iController.onRepeatModeChanged(repeatMode);
+ });
+ }
+
+ public void notifyShuffleModeChangedNotLocked(int shuffleMode) {
+ notifyAll((unused, iController) -> {
+ iController.onShuffleModeChanged(shuffleMode);
+ });
+ }
+
+ public void notifyPlaybackInfoChanged(MediaController2.PlaybackInfo playbackInfo) {
+ final Bundle playbackInfoBundle =
+ ((MediaController2Impl.PlaybackInfoImpl) playbackInfo.getProvider()).toBundle();
+ notifyAll((unused, iController) -> {
+ iController.onPlaybackInfoChanged(playbackInfoBundle);
+ });
+ }
+
+ public void setAllowedCommands(ControllerInfo controller, SessionCommandGroup2 commands) {
+ synchronized (mLock) {
+ mAllowedCommandGroupMap.put(controller, commands);
+ }
+ notify(controller, (unused, iController) -> {
+ iController.onAllowedCommandsChanged(commands.toBundle());
+ });
+ }
+
+ public void sendCustomCommand(ControllerInfo controller, SessionCommand2 command, Bundle args,
+ ResultReceiver receiver) {
+ if (receiver != null && controller == null) {
+ throw new IllegalArgumentException("Controller shouldn't be null if result receiver is"
+ + " specified");
+ }
+ if (command == null) {
+ throw new IllegalArgumentException("command shouldn't be null");
+ }
+ notify(controller, (unused, iController) -> {
+ Bundle commandBundle = command.toBundle();
+ iController.onCustomCommand(commandBundle, args, null);
+ });
+ }
+
+ public void sendCustomCommand(SessionCommand2 command, Bundle args) {
+ if (command == null) {
+ throw new IllegalArgumentException("command shouldn't be null");
+ }
+ Bundle commandBundle = command.toBundle();
+ notifyAll((unused, iController) -> {
+ iController.onCustomCommand(commandBundle, args, null);
+ });
+ }
+
+ public void notifyError(int errorCode, Bundle extras) {
+ notifyAll((unused, iController) -> {
+ iController.onError(errorCode, extras);
+ });
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ // APIs for MediaLibrarySessionImpl
+ //////////////////////////////////////////////////////////////////////////////////////////////
+
+ public void notifySearchResultChanged(ControllerInfo controller, String query, int itemCount,
+ Bundle extras) {
+ notify(controller, (unused, iController) -> {
+ iController.onSearchResultChanged(query, itemCount, extras);
+ });
+ }
+
+ public void notifyChildrenChangedNotLocked(ControllerInfo controller, String parentId,
+ int itemCount, Bundle extras) {
+ notify(controller, (unused, iController) -> {
+ if (isSubscribed(controller, parentId)) {
+ iController.onChildrenChanged(parentId, itemCount, extras);
+ }
+ });
+ }
+
+ public void notifyChildrenChangedNotLocked(String parentId, int itemCount, Bundle extras) {
+ notifyAll((controller, iController) -> {
+ if (isSubscribed(controller, parentId)) {
+ iController.onChildrenChanged(parentId, itemCount, extras);
+ }
+ });
+ }
+
+ private boolean isSubscribed(ControllerInfo controller, String parentId) {
+ synchronized (mLock) {
+ Set<String> subscriptions = mSubscriptions.get(controller);
+ if (subscriptions == null || !subscriptions.contains(parentId)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////////////////////
+ // Misc
+ //////////////////////////////////////////////////////////////////////////////////////////////
+
+ @FunctionalInterface
+ private interface SessionRunnable {
+ void run(final MediaSession2Impl session, final ControllerInfo controller);
+ }
+
+ @FunctionalInterface
+ private interface LibrarySessionRunnable {
+ void run(final MediaLibrarySessionImpl session, final ControllerInfo controller);
+ }
+
+ @FunctionalInterface
+ private interface NotifyRunnable {
+ void run(final ControllerInfo controller,
+ final IMediaController2 iController) throws RemoteException;
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java b/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
new file mode 100644
index 0000000..c33eb65
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/MediaSessionService2Impl.java
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import static android.content.Context.NOTIFICATION_SERVICE;
+
+import android.app.Notification;
+import android.app.NotificationManager;
+import android.content.Context;
+import android.content.Intent;
+import android.media.MediaPlayerBase;
+import android.media.MediaPlayerBase.PlayerEventCallback;
+import android.media.MediaSession2;
+import android.media.MediaSessionService2;
+import android.media.MediaSessionService2.MediaNotification;
+import android.media.SessionToken2;
+import android.media.SessionToken2.TokenType;
+import android.media.update.MediaSessionService2Provider;
+import android.os.IBinder;
+import android.support.annotation.GuardedBy;
+import android.util.Log;
+
+// TODO(jaewan): Need a test for session service itself.
+public class MediaSessionService2Impl implements MediaSessionService2Provider {
+
+ private static final String TAG = "MPSessionService"; // to meet 23 char limit in Log tag
+ private static final boolean DEBUG = true; // TODO(jaewan): Change this. (b/74094611)
+
+ private final MediaSessionService2 mInstance;
+ private final PlayerEventCallback mCallback = new SessionServiceEventCallback();
+
+ private final Object mLock = new Object();
+ @GuardedBy("mLock")
+ private NotificationManager mNotificationManager;
+ @GuardedBy("mLock")
+ private Intent mStartSelfIntent;
+
+ private boolean mIsRunningForeground;
+ private MediaSession2 mSession;
+
+ public MediaSessionService2Impl(MediaSessionService2 instance) {
+ if (DEBUG) {
+ Log.d(TAG, "MediaSessionService2Impl(" + instance + ")");
+ }
+ mInstance = instance;
+ }
+
+ @Override
+ public MediaSession2 getSession_impl() {
+ return getSession();
+ }
+
+ MediaSession2 getSession() {
+ synchronized (mLock) {
+ return mSession;
+ }
+ }
+
+ @Override
+ public MediaNotification onUpdateNotification_impl() {
+ // Provide default notification UI later.
+ return null;
+ }
+
+ @Override
+ public void onCreate_impl() {
+ mNotificationManager = (NotificationManager) mInstance.getSystemService(
+ NOTIFICATION_SERVICE);
+ mStartSelfIntent = new Intent(mInstance, mInstance.getClass());
+
+ SessionToken2 token = new SessionToken2(mInstance, mInstance.getPackageName(),
+ mInstance.getClass().getName());
+ if (token.getType() != getSessionType()) {
+ throw new RuntimeException("Expected session service, but was " + token.getType());
+ }
+ mSession = mInstance.onCreateSession(token.getId());
+ if (mSession == null || !token.getId().equals(mSession.getToken().getId())) {
+ throw new RuntimeException("Expected session with id " + token.getId()
+ + ", but got " + mSession);
+ }
+ // TODO(jaewan): Uncomment here.
+ // mSession.registerPlayerEventCallback(mCallback, mSession.getExecutor());
+ }
+
+ @TokenType int getSessionType() {
+ return SessionToken2.TYPE_SESSION_SERVICE;
+ }
+
+ public IBinder onBind_impl(Intent intent) {
+ if (MediaSessionService2.SERVICE_INTERFACE.equals(intent.getAction())) {
+ return ((MediaSession2Impl) mSession.getProvider()).getSessionStub().asBinder();
+ }
+ return null;
+ }
+
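+ // Keeps the service's foreground state in sync with the player state: start foreground with
+ // the app-provided notification while playing, drop foreground when idle or in error, and
+ // otherwise just refresh the notification via NotificationManager.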
+ private void updateNotification(int playerState) {
+ MediaNotification mediaNotification = mInstance.onUpdateNotification();
+ if (mediaNotification == null) {
+ return;
+ }
+ switch(playerState) {
+ case MediaPlayerBase.PLAYER_STATE_PLAYING:
+ if (!mIsRunningForeground) {
+ mIsRunningForeground = true;
+ mInstance.startForegroundService(mStartSelfIntent);
+ mInstance.startForeground(mediaNotification.getNotificationId(),
+ mediaNotification.getNotification());
+ return;
+ }
+ break;
+ case MediaPlayerBase.PLAYER_STATE_IDLE:
+ case MediaPlayerBase.PLAYER_STATE_ERROR:
+ if (mIsRunningForeground) {
+ mIsRunningForeground = false;
+ mInstance.stopForeground(true);
+ return;
+ }
+ break;
+ }
+ mNotificationManager.notify(mediaNotification.getNotificationId(),
+ mediaNotification.getNotification());
+ }
+
+ private class SessionServiceEventCallback extends PlayerEventCallback {
+ @Override
+ public void onPlayerStateChanged(MediaPlayerBase player, int state) {
+ // TODO: Implement this
+ return;
+ }
+ }
+
+ public static class MediaNotificationImpl implements MediaNotificationProvider {
+ private int mNotificationId;
+ private Notification mNotification;
+
+ public MediaNotificationImpl(MediaNotification instance, int notificationId,
+ Notification notification) {
+ if (notification == null) {
+ throw new IllegalArgumentException("notification shouldn't be null");
+ }
+ mNotificationId = notificationId;
+ mNotification = notification;
+ }
+
+ @Override
+ public int getNotificationId_impl() {
+ return mNotificationId;
+ }
+
+ @Override
+ public Notification getNotification_impl() {
+ return mNotification;
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/Rating2Impl.java b/packages/MediaComponents/src/com/android/media/Rating2Impl.java
new file mode 100644
index 0000000..d558129
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/Rating2Impl.java
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import static android.media.Rating2.*;
+
+import android.content.Context;
+import android.media.Rating2;
+import android.media.Rating2.Style;
+import android.media.update.Rating2Provider;
+import android.os.Bundle;
+import android.util.Log;
+
+import java.util.Objects;
+
+public final class Rating2Impl implements Rating2Provider {
+ private static final String TAG = "Rating2";
+
+ private static final String KEY_STYLE = "android.media.rating2.style";
+ private static final String KEY_VALUE = "android.media.rating2.value";
+
+ private static final float RATING_NOT_RATED = -1.0f;
+
+ private final Rating2 mInstance;
+ private final int mRatingStyle;
+ private final float mRatingValue;
+
+ private Rating2Impl(@Style int ratingStyle, float rating) {
+ mRatingStyle = ratingStyle;
+ mRatingValue = rating;
+ mInstance = new Rating2(this);
+ }
+
+ @Override
+ public String toString_impl() {
+ return "Rating2:style=" + mRatingStyle + " rating="
+ + (mRatingValue < 0.0f ? "unrated" : String.valueOf(mRatingValue));
+ }
+
+ @Override
+ public boolean equals_impl(Object obj) {
+ if (!(obj instanceof Rating2)) {
+ return false;
+ }
+ Rating2Impl other = (Rating2Impl) ((Rating2) obj).getProvider();
+ return mRatingStyle == other.mRatingStyle
+ && mRatingValue == other.mRatingValue;
+ }
+
+ @Override
+ public int hashCode_impl() {
+ return Objects.hash(mRatingStyle, mRatingValue);
+ }
+
+ Rating2 getInstance() {
+ return mInstance;
+ }
+
+ public static Rating2 fromBundle_impl(Bundle bundle) {
+ if (bundle == null) {
+ return null;
+ }
+ return new Rating2Impl(bundle.getInt(KEY_STYLE), bundle.getFloat(KEY_VALUE)).getInstance();
+ }
+
+ public Bundle toBundle_impl() {
+ Bundle bundle = new Bundle();
+ bundle.putInt(KEY_STYLE, mRatingStyle);
+ bundle.putFloat(KEY_VALUE, mRatingValue);
+ return bundle;
+ }
+
+ public static Rating2 newUnratedRating_impl(@Style int ratingStyle) {
+ switch(ratingStyle) {
+ case RATING_HEART:
+ case RATING_THUMB_UP_DOWN:
+ case RATING_3_STARS:
+ case RATING_4_STARS:
+ case RATING_5_STARS:
+ case RATING_PERCENTAGE:
+ return new Rating2Impl(ratingStyle, RATING_NOT_RATED).getInstance();
+ default:
+ return null;
+ }
+ }
+
+ public static Rating2 newHeartRating_impl(boolean hasHeart) {
+ return new Rating2Impl(RATING_HEART, hasHeart ? 1.0f : 0.0f).getInstance();
+ }
+
+ public static Rating2 newThumbRating_impl(boolean thumbIsUp) {
+ return new Rating2Impl(RATING_THUMB_UP_DOWN, thumbIsUp ? 1.0f : 0.0f).getInstance();
+ }
+
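+ // Star ratings are only valid for the 3-, 4-, and 5-star styles and must fall within
+ // [0, maxRating] for that style; anything else is rejected by returning null.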
+ public static Rating2 newStarRating_impl(int starRatingStyle, float starRating) {
+ float maxRating = RATING_NOT_RATED;
+ switch(starRatingStyle) {
+ case RATING_3_STARS:
+ maxRating = 3.0f;
+ break;
+ case RATING_4_STARS:
+ maxRating = 4.0f;
+ break;
+ case RATING_5_STARS:
+ maxRating = 5.0f;
+ break;
+ default:
+ Log.e(TAG, "Invalid rating style (" + starRatingStyle + ") for a star rating");
+ return null;
+ }
+ if ((starRating < 0.0f) || (starRating > maxRating)) {
+ Log.e(TAG, "Trying to set out of range star-based rating");
+ return null;
+ }
+ return new Rating2Impl(starRatingStyle, starRating).getInstance();
+ }
+
+ public static Rating2 newPercentageRating_impl(float percent) {
+ if ((percent < 0.0f) || (percent > 100.0f)) {
+ Log.e(TAG, "Invalid percentage-based rating value");
+ return null;
+ } else {
+ return new Rating2Impl(RATING_PERCENTAGE, percent).getInstance();
+ }
+ }
+
+ @Override
+ public boolean isRated_impl() {
+ return mRatingValue >= 0.0f;
+ }
+
+ @Override
+ public int getRatingStyle_impl() {
+ return mRatingStyle;
+ }
+
+ @Override
+ public boolean hasHeart_impl() {
+ if (mRatingStyle != RATING_HEART) {
+ return false;
+ } else {
+ return (mRatingValue == 1.0f);
+ }
+ }
+
+ @Override
+ public boolean isThumbUp_impl() {
+ if (mRatingStyle != RATING_THUMB_UP_DOWN) {
+ return false;
+ } else {
+ return (mRatingValue == 1.0f);
+ }
+ }
+
+ @Override
+ public float getStarRating_impl() {
+ switch (mRatingStyle) {
+ case RATING_3_STARS:
+ case RATING_4_STARS:
+ case RATING_5_STARS:
+ if (mInstance.isRated()) {
+ return mRatingValue;
+ }
+ default:
+ return -1.0f;
+ }
+ }
+
+ @Override
+ public float getPercentRating_impl() {
+ if ((mRatingStyle != RATING_PERCENTAGE) || !mInstance.isRated()) {
+ return -1.0f;
+ } else {
+ return mRatingValue;
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/RoutePlayer.java b/packages/MediaComponents/src/com/android/media/RoutePlayer.java
new file mode 100644
index 0000000..9450d34
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/RoutePlayer.java
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import android.content.Context;
+import android.media.DataSourceDesc;
+import android.media.session.MediaSession;
+import android.media.session.PlaybackState;
+import android.net.Uri;
+import android.os.Build;
+import android.os.Bundle;
+import android.support.annotation.RequiresApi;
+
+import com.android.support.mediarouter.media.MediaItemStatus;
+import com.android.support.mediarouter.media.MediaRouter;
+import com.android.support.mediarouter.media.MediaSessionStatus;
+import com.android.support.mediarouter.media.RemotePlaybackClient;
+import com.android.support.mediarouter.media.RemotePlaybackClient.ItemActionCallback;
+import com.android.support.mediarouter.media.RemotePlaybackClient.SessionActionCallback;
+import com.android.support.mediarouter.media.RemotePlaybackClient.StatusCallback;
+
+import java.util.Map;
+
+@RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
+public class RoutePlayer extends MediaSession.Callback {
+ public static final long PLAYBACK_ACTIONS = PlaybackState.ACTION_PAUSE
+ | PlaybackState.ACTION_PLAY | PlaybackState.ACTION_SEEK_TO
+ | PlaybackState.ACTION_FAST_FORWARD | PlaybackState.ACTION_REWIND;
+
+ private RemotePlaybackClient mClient;
+ private String mSessionId;
+ private String mItemId;
+ private PlayerEventCallback mCallback;
+
+ private StatusCallback mStatusCallback = new StatusCallback() {
+ @Override
+ public void onItemStatusChanged(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus,
+ String itemId, MediaItemStatus itemStatus) {
+ updateSessionStatus(sessionId, sessionStatus);
+ updateItemStatus(itemId, itemStatus);
+ }
+ };
+
+ public RoutePlayer(Context context, MediaRouter.RouteInfo route) {
+ mClient = new RemotePlaybackClient(context, route);
+ mClient.setStatusCallback(mStatusCallback);
+ mClient.startSession(null, new SessionActionCallback() {
+ @Override
+ public void onResult(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus) {
+ updateSessionStatus(sessionId, sessionStatus);
+ }
+ });
+ }
+
+ @Override
+ public void onPlay() {
+ mClient.resume(null, new SessionActionCallback() {
+ @Override
+ public void onResult(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus) {
+ updateSessionStatus(sessionId, sessionStatus);
+ }
+ });
+ }
+
+ @Override
+ public void onPause() {
+ mClient.pause(null, new SessionActionCallback() {
+ @Override
+ public void onResult(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus) {
+ updateSessionStatus(sessionId, sessionStatus);
+ }
+ });
+ }
+
+ @Override
+ public void onSeekTo(long pos) {
+ mClient.seek(mItemId, pos, null, new ItemActionCallback() {
+ @Override
+ public void onResult(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus,
+ String itemId, MediaItemStatus itemStatus) {
+ updateSessionStatus(sessionId, sessionStatus);
+ updateItemStatus(itemId, itemStatus);
+ }
+ });
+ }
+
+ @Override
+ public void onStop() {
+ mClient.stop(null, new SessionActionCallback() {
+ @Override
+ public void onResult(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus) {
+ updateSessionStatus(sessionId, sessionStatus);
+ }
+ });
+ }
+
+ public void setPlayerEventCallback(PlayerEventCallback callback) {
+ mCallback = callback;
+ }
+
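+ // Starts remote playback of the given data source. The MIME type is hard-coded to
+ // "video/mp4" here, and the item/session status callbacks keep mItemId and mSessionId updated.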
+ public void openVideo(DataSourceDesc dsd) {
+ mClient.play(dsd.getUri(), "video/mp4", null, 0, null, new ItemActionCallback() {
+ @Override
+ public void onResult(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus,
+ String itemId, MediaItemStatus itemStatus) {
+ updateSessionStatus(sessionId, sessionStatus);
+ updateItemStatus(itemId, itemStatus);
+ playInternal(dsd.getUri());
+ }
+ });
+ }
+
+ public void release() {
+ if (mClient != null) {
+ mClient.release();
+ mClient = null;
+ }
+ if (mCallback != null) {
+ mCallback = null;
+ }
+ }
+
+ private void playInternal(Uri uri) {
+ mClient.play(uri, "video/mp4", null, 0, null, new ItemActionCallback() {
+ @Override
+ public void onResult(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus,
+ String itemId, MediaItemStatus itemStatus) {
+ updateSessionStatus(sessionId, sessionStatus);
+ updateItemStatus(itemId, itemStatus);
+ }
+ });
+ }
+
+ private void updateSessionStatus(String sessionId, MediaSessionStatus sessionStatus) {
+ mSessionId = sessionId;
+ }
+
+ private void updateItemStatus(String itemId, MediaItemStatus itemStatus) {
+ mItemId = itemId;
+ if (itemStatus == null || mCallback == null) return;
+ mCallback.onPlayerStateChanged(itemStatus);
+ }
+
+ public static abstract class PlayerEventCallback {
+ public void onPlayerStateChanged(MediaItemStatus itemStatus) { }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/SessionPlaylistAgent.java b/packages/MediaComponents/src/com/android/media/SessionPlaylistAgent.java
new file mode 100644
index 0000000..1c570aa
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/SessionPlaylistAgent.java
@@ -0,0 +1,494 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.media.DataSourceDesc;
+import android.media.MediaItem2;
+import android.media.MediaMetadata2;
+import android.media.MediaPlayerBase;
+import android.media.MediaPlayerBase.PlayerEventCallback;
+import android.media.MediaPlaylistAgent;
+import android.media.MediaSession2.OnDataSourceMissingHelper;
+import android.util.ArrayMap;
+
+import com.android.internal.annotations.GuardedBy;
+import com.android.internal.annotations.VisibleForTesting;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+
+public class SessionPlaylistAgent extends MediaPlaylistAgent {
+ private static final String TAG = "SessionPlaylistAgent";
+ @VisibleForTesting
+ static final int END_OF_PLAYLIST = -1;
+ @VisibleForTesting
+ static final int NO_VALID_ITEMS = -2;
+
+ private final PlayItem mEopPlayItem = new PlayItem(END_OF_PLAYLIST, null);
+
+ private final Object mLock = new Object();
+ private final MediaSession2Impl mSessionImpl;
+ private final MyPlayerEventCallback mPlayerCallback;
+
+ @GuardedBy("mLock")
+ private MediaPlayerBase mPlayer;
+ @GuardedBy("mLock")
+ private OnDataSourceMissingHelper mDsmHelper;
+ // TODO: Check if having the same item is okay (b/74090741)
+ @GuardedBy("mLock")
+ private ArrayList<MediaItem2> mPlaylist = new ArrayList<>();
+ @GuardedBy("mLock")
+ private ArrayList<MediaItem2> mShuffledList = new ArrayList<>();
+ @GuardedBy("mLock")
+ private Map<MediaItem2, DataSourceDesc> mItemDsdMap = new ArrayMap<>();
+ @GuardedBy("mLock")
+ private MediaMetadata2 mMetadata;
+ @GuardedBy("mLock")
+ private int mRepeatMode;
+ @GuardedBy("mLock")
+ private int mShuffleMode;
+ @GuardedBy("mLock")
+ private PlayItem mCurrent;
+
+ // Called on session callback executor.
+ private class MyPlayerEventCallback extends PlayerEventCallback {
+ public void onCurrentDataSourceChanged(@NonNull MediaPlayerBase mpb,
+ @Nullable DataSourceDesc dsd) {
+ if (mPlayer != mpb) {
+ return;
+ }
+ synchronized (mLock) {
+ if (dsd == null && mCurrent != null) {
+ mCurrent = getNextValidPlayItemLocked(mCurrent.shuffledIdx, 1);
+ updateCurrentIfNeededLocked();
+ }
+ }
+ }
+ }
+
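+ // A PlayItem ties together an index into mShuffledList, the MediaItem2 at that index, and the
+ // DataSourceDesc resolved for it. isValid() detects when the playlist has changed underneath
+ // the item (index out of range, item replaced, or DSD mismatch).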
+ private class PlayItem {
+ int shuffledIdx;
+ DataSourceDesc dsd;
+ MediaItem2 mediaItem;
+
+ PlayItem(int shuffledIdx) {
+ this(shuffledIdx, null);
+ }
+
+ PlayItem(int shuffledIdx, DataSourceDesc dsd) {
+ this.shuffledIdx = shuffledIdx;
+ if (shuffledIdx >= 0) {
+ this.mediaItem = mShuffledList.get(shuffledIdx);
+ if (dsd == null) {
+ synchronized (mLock) {
+ this.dsd = retrieveDataSourceDescLocked(this.mediaItem);
+ }
+ } else {
+ this.dsd = dsd;
+ }
+ }
+ }
+
+ boolean isValid() {
+ if (this == mEopPlayItem) {
+ return true;
+ }
+ if (mediaItem == null) {
+ return false;
+ }
+ if (dsd == null) {
+ return false;
+ }
+ if (shuffledIdx >= mShuffledList.size()) {
+ return false;
+ }
+ if (mediaItem != mShuffledList.get(shuffledIdx)) {
+ return false;
+ }
+ if (mediaItem.getDataSourceDesc() != null
+ && !mediaItem.getDataSourceDesc().equals(dsd)) {
+ return false;
+ }
+ return true;
+ }
+ }
+
+ public SessionPlaylistAgent(@NonNull MediaSession2Impl sessionImpl,
+ @NonNull MediaPlayerBase player) {
+ if (sessionImpl == null) {
+ throw new IllegalArgumentException("sessionImpl shouldn't be null");
+ }
+ if (player == null) {
+ throw new IllegalArgumentException("player shouldn't be null");
+ }
+ mSessionImpl = sessionImpl;
+ mPlayer = player;
+ mPlayerCallback = new MyPlayerEventCallback();
+ mPlayer.registerPlayerEventCallback(mSessionImpl.getCallbackExecutor(), mPlayerCallback);
+ }
+
+ public void setPlayer(@NonNull MediaPlayerBase player) {
+ if (player == null) {
+ throw new IllegalArgumentException("player shouldn't be null");
+ }
+ synchronized (mLock) {
+ if (player == mPlayer) {
+ return;
+ }
+ mPlayer.unregisterPlayerEventCallback(mPlayerCallback);
+ mPlayer = player;
+ mPlayer.registerPlayerEventCallback(
+ mSessionImpl.getCallbackExecutor(), mPlayerCallback);
+ updatePlayerDataSourceLocked();
+ }
+ }
+
+ public void setOnDataSourceMissingHelper(OnDataSourceMissingHelper helper) {
+ synchronized (mLock) {
+ mDsmHelper = helper;
+ }
+ }
+
+ public void clearOnDataSourceMissingHelper() {
+ synchronized (mLock) {
+ mDsmHelper = null;
+ }
+ }
+
+ @Override
+ public @Nullable List<MediaItem2> getPlaylist() {
+ synchronized (mLock) {
+ return Collections.unmodifiableList(mPlaylist);
+ }
+ }
+
+ @Override
+ public void setPlaylist(@NonNull List<MediaItem2> list, @Nullable MediaMetadata2 metadata) {
+ if (list == null) {
+ throw new IllegalArgumentException("list shouldn't be null");
+ }
+
+ synchronized (mLock) {
+ mItemDsdMap.clear();
+
+ mPlaylist.clear();
+ mPlaylist.addAll(list);
+ applyShuffleModeLocked();
+
+ mMetadata = metadata;
+ mCurrent = getNextValidPlayItemLocked(END_OF_PLAYLIST, 1);
+ updatePlayerDataSourceLocked();
+ }
+ notifyPlaylistChanged();
+ }
+
+ @Override
+ public @Nullable MediaMetadata2 getPlaylistMetadata() {
+ return mMetadata;
+ }
+
+ @Override
+ public void updatePlaylistMetadata(@Nullable MediaMetadata2 metadata) {
+ synchronized (mLock) {
+ if (metadata == mMetadata) {
+ return;
+ }
+ mMetadata = metadata;
+ }
+ notifyPlaylistMetadataChanged();
+ }
+
+ @Override
+ public void addPlaylistItem(int index, @NonNull MediaItem2 item) {
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ synchronized (mLock) {
+ index = clamp(index, mPlaylist.size());
+ int shuffledIdx = index;
+ mPlaylist.add(index, item);
+ if (mShuffleMode == MediaPlaylistAgent.SHUFFLE_MODE_NONE) {
+ mShuffledList.add(index, item);
+ } else {
+ // Add the item at a random position in mShuffledList.
+ shuffledIdx = ThreadLocalRandom.current().nextInt(mShuffledList.size() + 1);
+ mShuffledList.add(shuffledIdx, item);
+ }
+ if (!hasValidItem()) {
+ mCurrent = getNextValidPlayItemLocked(END_OF_PLAYLIST, 1);
+ updatePlayerDataSourceLocked();
+ } else {
+ updateCurrentIfNeededLocked();
+ }
+ }
+ notifyPlaylistChanged();
+ }
+
+ @Override
+ public void removePlaylistItem(@NonNull MediaItem2 item) {
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ synchronized (mLock) {
+ if (!mPlaylist.remove(item)) {
+ return;
+ }
+ mShuffledList.remove(item);
+ mItemDsdMap.remove(item);
+ updateCurrentIfNeededLocked();
+ }
+ notifyPlaylistChanged();
+ }
+
+ @Override
+ public void replacePlaylistItem(int index, @NonNull MediaItem2 item) {
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ synchronized (mLock) {
+ if (mPlaylist.size() <= 0) {
+ return;
+ }
+ index = clamp(index, mPlaylist.size() - 1);
+ int shuffledIdx = mShuffledList.indexOf(mPlaylist.get(index));
+ mItemDsdMap.remove(mShuffledList.get(shuffledIdx));
+ mShuffledList.set(shuffledIdx, item);
+ mPlaylist.set(index, item);
+ if (!hasValidItem()) {
+ mCurrent = getNextValidPlayItemLocked(END_OF_PLAYLIST, 1);
+ updatePlayerDataSourceLocked();
+ } else {
+ updateCurrentIfNeededLocked();
+ }
+ }
+ notifyPlaylistChanged();
+ }
+
+ @Override
+ public void skipToPlaylistItem(@NonNull MediaItem2 item) {
+ if (item == null) {
+ throw new IllegalArgumentException("item shouldn't be null");
+ }
+ synchronized (mLock) {
+ if (!hasValidItem() || item.equals(mCurrent.mediaItem)) {
+ return;
+ }
+ int shuffledIdx = mShuffledList.indexOf(item);
+ if (shuffledIdx < 0) {
+ return;
+ }
+ mCurrent = new PlayItem(shuffledIdx);
+ updateCurrentIfNeededLocked();
+ }
+ }
+
+ @Override
+ public void skipToPreviousItem() {
+ synchronized (mLock) {
+ if (!hasValidItem()) {
+ return;
+ }
+ PlayItem prev = getNextValidPlayItemLocked(mCurrent.shuffledIdx, -1);
+ if (prev != mEopPlayItem) {
+ mCurrent = prev;
+ }
+ updateCurrentIfNeededLocked();
+ }
+ }
+
+ @Override
+ public void skipToNextItem() {
+ synchronized (mLock) {
+ if (!hasValidItem() || mCurrent == mEopPlayItem) {
+ return;
+ }
+ PlayItem next = getNextValidPlayItemLocked(mCurrent.shuffledIdx, 1);
+ if (next != mEopPlayItem) {
+ mCurrent = next;
+ }
+ updateCurrentIfNeededLocked();
+ }
+ }
+
+ @Override
+ public int getRepeatMode() {
+ return mRepeatMode;
+ }
+
+ @Override
+ public void setRepeatMode(int repeatMode) {
+ if (repeatMode < MediaPlaylistAgent.REPEAT_MODE_NONE
+ || repeatMode > MediaPlaylistAgent.REPEAT_MODE_GROUP) {
+ return;
+ }
+ synchronized (mLock) {
+ if (mRepeatMode == repeatMode) {
+ return;
+ }
+ mRepeatMode = repeatMode;
+ switch (repeatMode) {
+ case MediaPlaylistAgent.REPEAT_MODE_ONE:
+ if (mCurrent != null && mCurrent != mEopPlayItem) {
+ mPlayer.loopCurrent(true);
+ }
+ break;
+ case MediaPlaylistAgent.REPEAT_MODE_ALL:
+ case MediaPlaylistAgent.REPEAT_MODE_GROUP:
+ if (mCurrent == mEopPlayItem) {
+ mCurrent = getNextValidPlayItemLocked(END_OF_PLAYLIST, 1);
+ updatePlayerDataSourceLocked();
+ }
+ // fall through
+ case MediaPlaylistAgent.REPEAT_MODE_NONE:
+ mPlayer.loopCurrent(false);
+ break;
+ }
+ }
+ notifyRepeatModeChanged();
+ }
+
+ @Override
+ public int getShuffleMode() {
+ return mShuffleMode;
+ }
+
+ @Override
+ public void setShuffleMode(int shuffleMode) {
+ if (shuffleMode < MediaPlaylistAgent.SHUFFLE_MODE_NONE
+ || shuffleMode > MediaPlaylistAgent.SHUFFLE_MODE_GROUP) {
+ return;
+ }
+ synchronized (mLock) {
+ if (mShuffleMode == shuffleMode) {
+ return;
+ }
+ mShuffleMode = shuffleMode;
+ applyShuffleModeLocked();
+ updateCurrentIfNeededLocked();
+ }
+ notifyShuffleModeChanged();
+ }
+
+ @VisibleForTesting
+ int getCurShuffledIndex() {
+ return hasValidItem() ? mCurrent.shuffledIdx : NO_VALID_ITEMS;
+ }
+
+ private boolean hasValidItem() {
+ return mCurrent != null;
+ }
+
+ private DataSourceDesc retrieveDataSourceDescLocked(MediaItem2 item) {
+ DataSourceDesc dsd = item.getDataSourceDesc();
+ if (dsd != null) {
+ mItemDsdMap.put(item, dsd);
+ return dsd;
+ }
+ dsd = mItemDsdMap.get(item);
+ if (dsd != null) {
+ return dsd;
+ }
+ OnDataSourceMissingHelper helper = mDsmHelper;
+ if (helper != null) {
+ // TODO: Do not call onDataSourceMissing with the lock (b/74090741).
+ dsd = helper.onDataSourceMissing(mSessionImpl.getInstance(), item);
+ if (dsd != null) {
+ mItemDsdMap.put(item, dsd);
+ }
+ }
+ return dsd;
+ }
+
+ // TODO: consider to call updateCurrentIfNeededLocked inside (b/74090741)
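+ // Walks mShuffledList from curShuffledIdx in the given direction, skipping entries whose
+ // DataSourceDesc cannot be resolved. Returns mEopPlayItem when a non-repeating playlist runs
+ // past its end before all entries have been tried, and null when no playable item is found.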
+ private PlayItem getNextValidPlayItemLocked(int curShuffledIdx, int direction) {
+ int size = mPlaylist.size();
+ if (curShuffledIdx == END_OF_PLAYLIST) {
+ curShuffledIdx = (direction > 0) ? -1 : size;
+ }
+ for (int i = 0; i < size; i++) {
+ curShuffledIdx += direction;
+ if (curShuffledIdx < 0 || curShuffledIdx >= mPlaylist.size()) {
+ if (mRepeatMode == REPEAT_MODE_NONE) {
+ return (i == size - 1) ? null : mEopPlayItem;
+ } else {
+ curShuffledIdx = curShuffledIdx < 0 ? mPlaylist.size() - 1 : 0;
+ }
+ }
+ DataSourceDesc dsd = retrieveDataSourceDescLocked(mShuffledList.get(curShuffledIdx));
+ if (dsd != null) {
+ return new PlayItem(curShuffledIdx, dsd);
+ }
+ }
+ return null;
+ }
+
+ private void updateCurrentIfNeededLocked() {
+ if (!hasValidItem() || mCurrent.isValid()) {
+ return;
+ }
+ int shuffledIdx = mShuffledList.indexOf(mCurrent.mediaItem);
+ if (shuffledIdx >= 0) {
+ // The current item is still in the list (an item was added elsewhere); just fix up its index.
+ mCurrent.shuffledIdx = shuffledIdx;
+ return;
+ }
+
+ if (mCurrent.shuffledIdx >= mShuffledList.size()) {
+ mCurrent = getNextValidPlayItemLocked(mShuffledList.size() - 1, 1);
+ } else {
+ mCurrent.mediaItem = mShuffledList.get(mCurrent.shuffledIdx);
+ if (retrieveDataSourceDescLocked(mCurrent.mediaItem) == null) {
+ mCurrent = getNextValidPlayItemLocked(mCurrent.shuffledIdx, 1);
+ }
+ }
+ updatePlayerDataSourceLocked();
+ return;
+ }
+
+ private void updatePlayerDataSourceLocked() {
+ if (mCurrent == null || mCurrent == mEopPlayItem) {
+ return;
+ }
+ if (mPlayer.getCurrentDataSource() != mCurrent.dsd) {
+ mPlayer.setDataSource(mCurrent.dsd);
+ mPlayer.loopCurrent(mRepeatMode == MediaPlaylistAgent.REPEAT_MODE_ONE);
+ }
+ // TODO: Call setNextDataSource (b/74090741)
+ }
+
+ private void applyShuffleModeLocked() {
+ mShuffledList.clear();
+ mShuffledList.addAll(mPlaylist);
+ if (mShuffleMode == MediaPlaylistAgent.SHUFFLE_MODE_ALL
+ || mShuffleMode == MediaPlaylistAgent.SHUFFLE_MODE_GROUP) {
+ Collections.shuffle(mShuffledList);
+ }
+ }
+
+ // Clamps value to [0, size]
+ private static int clamp(int value, int size) {
+ if (value < 0) {
+ return 0;
+ }
+ return (value > size) ? size : value;
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java b/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
new file mode 100644
index 0000000..a5cf8c4
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/SessionToken2Impl.java
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import static android.media.SessionToken2.TYPE_SESSION;
+import static android.media.SessionToken2.TYPE_SESSION_SERVICE;
+import static android.media.SessionToken2.TYPE_LIBRARY_SERVICE;
+
+import android.content.Context;
+import android.content.Intent;
+import android.content.pm.PackageManager;
+import android.content.pm.PackageManager.NameNotFoundException;
+import android.content.pm.ResolveInfo;
+import android.media.MediaLibraryService2;
+import android.media.MediaSessionService2;
+import android.media.SessionToken2;
+import android.media.SessionToken2.TokenType;
+import android.media.update.SessionToken2Provider;
+import android.os.Bundle;
+import android.os.IBinder;
+import android.text.TextUtils;
+
+import java.util.List;
+
+public class SessionToken2Impl implements SessionToken2Provider {
+ private static final String KEY_UID = "android.media.token.uid";
+ private static final String KEY_TYPE = "android.media.token.type";
+ private static final String KEY_PACKAGE_NAME = "android.media.token.package_name";
+ private static final String KEY_SERVICE_NAME = "android.media.token.service_name";
+ private static final String KEY_ID = "android.media.token.id";
+ private static final String KEY_SESSION_BINDER = "android.media.token.session_binder";
+
+ private final SessionToken2 mInstance;
+ private final int mUid;
+ private final @TokenType int mType;
+ private final String mPackageName;
+ private final String mServiceName;
+ private final String mId;
+ private final IMediaSession2 mSessionBinder;
+
+ /**
+ * Public constructor for legacy support (i.e. a browser can try connecting to any browser
+ * service if it knows the service name).
+ */
+ public SessionToken2Impl(Context context, SessionToken2 instance,
+ String packageName, String serviceName, int uid) {
+ if (TextUtils.isEmpty(packageName)) {
+ throw new IllegalArgumentException("packageName shouldn't be empty");
+ }
+ if (TextUtils.isEmpty(serviceName)) {
+ throw new IllegalArgumentException("serviceName shouldn't be empty");
+ }
+ mInstance = instance;
+ // Calculate uid if it's not specified.
+ final PackageManager manager = context.getPackageManager();
+ if (uid < 0) {
+ try {
+ uid = manager.getPackageUid(packageName, 0);
+ } catch (NameNotFoundException e) {
+ throw new IllegalArgumentException("Cannot find package " + packageName);
+ }
+ }
+ mUid = uid;
+
+ // Infer id and type from package name and service name
+ // TODO(jaewan): Handle multi-user.
+ String id = getSessionIdFromService(manager, MediaLibraryService2.SERVICE_INTERFACE,
+ packageName, serviceName);
+ if (id != null) {
+ mId = id;
+ mType = TYPE_LIBRARY_SERVICE;
+ } else {
+ // retry with session service
+ mId = getSessionIdFromService(manager, MediaSessionService2.SERVICE_INTERFACE,
+ packageName, serviceName);
+ mType = TYPE_SESSION_SERVICE;
+ }
+ if (mId == null) {
+ throw new IllegalArgumentException("service " + serviceName + " doesn't implement"
+ + " session service nor library service. Use service's full name.");
+ }
+ mPackageName = packageName;
+ mServiceName = serviceName;
+ mSessionBinder = null;
+ }
+
+ SessionToken2Impl(int uid, int type, String packageName, String serviceName, String id,
+ IMediaSession2 sessionBinder) {
+ // TODO(jaewan): Add sanity check (b/73863865)
+ mUid = uid;
+ mType = type;
+ mPackageName = packageName;
+ mServiceName = serviceName;
+ mId = id;
+ mSessionBinder = sessionBinder;
+ mInstance = new SessionToken2(this);
+ }
+
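+ // Looks up the session ID declared in the service's manifest meta-data by querying for
+ // services that handle the given interface and matching the service name; returns null when
+ // the named service does not advertise that interface.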
+ private static String getSessionIdFromService(PackageManager manager, String serviceInterface,
+ String packageName, String serviceName) {
+ Intent serviceIntent = new Intent(serviceInterface);
+ serviceIntent.setPackage(packageName);
+ // Use queryIntentServices to find services that handle the given service interface.
+ // We cannot use resolveService with an intent that specifies the class name, because
+ // resolveService ignores the action when Intent.setClassName() is set.
+ List<ResolveInfo> list = manager.queryIntentServices(
+ serviceIntent, PackageManager.GET_META_DATA);
+ if (list != null) {
+ for (int i = 0; i < list.size(); i++) {
+ ResolveInfo resolveInfo = list.get(i);
+ if (resolveInfo == null || resolveInfo.serviceInfo == null) {
+ continue;
+ }
+ if (TextUtils.equals(resolveInfo.serviceInfo.name, serviceName)) {
+ return getSessionId(resolveInfo);
+ }
+ }
+ }
+ return null;
+ }
+
+ public static String getSessionId(ResolveInfo resolveInfo) {
+ if (resolveInfo == null || resolveInfo.serviceInfo == null) {
+ return null;
+ } else if (resolveInfo.serviceInfo.metaData == null) {
+ return "";
+ } else {
+ return resolveInfo.serviceInfo.metaData.getString(
+ MediaSessionService2.SERVICE_META_DATA, "");
+ }
+ }
+
+ public SessionToken2 getInstance() {
+ return mInstance;
+ }
+
+ @Override
+ public String getPackageName_impl() {
+ return mPackageName;
+ }
+
+ @Override
+ public int getUid_impl() {
+ return mUid;
+ }
+
+ @Override
+ public String getId_imp() {
+ return mId;
+ }
+
+ @Override
+ public int getType_impl() {
+ return mType;
+ }
+
+ String getServiceName() {
+ return mServiceName;
+ }
+
+ IMediaSession2 getSessionBinder() {
+ return mSessionBinder;
+ }
+
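+ // Recreates a token from a bundle produced by toBundle_impl(), enforcing that session tokens
+ // carry a session binder and that service tokens carry a service name before constructing the
+ // implementation.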
+ public static SessionToken2 fromBundle_impl(Bundle bundle) {
+ if (bundle == null) {
+ return null;
+ }
+ final int uid = bundle.getInt(KEY_UID);
+ final @TokenType int type = bundle.getInt(KEY_TYPE, -1);
+ final String packageName = bundle.getString(KEY_PACKAGE_NAME);
+ final String serviceName = bundle.getString(KEY_SERVICE_NAME);
+ final String id = bundle.getString(KEY_ID);
+ final IBinder sessionBinder = bundle.getBinder(KEY_SESSION_BINDER);
+
+ // Sanity check.
+ switch (type) {
+ case TYPE_SESSION:
+ if (sessionBinder == null) {
+ throw new IllegalArgumentException("Unexpected sessionBinder for session,"
+ + " binder=" + sessionBinder);
+ }
+ break;
+ case TYPE_SESSION_SERVICE:
+ case TYPE_LIBRARY_SERVICE:
+ if (TextUtils.isEmpty(serviceName)) {
+ throw new IllegalArgumentException("Session service needs service name");
+ }
+ break;
+ default:
+ throw new IllegalArgumentException("Invalid type");
+ }
+ if (TextUtils.isEmpty(packageName) || id == null) {
+ throw new IllegalArgumentException("Package name nor ID cannot be null.");
+ }
+ return new SessionToken2Impl(uid, type, packageName, serviceName, id,
+ sessionBinder != null ? IMediaSession2.Stub.asInterface(sessionBinder) : null)
+ .getInstance();
+ }
+
+ @Override
+ public Bundle toBundle_impl() {
+ Bundle bundle = new Bundle();
+ bundle.putInt(KEY_UID, mUid);
+ bundle.putString(KEY_PACKAGE_NAME, mPackageName);
+ bundle.putString(KEY_SERVICE_NAME, mServiceName);
+ bundle.putString(KEY_ID, mId);
+ bundle.putInt(KEY_TYPE, mType);
+ bundle.putBinder(KEY_SESSION_BINDER,
+ mSessionBinder != null ? mSessionBinder.asBinder() : null);
+ return bundle;
+ }
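+
+ // Illustrative sketch, not part of this change: toBundle_impl() and fromBundle_impl()
+ // are intended to round-trip a token. A hypothetical caller could verify this with:
+ //
+ //   Bundle bundle = SessionToken2Impl.from(token).toBundle_impl();
+ //   SessionToken2 restored = SessionToken2Impl.fromBundle_impl(bundle);
+ //   // equals_impl() compares uid, package, service, id and type, so
+ //   // restored.equals(token) is expected to hold.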
+
+ @Override
+ public int hashCode_impl() {
+ final int prime = 31;
+ return mType
+ + prime * (mUid
+ + prime * (mPackageName.hashCode()
+ + prime * (mId.hashCode()
+ + prime * (mServiceName != null ? mServiceName.hashCode() : 0))));
+ }
+
+ @Override
+ public boolean equals_impl(Object obj) {
+ if (!(obj instanceof SessionToken2)) {
+ return false;
+ }
+ SessionToken2Impl other = from((SessionToken2) obj);
+ return mUid == other.mUid
+ && TextUtils.equals(mPackageName, other.mPackageName)
+ && TextUtils.equals(mServiceName, other.mServiceName)
+ && TextUtils.equals(mId, other.mId)
+ && mType == other.mType;
+ }
+
+ @Override
+ public String toString_impl() {
+ return "SessionToken {pkg=" + mPackageName + " id=" + mId + " type=" + mType
+ + " service=" + mServiceName + " binder=" + mSessionBinder + "}";
+ }
+
+ static SessionToken2Impl from(SessionToken2 token) {
+ return ((SessionToken2Impl) token.getProvider());
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/VolumeProvider2Impl.java b/packages/MediaComponents/src/com/android/media/VolumeProvider2Impl.java
new file mode 100644
index 0000000..bf22e1b
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/VolumeProvider2Impl.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.media;
+
+import static android.media.VolumeProvider2.VOLUME_CONTROL_ABSOLUTE;
+import static android.media.VolumeProvider2.VOLUME_CONTROL_FIXED;
+import static android.media.VolumeProvider2.VOLUME_CONTROL_RELATIVE;
+
+import android.media.VolumeProvider2;
+import android.media.update.VolumeProvider2Provider;
+
+public class VolumeProvider2Impl implements VolumeProvider2Provider {
+ private final VolumeProvider2 mInstance;
+ private final int mControlType;
+ private final int mMaxVolume;
+
+ private int mCurrentVolume;
+ private Callback mCallback;
+
+ public VolumeProvider2Impl(VolumeProvider2 instance,
+ @VolumeProvider2.ControlType int controlType, int maxVolume, int currentVolume) {
+ if (controlType != VOLUME_CONTROL_FIXED && controlType != VOLUME_CONTROL_RELATIVE
+ && controlType != VOLUME_CONTROL_ABSOLUTE) {
+ throw new IllegalArgumentException("wrong controlType " + controlType);
+ }
+ if (maxVolume < 0 || currentVolume < 0) {
+ throw new IllegalArgumentException("volume shouldn't be negative"
+ + ", maxVolume=" + maxVolume + ", currentVolume=" + currentVolume);
+ }
+ if (currentVolume > maxVolume) {
+ throw new IllegalArgumentException("currentVolume shouldn't be greater than maxVolume"
+ + ", maxVolume=" + maxVolume + ", currentVolume=" + currentVolume);
+ }
+ mInstance = instance;
+ mControlType = controlType;
+ mMaxVolume = maxVolume;
+ mCurrentVolume = currentVolume;
+ }
+
+ @Override
+ public int getControlType_impl() {
+ return mControlType;
+ }
+
+ @Override
+ public int getMaxVolume_impl() {
+ return mMaxVolume;
+ }
+
+ @Override
+ public int getCurrentVolume_impl() {
+ return mCurrentVolume;
+ }
+
+ @Override
+ public void setCurrentVolume_impl(int currentVolume) {
+ if (currentVolume < 0) {
+ throw new IllegalArgumentException("currentVolume shouldn't be negative"
+ + ", currentVolume=" + currentVolume);
+ }
+ mCurrentVolume = currentVolume;
+ if (mCallback != null) {
+ mCallback.onVolumeChanged(mInstance);
+ }
+ }
+
+ /**
+ * Sets a callback to receive volume changes.
+ */
+ public void setCallback(Callback callback) {
+ mCallback = callback;
+ }
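+
+ // Illustrative sketch, not part of this change: a session backing remote playback could
+ // observe app-driven volume updates through the Callback below. Here impl is this
+ // provider instance; session and notifyRemoteVolumeChanged() are hypothetical:
+ //
+ //   impl.setCallback(new VolumeProvider2Impl.Callback() {
+ //       @Override
+ //       public void onVolumeChanged(VolumeProvider2 volumeProvider) {
+ //           session.notifyRemoteVolumeChanged(impl.getCurrentVolume_impl());
+ //       }
+ //   });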
+
+ /**
+ * Listens for changes to the volume.
+ */
+ public static abstract class Callback {
+ public abstract void onVolumeChanged(VolumeProvider2 volumeProvider);
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/subtitle/ClosedCaptionRenderer.java b/packages/MediaComponents/src/com/android/media/subtitle/ClosedCaptionRenderer.java
new file mode 100644
index 0000000..ff7eec9
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/subtitle/ClosedCaptionRenderer.java
@@ -0,0 +1,1501 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.subtitle;
+
+import android.content.Context;
+import android.content.res.Resources;
+import android.graphics.Canvas;
+import android.graphics.Color;
+import android.graphics.Paint;
+import android.graphics.Rect;
+import android.graphics.Typeface;
+import android.media.MediaFormat;
+import android.text.Spannable;
+import android.text.SpannableStringBuilder;
+import android.text.TextPaint;
+import android.text.style.CharacterStyle;
+import android.text.style.StyleSpan;
+import android.text.style.UnderlineSpan;
+import android.text.style.UpdateAppearance;
+import android.util.AttributeSet;
+import android.util.Log;
+import android.util.TypedValue;
+import android.view.Gravity;
+import android.view.View;
+import android.view.ViewGroup;
+import android.view.accessibility.CaptioningManager;
+import android.view.accessibility.CaptioningManager.CaptionStyle;
+import android.view.accessibility.CaptioningManager.CaptioningChangeListener;
+import android.widget.LinearLayout;
+import android.widget.TextView;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Vector;
+
+// Note: This is forked from android.media.ClosedCaptionRenderer as of Android P.
+public class ClosedCaptionRenderer extends SubtitleController.Renderer {
+ private final Context mContext;
+ private Cea608CCWidget mCCWidget;
+
+ public ClosedCaptionRenderer(Context context) {
+ mContext = context;
+ }
+
+ @Override
+ public boolean supports(MediaFormat format) {
+ if (format.containsKey(MediaFormat.KEY_MIME)) {
+ String mimeType = format.getString(MediaFormat.KEY_MIME);
+ return MediaFormat.MIMETYPE_TEXT_CEA_608.equals(mimeType);
+ }
+ return false;
+ }
+
+ @Override
+ public SubtitleTrack createTrack(MediaFormat format) {
+ String mimeType = format.getString(MediaFormat.KEY_MIME);
+ if (MediaFormat.MIMETYPE_TEXT_CEA_608.equals(mimeType)) {
+ if (mCCWidget == null) {
+ mCCWidget = new Cea608CCWidget(mContext);
+ }
+ return new Cea608CaptionTrack(mCCWidget, format);
+ }
+ throw new RuntimeException("No matching format: " + format.toString());
+ }
+}
+
+class Cea608CaptionTrack extends SubtitleTrack {
+ private final Cea608CCParser mCCParser;
+ private final Cea608CCWidget mRenderingWidget;
+
+ Cea608CaptionTrack(Cea608CCWidget renderingWidget, MediaFormat format) {
+ super(format);
+
+ mRenderingWidget = renderingWidget;
+ mCCParser = new Cea608CCParser(mRenderingWidget);
+ }
+
+ @Override
+ public void onData(byte[] data, boolean eos, long runID) {
+ mCCParser.parse(data);
+ }
+
+ @Override
+ public RenderingWidget getRenderingWidget() {
+ return mRenderingWidget;
+ }
+
+ @Override
+ public void updateView(Vector<Cue> activeCues) {
+ // Overriding with a no-op; CC rendering bypasses this.
+ }
+}
+
+/**
+ * Abstract widget class to render a closed caption track.
+ */
+abstract class ClosedCaptionWidget extends ViewGroup implements SubtitleTrack.RenderingWidget {
+
+ interface ClosedCaptionLayout {
+ void setCaptionStyle(CaptionStyle captionStyle);
+ void setFontScale(float scale);
+ }
+
+ private static final CaptionStyle DEFAULT_CAPTION_STYLE = CaptionStyle.DEFAULT;
+
+ /** Captioning manager, used to obtain and track caption properties. */
+ private final CaptioningManager mManager;
+
+ /** Current caption style. */
+ protected CaptionStyle mCaptionStyle;
+
+ /** Callback for rendering changes. */
+ protected OnChangedListener mListener;
+
+ /** Concrete layout of CC. */
+ protected ClosedCaptionLayout mClosedCaptionLayout;
+
+ /** Whether a caption style change listener is registered. */
+ private boolean mHasChangeListener;
+
+ public ClosedCaptionWidget(Context context) {
+ this(context, null);
+ }
+
+ public ClosedCaptionWidget(Context context, AttributeSet attrs) {
+ this(context, attrs, 0);
+ }
+
+ public ClosedCaptionWidget(Context context, AttributeSet attrs, int defStyle) {
+ this(context, attrs, defStyle, 0);
+ }
+
+ public ClosedCaptionWidget(Context context, AttributeSet attrs, int defStyleAttr,
+ int defStyleRes) {
+ super(context, attrs, defStyleAttr, defStyleRes);
+
+ // Cannot render text over video when layer type is hardware.
+ setLayerType(View.LAYER_TYPE_SOFTWARE, null);
+
+ mManager = (CaptioningManager) context.getSystemService(Context.CAPTIONING_SERVICE);
+ mCaptionStyle = DEFAULT_CAPTION_STYLE.applyStyle(mManager.getUserStyle());
+
+ mClosedCaptionLayout = createCaptionLayout(context);
+ mClosedCaptionLayout.setCaptionStyle(mCaptionStyle);
+ mClosedCaptionLayout.setFontScale(mManager.getFontScale());
+ addView((ViewGroup) mClosedCaptionLayout, LayoutParams.MATCH_PARENT,
+ LayoutParams.MATCH_PARENT);
+
+ requestLayout();
+ }
+
+ public abstract ClosedCaptionLayout createCaptionLayout(Context context);
+
+ @Override
+ public void setOnChangedListener(OnChangedListener listener) {
+ mListener = listener;
+ }
+
+ @Override
+ public void setSize(int width, int height) {
+ final int widthSpec = MeasureSpec.makeMeasureSpec(width, MeasureSpec.EXACTLY);
+ final int heightSpec = MeasureSpec.makeMeasureSpec(height, MeasureSpec.EXACTLY);
+
+ measure(widthSpec, heightSpec);
+ layout(0, 0, width, height);
+ }
+
+ @Override
+ public void setVisible(boolean visible) {
+ if (visible) {
+ setVisibility(View.VISIBLE);
+ } else {
+ setVisibility(View.GONE);
+ }
+
+ manageChangeListener();
+ }
+
+ @Override
+ public void onAttachedToWindow() {
+ super.onAttachedToWindow();
+
+ manageChangeListener();
+ }
+
+ @Override
+ public void onDetachedFromWindow() {
+ super.onDetachedFromWindow();
+
+ manageChangeListener();
+ }
+
+ @Override
+ protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
+ super.onMeasure(widthMeasureSpec, heightMeasureSpec);
+ ((ViewGroup) mClosedCaptionLayout).measure(widthMeasureSpec, heightMeasureSpec);
+ }
+
+ @Override
+ protected void onLayout(boolean changed, int l, int t, int r, int b) {
+ ((ViewGroup) mClosedCaptionLayout).layout(l, t, r, b);
+ }
+
+ /** Listener that reacts to changes in the user's caption style and font scale. */
+ private final CaptioningChangeListener mCaptioningListener = new CaptioningChangeListener() {
+ @Override
+ public void onUserStyleChanged(CaptionStyle userStyle) {
+ mCaptionStyle = DEFAULT_CAPTION_STYLE.applyStyle(userStyle);
+ mClosedCaptionLayout.setCaptionStyle(mCaptionStyle);
+ }
+
+ @Override
+ public void onFontScaleChanged(float fontScale) {
+ mClosedCaptionLayout.setFontScale(fontScale);
+ }
+ };
+
+ private void manageChangeListener() {
+ final boolean needsListener = isAttachedToWindow() && getVisibility() == View.VISIBLE;
+ if (mHasChangeListener != needsListener) {
+ mHasChangeListener = needsListener;
+
+ if (needsListener) {
+ mManager.addCaptioningChangeListener(mCaptioningListener);
+ } else {
+ mManager.removeCaptioningChangeListener(mCaptioningListener);
+ }
+ }
+ }
+}
+
+/**
+ * Cea608CCParser processes CEA-608 closed caption data.
+ *
+ * It calls back into its DisplayListener with styled text for rendering
+ * whenever the displayed caption changes.
+ */
+class Cea608CCParser {
+ public static final int MAX_ROWS = 15;
+ public static final int MAX_COLS = 32;
+
+ private static final String TAG = "Cea608CCParser";
+ private static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
+
+ private static final int INVALID = -1;
+
+ // EIA-CEA-608: Table 70 - Control Codes
+ private static final int RCL = 0x20;
+ private static final int BS = 0x21;
+ private static final int AOF = 0x22;
+ private static final int AON = 0x23;
+ private static final int DER = 0x24;
+ private static final int RU2 = 0x25;
+ private static final int RU3 = 0x26;
+ private static final int RU4 = 0x27;
+ private static final int FON = 0x28;
+ private static final int RDC = 0x29;
+ private static final int TR = 0x2a;
+ private static final int RTD = 0x2b;
+ private static final int EDM = 0x2c;
+ private static final int CR = 0x2d;
+ private static final int ENM = 0x2e;
+ private static final int EOC = 0x2f;
+
+ // Transparent Space
+ private static final char TS = '\u00A0';
+
+ // Captioning Modes
+ private static final int MODE_UNKNOWN = 0;
+ private static final int MODE_PAINT_ON = 1;
+ private static final int MODE_ROLL_UP = 2;
+ private static final int MODE_POP_ON = 3;
+ private static final int MODE_TEXT = 4;
+
+ private final DisplayListener mListener;
+
+ private int mMode = MODE_PAINT_ON;
+ private int mRollUpSize = 4;
+ private int mPrevCtrlCode = INVALID;
+
+ private CCMemory mDisplay = new CCMemory();
+ private CCMemory mNonDisplay = new CCMemory();
+ private CCMemory mTextMem = new CCMemory();
+
+ Cea608CCParser(DisplayListener listener) {
+ mListener = listener;
+ }
+
+ public void parse(byte[] data) {
+ CCData[] ccData = CCData.fromByteArray(data);
+
+ for (int i = 0; i < ccData.length; i++) {
+ if (DEBUG) {
+ Log.d(TAG, ccData[i].toString());
+ }
+
+ if (handleCtrlCode(ccData[i])
+ || handleTabOffsets(ccData[i])
+ || handlePACCode(ccData[i])
+ || handleMidRowCode(ccData[i])) {
+ continue;
+ }
+
+ handleDisplayableChars(ccData[i]);
+ }
+ }
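+
+ // Illustrative sketch, not part of this change: parse() consumes data packed as
+ // consecutive 3-byte tuples {type, data1, data2}, matching CCData.fromByteArray().
+ // For example, with parser as a Cea608CCParser instance, a minimal pop-on sequence
+ // could look like:
+ //
+ //   byte[] sample = {
+ //       0, 0x14, 0x20,   // RCL: select pop-on mode (writes go to non-display memory)
+ //       0, 0x48, 0x49,   // basic characters "HI"
+ //       0, 0x14, 0x2f,   // EOC: swap memories and show the caption
+ //   };
+ //   parser.parse(sample);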
+
+ interface DisplayListener {
+ void onDisplayChanged(SpannableStringBuilder[] styledTexts);
+ CaptionStyle getCaptionStyle();
+ }
+
+ private CCMemory getMemory() {
+ // get the CC memory to operate on for current mode
+ switch (mMode) {
+ case MODE_POP_ON:
+ return mNonDisplay;
+ case MODE_TEXT:
+ // TODO(chz): only caption mode is supported for now;
+ // in text mode, dump everything to text memory.
+ return mTextMem;
+ case MODE_PAINT_ON:
+ case MODE_ROLL_UP:
+ return mDisplay;
+ default:
+ Log.w(TAG, "unrecoginized mode: " + mMode);
+ }
+ return mDisplay;
+ }
+
+ private boolean handleDisplayableChars(CCData ccData) {
+ if (!ccData.isDisplayableChar()) {
+ return false;
+ }
+
+ // Extended char includes 1 automatic backspace
+ if (ccData.isExtendedChar()) {
+ getMemory().bs();
+ }
+
+ getMemory().writeText(ccData.getDisplayText());
+
+ if (mMode == MODE_PAINT_ON || mMode == MODE_ROLL_UP) {
+ updateDisplay();
+ }
+
+ return true;
+ }
+
+ private boolean handleMidRowCode(CCData ccData) {
+ StyleCode m = ccData.getMidRow();
+ if (m != null) {
+ getMemory().writeMidRowCode(m);
+ return true;
+ }
+ return false;
+ }
+
+ private boolean handlePACCode(CCData ccData) {
+ PAC pac = ccData.getPAC();
+
+ if (pac != null) {
+ if (mMode == MODE_ROLL_UP) {
+ getMemory().moveBaselineTo(pac.getRow(), mRollUpSize);
+ }
+ getMemory().writePAC(pac);
+ return true;
+ }
+
+ return false;
+ }
+
+ private boolean handleTabOffsets(CCData ccData) {
+ int tabs = ccData.getTabOffset();
+
+ if (tabs > 0) {
+ getMemory().tab(tabs);
+ return true;
+ }
+
+ return false;
+ }
+
+ private boolean handleCtrlCode(CCData ccData) {
+ int ctrlCode = ccData.getCtrlCode();
+
+ if (mPrevCtrlCode != INVALID && mPrevCtrlCode == ctrlCode) {
+ // discard double ctrl codes (but if there's a 3rd one, we still take that)
+ mPrevCtrlCode = INVALID;
+ return true;
+ }
+
+ switch(ctrlCode) {
+ case RCL:
+ // select pop-on style
+ mMode = MODE_POP_ON;
+ break;
+ case BS:
+ getMemory().bs();
+ break;
+ case DER:
+ getMemory().der();
+ break;
+ case RU2:
+ case RU3:
+ case RU4:
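+ // RU2/RU3/RU4 are 0x25/0x26/0x27, so this yields a 2-, 3- or 4-row roll-up window.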
+ mRollUpSize = (ctrlCode - 0x23);
+ // erase memory if currently in other style
+ if (mMode != MODE_ROLL_UP) {
+ mDisplay.erase();
+ mNonDisplay.erase();
+ }
+ // select roll-up style
+ mMode = MODE_ROLL_UP;
+ break;
+ case FON:
+ Log.i(TAG, "Flash On");
+ break;
+ case RDC:
+ // select paint-on style
+ mMode = MODE_PAINT_ON;
+ break;
+ case TR:
+ mMode = MODE_TEXT;
+ mTextMem.erase();
+ break;
+ case RTD:
+ mMode = MODE_TEXT;
+ break;
+ case EDM:
+ // erase display memory
+ mDisplay.erase();
+ updateDisplay();
+ break;
+ case CR:
+ if (mMode == MODE_ROLL_UP) {
+ getMemory().rollUp(mRollUpSize);
+ } else {
+ getMemory().cr();
+ }
+ if (mMode == MODE_ROLL_UP) {
+ updateDisplay();
+ }
+ break;
+ case ENM:
+ // erase non-display memory
+ mNonDisplay.erase();
+ break;
+ case EOC:
+ // swap display/non-display memory
+ swapMemory();
+ // switch to pop-on style
+ mMode = MODE_POP_ON;
+ updateDisplay();
+ break;
+ case INVALID:
+ default:
+ mPrevCtrlCode = INVALID;
+ return false;
+ }
+
+ mPrevCtrlCode = ctrlCode;
+
+ // handled
+ return true;
+ }
+
+ private void updateDisplay() {
+ if (mListener != null) {
+ CaptionStyle captionStyle = mListener.getCaptionStyle();
+ mListener.onDisplayChanged(mDisplay.getStyledText(captionStyle));
+ }
+ }
+
+ private void swapMemory() {
+ CCMemory temp = mDisplay;
+ mDisplay = mNonDisplay;
+ mNonDisplay = temp;
+ }
+
+ private static class StyleCode {
+ static final int COLOR_WHITE = 0;
+ static final int COLOR_GREEN = 1;
+ static final int COLOR_BLUE = 2;
+ static final int COLOR_CYAN = 3;
+ static final int COLOR_RED = 4;
+ static final int COLOR_YELLOW = 5;
+ static final int COLOR_MAGENTA = 6;
+ static final int COLOR_INVALID = 7;
+
+ static final int STYLE_ITALICS = 0x00000001;
+ static final int STYLE_UNDERLINE = 0x00000002;
+
+ static final String[] mColorMap = {
+ "WHITE", "GREEN", "BLUE", "CYAN", "RED", "YELLOW", "MAGENTA", "INVALID"
+ };
+
+ final int mStyle;
+ final int mColor;
+
+ static StyleCode fromByte(byte data2) {
+ int style = 0;
+ int color = (data2 >> 1) & 0x7;
+
+ if ((data2 & 0x1) != 0) {
+ style |= STYLE_UNDERLINE;
+ }
+
+ if (color == COLOR_INVALID) {
+ // WHITE ITALICS
+ color = COLOR_WHITE;
+ style |= STYLE_ITALICS;
+ }
+
+ return new StyleCode(style, color);
+ }
+
+ StyleCode(int style, int color) {
+ mStyle = style;
+ mColor = color;
+ }
+
+ boolean isItalics() {
+ return (mStyle & STYLE_ITALICS) != 0;
+ }
+
+ boolean isUnderline() {
+ return (mStyle & STYLE_UNDERLINE) != 0;
+ }
+
+ int getColor() {
+ return mColor;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder str = new StringBuilder();
+ str.append("{");
+ str.append(mColorMap[mColor]);
+ if ((mStyle & STYLE_ITALICS) != 0) {
+ str.append(", ITALICS");
+ }
+ if ((mStyle & STYLE_UNDERLINE) != 0) {
+ str.append(", UNDERLINE");
+ }
+ str.append("}");
+
+ return str.toString();
+ }
+ }
+
+ private static class PAC extends StyleCode {
+ final int mRow;
+ final int mCol;
+
+ static PAC fromBytes(byte data1, byte data2) {
+ int[] rowTable = {11, 1, 3, 12, 14, 5, 7, 9};
+ int row = rowTable[data1 & 0x07] + ((data2 & 0x20) >> 5);
+ int style = 0;
+ if ((data2 & 1) != 0) {
+ style |= STYLE_UNDERLINE;
+ }
+ if ((data2 & 0x10) != 0) {
+ // indent code
+ int indent = (data2 >> 1) & 0x7;
+ return new PAC(row, indent * 4, style, COLOR_WHITE);
+ } else {
+ // style code
+ int color = (data2 >> 1) & 0x7;
+
+ if (color == COLOR_INVALID) {
+ // WHITE ITALICS
+ color = COLOR_WHITE;
+ style |= STYLE_ITALICS;
+ }
+ return new PAC(row, -1, style, color);
+ }
+ }
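+
+ // Illustrative sketch, not part of this change: for the pair data1=0x11, data2=0x52,
+ // fromBytes() resolves rowTable[1] = 1 with no extra row bit, sees the indent bit
+ // (0x10) with indent value 1, and returns an indent PAC:
+ //
+ //   PAC pac = PAC.fromBytes((byte) 0x11, (byte) 0x52);
+ //   // pac.getRow() == 1, pac.getCol() == 4, color == COLOR_WHITE, no underline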
+
+ PAC(int row, int col, int style, int color) {
+ super(style, color);
+ mRow = row;
+ mCol = col;
+ }
+
+ boolean isIndentPAC() {
+ return (mCol >= 0);
+ }
+
+ int getRow() {
+ return mRow;
+ }
+
+ int getCol() {
+ return mCol;
+ }
+
+ @Override
+ public String toString() {
+ return String.format("{%d, %d}, %s",
+ mRow, mCol, super.toString());
+ }
+ }
+
+ /**
+ * Mutable version of BackgroundColorSpan to facilitate text rendering with edge styles.
+ */
+ public static class MutableBackgroundColorSpan extends CharacterStyle
+ implements UpdateAppearance {
+ private int mColor;
+
+ public MutableBackgroundColorSpan(int color) {
+ mColor = color;
+ }
+
+ public void setBackgroundColor(int color) {
+ mColor = color;
+ }
+
+ public int getBackgroundColor() {
+ return mColor;
+ }
+
+ @Override
+ public void updateDrawState(TextPaint ds) {
+ ds.bgColor = mColor;
+ }
+ }
+
+ /* CCLineBuilder keeps track of displayable chars, as well as
+ * MidRow styles and PACs, for a single line of CC memory.
+ *
+ * It generates styled text via the getStyledText() method.
+ */
+ private static class CCLineBuilder {
+ private final StringBuilder mDisplayChars;
+ private final StyleCode[] mMidRowStyles;
+ private final StyleCode[] mPACStyles;
+
+ CCLineBuilder(String str) {
+ mDisplayChars = new StringBuilder(str);
+ mMidRowStyles = new StyleCode[mDisplayChars.length()];
+ mPACStyles = new StyleCode[mDisplayChars.length()];
+ }
+
+ void setCharAt(int index, char ch) {
+ mDisplayChars.setCharAt(index, ch);
+ mMidRowStyles[index] = null;
+ }
+
+ void setMidRowAt(int index, StyleCode m) {
+ mDisplayChars.setCharAt(index, ' ');
+ mMidRowStyles[index] = m;
+ }
+
+ void setPACAt(int index, PAC pac) {
+ mPACStyles[index] = pac;
+ }
+
+ char charAt(int index) {
+ return mDisplayChars.charAt(index);
+ }
+
+ int length() {
+ return mDisplayChars.length();
+ }
+
+ void applyStyleSpan(
+ SpannableStringBuilder styledText,
+ StyleCode s, int start, int end) {
+ if (s.isItalics()) {
+ styledText.setSpan(
+ new StyleSpan(android.graphics.Typeface.ITALIC),
+ start, end, Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
+ }
+ if (s.isUnderline()) {
+ styledText.setSpan(
+ new UnderlineSpan(),
+ start, end, Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
+ }
+ }
+
+ SpannableStringBuilder getStyledText(CaptionStyle captionStyle) {
+ SpannableStringBuilder styledText = new SpannableStringBuilder(mDisplayChars);
+ int start = -1, next = 0;
+ int styleStart = -1;
+ StyleCode curStyle = null;
+ while (next < mDisplayChars.length()) {
+ StyleCode newStyle = null;
+ if (mMidRowStyles[next] != null) {
+ // apply mid-row style change
+ newStyle = mMidRowStyles[next];
+ } else if (mPACStyles[next] != null
+ && (styleStart < 0 || start < 0)) {
+ // apply PAC style change, only if:
+ // 1. no style set, or
+ // 2. style set, but prev char is non-displayable
+ newStyle = mPACStyles[next];
+ }
+ if (newStyle != null) {
+ curStyle = newStyle;
+ if (styleStart >= 0 && start >= 0) {
+ applyStyleSpan(styledText, newStyle, styleStart, next);
+ }
+ styleStart = next;
+ }
+
+ if (mDisplayChars.charAt(next) != TS) {
+ if (start < 0) {
+ start = next;
+ }
+ } else if (start >= 0) {
+ int expandedStart = mDisplayChars.charAt(start) == ' ' ? start : start - 1;
+ int expandedEnd = mDisplayChars.charAt(next - 1) == ' ' ? next : next + 1;
+ styledText.setSpan(
+ new MutableBackgroundColorSpan(captionStyle.backgroundColor),
+ expandedStart, expandedEnd,
+ Spannable.SPAN_EXCLUSIVE_EXCLUSIVE);
+ if (styleStart >= 0) {
+ applyStyleSpan(styledText, curStyle, styleStart, expandedEnd);
+ }
+ start = -1;
+ }
+ next++;
+ }
+
+ return styledText;
+ }
+ }
+
+ /*
+ * CCMemory models a console-style display.
+ */
+ private static class CCMemory {
+ private final String mBlankLine;
+ private final CCLineBuilder[] mLines = new CCLineBuilder[MAX_ROWS + 2];
+ private int mRow;
+ private int mCol;
+
+ CCMemory() {
+ char[] blank = new char[MAX_COLS + 2];
+ Arrays.fill(blank, TS);
+ mBlankLine = new String(blank);
+ }
+
+ void erase() {
+ // erase all lines
+ for (int i = 0; i < mLines.length; i++) {
+ mLines[i] = null;
+ }
+ mRow = MAX_ROWS;
+ mCol = 1;
+ }
+
+ void der() {
+ if (mLines[mRow] != null) {
+ for (int i = 0; i < mCol; i++) {
+ if (mLines[mRow].charAt(i) != TS) {
+ for (int j = mCol; j < mLines[mRow].length(); j++) {
+ mLines[mRow].setCharAt(j, TS);
+ }
+ return;
+ }
+ }
+ mLines[mRow] = null;
+ }
+ }
+
+ void tab(int tabs) {
+ moveCursorByCol(tabs);
+ }
+
+ void bs() {
+ moveCursorByCol(-1);
+ if (mLines[mRow] != null) {
+ mLines[mRow].setCharAt(mCol, TS);
+ if (mCol == MAX_COLS - 1) {
+ // Spec recommendation:
+ // if cursor was at col 32, move cursor
+ // back to col 31 and erase both col 31&32
+ mLines[mRow].setCharAt(MAX_COLS, TS);
+ }
+ }
+ }
+
+ void cr() {
+ moveCursorTo(mRow + 1, 1);
+ }
+
+ void rollUp(int windowSize) {
+ int i;
+ for (i = 0; i <= mRow - windowSize; i++) {
+ mLines[i] = null;
+ }
+ int startRow = mRow - windowSize + 1;
+ if (startRow < 1) {
+ startRow = 1;
+ }
+ for (i = startRow; i < mRow; i++) {
+ mLines[i] = mLines[i + 1];
+ }
+ for (i = mRow; i < mLines.length; i++) {
+ // clear base row
+ mLines[i] = null;
+ }
+ // default to col 1, in case PAC is not sent
+ mCol = 1;
+ }
+
+ void writeText(String text) {
+ for (int i = 0; i < text.length(); i++) {
+ getLineBuffer(mRow).setCharAt(mCol, text.charAt(i));
+ moveCursorByCol(1);
+ }
+ }
+
+ void writeMidRowCode(StyleCode m) {
+ getLineBuffer(mRow).setMidRowAt(mCol, m);
+ moveCursorByCol(1);
+ }
+
+ void writePAC(PAC pac) {
+ if (pac.isIndentPAC()) {
+ moveCursorTo(pac.getRow(), pac.getCol());
+ } else {
+ moveCursorTo(pac.getRow(), 1);
+ }
+ getLineBuffer(mRow).setPACAt(mCol, pac);
+ }
+
+ SpannableStringBuilder[] getStyledText(CaptionStyle captionStyle) {
+ ArrayList<SpannableStringBuilder> rows = new ArrayList<>(MAX_ROWS);
+ for (int i = 1; i <= MAX_ROWS; i++) {
+ rows.add(mLines[i] != null ?
+ mLines[i].getStyledText(captionStyle) : null);
+ }
+ return rows.toArray(new SpannableStringBuilder[MAX_ROWS]);
+ }
+
+ private static int clamp(int x, int min, int max) {
+ return x < min ? min : (x > max ? max : x);
+ }
+
+ private void moveCursorTo(int row, int col) {
+ mRow = clamp(row, 1, MAX_ROWS);
+ mCol = clamp(col, 1, MAX_COLS);
+ }
+
+ private void moveCursorToRow(int row) {
+ mRow = clamp(row, 1, MAX_ROWS);
+ }
+
+ private void moveCursorByCol(int col) {
+ mCol = clamp(mCol + col, 1, MAX_COLS);
+ }
+
+ private void moveBaselineTo(int baseRow, int windowSize) {
+ if (mRow == baseRow) {
+ return;
+ }
+ int actualWindowSize = windowSize;
+ if (baseRow < actualWindowSize) {
+ actualWindowSize = baseRow;
+ }
+ if (mRow < actualWindowSize) {
+ actualWindowSize = mRow;
+ }
+
+ int i;
+ if (baseRow < mRow) {
+ // copy from bottom to top row
+ for (i = actualWindowSize - 1; i >= 0; i--) {
+ mLines[baseRow - i] = mLines[mRow - i];
+ }
+ } else {
+ // copy from top to bottom row
+ for (i = 0; i < actualWindowSize; i++) {
+ mLines[baseRow - i] = mLines[mRow - i];
+ }
+ }
+ // clear rest of the rows
+ for (i = 0; i <= baseRow - windowSize; i++) {
+ mLines[i] = null;
+ }
+ for (i = baseRow + 1; i < mLines.length; i++) {
+ mLines[i] = null;
+ }
+ }
+
+ private CCLineBuilder getLineBuffer(int row) {
+ if (mLines[row] == null) {
+ mLines[row] = new CCLineBuilder(mBlankLine);
+ }
+ return mLines[row];
+ }
+ }
+
+ /*
+ * CCData parses the raw CC byte pair into displayable chars,
+ * misc control codes, Mid-Row or Preamble Address Codes.
+ */
+ private static class CCData {
+ private final byte mType;
+ private final byte mData1;
+ private final byte mData2;
+
+ private static final String[] mCtrlCodeMap = {
+ "RCL", "BS" , "AOF", "AON",
+ "DER", "RU2", "RU3", "RU4",
+ "FON", "RDC", "TR" , "RTD",
+ "EDM", "CR" , "ENM", "EOC",
+ };
+
+ private static final String[] mSpecialCharMap = {
+ "\u00AE",
+ "\u00B0",
+ "\u00BD",
+ "\u00BF",
+ "\u2122",
+ "\u00A2",
+ "\u00A3",
+ "\u266A", // Eighth note
+ "\u00E0",
+ "\u00A0", // Transparent space
+ "\u00E8",
+ "\u00E2",
+ "\u00EA",
+ "\u00EE",
+ "\u00F4",
+ "\u00FB",
+ };
+
+ private static final String[] mSpanishCharMap = {
+ // Spanish and misc chars
+ "\u00C1", // A
+ "\u00C9", // E
+ "\u00D3", // I
+ "\u00DA", // O
+ "\u00DC", // U
+ "\u00FC", // u
+ "\u2018", // opening single quote
+ "\u00A1", // inverted exclamation mark
+ "*",
+ "'",
+ "\u2014", // em dash
+ "\u00A9", // Copyright
+ "\u2120", // Servicemark
+ "\u2022", // round bullet
+ "\u201C", // opening double quote
+ "\u201D", // closing double quote
+ // French
+ "\u00C0",
+ "\u00C2",
+ "\u00C7",
+ "\u00C8",
+ "\u00CA",
+ "\u00CB",
+ "\u00EB",
+ "\u00CE",
+ "\u00CF",
+ "\u00EF",
+ "\u00D4",
+ "\u00D9",
+ "\u00F9",
+ "\u00DB",
+ "\u00AB",
+ "\u00BB"
+ };
+
+ private static final String[] mPortugueseCharMap = {
+ // Portuguese
+ "\u00C3",
+ "\u00E3",
+ "\u00CD",
+ "\u00CC",
+ "\u00EC",
+ "\u00D2",
+ "\u00F2",
+ "\u00D5",
+ "\u00F5",
+ "{",
+ "}",
+ "\\",
+ "^",
+ "_",
+ "|",
+ "~",
+ // German and misc chars
+ "\u00C4",
+ "\u00E4",
+ "\u00D6",
+ "\u00F6",
+ "\u00DF",
+ "\u00A5",
+ "\u00A4",
+ "\u2502", // vertical bar
+ "\u00C5",
+ "\u00E5",
+ "\u00D8",
+ "\u00F8",
+ "\u250C", // top-left corner
+ "\u2510", // top-right corner
+ "\u2514", // lower-left corner
+ "\u2518", // lower-right corner
+ };
+
+ static CCData[] fromByteArray(byte[] data) {
+ CCData[] ccData = new CCData[data.length / 3];
+
+ for (int i = 0; i < ccData.length; i++) {
+ ccData[i] = new CCData(
+ data[i * 3],
+ data[i * 3 + 1],
+ data[i * 3 + 2]);
+ }
+
+ return ccData;
+ }
+
+ CCData(byte type, byte data1, byte data2) {
+ mType = type;
+ mData1 = data1;
+ mData2 = data2;
+ }
+
+ int getCtrlCode() {
+ if ((mData1 == 0x14 || mData1 == 0x1c)
+ && mData2 >= 0x20 && mData2 <= 0x2f) {
+ return mData2;
+ }
+ return INVALID;
+ }
+
+ StyleCode getMidRow() {
+ // only support standard Mid-row codes, ignore
+ // optional background/foreground mid-row codes
+ if ((mData1 == 0x11 || mData1 == 0x19)
+ && mData2 >= 0x20 && mData2 <= 0x2f) {
+ return StyleCode.fromByte(mData2);
+ }
+ return null;
+ }
+
+ PAC getPAC() {
+ if ((mData1 & 0x70) == 0x10
+ && (mData2 & 0x40) == 0x40
+ && ((mData1 & 0x07) != 0 || (mData2 & 0x20) == 0)) {
+ return PAC.fromBytes(mData1, mData2);
+ }
+ return null;
+ }
+
+ int getTabOffset() {
+ if ((mData1 == 0x17 || mData1 == 0x1f)
+ && mData2 >= 0x21 && mData2 <= 0x23) {
+ return mData2 & 0x3;
+ }
+ return 0;
+ }
+
+ boolean isDisplayableChar() {
+ return isBasicChar() || isSpecialChar() || isExtendedChar();
+ }
+
+ String getDisplayText() {
+ String str = getBasicChars();
+
+ if (str == null) {
+ str = getSpecialChar();
+
+ if (str == null) {
+ str = getExtendedChar();
+ }
+ }
+
+ return str;
+ }
+
+ private String ctrlCodeToString(int ctrlCode) {
+ return mCtrlCodeMap[ctrlCode - 0x20];
+ }
+
+ private boolean isBasicChar() {
+ return mData1 >= 0x20 && mData1 <= 0x7f;
+ }
+
+ private boolean isSpecialChar() {
+ return ((mData1 == 0x11 || mData1 == 0x19)
+ && mData2 >= 0x30 && mData2 <= 0x3f);
+ }
+
+ private boolean isExtendedChar() {
+ return ((mData1 == 0x12 || mData1 == 0x1A
+ || mData1 == 0x13 || mData1 == 0x1B)
+ && mData2 >= 0x20 && mData2 <= 0x3f);
+ }
+
+ private char getBasicChar(byte data) {
+ char c;
+ // replace the non-ASCII ones
+ switch (data) {
+ case 0x2A: c = '\u00E1'; break;
+ case 0x5C: c = '\u00E9'; break;
+ case 0x5E: c = '\u00ED'; break;
+ case 0x5F: c = '\u00F3'; break;
+ case 0x60: c = '\u00FA'; break;
+ case 0x7B: c = '\u00E7'; break;
+ case 0x7C: c = '\u00F7'; break;
+ case 0x7D: c = '\u00D1'; break;
+ case 0x7E: c = '\u00F1'; break;
+ case 0x7F: c = '\u2588'; break; // Full block
+ default: c = (char) data; break;
+ }
+ return c;
+ }
+
+ private String getBasicChars() {
+ if (mData1 >= 0x20 && mData1 <= 0x7f) {
+ StringBuilder builder = new StringBuilder(2);
+ builder.append(getBasicChar(mData1));
+ if (mData2 >= 0x20 && mData2 <= 0x7f) {
+ builder.append(getBasicChar(mData2));
+ }
+ return builder.toString();
+ }
+
+ return null;
+ }
+
+ private String getSpecialChar() {
+ if ((mData1 == 0x11 || mData1 == 0x19)
+ && mData2 >= 0x30 && mData2 <= 0x3f) {
+ return mSpecialCharMap[mData2 - 0x30];
+ }
+
+ return null;
+ }
+
+ private String getExtendedChar() {
+ if ((mData1 == 0x12 || mData1 == 0x1A)
+ && mData2 >= 0x20 && mData2 <= 0x3f){
+ // 1 Spanish/French char
+ return mSpanishCharMap[mData2 - 0x20];
+ } else if ((mData1 == 0x13 || mData1 == 0x1B)
+ && mData2 >= 0x20 && mData2 <= 0x3f){
+ // 1 Portuguese/German/Danish char
+ return mPortugueseCharMap[mData2 - 0x20];
+ }
+
+ return null;
+ }
+
+ @Override
+ public String toString() {
+ String str;
+
+ if (mData1 < 0x10 && mData2 < 0x10) {
+ // Null Pad, ignore
+ return String.format("[%d]Null: %02x %02x", mType, mData1, mData2);
+ }
+
+ int ctrlCode = getCtrlCode();
+ if (ctrlCode != INVALID) {
+ return String.format("[%d]%s", mType, ctrlCodeToString(ctrlCode));
+ }
+
+ int tabOffset = getTabOffset();
+ if (tabOffset > 0) {
+ return String.format("[%d]Tab%d", mType, tabOffset);
+ }
+
+ PAC pac = getPAC();
+ if (pac != null) {
+ return String.format("[%d]PAC: %s", mType, pac.toString());
+ }
+
+ StyleCode m = getMidRow();
+ if (m != null) {
+ return String.format("[%d]Mid-row: %s", mType, m.toString());
+ }
+
+ if (isDisplayableChar()) {
+ return String.format("[%d]Displayable: %s (%02x %02x)",
+ mType, getDisplayText(), mData1, mData2);
+ }
+
+ return String.format("[%d]Invalid: %02x %02x", mType, mData1, mData2);
+ }
+ }
+}
+
+/**
+ * Widget capable of rendering CEA-608 closed captions.
+ */
+class Cea608CCWidget extends ClosedCaptionWidget implements Cea608CCParser.DisplayListener {
+ private static final Rect mTextBounds = new Rect();
+ private static final String mDummyText = "1234567890123456789012345678901234";
+
+ public Cea608CCWidget(Context context) {
+ this(context, null);
+ }
+
+ public Cea608CCWidget(Context context, AttributeSet attrs) {
+ this(context, attrs, 0);
+ }
+
+ public Cea608CCWidget(Context context, AttributeSet attrs, int defStyle) {
+ this(context, attrs, defStyle, 0);
+ }
+
+ public Cea608CCWidget(Context context, AttributeSet attrs, int defStyleAttr,
+ int defStyleRes) {
+ super(context, attrs, defStyleAttr, defStyleRes);
+ }
+
+ @Override
+ public ClosedCaptionLayout createCaptionLayout(Context context) {
+ return new CCLayout(context);
+ }
+
+ @Override
+ public void onDisplayChanged(SpannableStringBuilder[] styledTexts) {
+ ((CCLayout) mClosedCaptionLayout).update(styledTexts);
+
+ if (mListener != null) {
+ mListener.onChanged(this);
+ }
+ }
+
+ @Override
+ public CaptionStyle getCaptionStyle() {
+ return mCaptionStyle;
+ }
+
+ private static class CCLineBox extends TextView {
+ private static final float FONT_PADDING_RATIO = 0.75f;
+ private static final float EDGE_OUTLINE_RATIO = 0.1f;
+ private static final float EDGE_SHADOW_RATIO = 0.05f;
+ private float mOutlineWidth;
+ private float mShadowRadius;
+ private float mShadowOffset;
+
+ private int mTextColor = Color.WHITE;
+ private int mBgColor = Color.BLACK;
+ private int mEdgeType = CaptionStyle.EDGE_TYPE_NONE;
+ private int mEdgeColor = Color.TRANSPARENT;
+
+ CCLineBox(Context context) {
+ super(context);
+ setGravity(Gravity.CENTER);
+ setBackgroundColor(Color.TRANSPARENT);
+ setTextColor(Color.WHITE);
+ setTypeface(Typeface.MONOSPACE);
+ setVisibility(View.INVISIBLE);
+
+ final Resources res = getContext().getResources();
+
+ // get the default (will be updated later during measure)
+ mOutlineWidth = res.getDimensionPixelSize(
+ com.android.internal.R.dimen.subtitle_outline_width);
+ mShadowRadius = res.getDimensionPixelSize(
+ com.android.internal.R.dimen.subtitle_shadow_radius);
+ mShadowOffset = res.getDimensionPixelSize(
+ com.android.internal.R.dimen.subtitle_shadow_offset);
+ }
+
+ void setCaptionStyle(CaptionStyle captionStyle) {
+ mTextColor = captionStyle.foregroundColor;
+ mBgColor = captionStyle.backgroundColor;
+ mEdgeType = captionStyle.edgeType;
+ mEdgeColor = captionStyle.edgeColor;
+
+ setTextColor(mTextColor);
+ if (mEdgeType == CaptionStyle.EDGE_TYPE_DROP_SHADOW) {
+ setShadowLayer(mShadowRadius, mShadowOffset, mShadowOffset, mEdgeColor);
+ } else {
+ setShadowLayer(0, 0, 0, 0);
+ }
+ invalidate();
+ }
+
+ @Override
+ protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
+ float fontSize = MeasureSpec.getSize(heightMeasureSpec) * FONT_PADDING_RATIO;
+ setTextSize(TypedValue.COMPLEX_UNIT_PX, fontSize);
+
+ mOutlineWidth = EDGE_OUTLINE_RATIO * fontSize + 1.0f;
+ mShadowRadius = EDGE_SHADOW_RATIO * fontSize + 1.0f;
+ mShadowOffset = mShadowRadius;
+
+ // set font scale in the X direction to match the required width
+ setScaleX(1.0f);
+ getPaint().getTextBounds(mDummyText, 0, mDummyText.length(), mTextBounds);
+ float actualTextWidth = mTextBounds.width();
+ float requiredTextWidth = MeasureSpec.getSize(widthMeasureSpec);
+ setScaleX(requiredTextWidth / actualTextWidth);
+
+ super.onMeasure(widthMeasureSpec, heightMeasureSpec);
+ }
+
+ @Override
+ protected void onDraw(Canvas c) {
+ if (mEdgeType == CaptionStyle.EDGE_TYPE_UNSPECIFIED
+ || mEdgeType == CaptionStyle.EDGE_TYPE_NONE
+ || mEdgeType == CaptionStyle.EDGE_TYPE_DROP_SHADOW) {
+ // these edge styles don't require a second pass
+ super.onDraw(c);
+ return;
+ }
+
+ if (mEdgeType == CaptionStyle.EDGE_TYPE_OUTLINE) {
+ drawEdgeOutline(c);
+ } else {
+ // Raised or depressed
+ drawEdgeRaisedOrDepressed(c);
+ }
+ }
+
+ private void drawEdgeOutline(Canvas c) {
+ TextPaint textPaint = getPaint();
+
+ Paint.Style previousStyle = textPaint.getStyle();
+ Paint.Join previousJoin = textPaint.getStrokeJoin();
+ float previousWidth = textPaint.getStrokeWidth();
+
+ setTextColor(mEdgeColor);
+ textPaint.setStyle(Paint.Style.FILL_AND_STROKE);
+ textPaint.setStrokeJoin(Paint.Join.ROUND);
+ textPaint.setStrokeWidth(mOutlineWidth);
+
+ // Draw outline and background only.
+ super.onDraw(c);
+
+ // Restore original settings.
+ setTextColor(mTextColor);
+ textPaint.setStyle(previousStyle);
+ textPaint.setStrokeJoin(previousJoin);
+ textPaint.setStrokeWidth(previousWidth);
+
+ // Remove the background.
+ setBackgroundSpans(Color.TRANSPARENT);
+ // Draw foreground only.
+ super.onDraw(c);
+ // Restore the background.
+ setBackgroundSpans(mBgColor);
+ }
+
+ private void drawEdgeRaisedOrDepressed(Canvas c) {
+ TextPaint textPaint = getPaint();
+
+ Paint.Style previousStyle = textPaint.getStyle();
+ textPaint.setStyle(Paint.Style.FILL);
+
+ final boolean raised = mEdgeType == CaptionStyle.EDGE_TYPE_RAISED;
+ final int colorUp = raised ? Color.WHITE : mEdgeColor;
+ final int colorDown = raised ? mEdgeColor : Color.WHITE;
+ final float offset = mShadowRadius / 2f;
+
+ // Draw background and text with shadow up
+ setShadowLayer(mShadowRadius, -offset, -offset, colorUp);
+ super.onDraw(c);
+
+ // Remove the background.
+ setBackgroundSpans(Color.TRANSPARENT);
+
+ // Draw text with shadow down
+ setShadowLayer(mShadowRadius, +offset, +offset, colorDown);
+ super.onDraw(c);
+
+ // Restore settings
+ textPaint.setStyle(previousStyle);
+
+ // Restore the background.
+ setBackgroundSpans(mBgColor);
+ }
+
+ private void setBackgroundSpans(int color) {
+ CharSequence text = getText();
+ if (text instanceof Spannable) {
+ Spannable spannable = (Spannable) text;
+ Cea608CCParser.MutableBackgroundColorSpan[] bgSpans = spannable.getSpans(
+ 0, spannable.length(), Cea608CCParser.MutableBackgroundColorSpan.class);
+ for (int i = 0; i < bgSpans.length; i++) {
+ bgSpans[i].setBackgroundColor(color);
+ }
+ }
+ }
+ }
+
+ private static class CCLayout extends LinearLayout implements ClosedCaptionLayout {
+ private static final int MAX_ROWS = Cea608CCParser.MAX_ROWS;
+ private static final float SAFE_AREA_RATIO = 0.9f;
+
+ private final CCLineBox[] mLineBoxes = new CCLineBox[MAX_ROWS];
+
+ CCLayout(Context context) {
+ super(context);
+ setGravity(Gravity.START);
+ setOrientation(LinearLayout.VERTICAL);
+ for (int i = 0; i < MAX_ROWS; i++) {
+ mLineBoxes[i] = new CCLineBox(getContext());
+ addView(mLineBoxes[i], LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT);
+ }
+ }
+
+ @Override
+ public void setCaptionStyle(CaptionStyle captionStyle) {
+ for (int i = 0; i < MAX_ROWS; i++) {
+ mLineBoxes[i].setCaptionStyle(captionStyle);
+ }
+ }
+
+ @Override
+ public void setFontScale(float fontScale) {
+ // Ignores the font scale changes of the system wide CC preference.
+ }
+
+ void update(SpannableStringBuilder[] textBuffer) {
+ for (int i = 0; i < MAX_ROWS; i++) {
+ if (textBuffer[i] != null) {
+ mLineBoxes[i].setText(textBuffer[i], TextView.BufferType.SPANNABLE);
+ mLineBoxes[i].setVisibility(View.VISIBLE);
+ } else {
+ mLineBoxes[i].setVisibility(View.INVISIBLE);
+ }
+ }
+ }
+
+ @Override
+ protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
+ super.onMeasure(widthMeasureSpec, heightMeasureSpec);
+
+ int safeWidth = getMeasuredWidth();
+ int safeHeight = getMeasuredHeight();
+
+ // CEA-608 assumes 4:3 video
+ if (safeWidth * 3 >= safeHeight * 4) {
+ safeWidth = safeHeight * 4 / 3;
+ } else {
+ safeHeight = safeWidth * 3 / 4;
+ }
+ safeWidth *= SAFE_AREA_RATIO;
+ safeHeight *= SAFE_AREA_RATIO;
+
+ int lineHeight = safeHeight / MAX_ROWS;
+ int lineHeightMeasureSpec = MeasureSpec.makeMeasureSpec(
+ lineHeight, MeasureSpec.EXACTLY);
+ int lineWidthMeasureSpec = MeasureSpec.makeMeasureSpec(
+ safeWidth, MeasureSpec.EXACTLY);
+
+ for (int i = 0; i < MAX_ROWS; i++) {
+ mLineBoxes[i].measure(lineWidthMeasureSpec, lineHeightMeasureSpec);
+ }
+ }
+
+ @Override
+ protected void onLayout(boolean changed, int l, int t, int r, int b) {
+ // safe caption area
+ int viewPortWidth = r - l;
+ int viewPortHeight = b - t;
+ int safeWidth, safeHeight;
+ // CEA-608 assumes 4:3 video
+ if (viewPortWidth * 3 >= viewPortHeight * 4) {
+ safeWidth = viewPortHeight * 4 / 3;
+ safeHeight = viewPortHeight;
+ } else {
+ safeWidth = viewPortWidth;
+ safeHeight = viewPortWidth * 3 / 4;
+ }
+ safeWidth *= SAFE_AREA_RATIO;
+ safeHeight *= SAFE_AREA_RATIO;
+ int left = (viewPortWidth - safeWidth) / 2;
+ int top = (viewPortHeight - safeHeight) / 2;
+
+ for (int i = 0; i < MAX_ROWS; i++) {
+ mLineBoxes[i].layout(
+ left,
+ top + safeHeight * i / MAX_ROWS,
+ left + safeWidth,
+ top + safeHeight * (i + 1) / MAX_ROWS);
+ }
+ }
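+
+ // Illustrative sketch, not part of this change: for a 1920x1080 viewport the 4:3 rule
+ // gives safeWidth = 1080 * 4 / 3 = 1440 and safeHeight = 1080; applying SAFE_AREA_RATIO
+ // (0.9) yields 1296 x 972, so the 15 rows are laid out 1296 px wide and roughly 65 px
+ // tall each, centered at left = (1920 - 1296) / 2 = 312 and top = (1080 - 972) / 2 = 54.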
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/subtitle/MediaTimeProvider.java b/packages/MediaComponents/src/com/android/media/subtitle/MediaTimeProvider.java
new file mode 100644
index 0000000..af36d7f
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/subtitle/MediaTimeProvider.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.subtitle;
+
+// Note: This is just copied from android.media.MediaTimeProvider.
+public interface MediaTimeProvider {
+ // we do not allow negative media time
+ /**
+ * Presentation time value if no timed event notification is requested.
+ */
+ public final static long NO_TIME = -1;
+
+ /**
+ * Cancels all previous notification requests from this listener, if any. It
+ * registers the listener to get seek and stop notifications. If timeUs is
+ * not negative, it also registers the listener for a timed event
+ * notification when the presentation time reaches (becomes greater than)
+ * the specified value. This happens immediately if the current media time
+ * is already larger than or equal to timeUs.
+ *
+ * @param timeUs presentation time to get timed event callback at (or
+ * {@link #NO_TIME})
+ */
+ public void notifyAt(long timeUs, OnMediaTimeListener listener);
+
+ /**
+ * Cancels all previous notification requests from this listener, if any. It
+ * registers the listener to get seek and stop notifications. If the media
+ * is stopped, the listener will immediately receive a stop notification.
+ * Otherwise, it will receive a timed event notification.
+ */
+ public void scheduleUpdate(OnMediaTimeListener listener);
+
+ /**
+ * Cancels all previous notification requests from this listener, if any.
+ */
+ public void cancelNotifications(OnMediaTimeListener listener);
+
+ /**
+ * Get the current presentation time.
+ *
+ * @param precise Whether getting a precise time is important. This is
+ * more costly.
+ * @param monotonic Whether returned time should be monotonic: that is,
+ * greater than or equal to the last returned time. Don't
+ * always set this to true; for example, doing so has undesired
+ * consequences if the media is seeked between calls.
+ * @throws IllegalStateException if the media is not initialized
+ */
+ public long getCurrentTimeUs(boolean precise, boolean monotonic)
+ throws IllegalStateException;
+
+ public static interface OnMediaTimeListener {
+ /**
+ * Called when the registered time was reached naturally.
+ *
+ * @param timeUs current media time
+ */
+ void onTimedEvent(long timeUs);
+
+ /**
+ * Called when the media time changed due to seeking.
+ *
+ * @param timeUs current media time
+ */
+ void onSeek(long timeUs);
+
+ /**
+ * Called when the playback stopped. This is not called on pause, only
+ * on full stop, at which point there is no further current media time.
+ */
+ void onStop();
+ }
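+
+ // Illustrative sketch, not part of this change: a provider implementation would typically
+ // drive a registered listener along these lines (mListeners and mStopped are hypothetical
+ // state kept by the hosting player):
+ //
+ //   for (OnMediaTimeListener listener : mListeners) {
+ //       if (mStopped) {
+ //           listener.onStop();
+ //       } else {
+ //           listener.onTimedEvent(getCurrentTimeUs(false /* precise */, true /* monotonic */));
+ //       }
+ //   }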
+}
+
diff --git a/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
new file mode 100644
index 0000000..a4d55d7
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleController.java
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.subtitle;
+
+import java.util.Locale;
+import java.util.Vector;
+
+import android.content.Context;
+import android.media.MediaFormat;
+import android.media.MediaPlayer2;
+import android.media.MediaPlayer2.TrackInfo;
+import android.os.Handler;
+import android.os.Looper;
+import android.os.Message;
+import android.view.accessibility.CaptioningManager;
+
+import com.android.media.subtitle.SubtitleTrack.RenderingWidget;
+
+// Note: This is forked from android.media.SubtitleController as of Android P.
+/**
+ * The subtitle controller provides the architecture to display subtitles for a
+ * media source. It allows specifying which tracks to display, on which anchor
+ * to display them, and also allows adding external, out-of-band subtitle tracks.
+ */
+public class SubtitleController {
+ private MediaTimeProvider mTimeProvider;
+ private Vector<Renderer> mRenderers;
+ private Vector<SubtitleTrack> mTracks;
+ private SubtitleTrack mSelectedTrack;
+ private boolean mShowing;
+ private CaptioningManager mCaptioningManager;
+ private Handler mHandler;
+
+ private static final int WHAT_SHOW = 1;
+ private static final int WHAT_HIDE = 2;
+ private static final int WHAT_SELECT_TRACK = 3;
+ private static final int WHAT_SELECT_DEFAULT_TRACK = 4;
+
+ private final Handler.Callback mCallback = new Handler.Callback() {
+ @Override
+ public boolean handleMessage(Message msg) {
+ switch (msg.what) {
+ case WHAT_SHOW:
+ doShow();
+ return true;
+ case WHAT_HIDE:
+ doHide();
+ return true;
+ case WHAT_SELECT_TRACK:
+ doSelectTrack((SubtitleTrack)msg.obj);
+ return true;
+ case WHAT_SELECT_DEFAULT_TRACK:
+ doSelectDefaultTrack();
+ return true;
+ default:
+ return false;
+ }
+ }
+ };
+
+ private CaptioningManager.CaptioningChangeListener mCaptioningChangeListener =
+ new CaptioningManager.CaptioningChangeListener() {
+ @Override
+ public void onEnabledChanged(boolean enabled) {
+ selectDefaultTrack();
+ }
+
+ @Override
+ public void onLocaleChanged(Locale locale) {
+ selectDefaultTrack();
+ }
+ };
+
+ public SubtitleController(Context context) {
+ this(context, null, null);
+ }
+
+ /**
+ * Creates a subtitle controller for a media playback object that implements
+ * the MediaTimeProvider interface.
+ *
+ * @param timeProvider
+ */
+ public SubtitleController(
+ Context context,
+ MediaTimeProvider timeProvider,
+ Listener listener) {
+ mTimeProvider = timeProvider;
+ mListener = listener;
+
+ mRenderers = new Vector<Renderer>();
+ mShowing = false;
+ mTracks = new Vector<SubtitleTrack>();
+ mCaptioningManager =
+ (CaptioningManager)context.getSystemService(Context.CAPTIONING_SERVICE);
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ mCaptioningManager.removeCaptioningChangeListener(
+ mCaptioningChangeListener);
+ super.finalize();
+ }
+
+ /**
+ * @return the available subtitle tracks for this media. These include
+ * the tracks found by {@link MediaPlayer} as well as any tracks added
+ * manually via {@link #addTrack}.
+ */
+ public SubtitleTrack[] getTracks() {
+ synchronized(mTracks) {
+ SubtitleTrack[] tracks = new SubtitleTrack[mTracks.size()];
+ mTracks.toArray(tracks);
+ return tracks;
+ }
+ }
+
+ /**
+ * @return the currently selected subtitle track
+ */
+ public SubtitleTrack getSelectedTrack() {
+ return mSelectedTrack;
+ }
+
+ private RenderingWidget getRenderingWidget() {
+ if (mSelectedTrack == null) {
+ return null;
+ }
+ return mSelectedTrack.getRenderingWidget();
+ }
+
+ /**
+ * Selects a subtitle track. As a result, this track will receive
+ * in-band data from the {@link MediaPlayer}. However, this does
+ * not change the subtitle visibility.
+ *
+ * Should be called from the anchor's (UI) thread. {@link Anchor#getSubtitleLooper}
+ *
+ * @param track The subtitle track to select. This must be one of the
+ * tracks in {@link #getTracks}.
+ * @return true if the track was successfully selected.
+ */
+ public boolean selectTrack(SubtitleTrack track) {
+ if (track != null && !mTracks.contains(track)) {
+ return false;
+ }
+
+ processOnAnchor(mHandler.obtainMessage(WHAT_SELECT_TRACK, track));
+ return true;
+ }
+
+ private void doSelectTrack(SubtitleTrack track) {
+ mTrackIsExplicit = true;
+ if (mSelectedTrack == track) {
+ return;
+ }
+
+ if (mSelectedTrack != null) {
+ mSelectedTrack.hide();
+ mSelectedTrack.setTimeProvider(null);
+ }
+
+ mSelectedTrack = track;
+ if (mAnchor != null) {
+ mAnchor.setSubtitleWidget(getRenderingWidget());
+ }
+
+ if (mSelectedTrack != null) {
+ mSelectedTrack.setTimeProvider(mTimeProvider);
+ mSelectedTrack.show();
+ }
+
+ if (mListener != null) {
+ mListener.onSubtitleTrackSelected(track);
+ }
+ }
+
+ /**
+ * @return the default subtitle track based on system preferences, or null,
+ * if no such track exists in this manager.
+ *
+ * Supports the HLS flags AUTOSELECT, FORCED and DEFAULT.
+ *
+ * 1. If captioning is disabled, only consider FORCED tracks. Otherwise,
+ * consider all tracks, but prefer non-FORCED ones.
+ * 2. If user selected "Default" caption language:
+ *   a. If there is a considered track with DEFAULT=yes, return that track
+ *      (favor the first one in the current language if there is more than
+ *      one default track, or the first one overall if none of them are in
+ *      the current language).
+ * b. Otherwise, if there is a track with AUTOSELECT=yes in the current
+ * language, return that one.
+ * c. If there are no default tracks, and no autoselectable tracks in the
+ * current language, return null.
+ * 3. If there is a track with the caption language, select that one. Prefer
+ * the one with AUTOSELECT=no.
+ *
+ * The default values for these flags are DEFAULT=no, AUTOSELECT=yes
+ * and FORCED=no.
+ */
+ public SubtitleTrack getDefaultTrack() {
+ SubtitleTrack bestTrack = null;
+ int bestScore = -1;
+
+ Locale selectedLocale = mCaptioningManager.getLocale();
+ Locale locale = selectedLocale;
+ if (locale == null) {
+ locale = Locale.getDefault();
+ }
+ boolean selectForced = !mCaptioningManager.isEnabled();
+
+ synchronized(mTracks) {
+ for (SubtitleTrack track: mTracks) {
+ MediaFormat format = track.getFormat();
+ String language = format.getString(MediaFormat.KEY_LANGUAGE);
+ boolean forced =
+ format.getInteger(MediaFormat.KEY_IS_FORCED_SUBTITLE, 0) != 0;
+ boolean autoselect =
+ format.getInteger(MediaFormat.KEY_IS_AUTOSELECT, 1) != 0;
+ boolean is_default =
+ format.getInteger(MediaFormat.KEY_IS_DEFAULT, 0) != 0;
+
+ boolean languageMatches =
+ (locale == null ||
+ locale.getLanguage().equals("") ||
+ locale.getISO3Language().equals(language) ||
+ locale.getLanguage().equals(language));
+ // is_default is meaningless unless caption language is 'default'
+ int score = (forced ? 0 : 8) +
+ (((selectedLocale == null) && is_default) ? 4 : 0) +
+ (autoselect ? 0 : 2) + (languageMatches ? 1 : 0);
+
+ if (selectForced && !forced) {
+ continue;
+ }
+
+ // we treat null locale/language as matching any language
+ if ((selectedLocale == null && is_default) ||
+ (languageMatches &&
+ (autoselect || forced || selectedLocale != null))) {
+ if (score > bestScore) {
+ bestScore = score;
+ bestTrack = track;
+ }
+ }
+ }
+ }
+ return bestTrack;
+ }
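+
+ // Illustrative sketch, not part of this change: with captioning enabled and the caption
+ // language left at "Default" (selectedLocale == null), a non-forced AUTOSELECT track in
+ // the device language is eligible and scores 8 + 0 + 0 + 1 = 9, while a FORCED track in
+ // the same language scores 0 + 0 + 0 + 1 = 1, so the non-forced track is returned.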
+
+ private boolean mTrackIsExplicit = false;
+ private boolean mVisibilityIsExplicit = false;
+
+ /** should be called from anchor thread */
+ public void selectDefaultTrack() {
+ processOnAnchor(mHandler.obtainMessage(WHAT_SELECT_DEFAULT_TRACK));
+ }
+
+ private void doSelectDefaultTrack() {
+ if (mTrackIsExplicit) {
+ // If track selection is explicit, but visibility
+ // is not, it falls back to the captioning setting
+ if (!mVisibilityIsExplicit) {
+ if (mCaptioningManager.isEnabled() ||
+ (mSelectedTrack != null &&
+ mSelectedTrack.getFormat().getInteger(
+ MediaFormat.KEY_IS_FORCED_SUBTITLE, 0) != 0)) {
+ show();
+ } else if (mSelectedTrack != null
+ && mSelectedTrack.getTrackType() == TrackInfo.MEDIA_TRACK_TYPE_SUBTITLE) {
+ hide();
+ }
+ mVisibilityIsExplicit = false;
+ }
+ return;
+ }
+
+ // We can have a default (forced) track even if captioning
+ // is not enabled. This is handled by getDefaultTrack().
+ // Show this track unless subtitles were explicitly hidden.
+ SubtitleTrack track = getDefaultTrack();
+ if (track != null) {
+ selectTrack(track);
+ mTrackIsExplicit = false;
+ if (!mVisibilityIsExplicit) {
+ show();
+ mVisibilityIsExplicit = false;
+ }
+ }
+ }
+
+ /** must be called from anchor thread */
+ public void reset() {
+ checkAnchorLooper();
+ hide();
+ selectTrack(null);
+ mTracks.clear();
+ mTrackIsExplicit = false;
+ mVisibilityIsExplicit = false;
+ mCaptioningManager.removeCaptioningChangeListener(
+ mCaptioningChangeListener);
+ }
+
+ /**
+ * Adds a new, external subtitle track to the manager.
+ *
+ * @param format the format of the track that will include at least
+ * the MIME type {@link MediaFormat#KEY_MIME}.
+ * @return the created {@link SubtitleTrack} object
+ */
+ public SubtitleTrack addTrack(MediaFormat format) {
+ synchronized(mRenderers) {
+ for (Renderer renderer: mRenderers) {
+ if (renderer.supports(format)) {
+ SubtitleTrack track = renderer.createTrack(format);
+ if (track != null) {
+ synchronized(mTracks) {
+ if (mTracks.size() == 0) {
+ mCaptioningManager.addCaptioningChangeListener(
+ mCaptioningChangeListener);
+ }
+ mTracks.add(track);
+ }
+ return track;
+ }
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Show the selected (or default) subtitle track.
+ *
+ * Should be called from the anchor's (UI) thread. {@link Anchor#getSubtitleLooper}
+ */
+ public void show() {
+ processOnAnchor(mHandler.obtainMessage(WHAT_SHOW));
+ }
+
+ private void doShow() {
+ mShowing = true;
+ mVisibilityIsExplicit = true;
+ if (mSelectedTrack != null) {
+ mSelectedTrack.show();
+ }
+ }
+
+ /**
+ * Hide the selected (or default) subtitle track.
+ *
+ * Should be called from the anchor's (UI) thread. {@link Anchor#getSubtitleLooper}
+ */
+ public void hide() {
+ processOnAnchor(mHandler.obtainMessage(WHAT_HIDE));
+ }
+
+ private void doHide() {
+ mVisibilityIsExplicit = true;
+ if (mSelectedTrack != null) {
+ mSelectedTrack.hide();
+ }
+ mShowing = false;
+ }
+
+ /**
+ * Interface for supporting a single or multiple subtitle types in {@link
+ * MediaPlayer}.
+ */
+ public abstract static class Renderer {
+ /**
+ * Called by {@link MediaPlayer}'s {@link SubtitleController} when a new
+ * subtitle track is detected, to see if it should use this object to
+ * parse and display this subtitle track.
+ *
+ * @param format the format of the track that will include at least
+ * the MIME type {@link MediaFormat#KEY_MIME}.
+ *
+ * @return true if and only if the track format is supported by this
+ * renderer
+ */
+ public abstract boolean supports(MediaFormat format);
+
+ /**
+ * Called by {@link MediaPlayer}'s {@link SubtitleController} for each
+ * subtitle track that was detected and is supported by this object to
+ * create a {@link SubtitleTrack} object. This object will be created
+ * for each track that was found. If the track is selected for display,
+ * this object will be used to parse and display the track data.
+ *
+ * @param format the format of the track that will include at least
+ * the MIME type {@link MediaFormat#KEY_MIME}.
+ * @return a {@link SubtitleTrack} object that will be used to parse
+ * and render the subtitle track.
+ */
+ public abstract SubtitleTrack createTrack(MediaFormat format);
+ }
+
+ /**
+ * Add support for a subtitle format in {@link MediaPlayer}.
+ *
+ * @param renderer a {@link SubtitleController.Renderer} object that adds
+ * support for a subtitle format.
+ */
+ public void registerRenderer(Renderer renderer) {
+ synchronized(mRenderers) {
+ // TODO how to get available renderers in the system
+ if (!mRenderers.contains(renderer)) {
+ // TODO should added renderers override existing ones (to allow replacing?)
+ mRenderers.add(renderer);
+ }
+ }
+ }
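+
+ // Illustrative sketch (assumed names): a minimal Renderer that claims WebVTT
+ // tracks and hands back a hypothetical SubtitleTrack subclass. Registering it
+ // makes addTrack()/hasRendererFor() aware of the format.
+ //
+ //   subtitleController.registerRenderer(new Renderer() {
+ //       @Override
+ //       public boolean supports(MediaFormat format) {
+ //           return format.containsKey(MediaFormat.KEY_MIME)
+ //                   && "text/vtt".equals(format.getString(MediaFormat.KEY_MIME));
+ //       }
+ //
+ //       @Override
+ //       public SubtitleTrack createTrack(MediaFormat format) {
+ //           return new MyVttTrack(format); // hypothetical SubtitleTrack subclass
+ //       }
+ //   });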
+
+ public boolean hasRendererFor(MediaFormat format) {
+ synchronized(mRenderers) {
+ // TODO how to get available renderers in the system
+ for (Renderer renderer: mRenderers) {
+ if (renderer.supports(format)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+
+ /**
+ * Subtitle anchor, an object that is able to display a subtitle renderer,
+ * e.g. a VideoView.
+ */
+ public interface Anchor {
+ /**
+ * Anchor should use the supplied subtitle rendering widget, or
+ * none if it is null.
+ */
+ public void setSubtitleWidget(RenderingWidget subtitleWidget);
+
+ /**
+ * Anchors provide the looper on which all track visibility changes
+ * (track.show/hide, setSubtitleWidget) will take place.
+ */
+ public Looper getSubtitleLooper();
+ }
+
+ private Anchor mAnchor;
+
+ /**
+ * called from anchor's looper (if any, both when unsetting and
+ * setting)
+ */
+ public void setAnchor(Anchor anchor) {
+ if (mAnchor == anchor) {
+ return;
+ }
+
+ if (mAnchor != null) {
+ checkAnchorLooper();
+ mAnchor.setSubtitleWidget(null);
+ }
+ mAnchor = anchor;
+ mHandler = null;
+ if (mAnchor != null) {
+ mHandler = new Handler(mAnchor.getSubtitleLooper(), mCallback);
+ checkAnchorLooper();
+ mAnchor.setSubtitleWidget(getRenderingWidget());
+ }
+ }
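+
+ // Illustrative sketch of the anchor contract (a minimal example, not tied to a
+ // specific widget): the anchor hosts the rendering widget and provides the
+ // looper on which selection/visibility calls are dispatched; setAnchor() itself
+ // must run on that looper.
+ //
+ //   subtitleController.setAnchor(new Anchor() {
+ //       @Override
+ //       public void setSubtitleWidget(RenderingWidget widget) {
+ //           // attach the widget to, or detach it from, the video view hierarchy
+ //       }
+ //
+ //       @Override
+ //       public Looper getSubtitleLooper() {
+ //           return Looper.getMainLooper(); // the UI thread in this sketch
+ //       }
+ //   });
+ //   subtitleController.selectDefaultTrack();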
+
+ private void checkAnchorLooper() {
+ assert mHandler != null : "Should have a looper already";
+ assert Looper.myLooper() == mHandler.getLooper()
+ : "Must be called from the anchor's looper";
+ }
+
+ private void processOnAnchor(Message m) {
+ assert mHandler != null : "Should have a looper already";
+ if (Looper.myLooper() == mHandler.getLooper()) {
+ mHandler.dispatchMessage(m);
+ } else {
+ mHandler.sendMessage(m);
+ }
+ }
+
+ public interface Listener {
+ /**
+ * Called when a subtitle track has been selected.
+ *
+ * @param track selected subtitle track or null
+ */
+ public void onSubtitleTrackSelected(SubtitleTrack track);
+ }
+
+ private Listener mListener;
+}
diff --git a/packages/MediaComponents/src/com/android/media/subtitle/SubtitleTrack.java b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleTrack.java
new file mode 100644
index 0000000..6b9064a
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/subtitle/SubtitleTrack.java
@@ -0,0 +1,696 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.subtitle;
+
+import android.graphics.Canvas;
+import android.media.MediaFormat;
+import android.media.MediaPlayer2.TrackInfo;
+import android.media.SubtitleData;
+import android.os.Handler;
+import android.util.Log;
+import android.util.LongSparseArray;
+import android.util.Pair;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.Vector;
+
+// Note: This is forked from android.media.SubtitleTrack as of Android P
+/**
+ * A subtitle track abstract base class that is responsible for parsing and displaying
+ * an instance of a particular type of subtitle.
+ */
+public abstract class SubtitleTrack implements MediaTimeProvider.OnMediaTimeListener {
+ private static final String TAG = "SubtitleTrack";
+ private long mLastUpdateTimeMs;
+ private long mLastTimeMs;
+
+ private Runnable mRunnable;
+
+ final private LongSparseArray<Run> mRunsByEndTime = new LongSparseArray<Run>();
+ final private LongSparseArray<Run> mRunsByID = new LongSparseArray<Run>();
+
+ private CueList mCues;
+ final private Vector<Cue> mActiveCues = new Vector<Cue>();
+ protected boolean mVisible;
+
+ public boolean DEBUG = false;
+
+ protected Handler mHandler = new Handler();
+
+ private MediaFormat mFormat;
+
+ public SubtitleTrack(MediaFormat format) {
+ mFormat = format;
+ mCues = new CueList();
+ clearActiveCues();
+ mLastTimeMs = -1;
+ }
+
+ public final MediaFormat getFormat() {
+ return mFormat;
+ }
+
+ private long mNextScheduledTimeMs = -1;
+
+ public void onData(SubtitleData data) {
+ long runID = data.getStartTimeUs() + 1;
+ onData(data.getData(), true /* eos */, runID);
+ setRunDiscardTimeMs(
+ runID,
+ (data.getStartTimeUs() + data.getDurationUs()) / 1000);
+ }
+
+ /**
+ * Called when there is input data for the subtitle track. The
+ * complete subtitle for a track can include multiple whole units
+ * (runs). Each of these units can have multiple sections. The
+ * contents of a run are submitted in sequential order, with eos
+ * indicating the last section of the run. Calls from different
+ * runs must not be intermixed.
+ *
+ * @param data subtitle data byte buffer
+ * @param eos true if this is the last section of the run.
+ * @param runID mostly-unique ID for this run of data. Subtitle cues
+ * with runID of 0 are discarded immediately after
+ * display. Cues with runID of ~0 are discarded
+ * only at the deletion of the track object. Cues
+ * with other runID-s are discarded at the end of the
+ * run, which defaults to the latest timestamp of
+ * any of its cues (with this runID).
+ */
+ protected abstract void onData(byte[] data, boolean eos, long runID);
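+
+ // Illustrative sketch of how a parser might feed a single-section run,
+ // mirroring onData(SubtitleData) above; the variables are placeholders.
+ //
+ //   long runID = startTimeUs + 1;            // any value other than 0 and ~0
+ //   onData(payload, true /* eos */, runID);
+ //   setRunDiscardTimeMs(runID, (startTimeUs + durationUs) / 1000);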
+
+ /**
+ * Called when adding the subtitle rendering widget to the view hierarchy,
+ * as well as when showing or hiding the subtitle track, or when the video
+ * surface position has changed.
+ *
+ * @return the widget that renders this subtitle track. For most renderers
+ * there should be a single shared instance that is used for all
+ * tracks supported by that renderer, as at most one subtitle track
+ * is visible at one time.
+ */
+ public abstract RenderingWidget getRenderingWidget();
+
+ /**
+ * Called when the active cues have changed, and the contents of the subtitle
+ * view should be updated.
+ */
+ public abstract void updateView(Vector<Cue> activeCues);
+
+ protected synchronized void updateActiveCues(boolean rebuild, long timeMs) {
+ // out-of-order times mean seeking or new active cues being added
+ // (during their own timespan)
+ if (rebuild || mLastUpdateTimeMs > timeMs) {
+ clearActiveCues();
+ }
+
+ for(Iterator<Pair<Long, Cue> > it =
+ mCues.entriesBetween(mLastUpdateTimeMs, timeMs).iterator(); it.hasNext(); ) {
+ Pair<Long, Cue> event = it.next();
+ Cue cue = event.second;
+
+ if (cue.mEndTimeMs == event.first) {
+ // remove past cues
+ if (DEBUG) Log.v(TAG, "Removing " + cue);
+ mActiveCues.remove(cue);
+ if (cue.mRunID == 0) {
+ it.remove();
+ }
+ } else if (cue.mStartTimeMs == event.first) {
+ // add new cues
+ // TRICKY: this will happen in start order
+ if (DEBUG) Log.v(TAG, "Adding " + cue);
+ if (cue.mInnerTimesMs != null) {
+ cue.onTime(timeMs);
+ }
+ mActiveCues.add(cue);
+ } else if (cue.mInnerTimesMs != null) {
+ // cue is modified
+ cue.onTime(timeMs);
+ }
+ }
+
+ /* complete any runs */
+ while (mRunsByEndTime.size() > 0 &&
+ mRunsByEndTime.keyAt(0) <= timeMs) {
+ removeRunsByEndTimeIndex(0); // removes element
+ }
+ mLastUpdateTimeMs = timeMs;
+ }
+
+ private void removeRunsByEndTimeIndex(int ix) {
+ Run run = mRunsByEndTime.valueAt(ix);
+ while (run != null) {
+ Cue cue = run.mFirstCue;
+ while (cue != null) {
+ mCues.remove(cue);
+ Cue nextCue = cue.mNextInRun;
+ cue.mNextInRun = null;
+ cue = nextCue;
+ }
+ mRunsByID.remove(run.mRunID);
+ Run nextRun = run.mNextRunAtEndTimeMs;
+ run.mPrevRunAtEndTimeMs = null;
+ run.mNextRunAtEndTimeMs = null;
+ run = nextRun;
+ }
+ mRunsByEndTime.removeAt(ix);
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ /* remove all cues (untangle all cross-links) */
+ int size = mRunsByEndTime.size();
+ for(int ix = size - 1; ix >= 0; ix--) {
+ removeRunsByEndTimeIndex(ix);
+ }
+
+ super.finalize();
+ }
+
+ private synchronized void takeTime(long timeMs) {
+ mLastTimeMs = timeMs;
+ }
+
+ protected synchronized void clearActiveCues() {
+ if (DEBUG) Log.v(TAG, "Clearing " + mActiveCues.size() + " active cues");
+ mActiveCues.clear();
+ mLastUpdateTimeMs = -1;
+ }
+
+ protected void scheduleTimedEvents() {
+ /* get times for the next event */
+ if (mTimeProvider != null) {
+ mNextScheduledTimeMs = mCues.nextTimeAfter(mLastTimeMs);
+ if (DEBUG) Log.d(TAG, "sched @" + mNextScheduledTimeMs + " after " + mLastTimeMs);
+ mTimeProvider.notifyAt(
+ mNextScheduledTimeMs >= 0 ?
+ (mNextScheduledTimeMs * 1000) : MediaTimeProvider.NO_TIME,
+ this);
+ }
+ }
+
+ @Override
+ public void onTimedEvent(long timeUs) {
+ if (DEBUG) Log.d(TAG, "onTimedEvent " + timeUs);
+ synchronized (this) {
+ long timeMs = timeUs / 1000;
+ updateActiveCues(false, timeMs);
+ takeTime(timeMs);
+ }
+ updateView(mActiveCues);
+ scheduleTimedEvents();
+ }
+
+ @Override
+ public void onSeek(long timeUs) {
+ if (DEBUG) Log.d(TAG, "onSeek " + timeUs);
+ synchronized (this) {
+ long timeMs = timeUs / 1000;
+ updateActiveCues(true, timeMs);
+ takeTime(timeMs);
+ }
+ updateView(mActiveCues);
+ scheduleTimedEvents();
+ }
+
+ @Override
+ public void onStop() {
+ synchronized (this) {
+ if (DEBUG) Log.d(TAG, "onStop");
+ clearActiveCues();
+ mLastTimeMs = -1;
+ }
+ updateView(mActiveCues);
+ mNextScheduledTimeMs = -1;
+ mTimeProvider.notifyAt(MediaTimeProvider.NO_TIME, this);
+ }
+
+ protected MediaTimeProvider mTimeProvider;
+
+ public void show() {
+ if (mVisible) {
+ return;
+ }
+
+ mVisible = true;
+ RenderingWidget renderingWidget = getRenderingWidget();
+ if (renderingWidget != null) {
+ renderingWidget.setVisible(true);
+ }
+ if (mTimeProvider != null) {
+ mTimeProvider.scheduleUpdate(this);
+ }
+ }
+
+ public void hide() {
+ if (!mVisible) {
+ return;
+ }
+
+ if (mTimeProvider != null) {
+ mTimeProvider.cancelNotifications(this);
+ }
+ RenderingWidget renderingWidget = getRenderingWidget();
+ if (renderingWidget != null) {
+ renderingWidget.setVisible(false);
+ }
+ mVisible = false;
+ }
+
+ protected synchronized boolean addCue(Cue cue) {
+ mCues.add(cue);
+
+ if (cue.mRunID != 0) {
+ Run run = mRunsByID.get(cue.mRunID);
+ if (run == null) {
+ run = new Run();
+ mRunsByID.put(cue.mRunID, run);
+ run.mEndTimeMs = cue.mEndTimeMs;
+ } else if (run.mEndTimeMs < cue.mEndTimeMs) {
+ run.mEndTimeMs = cue.mEndTimeMs;
+ }
+
+ // link-up cues in the same run
+ cue.mNextInRun = run.mFirstCue;
+ run.mFirstCue = cue;
+ }
+
+ // if a cue is added that should be visible, need to refresh view
+ long nowMs = -1;
+ if (mTimeProvider != null) {
+ try {
+ nowMs = mTimeProvider.getCurrentTimeUs(
+ false /* precise */, true /* monotonic */) / 1000;
+ } catch (IllegalStateException e) {
+ // handle as if we are not playing
+ }
+ }
+
+ if (DEBUG) Log.v(TAG, "mVisible=" + mVisible + ", " +
+ cue.mStartTimeMs + " <= " + nowMs + ", " +
+ cue.mEndTimeMs + " >= " + mLastTimeMs);
+
+ if (mVisible &&
+ cue.mStartTimeMs <= nowMs &&
+ // we don't trust nowMs, so check any cue since last callback
+ cue.mEndTimeMs >= mLastTimeMs) {
+ if (mRunnable != null) {
+ mHandler.removeCallbacks(mRunnable);
+ }
+ final SubtitleTrack track = this;
+ final long thenMs = nowMs;
+ mRunnable = new Runnable() {
+ @Override
+ public void run() {
+ // even with synchronized, it is possible that we are going
+ // to do multiple updates as the runnable could be already
+ // running.
+ synchronized (track) {
+ mRunnable = null;
+ updateActiveCues(true, thenMs);
+ updateView(mActiveCues);
+ }
+ }
+ };
+ // delay update so we don't update view on every cue. TODO why 10?
+ if (mHandler.postDelayed(mRunnable, 10 /* delay */)) {
+ if (DEBUG) Log.v(TAG, "scheduling update");
+ } else {
+ if (DEBUG) Log.w(TAG, "failed to schedule subtitle view update");
+ }
+ return true;
+ }
+
+ if (mVisible &&
+ cue.mEndTimeMs >= mLastTimeMs &&
+ (cue.mStartTimeMs < mNextScheduledTimeMs ||
+ mNextScheduledTimeMs < 0)) {
+ scheduleTimedEvents();
+ }
+
+ return false;
+ }
+
+ public synchronized void setTimeProvider(MediaTimeProvider timeProvider) {
+ if (mTimeProvider == timeProvider) {
+ return;
+ }
+ if (mTimeProvider != null) {
+ mTimeProvider.cancelNotifications(this);
+ }
+ mTimeProvider = timeProvider;
+ if (mTimeProvider != null) {
+ mTimeProvider.scheduleUpdate(this);
+ }
+ }
+
+
+ static class CueList {
+ private static final String TAG = "CueList";
+ // simplistic, inefficient implementation
+ private SortedMap<Long, Vector<Cue> > mCues;
+ public boolean DEBUG = false;
+
+ private boolean addEvent(Cue cue, long timeMs) {
+ Vector<Cue> cues = mCues.get(timeMs);
+ if (cues == null) {
+ cues = new Vector<Cue>(2);
+ mCues.put(timeMs, cues);
+ } else if (cues.contains(cue)) {
+ // do not duplicate cues
+ return false;
+ }
+
+ cues.add(cue);
+ return true;
+ }
+
+ private void removeEvent(Cue cue, long timeMs) {
+ Vector<Cue> cues = mCues.get(timeMs);
+ if (cues != null) {
+ cues.remove(cue);
+ if (cues.size() == 0) {
+ mCues.remove(timeMs);
+ }
+ }
+ }
+
+ public void add(Cue cue) {
+ // ignore non-positive-duration cues
+ if (cue.mStartTimeMs >= cue.mEndTimeMs)
+ return;
+
+ if (!addEvent(cue, cue.mStartTimeMs)) {
+ return;
+ }
+
+ long lastTimeMs = cue.mStartTimeMs;
+ if (cue.mInnerTimesMs != null) {
+ for (long timeMs: cue.mInnerTimesMs) {
+ if (timeMs > lastTimeMs && timeMs < cue.mEndTimeMs) {
+ addEvent(cue, timeMs);
+ lastTimeMs = timeMs;
+ }
+ }
+ }
+
+ addEvent(cue, cue.mEndTimeMs);
+ }
+
+ public void remove(Cue cue) {
+ removeEvent(cue, cue.mStartTimeMs);
+ if (cue.mInnerTimesMs != null) {
+ for (long timeMs: cue.mInnerTimesMs) {
+ removeEvent(cue, timeMs);
+ }
+ }
+ removeEvent(cue, cue.mEndTimeMs);
+ }
+
+ public Iterable<Pair<Long, Cue>> entriesBetween(
+ final long lastTimeMs, final long timeMs) {
+ return new Iterable<Pair<Long, Cue> >() {
+ @Override
+ public Iterator<Pair<Long, Cue> > iterator() {
+ if (DEBUG) Log.d(TAG, "slice (" + lastTimeMs + ", " + timeMs + "]=");
+ try {
+ return new EntryIterator(
+ mCues.subMap(lastTimeMs + 1, timeMs + 1));
+ } catch(IllegalArgumentException e) {
+ return new EntryIterator(null);
+ }
+ }
+ };
+ }
+
+ public long nextTimeAfter(long timeMs) {
+ SortedMap<Long, Vector<Cue>> tail = null;
+ try {
+ tail = mCues.tailMap(timeMs + 1);
+ if (tail != null) {
+ return tail.firstKey();
+ } else {
+ return -1;
+ }
+ } catch(IllegalArgumentException e) {
+ return -1;
+ } catch(NoSuchElementException e) {
+ return -1;
+ }
+ }
+
+ class EntryIterator implements Iterator<Pair<Long, Cue> > {
+ @Override
+ public boolean hasNext() {
+ return !mDone;
+ }
+
+ @Override
+ public Pair<Long, Cue> next() {
+ if (mDone) {
+ throw new NoSuchElementException("");
+ }
+ mLastEntry = new Pair<Long, Cue>(
+ mCurrentTimeMs, mListIterator.next());
+ mLastListIterator = mListIterator;
+ if (!mListIterator.hasNext()) {
+ nextKey();
+ }
+ return mLastEntry;
+ }
+
+ @Override
+ public void remove() {
+ // only allow removing end tags
+ if (mLastListIterator == null ||
+ mLastEntry.second.mEndTimeMs != mLastEntry.first) {
+ throw new IllegalStateException("");
+ }
+
+ // remove end-cue
+ mLastListIterator.remove();
+ mLastListIterator = null;
+ if (mCues.get(mLastEntry.first).size() == 0) {
+ mCues.remove(mLastEntry.first);
+ }
+
+ // remove rest of the cues
+ Cue cue = mLastEntry.second;
+ removeEvent(cue, cue.mStartTimeMs);
+ if (cue.mInnerTimesMs != null) {
+ for (long timeMs: cue.mInnerTimesMs) {
+ removeEvent(cue, timeMs);
+ }
+ }
+ }
+
+ public EntryIterator(SortedMap<Long, Vector<Cue> > cues) {
+ if (DEBUG) Log.v(TAG, cues + "");
+ mRemainingCues = cues;
+ mLastListIterator = null;
+ nextKey();
+ }
+
+ private void nextKey() {
+ do {
+ try {
+ if (mRemainingCues == null) {
+ throw new NoSuchElementException("");
+ }
+ mCurrentTimeMs = mRemainingCues.firstKey();
+ mListIterator =
+ mRemainingCues.get(mCurrentTimeMs).iterator();
+ try {
+ mRemainingCues =
+ mRemainingCues.tailMap(mCurrentTimeMs + 1);
+ } catch (IllegalArgumentException e) {
+ mRemainingCues = null;
+ }
+ mDone = false;
+ } catch (NoSuchElementException e) {
+ mDone = true;
+ mRemainingCues = null;
+ mListIterator = null;
+ return;
+ }
+ } while (!mListIterator.hasNext());
+ }
+
+ private long mCurrentTimeMs;
+ private Iterator<Cue> mListIterator;
+ private boolean mDone;
+ private SortedMap<Long, Vector<Cue> > mRemainingCues;
+ private Iterator<Cue> mLastListIterator;
+ private Pair<Long,Cue> mLastEntry;
+ }
+
+ CueList() {
+ mCues = new TreeMap<Long, Vector<Cue>>();
+ }
+ }
+
+ public static class Cue {
+ public long mStartTimeMs;
+ public long mEndTimeMs;
+ public long[] mInnerTimesMs;
+ public long mRunID;
+
+ public Cue mNextInRun;
+
+ public void onTime(long timeMs) { }
+ }
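+
+ // Illustrative sketch of the Cue/CueList event model (internal helpers): a cue
+ // contributes an event at its start time, at each inner time, and at its end
+ // time, and entriesBetween() yields the half-open slice (lastTimeMs, timeMs].
+ //
+ //   CueList cues = new CueList();
+ //   Cue cue = new Cue();
+ //   cue.mStartTimeMs = 1000;
+ //   cue.mEndTimeMs = 3000;
+ //   cues.add(cue);
+ //   for (Pair<Long, Cue> event : cues.entriesBetween(0, 5000)) {
+ //       // visits the cue once at 1000 (start) and once at 3000 (end)
+ //   }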
+
+ /** update mRunsByEndTime (with default end time) */
+ protected void finishedRun(long runID) {
+ if (runID != 0 && runID != ~0) {
+ Run run = mRunsByID.get(runID);
+ if (run != null) {
+ run.storeByEndTimeMs(mRunsByEndTime);
+ }
+ }
+ }
+
+ /** update mRunsByEndTime with given end time */
+ public void setRunDiscardTimeMs(long runID, long timeMs) {
+ if (runID != 0 && runID != ~0) {
+ Run run = mRunsByID.get(runID);
+ if (run != null) {
+ run.mEndTimeMs = timeMs;
+ run.storeByEndTimeMs(mRunsByEndTime);
+ }
+ }
+ }
+
+ /** Whether this is a text track that fires events instead of being rendered. */
+ public int getTrackType() {
+ return getRenderingWidget() == null
+ ? TrackInfo.MEDIA_TRACK_TYPE_TIMEDTEXT
+ : TrackInfo.MEDIA_TRACK_TYPE_SUBTITLE;
+ }
+
+
+ private static class Run {
+ public Cue mFirstCue;
+ public Run mNextRunAtEndTimeMs;
+ public Run mPrevRunAtEndTimeMs;
+ public long mEndTimeMs = -1;
+ public long mRunID = 0;
+ private long mStoredEndTimeMs = -1;
+
+ public void storeByEndTimeMs(LongSparseArray<Run> runsByEndTime) {
+ // remove old value if any
+ int ix = runsByEndTime.indexOfKey(mStoredEndTimeMs);
+ if (ix >= 0) {
+ if (mPrevRunAtEndTimeMs == null) {
+ assert(this == runsByEndTime.valueAt(ix));
+ if (mNextRunAtEndTimeMs == null) {
+ runsByEndTime.removeAt(ix);
+ } else {
+ runsByEndTime.setValueAt(ix, mNextRunAtEndTimeMs);
+ }
+ }
+ removeAtEndTimeMs();
+ }
+
+ // add new value
+ if (mEndTimeMs >= 0) {
+ mPrevRunAtEndTimeMs = null;
+ mNextRunAtEndTimeMs = runsByEndTime.get(mEndTimeMs);
+ if (mNextRunAtEndTimeMs != null) {
+ mNextRunAtEndTimeMs.mPrevRunAtEndTimeMs = this;
+ }
+ runsByEndTime.put(mEndTimeMs, this);
+ mStoredEndTimeMs = mEndTimeMs;
+ }
+ }
+
+ public void removeAtEndTimeMs() {
+ Run prev = mPrevRunAtEndTimeMs;
+
+ if (mPrevRunAtEndTimeMs != null) {
+ mPrevRunAtEndTimeMs.mNextRunAtEndTimeMs = mNextRunAtEndTimeMs;
+ mPrevRunAtEndTimeMs = null;
+ }
+ if (mNextRunAtEndTimeMs != null) {
+ mNextRunAtEndTimeMs.mPrevRunAtEndTimeMs = prev;
+ mNextRunAtEndTimeMs = null;
+ }
+ }
+ }
+
+ /**
+ * Interface for rendering subtitles onto a Canvas.
+ */
+ public interface RenderingWidget {
+ /**
+ * Sets the widget's callback, which is used to send updates when the
+ * rendered data has changed.
+ *
+ * @param callback update callback
+ */
+ public void setOnChangedListener(OnChangedListener callback);
+
+ /**
+ * Sets the widget's size.
+ *
+ * @param width width in pixels
+ * @param height height in pixels
+ */
+ public void setSize(int width, int height);
+
+ /**
+ * Sets whether the widget should draw subtitles.
+ *
+ * @param visible true if subtitles should be drawn, false otherwise
+ */
+ public void setVisible(boolean visible);
+
+ /**
+ * Renders subtitles onto a {@link Canvas}.
+ *
+ * @param c canvas on which to render subtitles
+ */
+ public void draw(Canvas c);
+
+ /**
+ * Called when the widget is attached to a window.
+ */
+ public void onAttachedToWindow();
+
+ /**
+ * Called when the widget is detached from a window.
+ */
+ public void onDetachedFromWindow();
+
+ /**
+ * Callback used to send updates about changes to rendering data.
+ */
+ public interface OnChangedListener {
+ /**
+ * Called when the rendering data has changed.
+ *
+ * @param renderingWidget the widget whose data has changed
+ */
+ public void onChanged(RenderingWidget renderingWidget);
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/update/ApiFactory.java b/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
new file mode 100644
index 0000000..d7be549
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/update/ApiFactory.java
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.update;
+
+import android.app.Notification;
+import android.content.Context;
+import android.content.pm.ApplicationInfo;
+import android.media.MediaBrowser2;
+import android.media.MediaBrowser2.BrowserCallback;
+import android.media.MediaController2;
+import android.media.MediaController2.ControllerCallback;
+import android.media.MediaItem2;
+import android.media.MediaLibraryService2;
+import android.media.MediaLibraryService2.LibraryRoot;
+import android.media.MediaLibraryService2.MediaLibrarySession;
+import android.media.MediaLibraryService2.MediaLibrarySession.MediaLibrarySessionCallback;
+import android.media.MediaMetadata2;
+import android.media.MediaPlaylistAgent;
+import android.media.MediaSession2;
+import android.media.SessionCommand2;
+import android.media.SessionCommandGroup2;
+import android.media.MediaSession2.ControllerInfo;
+import android.media.MediaSession2.SessionCallback;
+import android.media.MediaSessionService2;
+import android.media.MediaSessionService2.MediaNotification;
+import android.media.Rating2;
+import android.media.SessionToken2;
+import android.media.VolumeProvider2;
+import android.media.update.MediaBrowser2Provider;
+import android.media.update.MediaControlView2Provider;
+import android.media.update.MediaController2Provider;
+import android.media.update.MediaItem2Provider;
+import android.media.update.MediaLibraryService2Provider.LibraryRootProvider;
+import android.media.update.MediaMetadata2Provider;
+import android.media.update.MediaPlaylistAgentProvider;
+import android.media.update.MediaSession2Provider;
+import android.media.update.MediaSession2Provider.BuilderBaseProvider;
+import android.media.update.MediaSession2Provider.CommandButtonProvider.BuilderProvider;
+import android.media.update.MediaSessionService2Provider;
+import android.media.update.MediaSessionService2Provider.MediaNotificationProvider;
+import android.media.update.SessionToken2Provider;
+import android.media.update.StaticProvider;
+import android.media.update.VideoView2Provider;
+import android.media.update.ViewGroupProvider;
+import android.media.update.VolumeProvider2Provider;
+import android.os.Bundle;
+import android.os.IInterface;
+import android.support.annotation.Nullable;
+import android.util.AttributeSet;
+import android.widget.MediaControlView2;
+import android.widget.VideoView2;
+
+import com.android.media.IMediaController2;
+import com.android.media.MediaBrowser2Impl;
+import com.android.media.MediaController2Impl;
+import com.android.media.MediaItem2Impl;
+import com.android.media.MediaLibraryService2Impl;
+import com.android.media.MediaLibraryService2Impl.LibraryRootImpl;
+import com.android.media.MediaMetadata2Impl;
+import com.android.media.MediaPlaylistAgentImpl;
+import com.android.media.MediaSession2Impl;
+import com.android.media.MediaSessionService2Impl;
+import com.android.media.Rating2Impl;
+import com.android.media.SessionToken2Impl;
+import com.android.media.VolumeProvider2Impl;
+import com.android.widget.MediaControlView2Impl;
+import com.android.widget.VideoView2Impl;
+
+import java.util.concurrent.Executor;
+
+public final class ApiFactory implements StaticProvider {
+ private ApiFactory() { }
+
+ public static StaticProvider initialize(ApplicationInfo updatableInfo) {
+ ApiHelper.initialize(updatableInfo);
+ return new ApiFactory();
+ }
+
+ @Override
+ public MediaController2Provider createMediaController2(
+ Context context, MediaController2 instance, SessionToken2 token,
+ Executor executor, ControllerCallback callback) {
+ return new MediaController2Impl(context, instance, token, executor, callback);
+ }
+
+ @Override
+ public MediaBrowser2Provider createMediaBrowser2(Context context, MediaBrowser2 instance,
+ SessionToken2 token, Executor executor, BrowserCallback callback) {
+ return new MediaBrowser2Impl(context, instance, token, executor, callback);
+ }
+
+ @Override
+ public MediaSession2Provider.CommandProvider createMediaSession2Command(
+ SessionCommand2 instance, int commandCode, String action, Bundle extra) {
+ if (action == null && extra == null) {
+ return new MediaSession2Impl.CommandImpl(instance, commandCode);
+ }
+ return new MediaSession2Impl.CommandImpl(instance, action, extra);
+ }
+
+ @Override
+ public SessionCommand2 fromBundle_MediaSession2Command(Bundle command) {
+ return MediaSession2Impl.CommandImpl.fromBundle_impl(command);
+ }
+
+ @Override
+ public MediaSession2Provider.CommandGroupProvider createMediaSession2CommandGroup(
+ SessionCommandGroup2 instance, SessionCommandGroup2 other) {
+ return new MediaSession2Impl.CommandGroupImpl(instance,
+ (other == null) ? null : other.getProvider());
+ }
+
+ @Override
+ public SessionCommandGroup2 fromBundle_MediaSession2CommandGroup(Bundle commands) {
+ return MediaSession2Impl.CommandGroupImpl.fromBundle_impl(commands);
+ }
+
+ @Override
+ public MediaSession2Provider.ControllerInfoProvider createMediaSession2ControllerInfo(
+ Context context, ControllerInfo instance, int uid, int pid, String packageName,
+ IInterface callback) {
+ return new MediaSession2Impl.ControllerInfoImpl(context,
+ instance, uid, pid, packageName, (IMediaController2) callback);
+ }
+
+ @Override
+ public BuilderProvider createMediaSession2CommandButtonBuilder(
+ MediaSession2.CommandButton.Builder instance) {
+ return new MediaSession2Impl.CommandButtonImpl.BuilderImpl(instance);
+ }
+
+ public BuilderBaseProvider<MediaSession2, SessionCallback> createMediaSession2Builder(
+ Context context, MediaSession2.Builder instance) {
+ return new MediaSession2Impl.BuilderImpl(context, instance);
+ }
+
+ @Override
+ public MediaSessionService2Provider createMediaSessionService2(MediaSessionService2 instance) {
+ return new MediaSessionService2Impl(instance);
+ }
+
+ @Override
+ public MediaNotificationProvider createMediaSessionService2MediaNotification(
+ MediaNotification instance, int notificationId, Notification notification) {
+ return new MediaSessionService2Impl.MediaNotificationImpl(
+ instance, notificationId, notification);
+ }
+
+ @Override
+ public MediaSessionService2Provider createMediaLibraryService2(MediaLibraryService2 instance) {
+ return new MediaLibraryService2Impl(instance);
+ }
+
+ @Override
+ public BuilderBaseProvider<MediaLibrarySession, MediaLibrarySessionCallback>
+ createMediaLibraryService2Builder(MediaLibraryService2 service,
+ MediaLibrarySession.Builder instance, Executor callbackExecutor,
+ MediaLibrarySessionCallback callback) {
+ return new MediaLibraryService2Impl.BuilderImpl(service, instance, callbackExecutor,
+ callback);
+ }
+
+ @Override
+ public LibraryRootProvider createMediaLibraryService2LibraryRoot(
+ LibraryRoot instance, String rootId, Bundle extras) {
+ return new LibraryRootImpl(instance, rootId, extras);
+ }
+
+ @Override
+ public MediaControlView2Provider createMediaControlView2(MediaControlView2 instance,
+ ViewGroupProvider superProvider, ViewGroupProvider privateProvider,
+ @Nullable AttributeSet attrs, int defStyleAttr, int defStyleRes) {
+ return new MediaControlView2Impl(instance, superProvider, privateProvider);
+ }
+
+ @Override
+ public VideoView2Provider createVideoView2(
+ VideoView2 instance, ViewGroupProvider superProvider, ViewGroupProvider privateProvider,
+ @Nullable AttributeSet attrs, int defStyleAttr, int defStyleRes) {
+ return new VideoView2Impl(instance, superProvider, privateProvider);
+ }
+
+ @Override
+ public SessionToken2Provider createSessionToken2(Context context, SessionToken2 instance,
+ String packageName, String serviceName, int uid) {
+ return new SessionToken2Impl(context, instance, packageName, serviceName, uid);
+ }
+
+ @Override
+ public SessionToken2 fromBundle_SessionToken2(Bundle bundle) {
+ return SessionToken2Impl.fromBundle_impl(bundle);
+ }
+
+ @Override
+ public MediaItem2Provider.BuilderProvider createMediaItem2Builder(MediaItem2.Builder instance,
+ int flags) {
+ return new MediaItem2Impl.BuilderImpl(instance, flags);
+ }
+
+ @Override
+ public MediaItem2 fromBundle_MediaItem2(Bundle bundle) {
+ return MediaItem2Impl.fromBundle_impl(bundle);
+ }
+
+ @Override
+ public VolumeProvider2Provider createVolumeProvider2(VolumeProvider2 instance, int controlType,
+ int maxVolume, int currentVolume) {
+ return new VolumeProvider2Impl(instance, controlType, maxVolume, currentVolume);
+ }
+
+ @Override
+ public MediaMetadata2 fromBundle_MediaMetadata2(Bundle bundle) {
+ return MediaMetadata2Impl.fromBundle_impl(bundle);
+ }
+
+ @Override
+ public MediaMetadata2Provider.BuilderProvider createMediaMetadata2Builder(
+ MediaMetadata2.Builder instance) {
+ return new MediaMetadata2Impl.BuilderImpl(instance);
+ }
+
+ @Override
+ public MediaMetadata2Provider.BuilderProvider createMediaMetadata2Builder(
+ MediaMetadata2.Builder instance, MediaMetadata2 source) {
+ return new MediaMetadata2Impl.BuilderImpl(instance, source);
+ }
+
+ @Override
+ public Rating2 fromBundle_Rating2(Bundle bundle) {
+ return Rating2Impl.fromBundle_impl(bundle);
+ }
+
+ @Override
+ public Rating2 newUnratedRating_Rating2(int ratingStyle) {
+ return Rating2Impl.newUnratedRating_impl(ratingStyle);
+ }
+
+ @Override
+ public Rating2 newHeartRating_Rating2(boolean hasHeart) {
+ return Rating2Impl.newHeartRating_impl(hasHeart);
+ }
+
+ @Override
+ public Rating2 newThumbRating_Rating2(boolean thumbIsUp) {
+ return Rating2Impl.newThumbRating_impl(thumbIsUp);
+ }
+
+ @Override
+ public Rating2 newStarRating_Rating2(int starRatingStyle, float starRating) {
+ return Rating2Impl.newStarRating_impl(starRatingStyle, starRating);
+ }
+
+ @Override
+ public Rating2 newPercentageRating_Rating2(float percent) {
+ return Rating2Impl.newPercentageRating_impl(percent);
+ }
+
+ @Override
+ public MediaPlaylistAgentProvider createMediaPlaylistAgent(MediaPlaylistAgent instance) {
+ return new MediaPlaylistAgentImpl(instance);
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/media/update/ApiHelper.java b/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
new file mode 100644
index 0000000..ad8bb48
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/media/update/ApiHelper.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.update;
+
+import android.annotation.Nullable;
+import android.content.Context;
+import android.content.ContextWrapper;
+import android.content.pm.ApplicationInfo;
+import android.content.pm.PackageManager.NameNotFoundException;
+import android.content.res.Resources;
+import android.content.res.Resources.Theme;
+import android.content.res.XmlResourceParser;
+import android.support.annotation.GuardedBy;
+import android.support.v4.widget.Space;
+import android.support.v7.widget.ButtonBarLayout;
+import android.util.AttributeSet;
+import android.view.ContextThemeWrapper;
+import android.view.LayoutInflater;
+import android.view.View;
+import android.view.ViewGroup;
+
+import com.android.support.mediarouter.app.MediaRouteButton;
+import com.android.support.mediarouter.app.MediaRouteExpandCollapseButton;
+import com.android.support.mediarouter.app.MediaRouteVolumeSlider;
+import com.android.support.mediarouter.app.OverlayListView;
+
+public final class ApiHelper {
+ private static ApplicationInfo sUpdatableInfo;
+
+ @GuardedBy("this")
+ private static Theme sLibTheme;
+
+ private ApiHelper() { }
+
+ static void initialize(ApplicationInfo updatableInfo) {
+ if (sUpdatableInfo != null) {
+ throw new IllegalStateException("initialize should only be called once");
+ }
+
+ sUpdatableInfo = updatableInfo;
+ }
+
+ public static Resources getLibResources(Context context) {
+ return getLibTheme(context).getResources();
+ }
+
+ public static Theme getLibTheme(Context context) {
+ if (sLibTheme != null) return sLibTheme;
+
+ return getLibThemeSynchronized(context);
+ }
+
+ public static Theme getLibTheme(Context context, int themeId) {
+ Theme theme = getLibResources(context).newTheme();
+ theme.applyStyle(themeId, true);
+ return theme;
+ }
+
+ public static LayoutInflater getLayoutInflater(Context context) {
+ return getLayoutInflater(context, null);
+ }
+
+ public static LayoutInflater getLayoutInflater(Context context, Theme theme) {
+ if (theme == null) {
+ theme = getLibTheme(context);
+ }
+
+ // TODO (b/72975976): Avoid to use ContextThemeWrapper with app context and lib theme.
+ LayoutInflater layoutInflater = LayoutInflater.from(context).cloneInContext(
+ new ContextThemeWrapper(context, theme));
+ layoutInflater.setFactory2(new LayoutInflater.Factory2() {
+ @Override
+ public View onCreateView(
+ View parent, String name, Context context, AttributeSet attrs) {
+ if (MediaRouteButton.class.getCanonicalName().equals(name)) {
+ return new MediaRouteButton(context, attrs);
+ } else if (MediaRouteVolumeSlider.class.getCanonicalName().equals(name)) {
+ return new MediaRouteVolumeSlider(context, attrs);
+ } else if (MediaRouteExpandCollapseButton.class.getCanonicalName().equals(name)) {
+ return new MediaRouteExpandCollapseButton(context, attrs);
+ } else if (OverlayListView.class.getCanonicalName().equals(name)) {
+ return new OverlayListView(context, attrs);
+ } else if (ButtonBarLayout.class.getCanonicalName().equals(name)) {
+ return new ButtonBarLayout(context, attrs);
+ } else if (Space.class.getCanonicalName().equals(name)) {
+ return new Space(context, attrs);
+ }
+ return null;
+ }
+
+ @Override
+ public View onCreateView(String name, Context context, AttributeSet attrs) {
+ return onCreateView(null, name, context, attrs);
+ }
+ });
+ return layoutInflater;
+ }
+
+ public static View inflateLibLayout(Context context, int libResId) {
+ return inflateLibLayout(context, getLibTheme(context), libResId, null, false);
+ }
+
+ public static View inflateLibLayout(Context context, Theme theme, int libResId) {
+ return inflateLibLayout(context, theme, libResId, null, false);
+ }
+
+ public static View inflateLibLayout(Context context, Theme theme, int libResId,
+ @Nullable ViewGroup root, boolean attachToRoot) {
+ try (XmlResourceParser parser = getLibResources(context).getLayout(libResId)) {
+ return getLayoutInflater(context, theme).inflate(parser, root, attachToRoot);
+ }
+ }
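+
+ // Illustrative usage sketch: inflating a layout that ships inside the updatable
+ // library's resources (R.layout.example_layout is a placeholder id).
+ //
+ //   View root = ApiHelper.inflateLibLayout(context, R.layout.example_layout);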
+
+ private static synchronized Theme getLibThemeSynchronized(Context context) {
+ if (sLibTheme != null) return sLibTheme;
+
+ if (sUpdatableInfo == null) {
+ throw new IllegalStateException("initialize hasn't been called yet");
+ }
+
+ try {
+ return sLibTheme = context.getPackageManager()
+ .getResourcesForApplication(sUpdatableInfo).newTheme();
+ } catch (NameNotFoundException e) {
+ throw new RuntimeException(e);
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/api24/media/MediaRouterApi24.java b/packages/MediaComponents/src/com/android/support/mediarouter/api24/media/MediaRouterApi24.java
new file mode 100644
index 0000000..1146af6
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/api24/media/MediaRouterApi24.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+// @@RequiresApi(24)
+final class MediaRouterApi24 {
+ public static final class RouteInfo {
+ public static int getDeviceType(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getDeviceType();
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
new file mode 100644
index 0000000..d3e8d47
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteActionProvider.java
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.content.Context;
+import android.support.v4.view.ActionProvider;
+import android.util.Log;
+import android.view.View;
+import android.view.ViewGroup;
+
+import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
+
+import java.lang.ref.WeakReference;
+
+/**
+ * The media route action provider displays a {@link MediaRouteButton media route button}
+ * in the application's {@link ActionBar} to allow the user to select routes and
+ * to control the currently selected route.
+ * <p>
+ * The application must specify the kinds of routes that the user should be allowed
+ * to select by specifying a {@link MediaRouteSelector selector} with the
+ * {@link #setRouteSelector} method.
+ * </p><p>
+ * Refer to {@link MediaRouteButton} for a description of the button that will
+ * appear in the action bar menu. Note that instead of disabling the button
+ * when no routes are available, the action provider will instead make the
+ * menu item invisible. In this way, the button will only be visible when it
+ * is possible for the user to discover and select a matching route.
+ * </p>
+ *
+ * <h3>Prerequisites</h3>
+ * <p>
+ * To use the media route action provider, the activity must be a subclass of
+ * {@link AppCompatActivity} from the <code>android.support.v7.appcompat</code>
+ * support library. Refer to support library documentation for details.
+ * </p>
+ *
+ * <h3>Example</h3>
+ * <p>
+ * </p><p>
+ * The application should define a menu resource to include the provider in the
+ * action bar options menu. Note that the support library action bar uses attributes
+ * that are defined in the application's resource namespace rather than the framework's
+ * resource namespace to configure each item.
+ * </p><pre>
+ * <menu xmlns:android="http://schemas.android.com/apk/res/android"
+ * xmlns:app="http://schemas.android.com/apk/res-auto">
+ * <item android:id="@+id/media_route_menu_item"
+ * android:title="@string/media_route_menu_title"
+ * app:showAsAction="always"
+ * app:actionProviderClass="com.android.support.mediarouter.app.MediaRouteActionProvider"/>
+ * </menu>
+ * </pre><p>
+ * Then configure the menu and set the route selector for the chooser.
+ * </p><pre>
+ * public class MyActivity extends AppCompatActivity {
+ * private MediaRouter mRouter;
+ * private MediaRouter.Callback mCallback;
+ * private MediaRouteSelector mSelector;
+ *
+ * protected void onCreate(Bundle savedInstanceState) {
+ * super.onCreate(savedInstanceState);
+ *
+ * mRouter = MediaRouter.getInstance(this);
+ * mSelector = new MediaRouteSelector.Builder()
+ * .addControlCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO)
+ * .addControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK)
+ * .build();
+ * mCallback = new MyCallback();
+ * }
+ *
+ * // Add the callback on start to tell the media router what kinds of routes
+ * // the application is interested in so that it can try to discover suitable ones.
+ * public void onStart() {
+ * super.onStart();
+ *
+ * mRouter.addCallback(mSelector, mCallback,
+ * MediaRouter.CALLBACK_FLAG_REQUEST_DISCOVERY);
+ *
+ * MediaRouter.RouteInfo route = mRouter.updateSelectedRoute(mSelector);
+ * // do something with the route...
+ * }
+ *
+ * // Remove the selector on stop to tell the media router that it no longer
+ * // needs to invest effort trying to discover routes of these kinds for now.
+ * public void onStop() {
+ * super.onStop();
+ *
+ * mRouter.removeCallback(mCallback);
+ * }
+ *
+ * public boolean onCreateOptionsMenu(Menu menu) {
+ * super.onCreateOptionsMenu(menu);
+ *
+ * getMenuInflater().inflate(R.menu.sample_media_router_menu, menu);
+ *
+ * MenuItem mediaRouteMenuItem = menu.findItem(R.id.media_route_menu_item);
+ * MediaRouteActionProvider mediaRouteActionProvider =
+ * (MediaRouteActionProvider)MenuItemCompat.getActionProvider(mediaRouteMenuItem);
+ * mediaRouteActionProvider.setRouteSelector(mSelector);
+ * return true;
+ * }
+ *
+ * private final class MyCallback extends MediaRouter.Callback {
+ * // Implement callback methods as needed.
+ * }
+ * }
+ * </pre>
+ *
+ * @see #setRouteSelector
+ */
+public class MediaRouteActionProvider extends ActionProvider {
+ private static final String TAG = "MediaRouteActionProvider";
+
+ private final MediaRouter mRouter;
+ private final MediaRouterCallback mCallback;
+
+ private MediaRouteSelector mSelector = MediaRouteSelector.EMPTY;
+ private MediaRouteDialogFactory mDialogFactory = MediaRouteDialogFactory.getDefault();
+ private MediaRouteButton mButton;
+
+ /**
+ * Creates the action provider.
+ *
+ * @param context The context.
+ */
+ public MediaRouteActionProvider(Context context) {
+ super(context);
+
+ mRouter = MediaRouter.getInstance(context);
+ mCallback = new MediaRouterCallback(this);
+ }
+
+ /**
+ * Gets the media route selector for filtering the routes that the user can
+ * select using the media route chooser dialog.
+ *
+ * @return The selector, never null.
+ */
+ @NonNull
+ public MediaRouteSelector getRouteSelector() {
+ return mSelector;
+ }
+
+ /**
+ * Sets the media route selector for filtering the routes that the user can
+ * select using the media route chooser dialog.
+ *
+ * @param selector The selector, must not be null.
+ */
+ public void setRouteSelector(@NonNull MediaRouteSelector selector) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+
+ if (!mSelector.equals(selector)) {
+ // FIXME: We currently have no way of knowing whether the action provider
+ // is still needed by the UI. Unfortunately this means the action provider
+ // may leak callbacks until garbage collection occurs. This may result in
+ // media route providers doing more work than necessary in the short term
+ // while trying to discover routes that are no longer of interest to the
+ // application. To solve this problem, the action provider will need some
+ // indication from the framework that it is being destroyed.
+ if (!mSelector.isEmpty()) {
+ mRouter.removeCallback(mCallback);
+ }
+ if (!selector.isEmpty()) {
+ mRouter.addCallback(selector, mCallback);
+ }
+ mSelector = selector;
+ refreshRoute();
+
+ if (mButton != null) {
+ mButton.setRouteSelector(selector);
+ }
+ }
+ }
+
+ /**
+ * Gets the media route dialog factory to use when showing the route chooser
+ * or controller dialog.
+ *
+ * @return The dialog factory, never null.
+ */
+ @NonNull
+ public MediaRouteDialogFactory getDialogFactory() {
+ return mDialogFactory;
+ }
+
+ /**
+ * Sets the media route dialog factory to use when showing the route chooser
+ * or controller dialog.
+ *
+ * @param factory The dialog factory, must not be null.
+ */
+ public void setDialogFactory(@NonNull MediaRouteDialogFactory factory) {
+ if (factory == null) {
+ throw new IllegalArgumentException("factory must not be null");
+ }
+
+ if (mDialogFactory != factory) {
+ mDialogFactory = factory;
+
+ if (mButton != null) {
+ mButton.setDialogFactory(factory);
+ }
+ }
+ }
+
+ /**
+ * Gets the associated media route button, or null if it has not yet been created.
+ */
+ @Nullable
+ public MediaRouteButton getMediaRouteButton() {
+ return mButton;
+ }
+
+ /**
+ * Called when the media route button is being created.
+ * <p>
+ * Subclasses may override this method to customize the button.
+ * </p>
+ */
+ public MediaRouteButton onCreateMediaRouteButton() {
+ return new MediaRouteButton(getContext());
+ }
+
+ @Override
+ @SuppressWarnings("deprecation")
+ public View onCreateActionView() {
+ if (mButton != null) {
+ Log.e(TAG, "onCreateActionView: this ActionProvider is already associated " +
+ "with a menu item. Don't reuse MediaRouteActionProvider instances! " +
+ "Abandoning the old menu item...");
+ }
+
+ mButton = onCreateMediaRouteButton();
+ mButton.setCheatSheetEnabled(true);
+ mButton.setRouteSelector(mSelector);
+ mButton.setDialogFactory(mDialogFactory);
+ mButton.setLayoutParams(new ViewGroup.LayoutParams(
+ ViewGroup.LayoutParams.WRAP_CONTENT,
+ ViewGroup.LayoutParams.MATCH_PARENT));
+ return mButton;
+ }
+
+ @Override
+ public boolean onPerformDefaultAction() {
+ if (mButton != null) {
+ return mButton.showDialog();
+ }
+ return false;
+ }
+
+ @Override
+ public boolean overridesItemVisibility() {
+ return true;
+ }
+
+ @Override
+ public boolean isVisible() {
+ return mRouter.isRouteAvailable(mSelector,
+ MediaRouter.AVAILABILITY_FLAG_IGNORE_DEFAULT_ROUTE);
+ }
+
+ void refreshRoute() {
+ refreshVisibility();
+ }
+
+ private static final class MediaRouterCallback extends MediaRouter.Callback {
+ private final WeakReference<MediaRouteActionProvider> mProviderWeak;
+
+ public MediaRouterCallback(MediaRouteActionProvider provider) {
+ mProviderWeak = new WeakReference<MediaRouteActionProvider>(provider);
+ }
+
+ @Override
+ public void onRouteAdded(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoute(router);
+ }
+
+ @Override
+ public void onRouteRemoved(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoute(router);
+ }
+
+ @Override
+ public void onRouteChanged(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoute(router);
+ }
+
+ @Override
+ public void onProviderAdded(MediaRouter router, MediaRouter.ProviderInfo provider) {
+ refreshRoute(router);
+ }
+
+ @Override
+ public void onProviderRemoved(MediaRouter router, MediaRouter.ProviderInfo provider) {
+ refreshRoute(router);
+ }
+
+ @Override
+ public void onProviderChanged(MediaRouter router, MediaRouter.ProviderInfo provider) {
+ refreshRoute(router);
+ }
+
+ private void refreshRoute(MediaRouter router) {
+ MediaRouteActionProvider provider = mProviderWeak.get();
+ if (provider != null) {
+ provider.refreshRoute();
+ } else {
+ router.removeCallback(this);
+ }
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
new file mode 100644
index 0000000..fde8a63
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteButton.java
@@ -0,0 +1,629 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.annotation.NonNull;
+import android.app.Activity;
+import android.app.FragmentManager;
+import android.content.Context;
+import android.content.ContextWrapper;
+import android.content.res.ColorStateList;
+import android.content.res.Resources;
+import android.content.res.TypedArray;
+import android.graphics.Canvas;
+import android.graphics.drawable.AnimationDrawable;
+import android.graphics.drawable.Drawable;
+import android.os.AsyncTask;
+import android.support.v4.graphics.drawable.DrawableCompat;
+import android.support.v7.widget.TooltipCompat;
+import android.util.AttributeSet;
+import android.util.Log;
+import android.util.SparseArray;
+import android.view.SoundEffectConstants;
+import android.view.View;
+
+import com.android.media.update.ApiHelper;
+import com.android.media.update.R;
+import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
+
+/**
+ * The media route button allows the user to select routes and to control the
+ * currently selected route.
+ * <p>
+ * The application must specify the kinds of routes that the user should be allowed
+ * to select by specifying a {@link MediaRouteSelector selector} with the
+ * {@link #setRouteSelector} method.
+ * </p><p>
+ * When the default route is selected or when the currently selected route does not
+ * match the {@link #getRouteSelector() selector}, the button will appear in
+ * an inactive state indicating that the application is not connected to a
+ * route of the kind that it wants to use. Clicking on the button opens
+ * a {@link MediaRouteChooserDialog} to allow the user to select a route.
+ * If no non-default routes match the selector and it is not possible for an active
+ * scan to discover any matching routes, then the button is disabled and cannot
+ * be clicked.
+ * </p><p>
+ * When a non-default route is selected that matches the selector, the button will
+ * appear in an active state indicating that the application is connected
+ * to a route of the kind that it wants to use. The button may also appear
+ * in an intermediary connecting state if the route is in the process of connecting
+ * to the destination but has not yet completed doing so. In either case, clicking
+ * on the button opens a {@link MediaRouteControllerDialog} to allow the user
+ * to control or disconnect from the current route.
+ * </p>
+ *
+ * <h3>Prerequisites</h3>
+ * <p>
+ * To use the media route button, the activity must be a subclass of
+ * {@link FragmentActivity} from the <code>android.support.v4</code>
+ * support library. Refer to support library documentation for details.
+ * </p>
+ *
+ * @see MediaRouteActionProvider
+ * @see #setRouteSelector
+ */
+public class MediaRouteButton extends View {
+ private static final String TAG = "MediaRouteButton";
+
+ private static final String CHOOSER_FRAGMENT_TAG =
+ "android.support.v7.mediarouter:MediaRouteChooserDialogFragment";
+ private static final String CONTROLLER_FRAGMENT_TAG =
+ "android.support.v7.mediarouter:MediaRouteControllerDialogFragment";
+
+ private final MediaRouter mRouter;
+ private final MediaRouterCallback mCallback;
+
+ private MediaRouteSelector mSelector = MediaRouteSelector.EMPTY;
+ private int mRouteCallbackFlags;
+ private MediaRouteDialogFactory mDialogFactory = MediaRouteDialogFactory.getDefault();
+
+ private boolean mAttachedToWindow;
+
+ private static final SparseArray<Drawable.ConstantState> sRemoteIndicatorCache =
+ new SparseArray<>(2);
+ private RemoteIndicatorLoader mRemoteIndicatorLoader;
+ private Drawable mRemoteIndicator;
+ private boolean mRemoteActive;
+ private boolean mIsConnecting;
+
+ private ColorStateList mButtonTint;
+ private int mMinWidth;
+ private int mMinHeight;
+
+ // The checked state is used when connected to a remote route.
+ private static final int[] CHECKED_STATE_SET = {
+ android.R.attr.state_checked
+ };
+
+ // The checkable state is used while connecting to a remote route.
+ private static final int[] CHECKABLE_STATE_SET = {
+ android.R.attr.state_checkable
+ };
+
+ public MediaRouteButton(Context context) {
+ this(context, null);
+ }
+
+ public MediaRouteButton(Context context, AttributeSet attrs) {
+ this(context, attrs, R.attr.mediaRouteButtonStyle);
+ }
+
+ public MediaRouteButton(Context context, AttributeSet attrs, int defStyleAttr) {
+ super(MediaRouterThemeHelper.createThemedButtonContext(context), attrs, defStyleAttr);
+ context = getContext();
+
+ mRouter = MediaRouter.getInstance(context);
+ mCallback = new MediaRouterCallback();
+
+ Resources.Theme theme = ApiHelper.getLibResources(context).newTheme();
+ theme.applyStyle(MediaRouterThemeHelper.getRouterThemeId(context), true);
+ TypedArray a = theme.obtainStyledAttributes(attrs,
+ R.styleable.MediaRouteButton, defStyleAttr, 0);
+
+ mButtonTint = a.getColorStateList(R.styleable.MediaRouteButton_mediaRouteButtonTint);
+ mMinWidth = a.getDimensionPixelSize(
+ R.styleable.MediaRouteButton_android_minWidth, 0);
+ mMinHeight = a.getDimensionPixelSize(
+ R.styleable.MediaRouteButton_android_minHeight, 0);
+ int remoteIndicatorResId = a.getResourceId(
+ R.styleable.MediaRouteButton_externalRouteEnabledDrawable, 0);
+ a.recycle();
+
+ if (remoteIndicatorResId != 0) {
+ Drawable.ConstantState remoteIndicatorState =
+ sRemoteIndicatorCache.get(remoteIndicatorResId);
+ if (remoteIndicatorState != null) {
+ setRemoteIndicatorDrawable(remoteIndicatorState.newDrawable());
+ } else {
+ mRemoteIndicatorLoader = new RemoteIndicatorLoader(remoteIndicatorResId);
+ mRemoteIndicatorLoader.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
+ }
+ }
+
+ updateContentDescription();
+ setClickable(true);
+ }
+
+ /**
+ * Gets the media route selector for filtering the routes that the user can
+ * select using the media route chooser dialog.
+ *
+ * @return The selector, never null.
+ */
+ @NonNull
+ public MediaRouteSelector getRouteSelector() {
+ return mSelector;
+ }
+
+ /**
+ * Sets the media route selector for filtering the routes that the user can
+ * select using the media route chooser dialog.
+ *
+ * @param selector The selector.
+ */
+ public void setRouteSelector(MediaRouteSelector selector) {
+ setRouteSelector(selector, 0);
+ }
+
+ /**
+ * Sets the media route selector for filtering the routes that the user can
+ * select using the media route chooser dialog.
+ *
+ * @param selector The selector.
+ * @param flags Flags to control the behavior of the callback. May be zero or a combination of
+ * {@link MediaRouter#CALLBACK_FLAG_PERFORM_ACTIVE_SCAN} and
+ * {@link MediaRouter#CALLBACK_FLAG_UNFILTERED_EVENTS}.
+ */
+ public void setRouteSelector(MediaRouteSelector selector, int flags) {
+ if (mSelector.equals(selector) && mRouteCallbackFlags == flags) {
+ return;
+ }
+ if (!mSelector.isEmpty()) {
+ mRouter.removeCallback(mCallback);
+ }
+ if (selector == null || selector.isEmpty()) {
+ mSelector = MediaRouteSelector.EMPTY;
+ return;
+ }
+
+ mSelector = selector;
+ mRouteCallbackFlags = flags;
+
+ if (mAttachedToWindow) {
+ mRouter.addCallback(selector, mCallback, flags);
+ refreshRoute();
+ }
+ }
+
+ /**
+ * Gets the media route dialog factory to use when showing the route chooser
+ * or controller dialog.
+ *
+ * @return The dialog factory, never null.
+ */
+ @NonNull
+ public MediaRouteDialogFactory getDialogFactory() {
+ return mDialogFactory;
+ }
+
+ /**
+ * Sets the media route dialog factory to use when showing the route chooser
+ * or controller dialog.
+ *
+ * @param factory The dialog factory, must not be null.
+ */
+ public void setDialogFactory(@NonNull MediaRouteDialogFactory factory) {
+ if (factory == null) {
+ throw new IllegalArgumentException("factory must not be null");
+ }
+
+ mDialogFactory = factory;
+ }
+
+ /**
+ * Show the route chooser or controller dialog.
+ * <p>
+ * If the default route is selected or if the currently selected route does
+ * not match the {@link #getRouteSelector selector}, then shows the route chooser dialog.
+ * Otherwise, shows the route controller dialog to offer the user
+ * a choice to disconnect from the route or perform other control actions
+ * such as setting the route's volume.
+ * </p><p>
+ * The application can customize the dialogs by calling {@link #setDialogFactory}
+ * to provide a customized dialog factory.
+ * </p>
+ *
+ * @return True if the dialog was actually shown.
+ *
+ * @throws IllegalStateException if the activity is not a subclass of
+ * {@link FragmentActivity}.
+ */
+ public boolean showDialog() {
+ if (!mAttachedToWindow) {
+ return false;
+ }
+
+ final Activity activity = getActivity();
+ if (activity == null) {
+ throw new IllegalStateException("The button's Context must be an Activity");
+ }
+ final FragmentManager fm = activity.getFragmentManager();
+ if (fm == null) {
+ throw new IllegalStateException("The activity must be a subclass of FragmentActivity");
+ }
+
+ MediaRouter.RouteInfo route = mRouter.getSelectedRoute();
+ if (route.isDefaultOrBluetooth() || !route.matchesSelector(mSelector)) {
+ if (fm.findFragmentByTag(CHOOSER_FRAGMENT_TAG) != null) {
+ Log.w(TAG, "showDialog(): Route chooser dialog already showing!");
+ return false;
+ }
+ MediaRouteChooserDialogFragment f =
+ mDialogFactory.onCreateChooserDialogFragment();
+ f.setRouteSelector(mSelector);
+ f.show(fm, CHOOSER_FRAGMENT_TAG);
+ } else {
+ if (fm.findFragmentByTag(CONTROLLER_FRAGMENT_TAG) != null) {
+ Log.w(TAG, "showDialog(): Route controller dialog already showing!");
+ return false;
+ }
+ MediaRouteControllerDialogFragment f =
+ mDialogFactory.onCreateControllerDialogFragment();
+ f.show(fm, CONTROLLER_FRAGMENT_TAG);
+ }
+ return true;
+ }
+
+ private Activity getActivity() {
+ // Gross way of unwrapping the Activity so we can get the FragmentManager
+ Context context = getContext();
+ while (context instanceof ContextWrapper) {
+ if (context instanceof Activity) {
+ return (Activity)context;
+ }
+ context = ((ContextWrapper)context).getBaseContext();
+ }
+ return null;
+ }
+
+ /**
+ * Sets whether to enable showing a tooltip with the content description of the
+ * button when the button is long pressed.
+ */
+ void setCheatSheetEnabled(boolean enable) {
+ TooltipCompat.setTooltipText(this, enable
+ ? ApiHelper.getLibResources(getContext())
+ .getString(R.string.mr_button_content_description)
+ : null);
+ }
+
+ @Override
+ public boolean performClick() {
+ // Send the appropriate accessibility events and call listeners
+ boolean handled = super.performClick();
+ if (!handled) {
+ playSoundEffect(SoundEffectConstants.CLICK);
+ }
+ return showDialog() || handled;
+ }
+
+ @Override
+ protected int[] onCreateDrawableState(int extraSpace) {
+ final int[] drawableState = super.onCreateDrawableState(extraSpace + 1);
+
+ // Technically we should be handling this more completely, but these
+ // are implementation details here. Checkable is used to express the connecting
+ // drawable state and is mutually exclusive with checked for the purposes
+ // of state selection here.
+ if (mIsConnecting) {
+ mergeDrawableStates(drawableState, CHECKABLE_STATE_SET);
+ } else if (mRemoteActive) {
+ mergeDrawableStates(drawableState, CHECKED_STATE_SET);
+ }
+ return drawableState;
+ }
+
+ @Override
+ protected void drawableStateChanged() {
+ super.drawableStateChanged();
+
+ if (mRemoteIndicator != null) {
+ int[] myDrawableState = getDrawableState();
+ mRemoteIndicator.setState(myDrawableState);
+ invalidate();
+ }
+ }
+
+ /**
+ * Sets a drawable to use as the remote route indicator.
+ */
+ public void setRemoteIndicatorDrawable(Drawable d) {
+ if (mRemoteIndicatorLoader != null) {
+ mRemoteIndicatorLoader.cancel(false);
+ }
+
+ if (mRemoteIndicator != null) {
+ mRemoteIndicator.setCallback(null);
+ unscheduleDrawable(mRemoteIndicator);
+ }
+ if (d != null) {
+ if (mButtonTint != null) {
+ d = DrawableCompat.wrap(d.mutate());
+ DrawableCompat.setTintList(d, mButtonTint);
+ }
+ d.setCallback(this);
+ d.setState(getDrawableState());
+ d.setVisible(getVisibility() == VISIBLE, false);
+ }
+ mRemoteIndicator = d;
+
+ refreshDrawableState();
+ if (mAttachedToWindow && mRemoteIndicator != null
+ && mRemoteIndicator.getCurrent() instanceof AnimationDrawable) {
+ AnimationDrawable curDrawable = (AnimationDrawable) mRemoteIndicator.getCurrent();
+ if (mIsConnecting) {
+ if (!curDrawable.isRunning()) {
+ curDrawable.start();
+ }
+ } else if (mRemoteActive) {
+ if (curDrawable.isRunning()) {
+ curDrawable.stop();
+ }
+ curDrawable.selectDrawable(curDrawable.getNumberOfFrames() - 1);
+ }
+ }
+ }
+
+ @Override
+ protected boolean verifyDrawable(Drawable who) {
+ return super.verifyDrawable(who) || who == mRemoteIndicator;
+ }
+
+ @Override
+ public void jumpDrawablesToCurrentState() {
+ // We can't call super to handle the background so we do it ourselves.
+ //super.jumpDrawablesToCurrentState();
+ if (getBackground() != null) {
+ DrawableCompat.jumpToCurrentState(getBackground());
+ }
+
+ // Handle our own remote indicator.
+ if (mRemoteIndicator != null) {
+ DrawableCompat.jumpToCurrentState(mRemoteIndicator);
+ }
+ }
+
+ @Override
+ public void setVisibility(int visibility) {
+ super.setVisibility(visibility);
+
+ if (mRemoteIndicator != null) {
+ mRemoteIndicator.setVisible(getVisibility() == VISIBLE, false);
+ }
+ }
+
+ @Override
+ public void onAttachedToWindow() {
+ super.onAttachedToWindow();
+
+ mAttachedToWindow = true;
+ if (!mSelector.isEmpty()) {
+ mRouter.addCallback(mSelector, mCallback, mRouteCallbackFlags);
+ }
+ refreshRoute();
+ }
+
+ @Override
+ public void onDetachedFromWindow() {
+ mAttachedToWindow = false;
+ if (!mSelector.isEmpty()) {
+ mRouter.removeCallback(mCallback);
+ }
+
+ super.onDetachedFromWindow();
+ }
+
+ @Override
+ protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
+ final int widthSize = MeasureSpec.getSize(widthMeasureSpec);
+ final int heightSize = MeasureSpec.getSize(heightMeasureSpec);
+ final int widthMode = MeasureSpec.getMode(widthMeasureSpec);
+ final int heightMode = MeasureSpec.getMode(heightMeasureSpec);
+
+ final int width = Math.max(mMinWidth, mRemoteIndicator != null ?
+ mRemoteIndicator.getIntrinsicWidth() + getPaddingLeft() + getPaddingRight() : 0);
+ final int height = Math.max(mMinHeight, mRemoteIndicator != null ?
+ mRemoteIndicator.getIntrinsicHeight() + getPaddingTop() + getPaddingBottom() : 0);
+
+ int measuredWidth;
+ switch (widthMode) {
+ case MeasureSpec.EXACTLY:
+ measuredWidth = widthSize;
+ break;
+ case MeasureSpec.AT_MOST:
+ measuredWidth = Math.min(widthSize, width);
+ break;
+ default:
+ case MeasureSpec.UNSPECIFIED:
+ measuredWidth = width;
+ break;
+ }
+
+ int measuredHeight;
+ switch (heightMode) {
+ case MeasureSpec.EXACTLY:
+ measuredHeight = heightSize;
+ break;
+ case MeasureSpec.AT_MOST:
+ measuredHeight = Math.min(heightSize, height);
+ break;
+ default:
+ case MeasureSpec.UNSPECIFIED:
+ measuredHeight = height;
+ break;
+ }
+
+ setMeasuredDimension(measuredWidth, measuredHeight);
+ }
+
+ @Override
+ protected void onDraw(Canvas canvas) {
+ super.onDraw(canvas);
+
+ if (mRemoteIndicator != null) {
+ final int left = getPaddingLeft();
+ final int right = getWidth() - getPaddingRight();
+ final int top = getPaddingTop();
+ final int bottom = getHeight() - getPaddingBottom();
+
+ final int drawWidth = mRemoteIndicator.getIntrinsicWidth();
+ final int drawHeight = mRemoteIndicator.getIntrinsicHeight();
+ final int drawLeft = left + (right - left - drawWidth) / 2;
+ final int drawTop = top + (bottom - top - drawHeight) / 2;
+
+ mRemoteIndicator.setBounds(drawLeft, drawTop,
+ drawLeft + drawWidth, drawTop + drawHeight);
+ mRemoteIndicator.draw(canvas);
+ }
+ }
+
+ void refreshRoute() {
+ final MediaRouter.RouteInfo route = mRouter.getSelectedRoute();
+ final boolean isRemote = !route.isDefaultOrBluetooth() && route.matchesSelector(mSelector);
+ final boolean isConnecting = isRemote && route.isConnecting();
+ boolean needsRefresh = false;
+ if (mRemoteActive != isRemote) {
+ mRemoteActive = isRemote;
+ needsRefresh = true;
+ }
+ if (mIsConnecting != isConnecting) {
+ mIsConnecting = isConnecting;
+ needsRefresh = true;
+ }
+
+ if (needsRefresh) {
+ updateContentDescription();
+ refreshDrawableState();
+ }
+ if (mAttachedToWindow) {
+ setEnabled(mRouter.isRouteAvailable(mSelector,
+ MediaRouter.AVAILABILITY_FLAG_IGNORE_DEFAULT_ROUTE));
+ }
+ if (mRemoteIndicator != null
+ && mRemoteIndicator.getCurrent() instanceof AnimationDrawable) {
+ AnimationDrawable curDrawable = (AnimationDrawable) mRemoteIndicator.getCurrent();
+ if (mAttachedToWindow) {
+ if ((needsRefresh || isConnecting) && !curDrawable.isRunning()) {
+ curDrawable.start();
+ }
+ } else if (isRemote && !isConnecting) {
+ // When the route is already connected before the view is attached, show the last
+ // frame of the connected animation immediately.
+ if (curDrawable.isRunning()) {
+ curDrawable.stop();
+ }
+ curDrawable.selectDrawable(curDrawable.getNumberOfFrames() - 1);
+ }
+ }
+ }
+
+ private void updateContentDescription() {
+ int resId;
+ if (mIsConnecting) {
+ resId = R.string.mr_cast_button_connecting;
+ } else if (mRemoteActive) {
+ resId = R.string.mr_cast_button_connected;
+ } else {
+ resId = R.string.mr_cast_button_disconnected;
+ }
+ setContentDescription(ApiHelper.getLibResources(getContext()).getString(resId));
+ }
+
+ private final class MediaRouterCallback extends MediaRouter.Callback {
+ MediaRouterCallback() {
+ }
+
+ @Override
+ public void onRouteAdded(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoute();
+ }
+
+ @Override
+ public void onRouteRemoved(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoute();
+ }
+
+ @Override
+ public void onRouteChanged(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoute();
+ }
+
+ @Override
+ public void onRouteSelected(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoute();
+ }
+
+ @Override
+ public void onRouteUnselected(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoute();
+ }
+
+ @Override
+ public void onProviderAdded(MediaRouter router, MediaRouter.ProviderInfo provider) {
+ refreshRoute();
+ }
+
+ @Override
+ public void onProviderRemoved(MediaRouter router, MediaRouter.ProviderInfo provider) {
+ refreshRoute();
+ }
+
+ @Override
+ public void onProviderChanged(MediaRouter router, MediaRouter.ProviderInfo provider) {
+ refreshRoute();
+ }
+ }
+
+ private final class RemoteIndicatorLoader extends AsyncTask<Void, Void, Drawable> {
+ private final int mResId;
+
+ RemoteIndicatorLoader(int resId) {
+ mResId = resId;
+ }
+
+ @Override
+ protected Drawable doInBackground(Void... params) {
+ return ApiHelper.getLibResources(getContext()).getDrawable(mResId);
+ }
+
+ @Override
+ protected void onPostExecute(Drawable remoteIndicator) {
+ cacheAndReset(remoteIndicator);
+ setRemoteIndicatorDrawable(remoteIndicator);
+ }
+
+ @Override
+ protected void onCancelled(Drawable remoteIndicator) {
+ cacheAndReset(remoteIndicator);
+ }
+
+ private void cacheAndReset(Drawable remoteIndicator) {
+ if (remoteIndicator != null) {
+ sRemoteIndicatorCache.put(mResId, remoteIndicator.getConstantState());
+ }
+ mRemoteIndicatorLoader = null;
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
new file mode 100644
index 0000000..cac64d9
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialog.java
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import static com.android.support.mediarouter.media.MediaRouter.RouteInfo.CONNECTION_STATE_CONNECTED;
+import static com.android.support.mediarouter.media.MediaRouter.RouteInfo.CONNECTION_STATE_CONNECTING;
+
+import android.annotation.NonNull;
+import android.app.Dialog;
+import android.content.Context;
+import android.content.res.Resources;
+import android.content.res.TypedArray;
+import android.graphics.drawable.Drawable;
+import android.net.Uri;
+import android.os.Bundle;
+import android.os.Handler;
+import android.os.Message;
+import android.os.SystemClock;
+import android.support.v7.app.AppCompatDialog;
+import android.text.TextUtils;
+import android.util.Log;
+import android.view.ContextThemeWrapper;
+import android.view.Gravity;
+import android.view.LayoutInflater;
+import android.view.View;
+import android.view.ViewGroup;
+import android.widget.AdapterView;
+import android.widget.ArrayAdapter;
+import android.widget.ImageView;
+import android.widget.ListView;
+import android.widget.TextView;
+
+import com.android.media.update.ApiHelper;
+import com.android.media.update.R;
+import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+/**
+ * This class implements the route chooser dialog for {@link MediaRouter}.
+ * <p>
+ * This dialog allows the user to choose a route that matches a given selector.
+ * </p>
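+ *
+ * <p>
+ * A minimal usage sketch; {@code selector} is assumed to be a {@link MediaRouteSelector}
+ * already built from the control categories the application supports:
+ * </p>
+ * <pre>
+ * MediaRouteChooserDialog dialog = new MediaRouteChooserDialog(context);
+ * dialog.setRouteSelector(selector);
+ * dialog.show();
+ * </pre>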
+ *
+ * @see MediaRouteButton
+ * @see MediaRouteActionProvider
+ */
+public class MediaRouteChooserDialog extends Dialog {
+ static final String TAG = "MediaRouteChooserDialog";
+
+ // Do not update the route list immediately, to avoid unnatural changes to the dialog.
+ private static final long UPDATE_ROUTES_DELAY_MS = 300L;
+ static final int MSG_UPDATE_ROUTES = 1;
+
+ private final MediaRouter mRouter;
+ private final MediaRouterCallback mCallback;
+
+ private TextView mTitleView;
+ private MediaRouteSelector mSelector = MediaRouteSelector.EMPTY;
+ private ArrayList<MediaRouter.RouteInfo> mRoutes;
+ private RouteAdapter mAdapter;
+ private ListView mListView;
+ private boolean mAttachedToWindow;
+ private long mLastUpdateTime;
+ private final Handler mHandler = new Handler() {
+ @Override
+ public void handleMessage(Message message) {
+ switch (message.what) {
+ case MSG_UPDATE_ROUTES:
+ updateRoutes((List<MediaRouter.RouteInfo>) message.obj);
+ break;
+ }
+ }
+ };
+
+ public MediaRouteChooserDialog(Context context) {
+ this(context, 0);
+ }
+
+ public MediaRouteChooserDialog(Context context, int theme) {
+ // TODO (b/72975976): Avoid using ContextThemeWrapper with the app context and lib theme.
+ super(new ContextThemeWrapper(context, ApiHelper.getLibTheme(context,
+ MediaRouterThemeHelper.getRouterThemeId(context))), theme);
+ context = getContext();
+
+ mRouter = MediaRouter.getInstance(context);
+ mCallback = new MediaRouterCallback();
+ }
+
+ /**
+ * Gets the media route selector for filtering the routes that the user can select.
+ *
+ * @return The selector, never null.
+ */
+ @NonNull
+ public MediaRouteSelector getRouteSelector() {
+ return mSelector;
+ }
+
+ /**
+ * Sets the media route selector for filtering the routes that the user can select.
+ *
+ * @param selector The selector, must not be null.
+ */
+ public void setRouteSelector(@NonNull MediaRouteSelector selector) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+
+ if (!mSelector.equals(selector)) {
+ mSelector = selector;
+
+ if (mAttachedToWindow) {
+ mRouter.removeCallback(mCallback);
+ mRouter.addCallback(selector, mCallback,
+ MediaRouter.CALLBACK_FLAG_PERFORM_ACTIVE_SCAN);
+ }
+
+ refreshRoutes();
+ }
+ }
+
+ /**
+ * Called to filter the set of routes that should be included in the list.
+ * <p>
+ * The default implementation iterates over all routes in the provided list and
+ * removes those for which {@link #onFilterRoute} returns false.
+ * </p>
+ *
+ * @param routes The list of routes to filter in-place, never null.
+ */
+ public void onFilterRoutes(@NonNull List<MediaRouter.RouteInfo> routes) {
+ for (int i = routes.size(); i-- > 0; ) {
+ if (!onFilterRoute(routes.get(i))) {
+ routes.remove(i);
+ }
+ }
+ }
+
+ /**
+ * Returns true if the route should be included in the list.
+ * <p>
+ * The default implementation returns true for enabled non-default routes that
+ * match the selector. Subclasses can override this method to filter routes
+ * differently.
+ * </p>
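+ * <p>
+ * For example, a subclass could additionally hide routes that are still connecting.
+ * This is only a sketch; it delegates the rest of the filtering to the default
+ * implementation:
+ * </p>
+ * <pre>
+ * &#64;Override
+ * public boolean onFilterRoute(MediaRouter.RouteInfo route) {
+ * // Hide routes that are still in the process of connecting.
+ * if (route.isConnecting()) {
+ * return false;
+ * }
+ * return super.onFilterRoute(route);
+ * }
+ * </pre>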
+ *
+ * @param route The route to consider, never null.
+ * @return True if the route should be included in the chooser dialog.
+ */
+ public boolean onFilterRoute(@NonNull MediaRouter.RouteInfo route) {
+ return !route.isDefaultOrBluetooth() && route.isEnabled()
+ && route.matchesSelector(mSelector);
+ }
+
+ @Override
+ public void setTitle(CharSequence title) {
+ mTitleView.setText(title);
+ }
+
+ @Override
+ public void setTitle(int titleId) {
+ mTitleView.setText(titleId);
+ }
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+
+ setContentView(ApiHelper.inflateLibLayout(getContext(), ApiHelper.getLibTheme(getContext(),
+ MediaRouterThemeHelper.getRouterThemeId(getContext())),
+ R.layout.mr_chooser_dialog));
+
+ mRoutes = new ArrayList<>();
+ mAdapter = new RouteAdapter(getContext(), mRoutes);
+ mListView = (ListView)findViewById(R.id.mr_chooser_list);
+ mListView.setAdapter(mAdapter);
+ mListView.setOnItemClickListener(mAdapter);
+ mListView.setEmptyView(findViewById(android.R.id.empty));
+ mTitleView = findViewById(R.id.mr_chooser_title);
+
+ updateLayout();
+ }
+
+ /**
+ * Sets the width of the dialog. Also called when configuration changes.
+ */
+ void updateLayout() {
+ getWindow().setLayout(MediaRouteDialogHelper.getDialogWidth(getContext()),
+ ViewGroup.LayoutParams.WRAP_CONTENT);
+ }
+
+ @Override
+ public void onAttachedToWindow() {
+ super.onAttachedToWindow();
+
+ mAttachedToWindow = true;
+ mRouter.addCallback(mSelector, mCallback, MediaRouter.CALLBACK_FLAG_PERFORM_ACTIVE_SCAN);
+ refreshRoutes();
+ }
+
+ @Override
+ public void onDetachedFromWindow() {
+ mAttachedToWindow = false;
+ mRouter.removeCallback(mCallback);
+ mHandler.removeMessages(MSG_UPDATE_ROUTES);
+
+ super.onDetachedFromWindow();
+ }
+
+ /**
+ * Refreshes the list of routes that are shown in the chooser dialog.
+ */
+ public void refreshRoutes() {
+ if (mAttachedToWindow) {
+ ArrayList<MediaRouter.RouteInfo> routes = new ArrayList<>(mRouter.getRoutes());
+ onFilterRoutes(routes);
+ Collections.sort(routes, RouteComparator.sInstance);
+ if (SystemClock.uptimeMillis() - mLastUpdateTime >= UPDATE_ROUTES_DELAY_MS) {
+ updateRoutes(routes);
+ } else {
+ mHandler.removeMessages(MSG_UPDATE_ROUTES);
+ mHandler.sendMessageAtTime(mHandler.obtainMessage(MSG_UPDATE_ROUTES, routes),
+ mLastUpdateTime + UPDATE_ROUTES_DELAY_MS);
+ }
+ }
+ }
+
+ void updateRoutes(List<MediaRouter.RouteInfo> routes) {
+ mLastUpdateTime = SystemClock.uptimeMillis();
+ mRoutes.clear();
+ mRoutes.addAll(routes);
+ mAdapter.notifyDataSetChanged();
+ }
+
+ private final class RouteAdapter extends ArrayAdapter<MediaRouter.RouteInfo>
+ implements ListView.OnItemClickListener {
+ private final Drawable mDefaultIcon;
+ private final Drawable mTvIcon;
+ private final Drawable mSpeakerIcon;
+ private final Drawable mSpeakerGroupIcon;
+
+ public RouteAdapter(Context context, List<MediaRouter.RouteInfo> routes) {
+ super(context, 0, routes);
+
+ TypedArray styledAttributes = ApiHelper.getLibTheme(context,
+ MediaRouterThemeHelper.getRouterThemeId(context)).obtainStyledAttributes(
+ new int[] {
+ R.attr.mediaRouteDefaultIconDrawable,
+ R.attr.mediaRouteTvIconDrawable,
+ R.attr.mediaRouteSpeakerIconDrawable,
+ R.attr.mediaRouteSpeakerGroupIconDrawable
+ });
+
+ mDefaultIcon = styledAttributes.getDrawable(0);
+ mTvIcon = styledAttributes.getDrawable(1);
+ mSpeakerIcon = styledAttributes.getDrawable(2);
+ mSpeakerGroupIcon = styledAttributes.getDrawable(3);
+ styledAttributes.recycle();
+ }
+
+ @Override
+ public boolean areAllItemsEnabled() {
+ return false;
+ }
+
+ @Override
+ public boolean isEnabled(int position) {
+ return getItem(position).isEnabled();
+ }
+
+ @Override
+ public View getView(int position, View convertView, ViewGroup parent) {
+ View view = convertView;
+ if (view == null) {
+ view = ApiHelper.inflateLibLayout(getContext(),
+ ApiHelper.getLibTheme(getContext(),
+ MediaRouterThemeHelper.getRouterThemeId(getContext())),
+ R.layout.mr_chooser_list_item, parent, false);
+ }
+
+ MediaRouter.RouteInfo route = getItem(position);
+ TextView text1 = (TextView) view.findViewById(R.id.mr_chooser_route_name);
+ TextView text2 = (TextView) view.findViewById(R.id.mr_chooser_route_desc);
+ text1.setText(route.getName());
+ String description = route.getDescription();
+ boolean isConnectedOrConnecting =
+ route.getConnectionState() == CONNECTION_STATE_CONNECTED
+ || route.getConnectionState() == CONNECTION_STATE_CONNECTING;
+ if (isConnectedOrConnecting && !TextUtils.isEmpty(description)) {
+ text1.setGravity(Gravity.BOTTOM);
+ text2.setVisibility(View.VISIBLE);
+ text2.setText(description);
+ } else {
+ text1.setGravity(Gravity.CENTER_VERTICAL);
+ text2.setVisibility(View.GONE);
+ text2.setText("");
+ }
+ view.setEnabled(route.isEnabled());
+
+ ImageView iconView = (ImageView) view.findViewById(R.id.mr_chooser_route_icon);
+ if (iconView != null) {
+ iconView.setImageDrawable(getIconDrawable(route));
+ }
+ return view;
+ }
+
+ @Override
+ public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
+ MediaRouter.RouteInfo route = getItem(position);
+ if (route.isEnabled()) {
+ route.select();
+ dismiss();
+ }
+ }
+
+ private Drawable getIconDrawable(MediaRouter.RouteInfo route) {
+ Uri iconUri = route.getIconUri();
+ if (iconUri != null) {
+ try {
+ InputStream is = getContext().getContentResolver().openInputStream(iconUri);
+ Drawable drawable = Drawable.createFromStream(is, null);
+ if (drawable != null) {
+ return drawable;
+ }
+ } catch (IOException e) {
+ Log.w(TAG, "Failed to load " + iconUri, e);
+ // Falls back.
+ }
+ }
+ return getDefaultIconDrawable(route);
+ }
+
+ private Drawable getDefaultIconDrawable(MediaRouter.RouteInfo route) {
+ // If the type of the receiver device is specified, use it.
+ switch (route.getDeviceType()) {
+ case MediaRouter.RouteInfo.DEVICE_TYPE_TV:
+ return mTvIcon;
+ case MediaRouter.RouteInfo.DEVICE_TYPE_SPEAKER:
+ return mSpeakerIcon;
+ }
+
+ // Otherwise, make the best guess based on other route information.
+ if (route instanceof MediaRouter.RouteGroup) {
+ // Only speakers can be grouped for now.
+ return mSpeakerGroupIcon;
+ }
+ return mDefaultIcon;
+ }
+ }
+
+ private final class MediaRouterCallback extends MediaRouter.Callback {
+ MediaRouterCallback() {
+ }
+
+ @Override
+ public void onRouteAdded(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoutes();
+ }
+
+ @Override
+ public void onRouteRemoved(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoutes();
+ }
+
+ @Override
+ public void onRouteChanged(MediaRouter router, MediaRouter.RouteInfo info) {
+ refreshRoutes();
+ }
+
+ @Override
+ public void onRouteSelected(MediaRouter router, MediaRouter.RouteInfo route) {
+ dismiss();
+ }
+ }
+
+ static final class RouteComparator implements Comparator<MediaRouter.RouteInfo> {
+ public static final RouteComparator sInstance = new RouteComparator();
+
+ @Override
+ public int compare(MediaRouter.RouteInfo lhs, MediaRouter.RouteInfo rhs) {
+ return lhs.getName().compareToIgnoreCase(rhs.getName());
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialogFragment.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialogFragment.java
new file mode 100644
index 0000000..65e6b29
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteChooserDialogFragment.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.app.Dialog;
+import android.app.DialogFragment;
+import android.content.Context;
+import android.content.res.Configuration;
+import android.os.Bundle;
+
+import com.android.support.mediarouter.media.MediaRouteSelector;
+
+/**
+ * Media route chooser dialog fragment.
+ * <p>
+ * Creates a {@link MediaRouteChooserDialog}. The application may subclass
+ * this dialog fragment to customize the media route chooser dialog.
+ * </p>
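+ * <p>
+ * A minimal usage sketch; the fragment tag is illustrative and {@code selector} is assumed
+ * to be a {@link MediaRouteSelector} built by the application. The selector must be set
+ * before the fragment is added:
+ * </p>
+ * <pre>
+ * MediaRouteChooserDialogFragment fragment = new MediaRouteChooserDialogFragment();
+ * fragment.setRouteSelector(selector);
+ * fragment.show(getFragmentManager(), "media_route_chooser"); // illustrative tag
+ * </pre>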
+ */
+public class MediaRouteChooserDialogFragment extends DialogFragment {
+ private static final String ARGUMENT_SELECTOR = "selector";
+
+ private MediaRouteChooserDialog mDialog;
+ private MediaRouteSelector mSelector;
+
+ /**
+ * Creates a media route chooser dialog fragment.
+ * <p>
+ * All subclasses of this class must also possess a default constructor.
+ * </p>
+ */
+ public MediaRouteChooserDialogFragment() {
+ setCancelable(true);
+ }
+
+ /**
+ * Gets the media route selector for filtering the routes that the user can select.
+ *
+ * @return The selector, never null.
+ */
+ public MediaRouteSelector getRouteSelector() {
+ ensureRouteSelector();
+ return mSelector;
+ }
+
+ private void ensureRouteSelector() {
+ if (mSelector == null) {
+ Bundle args = getArguments();
+ if (args != null) {
+ mSelector = MediaRouteSelector.fromBundle(args.getBundle(ARGUMENT_SELECTOR));
+ }
+ if (mSelector == null) {
+ mSelector = MediaRouteSelector.EMPTY;
+ }
+ }
+ }
+
+ /**
+ * Sets the media route selector for filtering the routes that the user can select.
+ * This method must be called before the fragment is added.
+ *
+ * @param selector The selector to set.
+ */
+ public void setRouteSelector(MediaRouteSelector selector) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+
+ ensureRouteSelector();
+ if (!mSelector.equals(selector)) {
+ mSelector = selector;
+
+ Bundle args = getArguments();
+ if (args == null) {
+ args = new Bundle();
+ }
+ args.putBundle(ARGUMENT_SELECTOR, selector.asBundle());
+ setArguments(args);
+
+ MediaRouteChooserDialog dialog = (MediaRouteChooserDialog)getDialog();
+ if (dialog != null) {
+ dialog.setRouteSelector(selector);
+ }
+ }
+ }
+
+ /**
+ * Called when the chooser dialog is being created.
+ * <p>
+ * Subclasses may override this method to customize the dialog.
+ * </p>
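+ * <p>
+ * A sketch of an override that supplies a customized dialog; {@code MyMediaRouteChooserDialog}
+ * is an illustrative subclass of {@link MediaRouteChooserDialog}:
+ * </p>
+ * <pre>
+ * &#64;Override
+ * public MediaRouteChooserDialog onCreateChooserDialog(
+ * Context context, Bundle savedInstanceState) {
+ * // MyMediaRouteChooserDialog is an illustrative custom dialog subclass.
+ * return new MyMediaRouteChooserDialog(context);
+ * }
+ * </pre>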
+ */
+ public MediaRouteChooserDialog onCreateChooserDialog(
+ Context context, Bundle savedInstanceState) {
+ return new MediaRouteChooserDialog(context);
+ }
+
+ @Override
+ public Dialog onCreateDialog(Bundle savedInstanceState) {
+ mDialog = onCreateChooserDialog(getContext(), savedInstanceState);
+ mDialog.setRouteSelector(getRouteSelector());
+ return mDialog;
+ }
+
+ @Override
+ public void onConfigurationChanged(Configuration newConfig) {
+ super.onConfigurationChanged(newConfig);
+ if (mDialog != null) {
+ mDialog.updateLayout();
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
new file mode 100644
index 0000000..060cfca
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialog.java
@@ -0,0 +1,1486 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import static android.support.v4.media.session.PlaybackStateCompat.ACTION_PAUSE;
+import static android.support.v4.media.session.PlaybackStateCompat.ACTION_PLAY;
+import static android.support.v4.media.session.PlaybackStateCompat.ACTION_PLAY_PAUSE;
+import static android.support.v4.media.session.PlaybackStateCompat.ACTION_STOP;
+
+import android.app.AlertDialog;
+import android.app.PendingIntent;
+import android.content.ContentResolver;
+import android.content.Context;
+import android.content.res.Resources;
+import android.graphics.Bitmap;
+import android.graphics.BitmapFactory;
+import android.graphics.Rect;
+import android.graphics.drawable.BitmapDrawable;
+import android.net.Uri;
+import android.os.AsyncTask;
+import android.os.Bundle;
+import android.os.RemoteException;
+import android.os.SystemClock;
+import android.support.v4.media.MediaDescriptionCompat;
+import android.support.v4.media.MediaMetadataCompat;
+import android.support.v4.media.session.MediaControllerCompat;
+import android.support.v4.media.session.MediaSessionCompat;
+import android.support.v4.media.session.PlaybackStateCompat;
+import android.support.v4.util.ObjectsCompat;
+import android.support.v4.view.accessibility.AccessibilityEventCompat;
+import android.support.v7.graphics.Palette;
+import android.text.TextUtils;
+import android.util.Log;
+import android.view.ContextThemeWrapper;
+import android.view.KeyEvent;
+import android.view.LayoutInflater;
+import android.view.View;
+import android.view.View.MeasureSpec;
+import android.view.ViewGroup;
+import android.view.ViewTreeObserver;
+import android.view.accessibility.AccessibilityEvent;
+import android.view.accessibility.AccessibilityManager;
+import android.view.animation.AccelerateDecelerateInterpolator;
+import android.view.animation.AlphaAnimation;
+import android.view.animation.Animation;
+import android.view.animation.AnimationSet;
+import android.view.animation.AnimationUtils;
+import android.view.animation.Interpolator;
+import android.view.animation.Transformation;
+import android.view.animation.TranslateAnimation;
+import android.widget.ArrayAdapter;
+import android.widget.Button;
+import android.widget.FrameLayout;
+import android.widget.ImageButton;
+import android.widget.ImageView;
+import android.widget.LinearLayout;
+import android.widget.RelativeLayout;
+import android.widget.SeekBar;
+import android.widget.TextView;
+
+import com.android.media.update.ApiHelper;
+import com.android.media.update.R;
+import com.android.support.mediarouter.media.MediaRouteSelector;
+import com.android.support.mediarouter.media.MediaRouter;
+import com.android.support.mediarouter.app.OverlayListView.OverlayObject;
+
+import java.io.BufferedInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This class implements the route controller dialog for {@link MediaRouter}.
+ * <p>
+ * This dialog allows the user to control or disconnect from the currently selected route.
+ * </p>
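+ *
+ * <p>
+ * A minimal usage sketch; the dialog acts on whatever route is currently selected, so it is
+ * normally shown only while a non-default route is selected:
+ * </p>
+ * <pre>
+ * MediaRouteControllerDialog dialog = new MediaRouteControllerDialog(context);
+ * dialog.show();
+ * </pre>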
+ *
+ * @see MediaRouteButton
+ * @see MediaRouteActionProvider
+ */
+public class MediaRouteControllerDialog extends AlertDialog {
+ // Tags should be less than 24 characters long (see docs for android.util.Log.isLoggable())
+ static final String TAG = "MediaRouteCtrlDialog";
+ static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
+
+ // Time to wait before updating the volume when the user lets go of the seek bar
+ // to allow the route provider time to propagate the change and publish a new
+ // route descriptor.
+ static final int VOLUME_UPDATE_DELAY_MILLIS = 500;
+ static final int CONNECTION_TIMEOUT_MILLIS = (int) TimeUnit.SECONDS.toMillis(30L);
+
+ private static final int BUTTON_NEUTRAL_RES_ID = android.R.id.button3;
+ static final int BUTTON_DISCONNECT_RES_ID = android.R.id.button2;
+ static final int BUTTON_STOP_RES_ID = android.R.id.button1;
+
+ final MediaRouter mRouter;
+ private final MediaRouterCallback mCallback;
+ final MediaRouter.RouteInfo mRoute;
+
+ Context mContext;
+ private boolean mCreated;
+ private boolean mAttachedToWindow;
+
+ private int mDialogContentWidth;
+
+ private View mCustomControlView;
+
+ private Button mDisconnectButton;
+ private Button mStopCastingButton;
+ private ImageButton mPlaybackControlButton;
+ private ImageButton mCloseButton;
+ private MediaRouteExpandCollapseButton mGroupExpandCollapseButton;
+
+ private FrameLayout mExpandableAreaLayout;
+ private LinearLayout mDialogAreaLayout;
+ FrameLayout mDefaultControlLayout;
+ private FrameLayout mCustomControlLayout;
+ private ImageView mArtView;
+ private TextView mTitleView;
+ private TextView mSubtitleView;
+ private TextView mRouteNameTextView;
+
+ private boolean mVolumeControlEnabled = true;
+ // Layout for media controllers including play/pause button and the main volume slider.
+ private LinearLayout mMediaMainControlLayout;
+ private RelativeLayout mPlaybackControlLayout;
+ private LinearLayout mVolumeControlLayout;
+ private View mDividerView;
+
+ OverlayListView mVolumeGroupList;
+ VolumeGroupAdapter mVolumeGroupAdapter;
+ private List<MediaRouter.RouteInfo> mGroupMemberRoutes;
+ Set<MediaRouter.RouteInfo> mGroupMemberRoutesAdded;
+ private Set<MediaRouter.RouteInfo> mGroupMemberRoutesRemoved;
+ Set<MediaRouter.RouteInfo> mGroupMemberRoutesAnimatingWithBitmap;
+ SeekBar mVolumeSlider;
+ VolumeChangeListener mVolumeChangeListener;
+ MediaRouter.RouteInfo mRouteInVolumeSliderTouched;
+ private int mVolumeGroupListItemIconSize;
+ private int mVolumeGroupListItemHeight;
+ private int mVolumeGroupListMaxHeight;
+ private final int mVolumeGroupListPaddingTop;
+ Map<MediaRouter.RouteInfo, SeekBar> mVolumeSliderMap;
+
+ MediaControllerCompat mMediaController;
+ MediaControllerCallback mControllerCallback;
+ PlaybackStateCompat mState;
+ MediaDescriptionCompat mDescription;
+
+ FetchArtTask mFetchArtTask;
+ Bitmap mArtIconBitmap;
+ Uri mArtIconUri;
+ boolean mArtIconIsLoaded;
+ Bitmap mArtIconLoadedBitmap;
+ int mArtIconBackgroundColor;
+
+ boolean mHasPendingUpdate;
+ boolean mPendingUpdateAnimationNeeded;
+
+ boolean mIsGroupExpanded;
+ boolean mIsGroupListAnimating;
+ boolean mIsGroupListAnimationPending;
+ int mGroupListAnimationDurationMs;
+ private int mGroupListFadeInDurationMs;
+ private int mGroupListFadeOutDurationMs;
+
+ private Interpolator mInterpolator;
+ private Interpolator mLinearOutSlowInInterpolator;
+ private Interpolator mFastOutSlowInInterpolator;
+ private Interpolator mAccelerateDecelerateInterpolator;
+
+ final AccessibilityManager mAccessibilityManager;
+
+ Runnable mGroupListFadeInAnimation = new Runnable() {
+ @Override
+ public void run() {
+ startGroupListFadeInAnimation();
+ }
+ };
+
+ public MediaRouteControllerDialog(Context context) {
+ this(context, 0);
+ }
+
+ public MediaRouteControllerDialog(Context context, int theme) {
+ // TODO (b/72975976): Avoid using ContextThemeWrapper with the app context and lib theme.
+ super(new ContextThemeWrapper(context, ApiHelper.getLibTheme(context,
+ MediaRouterThemeHelper.getRouterThemeId(context))), theme);
+ mContext = getContext();
+
+ mControllerCallback = new MediaControllerCallback();
+ mRouter = MediaRouter.getInstance(mContext);
+ mCallback = new MediaRouterCallback();
+ mRoute = mRouter.getSelectedRoute();
+ setMediaSession(mRouter.getMediaSessionToken());
+ mVolumeGroupListPaddingTop = ApiHelper.getLibResources(context).getDimensionPixelSize(
+ R.dimen.mr_controller_volume_group_list_padding_top);
+ mAccessibilityManager =
+ (AccessibilityManager) mContext.getSystemService(Context.ACCESSIBILITY_SERVICE);
+ mLinearOutSlowInInterpolator = AnimationUtils.loadInterpolator(
+ mContext, android.R.interpolator.linear_out_slow_in);
+ mFastOutSlowInInterpolator = AnimationUtils.loadInterpolator(
+ mContext, android.R.interpolator.fast_out_slow_in);
+ mAccelerateDecelerateInterpolator = new AccelerateDecelerateInterpolator();
+ }
+
+ /**
+ * Gets the route that this dialog is controlling.
+ */
+ public MediaRouter.RouteInfo getRoute() {
+ return mRoute;
+ }
+
+ private MediaRouter.RouteGroup getGroup() {
+ if (mRoute instanceof MediaRouter.RouteGroup) {
+ return (MediaRouter.RouteGroup) mRoute;
+ }
+ return null;
+ }
+
+ /**
+ * Provides the subclass an opportunity to create a view that will replace the default media
+ * controls for the currently playing content.
+ *
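+ * <p>
+ * A sketch of an override that supplies a custom control view; the layout resource name is
+ * illustrative:
+ * </p>
+ * <pre>
+ * &#64;Override
+ * public View onCreateMediaControlView(Bundle savedInstanceState) {
+ * // R.layout.my_media_controls is an illustrative layout resource.
+ * return getLayoutInflater().inflate(R.layout.my_media_controls, null);
+ * }
+ * </pre>
+ *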
+ * @param savedInstanceState The dialog's saved instance state.
+ * @return The media control view, or null if none.
+ */
+ public View onCreateMediaControlView(Bundle savedInstanceState) {
+ return null;
+ }
+
+ /**
+ * Gets the media control view that was created by {@link #onCreateMediaControlView(Bundle)}.
+ *
+ * @return The media control view, or null if none.
+ */
+ public View getMediaControlView() {
+ return mCustomControlView;
+ }
+
+ /**
+ * Sets whether to enable the volume slider and volume control using the volume keys
+ * when the route supports it.
+ * <p>
+ * The default value is true.
+ * </p>
+ */
+ public void setVolumeControlEnabled(boolean enable) {
+ if (mVolumeControlEnabled != enable) {
+ mVolumeControlEnabled = enable;
+ if (mCreated) {
+ update(false);
+ }
+ }
+ }
+
+ /**
+ * Returns whether to enable the volume slider and volume control using the volume keys
+ * when the route supports it.
+ */
+ public boolean isVolumeControlEnabled() {
+ return mVolumeControlEnabled;
+ }
+
+ /**
+ * Set the session to use for metadata and transport controls. The dialog
+ * will listen to changes on this session and update the UI automatically in
+ * response to changes.
+ *
+ * @param sessionToken The token for the session to use.
+ */
+ private void setMediaSession(MediaSessionCompat.Token sessionToken) {
+ if (mMediaController != null) {
+ mMediaController.unregisterCallback(mControllerCallback);
+ mMediaController = null;
+ }
+ if (sessionToken == null) {
+ return;
+ }
+ if (!mAttachedToWindow) {
+ return;
+ }
+ try {
+ mMediaController = new MediaControllerCompat(mContext, sessionToken);
+ } catch (RemoteException e) {
+ Log.e(TAG, "Error creating media controller in setMediaSession.", e);
+ }
+ if (mMediaController != null) {
+ mMediaController.registerCallback(mControllerCallback);
+ }
+ MediaMetadataCompat metadata = mMediaController == null ? null
+ : mMediaController.getMetadata();
+ mDescription = metadata == null ? null : metadata.getDescription();
+ mState = mMediaController == null ? null : mMediaController.getPlaybackState();
+ updateArtIconIfNeeded();
+ update(false);
+ }
+
+ /**
+ * Gets the session to use for metadata and transport controls.
+ *
+ * @return The token for the session to use or null if none.
+ */
+ public MediaSessionCompat.Token getMediaSession() {
+ return mMediaController == null ? null : mMediaController.getSessionToken();
+ }
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+
+ getWindow().setBackgroundDrawableResource(android.R.color.transparent);
+
+ setContentView(ApiHelper.inflateLibLayout(mContext,
+ ApiHelper.getLibTheme(mContext, MediaRouterThemeHelper.getRouterThemeId(mContext)),
+ R.layout.mr_controller_material_dialog_b));
+
+ // Remove the neutral button.
+ findViewById(BUTTON_NEUTRAL_RES_ID).setVisibility(View.GONE);
+
+ ClickListener listener = new ClickListener();
+
+ mExpandableAreaLayout = findViewById(R.id.mr_expandable_area);
+ mExpandableAreaLayout.setOnClickListener(new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ dismiss();
+ }
+ });
+ mDialogAreaLayout = findViewById(R.id.mr_dialog_area);
+ mDialogAreaLayout.setOnClickListener(new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ // Eat unhandled touch events.
+ }
+ });
+ int color = MediaRouterThemeHelper.getButtonTextColor(mContext);
+ mDisconnectButton = findViewById(BUTTON_DISCONNECT_RES_ID);
+ mDisconnectButton.setText(
+ ApiHelper.getLibResources(mContext).getString(R.string.mr_controller_disconnect));
+ mDisconnectButton.setTextColor(color);
+ mDisconnectButton.setOnClickListener(listener);
+
+ mStopCastingButton = findViewById(BUTTON_STOP_RES_ID);
+ mStopCastingButton.setText(
+ ApiHelper.getLibResources(mContext).getString(R.string.mr_controller_stop_casting));
+ mStopCastingButton.setTextColor(color);
+ mStopCastingButton.setOnClickListener(listener);
+
+ mRouteNameTextView = findViewById(R.id.mr_name);
+ mCloseButton = findViewById(R.id.mr_close);
+ mCloseButton.setOnClickListener(listener);
+ mCustomControlLayout = findViewById(R.id.mr_custom_control);
+ mDefaultControlLayout = findViewById(R.id.mr_default_control);
+
+ // Start the session activity when a content item (album art, title or subtitle) is clicked.
+ View.OnClickListener onClickListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ if (mMediaController != null) {
+ PendingIntent pi = mMediaController.getSessionActivity();
+ if (pi != null) {
+ try {
+ pi.send();
+ dismiss();
+ } catch (PendingIntent.CanceledException e) {
+ Log.e(TAG, pi + " was not sent; it had been canceled.");
+ }
+ }
+ }
+ }
+ };
+ mArtView = findViewById(R.id.mr_art);
+ mArtView.setOnClickListener(onClickListener);
+ findViewById(R.id.mr_control_title_container).setOnClickListener(onClickListener);
+
+ mMediaMainControlLayout = findViewById(R.id.mr_media_main_control);
+ mDividerView = findViewById(R.id.mr_control_divider);
+
+ mPlaybackControlLayout = findViewById(R.id.mr_playback_control);
+ mTitleView = findViewById(R.id.mr_control_title);
+ mSubtitleView = findViewById(R.id.mr_control_subtitle);
+ mPlaybackControlButton = findViewById(R.id.mr_control_playback_ctrl);
+ mPlaybackControlButton.setOnClickListener(listener);
+
+ mVolumeControlLayout = findViewById(R.id.mr_volume_control);
+ mVolumeControlLayout.setVisibility(View.GONE);
+ mVolumeSlider = findViewById(R.id.mr_volume_slider);
+ mVolumeSlider.setTag(mRoute);
+ mVolumeChangeListener = new VolumeChangeListener();
+ mVolumeSlider.setOnSeekBarChangeListener(mVolumeChangeListener);
+
+ mVolumeGroupList = findViewById(R.id.mr_volume_group_list);
+ mGroupMemberRoutes = new ArrayList<MediaRouter.RouteInfo>();
+ mVolumeGroupAdapter = new VolumeGroupAdapter(mVolumeGroupList.getContext(),
+ mGroupMemberRoutes);
+ mVolumeGroupList.setAdapter(mVolumeGroupAdapter);
+ mGroupMemberRoutesAnimatingWithBitmap = new HashSet<>();
+
+ MediaRouterThemeHelper.setMediaControlsBackgroundColor(mContext,
+ mMediaMainControlLayout, mVolumeGroupList, getGroup() != null);
+ MediaRouterThemeHelper.setVolumeSliderColor(mContext,
+ (MediaRouteVolumeSlider) mVolumeSlider, mMediaMainControlLayout);
+ mVolumeSliderMap = new HashMap<>();
+ mVolumeSliderMap.put(mRoute, mVolumeSlider);
+
+ mGroupExpandCollapseButton =
+ findViewById(R.id.mr_group_expand_collapse);
+ mGroupExpandCollapseButton.setOnClickListener(new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ mIsGroupExpanded = !mIsGroupExpanded;
+ if (mIsGroupExpanded) {
+ mVolumeGroupList.setVisibility(View.VISIBLE);
+ }
+ loadInterpolator();
+ updateLayoutHeight(true);
+ }
+ });
+ loadInterpolator();
+ mGroupListAnimationDurationMs = ApiHelper.getLibResources(mContext).getInteger(
+ R.integer.mr_controller_volume_group_list_animation_duration_ms);
+ mGroupListFadeInDurationMs = ApiHelper.getLibResources(mContext).getInteger(
+ R.integer.mr_controller_volume_group_list_fade_in_duration_ms);
+ mGroupListFadeOutDurationMs = ApiHelper.getLibResources(mContext).getInteger(
+ R.integer.mr_controller_volume_group_list_fade_out_duration_ms);
+
+ mCustomControlView = onCreateMediaControlView(savedInstanceState);
+ if (mCustomControlView != null) {
+ mCustomControlLayout.addView(mCustomControlView);
+ mCustomControlLayout.setVisibility(View.VISIBLE);
+ }
+ mCreated = true;
+ updateLayout();
+ }
+
+ /**
+ * Sets the width of the dialog. Also called when configuration changes.
+ */
+ void updateLayout() {
+ int width = MediaRouteDialogHelper.getDialogWidth(mContext);
+ getWindow().setLayout(width, ViewGroup.LayoutParams.WRAP_CONTENT);
+
+ View decorView = getWindow().getDecorView();
+ mDialogContentWidth = width - decorView.getPaddingLeft() - decorView.getPaddingRight();
+
+ Resources res = ApiHelper.getLibResources(mContext);
+ mVolumeGroupListItemIconSize = res.getDimensionPixelSize(
+ R.dimen.mr_controller_volume_group_list_item_icon_size);
+ mVolumeGroupListItemHeight = res.getDimensionPixelSize(
+ R.dimen.mr_controller_volume_group_list_item_height);
+ mVolumeGroupListMaxHeight = res.getDimensionPixelSize(
+ R.dimen.mr_controller_volume_group_list_max_height);
+
+ // Fetch the art icon again on layout changes so that it is resized accordingly.
+ mArtIconBitmap = null;
+ mArtIconUri = null;
+ updateArtIconIfNeeded();
+ update(false);
+ }
+
+ @Override
+ public void onAttachedToWindow() {
+ super.onAttachedToWindow();
+ mAttachedToWindow = true;
+
+ mRouter.addCallback(MediaRouteSelector.EMPTY, mCallback,
+ MediaRouter.CALLBACK_FLAG_UNFILTERED_EVENTS);
+ setMediaSession(mRouter.getMediaSessionToken());
+ }
+
+ @Override
+ public void onDetachedFromWindow() {
+ mRouter.removeCallback(mCallback);
+ setMediaSession(null);
+ mAttachedToWindow = false;
+ super.onDetachedFromWindow();
+ }
+
+ @Override
+ public boolean onKeyDown(int keyCode, KeyEvent event) {
+ if (keyCode == KeyEvent.KEYCODE_VOLUME_DOWN
+ || keyCode == KeyEvent.KEYCODE_VOLUME_UP) {
+ mRoute.requestUpdateVolume(keyCode == KeyEvent.KEYCODE_VOLUME_DOWN ? -1 : 1);
+ return true;
+ }
+ return super.onKeyDown(keyCode, event);
+ }
+
+ @Override
+ public boolean onKeyUp(int keyCode, KeyEvent event) {
+ if (keyCode == KeyEvent.KEYCODE_VOLUME_DOWN
+ || keyCode == KeyEvent.KEYCODE_VOLUME_UP) {
+ return true;
+ }
+ return super.onKeyUp(keyCode, event);
+ }
+
+ void update(boolean animate) {
+ // Defer dialog updates if a user is adjusting a volume in the list
+ if (mRouteInVolumeSliderTouched != null) {
+ mHasPendingUpdate = true;
+ mPendingUpdateAnimationNeeded |= animate;
+ return;
+ }
+ mHasPendingUpdate = false;
+ mPendingUpdateAnimationNeeded = false;
+ if (!mRoute.isSelected() || mRoute.isDefaultOrBluetooth()) {
+ dismiss();
+ return;
+ }
+ if (!mCreated) {
+ return;
+ }
+
+ mRouteNameTextView.setText(mRoute.getName());
+ mDisconnectButton.setVisibility(mRoute.canDisconnect() ? View.VISIBLE : View.GONE);
+ if (mCustomControlView == null && mArtIconIsLoaded) {
+ if (isBitmapRecycled(mArtIconLoadedBitmap)) {
+ Log.w(TAG, "Can't set artwork image with recycled bitmap: " + mArtIconLoadedBitmap);
+ } else {
+ mArtView.setImageBitmap(mArtIconLoadedBitmap);
+ mArtView.setBackgroundColor(mArtIconBackgroundColor);
+ }
+ clearLoadedBitmap();
+ }
+ updateVolumeControlLayout();
+ updatePlaybackControlLayout();
+ updateLayoutHeight(animate);
+ }
+
+ private boolean isBitmapRecycled(Bitmap bitmap) {
+ return bitmap != null && bitmap.isRecycled();
+ }
+
+ private boolean canShowPlaybackControlLayout() {
+ return mCustomControlView == null && (mDescription != null || mState != null);
+ }
+
+ /**
+ * Returns the height of main media controller which includes playback control and master
+ * volume control.
+ */
+ private int getMainControllerHeight(boolean showPlaybackControl) {
+ int height = 0;
+ if (showPlaybackControl || mVolumeControlLayout.getVisibility() == View.VISIBLE) {
+ height += mMediaMainControlLayout.getPaddingTop()
+ + mMediaMainControlLayout.getPaddingBottom();
+ if (showPlaybackControl) {
+ height += mPlaybackControlLayout.getMeasuredHeight();
+ }
+ if (mVolumeControlLayout.getVisibility() == View.VISIBLE) {
+ height += mVolumeControlLayout.getMeasuredHeight();
+ }
+ if (showPlaybackControl && mVolumeControlLayout.getVisibility() == View.VISIBLE) {
+ height += mDividerView.getMeasuredHeight();
+ }
+ }
+ return height;
+ }
+
+ private void updateMediaControlVisibility(boolean canShowPlaybackControlLayout) {
+ // TODO: Update the top and bottom padding of the control layout according to the display
+ // height.
+ mDividerView.setVisibility((mVolumeControlLayout.getVisibility() == View.VISIBLE
+ && canShowPlaybackControlLayout) ? View.VISIBLE : View.GONE);
+ mMediaMainControlLayout.setVisibility((mVolumeControlLayout.getVisibility() == View.GONE
+ && !canShowPlaybackControlLayout) ? View.GONE : View.VISIBLE);
+ }
+
+ void updateLayoutHeight(final boolean animate) {
+ // We need to defer the update until the first layout has occurred, as we don't yet
+ // know the overall visible display size in which the window this view is attached to
+ // has been positioned.
+ mDefaultControlLayout.requestLayout();
+ ViewTreeObserver observer = mDefaultControlLayout.getViewTreeObserver();
+ observer.addOnGlobalLayoutListener(new ViewTreeObserver.OnGlobalLayoutListener() {
+ @Override
+ public void onGlobalLayout() {
+ mDefaultControlLayout.getViewTreeObserver().removeGlobalOnLayoutListener(this);
+ if (mIsGroupListAnimating) {
+ mIsGroupListAnimationPending = true;
+ } else {
+ updateLayoutHeightInternal(animate);
+ }
+ }
+ });
+ }
+
+ /**
+ * Updates the height of views and hide artwork or metadata if space is limited.
+ */
+ void updateLayoutHeightInternal(boolean animate) {
+ // Measure the size of widgets and get the height of main components.
+ int oldHeight = getLayoutHeight(mMediaMainControlLayout);
+ setLayoutHeight(mMediaMainControlLayout, ViewGroup.LayoutParams.MATCH_PARENT);
+ updateMediaControlVisibility(canShowPlaybackControlLayout());
+ View decorView = getWindow().getDecorView();
+ decorView.measure(
+ MeasureSpec.makeMeasureSpec(getWindow().getAttributes().width, MeasureSpec.EXACTLY),
+ MeasureSpec.UNSPECIFIED);
+ setLayoutHeight(mMediaMainControlLayout, oldHeight);
+ int artViewHeight = 0;
+ if (mCustomControlView == null && mArtView.getDrawable() instanceof BitmapDrawable) {
+ Bitmap art = ((BitmapDrawable) mArtView.getDrawable()).getBitmap();
+ if (art != null) {
+ artViewHeight = getDesiredArtHeight(art.getWidth(), art.getHeight());
+ mArtView.setScaleType(art.getWidth() >= art.getHeight()
+ ? ImageView.ScaleType.FIT_XY : ImageView.ScaleType.FIT_CENTER);
+ }
+ }
+ int mainControllerHeight = getMainControllerHeight(canShowPlaybackControlLayout());
+ int volumeGroupListCount = mGroupMemberRoutes.size();
+ // Scale down volume group list items in landscape mode.
+ int expandedGroupListHeight = getGroup() == null ? 0 :
+ mVolumeGroupListItemHeight * getGroup().getRoutes().size();
+ if (volumeGroupListCount > 0) {
+ expandedGroupListHeight += mVolumeGroupListPaddingTop;
+ }
+ expandedGroupListHeight = Math.min(expandedGroupListHeight, mVolumeGroupListMaxHeight);
+ int visibleGroupListHeight = mIsGroupExpanded ? expandedGroupListHeight : 0;
+
+ int desiredControlLayoutHeight =
+ Math.max(artViewHeight, visibleGroupListHeight) + mainControllerHeight;
+ Rect visibleRect = new Rect();
+ decorView.getWindowVisibleDisplayFrame(visibleRect);
+ // Height of non-control views in decor view.
+ // This includes the title bar, button bar, and the dialog's vertical padding, which
+ // should always be shown.
+ int nonControlViewHeight = mDialogAreaLayout.getMeasuredHeight()
+ - mDefaultControlLayout.getMeasuredHeight();
+ // Maximum allowed height for controls to fit screen.
+ int maximumControlViewHeight = visibleRect.height() - nonControlViewHeight;
+
+ // Show artwork if it fits the screen.
+ if (mCustomControlView == null && artViewHeight > 0
+ && desiredControlLayoutHeight <= maximumControlViewHeight) {
+ mArtView.setVisibility(View.VISIBLE);
+ setLayoutHeight(mArtView, artViewHeight);
+ } else {
+ if (getLayoutHeight(mVolumeGroupList) + mMediaMainControlLayout.getMeasuredHeight()
+ >= mDefaultControlLayout.getMeasuredHeight()) {
+ mArtView.setVisibility(View.GONE);
+ }
+ artViewHeight = 0;
+ desiredControlLayoutHeight = visibleGroupListHeight + mainControllerHeight;
+ }
+ // Show the playback control if it fits the screen.
+ if (canShowPlaybackControlLayout()
+ && desiredControlLayoutHeight <= maximumControlViewHeight) {
+ mPlaybackControlLayout.setVisibility(View.VISIBLE);
+ } else {
+ mPlaybackControlLayout.setVisibility(View.GONE);
+ }
+ updateMediaControlVisibility(mPlaybackControlLayout.getVisibility() == View.VISIBLE);
+ mainControllerHeight = getMainControllerHeight(
+ mPlaybackControlLayout.getVisibility() == View.VISIBLE);
+ desiredControlLayoutHeight =
+ Math.max(artViewHeight, visibleGroupListHeight) + mainControllerHeight;
+
+ // Limit the volume group list height to fit the screen.
+ if (desiredControlLayoutHeight > maximumControlViewHeight) {
+ visibleGroupListHeight -= (desiredControlLayoutHeight - maximumControlViewHeight);
+ desiredControlLayoutHeight = maximumControlViewHeight;
+ }
+ // Update the layouts with the computed heights.
+ mMediaMainControlLayout.clearAnimation();
+ mVolumeGroupList.clearAnimation();
+ mDefaultControlLayout.clearAnimation();
+ if (animate) {
+ animateLayoutHeight(mMediaMainControlLayout, mainControllerHeight);
+ animateLayoutHeight(mVolumeGroupList, visibleGroupListHeight);
+ animateLayoutHeight(mDefaultControlLayout, desiredControlLayoutHeight);
+ } else {
+ setLayoutHeight(mMediaMainControlLayout, mainControllerHeight);
+ setLayoutHeight(mVolumeGroupList, visibleGroupListHeight);
+ setLayoutHeight(mDefaultControlLayout, desiredControlLayoutHeight);
+ }
+ // Maximize the window size with a transparent layout in advance for smooth animation.
+ setLayoutHeight(mExpandableAreaLayout, visibleRect.height());
+ rebuildVolumeGroupList(animate);
+ }
+
+ void updateVolumeGroupItemHeight(View item) {
+ LinearLayout container = (LinearLayout) item.findViewById(R.id.volume_item_container);
+ setLayoutHeight(container, mVolumeGroupListItemHeight);
+ View icon = item.findViewById(R.id.mr_volume_item_icon);
+ ViewGroup.LayoutParams lp = icon.getLayoutParams();
+ lp.width = mVolumeGroupListItemIconSize;
+ lp.height = mVolumeGroupListItemIconSize;
+ icon.setLayoutParams(lp);
+ }
+
+ private void animateLayoutHeight(final View view, int targetHeight) {
+ final int startValue = getLayoutHeight(view);
+ final int endValue = targetHeight;
+ Animation anim = new Animation() {
+ @Override
+ protected void applyTransformation(float interpolatedTime, Transformation t) {
+ int height = startValue - (int) ((startValue - endValue) * interpolatedTime);
+ setLayoutHeight(view, height);
+ }
+ };
+ anim.setDuration(mGroupListAnimationDurationMs);
+ if (android.os.Build.VERSION.SDK_INT >= 21) {
+ anim.setInterpolator(mInterpolator);
+ }
+ view.startAnimation(anim);
+ }
+
+ void loadInterpolator() {
+ mInterpolator =
+ mIsGroupExpanded ? mLinearOutSlowInInterpolator : mFastOutSlowInInterpolator;
+ }
+
+ private void updateVolumeControlLayout() {
+ if (isVolumeControlAvailable(mRoute)) {
+ if (mVolumeControlLayout.getVisibility() == View.GONE) {
+ mVolumeControlLayout.setVisibility(View.VISIBLE);
+ mVolumeSlider.setMax(mRoute.getVolumeMax());
+ mVolumeSlider.setProgress(mRoute.getVolume());
+ mGroupExpandCollapseButton.setVisibility(getGroup() == null ? View.GONE
+ : View.VISIBLE);
+ }
+ } else {
+ mVolumeControlLayout.setVisibility(View.GONE);
+ }
+ }
+
+ private void rebuildVolumeGroupList(boolean animate) {
+ List<MediaRouter.RouteInfo> routes = getGroup() == null ? null : getGroup().getRoutes();
+ if (routes == null) {
+ mGroupMemberRoutes.clear();
+ mVolumeGroupAdapter.notifyDataSetChanged();
+ } else if (MediaRouteDialogHelper.listUnorderedEquals(mGroupMemberRoutes, routes)) {
+ mVolumeGroupAdapter.notifyDataSetChanged();
+ } else {
+ HashMap<MediaRouter.RouteInfo, Rect> previousRouteBoundMap = animate
+ ? MediaRouteDialogHelper.getItemBoundMap(mVolumeGroupList, mVolumeGroupAdapter)
+ : null;
+ HashMap<MediaRouter.RouteInfo, BitmapDrawable> previousRouteBitmapMap = animate
+ ? MediaRouteDialogHelper.getItemBitmapMap(mContext, mVolumeGroupList,
+ mVolumeGroupAdapter) : null;
+ mGroupMemberRoutesAdded =
+ MediaRouteDialogHelper.getItemsAdded(mGroupMemberRoutes, routes);
+ mGroupMemberRoutesRemoved = MediaRouteDialogHelper.getItemsRemoved(mGroupMemberRoutes,
+ routes);
+ mGroupMemberRoutes.addAll(0, mGroupMemberRoutesAdded);
+ mGroupMemberRoutes.removeAll(mGroupMemberRoutesRemoved);
+ mVolumeGroupAdapter.notifyDataSetChanged();
+ if (animate && mIsGroupExpanded
+ && mGroupMemberRoutesAdded.size() + mGroupMemberRoutesRemoved.size() > 0) {
+ animateGroupListItems(previousRouteBoundMap, previousRouteBitmapMap);
+ } else {
+ mGroupMemberRoutesAdded = null;
+ mGroupMemberRoutesRemoved = null;
+ }
+ }
+ }
+
+ private void animateGroupListItems(final Map<MediaRouter.RouteInfo, Rect> previousRouteBoundMap,
+ final Map<MediaRouter.RouteInfo, BitmapDrawable> previousRouteBitmapMap) {
+ mVolumeGroupList.setEnabled(false);
+ mVolumeGroupList.requestLayout();
+ mIsGroupListAnimating = true;
+ ViewTreeObserver observer = mVolumeGroupList.getViewTreeObserver();
+ observer.addOnGlobalLayoutListener(new ViewTreeObserver.OnGlobalLayoutListener() {
+ @Override
+ public void onGlobalLayout() {
+ mVolumeGroupList.getViewTreeObserver().removeGlobalOnLayoutListener(this);
+ animateGroupListItemsInternal(previousRouteBoundMap, previousRouteBitmapMap);
+ }
+ });
+ }
+
+ void animateGroupListItemsInternal(
+ Map<MediaRouter.RouteInfo, Rect> previousRouteBoundMap,
+ Map<MediaRouter.RouteInfo, BitmapDrawable> previousRouteBitmapMap) {
+ if (mGroupMemberRoutesAdded == null || mGroupMemberRoutesRemoved == null) {
+ return;
+ }
+ int groupSizeDelta = mGroupMemberRoutesAdded.size() - mGroupMemberRoutesRemoved.size();
+ boolean listenerRegistered = false;
+ Animation.AnimationListener listener = new Animation.AnimationListener() {
+ @Override
+ public void onAnimationStart(Animation animation) {
+ mVolumeGroupList.startAnimationAll();
+ mVolumeGroupList.postDelayed(mGroupListFadeInAnimation,
+ mGroupListAnimationDurationMs);
+ }
+
+ @Override
+ public void onAnimationEnd(Animation animation) { }
+
+ @Override
+ public void onAnimationRepeat(Animation animation) { }
+ };
+
+        // Animate visible items from their previous positions to their current positions, except
+        // for routes that were just added. Added routes remain hidden until the translate
+        // animation finishes.
+ int first = mVolumeGroupList.getFirstVisiblePosition();
+ for (int i = 0; i < mVolumeGroupList.getChildCount(); ++i) {
+ View view = mVolumeGroupList.getChildAt(i);
+ int position = first + i;
+ MediaRouter.RouteInfo route = mVolumeGroupAdapter.getItem(position);
+ Rect previousBounds = previousRouteBoundMap.get(route);
+ int currentTop = view.getTop();
+ int previousTop = previousBounds != null ? previousBounds.top
+ : (currentTop + mVolumeGroupListItemHeight * groupSizeDelta);
+ AnimationSet animSet = new AnimationSet(true);
+ if (mGroupMemberRoutesAdded != null && mGroupMemberRoutesAdded.contains(route)) {
+ previousTop = currentTop;
+ Animation alphaAnim = new AlphaAnimation(0.0f, 0.0f);
+ alphaAnim.setDuration(mGroupListFadeInDurationMs);
+ animSet.addAnimation(alphaAnim);
+ }
+ Animation translationAnim = new TranslateAnimation(0, 0, previousTop - currentTop, 0);
+ translationAnim.setDuration(mGroupListAnimationDurationMs);
+ animSet.addAnimation(translationAnim);
+ animSet.setFillAfter(true);
+ animSet.setFillEnabled(true);
+ animSet.setInterpolator(mInterpolator);
+ if (!listenerRegistered) {
+ listenerRegistered = true;
+ animSet.setAnimationListener(listener);
+ }
+ view.clearAnimation();
+ view.startAnimation(animSet);
+ previousRouteBoundMap.remove(route);
+ previousRouteBitmapMap.remove(route);
+ }
+
+        // If a member route no longer exists, it has either been removed or moved outside the
+        // ListView's layout boundary. In that case, use the previously captured bitmaps for the
+        // animation.
+ for (Map.Entry<MediaRouter.RouteInfo, BitmapDrawable> item
+ : previousRouteBitmapMap.entrySet()) {
+ final MediaRouter.RouteInfo route = item.getKey();
+ final BitmapDrawable bitmap = item.getValue();
+ final Rect bounds = previousRouteBoundMap.get(route);
+ OverlayObject object = null;
+ if (mGroupMemberRoutesRemoved.contains(route)) {
+ object = new OverlayObject(bitmap, bounds).setAlphaAnimation(1.0f, 0.0f)
+ .setDuration(mGroupListFadeOutDurationMs)
+ .setInterpolator(mInterpolator);
+ } else {
+ int deltaY = groupSizeDelta * mVolumeGroupListItemHeight;
+ object = new OverlayObject(bitmap, bounds).setTranslateYAnimation(deltaY)
+ .setDuration(mGroupListAnimationDurationMs)
+ .setInterpolator(mInterpolator)
+ .setAnimationEndListener(new OverlayObject.OnAnimationEndListener() {
+ @Override
+ public void onAnimationEnd() {
+ mGroupMemberRoutesAnimatingWithBitmap.remove(route);
+ mVolumeGroupAdapter.notifyDataSetChanged();
+ }
+ });
+ mGroupMemberRoutesAnimatingWithBitmap.add(route);
+ }
+ mVolumeGroupList.addOverlayObject(object);
+ }
+ }
+
+ void startGroupListFadeInAnimation() {
+ clearGroupListAnimation(true);
+ mVolumeGroupList.requestLayout();
+ ViewTreeObserver observer = mVolumeGroupList.getViewTreeObserver();
+ observer.addOnGlobalLayoutListener(new ViewTreeObserver.OnGlobalLayoutListener() {
+ @Override
+ public void onGlobalLayout() {
+ mVolumeGroupList.getViewTreeObserver().removeGlobalOnLayoutListener(this);
+ startGroupListFadeInAnimationInternal();
+ }
+ });
+ }
+
+ void startGroupListFadeInAnimationInternal() {
+ if (mGroupMemberRoutesAdded != null && mGroupMemberRoutesAdded.size() != 0) {
+ fadeInAddedRoutes();
+ } else {
+ finishAnimation(true);
+ }
+ }
+
+ void finishAnimation(boolean animate) {
+ mGroupMemberRoutesAdded = null;
+ mGroupMemberRoutesRemoved = null;
+ mIsGroupListAnimating = false;
+ if (mIsGroupListAnimationPending) {
+ mIsGroupListAnimationPending = false;
+ updateLayoutHeight(animate);
+ }
+ mVolumeGroupList.setEnabled(true);
+ }
+
+ private void fadeInAddedRoutes() {
+ Animation.AnimationListener listener = new Animation.AnimationListener() {
+ @Override
+ public void onAnimationStart(Animation animation) { }
+
+ @Override
+ public void onAnimationEnd(Animation animation) {
+ finishAnimation(true);
+ }
+
+ @Override
+ public void onAnimationRepeat(Animation animation) { }
+ };
+ boolean listenerRegistered = false;
+ int first = mVolumeGroupList.getFirstVisiblePosition();
+ for (int i = 0; i < mVolumeGroupList.getChildCount(); ++i) {
+ View view = mVolumeGroupList.getChildAt(i);
+ int position = first + i;
+ MediaRouter.RouteInfo route = mVolumeGroupAdapter.getItem(position);
+ if (mGroupMemberRoutesAdded.contains(route)) {
+ Animation alphaAnim = new AlphaAnimation(0.0f, 1.0f);
+ alphaAnim.setDuration(mGroupListFadeInDurationMs);
+ alphaAnim.setFillEnabled(true);
+ alphaAnim.setFillAfter(true);
+ if (!listenerRegistered) {
+ listenerRegistered = true;
+ alphaAnim.setAnimationListener(listener);
+ }
+ view.clearAnimation();
+ view.startAnimation(alphaAnim);
+ }
+ }
+ }
+
+ void clearGroupListAnimation(boolean exceptAddedRoutes) {
+ int first = mVolumeGroupList.getFirstVisiblePosition();
+ for (int i = 0; i < mVolumeGroupList.getChildCount(); ++i) {
+ View view = mVolumeGroupList.getChildAt(i);
+ int position = first + i;
+ MediaRouter.RouteInfo route = mVolumeGroupAdapter.getItem(position);
+ if (exceptAddedRoutes && mGroupMemberRoutesAdded != null
+ && mGroupMemberRoutesAdded.contains(route)) {
+ continue;
+ }
+ LinearLayout container = (LinearLayout) view.findViewById(R.id.volume_item_container);
+ container.setVisibility(View.VISIBLE);
+ AnimationSet animSet = new AnimationSet(true);
+ Animation alphaAnim = new AlphaAnimation(1.0f, 1.0f);
+ alphaAnim.setDuration(0);
+ animSet.addAnimation(alphaAnim);
+            Animation translationAnim = new TranslateAnimation(0, 0, 0, 0);
+            translationAnim.setDuration(0);
+            animSet.addAnimation(translationAnim);
+ animSet.setFillAfter(true);
+ animSet.setFillEnabled(true);
+ view.clearAnimation();
+ view.startAnimation(animSet);
+ }
+ mVolumeGroupList.stopAnimationAll();
+ if (!exceptAddedRoutes) {
+ finishAnimation(false);
+ }
+ }
+
+ private void updatePlaybackControlLayout() {
+ if (canShowPlaybackControlLayout()) {
+ CharSequence title = mDescription == null ? null : mDescription.getTitle();
+ boolean hasTitle = !TextUtils.isEmpty(title);
+
+ CharSequence subtitle = mDescription == null ? null : mDescription.getSubtitle();
+ boolean hasSubtitle = !TextUtils.isEmpty(subtitle);
+
+ boolean showTitle = false;
+ boolean showSubtitle = false;
+ if (mRoute.getPresentationDisplayId()
+ != MediaRouter.RouteInfo.PRESENTATION_DISPLAY_ID_NONE) {
+                // The user is currently casting the screen.
+ mTitleView.setText(ApiHelper.getLibResources(mContext).getString(
+ R.string.mr_controller_casting_screen));
+ showTitle = true;
+ } else if (mState == null || mState.getState() == PlaybackStateCompat.STATE_NONE) {
+ // Show "No media selected" as we don't yet know the playback state.
+ mTitleView.setText(ApiHelper.getLibResources(mContext).getString(
+ R.string.mr_controller_no_media_selected));
+ showTitle = true;
+ } else if (!hasTitle && !hasSubtitle) {
+ mTitleView.setText(ApiHelper.getLibResources(mContext).getString(
+ R.string.mr_controller_no_info_available));
+ showTitle = true;
+ } else {
+ if (hasTitle) {
+ mTitleView.setText(title);
+ showTitle = true;
+ }
+ if (hasSubtitle) {
+ mSubtitleView.setText(subtitle);
+ showSubtitle = true;
+ }
+ }
+ mTitleView.setVisibility(showTitle ? View.VISIBLE : View.GONE);
+ mSubtitleView.setVisibility(showSubtitle ? View.VISIBLE : View.GONE);
+
+ if (mState != null) {
+ boolean isPlaying = mState.getState() == PlaybackStateCompat.STATE_BUFFERING
+ || mState.getState() == PlaybackStateCompat.STATE_PLAYING;
+ Context playbackControlButtonContext = mPlaybackControlButton.getContext();
+ boolean visible = true;
+ int iconDrawableAttr = 0;
+ int iconDescResId = 0;
+ if (isPlaying && isPauseActionSupported()) {
+ iconDrawableAttr = R.attr.mediaRoutePauseDrawable;
+ iconDescResId = R.string.mr_controller_pause;
+ } else if (isPlaying && isStopActionSupported()) {
+ iconDrawableAttr = R.attr.mediaRouteStopDrawable;
+ iconDescResId = R.string.mr_controller_stop;
+ } else if (!isPlaying && isPlayActionSupported()) {
+ iconDrawableAttr = R.attr.mediaRoutePlayDrawable;
+ iconDescResId = R.string.mr_controller_play;
+ } else {
+ visible = false;
+ }
+ mPlaybackControlButton.setVisibility(visible ? View.VISIBLE : View.GONE);
+ if (visible) {
+ mPlaybackControlButton.setImageResource(
+ MediaRouterThemeHelper.getThemeResource(
+ playbackControlButtonContext, iconDrawableAttr));
+ mPlaybackControlButton.setContentDescription(
+ playbackControlButtonContext.getResources()
+ .getText(iconDescResId));
+ }
+ }
+ }
+ }
+
+ private boolean isPlayActionSupported() {
+ return (mState.getActions() & (ACTION_PLAY | ACTION_PLAY_PAUSE)) != 0;
+ }
+
+ private boolean isPauseActionSupported() {
+ return (mState.getActions() & (ACTION_PAUSE | ACTION_PLAY_PAUSE)) != 0;
+ }
+
+ private boolean isStopActionSupported() {
+ return (mState.getActions() & ACTION_STOP) != 0;
+ }
+
+ boolean isVolumeControlAvailable(MediaRouter.RouteInfo route) {
+ return mVolumeControlEnabled && route.getVolumeHandling()
+ == MediaRouter.RouteInfo.PLAYBACK_VOLUME_VARIABLE;
+ }
+
+ private static int getLayoutHeight(View view) {
+ return view.getLayoutParams().height;
+ }
+
+ static void setLayoutHeight(View view, int height) {
+ ViewGroup.LayoutParams lp = view.getLayoutParams();
+ lp.height = height;
+ view.setLayoutParams(lp);
+ }
+
+ private static boolean uriEquals(Uri uri1, Uri uri2) {
+ if (uri1 != null && uri1.equals(uri2)) {
+ return true;
+ } else if (uri1 == null && uri2 == null) {
+ return true;
+ }
+ return false;
+ }
+
+ /**
+     * Returns the desired art height to fit into the controller dialog.
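+     * <p>
+     * As a worked illustration (the numbers are hypothetical): with a dialog content width of
+     * 600px, a 1200x800 landscape image scales to 600x400, so this returns 400; a portrait image
+     * falls back to the 16:9 height, (int) (600 * 9 / 16f + 0.5f) = 338.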
+ */
+ int getDesiredArtHeight(int originalWidth, int originalHeight) {
+ if (originalWidth >= originalHeight) {
+ // For landscape art, fit width to dialog width.
+ return (int) ((float) mDialogContentWidth * originalHeight / originalWidth + 0.5f);
+ }
+        // For portrait art, limit the height to the height a 16:9 landscape image would get.
+ return (int) ((float) mDialogContentWidth * 9 / 16 + 0.5f);
+ }
+
+ void updateArtIconIfNeeded() {
+ if (mCustomControlView != null || !isIconChanged()) {
+ return;
+ }
+ if (mFetchArtTask != null) {
+ mFetchArtTask.cancel(true);
+ }
+ mFetchArtTask = new FetchArtTask();
+ mFetchArtTask.execute();
+ }
+
+ /**
+     * Clears the bitmap loaded by FetchArtTask. Called after the loaded bitmap has been applied
+     * to the artwork, or when it is no longer valid.
+ */
+ void clearLoadedBitmap() {
+ mArtIconIsLoaded = false;
+ mArtIconLoadedBitmap = null;
+ mArtIconBackgroundColor = 0;
+ }
+
+ /**
+     * Returns whether the new art image differs from the original one. Compares the Bitmap
+     * objects first, and falls back to comparing the URIs only when both bitmaps are null.
+ */
+ private boolean isIconChanged() {
+ Bitmap newBitmap = mDescription == null ? null : mDescription.getIconBitmap();
+ Uri newUri = mDescription == null ? null : mDescription.getIconUri();
+ Bitmap oldBitmap = mFetchArtTask == null ? mArtIconBitmap : mFetchArtTask.getIconBitmap();
+ Uri oldUri = mFetchArtTask == null ? mArtIconUri : mFetchArtTask.getIconUri();
+ if (oldBitmap != newBitmap) {
+ return true;
+ } else if (oldBitmap == null && !uriEquals(oldUri, newUri)) {
+ return true;
+ }
+ return false;
+ }
+
+ private final class MediaRouterCallback extends MediaRouter.Callback {
+ MediaRouterCallback() {
+ }
+
+ @Override
+ public void onRouteUnselected(MediaRouter router, MediaRouter.RouteInfo route) {
+ update(false);
+ }
+
+ @Override
+ public void onRouteChanged(MediaRouter router, MediaRouter.RouteInfo route) {
+ update(true);
+ }
+
+ @Override
+ public void onRouteVolumeChanged(MediaRouter router, MediaRouter.RouteInfo route) {
+ SeekBar volumeSlider = mVolumeSliderMap.get(route);
+ int volume = route.getVolume();
+ if (DEBUG) {
+ Log.d(TAG, "onRouteVolumeChanged(), route.getVolume:" + volume);
+ }
+ if (volumeSlider != null && mRouteInVolumeSliderTouched != route) {
+ volumeSlider.setProgress(volume);
+ }
+ }
+ }
+
+ private final class MediaControllerCallback extends MediaControllerCompat.Callback {
+ MediaControllerCallback() {
+ }
+
+ @Override
+ public void onSessionDestroyed() {
+ if (mMediaController != null) {
+ mMediaController.unregisterCallback(mControllerCallback);
+ mMediaController = null;
+ }
+ }
+
+ @Override
+ public void onPlaybackStateChanged(PlaybackStateCompat state) {
+ mState = state;
+ update(false);
+ }
+
+ @Override
+ public void onMetadataChanged(MediaMetadataCompat metadata) {
+ mDescription = metadata == null ? null : metadata.getDescription();
+ updateArtIconIfNeeded();
+ update(false);
+ }
+ }
+
+ private final class ClickListener implements View.OnClickListener {
+ ClickListener() {
+ }
+
+ @Override
+ public void onClick(View v) {
+ int id = v.getId();
+ if (id == BUTTON_STOP_RES_ID || id == BUTTON_DISCONNECT_RES_ID) {
+ if (mRoute.isSelected()) {
+ mRouter.unselect(id == BUTTON_STOP_RES_ID ?
+ MediaRouter.UNSELECT_REASON_STOPPED :
+ MediaRouter.UNSELECT_REASON_DISCONNECTED);
+ }
+ dismiss();
+ } else if (id == R.id.mr_control_playback_ctrl) {
+ if (mMediaController != null && mState != null) {
+ boolean isPlaying = mState.getState() == PlaybackStateCompat.STATE_PLAYING;
+ int actionDescResId = 0;
+ if (isPlaying && isPauseActionSupported()) {
+ mMediaController.getTransportControls().pause();
+ actionDescResId = R.string.mr_controller_pause;
+ } else if (isPlaying && isStopActionSupported()) {
+ mMediaController.getTransportControls().stop();
+ actionDescResId = R.string.mr_controller_stop;
+ } else if (!isPlaying && isPlayActionSupported()){
+ mMediaController.getTransportControls().play();
+ actionDescResId = R.string.mr_controller_play;
+ }
+ // Announce the action for accessibility.
+ if (mAccessibilityManager != null && mAccessibilityManager.isEnabled()
+ && actionDescResId != 0) {
+ AccessibilityEvent event = AccessibilityEvent.obtain(
+ AccessibilityEventCompat.TYPE_ANNOUNCEMENT);
+ event.setPackageName(mContext.getPackageName());
+ event.setClassName(getClass().getName());
+ event.getText().add(
+ ApiHelper.getLibResources(mContext).getString(actionDescResId));
+ mAccessibilityManager.sendAccessibilityEvent(event);
+ }
+ }
+ } else if (id == R.id.mr_close) {
+ dismiss();
+ }
+ }
+ }
+
+ private class VolumeChangeListener implements SeekBar.OnSeekBarChangeListener {
+ private final Runnable mStopTrackingTouch = new Runnable() {
+ @Override
+ public void run() {
+ if (mRouteInVolumeSliderTouched != null) {
+ mRouteInVolumeSliderTouched = null;
+ if (mHasPendingUpdate) {
+ update(mPendingUpdateAnimationNeeded);
+ }
+ }
+ }
+ };
+
+ VolumeChangeListener() {
+ }
+
+ @Override
+ public void onStartTrackingTouch(SeekBar seekBar) {
+ if (mRouteInVolumeSliderTouched != null) {
+ mVolumeSlider.removeCallbacks(mStopTrackingTouch);
+ }
+ mRouteInVolumeSliderTouched = (MediaRouter.RouteInfo) seekBar.getTag();
+ }
+
+ @Override
+ public void onStopTrackingTouch(SeekBar seekBar) {
+            // Defer resetting mRouteInVolumeSliderTouched to allow the media route provider
+ // a little time to settle into its new state and publish the final
+ // volume update.
+ mVolumeSlider.postDelayed(mStopTrackingTouch, VOLUME_UPDATE_DELAY_MILLIS);
+ }
+
+ @Override
+ public void onProgressChanged(SeekBar seekBar, int progress, boolean fromUser) {
+ if (fromUser) {
+ MediaRouter.RouteInfo route = (MediaRouter.RouteInfo) seekBar.getTag();
+ if (DEBUG) {
+ Log.d(TAG, "onProgressChanged(): calling "
+ + "MediaRouter.RouteInfo.requestSetVolume(" + progress + ")");
+ }
+ route.requestSetVolume(progress);
+ }
+ }
+ }
+
+ private class VolumeGroupAdapter extends ArrayAdapter<MediaRouter.RouteInfo> {
+ final float mDisabledAlpha;
+
+ public VolumeGroupAdapter(Context context, List<MediaRouter.RouteInfo> objects) {
+ super(context, 0, objects);
+ mDisabledAlpha = MediaRouterThemeHelper.getDisabledAlpha(context);
+ }
+
+ @Override
+ public boolean isEnabled(int position) {
+ return false;
+ }
+
+ @Override
+ public View getView(final int position, View convertView, ViewGroup parent) {
+ View v = convertView;
+ if (v == null) {
+ v = LayoutInflater.from(parent.getContext()).inflate(
+ R.layout.mr_controller_volume_item, parent, false);
+ } else {
+ updateVolumeGroupItemHeight(v);
+ }
+
+ MediaRouter.RouteInfo route = getItem(position);
+ if (route != null) {
+ boolean isEnabled = route.isEnabled();
+
+ TextView routeName = (TextView) v.findViewById(R.id.mr_name);
+ routeName.setEnabled(isEnabled);
+ routeName.setText(route.getName());
+
+ MediaRouteVolumeSlider volumeSlider =
+ (MediaRouteVolumeSlider) v.findViewById(R.id.mr_volume_slider);
+ MediaRouterThemeHelper.setVolumeSliderColor(
+ parent.getContext(), volumeSlider, mVolumeGroupList);
+ volumeSlider.setTag(route);
+ mVolumeSliderMap.put(route, volumeSlider);
+ volumeSlider.setHideThumb(!isEnabled);
+ volumeSlider.setEnabled(isEnabled);
+ if (isEnabled) {
+ if (isVolumeControlAvailable(route)) {
+ volumeSlider.setMax(route.getVolumeMax());
+ volumeSlider.setProgress(route.getVolume());
+ volumeSlider.setOnSeekBarChangeListener(mVolumeChangeListener);
+ } else {
+ volumeSlider.setMax(100);
+ volumeSlider.setProgress(100);
+ volumeSlider.setEnabled(false);
+ }
+ }
+
+ ImageView volumeItemIcon =
+ (ImageView) v.findViewById(R.id.mr_volume_item_icon);
+ volumeItemIcon.setAlpha(isEnabled ? 0xFF : (int) (0xFF * mDisabledAlpha));
+
+                // If an overlay bitmap exists, the real view should remain hidden until
+                // the animation ends.
+ LinearLayout container = (LinearLayout) v.findViewById(R.id.volume_item_container);
+ container.setVisibility(mGroupMemberRoutesAnimatingWithBitmap.contains(route)
+ ? View.INVISIBLE : View.VISIBLE);
+
+                // Routes that are being added remain invisible until the animation ends.
+ if (mGroupMemberRoutesAdded != null && mGroupMemberRoutesAdded.contains(route)) {
+ Animation alphaAnim = new AlphaAnimation(0.0f, 0.0f);
+ alphaAnim.setDuration(0);
+ alphaAnim.setFillEnabled(true);
+ alphaAnim.setFillAfter(true);
+ v.clearAnimation();
+ v.startAnimation(alphaAnim);
+ }
+ }
+ return v;
+ }
+ }
+
+ private class FetchArtTask extends AsyncTask<Void, Void, Bitmap> {
+ // Show animation only when fetching takes a long time.
+ private static final long SHOW_ANIM_TIME_THRESHOLD_MILLIS = 120L;
+
+ private final Bitmap mIconBitmap;
+ private final Uri mIconUri;
+ private int mBackgroundColor;
+ private long mStartTimeMillis;
+
+ FetchArtTask() {
+ Bitmap bitmap = mDescription == null ? null : mDescription.getIconBitmap();
+ if (isBitmapRecycled(bitmap)) {
+ Log.w(TAG, "Can't fetch the given art bitmap because it's already recycled.");
+ bitmap = null;
+ }
+ mIconBitmap = bitmap;
+ mIconUri = mDescription == null ? null : mDescription.getIconUri();
+ }
+
+ public Bitmap getIconBitmap() {
+ return mIconBitmap;
+ }
+
+ public Uri getIconUri() {
+ return mIconUri;
+ }
+
+ @Override
+ protected void onPreExecute() {
+ mStartTimeMillis = SystemClock.uptimeMillis();
+ clearLoadedBitmap();
+ }
+
+ @Override
+ protected Bitmap doInBackground(Void... arg) {
+ Bitmap art = null;
+ if (mIconBitmap != null) {
+ art = mIconBitmap;
+ } else if (mIconUri != null) {
+ InputStream stream = null;
+ try {
+ if ((stream = openInputStreamByScheme(mIconUri)) == null) {
+ Log.w(TAG, "Unable to open: " + mIconUri);
+ return null;
+ }
+ // Query art size.
+ BitmapFactory.Options options = new BitmapFactory.Options();
+ options.inJustDecodeBounds = true;
+ BitmapFactory.decodeStream(stream, null, options);
+ if (options.outWidth == 0 || options.outHeight == 0) {
+ return null;
+ }
+ // Rewind the stream in order to restart art decoding.
+ try {
+ stream.reset();
+ } catch (IOException e) {
+ // Failed to rewind the stream, try to reopen it.
+ stream.close();
+ if ((stream = openInputStreamByScheme(mIconUri)) == null) {
+ Log.w(TAG, "Unable to open: " + mIconUri);
+ return null;
+ }
+ }
+ // Calculate required size to decode the art and possibly resize it.
+ options.inJustDecodeBounds = false;
+ int reqHeight = getDesiredArtHeight(options.outWidth, options.outHeight);
+ int ratio = options.outHeight / reqHeight;
+ options.inSampleSize = Math.max(1, Integer.highestOneBit(ratio));
+ if (isCancelled()) {
+ return null;
+ }
+ art = BitmapFactory.decodeStream(stream, null, options);
+ } catch (IOException e){
+ Log.w(TAG, "Unable to open: " + mIconUri, e);
+ } finally {
+ if (stream != null) {
+ try {
+ stream.close();
+ } catch (IOException e) {
+ }
+ }
+ }
+ }
+ if (isBitmapRecycled(art)) {
+ Log.w(TAG, "Can't use recycled bitmap: " + art);
+ return null;
+ }
+ if (art != null && art.getWidth() < art.getHeight()) {
+                // Portrait art requires the dominant color as the background color.
+ Palette palette = new Palette.Builder(art).maximumColorCount(1).generate();
+ mBackgroundColor = palette.getSwatches().isEmpty()
+ ? 0 : palette.getSwatches().get(0).getRgb();
+ }
+ return art;
+ }
+
+ @Override
+ protected void onPostExecute(Bitmap art) {
+ mFetchArtTask = null;
+ if (!ObjectsCompat.equals(mArtIconBitmap, mIconBitmap)
+ || !ObjectsCompat.equals(mArtIconUri, mIconUri)) {
+ mArtIconBitmap = mIconBitmap;
+ mArtIconLoadedBitmap = art;
+ mArtIconUri = mIconUri;
+ mArtIconBackgroundColor = mBackgroundColor;
+ mArtIconIsLoaded = true;
+ long elapsedTimeMillis = SystemClock.uptimeMillis() - mStartTimeMillis;
+ // Loaded bitmap will be applied on the next update
+ update(elapsedTimeMillis > SHOW_ANIM_TIME_THRESHOLD_MILLIS);
+ }
+ }
+
+ private InputStream openInputStreamByScheme(Uri uri) throws IOException {
+ String scheme = uri.getScheme().toLowerCase();
+ InputStream stream = null;
+ if (ContentResolver.SCHEME_ANDROID_RESOURCE.equals(scheme)
+ || ContentResolver.SCHEME_CONTENT.equals(scheme)
+ || ContentResolver.SCHEME_FILE.equals(scheme)) {
+ stream = mContext.getContentResolver().openInputStream(uri);
+ } else {
+ URL url = new URL(uri.toString());
+ URLConnection conn = url.openConnection();
+ conn.setConnectTimeout(CONNECTION_TIMEOUT_MILLIS);
+ conn.setReadTimeout(CONNECTION_TIMEOUT_MILLIS);
+ stream = conn.getInputStream();
+ }
+ return (stream == null) ? null : new BufferedInputStream(stream);
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialogFragment.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialogFragment.java
new file mode 100644
index 0000000..215d74f
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteControllerDialogFragment.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.app.Dialog;
+import android.app.DialogFragment;
+import android.content.Context;
+import android.content.res.Configuration;
+import android.os.Bundle;
+
+/**
+ * Media route controller dialog fragment.
+ * <p>
+ * Creates a {@link MediaRouteControllerDialog}. The application may subclass
+ * this dialog fragment to customize the media route controller dialog.
+ * </p>
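+ * <p>
+ * A minimal sketch of such a subclass (the class name below is illustrative, not part of
+ * this API):
+ * <pre>{@code
+ * public class MyControllerDialogFragment extends MediaRouteControllerDialogFragment {
+ *     @Override
+ *     public MediaRouteControllerDialog onCreateControllerDialog(
+ *             Context context, Bundle savedInstanceState) {
+ *         // A customized dialog, e.g. a MediaRouteControllerDialog subclass, could be
+ *         // returned here instead of the default one.
+ *         return new MediaRouteControllerDialog(context);
+ *     }
+ * }
+ * }</pre>
+ * </p>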
+ */
+public class MediaRouteControllerDialogFragment extends DialogFragment {
+ private MediaRouteControllerDialog mDialog;
+ /**
+ * Creates a media route controller dialog fragment.
+ * <p>
+ * All subclasses of this class must also possess a default constructor.
+ * </p>
+ */
+ public MediaRouteControllerDialogFragment() {
+ setCancelable(true);
+ }
+
+ /**
+ * Called when the controller dialog is being created.
+ * <p>
+ * Subclasses may override this method to customize the dialog.
+ * </p>
+ */
+ public MediaRouteControllerDialog onCreateControllerDialog(
+ Context context, Bundle savedInstanceState) {
+ return new MediaRouteControllerDialog(context);
+ }
+
+ @Override
+ public Dialog onCreateDialog(Bundle savedInstanceState) {
+ mDialog = onCreateControllerDialog(getContext(), savedInstanceState);
+ return mDialog;
+ }
+
+ @Override
+ public void onStop() {
+ super.onStop();
+ if (mDialog != null) {
+ mDialog.clearGroupListAnimation(false);
+ }
+ }
+
+ @Override
+ public void onConfigurationChanged(Configuration newConfig) {
+ super.onConfigurationChanged(newConfig);
+ if (mDialog != null) {
+ mDialog.updateLayout();
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
new file mode 100644
index 0000000..a9eaf39
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogFactory.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.support.annotation.NonNull;
+
+/**
+ * The media route dialog factory is responsible for creating the media route
+ * chooser and controller dialogs as needed.
+ * <p>
+ * The application can customize the dialogs by providing a subclass of the
+ * dialog factory to the {@link MediaRouteButton} using the
+ * {@link MediaRouteButton#setDialogFactory setDialogFactory} method.
+ * </p>
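+ * <p>
+ * A minimal sketch of a custom factory (the class names below are illustrative):
+ * <pre>{@code
+ * public class MyDialogFactory extends MediaRouteDialogFactory {
+ *     @Override
+ *     public MediaRouteControllerDialogFragment onCreateControllerDialogFragment() {
+ *         // Hypothetical subclass of MediaRouteControllerDialogFragment.
+ *         return new MyControllerDialogFragment();
+ *     }
+ * }
+ * // Typically installed on a MediaRouteButton:
+ * // mediaRouteButton.setDialogFactory(new MyDialogFactory());
+ * }</pre>
+ * </p>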
+ */
+public class MediaRouteDialogFactory {
+ private static final MediaRouteDialogFactory sDefault = new MediaRouteDialogFactory();
+
+ /**
+ * Creates a default media route dialog factory.
+ */
+ public MediaRouteDialogFactory() {
+ }
+
+ /**
+ * Gets the default factory instance.
+ *
+ * @return The default media route dialog factory, never null.
+ */
+ @NonNull
+ public static MediaRouteDialogFactory getDefault() {
+ return sDefault;
+ }
+
+ /**
+ * Called when the chooser dialog is being opened and it is time to create the fragment.
+ * <p>
+ * Subclasses may override this method to create a customized fragment.
+ * </p>
+ *
+ * @return The media route chooser dialog fragment, must not be null.
+ */
+ @NonNull
+ public MediaRouteChooserDialogFragment onCreateChooserDialogFragment() {
+ return new MediaRouteChooserDialogFragment();
+ }
+
+ /**
+ * Called when the controller dialog is being opened and it is time to create the fragment.
+ * <p>
+ * Subclasses may override this method to create a customized fragment.
+ * </p>
+ *
+ * @return The media route controller dialog fragment, must not be null.
+ */
+ @NonNull
+ public MediaRouteControllerDialogFragment onCreateControllerDialogFragment() {
+ return new MediaRouteControllerDialogFragment();
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogHelper.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogHelper.java
new file mode 100644
index 0000000..9aabf7b
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDialogHelper.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.content.Context;
+import android.graphics.Bitmap;
+import android.graphics.Canvas;
+import android.graphics.Rect;
+import android.graphics.drawable.BitmapDrawable;
+import android.util.DisplayMetrics;
+import android.util.TypedValue;
+import android.view.View;
+import android.view.ViewGroup;
+import android.widget.ArrayAdapter;
+import android.widget.ListView;
+
+import com.android.media.update.ApiHelper;
+import com.android.media.update.R;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+final class MediaRouteDialogHelper {
+ /**
+     * The framework should set the dialog width properly, but somehow it doesn't, so similar
+     * logic is duplicated here to determine the appropriate dialog width.
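+     * <p>
+     * An illustrative call site (not necessarily how this library uses it) would size a dialog
+     * window with the returned value:
+     * <pre>{@code
+     * getWindow().setLayout(MediaRouteDialogHelper.getDialogWidth(context),
+     *         ViewGroup.LayoutParams.WRAP_CONTENT);
+     * }</pre>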
+ */
+ public static int getDialogWidth(Context context) {
+ DisplayMetrics metrics = ApiHelper.getLibResources(context).getDisplayMetrics();
+ boolean isPortrait = metrics.widthPixels < metrics.heightPixels;
+
+ TypedValue value = new TypedValue();
+ ApiHelper.getLibResources(context).getValue(isPortrait
+ ? R.dimen.mr_dialog_fixed_width_minor
+ : R.dimen.mr_dialog_fixed_width_major, value, true);
+ if (value.type == TypedValue.TYPE_DIMENSION) {
+ return (int) value.getDimension(metrics);
+ } else if (value.type == TypedValue.TYPE_FRACTION) {
+ return (int) value.getFraction(metrics.widthPixels, metrics.widthPixels);
+ }
+ return ViewGroup.LayoutParams.WRAP_CONTENT;
+ }
+
+ /**
+ * Compares two lists regardless of order.
+ *
+ * @param list1 A list
+ * @param list2 A list to be compared with {@code list1}
+ * @return True if two lists have exactly same items regardless of order, false otherwise.
+ */
+ public static <E> boolean listUnorderedEquals(List<E> list1, List<E> list2) {
+ HashSet<E> set1 = new HashSet<>(list1);
+ HashSet<E> set2 = new HashSet<>(list2);
+ return set1.equals(set2);
+ }
+
+ /**
+     * Compares two lists and returns the set of items that exist in the after list but not in
+     * the before list, i.e. the newly added items.
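+     * <p>
+     * For example (illustrative values): {@code before = [A, B]} and {@code after = [B, C]}
+     * yields {@code {C}}.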
+ *
+ * @param before A list
+ * @param after A list to be compared with {@code before}
+ * @return A set of items which contains newly added items while
+ * comparing {@code after} to {@code before}.
+ */
+ public static <E> Set<E> getItemsAdded(List<E> before, List<E> after) {
+ HashSet<E> set = new HashSet<>(after);
+ set.removeAll(before);
+ return set;
+ }
+
+ /**
+     * Compares two lists and returns the set of items that exist in the before list but not in
+     * the after list, i.e. the removed items.
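+     * <p>
+     * For example (illustrative values): {@code before = [A, B]} and {@code after = [B, C]}
+     * yields {@code {A}}.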
+ *
+ * @param before A list
+ * @param after A list to be compared with {@code before}
+ * @return A set of items which contains removed items while
+ * comparing {@code after} to {@code before}.
+ */
+ public static <E> Set<E> getItemsRemoved(List<E> before, List<E> after) {
+ HashSet<E> set = new HashSet<>(before);
+ set.removeAll(after);
+ return set;
+ }
+
+ /**
+ * Generates an item-Rect map which indicates where member
+ * items are located in the given ListView.
+ *
+ * @param listView A list view
+ * @param adapter An array adapter which contains an array of items.
+ * @return A map of items and bounds of their views located in the given list view.
+ */
+ public static <E> HashMap<E, Rect> getItemBoundMap(ListView listView,
+ ArrayAdapter<E> adapter) {
+ HashMap<E, Rect> itemBoundMap = new HashMap<>();
+ int firstVisiblePosition = listView.getFirstVisiblePosition();
+ for (int i = 0; i < listView.getChildCount(); ++i) {
+ int position = firstVisiblePosition + i;
+ E item = adapter.getItem(position);
+ View view = listView.getChildAt(i);
+ itemBoundMap.put(item,
+ new Rect(view.getLeft(), view.getTop(), view.getRight(), view.getBottom()));
+ }
+ return itemBoundMap;
+ }
+
+ /**
+ * Generates an item-BitmapDrawable map which stores snapshots
+ * of member items in the given ListView.
+ *
+ * @param context A context
+ * @param listView A list view
+ * @param adapter An array adapter which contains an array of items.
+ * @return A map of items and snapshots of their views in the given list view.
+ */
+ public static <E> HashMap<E, BitmapDrawable> getItemBitmapMap(Context context,
+ ListView listView, ArrayAdapter<E> adapter) {
+ HashMap<E, BitmapDrawable> itemBitmapMap = new HashMap<>();
+ int firstVisiblePosition = listView.getFirstVisiblePosition();
+ for (int i = 0; i < listView.getChildCount(); ++i) {
+ int position = firstVisiblePosition + i;
+ E item = adapter.getItem(position);
+ View view = listView.getChildAt(i);
+ itemBitmapMap.put(item, getViewBitmap(context, view));
+ }
+ return itemBitmapMap;
+ }
+
+ private static BitmapDrawable getViewBitmap(Context context, View view) {
+ Bitmap bitmap = Bitmap.createBitmap(view.getWidth(), view.getHeight(),
+ Bitmap.Config.ARGB_8888);
+ Canvas canvas = new Canvas(bitmap);
+ view.draw(canvas);
+ return new BitmapDrawable(context.getResources(), bitmap);
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
new file mode 100644
index 0000000..02ee118
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteDiscoveryFragment.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.os.Bundle;
+import android.support.v4.app.Fragment;
+
+import com.android.support.mediarouter.media.MediaRouter;
+import com.android.support.mediarouter.media.MediaRouteSelector;
+
+/**
+ * Media route discovery fragment.
+ * <p>
+ * This fragment takes care of registering a callback for media route discovery
+ * during the {@link Fragment#onStart onStart()} phase
+ * and removing it during the {@link Fragment#onStop onStop()} phase.
+ * </p><p>
+ * The application must supply a route selector to specify the kinds of routes
+ * to discover. The application may also override {@link #onCreateCallback} to
+ * provide the {@link MediaRouter} callback to register.
+ * </p><p>
+ * Note that the discovery callback keeps the application connected to all the
+ * {@link android.support.v7.media.MediaRouteProviderService media route provider services}
+ * while it is registered.
+ * </p>
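+ * <p>
+ * A minimal usage sketch (the control category, fragment tag, and fragment manager call are
+ * illustrative):
+ * <pre>{@code
+ * MediaRouteDiscoveryFragment fragment = new MediaRouteDiscoveryFragment();
+ * fragment.setRouteSelector(new MediaRouteSelector.Builder()
+ *         .addControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK)
+ *         .build());
+ * getSupportFragmentManager().beginTransaction().add(fragment, "discovery").commit();
+ * }</pre>
+ * </p>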
+ */
+public class MediaRouteDiscoveryFragment extends Fragment {
+ private final String ARGUMENT_SELECTOR = "selector";
+
+ private MediaRouter mRouter;
+ private MediaRouteSelector mSelector;
+ private MediaRouter.Callback mCallback;
+
+ public MediaRouteDiscoveryFragment() {
+ }
+
+ /**
+ * Gets the media router instance.
+ */
+ public MediaRouter getMediaRouter() {
+ ensureRouter();
+ return mRouter;
+ }
+
+ private void ensureRouter() {
+ if (mRouter == null) {
+ mRouter = MediaRouter.getInstance(getContext());
+ }
+ }
+
+ /**
+ * Gets the media route selector for filtering the routes to be discovered.
+ *
+ * @return The selector, never null.
+ */
+ public MediaRouteSelector getRouteSelector() {
+ ensureRouteSelector();
+ return mSelector;
+ }
+
+ /**
+ * Sets the media route selector for filtering the routes to be discovered.
+ * This method must be called before the fragment is added.
+ *
+ * @param selector The selector to set.
+ */
+ public void setRouteSelector(MediaRouteSelector selector) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+
+ ensureRouteSelector();
+ if (!mSelector.equals(selector)) {
+ mSelector = selector;
+
+ Bundle args = getArguments();
+ if (args == null) {
+ args = new Bundle();
+ }
+ args.putBundle(ARGUMENT_SELECTOR, selector.asBundle());
+ setArguments(args);
+
+ if (mCallback != null) {
+ mRouter.removeCallback(mCallback);
+ mRouter.addCallback(mSelector, mCallback, onPrepareCallbackFlags());
+ }
+ }
+ }
+
+ private void ensureRouteSelector() {
+ if (mSelector == null) {
+ Bundle args = getArguments();
+ if (args != null) {
+ mSelector = MediaRouteSelector.fromBundle(args.getBundle(ARGUMENT_SELECTOR));
+ }
+ if (mSelector == null) {
+ mSelector = MediaRouteSelector.EMPTY;
+ }
+ }
+ }
+
+ /**
+ * Called to create the {@link android.support.v7.media.MediaRouter.Callback callback}
+ * that will be registered.
+ * <p>
+ * The default callback does nothing. The application may override this method to
+ * supply its own callback.
+ * </p>
+ *
+ * @return The new callback, or null if no callback should be registered.
+ */
+ public MediaRouter.Callback onCreateCallback() {
+ return new MediaRouter.Callback() { };
+ }
+
+ /**
+ * Called to prepare the callback flags that will be used when the
+ * {@link android.support.v7.media.MediaRouter.Callback callback} is registered.
+ * <p>
+ * The default implementation returns {@link MediaRouter#CALLBACK_FLAG_REQUEST_DISCOVERY}.
+ * </p>
+ *
+ * @return The desired callback flags.
+ */
+ public int onPrepareCallbackFlags() {
+ return MediaRouter.CALLBACK_FLAG_REQUEST_DISCOVERY;
+ }
+
+ @Override
+ public void onStart() {
+ super.onStart();
+
+ ensureRouteSelector();
+ ensureRouter();
+ mCallback = onCreateCallback();
+ if (mCallback != null) {
+ mRouter.addCallback(mSelector, mCallback, onPrepareCallbackFlags());
+ }
+ }
+
+ @Override
+ public void onStop() {
+ if (mCallback != null) {
+ mRouter.removeCallback(mCallback);
+ mCallback = null;
+ }
+
+ super.onStop();
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
new file mode 100644
index 0000000..6a0a95a
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteExpandCollapseButton.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.content.Context;
+import android.graphics.ColorFilter;
+import android.graphics.PorterDuff;
+import android.graphics.PorterDuffColorFilter;
+import android.graphics.drawable.AnimationDrawable;
+import android.support.v4.content.ContextCompat;
+import android.util.AttributeSet;
+import android.view.View;
+import android.widget.ImageButton;
+
+import com.android.media.update.ApiHelper;
+import com.android.media.update.R;
+
+/**
+ * Chevron/Caret button to expand/collapse group volume list with animation.
+ */
+public class MediaRouteExpandCollapseButton extends ImageButton {
+ final AnimationDrawable mExpandAnimationDrawable;
+ final AnimationDrawable mCollapseAnimationDrawable;
+ final String mExpandGroupDescription;
+ final String mCollapseGroupDescription;
+ boolean mIsGroupExpanded;
+ OnClickListener mListener;
+
+ public MediaRouteExpandCollapseButton(Context context) {
+ this(context, null);
+ }
+
+ public MediaRouteExpandCollapseButton(Context context, AttributeSet attrs) {
+ this(context, attrs, 0);
+ }
+
+ public MediaRouteExpandCollapseButton(Context context, AttributeSet attrs, int defStyleAttr) {
+ super(context, attrs, defStyleAttr);
+ mExpandAnimationDrawable = (AnimationDrawable)
+ ApiHelper.getLibResources(context).getDrawable(R.drawable.mr_group_expand);
+ mCollapseAnimationDrawable = (AnimationDrawable)
+ ApiHelper.getLibResources(context).getDrawable(R.drawable.mr_group_collapse);
+
+ ColorFilter filter = new PorterDuffColorFilter(
+ MediaRouterThemeHelper.getControllerColor(context, defStyleAttr),
+ PorterDuff.Mode.SRC_IN);
+ mExpandAnimationDrawable.setColorFilter(filter);
+ mCollapseAnimationDrawable.setColorFilter(filter);
+
+ mExpandGroupDescription =
+ ApiHelper.getLibResources(context).getString(R.string.mr_controller_expand_group);
+ mCollapseGroupDescription =
+ ApiHelper.getLibResources(context).getString(R.string.mr_controller_collapse_group);
+
+ setImageDrawable(mExpandAnimationDrawable.getFrame(0));
+ setContentDescription(mExpandGroupDescription);
+
+ super.setOnClickListener(new OnClickListener() {
+ @Override
+ public void onClick(View view) {
+ mIsGroupExpanded = !mIsGroupExpanded;
+ if (mIsGroupExpanded) {
+ setImageDrawable(mExpandAnimationDrawable);
+ mExpandAnimationDrawable.start();
+ setContentDescription(mCollapseGroupDescription);
+ } else {
+ setImageDrawable(mCollapseAnimationDrawable);
+ mCollapseAnimationDrawable.start();
+ setContentDescription(mExpandGroupDescription);
+ }
+ if (mListener != null) {
+ mListener.onClick(view);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void setOnClickListener(OnClickListener listener) {
+ mListener = listener;
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteVolumeSlider.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteVolumeSlider.java
new file mode 100644
index 0000000..d05d20e
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouteVolumeSlider.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.content.Context;
+import android.graphics.Color;
+import android.graphics.PorterDuff;
+import android.graphics.drawable.Drawable;
+import android.util.AttributeSet;
+import android.util.Log;
+import android.widget.SeekBar;
+
+/**
+ * Volume slider with showing, hiding, and applying alpha supports to the thumb.
+ */
+public class MediaRouteVolumeSlider extends SeekBar {
+ private static final String TAG = "MediaRouteVolumeSlider";
+
+ private final float mDisabledAlpha;
+
+ private boolean mHideThumb;
+ private Drawable mThumb;
+ private int mColor;
+
+ public MediaRouteVolumeSlider(Context context) {
+ this(context, null);
+ }
+
+ public MediaRouteVolumeSlider(Context context, AttributeSet attrs) {
+ super(context, attrs);
+ mDisabledAlpha = MediaRouterThemeHelper.getDisabledAlpha(context);
+ }
+
+ public MediaRouteVolumeSlider(Context context, AttributeSet attrs, int defStyleAttr) {
+ super(context, attrs, defStyleAttr);
+ mDisabledAlpha = MediaRouterThemeHelper.getDisabledAlpha(context);
+ }
+
+ @Override
+ protected void drawableStateChanged() {
+ super.drawableStateChanged();
+ int alpha = isEnabled() ? 0xFF : (int) (0xFF * mDisabledAlpha);
+
+        // The thumb drawable is a collection of drawables whose current drawable changes with the
+        // state. Apply the color filter and alpha on every state change.
+ if (mThumb != null) {
+ mThumb.setColorFilter(mColor, PorterDuff.Mode.SRC_IN);
+ mThumb.setAlpha(alpha);
+ }
+
+ getProgressDrawable().setColorFilter(mColor, PorterDuff.Mode.SRC_IN);
+ getProgressDrawable().setAlpha(alpha);
+ }
+
+ @Override
+ public void setThumb(Drawable thumb) {
+ mThumb = thumb;
+ super.setThumb(mHideThumb ? null : mThumb);
+ }
+
+ /**
+     * Sets whether to show or hide the thumb.
+ */
+ public void setHideThumb(boolean hideThumb) {
+ if (mHideThumb == hideThumb) {
+ return;
+ }
+ mHideThumb = hideThumb;
+ super.setThumb(mHideThumb ? null : mThumb);
+ }
+
+ /**
+     * Sets the volume slider color. The change takes effect the next time the drawable state changes.
+ * <p>
+ * The color cannot be translucent, otherwise the underlying progress bar will be seen through
+ * the thumb.
+ * </p>
+ */
+ public void setColor(int color) {
+ if (mColor == color) {
+ return;
+ }
+ if (Color.alpha(color) != 0xFF) {
+ Log.e(TAG, "Volume slider color cannot be translucent: #" + Integer.toHexString(color));
+ }
+ mColor = color;
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
new file mode 100644
index 0000000..63f042f
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/MediaRouterThemeHelper.java
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.content.Context;
+import android.content.res.TypedArray;
+import android.graphics.Color;
+import android.support.annotation.IntDef;
+import android.support.v4.graphics.ColorUtils;
+import android.util.TypedValue;
+import android.view.ContextThemeWrapper;
+import android.view.View;
+
+import com.android.media.update.R;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+final class MediaRouterThemeHelper {
+ private static final float MIN_CONTRAST = 3.0f;
+
+ @IntDef({COLOR_DARK_ON_LIGHT_BACKGROUND, COLOR_WHITE_ON_DARK_BACKGROUND})
+ @Retention(RetentionPolicy.SOURCE)
+ private @interface ControllerColorType {}
+
+ static final int COLOR_DARK_ON_LIGHT_BACKGROUND = 0xDE000000; /* Opacity of 87% */
+ static final int COLOR_WHITE_ON_DARK_BACKGROUND = Color.WHITE;
+
+ private MediaRouterThemeHelper() {
+ }
+
+ static Context createThemedButtonContext(Context context) {
+ // Apply base Media Router theme.
+ context = new ContextThemeWrapper(context, getRouterThemeId(context));
+
+ // Apply custom Media Router theme.
+ int style = getThemeResource(context, R.attr.mediaRouteTheme);
+ if (style != 0) {
+ context = new ContextThemeWrapper(context, style);
+ }
+
+ return context;
+ }
+
+ /*
+ * The following two methods are to be used in conjunction. They should be used to prepare
+ * the context and theme for a super class constructor (the latter method relies on the
+ * former method to properly prepare the context):
+ * super(context = createThemedDialogContext(context, theme),
+ * createThemedDialogStyle(context));
+ *
+ * It will apply theme in the following order (style lookups will be done in reverse):
+ * 1) Current theme
+ * 2) Supplied theme
+ * 3) Base Media Router theme
+ * 4) Custom Media Router theme, if provided
+ */
+ static Context createThemedDialogContext(Context context, int theme, boolean alertDialog) {
+ // 1) Current theme is already applied to the context
+
+ // 2) If no theme is supplied, look it up from the context (dialogTheme/alertDialogTheme)
+ if (theme == 0) {
+ theme = getThemeResource(context,
+ !alertDialog ? android.R.attr.dialogTheme : android.R.attr.alertDialogTheme);
+ }
+ // Apply it
+ context = new ContextThemeWrapper(context, theme);
+
+ // 3) If a custom Media Router theme is provided then apply the base theme
+ if (getThemeResource(context, R.attr.mediaRouteTheme) != 0) {
+ context = new ContextThemeWrapper(context, getRouterThemeId(context));
+ }
+
+ return context;
+ }
+ // This method should be used in conjunction with the previous method.
+ static int createThemedDialogStyle(Context context) {
+ // 4) Apply the custom Media Router theme
+ int theme = getThemeResource(context, R.attr.mediaRouteTheme);
+ if (theme == 0) {
+ // 3) No custom MediaRouter theme was provided so apply the base theme instead
+ theme = getRouterThemeId(context);
+ }
+
+ return theme;
+ }
+ // END. Previous two methods should be used in conjunction.
+
+ static int getThemeResource(Context context, int attr) {
+ TypedValue value = new TypedValue();
+ return context.getTheme().resolveAttribute(attr, value, true) ? value.resourceId : 0;
+ }
+
+ static float getDisabledAlpha(Context context) {
+ TypedValue value = new TypedValue();
+ return context.getTheme().resolveAttribute(android.R.attr.disabledAlpha, value, true)
+ ? value.getFloat() : 0.5f;
+ }
+
+ static @ControllerColorType int getControllerColor(Context context, int style) {
+ int primaryColor = getThemeColor(context, style, android.R.attr.colorPrimary);
+        if (primaryColor == 0) {
+            // colorPrimary is not set in the theme; fall back to opaque black.
+            primaryColor = 0xFF000000;
+        }
+ if (ColorUtils.calculateContrast(COLOR_WHITE_ON_DARK_BACKGROUND, primaryColor)
+ >= MIN_CONTRAST) {
+ return COLOR_WHITE_ON_DARK_BACKGROUND;
+ }
+ return COLOR_DARK_ON_LIGHT_BACKGROUND;
+ }
+
+ static int getButtonTextColor(Context context) {
+ int primaryColor = getThemeColor(context, 0, android.R.attr.colorPrimary);
+ int backgroundColor = getThemeColor(context, 0, android.R.attr.colorBackground);
+
+ if (ColorUtils.calculateContrast(primaryColor, backgroundColor) < MIN_CONTRAST) {
+ // Default to colorAccent if the contrast ratio is low.
+ return getThemeColor(context, 0, android.R.attr.colorAccent);
+ }
+ return primaryColor;
+ }
+
+ static void setMediaControlsBackgroundColor(
+ Context context, View mainControls, View groupControls, boolean hasGroup) {
+ int primaryColor = getThemeColor(context, 0, android.R.attr.colorPrimary);
+ int primaryDarkColor = getThemeColor(context, 0, android.R.attr.colorPrimaryDark);
+ if (hasGroup && getControllerColor(context, 0) == COLOR_DARK_ON_LIGHT_BACKGROUND) {
+            // Instead of showing dark controls on a possibly dark background (i.e. the primary
+            // dark color), model after the white dialog and use the primary color for the group
+            // controls.
+ primaryDarkColor = primaryColor;
+ primaryColor = Color.WHITE;
+ }
+ mainControls.setBackgroundColor(primaryColor);
+ groupControls.setBackgroundColor(primaryDarkColor);
+ // Also store the background colors to the view tags. They are used in
+ // setVolumeSliderColor() below.
+ mainControls.setTag(primaryColor);
+ groupControls.setTag(primaryDarkColor);
+ }
+
+ static void setVolumeSliderColor(
+ Context context, MediaRouteVolumeSlider volumeSlider, View backgroundView) {
+ int controllerColor = getControllerColor(context, 0);
+ if (Color.alpha(controllerColor) != 0xFF) {
+ // Composite with the background in order not to show the underlying progress bar
+ // through the thumb.
+ int backgroundColor = (int) backgroundView.getTag();
+ controllerColor = ColorUtils.compositeColors(controllerColor, backgroundColor);
+ }
+ volumeSlider.setColor(controllerColor);
+ }
+
+ private static boolean isLightTheme(Context context) {
+ TypedValue value = new TypedValue();
+ // TODO(sungsoo): Switch to com.android.internal.R.attr.isLightTheme
+ return context.getTheme().resolveAttribute(android.support.v7.appcompat.R.attr.isLightTheme,
+ value, true) && value.data != 0;
+ }
+
+ private static int getThemeColor(Context context, int style, int attr) {
+ if (style != 0) {
+ int[] attrs = { attr };
+ TypedArray ta = context.obtainStyledAttributes(style, attrs);
+ int color = ta.getColor(0, 0);
+ ta.recycle();
+ if (color != 0) {
+ return color;
+ }
+ }
+ TypedValue value = new TypedValue();
+ context.getTheme().resolveAttribute(attr, value, true);
+ if (value.resourceId != 0) {
+ return context.getResources().getColor(value.resourceId);
+ }
+ return value.data;
+ }
+
+ static int getRouterThemeId(Context context) {
+ int themeId;
+ if (isLightTheme(context)) {
+ if (getControllerColor(context, 0) == COLOR_DARK_ON_LIGHT_BACKGROUND) {
+ themeId = R.style.Theme_MediaRouter_Light;
+ } else {
+ themeId = R.style.Theme_MediaRouter_Light_DarkControlPanel;
+ }
+ } else {
+ if (getControllerColor(context, 0) == COLOR_DARK_ON_LIGHT_BACKGROUND) {
+ themeId = R.style.Theme_MediaRouter_LightControlPanel;
+ } else {
+ themeId = R.style.Theme_MediaRouter;
+ }
+ }
+ return themeId;
+ }
+}
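
Editor's note: the contrast check in getControllerColor() and getButtonTextColor() above follows the WCAG contrast-ratio rule that ColorUtils.calculateContrast() implements. Below is a minimal, self-contained sketch of that decision; the MIN_CONTRAST value of 3.0 and the 87%-black dark color are assumptions based on support library defaults, not values taken from this commit.

// Sketch only; mirrors the light-vs-dark controller color decision, not part of the commit.
public final class ControllerColorSketch {
    // Assumed to match the support library defaults.
    private static final double MIN_CONTRAST = 3.0;
    private static final int COLOR_WHITE_ON_DARK_BACKGROUND = 0xFFFFFFFF; // opaque white
    private static final int COLOR_DARK_ON_LIGHT_BACKGROUND = 0xDE000000; // 87% black

    /** Picks light or dark controls depending on contrast against the theme's primary color. */
    static int pickControllerColor(int primaryColor) {
        return contrast(COLOR_WHITE_ON_DARK_BACKGROUND, primaryColor) >= MIN_CONTRAST
                ? COLOR_WHITE_ON_DARK_BACKGROUND : COLOR_DARK_ON_LIGHT_BACKGROUND;
    }

    // WCAG contrast ratio of two opaque colors, comparable to ColorUtils.calculateContrast().
    static double contrast(int fg, int bg) {
        double lighter = Math.max(luminance(fg), luminance(bg));
        double darker = Math.min(luminance(fg), luminance(bg));
        return (lighter + 0.05) / (darker + 0.05);
    }

    static double luminance(int color) {
        return 0.2126 * channel((color >> 16) & 0xFF)
                + 0.7152 * channel((color >> 8) & 0xFF)
                + 0.0722 * channel(color & 0xFF);
    }

    static double channel(int c) {
        double v = c / 255.0;
        return v <= 0.03928 ? v / 12.92 : Math.pow((v + 0.055) / 1.055, 2.4);
    }

    public static void main(String[] args) {
        // A dark indigo primary keeps white controls; a bright yellow primary switches to dark.
        System.out.println(Integer.toHexString(pickControllerColor(0xFF3F51B5)));
        System.out.println(Integer.toHexString(pickControllerColor(0xFFFFEB3B)));
    }
}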
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/app/OverlayListView.java b/packages/MediaComponents/src/com/android/support/mediarouter/app/OverlayListView.java
new file mode 100644
index 0000000..b00dee2
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/app/OverlayListView.java
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.app;
+
+import android.content.Context;
+import android.graphics.Canvas;
+import android.graphics.Rect;
+import android.graphics.drawable.BitmapDrawable;
+import android.util.AttributeSet;
+import android.view.animation.Interpolator;
+import android.widget.ListView;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * A ListView which has an additional overlay layer. {@link BitmapDrawable}
+ * can be added to the layer and can be animated.
+ */
+public final class OverlayListView extends ListView {
+ private final List<OverlayObject> mOverlayObjects = new ArrayList<>();
+
+ public OverlayListView(Context context) {
+ super(context);
+ }
+
+ public OverlayListView(Context context, AttributeSet attrs) {
+ super(context, attrs);
+ }
+
+ public OverlayListView(Context context, AttributeSet attrs, int defStyleAttr) {
+ super(context, attrs, defStyleAttr);
+ }
+
+ /**
+ * Adds an object to the overlay layer.
+ *
+ * @param object An object to be added.
+ */
+ public void addOverlayObject(OverlayObject object) {
+ mOverlayObjects.add(object);
+ }
+
+ /**
+ * Starts all animations of objects in the overlay layer.
+ */
+ public void startAnimationAll() {
+ for (OverlayObject object : mOverlayObjects) {
+ if (!object.isAnimationStarted()) {
+ object.startAnimation(getDrawingTime());
+ }
+ }
+ }
+
+ /**
+ * Stops all animations of objects in the overlay layer.
+ */
+ public void stopAnimationAll() {
+ for (OverlayObject object : mOverlayObjects) {
+ object.stopAnimation();
+ }
+ }
+
+ @Override
+ public void onDraw(Canvas canvas) {
+ super.onDraw(canvas);
+ if (mOverlayObjects.size() > 0) {
+ Iterator<OverlayObject> it = mOverlayObjects.iterator();
+ while (it.hasNext()) {
+ OverlayObject object = it.next();
+ BitmapDrawable bitmap = object.getBitmapDrawable();
+ if (bitmap != null) {
+ bitmap.draw(canvas);
+ }
+ if (!object.update(getDrawingTime())) {
+ it.remove();
+ }
+ }
+ }
+ }
+
+ /**
+ * A class that represents an object to be shown in the overlay layer.
+ */
+ public static class OverlayObject {
+ private BitmapDrawable mBitmap;
+ private float mCurrentAlpha = 1.0f;
+ private Rect mCurrentBounds;
+ private Interpolator mInterpolator;
+ private long mDuration;
+ private Rect mStartRect;
+ private int mDeltaY;
+ private float mStartAlpha = 1.0f;
+ private float mEndAlpha = 1.0f;
+ private long mStartTime;
+ private boolean mIsAnimationStarted;
+ private boolean mIsAnimationEnded;
+ private OnAnimationEndListener mListener;
+
+ public OverlayObject(BitmapDrawable bitmap, Rect startRect) {
+ mBitmap = bitmap;
+ mStartRect = startRect;
+ mCurrentBounds = new Rect(startRect);
+ if (mBitmap != null && mCurrentBounds != null) {
+ mBitmap.setAlpha((int) (mCurrentAlpha * 255));
+ mBitmap.setBounds(mCurrentBounds);
+ }
+ }
+
+ /**
+ * Returns the bitmap that this object represents.
+ *
+ * @return BitmapDrawable that this object has.
+ */
+ public BitmapDrawable getBitmapDrawable() {
+ return mBitmap;
+ }
+
+ /**
+ * Returns the started status of the animation.
+ *
+ * @return True if the animation has started, false otherwise.
+ */
+ public boolean isAnimationStarted() {
+ return mIsAnimationStarted;
+ }
+
+ /**
+ * Sets animation for varying alpha.
+ *
+ * @param startAlpha Starting alpha value for the animation, where 1.0 means
+ * fully opaque and 0.0 means fully transparent.
+ * @param endAlpha Ending alpha value for the animation.
+ * @return This OverlayObject to allow for chaining of calls.
+ */
+ public OverlayObject setAlphaAnimation(float startAlpha, float endAlpha) {
+ mStartAlpha = startAlpha;
+ mEndAlpha = endAlpha;
+ return this;
+ }
+
+ /**
+ * Sets animation for moving objects vertically.
+ *
+ * @param deltaY Distance to move in pixels.
+ * @return This OverlayObject to allow for chaining of calls.
+ */
+ public OverlayObject setTranslateYAnimation(int deltaY) {
+ mDeltaY = deltaY;
+ return this;
+ }
+
+ /**
+ * Sets how long the animation will last.
+ *
+ * @param duration Duration in milliseconds
+ * @return This OverlayObject to allow for chaining of calls.
+ */
+ public OverlayObject setDuration(long duration) {
+ mDuration = duration;
+ return this;
+ }
+
+ /**
+ * Sets the acceleration curve for this animation.
+ *
+ * @param interpolator The interpolator which defines the acceleration curve
+ * @return This OverlayObject to allow for chaining of calls.
+ */
+ public OverlayObject setInterpolator(Interpolator interpolator) {
+ mInterpolator = interpolator;
+ return this;
+ }
+
+ /**
+ * Binds an animation end listener to the animation.
+ *
+ * @param listener the animation end listener to be notified.
+ * @return This OverlayObject to allow for chaining of calls.
+ */
+ public OverlayObject setAnimationEndListener(OnAnimationEndListener listener) {
+ mListener = listener;
+ return this;
+ }
+
+ /**
+ * Starts the animation and sets the start time.
+ *
+ * @param startTime Start time to be set in milliseconds.
+ */
+ public void startAnimation(long startTime) {
+ mStartTime = startTime;
+ mIsAnimationStarted = true;
+ }
+
+ /**
+ * Stops the animation.
+ */
+ public void stopAnimation() {
+ mIsAnimationStarted = true;
+ mIsAnimationEnded = true;
+ if (mListener != null) {
+ mListener.onAnimationEnd();
+ }
+ }
+
+ /**
+ * Calculates and updates current bounds and alpha value.
+ *
+ * @param currentTime Current time in milliseconds.
+ * @return False once the animation has ended and the object can be removed; true otherwise.
+ */
+ public boolean update(long currentTime) {
+ if (mIsAnimationEnded) {
+ return false;
+ }
+ float normalizedTime = (currentTime - mStartTime) / (float) mDuration;
+ normalizedTime = Math.max(0.0f, Math.min(1.0f, normalizedTime));
+ if (!mIsAnimationStarted) {
+ normalizedTime = 0.0f;
+ }
+ float interpolatedTime = (mInterpolator == null) ? normalizedTime
+ : mInterpolator.getInterpolation(normalizedTime);
+ int deltaY = (int) (mDeltaY * interpolatedTime);
+ mCurrentBounds.top = mStartRect.top + deltaY;
+ mCurrentBounds.bottom = mStartRect.bottom + deltaY;
+ mCurrentAlpha = mStartAlpha + (mEndAlpha - mStartAlpha) * interpolatedTime;
+ if (mBitmap != null && mCurrentBounds != null) {
+ mBitmap.setAlpha((int) (mCurrentAlpha * 255));
+ mBitmap.setBounds(mCurrentBounds);
+ }
+ if (mIsAnimationStarted && normalizedTime >= 1.0f) {
+ mIsAnimationEnded = true;
+ if (mListener != null) {
+ mListener.onAnimationEnd();
+ }
+ }
+ return !mIsAnimationEnded;
+ }
+
+ /**
+ * An animation listener that receives notifications when the animation ends.
+ */
+ public interface OnAnimationEndListener {
+ /**
+ * Notifies the end of the animation.
+ */
+ public void onAnimationEnd();
+ }
+ }
+}
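
Editor's note: a short sketch of how OverlayListView and its fluent OverlayObject builder might be driven by a caller. The bitmap, bounds, duration, and listener here are illustrative placeholders; only the OverlayListView APIs shown above are relied on.

import android.graphics.Rect;
import android.graphics.drawable.BitmapDrawable;
import android.view.animation.AccelerateDecelerateInterpolator;

import com.android.support.mediarouter.app.OverlayListView;

final class OverlayUsageSketch {
    // Fades a captured row bitmap out while sliding it up by its own height.
    static void fadeAndSlideUp(OverlayListView listView, BitmapDrawable rowBitmap, Rect startBounds) {
        OverlayListView.OverlayObject ghost =
                new OverlayListView.OverlayObject(rowBitmap, startBounds)
                        .setAlphaAnimation(1.0f, 0.0f)
                        .setTranslateYAnimation(-startBounds.height())
                        .setDuration(250) // duration is an arbitrary choice
                        .setInterpolator(new AccelerateDecelerateInterpolator())
                        .setAnimationEndListener(new OverlayListView.OverlayObject.OnAnimationEndListener() {
                            @Override
                            public void onAnimationEnd() {
                                // onDraw() drops the object once update() reports the animation ended.
                            }
                        });
        listView.addOverlayObject(ghost);
        listView.startAnimationAll(); // drawing is driven by the list's normal draw passes
    }
}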
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
new file mode 100644
index 0000000..f8539bd
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr1/MediaRouterJellybeanMr1.java
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.content.Context;
+import android.hardware.display.DisplayManager;
+import android.os.Build;
+import android.os.Handler;
+import android.support.annotation.RequiresApi;
+import android.util.Log;
+import android.view.Display;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+// @@RequiresApi(17)
+final class MediaRouterJellybeanMr1 {
+ private static final String TAG = "MediaRouterJellybeanMr1";
+
+ public static Object createCallback(Callback callback) {
+ return new CallbackProxy<Callback>(callback);
+ }
+
+ public static final class RouteInfo {
+ public static boolean isEnabled(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).isEnabled();
+ }
+
+ public static Display getPresentationDisplay(Object routeObj) {
+ // android.media.MediaRouter.RouteInfo.getPresentationDisplay() was
+ // added in API 17. However, some factory releases of JB MR1 missed it.
+ try {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getPresentationDisplay();
+ } catch (NoSuchMethodError ex) {
+ Log.w(TAG, "Cannot get presentation display for the route.", ex);
+ }
+ return null;
+ }
+ }
+
+ public static interface Callback extends MediaRouterJellybean.Callback {
+ public void onRoutePresentationDisplayChanged(Object routeObj);
+ }
+
+ /**
+ * Workaround the fact that the version of MediaRouter.addCallback() that accepts a
+ * flag to perform an active scan does not exist in JB MR1 so we need to force
+ * wifi display scans directly through the DisplayManager.
+ * Do not use on JB MR2 and above.
+ */
+ public static final class ActiveScanWorkaround implements Runnable {
+ // Time between wifi display scans when actively scanning in milliseconds.
+ private static final int WIFI_DISPLAY_SCAN_INTERVAL = 15000;
+
+ private final DisplayManager mDisplayManager;
+ private final Handler mHandler;
+ private Method mScanWifiDisplaysMethod;
+
+ private boolean mActivelyScanningWifiDisplays;
+
+ public ActiveScanWorkaround(Context context, Handler handler) {
+ if (Build.VERSION.SDK_INT != 17) {
+ throw new UnsupportedOperationException();
+ }
+
+ mDisplayManager = (DisplayManager) context.getSystemService(Context.DISPLAY_SERVICE);
+ mHandler = handler;
+ try {
+ mScanWifiDisplaysMethod = DisplayManager.class.getMethod("scanWifiDisplays");
+ } catch (NoSuchMethodException ex) {
+ }
+ }
+
+ public void setActiveScanRouteTypes(int routeTypes) {
+ // On JB MR1, there is no API to scan wifi display routes.
+ // Instead we must make a direct call into the DisplayManager to scan
+ // wifi displays on this version but only when live video routes are requested.
+ // See also the JellybeanMr2Impl implementation of this method.
+ // This was fixed in JB MR2 by adding a new overload of addCallback() to
+ // enable active scanning on request.
+ if ((routeTypes & MediaRouterJellybean.ROUTE_TYPE_LIVE_VIDEO) != 0) {
+ if (!mActivelyScanningWifiDisplays) {
+ if (mScanWifiDisplaysMethod != null) {
+ mActivelyScanningWifiDisplays = true;
+ mHandler.post(this);
+ } else {
+ Log.w(TAG, "Cannot scan for wifi displays because the "
+ + "DisplayManager.scanWifiDisplays() method is "
+ + "not available on this device.");
+ }
+ }
+ } else {
+ if (mActivelyScanningWifiDisplays) {
+ mActivelyScanningWifiDisplays = false;
+ mHandler.removeCallbacks(this);
+ }
+ }
+ }
+
+ @Override
+ public void run() {
+ if (mActivelyScanningWifiDisplays) {
+ try {
+ mScanWifiDisplaysMethod.invoke(mDisplayManager);
+ } catch (IllegalAccessException ex) {
+ Log.w(TAG, "Cannot scan for wifi displays.", ex);
+ } catch (InvocationTargetException ex) {
+ Log.w(TAG, "Cannot scan for wifi displays.", ex);
+ }
+ mHandler.postDelayed(this, WIFI_DISPLAY_SCAN_INTERVAL);
+ }
+ }
+ }
+
+ /**
+ * Workaround the fact that the isConnecting() method does not exist in JB MR1.
+ * Do not use on JB MR2 and above.
+ */
+ public static final class IsConnectingWorkaround {
+ private Method mGetStatusCodeMethod;
+ private int mStatusConnecting;
+
+ public IsConnectingWorkaround() {
+ if (Build.VERSION.SDK_INT != 17) {
+ throw new UnsupportedOperationException();
+ }
+
+ try {
+ Field statusConnectingField =
+ android.media.MediaRouter.RouteInfo.class.getField("STATUS_CONNECTING");
+ mStatusConnecting = statusConnectingField.getInt(null);
+ mGetStatusCodeMethod =
+ android.media.MediaRouter.RouteInfo.class.getMethod("getStatusCode");
+ } catch (NoSuchFieldException ex) {
+ } catch (NoSuchMethodException ex) {
+ } catch (IllegalAccessException ex) {
+ }
+ }
+
+ public boolean isConnecting(Object routeObj) {
+ android.media.MediaRouter.RouteInfo route =
+ (android.media.MediaRouter.RouteInfo)routeObj;
+
+ if (mGetStatusCodeMethod != null) {
+ try {
+ int statusCode = (Integer)mGetStatusCodeMethod.invoke(route);
+ return statusCode == mStatusConnecting;
+ } catch (IllegalAccessException ex) {
+ } catch (InvocationTargetException ex) {
+ }
+ }
+
+ // Assume not connecting.
+ return false;
+ }
+ }
+
+ static class CallbackProxy<T extends Callback>
+ extends MediaRouterJellybean.CallbackProxy<T> {
+ public CallbackProxy(T callback) {
+ super(callback);
+ }
+
+ @Override
+ public void onRoutePresentationDisplayChanged(android.media.MediaRouter router,
+ android.media.MediaRouter.RouteInfo route) {
+ mCallback.onRoutePresentationDisplayChanged(route);
+ }
+ }
+}
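
Editor's note: a hedged sketch of wiring up the JB MR1 active-scan workaround above. The shim classes are package-private, so this would have to live in com.android.support.mediarouter.media; the context and handler are placeholders.

package com.android.support.mediarouter.media; // shim classes are package-private

import android.content.Context;
import android.os.Build;
import android.os.Handler;

final class JellybeanMr1UsageSketch {
    // Starts periodic wifi display scans on API 17 only; later releases use addCallback() flags.
    static void requestActiveVideoScan(Context context, Handler handler) {
        if (Build.VERSION.SDK_INT == 17) {
            MediaRouterJellybeanMr1.ActiveScanWorkaround workaround =
                    new MediaRouterJellybeanMr1.ActiveScanWorkaround(context, handler);
            workaround.setActiveScanRouteTypes(MediaRouterJellybean.ROUTE_TYPE_LIVE_VIDEO);
        }
    }
}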
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr2/MediaRouterJellybeanMr2.java b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr2/MediaRouterJellybeanMr2.java
new file mode 100644
index 0000000..1103549
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean-mr2/MediaRouterJellybeanMr2.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+// @@RequiresApi(18)
+final class MediaRouterJellybeanMr2 {
+ public static Object getDefaultRoute(Object routerObj) {
+ return ((android.media.MediaRouter)routerObj).getDefaultRoute();
+ }
+
+ public static void addCallback(Object routerObj, int types, Object callbackObj, int flags) {
+ ((android.media.MediaRouter)routerObj).addCallback(types,
+ (android.media.MediaRouter.Callback)callbackObj, flags);
+ }
+
+ public static final class RouteInfo {
+ public static CharSequence getDescription(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getDescription();
+ }
+
+ public static boolean isConnecting(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).isConnecting();
+ }
+ }
+
+ public static final class UserRouteInfo {
+ public static void setDescription(Object routeObj, CharSequence description) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setDescription(description);
+ }
+ }
+}
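
Editor's note: on API 18 and above the framework overload of addCallback() replaces the MR1 workaround. A minimal sketch, assuming the package-private shims are accessed from the same package and the callback object came from MediaRouterJellybean.createCallback():

package com.android.support.mediarouter.media; // shim classes are package-private

import android.content.Context;
import android.media.MediaRouter;

final class JellybeanMr2UsageSketch {
    // On API 18+ the framework overload accepts a flags argument, so no DisplayManager
    // reflection is needed to trigger active scanning.
    static void addActiveScanCallback(Context context, Object callbackObj) {
        Object router = MediaRouterJellybean.getMediaRouter(context);
        MediaRouterJellybeanMr2.addCallback(router, MediaRouterJellybean.ALL_ROUTE_TYPES,
                callbackObj, MediaRouter.CALLBACK_FLAG_PERFORM_ACTIVE_SCAN);
    }
}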
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/jellybean/MediaRouterJellybean.java b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean/MediaRouterJellybean.java
new file mode 100644
index 0000000..0bb59b8
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/jellybean/MediaRouterJellybean.java
@@ -0,0 +1,462 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.content.Context;
+import android.graphics.drawable.Drawable;
+import android.media.AudioManager;
+import android.os.Build;
+import android.util.Log;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+
+// @@RequiresApi(16)
+final class MediaRouterJellybean {
+ private static final String TAG = "MediaRouterJellybean";
+
+ // android.media.AudioSystem.DEVICE_OUT_BLUETOOTH_A2DP = 0x80;
+ // android.media.AudioSystem.DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100;
+ // android.media.AudioSystem.DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200;
+ public static final int DEVICE_OUT_BLUETOOTH = 0x80 | 0x100 | 0x200;
+
+ public static final int ROUTE_TYPE_LIVE_AUDIO = 0x1;
+ public static final int ROUTE_TYPE_LIVE_VIDEO = 0x2;
+ public static final int ROUTE_TYPE_USER = 0x00800000;
+
+ public static final int ALL_ROUTE_TYPES =
+ MediaRouterJellybean.ROUTE_TYPE_LIVE_AUDIO
+ | MediaRouterJellybean.ROUTE_TYPE_LIVE_VIDEO
+ | MediaRouterJellybean.ROUTE_TYPE_USER;
+
+ public static Object getMediaRouter(Context context) {
+ return context.getSystemService(Context.MEDIA_ROUTER_SERVICE);
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public static List getRoutes(Object routerObj) {
+ final android.media.MediaRouter router = (android.media.MediaRouter)routerObj;
+ final int count = router.getRouteCount();
+ List out = new ArrayList(count);
+ for (int i = 0; i < count; i++) {
+ out.add(router.getRouteAt(i));
+ }
+ return out;
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public static List getCategories(Object routerObj) {
+ final android.media.MediaRouter router = (android.media.MediaRouter)routerObj;
+ final int count = router.getCategoryCount();
+ List out = new ArrayList(count);
+ for (int i = 0; i < count; i++) {
+ out.add(router.getCategoryAt(i));
+ }
+ return out;
+ }
+
+ public static Object getSelectedRoute(Object routerObj, int type) {
+ return ((android.media.MediaRouter)routerObj).getSelectedRoute(type);
+ }
+
+ public static void selectRoute(Object routerObj, int types, Object routeObj) {
+ ((android.media.MediaRouter)routerObj).selectRoute(types,
+ (android.media.MediaRouter.RouteInfo)routeObj);
+ }
+
+ public static void addCallback(Object routerObj, int types, Object callbackObj) {
+ ((android.media.MediaRouter)routerObj).addCallback(types,
+ (android.media.MediaRouter.Callback)callbackObj);
+ }
+
+ public static void removeCallback(Object routerObj, Object callbackObj) {
+ ((android.media.MediaRouter)routerObj).removeCallback(
+ (android.media.MediaRouter.Callback)callbackObj);
+ }
+
+ public static Object createRouteCategory(Object routerObj,
+ String name, boolean isGroupable) {
+ return ((android.media.MediaRouter)routerObj).createRouteCategory(name, isGroupable);
+ }
+
+ public static Object createUserRoute(Object routerObj, Object categoryObj) {
+ return ((android.media.MediaRouter)routerObj).createUserRoute(
+ (android.media.MediaRouter.RouteCategory)categoryObj);
+ }
+
+ public static void addUserRoute(Object routerObj, Object routeObj) {
+ ((android.media.MediaRouter)routerObj).addUserRoute(
+ (android.media.MediaRouter.UserRouteInfo)routeObj);
+ }
+
+ public static void removeUserRoute(Object routerObj, Object routeObj) {
+ ((android.media.MediaRouter)routerObj).removeUserRoute(
+ (android.media.MediaRouter.UserRouteInfo)routeObj);
+ }
+
+ public static Object createCallback(Callback callback) {
+ return new CallbackProxy<Callback>(callback);
+ }
+
+ public static Object createVolumeCallback(VolumeCallback callback) {
+ return new VolumeCallbackProxy<VolumeCallback>(callback);
+ }
+
+ static boolean checkRoutedToBluetooth(Context context) {
+ try {
+ AudioManager audioManager = (AudioManager) context.getSystemService(
+ Context.AUDIO_SERVICE);
+ Method method = audioManager.getClass().getDeclaredMethod(
+ "getDevicesForStream", int.class);
+ int device = (Integer) method.invoke(audioManager, AudioManager.STREAM_MUSIC);
+ return (device & DEVICE_OUT_BLUETOOTH) != 0;
+ } catch (Exception e) {
+ return false;
+ }
+ }
+
+ public static final class RouteInfo {
+ public static CharSequence getName(Object routeObj, Context context) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getName(context);
+ }
+
+ public static CharSequence getStatus(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getStatus();
+ }
+
+ public static int getSupportedTypes(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getSupportedTypes();
+ }
+
+ public static Object getCategory(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getCategory();
+ }
+
+ public static Drawable getIconDrawable(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getIconDrawable();
+ }
+
+ public static int getPlaybackType(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getPlaybackType();
+ }
+
+ public static int getPlaybackStream(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getPlaybackStream();
+ }
+
+ public static int getVolume(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getVolume();
+ }
+
+ public static int getVolumeMax(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getVolumeMax();
+ }
+
+ public static int getVolumeHandling(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getVolumeHandling();
+ }
+
+ public static Object getTag(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getTag();
+ }
+
+ public static void setTag(Object routeObj, Object tag) {
+ ((android.media.MediaRouter.RouteInfo)routeObj).setTag(tag);
+ }
+
+ public static void requestSetVolume(Object routeObj, int volume) {
+ ((android.media.MediaRouter.RouteInfo)routeObj).requestSetVolume(volume);
+ }
+
+ public static void requestUpdateVolume(Object routeObj, int direction) {
+ ((android.media.MediaRouter.RouteInfo)routeObj).requestUpdateVolume(direction);
+ }
+
+ public static Object getGroup(Object routeObj) {
+ return ((android.media.MediaRouter.RouteInfo)routeObj).getGroup();
+ }
+
+ public static boolean isGroup(Object routeObj) {
+ return routeObj instanceof android.media.MediaRouter.RouteGroup;
+ }
+ }
+
+ public static final class RouteGroup {
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public static List getGroupedRoutes(Object groupObj) {
+ final android.media.MediaRouter.RouteGroup group =
+ (android.media.MediaRouter.RouteGroup)groupObj;
+ final int count = group.getRouteCount();
+ List out = new ArrayList(count);
+ for (int i = 0; i < count; i++) {
+ out.add(group.getRouteAt(i));
+ }
+ return out;
+ }
+ }
+
+ public static final class UserRouteInfo {
+ public static void setName(Object routeObj, CharSequence name) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setName(name);
+ }
+
+ public static void setStatus(Object routeObj, CharSequence status) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setStatus(status);
+ }
+
+ public static void setIconDrawable(Object routeObj, Drawable icon) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setIconDrawable(icon);
+ }
+
+ public static void setPlaybackType(Object routeObj, int type) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setPlaybackType(type);
+ }
+
+ public static void setPlaybackStream(Object routeObj, int stream) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setPlaybackStream(stream);
+ }
+
+ public static void setVolume(Object routeObj, int volume) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setVolume(volume);
+ }
+
+ public static void setVolumeMax(Object routeObj, int volumeMax) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setVolumeMax(volumeMax);
+ }
+
+ public static void setVolumeHandling(Object routeObj, int volumeHandling) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setVolumeHandling(volumeHandling);
+ }
+
+ public static void setVolumeCallback(Object routeObj, Object volumeCallbackObj) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setVolumeCallback(
+ (android.media.MediaRouter.VolumeCallback)volumeCallbackObj);
+ }
+
+ public static void setRemoteControlClient(Object routeObj, Object rccObj) {
+ ((android.media.MediaRouter.UserRouteInfo)routeObj).setRemoteControlClient(
+ (android.media.RemoteControlClient)rccObj);
+ }
+ }
+
+ public static final class RouteCategory {
+ public static CharSequence getName(Object categoryObj, Context context) {
+ return ((android.media.MediaRouter.RouteCategory)categoryObj).getName(context);
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public static List getRoutes(Object categoryObj) {
+ ArrayList out = new ArrayList();
+ ((android.media.MediaRouter.RouteCategory)categoryObj).getRoutes(out);
+ return out;
+ }
+
+ public static int getSupportedTypes(Object categoryObj) {
+ return ((android.media.MediaRouter.RouteCategory)categoryObj).getSupportedTypes();
+ }
+
+ public static boolean isGroupable(Object categoryObj) {
+ return ((android.media.MediaRouter.RouteCategory)categoryObj).isGroupable();
+ }
+ }
+
+ public static interface Callback {
+ public void onRouteSelected(int type, Object routeObj);
+ public void onRouteUnselected(int type, Object routeObj);
+ public void onRouteAdded(Object routeObj);
+ public void onRouteRemoved(Object routeObj);
+ public void onRouteChanged(Object routeObj);
+ public void onRouteGrouped(Object routeObj, Object groupObj, int index);
+ public void onRouteUngrouped(Object routeObj, Object groupObj);
+ public void onRouteVolumeChanged(Object routeObj);
+ }
+
+ public static interface VolumeCallback {
+ public void onVolumeSetRequest(Object routeObj, int volume);
+ public void onVolumeUpdateRequest(Object routeObj, int direction);
+ }
+
+ /**
+ * Workaround for limitations of selectRoute() on JB and JB MR1.
+ * Do not use on JB MR2 and above.
+ */
+ public static final class SelectRouteWorkaround {
+ private Method mSelectRouteIntMethod;
+
+ public SelectRouteWorkaround() {
+ if (Build.VERSION.SDK_INT < 16 || Build.VERSION.SDK_INT > 17) {
+ throw new UnsupportedOperationException();
+ }
+ try {
+ mSelectRouteIntMethod = android.media.MediaRouter.class.getMethod(
+ "selectRouteInt", int.class, android.media.MediaRouter.RouteInfo.class);
+ } catch (NoSuchMethodException ex) {
+ }
+ }
+
+ public void selectRoute(Object routerObj, int types, Object routeObj) {
+ android.media.MediaRouter router = (android.media.MediaRouter)routerObj;
+ android.media.MediaRouter.RouteInfo route =
+ (android.media.MediaRouter.RouteInfo)routeObj;
+
+ int routeTypes = route.getSupportedTypes();
+ if ((routeTypes & ROUTE_TYPE_USER) == 0) {
+ // Handle non-user routes.
+ // On JB and JB MR1, the selectRoute() API only supports programmatically
+ // selecting user routes. So instead we rely on the hidden selectRouteInt()
+ // method on these versions of the platform.
+ // This limitation was removed in JB MR2.
+ if (mSelectRouteIntMethod != null) {
+ try {
+ mSelectRouteIntMethod.invoke(router, types, route);
+ return; // success!
+ } catch (IllegalAccessException ex) {
+ Log.w(TAG, "Cannot programmatically select non-user route. "
+ + "Media routing may not work.", ex);
+ } catch (InvocationTargetException ex) {
+ Log.w(TAG, "Cannot programmatically select non-user route. "
+ + "Media routing may not work.", ex);
+ }
+ } else {
+ Log.w(TAG, "Cannot programmatically select non-user route "
+ + "because the platform is missing the selectRouteInt() "
+ + "method. Media routing may not work.");
+ }
+ }
+
+ // Default handling.
+ router.selectRoute(types, route);
+ }
+ }
+
+ /**
+ * Workaround the fact that the getDefaultRoute() method does not exist in JB and JB MR1.
+ * Do not use on JB MR2 and above.
+ */
+ public static final class GetDefaultRouteWorkaround {
+ private Method mGetSystemAudioRouteMethod;
+
+ public GetDefaultRouteWorkaround() {
+ if (Build.VERSION.SDK_INT < 16 || Build.VERSION.SDK_INT > 17) {
+ throw new UnsupportedOperationException();
+ }
+ try {
+ mGetSystemAudioRouteMethod =
+ android.media.MediaRouter.class.getMethod("getSystemAudioRoute");
+ } catch (NoSuchMethodException ex) {
+ }
+ }
+
+ public Object getDefaultRoute(Object routerObj) {
+ android.media.MediaRouter router = (android.media.MediaRouter)routerObj;
+
+ if (mGetSystemAudioRouteMethod != null) {
+ try {
+ return mGetSystemAudioRouteMethod.invoke(router);
+ } catch (IllegalAccessException ex) {
+ } catch (InvocationTargetException ex) {
+ }
+ }
+
+ // Could not find the method or it does not work.
+ // Return the first route and hope for the best.
+ return router.getRouteAt(0);
+ }
+ }
+
+ static class CallbackProxy<T extends Callback>
+ extends android.media.MediaRouter.Callback {
+ protected final T mCallback;
+
+ public CallbackProxy(T callback) {
+ mCallback = callback;
+ }
+
+ @Override
+ public void onRouteSelected(android.media.MediaRouter router,
+ int type, android.media.MediaRouter.RouteInfo route) {
+ mCallback.onRouteSelected(type, route);
+ }
+
+ @Override
+ public void onRouteUnselected(android.media.MediaRouter router,
+ int type, android.media.MediaRouter.RouteInfo route) {
+ mCallback.onRouteUnselected(type, route);
+ }
+
+ @Override
+ public void onRouteAdded(android.media.MediaRouter router,
+ android.media.MediaRouter.RouteInfo route) {
+ mCallback.onRouteAdded(route);
+ }
+
+ @Override
+ public void onRouteRemoved(android.media.MediaRouter router,
+ android.media.MediaRouter.RouteInfo route) {
+ mCallback.onRouteRemoved(route);
+ }
+
+ @Override
+ public void onRouteChanged(android.media.MediaRouter router,
+ android.media.MediaRouter.RouteInfo route) {
+ mCallback.onRouteChanged(route);
+ }
+
+ @Override
+ public void onRouteGrouped(android.media.MediaRouter router,
+ android.media.MediaRouter.RouteInfo route,
+ android.media.MediaRouter.RouteGroup group, int index) {
+ mCallback.onRouteGrouped(route, group, index);
+ }
+
+ @Override
+ public void onRouteUngrouped(android.media.MediaRouter router,
+ android.media.MediaRouter.RouteInfo route,
+ android.media.MediaRouter.RouteGroup group) {
+ mCallback.onRouteUngrouped(route, group);
+ }
+
+ @Override
+ public void onRouteVolumeChanged(android.media.MediaRouter router,
+ android.media.MediaRouter.RouteInfo route) {
+ mCallback.onRouteVolumeChanged(route);
+ }
+ }
+
+ static class VolumeCallbackProxy<T extends VolumeCallback>
+ extends android.media.MediaRouter.VolumeCallback {
+ protected final T mCallback;
+
+ public VolumeCallbackProxy(T callback) {
+ mCallback = callback;
+ }
+
+ @Override
+ public void onVolumeSetRequest(android.media.MediaRouter.RouteInfo route,
+ int volume) {
+ mCallback.onVolumeSetRequest(route, volume);
+ }
+
+ @Override
+ public void onVolumeUpdateRequest(android.media.MediaRouter.RouteInfo route,
+ int direction) {
+ mCallback.onVolumeUpdateRequest(route, direction);
+ }
+ }
+}
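
Editor's note: a small sketch of enumerating routes and registering a callback through the shim above. Only methods defined in MediaRouterJellybean are used; the log tag and the no-op callback body are illustrative.

package com.android.support.mediarouter.media; // shim classes are package-private

import android.content.Context;
import android.util.Log;

import java.util.List;

final class JellybeanShimUsageSketch {
    static void dumpRoutesAndListen(Context context) {
        Object router = MediaRouterJellybean.getMediaRouter(context);
        @SuppressWarnings("unchecked")
        List<Object> routes = MediaRouterJellybean.getRoutes(router);
        for (Object routeObj : routes) {
            Log.d("RouteDump", MediaRouterJellybean.RouteInfo.getName(routeObj, context)
                    + " volume=" + MediaRouterJellybean.RouteInfo.getVolume(routeObj)
                    + "/" + MediaRouterJellybean.RouteInfo.getVolumeMax(routeObj));
        }
        // The proxy returned here is the framework Callback instance expected by addCallback().
        Object callback = MediaRouterJellybean.createCallback(new MediaRouterJellybean.Callback() {
            @Override public void onRouteSelected(int type, Object routeObj) {}
            @Override public void onRouteUnselected(int type, Object routeObj) {}
            @Override public void onRouteAdded(Object routeObj) {}
            @Override public void onRouteRemoved(Object routeObj) {}
            @Override public void onRouteChanged(Object routeObj) {}
            @Override public void onRouteGrouped(Object routeObj, Object groupObj, int index) {}
            @Override public void onRouteUngrouped(Object routeObj, Object groupObj) {}
            @Override public void onRouteVolumeChanged(Object routeObj) {}
        });
        MediaRouterJellybean.addCallback(router, MediaRouterJellybean.ALL_ROUTE_TYPES, callback);
    }
}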
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaControlIntent.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaControlIntent.java
new file mode 100644
index 0000000..1d9e777
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaControlIntent.java
@@ -0,0 +1,1228 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.app.PendingIntent;
+import android.content.Intent;
+import android.net.Uri;
+
+/**
+ * Constants for media control intents.
+ * <p>
+ * This class declares a set of standard media control intent categories and actions that
+ * applications can use to identify the capabilities of media routes and control them.
+ * </p>
+ *
+ * <h3>Media control intent categories</h3>
+ * <p>
+ * Media control intent categories specify means by which applications can
+ * send media to the destination of a media route. Categories are sometimes referred
+ * to as describing "types" or "kinds" of routes.
+ * </p><p>
+ * For example, if a route supports the {@link #CATEGORY_REMOTE_PLAYBACK remote playback category},
+ * then an application can ask it to play media remotely by sending a
+ * {@link #ACTION_PLAY play} or {@link #ACTION_ENQUEUE enqueue} intent with the Uri of the
+ * media content to play. Such a route may then be referred to as
+ * a "remote playback route" because it supports remote playback requests. It is common
+ * for a route to support multiple categories of requests at the same time, such as
+ * live audio and live video.
+ * </p><p>
+ * The following standard route categories are defined.
+ * </p><ul>
+ * <li>{@link #CATEGORY_LIVE_AUDIO Live audio}: The route supports streaming live audio
+ * from the device to the destination. Live audio routes include local speakers
+ * and Bluetooth headsets.
+ * <li>{@link #CATEGORY_LIVE_VIDEO Live video}: The route supports streaming live video
+ * from the device to the destination. Live video routes include local displays
+ * and wireless displays that support mirroring and
+ * {@link android.app.Presentation presentations}. Live video routes typically also
+ * support live audio capabilities.
+ * <li>{@link #CATEGORY_REMOTE_PLAYBACK Remote playback}: The route supports sending
+ * remote playback requests for media content to the destination. The content to be
+ * played is identified by a Uri and mime-type.
+ * </ul><p>
+ * Media route providers may define custom media control intent categories of their own in
+ * addition to the standard ones. Custom categories can be used to provide a variety
+ * of features to applications that recognize and know how to use them. For example,
+ * a media route provider might define a custom category to indicate that its routes
+ * support a special device-specific control interface in addition to other
+ * standard features.
+ * </p><p>
+ * Applications can determine which categories a route supports by using the
+ * {@link MediaRouter.RouteInfo#supportsControlCategory MediaRouter.RouteInfo.supportsControlCategory}
+ * or {@link MediaRouter.RouteInfo#getControlFilters MediaRouter.RouteInfo.getControlFilters}
+ * methods. Applications can also specify the types of routes that they want to use by
+ * creating {@link MediaRouteSelector media route selectors} that contain the desired
+ * categories and are used to filter routes in several parts of the media router API.
+ * </p>
+ *
+ * <h3>Media control intent actions</h3>
+ * <p>
+ * Media control intent actions specify particular functions that applications
+ * can ask the destination of a media route to perform. Media route control requests
+ * take the form of intents in a similar manner to other intents used to start activities
+ * or send broadcasts. The difference is that media control intents are directed to
+ * routes rather than activity or broadcast receiver components.
+ * </p><p>
+ * Each media route control intent specifies an action, a category and some number of parameters
+ * that are supplied as extras. Applications send media control requests to routes using the
+ * {@link MediaRouter.RouteInfo#sendControlRequest MediaRouter.RouteInfo.sendControlRequest}
+ * method and receive results via a callback.
+ * </p><p>
+ * All media control intent actions are associated with the media control intent categories
+ * that support them. Thus only remote playback routes may perform remote playback actions.
+ * The documentation of each action specifies the category to which the action belongs,
+ * the parameters it requires, and the results it returns.
+ * </p>
+ *
+ * <h3>Live audio and live video routes</h3>
+ * <p>
+ * {@link #CATEGORY_LIVE_AUDIO Live audio} and {@link #CATEGORY_LIVE_VIDEO live video}
+ * routes present media using standard system interfaces such as audio streams,
+ * {@link android.app.Presentation presentations} or display mirroring. These routes are
+ * the easiest to use because applications simply render content locally on the device
+ * and the system streams it to the route destination automatically.
+ * </p><p>
+ * In most cases, applications can stream content to live audio and live video routes in
+ * the same way they would play the content locally without any modification. However,
+ * applications may also be able to take advantage of more sophisticated features such
+ * as second-screen presentation APIs that are particular to these routes.
+ * </p>
+ *
+ * <h3>Remote playback routes</h3>
+ * <p>
+ * {@link #CATEGORY_REMOTE_PLAYBACK Remote playback} routes present media remotely
+ * by playing content from a Uri.
+ * The destinations of these routes take responsibility for fetching and rendering content
+ * on their own. Applications do not render the content themselves; instead, applications
+ * send control requests to initiate play, pause, resume, or stop media items and receive
+ * status updates as they change state.
+ * </p>
+ *
+ * <h4>Sessions</h4>
+ * <p>
+ * Each remote media playback action is conducted within the scope of a session.
+ * Sessions are used to prevent applications from accidentally interfering with one
+ * another because at most one session can be valid at a time.
+ * </p><p>
+ * A session can be created using the {@link #ACTION_START_SESSION start session action}
+ * and terminated using the {@link #ACTION_END_SESSION end session action} when the
+ * route provides explicit session management features.
+ * </p><p>
+ * Explicit session management was added in a later revision of the protocol so not
+ * all routes support it. If the route does not support explicit session management
+ * then implicit session management may still be used. Implicit session management
+ * relies on the use of the {@link #ACTION_PLAY play} and {@link #ACTION_ENQUEUE enqueue}
+ * actions which have the side-effect of creating a new session if none is provided
+ * as argument.
+ * </p><p>
+ * When a new session is created, the previous session is invalidated and any ongoing
+ * media playback is stopped before the requested action is performed. Any attempt
+ * to use an invalidated session will result in an error. (Protocol implementations
+ * are encouraged to aggressively discard information associated with invalidated sessions
+ * since it is no longer of use.)
+ * </p><p>
+ * Each session is identified by a unique session id that may be used to control
+ * the session using actions such as pause, resume, stop and end session.
+ * </p>
+ *
+ * <h4>Media items</h4>
+ * <p>
+ * Each successful {@link #ACTION_PLAY play} or {@link #ACTION_ENQUEUE enqueue} action
+ * returns a unique media item id that an application can use to monitor and control
+ * playback. The media item id may be passed to other actions such as
+ * {@link #ACTION_SEEK seek} or {@link #ACTION_GET_STATUS get status}. It will also appear
+ * as a parameter in status update broadcasts to identify the associated playback request.
+ * </p><p>
+ * Each media item is scoped to the session in which it was created. Therefore media item
+ * ids are only ever used together with session ids. Media item ids are meaningless
+ * on their own. When the session is invalidated, all of its media items are also
+ * invalidated.
+ * </p>
+ *
+ * <h4>The playback queue</h4>
+ * <p>
+ * Each session has its own playback queue that consists of the media items that
+ * are pending, playing, buffering or paused. Items are added to the queue when
+ * a playback request is issued. Items are removed from the queue when they are no
+ * longer eligible for playback (enter terminal states).
+ * </p><p>
+ * As described in the {@link MediaItemStatus} class, media items initially
+ * start in a pending state, transition to the playing (or buffering or paused) state
+ * during playback, and end in a finished, canceled, invalidated or error state.
+ * Once the current item enters a terminal state, playback proceeds on to the
+ * next item.
+ * </p><p>
+ * The application should determine whether the route supports queuing by checking
+ * whether the {@link #ACTION_ENQUEUE} action is declared in the route's control filter
+ * using {@link MediaRouter.RouteInfo#supportsControlRequest RouteInfo.supportsControlRequest}.
+ * </p><p>
+ * If the {@link #ACTION_ENQUEUE} action is supported by the route, then the route promises
+ * to allow at least two items (possibly more) to be enqueued at a time. Enqueued items play
+ * back to back one after the other as the previous item completes. Ideally there should
+ * be no audible pause between items for standard audio content types.
+ * </p><p>
+ * If the {@link #ACTION_ENQUEUE} action is not supported by the route, then the queue
+ * effectively contains at most one item at a time. Each play action has the effect of
+ * clearing the queue and resetting its state before the next item is played.
+ * </p>
+ *
+ * <h4>Impact of pause, resume, stop and play actions on the playback queue</h4>
+ * <p>
+ * The pause, resume and stop actions affect the session's whole queue. Pause causes
+ * the playback queue to be suspended no matter which item is currently playing.
+ * Resume reverses the effects of pause. Stop clears the queue and also resets
+ * the pause flag just like resume.
+ * </p><p>
+ * As described earlier, the play action has the effect of clearing the queue
+ * and completely resetting its state (like the stop action) then enqueuing a
+ * new media item to be played immediately. Play is therefore equivalent
+ * to stop followed by an action to enqueue an item.
+ * </p><p>
+ * The play action is also special in that it can be used to create new sessions.
+ * An application with simple needs may find that it only needs to use play
+ * (and occasionally stop) to control playback.
+ * </p>
+ *
+ * <h4>Resolving conflicts between applications</h4>
+ * <p>
+ * When an application has a valid session, it is essentially in control of remote playback
+ * on the route. No other application can view or modify the remote playback state
+ * of that application's session without knowing its id.
+ * </p><p>
+ * However, other applications can perform actions that have the effect of stopping
+ * playback and invalidating the current session. When this occurs, the former application
+ * will be informed that it has lost control by way of individual media item status
+ * update broadcasts that indicate that its queued media items have become
+ * {@link MediaItemStatus#PLAYBACK_STATE_INVALIDATED invalidated}. This broadcast
+ * implies that playback was terminated abnormally by an external cause.
+ * </p><p>
+ * Applications should handle conflicts conservatively to allow other applications to
+ * smoothly assume control over the route. When a conflict occurs, the currently playing
+ * application should release its session and allow the new application to use the
+ * route until such time as the user intervenes to take over the route again and begin
+ * a new playback session.
+ * </p>
+ *
+ * <h4>Basic actions</h4>
+ * <p>
+ * The following basic actions must be supported (all or nothing) by all remote
+ * playback routes. These actions form the basis of the remote playback protocol
+ * and are required in all implementations.
+ * </p><ul>
+ * <li>{@link #ACTION_PLAY Play}: Starts playing content specified by a given Uri
+ * and returns a new media item id to describe the request. Implicitly creates a new
+ * session if no session id was specified as a parameter.
+ * <li>{@link #ACTION_SEEK Seek}: Sets the content playback position of a specific media item.
+ * <li>{@link #ACTION_GET_STATUS Get status}: Gets the status of a media item
+ * including the item's current playback position and progress.
+ * <li>{@link #ACTION_PAUSE Pause}: Pauses playback of the queue.
+ * <li>{@link #ACTION_RESUME Resume}: Resumes playback of the queue.
+ * <li>{@link #ACTION_STOP Stop}: Stops playback, clears the queue, and resets the
+ * pause state.
+ * </ul>
+ *
+ * <h4>Queue actions</h4>
+ * <p>
+ * The following queue actions must be supported (all or nothing) by remote
+ * playback routes that offer optional queuing capabilities.
+ * </p><ul>
+ * <li>{@link #ACTION_ENQUEUE Enqueue}: Enqueues content specified by a given Uri
+ * and returns a new media item id to describe the request. Implicitly creates a new
+ * session if no session id was specified as a parameter.
+ * <li>{@link #ACTION_REMOVE Remove}: Removes a specified media item from the queue.
+ * </ul>
+ *
+ * <h4>Session actions</h4>
+ * <p>
+ * The following session actions must be supported (all or nothing) by remote
+ * playback routes that offer optional session management capabilities.
+ * </p><ul>
+ * <li>{@link #ACTION_START_SESSION Start session}: Starts a new session explicitly.
+ * <li>{@link #ACTION_GET_SESSION_STATUS Get session status}: Gets the status of a session.
+ * <li>{@link #ACTION_END_SESSION End session}: Ends a session explicitly.
+ * </ul>
+ *
+ * <h4>Implementation note</h4>
+ * <p>
+ * Implementations of the remote playback protocol must implement <em>all</em> of the
+ * documented actions, parameters and results. Note that the documentation is written from
+ * the perspective of a client of the protocol. In particular, whenever a parameter
+ * is described as being "optional", it is only from the perspective of the client.
+ * Compliant media route provider implementations of this protocol must support all
+ * of the features described herein.
+ * </p>
+ */
+public final class MediaControlIntent {
+ /* Route categories. */
+
+ /**
+ * Media control category: Live audio.
+ * <p>
+ * A route that supports live audio routing will allow the media audio stream
+ * to be sent to supported destinations. This can include internal speakers or
+ * audio jacks on the device itself, A2DP devices, and more.
+ * </p><p>
+ * When a live audio route is selected, audio routing is transparent to the application.
+ * All audio played on the media stream will be routed to the selected destination.
+ * </p><p>
+ * Refer to the class documentation for details about live audio routes.
+ * </p>
+ */
+ public static final String CATEGORY_LIVE_AUDIO = "android.media.intent.category.LIVE_AUDIO";
+
+ /**
+ * Media control category: Live video.
+ * <p>
+ * A route that supports live video routing will allow a mirrored version
+ * of the device's primary display or a customized
+ * {@link android.app.Presentation Presentation} to be sent to supported
+ * destinations.
+ * </p><p>
+ * When a live video route is selected, audio and video routing is transparent
+ * to the application. By default, audio and video is routed to the selected
+ * destination. For certain live video routes, the application may also use a
+ * {@link android.app.Presentation Presentation} to replace the mirrored view
+ * on the external display with different content.
+ * </p><p>
+ * Refer to the class documentation for details about live video routes.
+ * </p>
+ *
+ * @see MediaRouter.RouteInfo#getPresentationDisplay()
+ * @see android.app.Presentation
+ */
+ public static final String CATEGORY_LIVE_VIDEO = "android.media.intent.category.LIVE_VIDEO";
+
+ /**
+ * Media control category: Remote playback.
+ * <p>
+ * A route that supports remote playback routing will allow an application to send
+ * requests to play content remotely to supported destinations.
+ * </p><p>
+ * Remote playback route destinations operate independently of the local device.
+ * When a remote playback route is selected, the application can control the content
+ * playing on the destination by sending media control actions to the route.
+ * The application may also receive status updates from the route regarding
+ * remote playback.
+ * </p><p>
+ * Refer to the class documentation for details about remote playback routes.
+ * </p>
+ *
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ */
+ public static final String CATEGORY_REMOTE_PLAYBACK =
+ "android.media.intent.category.REMOTE_PLAYBACK";
+
+ /* Remote playback actions that affect individual items. */
+
+ /**
+ * Remote playback media control action: Play media item.
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action causes a remote playback route to start playing content with
+ * the {@link Uri} specified in the {@link Intent}'s {@link Intent#getData() data uri}.
+ * The action returns a media session id and media item id which can be used
+ * to control playback using other remote playback actions.
+ * </p><p>
+ * Once initiated, playback of the specified content will be managed independently
+ * by the destination. The application will receive status updates as the state
+ * of the media item changes.
+ * </p><p>
+ * If the data uri specifies an HTTP or HTTPS scheme, then the destination is
+ * responsible for following HTTP redirects to a reasonable depth of at least 3
+ * levels as might typically be handled by a web browser. If an HTTP error
+ * occurs, then the destination should send a {@link MediaItemStatus status update}
+ * back to the client indicating the {@link MediaItemStatus#PLAYBACK_STATE_ERROR error}
+ * {@link MediaItemStatus#getPlaybackState() playback state}.
+ * </p>
+ *
+ * <h3>One item at a time</h3>
+ * <p>
+ * Each successful play action <em>replaces</em> the previous play action.
+ * If an item is already playing, then it is canceled, the session's playback queue
+ * is cleared and the new item begins playing immediately (regardless of
+ * whether the previously playing item had been paused).
+ * </p><p>
+ * Play is therefore equivalent to {@link #ACTION_STOP stop} followed by an action
+ * to enqueue a new media item to be played immediately.
+ * </p>
+ *
+ * <h3>Sessions</h3>
+ * <p>
+ * This request has the effect of implicitly creating a media session whenever the
+ * application does not specify the {@link #EXTRA_SESSION_ID session id} parameter.
+ * Because there can only be at most one valid session at a time, creating a new session
+ * has the side-effect of invalidating any existing sessions and their media items,
+ * then handling the playback request with a new session.
+ * </p><p>
+ * If the application specifies an invalid session id, then an error is returned.
+ * When this happens, the application should assume that its session
+ * is no longer valid. To obtain a new session, the application may try again
+ * and omit the session id parameter. However, the application should
+ * only retry requests due to an explicit action performed by the user,
+ * such as the user clicking on a "play" button in the UI, since another
+ * application may be trying to take control of the route and the former
+ * application should try to stay out of its way.
+ * </p><p>
+ * For more information on sessions, queues and media items, please refer to the
+ * class documentation.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(optional)</em>: Specifies the session id of the
+ * session to which the playback request belongs. If omitted, a new session
+ * is created implicitly.
+ * <li>{@link #EXTRA_ITEM_CONTENT_POSITION} <em>(optional)</em>: Specifies the initial
+ * content playback position as a long integer number of milliseconds from
+ * the beginning of the content.
+ * <li>{@link #EXTRA_ITEM_METADATA} <em>(optional)</em>: Specifies metadata associated
+ * with the content such as the title of a song.
+ * <li>{@link #EXTRA_ITEM_STATUS_UPDATE_RECEIVER} <em>(optional)</em>: Specifies a
+ * {@link PendingIntent} for a broadcast receiver that will receive status updates
+ * about the media item.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(always returned)</em>: Specifies the session id of the
+ * session that was affected by the request. This will be a new session in
+ * the case where no session id was supplied as a parameter.
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(optional, old implementations may
+ * omit this key)</em>: Specifies the status of the media session.
+ * <li>{@link #EXTRA_ITEM_ID} <em>(always returned)</em>: Specifies an opaque string identifier
+ * to use to refer to the media item in subsequent requests such as
+ * {@link #ACTION_GET_STATUS}.
+ * <li>{@link #EXTRA_ITEM_STATUS} <em>(always returned)</em>: Specifies the initial status of
+ * the new media item.
+ * </ul>
+ *
+ * <h3>Status updates</h3>
+ * <p>
+ * If the client supplies an
+ * {@link #EXTRA_ITEM_STATUS_UPDATE_RECEIVER item status update receiver}
+ * then the media route provider is responsible for sending status updates to the receiver
+ * when significant media item state changes occur such as when playback starts or
+ * stops. The receiver will not be invoked for content playback position changes.
+ * The application may retrieve the current playback position when necessary
+ * using the {@link #ACTION_GET_STATUS} request.
+ * </p><p>
+ * Refer to {@link MediaItemStatus} for details.
+ * </p>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if a session id was provided but is unknown or
+ * no longer valid, if the item Uri or content type is not supported, or if
+ * any other arguments are invalid.
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
+ * <h3>Example</h3>
+ * <pre>
+ * MediaRouter mediaRouter = MediaRouter.getInstance(context);
+ * MediaRouter.RouteInfo route = mediaRouter.getSelectedRoute();
+ * Intent intent = new Intent(MediaControlIntent.ACTION_PLAY);
+ * intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ * intent.setDataAndType("http://example.com/videos/movie.mp4", "video/mp4");
+ * if (route.supportsControlRequest(intent)) {
+ * MediaRouter.ControlRequestCallback callback = new MediaRouter.ControlRequestCallback() {
+ * public void onResult(Bundle data) {
+ * // The request succeeded.
+ * // Playback may be controlled using the returned session and item id.
+ * String sessionId = data.getString(MediaControlIntent.EXTRA_SESSION_ID);
+ * String itemId = data.getString(MediaControlIntent.EXTRA_ITEM_ID);
+ * MediaItemStatus status = MediaItemStatus.fromBundle(data.getBundle(
+ * MediaControlIntent.EXTRA_ITEM_STATUS));
+ * // ...
+ * }
+ *
+ * public void onError(String message, Bundle data) {
+ * // An error occurred!
+ * }
+ * };
+ * route.sendControlRequest(intent, callback);
+ * }</pre>
+ *
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ * @see #ACTION_SEEK
+ * @see #ACTION_GET_STATUS
+ * @see #ACTION_PAUSE
+ * @see #ACTION_RESUME
+ * @see #ACTION_STOP
+ */
+ public static final String ACTION_PLAY = "android.media.intent.action.PLAY";
+
+ /**
+ * Remote playback media control action: Enqueue media item.
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action works just like {@link #ACTION_PLAY play} except that it does
+ * not clear the queue or reset the pause state when it enqueues the
+ * new media item into the session's playback queue. This action only
+ * enqueues a media item with no other side-effects on the queue.
+ * </p><p>
+ * If the queue is currently empty, then the item will play immediately
+ * (assuming the queue is not paused). Otherwise, the item will play
+ * after all earlier items in the queue have finished or been removed.
+ * </p><p>
+ * The enqueue action can be used to create new sessions just like play.
+ * Its parameters and results are also the same. Only the queuing behavior
+ * is different.
+ * </p>
+ *
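+ * <h3>Example</h3>
+ * <p>
+ * A minimal sketch of enqueuing a second media item into an existing session.
+ * The session id is assumed to come from the result of a previous
+ * {@link #ACTION_PLAY} request and the content Uri is illustrative only.
+ * </p><pre>
+ * Intent intent = new Intent(MediaControlIntent.ACTION_ENQUEUE);
+ * intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ * intent.setDataAndType(Uri.parse("http://example.com/videos/trailer.mp4"), "video/mp4");
+ * intent.putExtra(MediaControlIntent.EXTRA_SESSION_ID, sessionId);
+ * if (route.supportsControlRequest(intent)) {
+ *     route.sendControlRequest(intent, callback);
+ * }</pre>
+ *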
+ * @see #ACTION_PLAY
+ */
+ public static final String ACTION_ENQUEUE = "android.media.intent.action.ENQUEUE";
+
+ /**
+ * Remote playback media control action: Seek media item to a new playback position.
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action causes a remote playback route to modify the current playback position
+ * of the specified media item.
+ * </p><p>
+ * This action affects only the playback position of the media item, not its playback state.
+ * If the playback queue is paused, then seeking sets the position but the item
+ * remains paused. Likewise if the item is playing, then seeking will cause playback
+ * to jump to the new position and continue playing from that point. If the item has
+ * not yet started playing, then the new playback position is remembered by the
+ * queue and used as the item's initial content position when playback eventually begins.
+ * </p><p>
+ * If successful, the media item's playback position is changed.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of the session
+ * to which the media item belongs.
+ * <li>{@link #EXTRA_ITEM_ID} <em>(required)</em>: Specifies the media item id of
+ * the media item to seek.
+ * <li>{@link #EXTRA_ITEM_CONTENT_POSITION} <em>(required)</em>: Specifies the new
+ * content position for playback as a long integer number of milliseconds from
+ * the beginning of the content.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(optional, old implementations may
+ * omit this key)</em>: Specifies the status of the media session.
+ * <li>{@link #EXTRA_ITEM_STATUS} <em>(always returned)</em>: Specifies the new status of
+ * the media item.
+ * </ul>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if the session id or media item id are unknown
+ * or no longer valid, if the content position is invalid, or if the media item
+ * is in a terminal state.
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
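+ * <h3>Example</h3>
+ * <p>
+ * A minimal sketch of seeking an item to the 30 second mark. The session id and
+ * item id are assumed to come from the result of an earlier {@link #ACTION_PLAY}
+ * or {@link #ACTION_ENQUEUE} request.
+ * </p><pre>
+ * Intent intent = new Intent(MediaControlIntent.ACTION_SEEK);
+ * intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ * intent.putExtra(MediaControlIntent.EXTRA_SESSION_ID, sessionId);
+ * intent.putExtra(MediaControlIntent.EXTRA_ITEM_ID, itemId);
+ * intent.putExtra(MediaControlIntent.EXTRA_ITEM_CONTENT_POSITION, 30000L);
+ * route.sendControlRequest(intent, new MediaRouter.ControlRequestCallback() {
+ *     public void onResult(Bundle data) {
+ *         MediaItemStatus status = MediaItemStatus.fromBundle(
+ *                 data.getBundle(MediaControlIntent.EXTRA_ITEM_STATUS));
+ *         // The returned status reflects the new playback position.
+ *     }
+ * });</pre>
+ *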
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ */
+ public static final String ACTION_SEEK = "android.media.intent.action.SEEK";
+
+ /**
+ * Remote playback media control action: Get media item playback status
+ * and progress information.
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action asks a remote playback route to provide updated playback status and progress
+ * information about the specified media item.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of the session
+ * to which the media item belongs.
+ * <li>{@link #EXTRA_ITEM_ID} <em>(required)</em>: Specifies the media item id of
+ * the media item to query.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(optional, old implementations may
+ * omit this key)</em>: Specifies the status of the media session.
+ * <li>{@link #EXTRA_ITEM_STATUS} <em>(always returned)</em>: Specifies the current status of
+ * the media item.
+ * </ul>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if the session id or media item id are unknown
+ * or no longer valid.
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
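+ * <h3>Example</h3>
+ * <p>
+ * A minimal sketch of polling for updated status. As described in
+ * {@link MediaItemStatus}, polling should be infrequent and only while the
+ * screen is on; the session id and item id are assumed to come from an earlier
+ * playback request.
+ * </p><pre>
+ * Intent intent = new Intent(MediaControlIntent.ACTION_GET_STATUS);
+ * intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ * intent.putExtra(MediaControlIntent.EXTRA_SESSION_ID, sessionId);
+ * intent.putExtra(MediaControlIntent.EXTRA_ITEM_ID, itemId);
+ * route.sendControlRequest(intent, new MediaRouter.ControlRequestCallback() {
+ *     public void onResult(Bundle data) {
+ *         MediaItemStatus status = MediaItemStatus.fromBundle(
+ *                 data.getBundle(MediaControlIntent.EXTRA_ITEM_STATUS));
+ *         long position = status.getContentPosition();
+ *         long duration = status.getContentDuration();
+ *         // Update the progress UI from position and duration.
+ *     }
+ * });</pre>
+ *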
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ * @see #EXTRA_ITEM_STATUS_UPDATE_RECEIVER
+ */
+ public static final String ACTION_GET_STATUS = "android.media.intent.action.GET_STATUS";
+
+ /**
+ * Remote playback media control action: Remove media item from session's queue.
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action asks a remote playback route to remove the specified media item
+ * from the session's playback queue. If the current item is removed, then
+ * playback will proceed to the next media item (assuming the queue has not been
+ * paused).
+ * </p><p>
+ * This action does not affect the pause state of the queue. If the queue was paused
+ * then it remains paused (even if it is now empty) until a resume, stop or play
+ * action is issued that causes the pause state to be cleared.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of the session
+ * to which the media item belongs.
+ * <li>{@link #EXTRA_ITEM_ID} <em>(required)</em>: Specifies the media item id of
+ * the media item to remove.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(optional, old implementations may
+ * omit this key)</em>: Specifies the status of the media session.
+ * <li>{@link #EXTRA_ITEM_STATUS} <em>(always returned)</em>: Specifies the new status of
+ * the media item.
+ * </ul>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if the session id or media item id are unknown
+ * or no longer valid, or if the media item is in a terminal state (and therefore
+ * no longer in the queue).
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ */
+ public static final String ACTION_REMOVE = "android.media.intent.action.REMOVE";
+
+ /* Remote playback actions that affect the whole playback queue. */
+
+ /**
+ * Remote playback media control action: Pause media playback.
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action causes the playback queue of the specified session to be paused.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of the session
+ * whose playback queue is to be paused.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(optional, old implementations may
+ * omit this key)</em>: Specifies the status of the media session.
+ * </ul>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if the session id is unknown or no longer valid.
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
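+ * <h3>Example</h3>
+ * <p>
+ * A minimal sketch of pausing a session's queue; {@link #ACTION_RESUME} and
+ * {@link #ACTION_STOP} follow the same pattern. The session id is assumed to
+ * come from an earlier request.
+ * </p><pre>
+ * Intent intent = new Intent(MediaControlIntent.ACTION_PAUSE);
+ * intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ * intent.putExtra(MediaControlIntent.EXTRA_SESSION_ID, sessionId);
+ * // Pass null instead of a callback if the result is not needed.
+ * route.sendControlRequest(intent, callback);</pre>
+ *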
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ * @see #ACTION_RESUME
+ */
+ public static final String ACTION_PAUSE = "android.media.intent.action.PAUSE";
+
+ /**
+ * Remote playback media control action: Resume media playback (unpause).
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action causes the playback queue of the specified session to be resumed.
+ * Reverses the effects of {@link #ACTION_PAUSE}.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of the session
+ * whose playback queue is to be resumed.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(optional, old implementations may
+ * omit this key)</em>: Specifies the status of the media session.
+ * </ul>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if the session id is unknown or no longer valid.
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ * @see #ACTION_PAUSE
+ */
+ public static final String ACTION_RESUME = "android.media.intent.action.RESUME";
+
+ /**
+ * Remote playback media control action: Stop media playback (clear queue and unpause).
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action causes a remote playback route to stop playback, cancel and remove
+ * all media items from the session's media item queue, and reset the queue's
+ * pause state.
+ * </p><p>
+ * If successful, the status of all media items in the queue is set to
+ * {@link MediaItemStatus#PLAYBACK_STATE_CANCELED canceled} and a status update is sent
+ * to the appropriate status update receivers indicating the new status of each item.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of
+ * the session whose playback queue is to be stopped (cleared and unpaused).
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(optional, old implementations may
+ * omit this key)</em>: Specifies the status of the media session.
+ * </ul>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if the session id is unknown or no longer valid.
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ */
+ public static final String ACTION_STOP = "android.media.intent.action.STOP";
+
+ /**
+ * Remote playback media control action: Start session.
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action causes a remote playback route to invalidate the current session
+ * and start a new session. The new session initially has an empty queue.
+ * </p><p>
+ * If successful, the status of all media items in the previous session's queue is set to
+ * {@link MediaItemStatus#PLAYBACK_STATE_INVALIDATED invalidated} and a status update
+ * is sent to the appropriate status update receivers indicating the new status
+ * of each item. The previous session becomes no longer valid and the new session
+ * takes control of the route.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_STATUS_UPDATE_RECEIVER} <em>(optional)</em>: Specifies a
+ * {@link PendingIntent} for a broadcast receiver that will receive status updates
+ * about the media session.
+ * <li>{@link #EXTRA_MESSAGE_RECEIVER} <em>(optional)</em>: Specifies a
+ * {@link PendingIntent} for a broadcast receiver that will receive messages from
+ * the media session.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(always returned)</em>: Specifies the session id of the
+ * session that was started by the request. This will always be a brand new session
+ * distinct from any other previously created sessions.
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(always returned)</em>: Specifies the
+ * status of the media session.
+ * </ul>
+ *
+ * <h3>Status updates</h3>
+ * <p>
+ * If the client supplies a
+ * {@link #EXTRA_SESSION_STATUS_UPDATE_RECEIVER status update receiver}
+ * then the media route provider is responsible for sending status updates to the receiver
+ * when significant media session state changes occur such as when the session's
+ * queue is paused or resumed or when the session is terminated or invalidated.
+ * </p><p>
+ * Refer to {@link MediaSessionStatus} for details.
+ * </p>
+ *
+ * <h3>Custom messages</h3>
+ * <p>
+ * If the client supplies a {@link #EXTRA_MESSAGE_RECEIVER message receiver}
+ * then the media route provider is responsible for sending messages to the receiver
+ * when the session has any messages to send.
+ * </p><p>
+ * Refer to {@link #EXTRA_MESSAGE} for details.
+ * </p>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if the session could not be created.
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
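+ * <h3>Example</h3>
+ * <p>
+ * A minimal sketch of starting a session with a session status update receiver.
+ * SessionStatusReceiver is a hypothetical broadcast receiver declared by the
+ * application.
+ * </p><pre>
+ * PendingIntent statusReceiver = PendingIntent.getBroadcast(context, 0,
+ *         new Intent(context, SessionStatusReceiver.class), 0);
+ *
+ * Intent intent = new Intent(MediaControlIntent.ACTION_START_SESSION);
+ * intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ * intent.putExtra(MediaControlIntent.EXTRA_SESSION_STATUS_UPDATE_RECEIVER, statusReceiver);
+ * route.sendControlRequest(intent, new MediaRouter.ControlRequestCallback() {
+ *     public void onResult(Bundle data) {
+ *         String sessionId = data.getString(MediaControlIntent.EXTRA_SESSION_ID);
+ *         MediaSessionStatus status = MediaSessionStatus.fromBundle(
+ *                 data.getBundle(MediaControlIntent.EXTRA_SESSION_STATUS));
+ *         // Use sessionId in subsequent requests such as ACTION_PLAY.
+ *     }
+ * });</pre>
+ *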
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ */
+ public static final String ACTION_START_SESSION = "android.media.intent.action.START_SESSION";
+
+ /**
+ * Remote playback media control action: Get media session status information.
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action asks a remote playback route to provide updated status information
+ * about the specified media session.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of the
+ * session whose status is to be retrieved.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(always returned)</em>: Specifies the
+ * current status of the media session.
+ * </ul>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if the session id is unknown or no longer valid.
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ * @see #EXTRA_SESSION_STATUS_UPDATE_RECEIVER
+ */
+ public static final String ACTION_GET_SESSION_STATUS =
+ "android.media.intent.action.GET_SESSION_STATUS";
+
+ /**
+ * Remote playback media control action: End session.
+ * <p>
+ * Used with routes that support {@link #CATEGORY_REMOTE_PLAYBACK remote playback}
+ * media control.
+ * </p><p>
+ * This action causes a remote playback route to end the specified session.
+ * The session becomes no longer valid and the route ceases to be under control
+ * of the session.
+ * </p><p>
+ * If successful, the status of the session is set to
+ * {@link MediaSessionStatus#SESSION_STATE_ENDED} and a status update is sent to
+ * the session's status update receiver.
+ * </p><p>
+ * Additionally, the status of all media items in the queue is set to
+ * {@link MediaItemStatus#PLAYBACK_STATE_CANCELED canceled} and a status update is sent
+ * to the appropriate status update receivers indicating the new status of each item.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of
+ * the session to end.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(always returned)</em>: Specifies the
+ * status of the media session.
+ * </ul>
+ *
+ * <h3>Errors</h3>
+ * <p>
+ * This action returns an error if the session id is unknown or no longer valid.
+ * In other words, it is an error to attempt to end a session other than the
+ * current session.
+ * </p><ul>
+ * <li>{@link #EXTRA_ERROR_CODE} <em>(optional)</em>: Specifies the cause of the error.
+ * </ul>
+ *
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ * @see #CATEGORY_REMOTE_PLAYBACK
+ */
+ public static final String ACTION_END_SESSION = "android.media.intent.action.END_SESSION";
+
+ /**
+ * Custom media control action: Send {@link #EXTRA_MESSAGE}.
+ * <p>
+ * This action asks a route to handle a message described by {@link #EXTRA_MESSAGE}.
+ * </p>
+ *
+ * <h3>Request parameters</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of the session
+ * that will handle this message.
+ * <li>{@link #EXTRA_MESSAGE} <em>(required)</em>: Specifies the message to send.
+ * </ul>
+ *
+ * <h3>Result data</h3>
+ * Any messages defined by each media route provider.
+ *
+ * <h3>Errors</h3>
+ * Any error messages defined by each media route provider.
+ *
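+ * <h3>Example</h3>
+ * <p>
+ * A minimal sketch of sending a message to a session. The message contents shown
+ * here (a "command" string) are purely illustrative since message formats are
+ * defined by each media route provider; the session id is assumed to come from
+ * an earlier request.
+ * </p><pre>
+ * Bundle message = new Bundle();
+ * message.putString("command", "show-subtitles"); // provider-defined key, illustrative only
+ *
+ * Intent intent = new Intent(MediaControlIntent.ACTION_SEND_MESSAGE);
+ * intent.putExtra(MediaControlIntent.EXTRA_SESSION_ID, sessionId);
+ * intent.putExtra(MediaControlIntent.EXTRA_MESSAGE, message);
+ * if (route.supportsControlRequest(intent)) {
+ *     route.sendControlRequest(intent, callback);
+ * }</pre>
+ *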
+ * @see MediaRouter.RouteInfo#sendControlRequest
+ */
+ public static final String ACTION_SEND_MESSAGE = "android.media.intent.action.SEND_MESSAGE";
+
+ /* Extras and related constants. */
+
+ /**
+ * Bundle extra: Media session id.
+ * <p>
+ * An opaque unique identifier that identifies the remote playback media session.
+ * </p><p>
+ * Used with various actions to specify the id of the media session to be controlled.
+ * </p><p>
+ * Included in broadcast intents sent to
+ * {@link #EXTRA_ITEM_STATUS_UPDATE_RECEIVER item status update receivers} to identify
+ * the session to which the item in question belongs.
+ * </p><p>
+ * Included in broadcast intents sent to
+ * {@link #EXTRA_SESSION_STATUS_UPDATE_RECEIVER session status update receivers} to identify
+ * the session.
+ * </p><p>
+ * The value is a unique string value generated by the media route provider
+ * to represent one particular media session.
+ * </p>
+ *
+ * @see #ACTION_PLAY
+ * @see #ACTION_SEEK
+ * @see #ACTION_GET_STATUS
+ * @see #ACTION_PAUSE
+ * @see #ACTION_RESUME
+ * @see #ACTION_STOP
+ * @see #ACTION_START_SESSION
+ * @see #ACTION_GET_SESSION_STATUS
+ * @see #ACTION_END_SESSION
+ */
+ public static final String EXTRA_SESSION_ID =
+ "android.media.intent.extra.SESSION_ID";
+
+ /**
+ * Bundle extra: Media session status.
+ * <p>
+ * Returned as a result from media session actions such as {@link #ACTION_START_SESSION},
+ * {@link #ACTION_PAUSE}, and {@link #ACTION_GET_SESSION_STATUS}
+ * to describe the status of the specified media session.
+ * </p><p>
+ * Included in broadcast intents sent to
+ * {@link #EXTRA_SESSION_STATUS_UPDATE_RECEIVER session status update receivers} to provide
+ * updated status information.
+ * </p><p>
+ * The value is a {@link android.os.Bundle} of data that can be converted into
+ * a {@link MediaSessionStatus} object using
+ * {@link MediaSessionStatus#fromBundle MediaSessionStatus.fromBundle}.
+ * </p>
+ *
+ * @see #ACTION_PLAY
+ * @see #ACTION_SEEK
+ * @see #ACTION_GET_STATUS
+ * @see #ACTION_PAUSE
+ * @see #ACTION_RESUME
+ * @see #ACTION_STOP
+ * @see #ACTION_START_SESSION
+ * @see #ACTION_GET_SESSION_STATUS
+ * @see #ACTION_END_SESSION
+ */
+ public static final String EXTRA_SESSION_STATUS =
+ "android.media.intent.extra.SESSION_STATUS";
+
+ /**
+ * Bundle extra: Media session status update receiver.
+ * <p>
+ * Used with {@link #ACTION_START_SESSION} to specify a {@link PendingIntent} for a
+ * broadcast receiver that will receive status updates about the media session.
+ * </p><p>
+ * Whenever the status of the media session changes, the media route provider will
+ * send a broadcast to the pending intent with extras that identify the session
+ * id and its updated status.
+ * </p><p>
+ * The value is a {@link PendingIntent}.
+ * </p>
+ *
+ * <h3>Broadcast extras</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of
+ * the session.
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(required)</em>: Specifies the status of the
+ * session as a bundle that can be decoded into a {@link MediaSessionStatus} object.
+ * </ul>
+ *
+ * @see #ACTION_START_SESSION
+ */
+ public static final String EXTRA_SESSION_STATUS_UPDATE_RECEIVER =
+ "android.media.intent.extra.SESSION_STATUS_UPDATE_RECEIVER";
+
+ /**
+ * Bundle extra: Media message receiver.
+ * <p>
+ * Used with {@link #ACTION_START_SESSION} to specify a {@link PendingIntent} for a
+ * broadcast receiver that will receive messages from the media session.
+ * </p><p>
+ * When the media session has a message to send, the media route provider will
+ * send a broadcast to the pending intent with extras that identify the session
+ * id and its message.
+ * </p><p>
+ * The value is a {@link PendingIntent}.
+ * </p>
+ *
+ * <h3>Broadcast extras</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of
+ * the session.
+ * <li>{@link #EXTRA_MESSAGE} <em>(required)</em>: Specifies the message from
+ * the session as a bundle object.
+ * </ul>
+ *
+ * @see #ACTION_START_SESSION
+ */
+ public static final String EXTRA_MESSAGE_RECEIVER =
+ "android.media.intent.extra.MESSAGE_RECEIVER";
+
+ /**
+ * Bundle extra: Media item id.
+ * <p>
+ * An opaque unique identifier returned as a result from {@link #ACTION_PLAY} or
+ * {@link #ACTION_ENQUEUE} that represents the media item that was created by the
+ * playback request.
+ * </p><p>
+ * Used with various actions to specify the id of the media item to be controlled.
+ * </p><p>
+ * Included in broadcast intents sent to
+ * {@link #EXTRA_ITEM_STATUS_UPDATE_RECEIVER status update receivers} to identify
+ * the item in question.
+ * </p><p>
+ * The value is a unique string value generated by the media route provider
+ * to represent one particular media item.
+ * </p>
+ *
+ * @see #ACTION_PLAY
+ * @see #ACTION_ENQUEUE
+ * @see #ACTION_SEEK
+ * @see #ACTION_GET_STATUS
+ */
+ public static final String EXTRA_ITEM_ID =
+ "android.media.intent.extra.ITEM_ID";
+
+ /**
+ * Bundle extra: Media item status.
+ * <p>
+ * Returned as a result from media item actions such as {@link #ACTION_PLAY},
+ * {@link #ACTION_ENQUEUE}, {@link #ACTION_SEEK}, and {@link #ACTION_GET_STATUS}
+ * to describe the status of the specified media item.
+ * </p><p>
+ * Included in broadcast intents sent to
+ * {@link #EXTRA_ITEM_STATUS_UPDATE_RECEIVER item status update receivers} to provide
+ * updated status information.
+ * </p><p>
+ * The value is a {@link android.os.Bundle} of data that can be converted into
+ * a {@link MediaItemStatus} object using
+ * {@link MediaItemStatus#fromBundle MediaItemStatus.fromBundle}.
+ * </p>
+ *
+ * @see #ACTION_PLAY
+ * @see #ACTION_ENQUEUE
+ * @see #ACTION_SEEK
+ * @see #ACTION_GET_STATUS
+ */
+ public static final String EXTRA_ITEM_STATUS =
+ "android.media.intent.extra.ITEM_STATUS";
+
+ /**
+ * Long extra: Media item content position.
+ * <p>
+ * Used with {@link #ACTION_PLAY} or {@link #ACTION_ENQUEUE} to specify the
+ * starting playback position.
+ * </p><p>
+ * Used with {@link #ACTION_SEEK} to set a new playback position.
+ * </p><p>
+ * The value is a long integer number of milliseconds from the beginning of the content.
+ * </p>
+ *
+ * @see #ACTION_PLAY
+ * @see #ACTION_ENQUEUE
+ * @see #ACTION_SEEK
+ */
+ public static final String EXTRA_ITEM_CONTENT_POSITION =
+ "android.media.intent.extra.ITEM_POSITION";
+
+ /**
+ * Bundle extra: Media item metadata.
+ * <p>
+ * Used with {@link #ACTION_PLAY} or {@link #ACTION_ENQUEUE} to specify metadata
+ * associated with the content of a media item.
+ * </p><p>
+ * The value is a {@link android.os.Bundle} of metadata key-value pairs as defined
+ * in {@link MediaItemMetadata}.
+ * </p>
+ *
+ * @see #ACTION_PLAY
+ * @see #ACTION_ENQUEUE
+ */
+ public static final String EXTRA_ITEM_METADATA =
+ "android.media.intent.extra.ITEM_METADATA";
+
+ /**
+ * Bundle extra: HTTP request headers.
+ * <p>
+ * Used with {@link #ACTION_PLAY} or {@link #ACTION_ENQUEUE} to specify HTTP request
+ * headers to be included when fetching the content indicated by the media
+ * item's data Uri.
+ * </p><p>
+ * This extra may be used to provide authentication tokens and other
+ * parameters to the server separately from the media item's data Uri.
+ * </p><p>
+ * The value is a {@link android.os.Bundle} of string based key-value pairs
+ * that describe the HTTP request headers.
+ * </p>
+ *
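+ * <h3>Example</h3>
+ * <p>
+ * A minimal sketch of attaching request headers to a play request; the header
+ * name and token value are illustrative only and playIntent is assumed to be an
+ * {@link #ACTION_PLAY} intent under construction.
+ * </p><pre>
+ * Bundle headers = new Bundle();
+ * headers.putString("Authorization", "Bearer example-token"); // illustrative value
+ * playIntent.putExtra(MediaControlIntent.EXTRA_ITEM_HTTP_HEADERS, headers);</pre>
+ *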
+ * @see #ACTION_PLAY
+ * @see #ACTION_ENQUEUE
+ */
+ public static final String EXTRA_ITEM_HTTP_HEADERS =
+ "android.media.intent.extra.HTTP_HEADERS";
+
+ /**
+ * Bundle extra: Media item status update receiver.
+ * <p>
+ * Used with {@link #ACTION_PLAY} or {@link #ACTION_ENQUEUE} to specify
+ * a {@link PendingIntent} for a
+ * broadcast receiver that will receive status updates about a particular
+ * media item.
+ * </p><p>
+ * Whenever the status of the media item changes, the media route provider will
+ * send a broadcast to the pending intent with extras that identify the session
+ * to which the item belongs, the session status, the item's id
+ * and the item's updated status.
+ * </p><p>
+ * The same pending intent and broadcast receiver may be shared by any number of
+ * media items since the broadcast intent includes the media session id
+ * and media item id.
+ * </p><p>
+ * The value is a {@link PendingIntent}.
+ * </p>
+ *
+ * <h3>Broadcast extras</h3>
+ * <ul>
+ * <li>{@link #EXTRA_SESSION_ID} <em>(required)</em>: Specifies the session id of
+ * the session to which the item in question belongs.
+ * <li>{@link #EXTRA_SESSION_STATUS} <em>(optional, old implementations may
+ * omit this key)</em>: Specifies the status of the media session.
+ * <li>{@link #EXTRA_ITEM_ID} <em>(required)</em>: Specifies the media item id of the
+ * media item in question.
+ * <li>{@link #EXTRA_ITEM_STATUS} <em>(required)</em>: Specifies the status of the
+ * item as a bundle that can be decoded into a {@link MediaItemStatus} object.
+ * </ul>
+ *
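+ * <h3>Example</h3>
+ * <p>
+ * A minimal sketch of a receiver that decodes these broadcast extras.
+ * ItemStatusReceiver is a hypothetical broadcast receiver declared by the
+ * application and supplied as a {@link PendingIntent} with the play request.
+ * </p><pre>
+ * public class ItemStatusReceiver extends BroadcastReceiver {
+ *     public void onReceive(Context context, Intent intent) {
+ *         String sessionId = intent.getStringExtra(MediaControlIntent.EXTRA_SESSION_ID);
+ *         String itemId = intent.getStringExtra(MediaControlIntent.EXTRA_ITEM_ID);
+ *         MediaItemStatus status = MediaItemStatus.fromBundle(
+ *                 intent.getBundleExtra(MediaControlIntent.EXTRA_ITEM_STATUS));
+ *         // React to major state changes such as PLAYBACK_STATE_FINISHED.
+ *     }
+ * }
+ *
+ * // When building the play request:
+ * PendingIntent receiver = PendingIntent.getBroadcast(context, 0,
+ *         new Intent(context, ItemStatusReceiver.class), 0);
+ * playIntent.putExtra(MediaControlIntent.EXTRA_ITEM_STATUS_UPDATE_RECEIVER, receiver);</pre>
+ *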
+ * @see #ACTION_PLAY
+ * @see #ACTION_ENQUEUE
+ */
+ public static final String EXTRA_ITEM_STATUS_UPDATE_RECEIVER =
+ "android.media.intent.extra.ITEM_STATUS_UPDATE_RECEIVER";
+
+ /**
+ * Bundle extra: Message.
+ * <p>
+ * Used with {@link #ACTION_SEND_MESSAGE}, and included in broadcast intents sent to
+ * {@link #EXTRA_MESSAGE_RECEIVER message receivers} to describe a message between a
+ * session and a media route provider.
+ * </p><p>
+ * The value is a {@link android.os.Bundle}.
+ * </p>
+ */
+ public static final String EXTRA_MESSAGE = "android.media.intent.extra.MESSAGE";
+
+ /**
+ * Integer extra: Error code.
+ * <p>
+ * Used with all media control requests to describe the cause of an error.
+ * This extra may be omitted when the error is unknown.
+ * </p><p>
+ * The value is one of: {@link #ERROR_UNKNOWN}, {@link #ERROR_UNSUPPORTED_OPERATION},
+ * {@link #ERROR_INVALID_SESSION_ID}, {@link #ERROR_INVALID_ITEM_ID}.
+ * </p>
+ */
+ public static final String EXTRA_ERROR_CODE = "android.media.intent.extra.ERROR_CODE";
+
+ /**
+ * Error code: An unknown error occurred.
+ *
+ * @see #EXTRA_ERROR_CODE
+ */
+ public static final int ERROR_UNKNOWN = 0;
+
+ /**
+ * Error code: The operation is not supported.
+ *
+ * @see #EXTRA_ERROR_CODE
+ */
+ public static final int ERROR_UNSUPPORTED_OPERATION = 1;
+
+ /**
+ * Error code: The session id specified in the request was invalid.
+ *
+ * @see #EXTRA_ERROR_CODE
+ */
+ public static final int ERROR_INVALID_SESSION_ID = 2;
+
+ /**
+ * Error code: The item id specified in the request was invalid.
+ *
+ * @see #EXTRA_ERROR_CODE
+ */
+ public static final int ERROR_INVALID_ITEM_ID = 3;
+
+ private MediaControlIntent() {
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemMetadata.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemMetadata.java
new file mode 100644
index 0000000..d52ddb6
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemMetadata.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.os.Bundle;
+
+/**
+ * Constants for specifying metadata about a media item as a {@link Bundle}.
+ * <p>
+ * This class is part of the remote playback protocol described by the
+ * {@link MediaControlIntent MediaControlIntent} class.
+ * </p><p>
+ * Media item metadata is described as a bundle of key/value pairs as defined
+ * in this class. The documentation specifies the type of value associated
+ * with each key.
+ * </p><p>
+ * An application may specify additional custom metadata keys but there is no guarantee
+ * that they will be recognized by the destination.
+ * </p>
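+ * <p>
+ * A minimal sketch of supplying metadata with a play request follows; the titles
+ * and duration are illustrative only and playIntent is assumed to be a
+ * {@link MediaControlIntent#ACTION_PLAY} intent under construction.
+ * </p><pre>
+ * Bundle metadata = new Bundle();
+ * metadata.putString(MediaItemMetadata.KEY_TITLE, "Example Track");
+ * metadata.putString(MediaItemMetadata.KEY_ARTIST, "Example Artist");
+ * metadata.putLong(MediaItemMetadata.KEY_DURATION, 180000);
+ * playIntent.putExtra(MediaControlIntent.EXTRA_ITEM_METADATA, metadata);</pre>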
+ */
+public final class MediaItemMetadata {
+ /*
+ * Note: MediaMetadataRetriever also defines a collection of metadata keys that can be
+ * retrieved from a content stream although the representation is somewhat different here
+ * since we are sending the data to a remote endpoint.
+ */
+
+ private MediaItemMetadata() {
+ }
+
+ /**
+ * String key: Album artist name.
+ * <p>
+ * The value is a string suitable for display.
+ * </p>
+ */
+ public static final String KEY_ALBUM_ARTIST = "android.media.metadata.ALBUM_ARTIST";
+
+ /**
+ * String key: Album title.
+ * <p>
+ * The value is a string suitable for display.
+ * </p>
+ */
+ public static final String KEY_ALBUM_TITLE = "android.media.metadata.ALBUM_TITLE";
+
+ /**
+ * String key: Artwork Uri.
+ * <p>
+ * The value is a string URI for an image file associated with the media item,
+ * such as album or cover art.
+ * </p>
+ */
+ public static final String KEY_ARTWORK_URI = "android.media.metadata.ARTWORK_URI";
+
+ /**
+ * String key: Artist name.
+ * <p>
+ * The value is a string suitable for display.
+ * </p>
+ */
+ public static final String KEY_ARTIST = "android.media.metadata.ARTIST";
+
+ /**
+ * String key: Author name.
+ * <p>
+ * The value is a string suitable for display.
+ * </p>
+ */
+ public static final String KEY_AUTHOR = "android.media.metadata.AUTHOR";
+
+ /**
+ * String key: Composer name.
+ * <p>
+ * The value is a string suitable for display.
+ * </p>
+ */
+ public static final String KEY_COMPOSER = "android.media.metadata.COMPOSER";
+
+ /**
+ * String key: Track title.
+ * <p>
+ * The value is a string suitable for display.
+ * </p>
+ */
+ public static final String KEY_TITLE = "android.media.metadata.TITLE";
+
+ /**
+ * Integer key: Year of publication.
+ * <p>
+ * The value is an integer year number.
+ * </p>
+ */
+ public static final String KEY_YEAR = "android.media.metadata.YEAR";
+
+ /**
+ * Integer key: Track number (such as a track on a CD).
+ * <p>
+ * The value is a one-based integer track number.
+ * </p>
+ */
+ public static final String KEY_TRACK_NUMBER = "android.media.metadata.TRACK_NUMBER";
+
+ /**
+ * Integer key: Disc number within a collection.
+ * <p>
+ * The value is a one-based integer disc number.
+ * </p>
+ */
+ public static final String KEY_DISC_NUMBER = "android.media.metadata.DISC_NUMBER";
+
+ /**
+ * Long key: Item playback duration in milliseconds.
+ * <p>
+ * The value is a <code>long</code> number of milliseconds.
+ * </p><p>
+ * The duration metadata is only a hint to enable a remote media player to
+ * guess the duration of the content before it actually opens the media stream.
+ * The remote media player should still determine the actual content duration from
+ * the media stream itself independent of the value that may be specified by this key.
+ * </p>
+ */
+ public static final String KEY_DURATION = "android.media.metadata.DURATION";
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
new file mode 100644
index 0000000..90ea2d5
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaItemStatus.java
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.app.PendingIntent;
+import android.os.Bundle;
+import android.os.SystemClock;
+import android.support.v4.util.TimeUtils;
+
+/**
+ * Describes the playback status of a media item.
+ * <p>
+ * This class is part of the remote playback protocol described by the
+ * {@link MediaControlIntent MediaControlIntent} class.
+ * </p><p>
+ * As a media item is played, it transitions through a sequence of states including:
+ * {@link #PLAYBACK_STATE_PENDING pending}, {@link #PLAYBACK_STATE_BUFFERING buffering},
+ * {@link #PLAYBACK_STATE_PLAYING playing}, {@link #PLAYBACK_STATE_PAUSED paused},
+ * {@link #PLAYBACK_STATE_FINISHED finished}, {@link #PLAYBACK_STATE_CANCELED canceled},
+ * {@link #PLAYBACK_STATE_INVALIDATED invalidated}, and
+ * {@link #PLAYBACK_STATE_ERROR error}. Refer to the documentation of each state
+ * for an explanation of its meaning.
+ * </p><p>
+ * While the item is playing, the playback status may also include progress information
+ * about the {@link #getContentPosition content position} and
+ * {@link #getContentDuration content duration} although not all route destinations
+ * will report it.
+ * </p><p>
+ * To monitor playback status, the application should supply a {@link PendingIntent} to use as the
+ * {@link MediaControlIntent#EXTRA_ITEM_STATUS_UPDATE_RECEIVER item status update receiver}
+ * for a given {@link MediaControlIntent#ACTION_PLAY playback request}. Note that
+ * the status update receiver will only be invoked for major status changes such as a
+ * transition from playing to finished.
+ * </p><p class="note">
+ * The status update receiver will not be invoked for minor progress updates such as
+ * changes to playback position or duration. If the application wants to monitor
+ * playback progress, then it must use the
+ * {@link MediaControlIntent#ACTION_GET_STATUS get status request} to poll for changes
+ * periodically and estimate the playback position while playing. Note that there may
+ * be a significant power impact to polling, so the application is advised to poll
+ * only while the screen is on and no more than about once every 5 seconds or so.
+ * </p><p>
+ * This object is immutable once created using a {@link Builder} instance.
+ * </p>
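+ * <p>
+ * A minimal sketch of how a media route provider might report status using the
+ * {@link Builder}; the position and duration values are illustrative only.
+ * </p><pre>
+ * MediaItemStatus status = new MediaItemStatus.Builder(MediaItemStatus.PLAYBACK_STATE_PLAYING)
+ *         .setContentPosition(30000)
+ *         .setContentDuration(180000)
+ *         .build();
+ * Bundle result = new Bundle();
+ * result.putBundle(MediaControlIntent.EXTRA_ITEM_STATUS, status.asBundle());</pre>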
+ */
+public final class MediaItemStatus {
+ static final String KEY_TIMESTAMP = "timestamp";
+ static final String KEY_PLAYBACK_STATE = "playbackState";
+ static final String KEY_CONTENT_POSITION = "contentPosition";
+ static final String KEY_CONTENT_DURATION = "contentDuration";
+ static final String KEY_EXTRAS = "extras";
+
+ final Bundle mBundle;
+
+ /**
+ * Playback state: Pending.
+ * <p>
+ * Indicates that the media item has not yet started playback but will be played eventually.
+ * </p>
+ */
+ public static final int PLAYBACK_STATE_PENDING = 0;
+
+ /**
+ * Playback state: Playing.
+ * <p>
+ * Indicates that the media item is currently playing.
+ * </p>
+ */
+ public static final int PLAYBACK_STATE_PLAYING = 1;
+
+ /**
+ * Playback state: Paused.
+ * <p>
+ * Indicates that playback of the media item has been paused. Playback can be
+ * resumed using the {@link MediaControlIntent#ACTION_RESUME resume} action.
+ * </p>
+ */
+ public static final int PLAYBACK_STATE_PAUSED = 2;
+
+ /**
+ * Playback state: Buffering or seeking to a new position.
+ * <p>
+ * Indicates that the media item has been temporarily interrupted
+ * to fetch more content. Playback will continue automatically
+ * when enough content has been buffered.
+ * </p>
+ */
+ public static final int PLAYBACK_STATE_BUFFERING = 3;
+
+ /**
+ * Playback state: Finished.
+ * <p>
+ * Indicates that the media item played to the end of the content and finished normally.
+ * </p><p>
+ * A finished media item cannot be resumed. To play the content again, the application
+ * must send a new {@link MediaControlIntent#ACTION_PLAY play} or
+ * {@link MediaControlIntent#ACTION_ENQUEUE enqueue} action.
+ * </p>
+ */
+ public static final int PLAYBACK_STATE_FINISHED = 4;
+
+ /**
+ * Playback state: Canceled.
+ * <p>
+ * Indicates that the media item was explicitly removed from the queue by the
+ * application. Items may be canceled and removed from the queue using
+ * the {@link MediaControlIntent#ACTION_REMOVE remove} or
+ * {@link MediaControlIntent#ACTION_STOP stop} action or by issuing
+ * another {@link MediaControlIntent#ACTION_PLAY play} action that has the
+ * side-effect of clearing the queue.
+ * </p><p>
+ * A canceled media item cannot be resumed. To play the content again, the
+ * application must send a new {@link MediaControlIntent#ACTION_PLAY play} or
+ * {@link MediaControlIntent#ACTION_ENQUEUE enqueue} action.
+ * </p>
+ */
+ public static final int PLAYBACK_STATE_CANCELED = 5;
+
+ /**
+ * Playback state: Invalidated.
+ * <p>
+ * Indicates that the media item was invalidated permanently and involuntarily.
+ * This state is used to indicate that the media item was invalidated and removed
+ * from the queue because the session to which it belongs was invalidated
+ * (typically by another application taking control of the route).
+ * </p><p>
+ * When invalidation occurs, the application should generally wait for the user
+ * to perform an explicit action, such as clicking on a play button in the UI,
+ * before creating a new media session to avoid unnecessarily interrupting
+ * another application that may have just started using the route.
+ * </p><p>
+ * An invalidated media item cannot be resumed. To play the content again, the application
+ * must send a new {@link MediaControlIntent#ACTION_PLAY play} or
+ * {@link MediaControlIntent#ACTION_ENQUEUE enqueue} action.
+ * </p>
+ */
+ public static final int PLAYBACK_STATE_INVALIDATED = 6;
+
+ /**
+ * Playback state: Playback halted or aborted due to an error.
+ * <p>
+ * Examples of errors are no network connectivity when attempting to retrieve content
+ * from a server, or expired user credentials when trying to play subscription-based
+ * content.
+ * </p><p>
+ * A media item in the error state cannot be resumed. To play the content again,
+ * the application must send a new {@link MediaControlIntent#ACTION_PLAY play} or
+ * {@link MediaControlIntent#ACTION_ENQUEUE enqueue} action.
+ * </p>
+ */
+ public static final int PLAYBACK_STATE_ERROR = 7;
+
+ /**
+ * Integer extra: HTTP status code.
+ * <p>
+ * Specifies the HTTP status code that was encountered when the content
+ * was requested after all redirects were followed. This key only needs to be
+ * specified when the content Uri uses the HTTP or HTTPS scheme and an error
+ * occurred. This key may be omitted if the content was able to be played
+ * successfully; there is no need to report a 200 (OK) status code.
+ * </p><p>
+ * The value is an integer HTTP status code, such as 401 (Unauthorized),
+ * 404 (Not Found), or 500 (Server Error), or 0 if none.
+ * </p>
+ */
+ public static final String EXTRA_HTTP_STATUS_CODE =
+ "android.media.status.extra.HTTP_STATUS_CODE";
+
+ /**
+ * Bundle extra: HTTP response headers.
+ * <p>
+ * Specifies the HTTP response headers that were returned when the content was
+ * requested from the network. The headers may include additional information
+ * about the content or any error conditions that were encountered while
+ * trying to fetch the content.
+ * </p><p>
+ * The value is a {@link android.os.Bundle} of string based key-value pairs
+ * that describe the HTTP response headers.
+ * </p>
+ */
+ public static final String EXTRA_HTTP_RESPONSE_HEADERS =
+ "android.media.status.extra.HTTP_RESPONSE_HEADERS";
+
+ MediaItemStatus(Bundle bundle) {
+ mBundle = bundle;
+ }
+
+ /**
+ * Gets the timestamp associated with the status information in
+ * milliseconds since boot in the {@link SystemClock#elapsedRealtime} time base.
+ *
+ * @return The status timestamp in the {@link SystemClock#elapsedRealtime()} time base.
+ */
+ public long getTimestamp() {
+ return mBundle.getLong(KEY_TIMESTAMP);
+ }
+
+ /**
+ * Gets the playback state of the media item.
+ *
+ * @return The playback state. One of {@link #PLAYBACK_STATE_PENDING},
+ * {@link #PLAYBACK_STATE_PLAYING}, {@link #PLAYBACK_STATE_PAUSED},
+ * {@link #PLAYBACK_STATE_BUFFERING}, {@link #PLAYBACK_STATE_FINISHED},
+ * {@link #PLAYBACK_STATE_CANCELED}, {@link #PLAYBACK_STATE_INVALIDATED},
+ * or {@link #PLAYBACK_STATE_ERROR}.
+ */
+ public int getPlaybackState() {
+ return mBundle.getInt(KEY_PLAYBACK_STATE, PLAYBACK_STATE_ERROR);
+ }
+
+ /**
+ * Gets the content playback position as a long integer number of milliseconds
+ * from the beginning of the content.
+ *
+ * @return The content playback position in milliseconds, or -1 if unknown.
+ */
+ public long getContentPosition() {
+ return mBundle.getLong(KEY_CONTENT_POSITION, -1);
+ }
+
+ /**
+ * Gets the total duration of the content to be played as a long integer number of
+ * milliseconds.
+ *
+ * @return The content duration in milliseconds, or -1 if unknown.
+ */
+ public long getContentDuration() {
+ return mBundle.getLong(KEY_CONTENT_DURATION, -1);
+ }
+
+ /**
+ * Gets a bundle of extras for this status object.
+ * The extras will be ignored by the media router but they may be used
+ * by applications.
+ */
+ public Bundle getExtras() {
+ return mBundle.getBundle(KEY_EXTRAS);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("MediaItemStatus{ ");
+ result.append("timestamp=");
+ TimeUtils.formatDuration(SystemClock.elapsedRealtime() - getTimestamp(), result);
+ result.append(" ms ago");
+ result.append(", playbackState=").append(playbackStateToString(getPlaybackState()));
+ result.append(", contentPosition=").append(getContentPosition());
+ result.append(", contentDuration=").append(getContentDuration());
+ result.append(", extras=").append(getExtras());
+ result.append(" }");
+ return result.toString();
+ }
+
+ private static String playbackStateToString(int playbackState) {
+ switch (playbackState) {
+ case PLAYBACK_STATE_PENDING:
+ return "pending";
+ case PLAYBACK_STATE_BUFFERING:
+ return "buffering";
+ case PLAYBACK_STATE_PLAYING:
+ return "playing";
+ case PLAYBACK_STATE_PAUSED:
+ return "paused";
+ case PLAYBACK_STATE_FINISHED:
+ return "finished";
+ case PLAYBACK_STATE_CANCELED:
+ return "canceled";
+ case PLAYBACK_STATE_INVALIDATED:
+ return "invalidated";
+ case PLAYBACK_STATE_ERROR:
+ return "error";
+ }
+ return Integer.toString(playbackState);
+ }
+
+ /**
+ * Converts this object to a bundle for serialization.
+ *
+ * @return The contents of the object represented as a bundle.
+ */
+ public Bundle asBundle() {
+ return mBundle;
+ }
+
+ /**
+ * Creates an instance from a bundle.
+ *
+ * @param bundle The bundle, or null if none.
+ * @return The new instance, or null if the bundle was null.
+ */
+ public static MediaItemStatus fromBundle(Bundle bundle) {
+ return bundle != null ? new MediaItemStatus(bundle) : null;
+ }
+
+ /**
+ * Builder for {@link MediaItemStatus media item status objects}.
+ */
+ public static final class Builder {
+ private final Bundle mBundle;
+
+ /**
+ * Creates a media item status builder using the current time as the
+ * reference timestamp.
+ *
+ * @param playbackState The item playback state.
+ */
+ public Builder(int playbackState) {
+ mBundle = new Bundle();
+ setTimestamp(SystemClock.elapsedRealtime());
+ setPlaybackState(playbackState);
+ }
+
+ /**
+ * Creates a media item status builder whose initial contents are
+ * copied from an existing status.
+ */
+ public Builder(MediaItemStatus status) {
+ if (status == null) {
+ throw new IllegalArgumentException("status must not be null");
+ }
+
+ mBundle = new Bundle(status.mBundle);
+ }
+
+ /**
+ * Sets the timestamp associated with the status information in
+ * milliseconds since boot in the {@link SystemClock#elapsedRealtime} time base.
+ */
+ public Builder setTimestamp(long elapsedRealtimeTimestamp) {
+ mBundle.putLong(KEY_TIMESTAMP, elapsedRealtimeTimestamp);
+ return this;
+ }
+
+ /**
+ * Sets the playback state of the media item.
+ */
+ public Builder setPlaybackState(int playbackState) {
+ mBundle.putInt(KEY_PLAYBACK_STATE, playbackState);
+ return this;
+ }
+
+ /**
+ * Sets the content playback position as a long integer number of milliseconds
+ * from the beginning of the content.
+ */
+ public Builder setContentPosition(long positionMilliseconds) {
+ mBundle.putLong(KEY_CONTENT_POSITION, positionMilliseconds);
+ return this;
+ }
+
+ /**
+ * Sets the total duration of the content to be played as a long integer number
+ * of milliseconds.
+ */
+ public Builder setContentDuration(long durationMilliseconds) {
+ mBundle.putLong(KEY_CONTENT_DURATION, durationMilliseconds);
+ return this;
+ }
+
+ /**
+ * Sets a bundle of extras for this status object.
+ * The extras will be ignored by the media router but they may be used
+ * by applications.
+ */
+ public Builder setExtras(Bundle extras) {
+ mBundle.putBundle(KEY_EXTRAS, extras);
+ return this;
+ }
+
+ /**
+ * Builds the {@link MediaItemStatus media item status object}.
+ */
+ public MediaItemStatus build() {
+ return new MediaItemStatus(mBundle);
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteDescriptor.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteDescriptor.java
new file mode 100644
index 0000000..6bc84fc
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteDescriptor.java
@@ -0,0 +1,693 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.support.mediarouter.media;
+
+import android.content.IntentFilter;
+import android.content.IntentSender;
+import android.net.Uri;
+import android.os.Bundle;
+import android.text.TextUtils;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Describes the properties of a route.
+ * <p>
+ * Each route is uniquely identified by an opaque id string. This token
+ * may take any form as long as it is unique within the media route provider.
+ * </p><p>
+ * This object is immutable once created using a {@link Builder} instance.
+ * </p>
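+ * <p>
+ * A minimal sketch of how a media route provider might describe a route that
+ * supports remote playback; the id, name and description are illustrative, and
+ * the Builder methods used here (setDescription, addControlFilter) are assumed
+ * to be the ones defined by this class's {@link Builder}.
+ * </p><pre>
+ * IntentFilter playFilter = new IntentFilter();
+ * playFilter.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ * playFilter.addAction(MediaControlIntent.ACTION_PLAY);
+ * playFilter.addDataScheme("http");
+ *
+ * MediaRouteDescriptor descriptor = new MediaRouteDescriptor.Builder("exampleRouteId", "Example TV")
+ *         .setDescription("Example remote playback route")
+ *         .addControlFilter(playFilter)
+ *         .build();</pre>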
+ */
+public final class MediaRouteDescriptor {
+ static final String KEY_ID = "id";
+ static final String KEY_GROUP_MEMBER_IDS = "groupMemberIds";
+ static final String KEY_NAME = "name";
+ static final String KEY_DESCRIPTION = "status";
+ static final String KEY_ICON_URI = "iconUri";
+ static final String KEY_ENABLED = "enabled";
+ static final String KEY_CONNECTING = "connecting";
+ static final String KEY_CONNECTION_STATE = "connectionState";
+ static final String KEY_CONTROL_FILTERS = "controlFilters";
+ static final String KEY_PLAYBACK_TYPE = "playbackType";
+ static final String KEY_PLAYBACK_STREAM = "playbackStream";
+ static final String KEY_DEVICE_TYPE = "deviceType";
+ static final String KEY_VOLUME = "volume";
+ static final String KEY_VOLUME_MAX = "volumeMax";
+ static final String KEY_VOLUME_HANDLING = "volumeHandling";
+ static final String KEY_PRESENTATION_DISPLAY_ID = "presentationDisplayId";
+ static final String KEY_EXTRAS = "extras";
+ static final String KEY_CAN_DISCONNECT = "canDisconnect";
+ static final String KEY_SETTINGS_INTENT = "settingsIntent";
+ static final String KEY_MIN_CLIENT_VERSION = "minClientVersion";
+ static final String KEY_MAX_CLIENT_VERSION = "maxClientVersion";
+
+ final Bundle mBundle;
+ List<IntentFilter> mControlFilters;
+
+ MediaRouteDescriptor(Bundle bundle, List<IntentFilter> controlFilters) {
+ mBundle = bundle;
+ mControlFilters = controlFilters;
+ }
+
+ /**
+ * Gets the unique id of the route.
+ * <p>
+ * The route id associated with a route descriptor functions as a stable
+ * identifier for the route and must be unique among all routes offered
+ * by the provider.
+ * </p>
+ */
+ public String getId() {
+ return mBundle.getString(KEY_ID);
+ }
+
+ /**
+ * Gets the group member ids of the route.
+ * <p>
+ * A route descriptor that has one or more group member route ids
+ * represents a route group. A member route may belong to another group.
+ * </p>
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public List<String> getGroupMemberIds() {
+ return mBundle.getStringArrayList(KEY_GROUP_MEMBER_IDS);
+ }
+
+ /**
+ * Gets the user-visible name of the route.
+ * <p>
+ * The route name identifies the destination represented by the route.
+ * It may be a user-supplied name, an alias, or a device serial number.
+ * </p>
+ */
+ public String getName() {
+ return mBundle.getString(KEY_NAME);
+ }
+
+ /**
+ * Gets the user-visible description of the route.
+ * <p>
+ * The route description describes the kind of destination represented by the route.
+ * It may be a user-supplied string, a model number, or the brand of the device.
+ * </p>
+ */
+ public String getDescription() {
+ return mBundle.getString(KEY_DESCRIPTION);
+ }
+
+ /**
+ * Gets the URI of the icon representing this route.
+ * <p>
+ * This icon will be used in picker UIs if available.
+ * </p>
+ */
+ public Uri getIconUri() {
+ String iconUri = mBundle.getString(KEY_ICON_URI);
+ return iconUri == null ? null : Uri.parse(iconUri);
+ }
+
+ /**
+ * Gets whether the route is enabled.
+ */
+ public boolean isEnabled() {
+ return mBundle.getBoolean(KEY_ENABLED, true);
+ }
+
+ /**
+ * Gets whether the route is connecting.
+ * @deprecated Use {@link #getConnectionState} instead
+ */
+ @Deprecated
+ public boolean isConnecting() {
+ return mBundle.getBoolean(KEY_CONNECTING, false);
+ }
+
+ /**
+ * Gets the connection state of the route.
+ *
+ * @return The connection state of this route:
+ * {@link MediaRouter.RouteInfo#CONNECTION_STATE_DISCONNECTED},
+ * {@link MediaRouter.RouteInfo#CONNECTION_STATE_CONNECTING}, or
+ * {@link MediaRouter.RouteInfo#CONNECTION_STATE_CONNECTED}.
+ */
+ public int getConnectionState() {
+ return mBundle.getInt(KEY_CONNECTION_STATE,
+ MediaRouter.RouteInfo.CONNECTION_STATE_DISCONNECTED);
+ }
+
+ /**
+ * Gets whether the route can be disconnected without stopping playback.
+ * <p>
+ * The route can normally be disconnected without stopping playback when
+ * the destination device on the route is connected to two or more source
+ * devices. The route provider should update the route immediately when the
+ * number of connected devices changes.
+ * </p><p>
+ * To specify that the route should disconnect without stopping use
+ * {@link MediaRouter#unselect(int)} with
+ * {@link MediaRouter#UNSELECT_REASON_DISCONNECTED}.
+ * </p>
+ */
+ public boolean canDisconnectAndKeepPlaying() {
+ return mBundle.getBoolean(KEY_CAN_DISCONNECT, false);
+ }
+
+ /**
+ * Gets an {@link IntentSender} for starting a settings activity for this
+ * route. The activity may have specific route settings or general settings
+ * for the connected device or route provider.
+ *
+ * @return An {@link IntentSender} to start a settings activity.
+ */
+ public IntentSender getSettingsActivity() {
+ return mBundle.getParcelable(KEY_SETTINGS_INTENT);
+ }
+
+ /**
+ * Gets the route's {@link MediaControlIntent media control intent} filters.
+ */
+ public List<IntentFilter> getControlFilters() {
+ ensureControlFilters();
+ return mControlFilters;
+ }
+
+ void ensureControlFilters() {
+ if (mControlFilters == null) {
+ mControlFilters = mBundle.<IntentFilter>getParcelableArrayList(KEY_CONTROL_FILTERS);
+ if (mControlFilters == null) {
+ mControlFilters = Collections.<IntentFilter>emptyList();
+ }
+ }
+ }
+
+ /**
+ * Gets the type of playback associated with this route.
+ *
+ * @return The type of playback associated with this route:
+ * {@link MediaRouter.RouteInfo#PLAYBACK_TYPE_LOCAL} or
+ * {@link MediaRouter.RouteInfo#PLAYBACK_TYPE_REMOTE}.
+ */
+ public int getPlaybackType() {
+ return mBundle.getInt(KEY_PLAYBACK_TYPE, MediaRouter.RouteInfo.PLAYBACK_TYPE_REMOTE);
+ }
+
+ /**
+ * Gets the route's playback stream.
+ */
+ public int getPlaybackStream() {
+ return mBundle.getInt(KEY_PLAYBACK_STREAM, -1);
+ }
+
+ /**
+ * Gets the type of the receiver device associated with this route.
+ *
+ * @return The type of the receiver device associated with this route:
+ * {@link MediaRouter.RouteInfo#DEVICE_TYPE_TV} or
+ * {@link MediaRouter.RouteInfo#DEVICE_TYPE_SPEAKER}.
+ */
+ public int getDeviceType() {
+ return mBundle.getInt(KEY_DEVICE_TYPE);
+ }
+
+ /**
+ * Gets the route's current volume, or 0 if unknown.
+ */
+ public int getVolume() {
+ return mBundle.getInt(KEY_VOLUME);
+ }
+
+ /**
+ * Gets the route's maximum volume, or 0 if unknown.
+ */
+ public int getVolumeMax() {
+ return mBundle.getInt(KEY_VOLUME_MAX);
+ }
+
+ /**
+ * Gets information about how volume is handled on the route.
+ *
+ * @return How volume is handled on the route:
+ * {@link MediaRouter.RouteInfo#PLAYBACK_VOLUME_FIXED} or
+ * {@link MediaRouter.RouteInfo#PLAYBACK_VOLUME_VARIABLE}.
+ */
+ public int getVolumeHandling() {
+ return mBundle.getInt(KEY_VOLUME_HANDLING,
+ MediaRouter.RouteInfo.PLAYBACK_VOLUME_FIXED);
+ }
+
+ /**
+ * Gets the route's presentation display id, or -1 if none.
+ */
+ public int getPresentationDisplayId() {
+ return mBundle.getInt(
+ KEY_PRESENTATION_DISPLAY_ID, MediaRouter.RouteInfo.PRESENTATION_DISPLAY_ID_NONE);
+ }
+
+ /**
+ * Gets a bundle of extras for this route descriptor.
+ * The extras will be ignored by the media router but they may be used
+ * by applications.
+ */
+ public Bundle getExtras() {
+ return mBundle.getBundle(KEY_EXTRAS);
+ }
+
+ /**
+ * Gets the minimum client version required for this route.
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public int getMinClientVersion() {
+ return mBundle.getInt(KEY_MIN_CLIENT_VERSION,
+ MediaRouteProviderProtocol.CLIENT_VERSION_START);
+ }
+
+ /**
+ * Gets the maximum client version required for this route.
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public int getMaxClientVersion() {
+ return mBundle.getInt(KEY_MAX_CLIENT_VERSION, Integer.MAX_VALUE);
+ }
+
+ /**
+ * Returns true if the route descriptor has all of the required fields.
+ */
+ public boolean isValid() {
+ ensureControlFilters();
+ if (TextUtils.isEmpty(getId())
+ || TextUtils.isEmpty(getName())
+ || mControlFilters.contains(null)) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("MediaRouteDescriptor{ ");
+ result.append("id=").append(getId());
+ result.append(", groupMemberIds=").append(getGroupMemberIds());
+ result.append(", name=").append(getName());
+ result.append(", description=").append(getDescription());
+ result.append(", iconUri=").append(getIconUri());
+ result.append(", isEnabled=").append(isEnabled());
+ result.append(", isConnecting=").append(isConnecting());
+ result.append(", connectionState=").append(getConnectionState());
+ result.append(", controlFilters=").append(Arrays.toString(getControlFilters().toArray()));
+ result.append(", playbackType=").append(getPlaybackType());
+ result.append(", playbackStream=").append(getPlaybackStream());
+ result.append(", deviceType=").append(getDeviceType());
+ result.append(", volume=").append(getVolume());
+ result.append(", volumeMax=").append(getVolumeMax());
+ result.append(", volumeHandling=").append(getVolumeHandling());
+ result.append(", presentationDisplayId=").append(getPresentationDisplayId());
+ result.append(", extras=").append(getExtras());
+ result.append(", isValid=").append(isValid());
+ result.append(", minClientVersion=").append(getMinClientVersion());
+ result.append(", maxClientVersion=").append(getMaxClientVersion());
+ result.append(" }");
+ return result.toString();
+ }
+
+ /**
+ * Converts this object to a bundle for serialization.
+ *
+ * @return The contents of the object represented as a bundle.
+ */
+ public Bundle asBundle() {
+ return mBundle;
+ }
+
+ /**
+ * Creates an instance from a bundle.
+ *
+ * @param bundle The bundle, or null if none.
+ * @return The new instance, or null if the bundle was null.
+ */
+ public static MediaRouteDescriptor fromBundle(Bundle bundle) {
+ return bundle != null ? new MediaRouteDescriptor(bundle, null) : null;
+ }
+
+ /**
+ * Builder for {@link MediaRouteDescriptor media route descriptors}.
+ */
+ public static final class Builder {
+ private final Bundle mBundle;
+ private ArrayList<String> mGroupMemberIds;
+ private ArrayList<IntentFilter> mControlFilters;
+
+ /**
+ * Creates a media route descriptor builder.
+ *
+ * @param id The unique id of the route.
+ * @param name The user-visible name of the route.
+ */
+ public Builder(String id, String name) {
+ mBundle = new Bundle();
+ setId(id);
+ setName(name);
+ }
+
+ /**
+ * Creates a media route descriptor builder whose initial contents are
+ * copied from an existing descriptor.
+ */
+ public Builder(MediaRouteDescriptor descriptor) {
+ if (descriptor == null) {
+ throw new IllegalArgumentException("descriptor must not be null");
+ }
+
+ mBundle = new Bundle(descriptor.mBundle);
+
+ descriptor.ensureControlFilters();
+ if (!descriptor.mControlFilters.isEmpty()) {
+ mControlFilters = new ArrayList<IntentFilter>(descriptor.mControlFilters);
+ }
+ }
+
+ /**
+ * Sets the unique id of the route.
+ * <p>
+ * The route id associated with a route descriptor functions as a stable
+ * identifier for the route and must be unique among all routes offered
+ * by the provider.
+ * </p>
+ */
+ public Builder setId(String id) {
+ mBundle.putString(KEY_ID, id);
+ return this;
+ }
+
+ /**
+ * Adds a group member id of the route.
+ * <p>
+ * A route descriptor that has one or more group member route ids
+ * represents a route group. A member route may belong to another group.
+ * </p>
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public Builder addGroupMemberId(String groupMemberId) {
+ if (TextUtils.isEmpty(groupMemberId)) {
+ throw new IllegalArgumentException("groupMemberId must not be empty");
+ }
+
+ if (mGroupMemberIds == null) {
+ mGroupMemberIds = new ArrayList<>();
+ }
+ if (!mGroupMemberIds.contains(groupMemberId)) {
+ mGroupMemberIds.add(groupMemberId);
+ }
+ return this;
+ }
+
+ /**
+ * Adds a list of group member ids of the route.
+ * <p>
+ * A route descriptor that has one or more group member route ids
+ * represents a route group. A member route may belong to another group.
+ * </p>
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public Builder addGroupMemberIds(Collection<String> groupMemberIds) {
+ if (groupMemberIds == null) {
+ throw new IllegalArgumentException("groupMemberIds must not be null");
+ }
+
+ if (!groupMemberIds.isEmpty()) {
+ for (String groupMemberId : groupMemberIds) {
+ addGroupMemberId(groupMemberId);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Sets the user-visible name of the route.
+ * <p>
+ * The route name identifies the destination represented by the route.
+ * It may be a user-supplied name, an alias, or device serial number.
+ * </p>
+ */
+ public Builder setName(String name) {
+ mBundle.putString(KEY_NAME, name);
+ return this;
+ }
+
+ /**
+ * Sets the user-visible description of the route.
+ * <p>
+ * The route description describes the kind of destination represented by the route.
+ * It may be a user-supplied string, a model number, or the brand of the device.
+ * </p>
+ */
+ public Builder setDescription(String description) {
+ mBundle.putString(KEY_DESCRIPTION, description);
+ return this;
+ }
+
+ /**
+ * Sets the URI of the icon representing this route.
+ * <p>
+ * This icon will be used in picker UIs if available.
+ * </p><p>
+ * The URI must be one of the following formats:
+ * <ul>
+ * <li>content ({@link android.content.ContentResolver#SCHEME_CONTENT})</li>
+ * <li>android.resource ({@link android.content.ContentResolver#SCHEME_ANDROID_RESOURCE})
+ * </li>
+ * <li>file ({@link android.content.ContentResolver#SCHEME_FILE})</li>
+ * </ul>
+ * </p>
+ */
+ public Builder setIconUri(Uri iconUri) {
+ if (iconUri == null) {
+ throw new IllegalArgumentException("iconUri must not be null");
+ }
+ mBundle.putString(KEY_ICON_URI, iconUri.toString());
+ return this;
+ }
+
+ /**
+ * Sets whether the route is enabled.
+ * <p>
+ * Disabled routes represent routes that a route provider knows about, such as paired
+ * Wifi Display receivers, but that are not currently available for use.
+ * </p>
+ */
+ public Builder setEnabled(boolean enabled) {
+ mBundle.putBoolean(KEY_ENABLED, enabled);
+ return this;
+ }
+
+ /**
+ * Sets whether the route is in the process of connecting and is not yet
+ * ready for use.
+ * @deprecated Use {@link #setConnectionState} instead.
+ */
+ @Deprecated
+ public Builder setConnecting(boolean connecting) {
+ mBundle.putBoolean(KEY_CONNECTING, connecting);
+ return this;
+ }
+
+ /**
+ * Sets the route's connection state.
+ *
+ * @param connectionState The connection state of the route:
+ * {@link MediaRouter.RouteInfo#CONNECTION_STATE_DISCONNECTED},
+ * {@link MediaRouter.RouteInfo#CONNECTION_STATE_CONNECTING}, or
+ * {@link MediaRouter.RouteInfo#CONNECTION_STATE_CONNECTED}.
+ */
+ public Builder setConnectionState(int connectionState) {
+ mBundle.putInt(KEY_CONNECTION_STATE, connectionState);
+ return this;
+ }
+
+ /**
+ * Sets whether the route can be disconnected without stopping playback.
+ */
+ public Builder setCanDisconnect(boolean canDisconnect) {
+ mBundle.putBoolean(KEY_CAN_DISCONNECT, canDisconnect);
+ return this;
+ }
+
+ /**
+ * Sets an intent sender for launching the settings activity for this
+ * route.
+ */
+ public Builder setSettingsActivity(IntentSender is) {
+ mBundle.putParcelable(KEY_SETTINGS_INTENT, is);
+ return this;
+ }
+
+ /**
+ * Adds a {@link MediaControlIntent media control intent} filter for the route.
+ */
+ public Builder addControlFilter(IntentFilter filter) {
+ if (filter == null) {
+ throw new IllegalArgumentException("filter must not be null");
+ }
+
+ if (mControlFilters == null) {
+ mControlFilters = new ArrayList<IntentFilter>();
+ }
+ if (!mControlFilters.contains(filter)) {
+ mControlFilters.add(filter);
+ }
+ return this;
+ }
+
+ /**
+ * Adds a list of {@link MediaControlIntent media control intent} filters for the route.
+ */
+ public Builder addControlFilters(Collection<IntentFilter> filters) {
+ if (filters == null) {
+ throw new IllegalArgumentException("filters must not be null");
+ }
+
+ if (!filters.isEmpty()) {
+ for (IntentFilter filter : filters) {
+ addControlFilter(filter);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Sets the route's playback type.
+ *
+ * @param playbackType The playback type of the route:
+ * {@link MediaRouter.RouteInfo#PLAYBACK_TYPE_LOCAL} or
+ * {@link MediaRouter.RouteInfo#PLAYBACK_TYPE_REMOTE}.
+ */
+ public Builder setPlaybackType(int playbackType) {
+ mBundle.putInt(KEY_PLAYBACK_TYPE, playbackType);
+ return this;
+ }
+
+ /**
+ * Sets the route's playback stream.
+ */
+ public Builder setPlaybackStream(int playbackStream) {
+ mBundle.putInt(KEY_PLAYBACK_STREAM, playbackStream);
+ return this;
+ }
+
+ /**
+ * Sets the route's receiver device type.
+ *
+ * @param deviceType The receiver device type of the route:
+ * {@link MediaRouter.RouteInfo#DEVICE_TYPE_TV} or
+ * {@link MediaRouter.RouteInfo#DEVICE_TYPE_SPEAKER}.
+ */
+ public Builder setDeviceType(int deviceType) {
+ mBundle.putInt(KEY_DEVICE_TYPE, deviceType);
+ return this;
+ }
+
+ /**
+ * Sets the route's current volume, or 0 if unknown.
+ */
+ public Builder setVolume(int volume) {
+ mBundle.putInt(KEY_VOLUME, volume);
+ return this;
+ }
+
+ /**
+ * Sets the route's maximum volume, or 0 if unknown.
+ */
+ public Builder setVolumeMax(int volumeMax) {
+ mBundle.putInt(KEY_VOLUME_MAX, volumeMax);
+ return this;
+ }
+
+ /**
+ * Sets the route's volume handling.
+ *
+ * @param volumeHandling how volume is handled on the route:
+ * {@link MediaRouter.RouteInfo#PLAYBACK_VOLUME_FIXED} or
+ * {@link MediaRouter.RouteInfo#PLAYBACK_VOLUME_VARIABLE}.
+ */
+ public Builder setVolumeHandling(int volumeHandling) {
+ mBundle.putInt(KEY_VOLUME_HANDLING, volumeHandling);
+ return this;
+ }
+
+ /**
+ * Sets the route's presentation display id, or -1 if none.
+ */
+ public Builder setPresentationDisplayId(int presentationDisplayId) {
+ mBundle.putInt(KEY_PRESENTATION_DISPLAY_ID, presentationDisplayId);
+ return this;
+ }
+
+ /**
+ * Sets a bundle of extras for this route descriptor.
+ * The extras will be ignored by the media router but they may be used
+ * by applications.
+ */
+ public Builder setExtras(Bundle extras) {
+ mBundle.putBundle(KEY_EXTRAS, extras);
+ return this;
+ }
+
+ /**
+ * Sets the route's minimum client version.
+ * A router whose version is lower than this will not be able to connect to this route.
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public Builder setMinClientVersion(int minVersion) {
+ mBundle.putInt(KEY_MIN_CLIENT_VERSION, minVersion);
+ return this;
+ }
+
+ /**
+ * Sets the route's maximum client version.
+ * A router whose version is higher than this will not be able to connect to this route.
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public Builder setMaxClientVersion(int maxVersion) {
+ mBundle.putInt(KEY_MAX_CLIENT_VERSION, maxVersion);
+ return this;
+ }
+
+ /**
+ * Builds the {@link MediaRouteDescriptor media route descriptor}.
+ */
+ public MediaRouteDescriptor build() {
+ if (mControlFilters != null) {
+ mBundle.putParcelableArrayList(KEY_CONTROL_FILTERS, mControlFilters);
+ }
+ if (mGroupMemberIds != null) {
+ mBundle.putStringArrayList(KEY_GROUP_MEMBER_IDS, mGroupMemberIds);
+ }
+ return new MediaRouteDescriptor(mBundle, mControlFilters);
+ }
+ }
+}
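
For orientation, the following is a rough sketch of how a route descriptor built by this class is typically assembled and serialized. The route id, name, and the control category are illustrative placeholders, not values from this change, and the snippet assumes the surrounding package's MediaControlIntent and MediaRouter classes plus the usual android.content/android.os imports.

    // Illustrative sketch only: descriptor for a hypothetical remote speaker route.
    IntentFilter liveAudio = new IntentFilter();
    liveAudio.addCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO);

    MediaRouteDescriptor route = new MediaRouteDescriptor.Builder("exampleRouteId", "Example Speaker")
            .setDescription("Living room speaker")
            .addControlFilter(liveAudio)
            .setPlaybackType(MediaRouter.RouteInfo.PLAYBACK_TYPE_REMOTE)
            .setDeviceType(MediaRouter.RouteInfo.DEVICE_TYPE_SPEAKER)
            .setVolumeHandling(MediaRouter.RouteInfo.PLAYBACK_VOLUME_VARIABLE)
            .setVolume(7)
            .setVolumeMax(15)
            .build();

    // Descriptors cross process boundaries as Bundles.
    Bundle asBundle = route.asBundle();
    MediaRouteDescriptor restored = MediaRouteDescriptor.fromBundle(asBundle);  // null-safe

Note that isValid() only checks the id, the name, and the absence of null control filters; the control filters themselves are what the media router matches against an application's selector.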
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteDiscoveryRequest.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteDiscoveryRequest.java
new file mode 100644
index 0000000..039627f
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteDiscoveryRequest.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.support.mediarouter.media;
+
+import android.os.Bundle;
+
+/**
+ * Describes the kinds of routes that the media router would like to discover
+ * and whether to perform active scanning.
+ * <p>
+ * This object is immutable once created.
+ * </p>
+ */
+public final class MediaRouteDiscoveryRequest {
+ private static final String KEY_SELECTOR = "selector";
+ private static final String KEY_ACTIVE_SCAN = "activeScan";
+
+ private final Bundle mBundle;
+ private MediaRouteSelector mSelector;
+
+ /**
+ * Creates a media route discovery request.
+ *
+ * @param selector The route selector that specifies the kinds of routes to discover.
+ * @param activeScan True if active scanning should be performed.
+ */
+ public MediaRouteDiscoveryRequest(MediaRouteSelector selector, boolean activeScan) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+
+ mBundle = new Bundle();
+ mSelector = selector;
+ mBundle.putBundle(KEY_SELECTOR, selector.asBundle());
+ mBundle.putBoolean(KEY_ACTIVE_SCAN, activeScan);
+ }
+
+ private MediaRouteDiscoveryRequest(Bundle bundle) {
+ mBundle = bundle;
+ }
+
+ /**
+ * Gets the route selector that specifies the kinds of routes to discover.
+ */
+ public MediaRouteSelector getSelector() {
+ ensureSelector();
+ return mSelector;
+ }
+
+ private void ensureSelector() {
+ if (mSelector == null) {
+ mSelector = MediaRouteSelector.fromBundle(mBundle.getBundle(KEY_SELECTOR));
+ if (mSelector == null) {
+ mSelector = MediaRouteSelector.EMPTY;
+ }
+ }
+ }
+
+ /**
+ * Returns true if active scanning should be performed.
+ *
+ * @see MediaRouter#CALLBACK_FLAG_PERFORM_ACTIVE_SCAN
+ */
+ public boolean isActiveScan() {
+ return mBundle.getBoolean(KEY_ACTIVE_SCAN);
+ }
+
+ /**
+ * Returns true if the discovery request has all of the required fields.
+ */
+ public boolean isValid() {
+ ensureSelector();
+ return mSelector.isValid();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof MediaRouteDiscoveryRequest) {
+ MediaRouteDiscoveryRequest other = (MediaRouteDiscoveryRequest)o;
+ return getSelector().equals(other.getSelector())
+ && isActiveScan() == other.isActiveScan();
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return getSelector().hashCode() ^ (isActiveScan() ? 1 : 0);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("DiscoveryRequest{ selector=").append(getSelector());
+ result.append(", activeScan=").append(isActiveScan());
+ result.append(", isValid=").append(isValid());
+ result.append(" }");
+ return result.toString();
+ }
+
+ /**
+ * Converts this object to a bundle for serialization.
+ *
+ * @return The contents of the object represented as a bundle.
+ */
+ public Bundle asBundle() {
+ return mBundle;
+ }
+
+ /**
+ * Creates an instance from a bundle.
+ *
+ * @param bundle The bundle, or null if none.
+ * @return The new instance, or null if the bundle was null.
+ */
+ public static MediaRouteDiscoveryRequest fromBundle(Bundle bundle) {
+ return bundle != null ? new MediaRouteDiscoveryRequest(bundle) : null;
+ }
+}
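
A brief usage sketch, assuming the same package's MediaRouteSelector and MediaControlIntent; the chosen category is illustrative only:

    // Illustrative sketch: request discovery of live-audio routes without active scanning.
    MediaRouteSelector selector = new MediaRouteSelector.Builder()
            .addControlCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO)
            .build();
    MediaRouteDiscoveryRequest request = new MediaRouteDiscoveryRequest(selector, /* activeScan= */ false);

    Bundle bundle = request.asBundle();                                    // for IPC to provider services
    MediaRouteDiscoveryRequest restored = MediaRouteDiscoveryRequest.fromBundle(bundle);

Because equals() and hashCode() are defined over the selector plus the active-scan flag, MediaRouteProvider.setDiscoveryRequest() can cheaply skip redundant updates.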
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
new file mode 100644
index 0000000..91a2e1a
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProvider.java
@@ -0,0 +1,447 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.content.ComponentName;
+import android.content.Context;
+import android.content.Intent;
+import android.os.Handler;
+import android.os.Message;
+import android.support.v4.util.ObjectsCompat;
+
+import com.android.support.mediarouter.media.MediaRouter.ControlRequestCallback;
+
+/**
+ * Media route providers are used to publish additional media routes for
+ * use within an application. Media route providers may also be declared
+ * as a service to publish additional media routes to all applications
+ * in the system.
+ * <p>
+ * The purpose of a media route provider is to discover media routes that satisfy
+ * the criteria specified by the current {@link MediaRouteDiscoveryRequest} and publish a
+ * {@link MediaRouteProviderDescriptor} with information about each route by calling
+ * {@link #setDescriptor} to notify the currently registered {@link Callback}.
+ * </p><p>
+ * The provider should watch for changes to the discovery request by implementing
+ * {@link #onDiscoveryRequestChanged} and updating the set of routes that it is
+ * attempting to discover. It should also handle route control requests such
+ * as volume changes or {@link MediaControlIntent media control intents}
+ * by implementing {@link #onCreateRouteController} to return a {@link RouteController}
+ * for a particular route.
+ * </p><p>
+ * A media route provider may be used privately within the scope of a single
+ * application process by calling {@link MediaRouter#addProvider MediaRouter.addProvider}
+ * to add it to the local {@link MediaRouter}. A media route provider may also be made
+ * available globally to all applications by registering a {@link MediaRouteProviderService}
+ * in the provider's manifest. When the media route provider is registered
+ * as a service, all applications that use the media router API will be able to
+ * discover and use the provider's routes without having to install anything else.
+ * </p><p>
+ * This object must only be accessed on the main thread.
+ * </p>
+ */
+public abstract class MediaRouteProvider {
+ static final int MSG_DELIVER_DESCRIPTOR_CHANGED = 1;
+ static final int MSG_DELIVER_DISCOVERY_REQUEST_CHANGED = 2;
+
+ private final Context mContext;
+ private final ProviderMetadata mMetadata;
+ private final ProviderHandler mHandler = new ProviderHandler();
+
+ private Callback mCallback;
+
+ private MediaRouteDiscoveryRequest mDiscoveryRequest;
+ private boolean mPendingDiscoveryRequestChange;
+
+ private MediaRouteProviderDescriptor mDescriptor;
+ private boolean mPendingDescriptorChange;
+
+ /**
+ * Creates a media route provider.
+ *
+ * @param context The context.
+ */
+ public MediaRouteProvider(@NonNull Context context) {
+ this(context, null);
+ }
+
+ MediaRouteProvider(Context context, ProviderMetadata metadata) {
+ if (context == null) {
+ throw new IllegalArgumentException("context must not be null");
+ }
+
+ mContext = context;
+ if (metadata == null) {
+ mMetadata = new ProviderMetadata(new ComponentName(context, getClass()));
+ } else {
+ mMetadata = metadata;
+ }
+ }
+
+ /**
+ * Gets the context of the media route provider.
+ */
+ public final Context getContext() {
+ return mContext;
+ }
+
+ /**
+ * Gets the provider's handler which is associated with the main thread.
+ */
+ public final Handler getHandler() {
+ return mHandler;
+ }
+
+ /**
+ * Gets some metadata about the provider's implementation.
+ */
+ public final ProviderMetadata getMetadata() {
+ return mMetadata;
+ }
+
+ /**
+ * Sets a callback to invoke when the provider's descriptor changes.
+ *
+ * @param callback The callback to use, or null if none.
+ */
+ public final void setCallback(@Nullable Callback callback) {
+ MediaRouter.checkCallingThread();
+ mCallback = callback;
+ }
+
+ /**
+ * Gets the current discovery request which informs the provider about the
+ * kinds of routes to discover and whether to perform active scanning.
+ *
+ * @return The current discovery request, or null if no discovery is needed at this time.
+ *
+ * @see #onDiscoveryRequestChanged
+ */
+ @Nullable
+ public final MediaRouteDiscoveryRequest getDiscoveryRequest() {
+ return mDiscoveryRequest;
+ }
+
+ /**
+ * Sets a discovery request to inform the provider about the kinds of
+ * routes that its clients would like to discover and whether to perform active scanning.
+ *
+ * @param request The discovery request, or null if no discovery is needed at this time.
+ *
+ * @see #onDiscoveryRequestChanged
+ */
+ public final void setDiscoveryRequest(MediaRouteDiscoveryRequest request) {
+ MediaRouter.checkCallingThread();
+
+ if (ObjectsCompat.equals(mDiscoveryRequest, request)) {
+ return;
+ }
+
+ mDiscoveryRequest = request;
+ if (!mPendingDiscoveryRequestChange) {
+ mPendingDiscoveryRequestChange = true;
+ mHandler.sendEmptyMessage(MSG_DELIVER_DISCOVERY_REQUEST_CHANGED);
+ }
+ }
+
+ void deliverDiscoveryRequestChanged() {
+ mPendingDiscoveryRequestChange = false;
+ onDiscoveryRequestChanged(mDiscoveryRequest);
+ }
+
+ /**
+ * Called by the media router when the {@link MediaRouteDiscoveryRequest discovery request}
+ * has changed.
+ * <p>
+ * Whenever an application calls {@link MediaRouter#addCallback} to register
+ * a callback, it also provides a selector to specify the kinds of routes that
+ * it is interested in. The media router combines all of these selectors together
+ * to generate a {@link MediaRouteDiscoveryRequest} and notifies each provider when a change
+ * occurs by calling {@link #setDiscoveryRequest} which posts a message to invoke
+ * this method asynchronously.
+ * </p><p>
+ * The provider should examine the {@link MediaControlIntent media control categories}
+ * in the discovery request's {@link MediaRouteSelector selector} to determine what
+ * kinds of routes it should try to discover and whether it should perform active
+ * or passive scans. In many cases, the provider may be able to save power by
+ * determining that the selector does not contain any categories that it supports
+ * and it can therefore avoid performing any scans at all.
+ * </p>
+ *
+ * @param request The new discovery request, or null if no discovery is needed at this time.
+ *
+ * @see MediaRouter#addCallback
+ */
+ public void onDiscoveryRequestChanged(@Nullable MediaRouteDiscoveryRequest request) {
+ }
+
+ /**
+ * Gets the provider's descriptor.
+ * <p>
+ * The descriptor describes the state of the media route provider and
+ * the routes that it publishes. Watch for changes to the descriptor
+ * by registering a {@link Callback callback} with {@link #setCallback}.
+ * </p>
+ *
+ * @return The media route provider descriptor, or null if none.
+ *
+ * @see Callback#onDescriptorChanged
+ */
+ @Nullable
+ public final MediaRouteProviderDescriptor getDescriptor() {
+ return mDescriptor;
+ }
+
+ /**
+ * Sets the provider's descriptor.
+ * <p>
+ * The provider must call this method to notify the currently registered
+ * {@link Callback callback} about the change to the provider's descriptor.
+ * </p>
+ *
+ * @param descriptor The updated route provider descriptor, or null if none.
+ *
+ * @see Callback#onDescriptorChanged
+ */
+ public final void setDescriptor(@Nullable MediaRouteProviderDescriptor descriptor) {
+ MediaRouter.checkCallingThread();
+
+ if (mDescriptor != descriptor) {
+ mDescriptor = descriptor;
+ if (!mPendingDescriptorChange) {
+ mPendingDescriptorChange = true;
+ mHandler.sendEmptyMessage(MSG_DELIVER_DESCRIPTOR_CHANGED);
+ }
+ }
+ }
+
+ void deliverDescriptorChanged() {
+ mPendingDescriptorChange = false;
+
+ if (mCallback != null) {
+ mCallback.onDescriptorChanged(this, mDescriptor);
+ }
+ }
+
+ /**
+ * Called by the media router to obtain a route controller for a particular route.
+ * <p>
+ * The media router will invoke the {@link RouteController#onRelease} method of the route
+ * controller when it is no longer needed to allow it to free its resources.
+ * </p>
+ *
+ * @param routeId The unique id of the route.
+ * @return The route controller. Returns null if there is no such route or if the route
+ * cannot be controlled using the route controller interface.
+ */
+ @Nullable
+ public RouteController onCreateRouteController(@NonNull String routeId) {
+ if (routeId == null) {
+ throw new IllegalArgumentException("routeId cannot be null");
+ }
+ return null;
+ }
+
+ /**
+ * Called by the media router to obtain a route controller for a particular route which is a
+ * member of {@link MediaRouter.RouteGroup}.
+ * <p>
+ * The media router will invoke the {@link RouteController#onRelease} method of the route
+ * controller when it is no longer needed to allow it to free its resources.
+ * </p>
+ *
+ * @param routeId The unique id of the member route.
+ * @param routeGroupId The unique id of the route group.
+ * @return The route controller. Returns null if there is no such route or if the route
+ * cannot be controlled using the route controller interface.
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ @Nullable
+ public RouteController onCreateRouteController(@NonNull String routeId,
+ @NonNull String routeGroupId) {
+ if (routeId == null) {
+ throw new IllegalArgumentException("routeId cannot be null");
+ }
+ if (routeGroupId == null) {
+ throw new IllegalArgumentException("routeGroupId cannot be null");
+ }
+ return onCreateRouteController(routeId);
+ }
+
+ /**
+ * Describes properties of the route provider's implementation.
+ * <p>
+ * This object is immutable once created.
+ * </p>
+ */
+ public static final class ProviderMetadata {
+ private final ComponentName mComponentName;
+
+ ProviderMetadata(ComponentName componentName) {
+ if (componentName == null) {
+ throw new IllegalArgumentException("componentName must not be null");
+ }
+ mComponentName = componentName;
+ }
+
+ /**
+ * Gets the provider's package name.
+ */
+ public String getPackageName() {
+ return mComponentName.getPackageName();
+ }
+
+ /**
+ * Gets the provider's component name.
+ */
+ public ComponentName getComponentName() {
+ return mComponentName;
+ }
+
+ @Override
+ public String toString() {
+ return "ProviderMetadata{ componentName="
+ + mComponentName.flattenToShortString() + " }";
+ }
+ }
+
+ /**
+ * Provides control over a particular route.
+ * <p>
+ * The media router obtains a route controller for a route whenever it needs
+ * to control a route. When a route is selected, the media router invokes
+ * the {@link #onSelect} method of its route controller. While selected,
+ * the media router may call other methods of the route controller to
+ * request that it perform certain actions to the route. When a route is
+ * unselected, the media router invokes the {@link #onUnselect} method of its
+ * route controller. When the media router no longer needs the route controller,
+ * it will invoke the {@link #onRelease} method to allow the route controller
+ * to free its resources.
+ * </p><p>
+ * There may be multiple route controllers simultaneously active for the
+ * same route. Each route controller will be released separately.
+ * </p><p>
+ * All operations on the route controller are asynchronous and
+ * results are communicated via callbacks.
+ * </p>
+ */
+ public static abstract class RouteController {
+ /**
+ * Releases the route controller, allowing it to free its resources.
+ */
+ public void onRelease() {
+ }
+
+ /**
+ * Selects the route.
+ */
+ public void onSelect() {
+ }
+
+ /**
+ * Unselects the route.
+ */
+ public void onUnselect() {
+ }
+
+ /**
+ * Unselects the route and provides a reason. The default implementation
+ * calls {@link #onUnselect()}.
+ * <p>
+ * The reason provided will be one of the following:
+ * <ul>
+ * <li>{@link MediaRouter#UNSELECT_REASON_UNKNOWN}</li>
+ * <li>{@link MediaRouter#UNSELECT_REASON_DISCONNECTED}</li>
+ * <li>{@link MediaRouter#UNSELECT_REASON_STOPPED}</li>
+ * <li>{@link MediaRouter#UNSELECT_REASON_ROUTE_CHANGED}</li>
+ * </ul>
+ *
+ * @param reason The reason for unselecting the route.
+ */
+ public void onUnselect(int reason) {
+ onUnselect();
+ }
+
+ /**
+ * Requests to set the volume of the route.
+ *
+ * @param volume The new volume value between 0 and {@link MediaRouteDescriptor#getVolumeMax}.
+ */
+ public void onSetVolume(int volume) {
+ }
+
+ /**
+ * Requests an incremental volume update for the route.
+ *
+ * @param delta The delta to add to the current volume.
+ */
+ public void onUpdateVolume(int delta) {
+ }
+
+ /**
+ * Performs a {@link MediaControlIntent media control} request
+ * asynchronously on behalf of the route.
+ *
+ * @param intent A {@link MediaControlIntent media control intent}.
+ * @param callback A {@link ControlRequestCallback} to invoke with the result
+ * of the request, or null if no result is required.
+ * @return True if the controller intends to handle the request and will
+ * invoke the callback when finished. False if the controller will not
+ * handle the request and will not invoke the callback.
+ *
+ * @see MediaControlIntent
+ */
+ public boolean onControlRequest(Intent intent, @Nullable ControlRequestCallback callback) {
+ return false;
+ }
+ }
+
+ /**
+ * Callback which is invoked when route information becomes available or changes.
+ */
+ public static abstract class Callback {
+ /**
+ * Called when information about a route provider and its routes changes.
+ *
+ * @param provider The media route provider that changed, never null.
+ * @param descriptor The new media route provider descriptor, or null if none.
+ */
+ public void onDescriptorChanged(@NonNull MediaRouteProvider provider,
+ @Nullable MediaRouteProviderDescriptor descriptor) {
+ }
+ }
+
+ private final class ProviderHandler extends Handler {
+ ProviderHandler() {
+ }
+
+ @Override
+ public void handleMessage(Message msg) {
+ switch (msg.what) {
+ case MSG_DELIVER_DESCRIPTOR_CHANGED:
+ deliverDescriptorChanged();
+ break;
+ case MSG_DELIVER_DISCOVERY_REQUEST_CHANGED:
+ deliverDiscoveryRequestChanged();
+ break;
+ }
+ }
+ }
+}
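
To make the lifecycle above concrete, here is a minimal, hypothetical provider that publishes one static route and hands out a trivial controller. The class names, route id, and control category are assumptions of this sketch, not part of this change, and the usual android.content imports are assumed.

    // Illustrative sketch only (hypothetical names): a provider with one fixed route.
    class ExampleMediaRouteProvider extends MediaRouteProvider {
        private static final String ROUTE_ID = "example_route";   // made-up route id

        ExampleMediaRouteProvider(Context context) {
            super(context);
            publishRoutes();
        }

        @Override
        public void onDiscoveryRequestChanged(MediaRouteDiscoveryRequest request) {
            // A real provider would inspect request.getSelector() to decide whether to
            // scan at all; this sketch simply republishes its single static route.
            publishRoutes();
        }

        @Override
        public RouteController onCreateRouteController(String routeId) {
            return ROUTE_ID.equals(routeId) ? new ExampleRouteController() : null;
        }

        private void publishRoutes() {
            IntentFilter liveAudio = new IntentFilter();
            liveAudio.addCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO);
            MediaRouteDescriptor route =
                    new MediaRouteDescriptor.Builder(ROUTE_ID, "Example Route")
                            .addControlFilter(liveAudio)
                            .build();
            // Notifies the registered Callback asynchronously via the provider handler.
            setDescriptor(new MediaRouteProviderDescriptor.Builder().addRoute(route).build());
        }

        private static final class ExampleRouteController extends RouteController {
            @Override public void onSelect() { /* start the playback session */ }
            @Override public void onUnselect(int reason) { /* tear the session down */ }
            @Override public void onSetVolume(int volume) { /* apply the requested volume */ }
        }
    }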
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderDescriptor.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderDescriptor.java
new file mode 100644
index 0000000..eb1ce09
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderDescriptor.java
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.support.mediarouter.media;
+
+import android.os.Bundle;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Describes the state of a media route provider and the routes that it publishes.
+ * <p>
+ * This object is immutable once created using a {@link Builder} instance.
+ * </p>
+ */
+public final class MediaRouteProviderDescriptor {
+ private static final String KEY_ROUTES = "routes";
+
+ private final Bundle mBundle;
+ private List<MediaRouteDescriptor> mRoutes;
+
+ private MediaRouteProviderDescriptor(Bundle bundle, List<MediaRouteDescriptor> routes) {
+ mBundle = bundle;
+ mRoutes = routes;
+ }
+
+ /**
+ * Gets the list of all routes that this provider has published.
+ */
+ public List<MediaRouteDescriptor> getRoutes() {
+ ensureRoutes();
+ return mRoutes;
+ }
+
+ private void ensureRoutes() {
+ if (mRoutes == null) {
+ ArrayList<Bundle> routeBundles = mBundle.<Bundle>getParcelableArrayList(KEY_ROUTES);
+ if (routeBundles == null || routeBundles.isEmpty()) {
+ mRoutes = Collections.<MediaRouteDescriptor>emptyList();
+ } else {
+ final int count = routeBundles.size();
+ mRoutes = new ArrayList<MediaRouteDescriptor>(count);
+ for (int i = 0; i < count; i++) {
+ mRoutes.add(MediaRouteDescriptor.fromBundle(routeBundles.get(i)));
+ }
+ }
+ }
+ }
+
+ /**
+ * Returns true if the route provider descriptor and all of the routes that
+ * it contains have all of the required fields.
+ * <p>
+ * This verification is deep. If the provider descriptor is known to be
+ * valid then it is not necessary to call {@link #isValid} on each of its routes.
+ * </p>
+ */
+ public boolean isValid() {
+ ensureRoutes();
+ final int routeCount = mRoutes.size();
+ for (int i = 0; i < routeCount; i++) {
+ MediaRouteDescriptor route = mRoutes.get(i);
+ if (route == null || !route.isValid()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("MediaRouteProviderDescriptor{ ");
+ result.append("routes=").append(
+ Arrays.toString(getRoutes().toArray()));
+ result.append(", isValid=").append(isValid());
+ result.append(" }");
+ return result.toString();
+ }
+
+ /**
+ * Converts this object to a bundle for serialization.
+ *
+ * @return The contents of the object represented as a bundle.
+ */
+ public Bundle asBundle() {
+ return mBundle;
+ }
+
+ /**
+ * Creates an instance from a bundle.
+ *
+ * @param bundle The bundle, or null if none.
+ * @return The new instance, or null if the bundle was null.
+ */
+ public static MediaRouteProviderDescriptor fromBundle(Bundle bundle) {
+ return bundle != null ? new MediaRouteProviderDescriptor(bundle, null) : null;
+ }
+
+ /**
+ * Builder for {@link MediaRouteProviderDescriptor media route provider descriptors}.
+ */
+ public static final class Builder {
+ private final Bundle mBundle;
+ private ArrayList<MediaRouteDescriptor> mRoutes;
+
+ /**
+ * Creates an empty media route provider descriptor builder.
+ */
+ public Builder() {
+ mBundle = new Bundle();
+ }
+
+ /**
+ * Creates a media route provider descriptor builder whose initial contents are
+ * copied from an existing descriptor.
+ */
+ public Builder(MediaRouteProviderDescriptor descriptor) {
+ if (descriptor == null) {
+ throw new IllegalArgumentException("descriptor must not be null");
+ }
+
+ mBundle = new Bundle(descriptor.mBundle);
+
+ descriptor.ensureRoutes();
+ if (!descriptor.mRoutes.isEmpty()) {
+ mRoutes = new ArrayList<MediaRouteDescriptor>(descriptor.mRoutes);
+ }
+ }
+
+ /**
+ * Adds a route.
+ */
+ public Builder addRoute(MediaRouteDescriptor route) {
+ if (route == null) {
+ throw new IllegalArgumentException("route must not be null");
+ }
+
+ if (mRoutes == null) {
+ mRoutes = new ArrayList<MediaRouteDescriptor>();
+ } else if (mRoutes.contains(route)) {
+ throw new IllegalArgumentException("route descriptor already added");
+ }
+ mRoutes.add(route);
+ return this;
+ }
+
+ /**
+ * Adds a list of routes.
+ */
+ public Builder addRoutes(Collection<MediaRouteDescriptor> routes) {
+ if (routes == null) {
+ throw new IllegalArgumentException("routes must not be null");
+ }
+
+ if (!routes.isEmpty()) {
+ for (MediaRouteDescriptor route : routes) {
+ addRoute(route);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Sets the list of routes.
+ */
+ Builder setRoutes(Collection<MediaRouteDescriptor> routes) {
+ if (routes == null || routes.isEmpty()) {
+ mRoutes = null;
+ mBundle.remove(KEY_ROUTES);
+ } else {
+ mRoutes = new ArrayList<>(routes);
+ }
+ return this;
+ }
+
+ /**
+ * Builds the {@link MediaRouteProviderDescriptor media route provider descriptor}.
+ */
+ public MediaRouteProviderDescriptor build() {
+ if (mRoutes != null) {
+ final int count = mRoutes.size();
+ ArrayList<Bundle> routeBundles = new ArrayList<Bundle>(count);
+ for (int i = 0; i < count; i++) {
+ routeBundles.add(mRoutes.get(i).asBundle());
+ }
+ mBundle.putParcelableArrayList(KEY_ROUTES, routeBundles);
+ }
+ return new MediaRouteProviderDescriptor(mBundle, mRoutes);
+ }
+ }
+}
\ No newline at end of file
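
A short sketch of how a provider descriptor is typically put together from route descriptors; here speakerRoute and tvRoute stand in for MediaRouteDescriptor instances built elsewhere, and provider is an existing MediaRouteProvider:

    // Illustrative sketch: bundle route descriptors into a provider descriptor.
    MediaRouteProviderDescriptor descriptor = new MediaRouteProviderDescriptor.Builder()
            .addRoute(speakerRoute)
            .addRoute(tvRoute)
            .build();

    // isValid() is deep: it fails if any contained route lacks a required field.
    if (descriptor.isValid()) {
        provider.setDescriptor(descriptor);
    }

The copying Builder(descriptor) constructor, together with the package-private setRoutes(), is what createDescriptorBundleForClientVersion() in MediaRouteProviderService (added later in this change) uses to republish a version-filtered subset of the routes.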
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderProtocol.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderProtocol.java
new file mode 100644
index 0000000..6be9343
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderProtocol.java
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.content.Intent;
+import android.os.Messenger;
+
+/**
+ * Defines the communication protocol for media route provider services.
+ */
+abstract class MediaRouteProviderProtocol {
+ /**
+ * The {@link Intent} that must be declared as handled by the service.
+ * Put this in your manifest.
+ */
+ public static final String SERVICE_INTERFACE =
+ "android.media.MediaRouteProviderService";
+
+ /*
+ * Messages sent from the client to the service.
+ * DO NOT RENUMBER THESE!
+ */
+
+ /** (client v1)
+ * Register client.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ * - arg2 : client version
+ */
+ public static final int CLIENT_MSG_REGISTER = 1;
+
+ /** (client v1)
+ * Unregister client.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ */
+ public static final int CLIENT_MSG_UNREGISTER = 2;
+
+ /** (client v1)
+ * Create route controller.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ * - arg2 : route controller id
+ * - CLIENT_DATA_ROUTE_ID : route id string
+ */
+ public static final int CLIENT_MSG_CREATE_ROUTE_CONTROLLER = 3;
+
+ /** (client v1)
+ * Release route controller.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ * - arg2 : route controller id
+ */
+ public static final int CLIENT_MSG_RELEASE_ROUTE_CONTROLLER = 4;
+
+ /** (client v1)
+ * Select route.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ * - arg2 : route controller id
+ */
+ public static final int CLIENT_MSG_SELECT_ROUTE = 5;
+
+ /** (client v1)
+ * Unselect route.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ * - arg2 : route controller id
+ */
+ public static final int CLIENT_MSG_UNSELECT_ROUTE = 6;
+
+ /** (client v1)
+ * Set route volume.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ * - arg2 : route controller id
+ * - CLIENT_DATA_VOLUME : volume integer
+ */
+ public static final int CLIENT_MSG_SET_ROUTE_VOLUME = 7;
+
+ /** (client v1)
+ * Update route volume.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ * - arg2 : route controller id
+ * - CLIENT_DATA_VOLUME : volume delta integer
+ */
+ public static final int CLIENT_MSG_UPDATE_ROUTE_VOLUME = 8;
+
+ /** (client v1)
+ * Route control request.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ * - arg2 : route controller id
+ * - obj : media control intent
+ */
+ public static final int CLIENT_MSG_ROUTE_CONTROL_REQUEST = 9;
+
+ /** (client v1)
+ * Sets the discovery request.
+ * - replyTo : client messenger
+ * - arg1 : request id
+ * - obj : discovery request bundle, or null if none
+ */
+ public static final int CLIENT_MSG_SET_DISCOVERY_REQUEST = 10;
+
+ public static final String CLIENT_DATA_ROUTE_ID = "routeId";
+ public static final String CLIENT_DATA_ROUTE_LIBRARY_GROUP = "routeGroupId";
+ public static final String CLIENT_DATA_VOLUME = "volume";
+ public static final String CLIENT_DATA_UNSELECT_REASON = "unselectReason";
+
+ /*
+ * Messages sent from the service to the client.
+ * DO NOT RENUMBER THESE!
+ */
+
+ /** (service v1)
+ * Generic failure sent in response to any unrecognized or malformed request.
+ * - arg1 : request id
+ */
+ public static final int SERVICE_MSG_GENERIC_FAILURE = 0;
+
+ /** (service v1)
+ * Generic success sent in response to a request that was handled successfully.
+ * - arg1 : request id
+ */
+ public static final int SERVICE_MSG_GENERIC_SUCCESS = 1;
+
+ /** (service v1)
+ * Registration succeeded.
+ * - arg1 : request id
+ * - arg2 : server version
+ * - obj : route provider descriptor bundle, or null
+ */
+ public static final int SERVICE_MSG_REGISTERED = 2;
+
+ /** (service v1)
+ * Route control request success result.
+ * - arg1 : request id
+ * - obj : result data bundle, or null
+ */
+ public static final int SERVICE_MSG_CONTROL_REQUEST_SUCCEEDED = 3;
+
+ /** (service v1)
+ * Route control request failure result.
+ * - arg1 : request id
+ * - obj : result data bundle, or null
+ * - SERVICE_DATA_ERROR: error message
+ */
+ public static final int SERVICE_MSG_CONTROL_REQUEST_FAILED = 4;
+
+ /** (service v1)
+ * Route provider descriptor changed. (unsolicited event)
+ * - arg1 : reserved (0)
+ * - obj : route provider descriptor bundle, or null
+ */
+ public static final int SERVICE_MSG_DESCRIPTOR_CHANGED = 5;
+
+ public static final String SERVICE_DATA_ERROR = "error";
+
+ /*
+ * Recognized client version numbers. (Reserved for future use.)
+ * DO NOT RENUMBER THESE!
+ */
+
+ /**
+ * The client version used from the beginning.
+ */
+ public static final int CLIENT_VERSION_1 = 1;
+
+ /**
+ * The client version used from support library v24.1.0.
+ */
+ public static final int CLIENT_VERSION_2 = 2;
+
+ /**
+ * The current client version.
+ */
+ public static final int CLIENT_VERSION_CURRENT = CLIENT_VERSION_2;
+
+ /*
+ * Recognized server version numbers. (Reserved for future use.)
+ * DO NOT RENUMBER THESE!
+ */
+
+ /**
+ * The service version used from the beginning.
+ */
+ public static final int SERVICE_VERSION_1 = 1;
+
+ /**
+ * The current service version.
+ */
+ public static final int SERVICE_VERSION_CURRENT = SERVICE_VERSION_1;
+
+ static final int CLIENT_VERSION_START = CLIENT_VERSION_1;
+
+ /**
+ * Returns true if the messenger object is valid.
+ * <p>
+ * The messenger constructor and unparceling code do not check whether the
+ * provided IBinder is a valid IMessenger object. As a result, it's possible
+ * for a peer to send an invalid IBinder that will result in crashes downstream.
+ * This method checks that the messenger is in a valid state.
+ * </p>
+ */
+ public static boolean isValidRemoteMessenger(Messenger messenger) {
+ try {
+ return messenger != null && messenger.getBinder() != null;
+ } catch (NullPointerException ex) {
+ // If the messenger was constructed with a binder interface other than
+ // IMessenger then the call to getBinder() will crash with an NPE.
+ return false;
+ }
+ }
+}
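
For orientation, this is roughly what the client side of the handshake looks like when sending CLIENT_MSG_REGISTER over the bound service's Messenger. The class is package-private, so the sketch assumes same-package code; serviceBinder (from onServiceConnected) and replyMessenger (wrapping the client's Handler for SERVICE_MSG_* replies) are assumptions of the sketch.

    // Illustrative sketch: register with a bound media route provider service.
    Messenger serviceMessenger = new Messenger(serviceBinder);

    Message msg = Message.obtain();
    msg.what = MediaRouteProviderProtocol.CLIENT_MSG_REGISTER;
    msg.arg1 = 1;                                                   // request id; non-zero means a reply is expected
    msg.arg2 = MediaRouteProviderProtocol.CLIENT_VERSION_CURRENT;   // client version
    msg.replyTo = replyMessenger;

    try {
        serviceMessenger.send(msg);
        // On success the service replies with SERVICE_MSG_REGISTERED:
        // arg1 = request id, arg2 = service version, obj = provider descriptor bundle.
    } catch (RemoteException e) {
        // The service died; the caller rebinds or gives up.
    }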
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
new file mode 100644
index 0000000..43cde10
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteProviderService.java
@@ -0,0 +1,759 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_DATA_ROUTE_ID;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_DATA_ROUTE_LIBRARY_GROUP;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_DATA_UNSELECT_REASON;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_DATA_VOLUME;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_CREATE_ROUTE_CONTROLLER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_REGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_RELEASE_ROUTE_CONTROLLER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_ROUTE_CONTROL_REQUEST;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_SELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_SET_DISCOVERY_REQUEST;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_SET_ROUTE_VOLUME;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_UNREGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_UNSELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_UPDATE_ROUTE_VOLUME;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_VERSION_1;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_DATA_ERROR;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_CONTROL_REQUEST_FAILED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_CONTROL_REQUEST_SUCCEEDED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_DESCRIPTOR_CHANGED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_GENERIC_FAILURE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_GENERIC_SUCCESS;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_MSG_REGISTERED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_VERSION_CURRENT;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.isValidRemoteMessenger;
+
+import android.app.Service;
+import android.content.Intent;
+import android.os.Bundle;
+import android.os.DeadObjectException;
+import android.os.Handler;
+import android.os.IBinder;
+import android.os.IBinder.DeathRecipient;
+import android.os.Message;
+import android.os.Messenger;
+import android.os.RemoteException;
+import android.support.annotation.VisibleForTesting;
+import android.support.v4.util.ObjectsCompat;
+import android.util.Log;
+import android.util.SparseArray;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+
+/**
+ * Base class for media route provider services.
+ * <p>
+ * A media router will bind to media route provider services when a callback is added via
+ * {@link MediaRouter#addCallback(MediaRouteSelector, MediaRouter.Callback, int)} with a discovery
+ * flag: {@link MediaRouter#CALLBACK_FLAG_REQUEST_DISCOVERY},
+ * {@link MediaRouter#CALLBACK_FLAG_FORCE_DISCOVERY}, or
+ * {@link MediaRouter#CALLBACK_FLAG_PERFORM_ACTIVE_SCAN}, and will unbind when the callback
+ * is removed via {@link MediaRouter#removeCallback(MediaRouter.Callback)}.
+ * </p><p>
+ * To implement your own media route provider service, extend this class and
+ * override the {@link #onCreateMediaRouteProvider} method to return an
+ * instance of your {@link MediaRouteProvider}.
+ * </p><p>
+ * Declare your media route provider service in your application manifest
+ * like this:
+ * </p>
+ * <pre>
+ * <service android:name=".MyMediaRouteProviderService"
+ * android:label="@string/my_media_route_provider_service">
+ * <intent-filter>
+ * <action android:name="android.media.MediaRouteProviderService" />
+ * </intent-filter>
+ * </service>
+ * </pre>
+ */
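
A minimal subclass matching the manifest entry above only needs to supply the provider; the class names are illustrative, reusing the hypothetical ExampleMediaRouteProvider sketched earlier:

    // Illustrative sketch (hypothetical names), pairing with the manifest entry above.
    public class MyMediaRouteProviderService extends MediaRouteProviderService {
        @Override
        public MediaRouteProvider onCreateMediaRouteProvider() {
            // The provider's package must match this service's package (see onBind() below).
            return new ExampleMediaRouteProvider(this);
        }
    }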
+public abstract class MediaRouteProviderService extends Service {
+ static final String TAG = "MediaRouteProviderSrv"; // max. 23 chars
+ static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
+
+ private final ArrayList<ClientRecord> mClients = new ArrayList<ClientRecord>();
+ private final ReceiveHandler mReceiveHandler;
+ private final Messenger mReceiveMessenger;
+ final PrivateHandler mPrivateHandler;
+ private final ProviderCallback mProviderCallback;
+
+ MediaRouteProvider mProvider;
+ private MediaRouteDiscoveryRequest mCompositeDiscoveryRequest;
+
+ /**
+ * The {@link Intent} that must be declared as handled by the service.
+ * Put this in your manifest.
+ */
+ public static final String SERVICE_INTERFACE = MediaRouteProviderProtocol.SERVICE_INTERFACE;
+
+ /*
+ * Private messages used internally. (Yes, you can renumber these.)
+ */
+
+ static final int PRIVATE_MSG_CLIENT_DIED = 1;
+
+ /**
+ * Creates a media route provider service.
+ */
+ public MediaRouteProviderService() {
+ mReceiveHandler = new ReceiveHandler(this);
+ mReceiveMessenger = new Messenger(mReceiveHandler);
+ mPrivateHandler = new PrivateHandler();
+ mProviderCallback = new ProviderCallback();
+ }
+
+ /**
+ * Called by the system when it is time to create the media route provider.
+ *
+ * @return The media route provider offered by this service, or null if
+ * this service has decided not to offer a media route provider.
+ */
+ public abstract MediaRouteProvider onCreateMediaRouteProvider();
+
+ /**
+ * Gets the media route provider offered by this service.
+ *
+ * @return The media route provider offered by this service, or null if
+ * it has not yet been created.
+ *
+ * @see #onCreateMediaRouteProvider()
+ */
+ public MediaRouteProvider getMediaRouteProvider() {
+ return mProvider;
+ }
+
+ @Override
+ public IBinder onBind(Intent intent) {
+ if (intent.getAction().equals(SERVICE_INTERFACE)) {
+ if (mProvider == null) {
+ MediaRouteProvider provider = onCreateMediaRouteProvider();
+ if (provider != null) {
+ String providerPackage = provider.getMetadata().getPackageName();
+ if (!providerPackage.equals(getPackageName())) {
+ throw new IllegalStateException("onCreateMediaRouteProvider() returned "
+ + "a provider whose package name does not match the package "
+ + "name of the service. A media route provider service can "
+ + "only export its own media route providers. "
+ + "Provider package name: " + providerPackage
+ + ". Service package name: " + getPackageName() + ".");
+ }
+ mProvider = provider;
+ mProvider.setCallback(mProviderCallback);
+ }
+ }
+ if (mProvider != null) {
+ return mReceiveMessenger.getBinder();
+ }
+ }
+ return null;
+ }
+
+ @Override
+ public boolean onUnbind(Intent intent) {
+ if (mProvider != null) {
+ mProvider.setCallback(null);
+ }
+ return super.onUnbind(intent);
+ }
+
+ boolean onRegisterClient(Messenger messenger, int requestId, int version) {
+ if (version >= CLIENT_VERSION_1) {
+ int index = findClient(messenger);
+ if (index < 0) {
+ ClientRecord client = new ClientRecord(messenger, version);
+ if (client.register()) {
+ mClients.add(client);
+ if (DEBUG) {
+ Log.d(TAG, client + ": Registered, version=" + version);
+ }
+ if (requestId != 0) {
+ MediaRouteProviderDescriptor descriptor = mProvider.getDescriptor();
+ sendReply(messenger, SERVICE_MSG_REGISTERED,
+ requestId, SERVICE_VERSION_CURRENT,
+ createDescriptorBundleForClientVersion(descriptor,
+ client.mVersion), null);
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ boolean onUnregisterClient(Messenger messenger, int requestId) {
+ int index = findClient(messenger);
+ if (index >= 0) {
+ ClientRecord client = mClients.remove(index);
+ if (DEBUG) {
+ Log.d(TAG, client + ": Unregistered");
+ }
+ client.dispose();
+ sendGenericSuccess(messenger, requestId);
+ return true;
+ }
+ return false;
+ }
+
+ void onBinderDied(Messenger messenger) {
+ int index = findClient(messenger);
+ if (index >= 0) {
+ ClientRecord client = mClients.remove(index);
+ if (DEBUG) {
+ Log.d(TAG, client + ": Binder died");
+ }
+ client.dispose();
+ }
+ }
+
+ boolean onCreateRouteController(Messenger messenger, int requestId,
+ int controllerId, String routeId, String routeGroupId) {
+ ClientRecord client = getClient(messenger);
+ if (client != null) {
+ if (client.createRouteController(routeId, routeGroupId, controllerId)) {
+ if (DEBUG) {
+ Log.d(TAG, client + ": Route controller created, controllerId=" + controllerId
+ + ", routeId=" + routeId + ", routeGroupId=" + routeGroupId);
+ }
+ sendGenericSuccess(messenger, requestId);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ boolean onReleaseRouteController(Messenger messenger, int requestId,
+ int controllerId) {
+ ClientRecord client = getClient(messenger);
+ if (client != null) {
+ if (client.releaseRouteController(controllerId)) {
+ if (DEBUG) {
+ Log.d(TAG, client + ": Route controller released"
+ + ", controllerId=" + controllerId);
+ }
+ sendGenericSuccess(messenger, requestId);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ boolean onSelectRoute(Messenger messenger, int requestId,
+ int controllerId) {
+ ClientRecord client = getClient(messenger);
+ if (client != null) {
+ MediaRouteProvider.RouteController controller =
+ client.getRouteController(controllerId);
+ if (controller != null) {
+ controller.onSelect();
+ if (DEBUG) {
+ Log.d(TAG, client + ": Route selected"
+ + ", controllerId=" + controllerId);
+ }
+ sendGenericSuccess(messenger, requestId);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ boolean onUnselectRoute(Messenger messenger, int requestId,
+ int controllerId, int reason) {
+ ClientRecord client = getClient(messenger);
+ if (client != null) {
+ MediaRouteProvider.RouteController controller =
+ client.getRouteController(controllerId);
+ if (controller != null) {
+ controller.onUnselect(reason);
+ if (DEBUG) {
+ Log.d(TAG, client + ": Route unselected"
+ + ", controllerId=" + controllerId);
+ }
+ sendGenericSuccess(messenger, requestId);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ boolean onSetRouteVolume(Messenger messenger, int requestId,
+ int controllerId, int volume) {
+ ClientRecord client = getClient(messenger);
+ if (client != null) {
+ MediaRouteProvider.RouteController controller =
+ client.getRouteController(controllerId);
+ if (controller != null) {
+ controller.onSetVolume(volume);
+ if (DEBUG) {
+ Log.d(TAG, client + ": Route volume changed"
+ + ", controllerId=" + controllerId + ", volume=" + volume);
+ }
+ sendGenericSuccess(messenger, requestId);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ boolean onUpdateRouteVolume(Messenger messenger, int requestId,
+ int controllerId, int delta) {
+ ClientRecord client = getClient(messenger);
+ if (client != null) {
+ MediaRouteProvider.RouteController controller =
+ client.getRouteController(controllerId);
+ if (controller != null) {
+ controller.onUpdateVolume(delta);
+ if (DEBUG) {
+ Log.d(TAG, client + ": Route volume updated"
+ + ", controllerId=" + controllerId + ", delta=" + delta);
+ }
+ sendGenericSuccess(messenger, requestId);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ boolean onRouteControlRequest(final Messenger messenger, final int requestId,
+ final int controllerId, final Intent intent) {
+ final ClientRecord client = getClient(messenger);
+ if (client != null) {
+ MediaRouteProvider.RouteController controller =
+ client.getRouteController(controllerId);
+ if (controller != null) {
+ MediaRouter.ControlRequestCallback callback = null;
+ if (requestId != 0) {
+ callback = new MediaRouter.ControlRequestCallback() {
+ @Override
+ public void onResult(Bundle data) {
+ if (DEBUG) {
+ Log.d(TAG, client + ": Route control request succeeded"
+ + ", controllerId=" + controllerId
+ + ", intent=" + intent
+ + ", data=" + data);
+ }
+ if (findClient(messenger) >= 0) {
+ sendReply(messenger, SERVICE_MSG_CONTROL_REQUEST_SUCCEEDED,
+ requestId, 0, data, null);
+ }
+ }
+
+ @Override
+ public void onError(String error, Bundle data) {
+ if (DEBUG) {
+ Log.d(TAG, client + ": Route control request failed"
+ + ", controllerId=" + controllerId
+ + ", intent=" + intent
+ + ", error=" + error + ", data=" + data);
+ }
+ if (findClient(messenger) >= 0) {
+ if (error != null) {
+ Bundle bundle = new Bundle();
+ bundle.putString(SERVICE_DATA_ERROR, error);
+ sendReply(messenger, SERVICE_MSG_CONTROL_REQUEST_FAILED,
+ requestId, 0, data, bundle);
+ } else {
+ sendReply(messenger, SERVICE_MSG_CONTROL_REQUEST_FAILED,
+ requestId, 0, data, null);
+ }
+ }
+ }
+ };
+ }
+ if (controller.onControlRequest(intent, callback)) {
+ if (DEBUG) {
+ Log.d(TAG, client + ": Route control request delivered"
+ + ", controllerId=" + controllerId + ", intent=" + intent);
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+ }
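+
+ // For example, a provider-side controller would complete the request roughly as
+ // follows (illustrative sketch; startPlayback() is a hypothetical helper):
+ //   @Override
+ //   public boolean onControlRequest(Intent intent, MediaRouter.ControlRequestCallback callback) {
+ //       if (!MediaControlIntent.ACTION_PLAY.equals(intent.getAction())) {
+ //           return false; // unhandled; the service then sends a generic failure
+ //       }
+ //       startPlayback(intent.getData());
+ //       if (callback != null) {
+ //           callback.onResult(new Bundle()); // relayed as SERVICE_MSG_CONTROL_REQUEST_SUCCEEDED
+ //       }
+ //       return true;
+ //   }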
+
+ boolean onSetDiscoveryRequest(Messenger messenger, int requestId,
+ MediaRouteDiscoveryRequest request) {
+ ClientRecord client = getClient(messenger);
+ if (client != null) {
+ boolean actuallyChanged = client.setDiscoveryRequest(request);
+ if (DEBUG) {
+ Log.d(TAG, client + ": Set discovery request, request=" + request
+ + ", actuallyChanged=" + actuallyChanged
+ + ", compositeDiscoveryRequest=" + mCompositeDiscoveryRequest);
+ }
+ sendGenericSuccess(messenger, requestId);
+ return true;
+ }
+ return false;
+ }
+
+ void sendDescriptorChanged(MediaRouteProviderDescriptor descriptor) {
+ final int count = mClients.size();
+ for (int i = 0; i < count; i++) {
+ ClientRecord client = mClients.get(i);
+ sendReply(client.mMessenger, SERVICE_MSG_DESCRIPTOR_CHANGED, 0, 0,
+ createDescriptorBundleForClientVersion(descriptor, client.mVersion), null);
+ if (DEBUG) {
+ Log.d(TAG, client + ": Sent descriptor change event, descriptor=" + descriptor);
+ }
+ }
+ }
+
+ @VisibleForTesting
+ static Bundle createDescriptorBundleForClientVersion(MediaRouteProviderDescriptor descriptor,
+ int clientVersion) {
+ if (descriptor == null) {
+ return null;
+ }
+ MediaRouteProviderDescriptor.Builder builder =
+ new MediaRouteProviderDescriptor.Builder(descriptor);
+ builder.setRoutes(null);
+ for (MediaRouteDescriptor route : descriptor.getRoutes()) {
+ if (clientVersion >= route.getMinClientVersion()
+ && clientVersion <= route.getMaxClientVersion()) {
+ builder.addRoute(route);
+ }
+ }
+ return builder.build().asBundle();
+ }
+
+ boolean updateCompositeDiscoveryRequest() {
+ MediaRouteDiscoveryRequest composite = null;
+ MediaRouteSelector.Builder selectorBuilder = null;
+ boolean activeScan = false;
+ final int count = mClients.size();
+ for (int i = 0; i < count; i++) {
+ MediaRouteDiscoveryRequest request = mClients.get(i).mDiscoveryRequest;
+ if (request != null
+ && (!request.getSelector().isEmpty() || request.isActiveScan())) {
+ activeScan |= request.isActiveScan();
+ if (composite == null) {
+ composite = request;
+ } else {
+ if (selectorBuilder == null) {
+ selectorBuilder = new MediaRouteSelector.Builder(composite.getSelector());
+ }
+ selectorBuilder.addSelector(request.getSelector());
+ }
+ }
+ }
+ if (selectorBuilder != null) {
+ composite = new MediaRouteDiscoveryRequest(selectorBuilder.build(), activeScan);
+ }
+ if (!ObjectsCompat.equals(mCompositeDiscoveryRequest, composite)) {
+ mCompositeDiscoveryRequest = composite;
+ mProvider.setDiscoveryRequest(composite);
+ return true;
+ }
+ return false;
+ }
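+
+ // For example, if one client requests {CATEGORY_LIVE_AUDIO, activeScan=false} and
+ // another requests {CATEGORY_REMOTE_PLAYBACK, activeScan=true}, the composite built
+ // here selects both categories with activeScan=true; it is only pushed to the
+ // provider when it actually differs from the previous composite.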
+
+ private ClientRecord getClient(Messenger messenger) {
+ int index = findClient(messenger);
+ return index >= 0 ? mClients.get(index) : null;
+ }
+
+ int findClient(Messenger messenger) {
+ final int count = mClients.size();
+ for (int i = 0; i < count; i++) {
+ ClientRecord client = mClients.get(i);
+ if (client.hasMessenger(messenger)) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ static void sendGenericFailure(Messenger messenger, int requestId) {
+ if (requestId != 0) {
+ sendReply(messenger, SERVICE_MSG_GENERIC_FAILURE, requestId, 0, null, null);
+ }
+ }
+
+ private static void sendGenericSuccess(Messenger messenger, int requestId) {
+ if (requestId != 0) {
+ sendReply(messenger, SERVICE_MSG_GENERIC_SUCCESS, requestId, 0, null, null);
+ }
+ }
+
+ static void sendReply(Messenger messenger, int what,
+ int requestId, int arg, Object obj, Bundle data) {
+ Message msg = Message.obtain();
+ msg.what = what;
+ msg.arg1 = requestId;
+ msg.arg2 = arg;
+ msg.obj = obj;
+ msg.setData(data);
+ try {
+ messenger.send(msg);
+ } catch (DeadObjectException ex) {
+ // The client died.
+ } catch (RemoteException ex) {
+ Log.e(TAG, "Could not send message to " + getClientId(messenger), ex);
+ }
+ }
+
+ static String getClientId(Messenger messenger) {
+ return "Client connection " + messenger.getBinder().toString();
+ }
+
+ private final class PrivateHandler extends Handler {
+ PrivateHandler() {
+ }
+
+ @Override
+ public void handleMessage(Message msg) {
+ switch (msg.what) {
+ case PRIVATE_MSG_CLIENT_DIED:
+ onBinderDied((Messenger)msg.obj);
+ break;
+ }
+ }
+ }
+
+ private final class ProviderCallback extends MediaRouteProvider.Callback {
+ ProviderCallback() {
+ }
+
+ @Override
+ public void onDescriptorChanged(MediaRouteProvider provider,
+ MediaRouteProviderDescriptor descriptor) {
+ sendDescriptorChanged(descriptor);
+ }
+ }
+
+ private final class ClientRecord implements DeathRecipient {
+ public final Messenger mMessenger;
+ public final int mVersion;
+ public MediaRouteDiscoveryRequest mDiscoveryRequest;
+
+ private final SparseArray<MediaRouteProvider.RouteController> mControllers =
+ new SparseArray<MediaRouteProvider.RouteController>();
+
+ public ClientRecord(Messenger messenger, int version) {
+ mMessenger = messenger;
+ mVersion = version;
+ }
+
+ public boolean register() {
+ try {
+ mMessenger.getBinder().linkToDeath(this, 0);
+ return true;
+ } catch (RemoteException ex) {
+ binderDied();
+ }
+ return false;
+ }
+
+ public void dispose() {
+ int count = mControllers.size();
+ for (int i = 0; i < count; i++) {
+ mControllers.valueAt(i).onRelease();
+ }
+ mControllers.clear();
+
+ mMessenger.getBinder().unlinkToDeath(this, 0);
+
+ setDiscoveryRequest(null);
+ }
+
+ public boolean hasMessenger(Messenger other) {
+ return mMessenger.getBinder() == other.getBinder();
+ }
+
+ public boolean createRouteController(String routeId, String routeGroupId,
+ int controllerId) {
+ if (mControllers.indexOfKey(controllerId) < 0) {
+ MediaRouteProvider.RouteController controller = routeGroupId == null
+ ? mProvider.onCreateRouteController(routeId)
+ : mProvider.onCreateRouteController(routeId, routeGroupId);
+ if (controller != null) {
+ mControllers.put(controllerId, controller);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public boolean releaseRouteController(int controllerId) {
+ MediaRouteProvider.RouteController controller = mControllers.get(controllerId);
+ if (controller != null) {
+ mControllers.remove(controllerId);
+ controller.onRelease();
+ return true;
+ }
+ return false;
+ }
+
+ public MediaRouteProvider.RouteController getRouteController(int controllerId) {
+ return mControllers.get(controllerId);
+ }
+
+ public boolean setDiscoveryRequest(MediaRouteDiscoveryRequest request) {
+ if (!ObjectsCompat.equals(mDiscoveryRequest, request)) {
+ mDiscoveryRequest = request;
+ return updateCompositeDiscoveryRequest();
+ }
+ return false;
+ }
+
+ // Runs on a binder thread.
+ @Override
+ public void binderDied() {
+ mPrivateHandler.obtainMessage(PRIVATE_MSG_CLIENT_DIED, mMessenger).sendToTarget();
+ }
+
+ @Override
+ public String toString() {
+ return getClientId(mMessenger);
+ }
+ }
+
+ /**
+ * Handler that receives messages from clients.
+ * <p>
+ * This inner class is static and only retains a weak reference to the service
+ * to prevent the service from being leaked in case one of the clients is holding an
+ * active reference to the server's messenger.
+ * </p><p>
+ * This handler should not be used to handle any messages other than those
+ * that come from the client.
+ * </p>
+ */
+ private static final class ReceiveHandler extends Handler {
+ private final WeakReference<MediaRouteProviderService> mServiceRef;
+
+ public ReceiveHandler(MediaRouteProviderService service) {
+ mServiceRef = new WeakReference<MediaRouteProviderService>(service);
+ }
+
+ @Override
+ public void handleMessage(Message msg) {
+ final Messenger messenger = msg.replyTo;
+ if (isValidRemoteMessenger(messenger)) {
+ final int what = msg.what;
+ final int requestId = msg.arg1;
+ final int arg = msg.arg2;
+ final Object obj = msg.obj;
+ final Bundle data = msg.peekData();
+ if (!processMessage(what, messenger, requestId, arg, obj, data)) {
+ if (DEBUG) {
+ Log.d(TAG, getClientId(messenger) + ": Message failed, what=" + what
+ + ", requestId=" + requestId + ", arg=" + arg
+ + ", obj=" + obj + ", data=" + data);
+ }
+ sendGenericFailure(messenger, requestId);
+ }
+ } else {
+ if (DEBUG) {
+ Log.d(TAG, "Ignoring message without valid reply messenger.");
+ }
+ }
+ }
+
+ private boolean processMessage(int what,
+ Messenger messenger, int requestId, int arg, Object obj, Bundle data) {
+ MediaRouteProviderService service = mServiceRef.get();
+ if (service != null) {
+ switch (what) {
+ case CLIENT_MSG_REGISTER:
+ return service.onRegisterClient(messenger, requestId, arg);
+
+ case CLIENT_MSG_UNREGISTER:
+ return service.onUnregisterClient(messenger, requestId);
+
+ case CLIENT_MSG_CREATE_ROUTE_CONTROLLER: {
+ String routeId = data.getString(CLIENT_DATA_ROUTE_ID);
+ String routeGroupId = data.getString(CLIENT_DATA_ROUTE_LIBRARY_GROUP);
+ if (routeId != null) {
+ return service.onCreateRouteController(
+ messenger, requestId, arg, routeId, routeGroupId);
+ }
+ break;
+ }
+
+ case CLIENT_MSG_RELEASE_ROUTE_CONTROLLER:
+ return service.onReleaseRouteController(messenger, requestId, arg);
+
+ case CLIENT_MSG_SELECT_ROUTE:
+ return service.onSelectRoute(messenger, requestId, arg);
+
+ case CLIENT_MSG_UNSELECT_ROUTE:
+ int reason = data == null ?
+ MediaRouter.UNSELECT_REASON_UNKNOWN
+ : data.getInt(CLIENT_DATA_UNSELECT_REASON,
+ MediaRouter.UNSELECT_REASON_UNKNOWN);
+ return service.onUnselectRoute(messenger, requestId, arg, reason);
+
+ case CLIENT_MSG_SET_ROUTE_VOLUME: {
+ int volume = data.getInt(CLIENT_DATA_VOLUME, -1);
+ if (volume >= 0) {
+ return service.onSetRouteVolume(
+ messenger, requestId, arg, volume);
+ }
+ break;
+ }
+
+ case CLIENT_MSG_UPDATE_ROUTE_VOLUME: {
+ int delta = data.getInt(CLIENT_DATA_VOLUME, 0);
+ if (delta != 0) {
+ return service.onUpdateRouteVolume(
+ messenger, requestId, arg, delta);
+ }
+ break;
+ }
+
+ case CLIENT_MSG_ROUTE_CONTROL_REQUEST:
+ if (obj instanceof Intent) {
+ return service.onRouteControlRequest(
+ messenger, requestId, arg, (Intent)obj);
+ }
+ break;
+
+ case CLIENT_MSG_SET_DISCOVERY_REQUEST: {
+ if (obj == null || obj instanceof Bundle) {
+ MediaRouteDiscoveryRequest request =
+ MediaRouteDiscoveryRequest.fromBundle((Bundle)obj);
+ return service.onSetDiscoveryRequest(
+ messenger, requestId,
+ request != null && request.isValid() ? request : null);
+ }
+ }
+ }
+ }
+ return false;
+ }
+ }
+}
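The handlers above ultimately drive a provider supplied by the hosting app. A minimal sketch of such a provider service follows; it is illustrative only (SampleRouteProviderService and the session comments are placeholders) and assumes the standard onCreateMediaRouteProvider() entry point of MediaRouteProviderService from the support-library API:

    import com.android.support.mediarouter.media.MediaRouteProvider;
    import com.android.support.mediarouter.media.MediaRouteProviderService;

    public class SampleRouteProviderService extends MediaRouteProviderService {
        @Override
        public MediaRouteProvider onCreateMediaRouteProvider() {
            return new MediaRouteProvider(this) {
                @Override
                public RouteController onCreateRouteController(String routeId) {
                    // Reached when a client sends CLIENT_MSG_CREATE_ROUTE_CONTROLLER
                    // and the service calls onCreateRouteController() above.
                    return new RouteController() {
                        @Override public void onSelect() { /* open the remote session */ }
                        @Override public void onUnselect(int reason) { /* tear it down */ }
                        @Override public void onSetVolume(int volume) { /* absolute volume */ }
                        @Override public void onUpdateVolume(int delta) { /* relative volume */ }
                        @Override public void onRelease() { /* free resources */ }
                    };
                }
            };
        }
    }

Routes themselves are published by handing the provider a MediaRouteProviderDescriptor via its setDescriptor() method (as in the support-library API); the service then fans the descriptor out to every bound client through sendDescriptorChanged(), trimmed per client version by createDescriptorBundleForClientVersion().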
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
new file mode 100644
index 0000000..5669b19
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouteSelector.java
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.support.mediarouter.media;
+
+import android.content.IntentFilter;
+import android.os.Bundle;
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Describes the capabilities of routes that applications would like to discover and use.
+ * <p>
+ * This object is immutable once created using a {@link Builder} instance.
+ * </p>
+ *
+ * <h3>Example</h3>
+ * <pre>
+ * MediaRouteSelector selector = new MediaRouteSelector.Builder()
+ * .addControlCategory(MediaControlIntent.CATEGORY_LIVE_VIDEO)
+ * .addControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK)
+ * .build();
+ *
+ * MediaRouter router = MediaRouter.getInstance(context);
+ * router.addCallback(selector, callback, MediaRouter.CALLBACK_FLAG_REQUEST_DISCOVERY);
+ * </pre>
+ */
+public final class MediaRouteSelector {
+ static final String KEY_CONTROL_CATEGORIES = "controlCategories";
+
+ private final Bundle mBundle;
+ List<String> mControlCategories;
+
+ /**
+ * An empty media route selector that will not match any routes.
+ */
+ public static final MediaRouteSelector EMPTY = new MediaRouteSelector(new Bundle(), null);
+
+ MediaRouteSelector(Bundle bundle, List<String> controlCategories) {
+ mBundle = bundle;
+ mControlCategories = controlCategories;
+ }
+
+ /**
+ * Gets the list of {@link MediaControlIntent media control categories} in the selector.
+ *
+ * @return The list of categories.
+ */
+ public List<String> getControlCategories() {
+ ensureControlCategories();
+ return mControlCategories;
+ }
+
+ void ensureControlCategories() {
+ if (mControlCategories == null) {
+ mControlCategories = mBundle.getStringArrayList(KEY_CONTROL_CATEGORIES);
+ if (mControlCategories == null || mControlCategories.isEmpty()) {
+ mControlCategories = Collections.<String>emptyList();
+ }
+ }
+ }
+
+ /**
+ * Returns true if the selector contains the specified category.
+ *
+ * @param category The category to check.
+ * @return True if the category is present.
+ */
+ public boolean hasControlCategory(String category) {
+ if (category != null) {
+ ensureControlCategories();
+ final int categoryCount = mControlCategories.size();
+ for (int i = 0; i < categoryCount; i++) {
+ if (mControlCategories.get(i).equals(category)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if the selector matches at least one of the specified control filters.
+ *
+ * @param filters The list of control filters to consider.
+ * @return True if a match is found.
+ */
+ public boolean matchesControlFilters(List<IntentFilter> filters) {
+ if (filters != null) {
+ ensureControlCategories();
+ final int categoryCount = mControlCategories.size();
+ if (categoryCount != 0) {
+ final int filterCount = filters.size();
+ for (int i = 0; i < filterCount; i++) {
+ final IntentFilter filter = filters.get(i);
+ if (filter != null) {
+ for (int j = 0; j < categoryCount; j++) {
+ if (filter.hasCategory(mControlCategories.get(j))) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+ return false;
+ }
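+
+ // For example (illustrative sketch), a route advertising remote playback matches a
+ // selector that asks for that category:
+ //   IntentFilter filter = new IntentFilter();
+ //   filter.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ //   boolean matches = selector.matchesControlFilters(Collections.singletonList(filter));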
+
+ /**
+ * Returns true if this selector contains all of the capabilities described
+ * by the specified selector.
+ *
+ * @param selector The selector to be examined.
+ * @return True if this selector contains all of the capabilities described
+ * by the specified selector.
+ */
+ public boolean contains(MediaRouteSelector selector) {
+ if (selector != null) {
+ ensureControlCategories();
+ selector.ensureControlCategories();
+ return mControlCategories.containsAll(selector.mControlCategories);
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if the selector does not specify any capabilities.
+ */
+ public boolean isEmpty() {
+ ensureControlCategories();
+ return mControlCategories.isEmpty();
+ }
+
+ /**
+ * Returns true if the selector has all of the required fields.
+ */
+ public boolean isValid() {
+ ensureControlCategories();
+ if (mControlCategories.contains(null)) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof MediaRouteSelector) {
+ MediaRouteSelector other = (MediaRouteSelector)o;
+ ensureControlCategories();
+ other.ensureControlCategories();
+ return mControlCategories.equals(other.mControlCategories);
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ ensureControlCategories();
+ return mControlCategories.hashCode();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("MediaRouteSelector{ ");
+ result.append("controlCategories=").append(
+ Arrays.toString(getControlCategories().toArray()));
+ result.append(" }");
+ return result.toString();
+ }
+
+ /**
+ * Converts this object to a bundle for serialization.
+ *
+ * @return The contents of the object represented as a bundle.
+ */
+ public Bundle asBundle() {
+ return mBundle;
+ }
+
+ /**
+ * Creates an instance from a bundle.
+ *
+ * @param bundle The bundle, or null if none.
+ * @return The new instance, or null if the bundle was null.
+ */
+ public static MediaRouteSelector fromBundle(@Nullable Bundle bundle) {
+ return bundle != null ? new MediaRouteSelector(bundle, null) : null;
+ }
+
+ /**
+ * Builder for {@link MediaRouteSelector media route selectors}.
+ */
+ public static final class Builder {
+ private ArrayList<String> mControlCategories;
+
+ /**
+ * Creates an empty media route selector builder.
+ */
+ public Builder() {
+ }
+
+ /**
+ * Creates a media route selector descriptor builder whose initial contents are
+ * copied from an existing selector.
+ */
+ public Builder(@NonNull MediaRouteSelector selector) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+
+ selector.ensureControlCategories();
+ if (!selector.mControlCategories.isEmpty()) {
+ mControlCategories = new ArrayList<String>(selector.mControlCategories);
+ }
+ }
+
+ /**
+ * Adds a {@link MediaControlIntent media control category} to the builder.
+ *
+ * @param category The category to add to the set of desired capabilities, such as
+ * {@link MediaControlIntent#CATEGORY_LIVE_AUDIO}.
+ * @return The builder instance for chaining.
+ */
+ @NonNull
+ public Builder addControlCategory(@NonNull String category) {
+ if (category == null) {
+ throw new IllegalArgumentException("category must not be null");
+ }
+
+ if (mControlCategories == null) {
+ mControlCategories = new ArrayList<String>();
+ }
+ if (!mControlCategories.contains(category)) {
+ mControlCategories.add(category);
+ }
+ return this;
+ }
+
+ /**
+ * Adds a list of {@link MediaControlIntent media control categories} to the builder.
+ *
+ * @param categories The list of categories to add to the set of desired capabilities,
+ * such as {@link MediaControlIntent#CATEGORY_LIVE_AUDIO}.
+ * @return The builder instance for chaining.
+ */
+ @NonNull
+ public Builder addControlCategories(@NonNull Collection<String> categories) {
+ if (categories == null) {
+ throw new IllegalArgumentException("categories must not be null");
+ }
+
+ if (!categories.isEmpty()) {
+ for (String category : categories) {
+ addControlCategory(category);
+ }
+ }
+ return this;
+ }
+
+ /**
+ * Adds the contents of an existing media route selector to the builder.
+ *
+ * @param selector The media route selector whose contents are to be added.
+ * @return The builder instance for chaining.
+ */
+ @NonNull
+ public Builder addSelector(@NonNull MediaRouteSelector selector) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+
+ addControlCategories(selector.getControlCategories());
+ return this;
+ }
+
+ /**
+ * Builds the {@link MediaRouteSelector media route selector}.
+ */
+ @NonNull
+ public MediaRouteSelector build() {
+ if (mControlCategories == null) {
+ return EMPTY;
+ }
+ Bundle bundle = new Bundle();
+ bundle.putStringArrayList(KEY_CONTROL_CATEGORIES, mControlCategories);
+ return new MediaRouteSelector(bundle, mControlCategories);
+ }
+ }
+}
\ No newline at end of file
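A short usage sketch for the selector class above (the category choice is arbitrary; MediaControlIntent supplies the constant): build the selector once, query its capabilities, and round-trip it through a Bundle, as the Messenger protocol in MediaRouteProviderService does.

    MediaRouteSelector selector = new MediaRouteSelector.Builder()
            .addControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK)
            .build();

    // Capability queries.
    boolean hasRemotePlayback =
            selector.hasControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK); // true
    boolean empty = selector.isEmpty();                                               // false

    // Bundle round-trip; equals() compares the control category lists.
    MediaRouteSelector restored = MediaRouteSelector.fromBundle(selector.asBundle());
    boolean same = selector.equals(restored);                                         // true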
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
new file mode 100644
index 0000000..db0052e
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaRouter.java
@@ -0,0 +1,2999 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.annotation.IntDef;
+import android.annotation.NonNull;
+import android.annotation.Nullable;
+import android.app.ActivityManager;
+import android.content.ComponentName;
+import android.content.ContentResolver;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.content.IntentSender;
+import android.content.pm.PackageManager.NameNotFoundException;
+import android.content.res.Resources;
+import android.net.Uri;
+import android.os.Bundle;
+import android.os.Handler;
+import android.os.Looper;
+import android.os.Message;
+import android.support.v4.app.ActivityManagerCompat;
+import android.support.v4.hardware.display.DisplayManagerCompat;
+import android.support.v4.media.VolumeProviderCompat;
+import android.support.v4.media.session.MediaSessionCompat;
+import android.support.v4.util.Pair;
+import android.text.TextUtils;
+import android.util.Log;
+import android.view.Display;
+
+import com.android.support.mediarouter.media.MediaRouteProvider.ProviderMetadata;
+import com.android.support.mediarouter.media.MediaRouteProvider.RouteController;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * MediaRouter allows applications to control the routing of media channels
+ * and streams from the current device to external speakers and destination devices.
+ * <p>
+ * A MediaRouter instance is retrieved through {@link #getInstance}. Applications
+ * can query the media router about the currently selected route and its capabilities
+ * to determine how to send content to the route's destination. Applications can
+ * also {@link RouteInfo#sendControlRequest send control requests} to the route
+ * to ask the route's destination to perform certain remote control functions
+ * such as playing media.
+ * </p><p>
+ * See also {@link MediaRouteProvider} for information on how an application
+ * can publish new media routes to the media router.
+ * </p><p>
+ * The media router API is not thread-safe; all interactions with it must be
+ * done from the main thread of the process.
+ * </p>
+ */
+public final class MediaRouter {
+ static final String TAG = "MediaRouter";
+ static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
+
+ /**
+ * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the reason the route
+ * was unselected is unknown.
+ */
+ public static final int UNSELECT_REASON_UNKNOWN = 0;
+ /**
+ * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user pressed
+ * the disconnect button to disconnect and keep playing.
+ * <p>
+ *
+ * @see MediaRouteDescriptor#canDisconnectAndKeepPlaying()
+ */
+ public static final int UNSELECT_REASON_DISCONNECTED = 1;
+ /**
+ * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user pressed
+ * the stop casting button.
+ */
+ public static final int UNSELECT_REASON_STOPPED = 2;
+ /**
+ * Passed to {@link android.support.v7.media.MediaRouteProvider.RouteController#onUnselect(int)}
+ * and {@link Callback#onRouteUnselected(MediaRouter, RouteInfo, int)} when the user selected
+ * a different route.
+ */
+ public static final int UNSELECT_REASON_ROUTE_CHANGED = 3;
+
+ // Maintains global media router state for the process.
+ // This field is initialized in MediaRouter.getInstance() before any
+ // MediaRouter objects are instantiated so it is guaranteed to be
+ // valid whenever any instance method is invoked.
+ static GlobalMediaRouter sGlobal;
+
+ // Context-bound state of the media router.
+ final Context mContext;
+ final ArrayList<CallbackRecord> mCallbackRecords = new ArrayList<CallbackRecord>();
+
+ @IntDef(flag = true,
+ value = {
+ CALLBACK_FLAG_PERFORM_ACTIVE_SCAN,
+ CALLBACK_FLAG_REQUEST_DISCOVERY,
+ CALLBACK_FLAG_UNFILTERED_EVENTS
+ }
+ )
+ @Retention(RetentionPolicy.SOURCE)
+ private @interface CallbackFlags {}
+
+ /**
+ * Flag for {@link #addCallback}: Actively scan for routes while this callback
+ * is registered.
+ * <p>
+ * When this flag is specified, the media router will actively scan for new
+ * routes. Certain routes, such as wifi display routes, may not be discoverable
+ * except when actively scanning. This flag is typically used when the route picker
+ * dialog has been opened by the user to ensure that the route information is
+ * up to date.
+ * </p><p>
+ * Active scanning may consume a significant amount of power and may have intrusive
+ * effects on wireless connectivity. Therefore it is important that active scanning
+ * only be requested when it is actually needed to satisfy a user request to
+ * discover and select a new route.
+ * </p><p>
+ * This flag implies {@link #CALLBACK_FLAG_REQUEST_DISCOVERY} but performing
+ * active scans is much more expensive than a normal discovery request.
+ * </p>
+ *
+ * @see #CALLBACK_FLAG_REQUEST_DISCOVERY
+ */
+ public static final int CALLBACK_FLAG_PERFORM_ACTIVE_SCAN = 1 << 0;
+
+ /**
+ * Flag for {@link #addCallback}: Do not filter route events.
+ * <p>
+ * When this flag is specified, the callback will be invoked for events that affect any
+ * route even if they do not match the callback's filter.
+ * </p>
+ */
+ public static final int CALLBACK_FLAG_UNFILTERED_EVENTS = 1 << 1;
+
+ /**
+ * Flag for {@link #addCallback}: Request passive route discovery while this
+ * callback is registered, except on {@link ActivityManager#isLowRamDevice low-RAM devices}.
+ * <p>
+ * When this flag is specified, the media router will try to discover routes.
+ * Although route discovery is intended to be efficient, checking for new routes may
+ * result in some network activity and could slowly drain the battery. Therefore
+ * applications should only specify {@link #CALLBACK_FLAG_REQUEST_DISCOVERY} when
+ * they are running in the foreground and would like to provide the user with the
+ * option of connecting to new routes.
+ * </p><p>
+ * Applications should typically add a callback using this flag in the
+ * {@link android.app.Activity activity's} {@link android.app.Activity#onStart onStart}
+ * method and remove it in the {@link android.app.Activity#onStop onStop} method.
+ * The {@link android.support.v7.app.MediaRouteDiscoveryFragment} fragment may
+ * also be used for this purpose.
+ * </p><p class="note">
+ * On {@link ActivityManager#isLowRamDevice low-RAM devices} this flag
+ * will be ignored. Refer to
+ * {@link #addCallback(MediaRouteSelector, Callback, int) addCallback} for details.
+ * </p>
+ *
+ * @see android.support.v7.app.MediaRouteDiscoveryFragment
+ */
+ public static final int CALLBACK_FLAG_REQUEST_DISCOVERY = 1 << 2;
+
+ /**
+ * Flag for {@link #addCallback}: Request passive route discovery while this
+ * callback is registered, even on {@link ActivityManager#isLowRamDevice low-RAM devices}.
+ * <p class="note">
+ * This flag has a significant performance impact on low-RAM devices
+ * since it may cause many media route providers to be started simultaneously.
+ * It is much better to use {@link #CALLBACK_FLAG_REQUEST_DISCOVERY} instead to avoid
+ * performing passive discovery on these devices altogether. Refer to
+ * {@link #addCallback(MediaRouteSelector, Callback, int) addCallback} for details.
+ * </p>
+ *
+ * @see android.support.v7.app.MediaRouteDiscoveryFragment
+ */
+ public static final int CALLBACK_FLAG_FORCE_DISCOVERY = 1 << 3;
+
+ /**
+ * Flag for {@link #isRouteAvailable}: Ignore the default route.
+ * <p>
+ * This flag is used to determine whether a matching non-default route is available.
+ * This constraint may be used to decide whether to offer the route chooser dialog
+ * to the user. There is no point offering the chooser if there are no
+ * non-default choices.
+ * </p>
+ */
+ public static final int AVAILABILITY_FLAG_IGNORE_DEFAULT_ROUTE = 1 << 0;
+
+ /**
+ * Flag for {@link #isRouteAvailable}: Require an actual route to be matched.
+ * <p>
+ * If this flag is not set, then {@link #isRouteAvailable} will return true
+ * if it is possible to discover a matching route even if discovery is not in
+ * progress or if no matching route has yet been found. This feature is used to
+ * save resources by removing the need to perform passive route discovery on
+ * {@link ActivityManager#isLowRamDevice low-RAM devices}.
+ * </p><p>
+ * If this flag is set, then {@link #isRouteAvailable} will only return true if
+ * a matching route has actually been discovered.
+ * </p>
+ */
+ public static final int AVAILABILITY_FLAG_REQUIRE_MATCH = 1 << 1;
+
+ private MediaRouter(Context context) {
+ mContext = context;
+ }
+
+ /**
+ * Gets an instance of the media router service associated with the context.
+ * <p>
+ * The application is responsible for holding a strong reference to the returned
+ * {@link MediaRouter} instance, such as by storing the instance in a field of
+ * the {@link android.app.Activity}, to ensure that the media router remains alive
+ * as long as the application is using its features.
+ * </p><p>
+ * In other words, the support library only holds a {@link WeakReference weak reference}
+ * to each media router instance. When there are no remaining strong references to the
+ * media router instance, all of its callbacks will be removed and route discovery
+ * will no longer be performed on its behalf.
+ * </p>
+ *
+ * @return The media router instance for the context. The application must hold
+ * a strong reference to this object as long as it is in use.
+ */
+ public static MediaRouter getInstance(@NonNull Context context) {
+ if (context == null) {
+ throw new IllegalArgumentException("context must not be null");
+ }
+ checkCallingThread();
+
+ if (sGlobal == null) {
+ sGlobal = new GlobalMediaRouter(context.getApplicationContext());
+ sGlobal.start();
+ }
+ return sGlobal.getRouter(context);
+ }
+
+ /**
+ * Gets information about the {@link MediaRouter.RouteInfo routes} currently known to
+ * this media router.
+ */
+ public List<RouteInfo> getRoutes() {
+ checkCallingThread();
+ return sGlobal.getRoutes();
+ }
+
+ /**
+ * Gets information about the {@link MediaRouter.ProviderInfo route providers}
+ * currently known to this media router.
+ */
+ public List<ProviderInfo> getProviders() {
+ checkCallingThread();
+ return sGlobal.getProviders();
+ }
+
+ /**
+ * Gets the default route for playing media content on the system.
+ * <p>
+ * The system always provides a default route.
+ * </p>
+ *
+ * @return The default route, which is guaranteed to never be null.
+ */
+ @NonNull
+ public RouteInfo getDefaultRoute() {
+ checkCallingThread();
+ return sGlobal.getDefaultRoute();
+ }
+
+ /**
+ * Gets a bluetooth route for playing media content on the system.
+ *
+ * @return A bluetooth route if one exists, otherwise null.
+ */
+ public RouteInfo getBluetoothRoute() {
+ checkCallingThread();
+ return sGlobal.getBluetoothRoute();
+ }
+
+ /**
+ * Gets the currently selected route.
+ * <p>
+ * The application should examine the route's
+ * {@link RouteInfo#getControlFilters media control intent filters} to assess the
+ * capabilities of the route before attempting to use it.
+ * </p>
+ *
+ * <h3>Example</h3>
+ * <pre>
+ * public boolean playMovie() {
+ * MediaRouter mediaRouter = MediaRouter.getInstance(context);
+ * MediaRouter.RouteInfo route = mediaRouter.getSelectedRoute();
+ *
+ * // First try using the remote playback interface, if supported.
+ * if (route.supportsControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK)) {
+ * // The route supports remote playback.
+ * // Try to send it the Uri of the movie to play.
+ * Intent intent = new Intent(MediaControlIntent.ACTION_PLAY);
+ * intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ * intent.setDataAndType(Uri.parse("http://example.com/videos/movie.mp4"), "video/mp4");
+ * if (route.supportsControlRequest(intent)) {
+ * route.sendControlRequest(intent, null);
+ * return true; // sent the request to play the movie
+ * }
+ * }
+ *
+ * // If remote playback was not possible, then play locally.
+ * if (route.supportsControlCategory(MediaControlIntent.CATEGORY_LIVE_VIDEO)) {
+ * // The route supports live video streaming.
+ * // Prepare to play content locally in a window or in a presentation.
+ * return playMovieInWindow();
+ * }
+ *
+ * // Neither interface is supported, so we can't play the movie to this route.
+ * return false;
+ * }
+ * </pre>
+ *
+ * @return The selected route, which is guaranteed to never be null.
+ *
+ * @see RouteInfo#getControlFilters
+ * @see RouteInfo#supportsControlCategory
+ * @see RouteInfo#supportsControlRequest
+ */
+ @NonNull
+ public RouteInfo getSelectedRoute() {
+ checkCallingThread();
+ return sGlobal.getSelectedRoute();
+ }
+
+ /**
+ * Returns the selected route if it matches the specified selector, otherwise
+ * selects the default route and returns it. If there is one live audio route
+ * (usually Bluetooth A2DP), it will be selected instead of the default route.
+ *
+ * @param selector The selector to match.
+ * @return The previously selected route if it matched the selector, otherwise the
+ * newly selected default route which is guaranteed to never be null.
+ *
+ * @see MediaRouteSelector
+ * @see RouteInfo#matchesSelector
+ */
+ @NonNull
+ public RouteInfo updateSelectedRoute(@NonNull MediaRouteSelector selector) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+ checkCallingThread();
+
+ if (DEBUG) {
+ Log.d(TAG, "updateSelectedRoute: " + selector);
+ }
+ RouteInfo route = sGlobal.getSelectedRoute();
+ if (!route.isDefaultOrBluetooth() && !route.matchesSelector(selector)) {
+ route = sGlobal.chooseFallbackRoute();
+ sGlobal.selectRoute(route);
+ }
+ return route;
+ }
+
+ /**
+ * Selects the specified route.
+ *
+ * @param route The route to select.
+ */
+ public void selectRoute(@NonNull RouteInfo route) {
+ if (route == null) {
+ throw new IllegalArgumentException("route must not be null");
+ }
+ checkCallingThread();
+
+ if (DEBUG) {
+ Log.d(TAG, "selectRoute: " + route);
+ }
+ sGlobal.selectRoute(route);
+ }
+
+ /**
+ * Unselects the current route and selects the default route instead.
+ * <p>
+ * The reason given must be one of:
+ * <ul>
+ * <li>{@link MediaRouter#UNSELECT_REASON_UNKNOWN}</li>
+ * <li>{@link MediaRouter#UNSELECT_REASON_DISCONNECTED}</li>
+ * <li>{@link MediaRouter#UNSELECT_REASON_STOPPED}</li>
+ * <li>{@link MediaRouter#UNSELECT_REASON_ROUTE_CHANGED}</li>
+ * </ul>
+ *
+ * @param reason The reason for disconnecting the current route.
+ */
+ public void unselect(int reason) {
+ if (reason < MediaRouter.UNSELECT_REASON_UNKNOWN ||
+ reason > MediaRouter.UNSELECT_REASON_ROUTE_CHANGED) {
+ throw new IllegalArgumentException("Unsupported reason to unselect route");
+ }
+ checkCallingThread();
+
+ // Choose the fallback route if it's not already selected.
+ // Otherwise, select the default route.
+ RouteInfo fallbackRoute = sGlobal.chooseFallbackRoute();
+ if (sGlobal.getSelectedRoute() != fallbackRoute) {
+ sGlobal.selectRoute(fallbackRoute, reason);
+ } else {
+ sGlobal.selectRoute(sGlobal.getDefaultRoute(), reason);
+ }
+ }
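+
+ // For example, a "stop casting" button would call
+ //   mediaRouter.unselect(MediaRouter.UNSELECT_REASON_STOPPED);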
+
+ /**
+ * Returns true if there is a route that matches the specified selector.
+ * <p>
+ * This method returns true if there are any available routes that match the
+ * selector regardless of whether they are enabled or disabled. If the
+ * {@link #AVAILABILITY_FLAG_IGNORE_DEFAULT_ROUTE} flag is specified, then
+ * the method will only consider non-default routes.
+ * </p>
+ * <p class="note">
+ * On {@link ActivityManager#isLowRamDevice low-RAM devices} this method
+ * will return true if it is possible to discover a matching route even if
+ * discovery is not in progress or if no matching route has yet been found.
+ * Use {@link #AVAILABILITY_FLAG_REQUIRE_MATCH} to require an actual match.
+ * </p>
+ *
+ * @param selector The selector to match.
+ * @param flags Flags to control the determination of whether a route may be
+ * available. May be zero or some combination of
+ * {@link #AVAILABILITY_FLAG_IGNORE_DEFAULT_ROUTE} and
+ * {@link #AVAILABILITY_FLAG_REQUIRE_MATCH}.
+ * @return True if a matching route may be available.
+ */
+ public boolean isRouteAvailable(@NonNull MediaRouteSelector selector, int flags) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+ checkCallingThread();
+
+ return sGlobal.isRouteAvailable(selector, flags);
+ }
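+
+ // For example, an app can decide whether to show a route button at all:
+ //   boolean showButton = mediaRouter.isRouteAvailable(selector,
+ //           MediaRouter.AVAILABILITY_FLAG_IGNORE_DEFAULT_ROUTE);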
+
+ /**
+ * Registers a callback to discover routes that match the selector and to receive
+ * events when they change.
+ * <p>
+ * This is a convenience method that has the same effect as calling
+ * {@link #addCallback(MediaRouteSelector, Callback, int)} without flags.
+ * </p>
+ *
+ * @param selector A route selector that indicates the kinds of routes that the
+ * callback would like to discover.
+ * @param callback The callback to add.
+ * @see #removeCallback
+ */
+ public void addCallback(MediaRouteSelector selector, Callback callback) {
+ addCallback(selector, callback, 0);
+ }
+
+ /**
+ * Registers a callback to discover routes that match the selector and to receive
+ * events when they change.
+ * <p>
+ * The selector describes the kinds of routes that the application wants to
+ * discover. For example, if the application wants to use
+ * live audio routes then it should include the
+ * {@link MediaControlIntent#CATEGORY_LIVE_AUDIO live audio media control intent category}
+ * in its selector when it adds a callback to the media router.
+ * The selector may include any number of categories.
+ * </p><p>
+ * If the callback has already been registered, then the selector is added to
+ * the set of selectors being monitored by the callback.
+ * </p><p>
+ * By default, the callback will only be invoked for events that affect routes
+ * that match the specified selector. Event filtering may be disabled by specifying
+ * the {@link #CALLBACK_FLAG_UNFILTERED_EVENTS} flag when the callback is registered.
+ * </p><p>
+ * Applications should use the {@link #isRouteAvailable} method to determine
+ * whether it is possible to discover a route with the desired capabilities
+ * and therefore whether the media route button should be shown to the user.
+ * </p><p>
+ * The {@link #CALLBACK_FLAG_REQUEST_DISCOVERY} flag should be used while the application
+ * is in the foreground to request that passive discovery be performed if there are
+ * sufficient resources to allow continuous passive discovery.
+ * On {@link ActivityManager#isLowRamDevice low-RAM devices} this flag will be
+ * ignored to conserve resources.
+ * </p><p>
+ * The {@link #CALLBACK_FLAG_FORCE_DISCOVERY} flag should be used when
+ * passive discovery absolutely must be performed, even on low-RAM devices.
+ * This flag has a significant performance impact on low-RAM devices
+ * since it may cause many media route providers to be started simultaneously.
+ * It is much better to use {@link #CALLBACK_FLAG_REQUEST_DISCOVERY} instead to avoid
+ * performing passive discovery on these devices altogether.
+ * </p><p>
+ * The {@link #CALLBACK_FLAG_PERFORM_ACTIVE_SCAN} flag should be used when the
+ * media route chooser dialog is showing to confirm the presence of available
+ * routes that the user may connect to. This flag may use substantially more
+ * power.
+ * </p>
+ *
+ * <h3>Example</h3>
+ * <pre>
+ * public class MyActivity extends Activity {
+ * private MediaRouter mRouter;
+ * private MediaRouter.Callback mCallback;
+ * private MediaRouteSelector mSelector;
+ *
+ * protected void onCreate(Bundle savedInstanceState) {
+ * super.onCreate(savedInstanceState);
+ *
+ * mRouter = MediaRouter.getInstance(this);
+ * mCallback = new MyCallback();
+ * mSelector = new MediaRouteSelector.Builder()
+ * .addControlCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO)
+ * .addControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK)
+ * .build();
+ * }
+ *
+ * // Add the callback on start to tell the media router what kinds of routes
+ * // the application is interested in so that it can try to discover suitable ones.
+ * public void onStart() {
+ * super.onStart();
+ *
+ * mRouter.addCallback(mSelector, mCallback,
+ * MediaRouter.CALLBACK_FLAG_REQUEST_DISCOVERY);
+ *
+ * MediaRouter.RouteInfo route = mRouter.updateSelectedRoute(mSelector);
+ * // do something with the route...
+ * }
+ *
+ * // Remove the selector on stop to tell the media router that it no longer
+ * // needs to invest effort trying to discover routes of these kinds for now.
+ * public void onStop() {
+ * super.onStop();
+ *
+ * mRouter.removeCallback(mCallback);
+ * }
+ *
+ * private final class MyCallback extends MediaRouter.Callback {
+ * // Implement callback methods as needed.
+ * }
+ * }
+ * </pre>
+ *
+ * @param selector A route selector that indicates the kinds of routes that the
+ * callback would like to discover.
+ * @param callback The callback to add.
+ * @param flags Flags to control the behavior of the callback.
+ * May be zero or a combination of {@link #CALLBACK_FLAG_PERFORM_ACTIVE_SCAN} and
+ * {@link #CALLBACK_FLAG_UNFILTERED_EVENTS}.
+ * @see #removeCallback
+ */
+ public void addCallback(@NonNull MediaRouteSelector selector, @NonNull Callback callback,
+ @CallbackFlags int flags) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+ if (callback == null) {
+ throw new IllegalArgumentException("callback must not be null");
+ }
+ checkCallingThread();
+
+ if (DEBUG) {
+ Log.d(TAG, "addCallback: selector=" + selector
+ + ", callback=" + callback + ", flags=" + Integer.toHexString(flags));
+ }
+
+ CallbackRecord record;
+ int index = findCallbackRecord(callback);
+ if (index < 0) {
+ record = new CallbackRecord(this, callback);
+ mCallbackRecords.add(record);
+ } else {
+ record = mCallbackRecords.get(index);
+ }
+ boolean updateNeeded = false;
+ if ((flags & ~record.mFlags) != 0) {
+ record.mFlags |= flags;
+ updateNeeded = true;
+ }
+ if (!record.mSelector.contains(selector)) {
+ record.mSelector = new MediaRouteSelector.Builder(record.mSelector)
+ .addSelector(selector)
+ .build();
+ updateNeeded = true;
+ }
+ if (updateNeeded) {
+ sGlobal.updateDiscoveryRequest();
+ }
+ }
+
+ /**
+ * Removes the specified callback. It will no longer receive events about
+ * changes to media routes.
+ *
+ * @param callback The callback to remove.
+ * @see #addCallback
+ */
+ public void removeCallback(@NonNull Callback callback) {
+ if (callback == null) {
+ throw new IllegalArgumentException("callback must not be null");
+ }
+ checkCallingThread();
+
+ if (DEBUG) {
+ Log.d(TAG, "removeCallback: callback=" + callback);
+ }
+
+ int index = findCallbackRecord(callback);
+ if (index >= 0) {
+ mCallbackRecords.remove(index);
+ sGlobal.updateDiscoveryRequest();
+ }
+ }
+
+ private int findCallbackRecord(Callback callback) {
+ final int count = mCallbackRecords.size();
+ for (int i = 0; i < count; i++) {
+ if (mCallbackRecords.get(i).mCallback == callback) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ /**
+ * Registers a media route provider within this application process.
+ * <p>
+ * The provider will be added to the list of providers that all {@link MediaRouter}
+ * instances within this process can use to discover routes.
+ * </p>
+ *
+ * @param providerInstance The media route provider instance to add.
+ *
+ * @see MediaRouteProvider
+ * @see #removeCallback
+ */
+ public void addProvider(@NonNull MediaRouteProvider providerInstance) {
+ if (providerInstance == null) {
+ throw new IllegalArgumentException("providerInstance must not be null");
+ }
+ checkCallingThread();
+
+ if (DEBUG) {
+ Log.d(TAG, "addProvider: " + providerInstance);
+ }
+ sGlobal.addProvider(providerInstance);
+ }
+
+ /**
+ * Unregisters a media route provider within this application process.
+ * <p>
+ * The provider will be removed from the list of providers that all {@link MediaRouter}
+ * instances within this process can use to discover routes.
+ * </p>
+ *
+ * @param providerInstance The media route provider instance to remove.
+ *
+ * @see MediaRouteProvider
+ * @see #addCallback
+ */
+ public void removeProvider(@NonNull MediaRouteProvider providerInstance) {
+ if (providerInstance == null) {
+ throw new IllegalArgumentException("providerInstance must not be null");
+ }
+ checkCallingThread();
+
+ if (DEBUG) {
+ Log.d(TAG, "removeProvider: " + providerInstance);
+ }
+ sGlobal.removeProvider(providerInstance);
+ }
+
+ /**
+ * Adds a remote control client to enable remote control of the volume
+ * of the selected route.
+ * <p>
+ * The remote control client must have previously been registered with
+ * the audio manager using the {@link android.media.AudioManager#registerRemoteControlClient
+ * AudioManager.registerRemoteControlClient} method.
+ * </p>
+ *
+ * @param remoteControlClient The {@link android.media.RemoteControlClient} to register.
+ */
+ public void addRemoteControlClient(@NonNull Object remoteControlClient) {
+ if (remoteControlClient == null) {
+ throw new IllegalArgumentException("remoteControlClient must not be null");
+ }
+ checkCallingThread();
+
+ if (DEBUG) {
+ Log.d(TAG, "addRemoteControlClient: " + remoteControlClient);
+ }
+ sGlobal.addRemoteControlClient(remoteControlClient);
+ }
+
+ /**
+ * Removes a remote control client.
+ *
+ * @param remoteControlClient The {@link android.media.RemoteControlClient}
+ * to unregister.
+ */
+ public void removeRemoteControlClient(@NonNull Object remoteControlClient) {
+ if (remoteControlClient == null) {
+ throw new IllegalArgumentException("remoteControlClient must not be null");
+ }
+
+ if (DEBUG) {
+ Log.d(TAG, "removeRemoteControlClient: " + remoteControlClient);
+ }
+ sGlobal.removeRemoteControlClient(remoteControlClient);
+ }
+
+ /**
+ * Sets the media session to enable remote control of the volume of the
+ * selected route. This should be used instead of
+ * {@link #addRemoteControlClient} when using media sessions. Set the
+ * session to null to clear it.
+ *
+ * @param mediaSession The {@link android.media.session.MediaSession} to
+ * use.
+ */
+ public void setMediaSession(Object mediaSession) {
+ if (DEBUG) {
+ Log.d(TAG, "setMediaSession: " + mediaSession);
+ }
+ sGlobal.setMediaSession(mediaSession);
+ }
+
+ /**
+ * Sets a compat media session to enable remote control of the volume of the
+ * selected route. This should be used instead of
+ * {@link #addRemoteControlClient} when using {@link MediaSessionCompat}.
+ * Set the session to null to clear it.
+ *
+ * @param mediaSession The {@link MediaSessionCompat} to use, or null to clear the session.
+ */
+ public void setMediaSessionCompat(MediaSessionCompat mediaSession) {
+ if (DEBUG) {
+ Log.d(TAG, "setMediaSessionCompat: " + mediaSession);
+ }
+ sGlobal.setMediaSessionCompat(mediaSession);
+ }
+
+ public MediaSessionCompat.Token getMediaSessionToken() {
+ return sGlobal.getMediaSessionToken();
+ }
+
+ /**
+ * Ensures that calls into the media router are on the correct thread.
+ * It pays to be a little paranoid when global state invariants are at risk.
+ */
+ static void checkCallingThread() {
+ if (Looper.myLooper() != Looper.getMainLooper()) {
+ throw new IllegalStateException("The media router service must only be "
+ + "accessed on the application's main thread.");
+ }
+ }
+
+ static <T> boolean equal(T a, T b) {
+ return a == b || (a != null && b != null && a.equals(b));
+ }
+
+ /**
+ * Provides information about a media route.
+ * <p>
+ * Each media route has a list of {@link MediaControlIntent media control}
+ * {@link #getControlFilters intent filters} that describe the capabilities of the
+ * route and the manner in which it is used and controlled.
+ * </p>
+ */
+ public static class RouteInfo {
+ private final ProviderInfo mProvider;
+ private final String mDescriptorId;
+ private final String mUniqueId;
+ private String mName;
+ private String mDescription;
+ private Uri mIconUri;
+ private boolean mEnabled;
+ private boolean mConnecting;
+ private int mConnectionState;
+ private boolean mCanDisconnect;
+ private final ArrayList<IntentFilter> mControlFilters = new ArrayList<>();
+ private int mPlaybackType;
+ private int mPlaybackStream;
+ private int mDeviceType;
+ private int mVolumeHandling;
+ private int mVolume;
+ private int mVolumeMax;
+ private Display mPresentationDisplay;
+ private int mPresentationDisplayId = PRESENTATION_DISPLAY_ID_NONE;
+ private Bundle mExtras;
+ private IntentSender mSettingsIntent;
+ MediaRouteDescriptor mDescriptor;
+
+ @IntDef({CONNECTION_STATE_DISCONNECTED, CONNECTION_STATE_CONNECTING,
+ CONNECTION_STATE_CONNECTED})
+ @Retention(RetentionPolicy.SOURCE)
+ private @interface ConnectionState {}
+
+ /**
+ * The default connection state indicating the route is disconnected.
+ *
+ * @see #getConnectionState
+ */
+ public static final int CONNECTION_STATE_DISCONNECTED = 0;
+
+ /**
+ * A connection state indicating the route is in the process of connecting and is not yet
+ * ready for use.
+ *
+ * @see #getConnectionState
+ */
+ public static final int CONNECTION_STATE_CONNECTING = 1;
+
+ /**
+ * A connection state indicating the route is connected.
+ *
+ * @see #getConnectionState
+ */
+ public static final int CONNECTION_STATE_CONNECTED = 2;
+
+ @IntDef({PLAYBACK_TYPE_LOCAL,PLAYBACK_TYPE_REMOTE})
+ @Retention(RetentionPolicy.SOURCE)
+ private @interface PlaybackType {}
+
+ /**
+ * The default playback type, "local", indicating the presentation of the media
+ * is happening on the same device (e.g. a phone, a tablet) as where it is
+ * controlled from.
+ *
+ * @see #getPlaybackType
+ */
+ public static final int PLAYBACK_TYPE_LOCAL = 0;
+
+ /**
+ * A playback type indicating the presentation of the media is happening on
+ * a different device (i.e. the remote device) than where it is controlled from.
+ *
+ * @see #getPlaybackType
+ */
+ public static final int PLAYBACK_TYPE_REMOTE = 1;
+
+ @IntDef({DEVICE_TYPE_UNKNOWN, DEVICE_TYPE_TV, DEVICE_TYPE_SPEAKER, DEVICE_TYPE_BLUETOOTH})
+ @Retention(RetentionPolicy.SOURCE)
+ private @interface DeviceType {}
+
+ /**
+ * The default receiver device type of the route indicating the type is unknown.
+ *
+ * @see #getDeviceType
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public static final int DEVICE_TYPE_UNKNOWN = 0;
+
+ /**
+ * A receiver device type of the route indicating the presentation of the media is happening
+ * on a TV.
+ *
+ * @see #getDeviceType
+ */
+ public static final int DEVICE_TYPE_TV = 1;
+
+ /**
+ * A receiver device type of the route indicating the presentation of the media is happening
+ * on a speaker.
+ *
+ * @see #getDeviceType
+ */
+ public static final int DEVICE_TYPE_SPEAKER = 2;
+
+ /**
+ * A receiver device type of the route indicating the presentation of the media is happening
+ * on a bluetooth device such as a bluetooth speaker.
+ *
+ * @see #getDeviceType
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public static final int DEVICE_TYPE_BLUETOOTH = 3;
+
+ @IntDef({PLAYBACK_VOLUME_FIXED,PLAYBACK_VOLUME_VARIABLE})
+ @Retention(RetentionPolicy.SOURCE)
+ private @interface PlaybackVolume {}
+
+ /**
+ * Playback information indicating the playback volume is fixed, i.e. it cannot be
+ * controlled from this object. An example of fixed playback volume is a remote player,
+ * playing over HDMI where the user prefers to control the volume on the HDMI sink, rather
+ * than attenuate at the source.
+ *
+ * @see #getVolumeHandling
+ */
+ public static final int PLAYBACK_VOLUME_FIXED = 0;
+
+ /**
+ * Playback information indicating the playback volume is variable and can be controlled
+ * from this object.
+ *
+ * @see #getVolumeHandling
+ */
+ public static final int PLAYBACK_VOLUME_VARIABLE = 1;
+
+ /**
+ * The default presentation display id indicating no presentation display is associated
+ * with the route.
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public static final int PRESENTATION_DISPLAY_ID_NONE = -1;
+
+ static final int CHANGE_GENERAL = 1 << 0;
+ static final int CHANGE_VOLUME = 1 << 1;
+ static final int CHANGE_PRESENTATION_DISPLAY = 1 << 2;
+
+ // Should match to SystemMediaRouteProvider.PACKAGE_NAME.
+ static final String SYSTEM_MEDIA_ROUTE_PROVIDER_PACKAGE_NAME = "android";
+
+ RouteInfo(ProviderInfo provider, String descriptorId, String uniqueId) {
+ mProvider = provider;
+ mDescriptorId = descriptorId;
+ mUniqueId = uniqueId;
+ }
+
+ /**
+ * Gets information about the provider of this media route.
+ */
+ public ProviderInfo getProvider() {
+ return mProvider;
+ }
+
+ /**
+ * Gets the unique id of the route.
+ * <p>
+ * The route unique id functions as a stable identifier by which the route is known.
+ * For example, an application can use this id as a token to remember the
+ * selected route across restarts or to communicate its identity to a service.
+ * </p>
+ *
+ * @return The unique id of the route, never null.
+ */
+ @NonNull
+ public String getId() {
+ return mUniqueId;
+ }
+
+ /**
+ * Gets the user-visible name of the route.
+ * <p>
+ * The route name identifies the destination represented by the route.
+ * It may be a user-supplied name, an alias, or device serial number.
+ * </p>
+ *
+ * @return The user-visible name of a media route. This is the string presented
+ * to users who may select this as the active route.
+ */
+ public String getName() {
+ return mName;
+ }
+
+ /**
+ * Gets the user-visible description of the route.
+ * <p>
+ * The route description describes the kind of destination represented by the route.
+ * It may be a user-supplied string, a model number or brand of device.
+ * </p>
+ *
+ * @return The description of the route, or null if none.
+ */
+ @Nullable
+ public String getDescription() {
+ return mDescription;
+ }
+
+ /**
+ * Gets the URI of the icon representing this route.
+ * <p>
+ * This icon will be used in picker UIs if available.
+ * </p>
+ *
+ * @return The URI of the icon representing this route, or null if none.
+ */
+ public Uri getIconUri() {
+ return mIconUri;
+ }
+
+ /**
+ * Returns true if this route is enabled and may be selected.
+ *
+ * @return True if this route is enabled.
+ */
+ public boolean isEnabled() {
+ return mEnabled;
+ }
+
+ /**
+ * Returns true if the route is in the process of connecting and is not
+ * yet ready for use.
+ *
+ * @return True if this route is in the process of connecting.
+ */
+ public boolean isConnecting() {
+ return mConnecting;
+ }
+
+ /**
+ * Gets the connection state of the route.
+ *
+ * @return The connection state of this route: {@link #CONNECTION_STATE_DISCONNECTED},
+ * {@link #CONNECTION_STATE_CONNECTING}, or {@link #CONNECTION_STATE_CONNECTED}.
+ */
+ @ConnectionState
+ public int getConnectionState() {
+ return mConnectionState;
+ }
+
+ /**
+ * Returns true if this route is currently selected.
+ *
+ * @return True if this route is currently selected.
+ *
+ * @see MediaRouter#getSelectedRoute
+ */
+ public boolean isSelected() {
+ checkCallingThread();
+ return sGlobal.getSelectedRoute() == this;
+ }
+
+ /**
+ * Returns true if this route is the default route.
+ *
+ * @return True if this route is the default route.
+ *
+ * @see MediaRouter#getDefaultRoute
+ */
+ public boolean isDefault() {
+ checkCallingThread();
+ return sGlobal.getDefaultRoute() == this;
+ }
+
+ /**
+ * Returns true if this route is a bluetooth route.
+ *
+ * @return True if this route is a bluetooth route.
+ *
+ * @see MediaRouter#getBluetoothRoute
+ */
+ public boolean isBluetooth() {
+ checkCallingThread();
+ return sGlobal.getBluetoothRoute() == this;
+ }
+
+ /**
+ * Returns true if this route is the default route and the device speaker.
+ *
+ * @return True if this route is the default route and the device speaker.
+ */
+ public boolean isDeviceSpeaker() {
+ int defaultAudioRouteNameResourceId = Resources.getSystem().getIdentifier(
+ "default_audio_route_name", "string", "android");
+ return isDefault()
+ && Resources.getSystem().getText(defaultAudioRouteNameResourceId).equals(mName);
+ }
+
+ /**
+ * Gets a list of {@link MediaControlIntent media control intent} filters that
+ * describe the capabilities of this route and the media control actions that
+ * it supports.
+ *
+ * @return A list of intent filters that specifies the media control intents that
+ * this route supports.
+ *
+ * @see MediaControlIntent
+ * @see #supportsControlCategory
+ * @see #supportsControlRequest
+ */
+ public List<IntentFilter> getControlFilters() {
+ return mControlFilters;
+ }
+
+ /**
+ * Returns true if the route supports at least one of the capabilities
+ * described by a media route selector.
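+     * <p>
+     * A minimal sketch of building a selector and testing a route against it
+     * (the category choice is illustrative):
+     * </p>
+     * <pre>{@code
+     * MediaRouteSelector selector = new MediaRouteSelector.Builder()
+     *         .addControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK)
+     *         .build();
+     * if (route.matchesSelector(selector)) {
+     *     // The route can handle remote playback requests.
+     * }
+     * }</pre>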
+ *
+ * @param selector The selector that specifies the capabilities to check.
+ * @return True if the route supports at least one of the capabilities
+ * described in the media route selector.
+ */
+ public boolean matchesSelector(@NonNull MediaRouteSelector selector) {
+ if (selector == null) {
+ throw new IllegalArgumentException("selector must not be null");
+ }
+ checkCallingThread();
+ return selector.matchesControlFilters(mControlFilters);
+ }
+
+ /**
+ * Returns true if the route supports the specified
+ * {@link MediaControlIntent media control} category.
+ * <p>
+ * Media control categories describe the capabilities of this route
+ * such as whether it supports live audio streaming or remote playback.
+ * </p>
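+     * <p>
+     * For example, a sender app might gate its cast button on remote playback
+     * support (illustrative sketch):
+     * </p>
+     * <pre>{@code
+     * boolean canCast =
+     *         route.supportsControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+     * }</pre>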
+ *
+ * @param category A {@link MediaControlIntent media control} category
+ * such as {@link MediaControlIntent#CATEGORY_LIVE_AUDIO},
+ * {@link MediaControlIntent#CATEGORY_LIVE_VIDEO},
+ * {@link MediaControlIntent#CATEGORY_REMOTE_PLAYBACK}, or a provider-defined
+ * media control category.
+ * @return True if the route supports the specified intent category.
+ *
+ * @see MediaControlIntent
+ * @see #getControlFilters
+ */
+ public boolean supportsControlCategory(@NonNull String category) {
+ if (category == null) {
+ throw new IllegalArgumentException("category must not be null");
+ }
+ checkCallingThread();
+
+ int count = mControlFilters.size();
+ for (int i = 0; i < count; i++) {
+ if (mControlFilters.get(i).hasCategory(category)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if the route supports the specified
+ * {@link MediaControlIntent media control} category and action.
+ * <p>
+ * Media control actions describe specific requests that an application
+ * can ask a route to perform.
+ * </p>
+ *
+ * @param category A {@link MediaControlIntent media control} category
+ * such as {@link MediaControlIntent#CATEGORY_LIVE_AUDIO},
+ * {@link MediaControlIntent#CATEGORY_LIVE_VIDEO},
+ * {@link MediaControlIntent#CATEGORY_REMOTE_PLAYBACK}, or a provider-defined
+ * media control category.
+ * @param action A {@link MediaControlIntent media control} action
+ * such as {@link MediaControlIntent#ACTION_PLAY}.
+ * @return True if the route supports the specified intent action.
+ *
+ * @see MediaControlIntent
+ * @see #getControlFilters
+ */
+ public boolean supportsControlAction(@NonNull String category, @NonNull String action) {
+ if (category == null) {
+ throw new IllegalArgumentException("category must not be null");
+ }
+ if (action == null) {
+ throw new IllegalArgumentException("action must not be null");
+ }
+ checkCallingThread();
+
+ int count = mControlFilters.size();
+ for (int i = 0; i < count; i++) {
+ IntentFilter filter = mControlFilters.get(i);
+ if (filter.hasCategory(category) && filter.hasAction(action)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if the route supports the specified
+ * {@link MediaControlIntent media control} request.
+ * <p>
+ * Media control requests are used to request the route to perform
+ * actions such as starting remote playback of a media item.
+ * </p>
+ *
+ * @param intent A {@link MediaControlIntent media control intent}.
+ * @return True if the route can handle the specified intent.
+ *
+ * @see MediaControlIntent
+ * @see #getControlFilters
+ */
+ public boolean supportsControlRequest(@NonNull Intent intent) {
+ if (intent == null) {
+ throw new IllegalArgumentException("intent must not be null");
+ }
+ checkCallingThread();
+
+ ContentResolver contentResolver = sGlobal.getContentResolver();
+ int count = mControlFilters.size();
+ for (int i = 0; i < count; i++) {
+ if (mControlFilters.get(i).match(contentResolver, intent, true, TAG) >= 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Sends a {@link MediaControlIntent media control} request to be performed
+ * asynchronously by the route's destination.
+ * <p>
+ * Media control requests are used to request the route to perform
+ * actions such as starting remote playback of a media item.
+ * </p><p>
+ * This function may only be called on a selected route. Control requests
+ * sent to unselected routes will fail.
+ * </p>
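+     * <p>
+     * A minimal sketch of requesting remote playback of an item; {@code contentUri}
+     * and the logging tag are illustrative, and the route must already be selected:
+     * </p>
+     * <pre>{@code
+     * Intent intent = new Intent(MediaControlIntent.ACTION_PLAY);
+     * intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+     * intent.setDataAndType(contentUri, "video/mp4");
+     * if (route.supportsControlRequest(intent)) {
+     *     route.sendControlRequest(intent, new MediaRouter.ControlRequestCallback() {
+     *         public void onResult(Bundle data) {
+     *             Log.d("MyApp", "Playback started: " + data);
+     *         }
+     *         public void onError(String error, Bundle data) {
+     *             Log.w("MyApp", "Playback request failed: " + error);
+     *         }
+     *     });
+     * }
+     * }</pre>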
+ *
+ * @param intent A {@link MediaControlIntent media control intent}.
+ * @param callback A {@link ControlRequestCallback} to invoke with the result
+ * of the request, or null if no result is required.
+ *
+ * @see MediaControlIntent
+ */
+ public void sendControlRequest(@NonNull Intent intent,
+ @Nullable ControlRequestCallback callback) {
+ if (intent == null) {
+ throw new IllegalArgumentException("intent must not be null");
+ }
+ checkCallingThread();
+
+ sGlobal.sendControlRequest(this, intent, callback);
+ }
+
+ /**
+ * Gets the type of playback associated with this route.
+ *
+ * @return The type of playback associated with this route: {@link #PLAYBACK_TYPE_LOCAL}
+ * or {@link #PLAYBACK_TYPE_REMOTE}.
+ */
+ @PlaybackType
+ public int getPlaybackType() {
+ return mPlaybackType;
+ }
+
+ /**
+ * Gets the audio stream over which the playback associated with this route is performed.
+ *
+ * @return The stream over which the playback associated with this route is performed.
+ */
+ public int getPlaybackStream() {
+ return mPlaybackStream;
+ }
+
+ /**
+ * Gets the type of the receiver device associated with this route.
+ *
+ * @return The type of the receiver device associated with this route:
+ * {@link #DEVICE_TYPE_TV} or {@link #DEVICE_TYPE_SPEAKER}.
+ */
+ public int getDeviceType() {
+ return mDeviceType;
+ }
+
+ /**
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public boolean isDefaultOrBluetooth() {
+ if (isDefault() || mDeviceType == DEVICE_TYPE_BLUETOOTH) {
+ return true;
+ }
+ // This is a workaround for platform version 23 or below where the system route
+ // provider doesn't specify device type for bluetooth media routes.
+ return isSystemMediaRouteProvider(this)
+ && supportsControlCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO)
+ && !supportsControlCategory(MediaControlIntent.CATEGORY_LIVE_VIDEO);
+ }
+
+ /**
+ * Returns {@code true} if the route is selectable.
+ */
+ boolean isSelectable() {
+ // This tests whether the route is still valid and enabled.
+ // The route descriptor field is set to null when the route is removed.
+ return mDescriptor != null && mEnabled;
+ }
+
+ private static boolean isSystemMediaRouteProvider(MediaRouter.RouteInfo route) {
+ return TextUtils.equals(route.getProviderInstance().getMetadata().getPackageName(),
+ SYSTEM_MEDIA_ROUTE_PROVIDER_PACKAGE_NAME);
+ }
+
+ /**
+ * Gets information about how volume is handled on the route.
+ *
+ * @return How volume is handled on the route: {@link #PLAYBACK_VOLUME_FIXED}
+ * or {@link #PLAYBACK_VOLUME_VARIABLE}.
+ */
+ @PlaybackVolume
+ public int getVolumeHandling() {
+ return mVolumeHandling;
+ }
+
+ /**
+ * Gets the current volume for this route. Depending on the route, this may only
+ * be valid if the route is currently selected.
+ *
+ * @return The volume at which the playback associated with this route is performed.
+ */
+ public int getVolume() {
+ return mVolume;
+ }
+
+ /**
+ * Gets the maximum volume at which the playback associated with this route is performed.
+ *
+ * @return The maximum volume at which the playback associated with
+ * this route is performed.
+ */
+ public int getVolumeMax() {
+ return mVolumeMax;
+ }
+
+ /**
+ * Gets whether this route supports disconnecting without interrupting
+ * playback.
+ *
+ * @return True if this route can disconnect without stopping playback,
+ * false otherwise.
+ */
+ public boolean canDisconnect() {
+ return mCanDisconnect;
+ }
+
+ /**
+ * Requests a volume change for this route asynchronously.
+ * <p>
+ * This function may only be called on a selected route. It will have
+ * no effect if the route is currently unselected.
+ * </p>
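+     * <p>
+     * For example, wiring a volume slider to the route ({@code seekBar} is
+     * illustrative):
+     * </p>
+     * <pre>{@code
+     * route.requestSetVolume(seekBar.getProgress());
+     * }</pre>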
+ *
+ * @param volume The new volume value between 0 and {@link #getVolumeMax}.
+ */
+ public void requestSetVolume(int volume) {
+ checkCallingThread();
+ sGlobal.requestSetVolume(this, Math.min(mVolumeMax, Math.max(0, volume)));
+ }
+
+ /**
+ * Requests an incremental volume update for this route asynchronously.
+ * <p>
+ * This function may only be called on a selected route. It will have
+ * no effect if the route is currently unselected.
+ * </p>
+ *
+ * @param delta The delta to add to the current volume.
+ */
+ public void requestUpdateVolume(int delta) {
+ checkCallingThread();
+ if (delta != 0) {
+ sGlobal.requestUpdateVolume(this, delta);
+ }
+ }
+
+ /**
+ * Gets the {@link Display} that should be used by the application to show
+ * a {@link android.app.Presentation} on an external display when this route is selected.
+ * Depending on the route, this may only be valid if the route is currently
+ * selected.
+ * <p>
+ * The preferred presentation display may change independently of the route
+ * being selected or unselected. For example, the presentation display
+ * of the default system route may change when an external HDMI display is connected
+ * or disconnected even though the route itself has not changed.
+ * </p><p>
+ * This method may return null if there is no external display associated with
+ * the route or if the display is not ready to show UI yet.
+ * </p><p>
+ * The application should listen for changes to the presentation display
+ * using the {@link Callback#onRoutePresentationDisplayChanged} callback and
+ * show or dismiss its {@link android.app.Presentation} accordingly when the display
+ * becomes available or is removed.
+ * </p><p>
+ * This method only makes sense for
+ * {@link MediaControlIntent#CATEGORY_LIVE_VIDEO live video} routes.
+ * </p>
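+     * <p>
+     * A minimal sketch of showing a {@link android.app.Presentation} on the route's
+     * display ({@code MyPresentation} is an illustrative subclass):
+     * </p>
+     * <pre>{@code
+     * Display display = route.getPresentationDisplay();
+     * if (display != null) {
+     *     new MyPresentation(activity, display).show();
+     * }
+     * }</pre>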
+ *
+ * @return The preferred presentation display to use when this route is
+ * selected or null if none.
+ *
+ * @see MediaControlIntent#CATEGORY_LIVE_VIDEO
+ * @see android.app.Presentation
+ */
+ @Nullable
+ public Display getPresentationDisplay() {
+ checkCallingThread();
+ if (mPresentationDisplayId >= 0 && mPresentationDisplay == null) {
+ mPresentationDisplay = sGlobal.getDisplay(mPresentationDisplayId);
+ }
+ return mPresentationDisplay;
+ }
+
+ /**
+ * Gets the route's presentation display id, or -1 if none.
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public int getPresentationDisplayId() {
+ return mPresentationDisplayId;
+ }
+
+ /**
+ * Gets a collection of extra properties about this route that were supplied
+ * by its media route provider, or null if none.
+ */
+ @Nullable
+ public Bundle getExtras() {
+ return mExtras;
+ }
+
+ /**
+ * Gets an intent sender for launching a settings activity for this
+ * route.
+ */
+ @Nullable
+ public IntentSender getSettingsIntent() {
+ return mSettingsIntent;
+ }
+
+ /**
+ * Selects this media route.
+ */
+ public void select() {
+ checkCallingThread();
+ sGlobal.selectRoute(this);
+ }
+
+ @Override
+ public String toString() {
+ return "MediaRouter.RouteInfo{ uniqueId=" + mUniqueId
+ + ", name=" + mName
+ + ", description=" + mDescription
+ + ", iconUri=" + mIconUri
+ + ", enabled=" + mEnabled
+ + ", connecting=" + mConnecting
+ + ", connectionState=" + mConnectionState
+ + ", canDisconnect=" + mCanDisconnect
+ + ", playbackType=" + mPlaybackType
+ + ", playbackStream=" + mPlaybackStream
+ + ", deviceType=" + mDeviceType
+ + ", volumeHandling=" + mVolumeHandling
+ + ", volume=" + mVolume
+ + ", volumeMax=" + mVolumeMax
+ + ", presentationDisplayId=" + mPresentationDisplayId
+ + ", extras=" + mExtras
+ + ", settingsIntent=" + mSettingsIntent
+ + ", providerPackageName=" + mProvider.getPackageName()
+ + " }";
+ }
+
+ int maybeUpdateDescriptor(MediaRouteDescriptor descriptor) {
+ int changes = 0;
+ if (mDescriptor != descriptor) {
+ changes = updateDescriptor(descriptor);
+ }
+ return changes;
+ }
+
+ int updateDescriptor(MediaRouteDescriptor descriptor) {
+ int changes = 0;
+ mDescriptor = descriptor;
+ if (descriptor != null) {
+ if (!equal(mName, descriptor.getName())) {
+ mName = descriptor.getName();
+ changes |= CHANGE_GENERAL;
+ }
+ if (!equal(mDescription, descriptor.getDescription())) {
+ mDescription = descriptor.getDescription();
+ changes |= CHANGE_GENERAL;
+ }
+ if (!equal(mIconUri, descriptor.getIconUri())) {
+ mIconUri = descriptor.getIconUri();
+ changes |= CHANGE_GENERAL;
+ }
+ if (mEnabled != descriptor.isEnabled()) {
+ mEnabled = descriptor.isEnabled();
+ changes |= CHANGE_GENERAL;
+ }
+ if (mConnecting != descriptor.isConnecting()) {
+ mConnecting = descriptor.isConnecting();
+ changes |= CHANGE_GENERAL;
+ }
+ if (mConnectionState != descriptor.getConnectionState()) {
+ mConnectionState = descriptor.getConnectionState();
+ changes |= CHANGE_GENERAL;
+ }
+ if (!mControlFilters.equals(descriptor.getControlFilters())) {
+ mControlFilters.clear();
+ mControlFilters.addAll(descriptor.getControlFilters());
+ changes |= CHANGE_GENERAL;
+ }
+ if (mPlaybackType != descriptor.getPlaybackType()) {
+ mPlaybackType = descriptor.getPlaybackType();
+ changes |= CHANGE_GENERAL;
+ }
+ if (mPlaybackStream != descriptor.getPlaybackStream()) {
+ mPlaybackStream = descriptor.getPlaybackStream();
+ changes |= CHANGE_GENERAL;
+ }
+ if (mDeviceType != descriptor.getDeviceType()) {
+ mDeviceType = descriptor.getDeviceType();
+ changes |= CHANGE_GENERAL;
+ }
+ if (mVolumeHandling != descriptor.getVolumeHandling()) {
+ mVolumeHandling = descriptor.getVolumeHandling();
+ changes |= CHANGE_GENERAL | CHANGE_VOLUME;
+ }
+ if (mVolume != descriptor.getVolume()) {
+ mVolume = descriptor.getVolume();
+ changes |= CHANGE_GENERAL | CHANGE_VOLUME;
+ }
+ if (mVolumeMax != descriptor.getVolumeMax()) {
+ mVolumeMax = descriptor.getVolumeMax();
+ changes |= CHANGE_GENERAL | CHANGE_VOLUME;
+ }
+ if (mPresentationDisplayId != descriptor.getPresentationDisplayId()) {
+ mPresentationDisplayId = descriptor.getPresentationDisplayId();
+ mPresentationDisplay = null;
+ changes |= CHANGE_GENERAL | CHANGE_PRESENTATION_DISPLAY;
+ }
+ if (!equal(mExtras, descriptor.getExtras())) {
+ mExtras = descriptor.getExtras();
+ changes |= CHANGE_GENERAL;
+ }
+ if (!equal(mSettingsIntent, descriptor.getSettingsActivity())) {
+ mSettingsIntent = descriptor.getSettingsActivity();
+ changes |= CHANGE_GENERAL;
+ }
+ if (mCanDisconnect != descriptor.canDisconnectAndKeepPlaying()) {
+ mCanDisconnect = descriptor.canDisconnectAndKeepPlaying();
+ changes |= CHANGE_GENERAL | CHANGE_PRESENTATION_DISPLAY;
+ }
+ }
+ return changes;
+ }
+
+ String getDescriptorId() {
+ return mDescriptorId;
+ }
+
+ /** @hide */
+ // @RestrictTo(LIBRARY_GROUP)
+ public MediaRouteProvider getProviderInstance() {
+ return mProvider.getProviderInstance();
+ }
+ }
+
+ /**
+ * Information about a route that consists of multiple other routes in a group.
+ * @hide
+ */
+ // @RestrictTo(LIBRARY_GROUP)
+ public static class RouteGroup extends RouteInfo {
+ private List<RouteInfo> mRoutes = new ArrayList<>();
+
+ RouteGroup(ProviderInfo provider, String descriptorId, String uniqueId) {
+ super(provider, descriptorId, uniqueId);
+ }
+
+ /**
+         * @return The number of routes in this group.
+ */
+ public int getRouteCount() {
+ return mRoutes.size();
+ }
+
+ /**
+         * Returns the route in this group at the specified index.
+         *
+         * @param index The index of the route to fetch.
+         * @return The route at the specified index.
+ */
+ public RouteInfo getRouteAt(int index) {
+ return mRoutes.get(index);
+ }
+
+ /**
+         * Returns the routes in this group.
+         *
+         * @return The list of routes in this group.
+ */
+ public List<RouteInfo> getRoutes() {
+ return mRoutes;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder(super.toString());
+ sb.append('[');
+ final int count = mRoutes.size();
+ for (int i = 0; i < count; i++) {
+ if (i > 0) sb.append(", ");
+ sb.append(mRoutes.get(i));
+ }
+ sb.append(']');
+ return sb.toString();
+ }
+
+ @Override
+ int maybeUpdateDescriptor(MediaRouteDescriptor descriptor) {
+ boolean changed = false;
+ if (mDescriptor != descriptor) {
+ mDescriptor = descriptor;
+ if (descriptor != null) {
+ List<String> groupMemberIds = descriptor.getGroupMemberIds();
+ List<RouteInfo> routes = new ArrayList<>();
+ changed = groupMemberIds.size() != mRoutes.size();
+ for (String groupMemberId : groupMemberIds) {
+ String uniqueId = sGlobal.getUniqueId(getProvider(), groupMemberId);
+ RouteInfo groupMember = sGlobal.getRoute(uniqueId);
+ if (groupMember != null) {
+ routes.add(groupMember);
+ if (!changed && !mRoutes.contains(groupMember)) {
+ changed = true;
+ }
+ }
+ }
+ if (changed) {
+ mRoutes = routes;
+ }
+ }
+ }
+ return (changed ? CHANGE_GENERAL : 0) | super.updateDescriptor(descriptor);
+ }
+ }
+
+ /**
+ * Provides information about a media route provider.
+ * <p>
+ * This object may be used to determine which media route provider has
+ * published a particular route.
+ * </p>
+ */
+ public static final class ProviderInfo {
+ private final MediaRouteProvider mProviderInstance;
+ private final List<RouteInfo> mRoutes = new ArrayList<>();
+
+ private final ProviderMetadata mMetadata;
+ private MediaRouteProviderDescriptor mDescriptor;
+ private Resources mResources;
+ private boolean mResourcesNotAvailable;
+
+ ProviderInfo(MediaRouteProvider provider) {
+ mProviderInstance = provider;
+ mMetadata = provider.getMetadata();
+ }
+
+ /**
+ * Gets the provider's underlying {@link MediaRouteProvider} instance.
+ */
+ public MediaRouteProvider getProviderInstance() {
+ checkCallingThread();
+ return mProviderInstance;
+ }
+
+ /**
+ * Gets the package name of the media route provider.
+ */
+ public String getPackageName() {
+ return mMetadata.getPackageName();
+ }
+
+ /**
+ * Gets the component name of the media route provider.
+ */
+ public ComponentName getComponentName() {
+ return mMetadata.getComponentName();
+ }
+
+ /**
+ * Gets the {@link MediaRouter.RouteInfo routes} published by this route provider.
+ */
+ public List<RouteInfo> getRoutes() {
+ checkCallingThread();
+ return mRoutes;
+ }
+
+ Resources getResources() {
+ if (mResources == null && !mResourcesNotAvailable) {
+ String packageName = getPackageName();
+ Context context = sGlobal.getProviderContext(packageName);
+ if (context != null) {
+ mResources = context.getResources();
+ } else {
+ Log.w(TAG, "Unable to obtain resources for route provider package: "
+ + packageName);
+ mResourcesNotAvailable = true;
+ }
+ }
+ return mResources;
+ }
+
+ boolean updateDescriptor(MediaRouteProviderDescriptor descriptor) {
+ if (mDescriptor != descriptor) {
+ mDescriptor = descriptor;
+ return true;
+ }
+ return false;
+ }
+
+ int findRouteByDescriptorId(String id) {
+ final int count = mRoutes.size();
+ for (int i = 0; i < count; i++) {
+ if (mRoutes.get(i).mDescriptorId.equals(id)) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ @Override
+ public String toString() {
+ return "MediaRouter.RouteProviderInfo{ packageName=" + getPackageName()
+ + " }";
+ }
+ }
+
+ /**
+ * Interface for receiving events about media routing changes.
+ * All methods of this interface will be called from the application's main thread.
+ * <p>
+ * A Callback will only receive events relevant to routes that the callback
+ * was registered for unless the {@link MediaRouter#CALLBACK_FLAG_UNFILTERED_EVENTS}
+ * flag was specified in {@link MediaRouter#addCallback(MediaRouteSelector, Callback, int)}.
+ * </p>
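+     * <p>
+     * A minimal registration sketch; the selector contents and the {@code callback}
+     * variable (an instance of your Callback subclass) are illustrative:
+     * </p>
+     * <pre>{@code
+     * MediaRouter router = MediaRouter.getInstance(context);
+     * MediaRouteSelector selector = new MediaRouteSelector.Builder()
+     *         .addControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK)
+     *         .build();
+     * router.addCallback(selector, callback, MediaRouter.CALLBACK_FLAG_REQUEST_DISCOVERY);
+     * // Remove the callback when it is no longer needed, e.g. in onStop().
+     * router.removeCallback(callback);
+     * }</pre>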
+ *
+ * @see MediaRouter#addCallback(MediaRouteSelector, Callback, int)
+ * @see MediaRouter#removeCallback(Callback)
+ */
+ public static abstract class Callback {
+ /**
+ * Called when the supplied media route becomes selected as the active route.
+ *
+ * @param router The media router reporting the event.
+ * @param route The route that has been selected.
+ */
+ public void onRouteSelected(MediaRouter router, RouteInfo route) {
+ }
+
+ /**
+ * Called when the supplied media route becomes unselected as the active route.
+         * To receive the reason the route was unselected, override
+         * {@link #onRouteUnselected(MediaRouter, RouteInfo, int)} instead.
+ *
+ * @param router The media router reporting the event.
+ * @param route The route that has been unselected.
+ */
+ public void onRouteUnselected(MediaRouter router, RouteInfo route) {
+ }
+
+ /**
+ * Called when the supplied media route becomes unselected as the active route.
+ * The default implementation calls {@link #onRouteUnselected}.
+ * <p>
+ * The reason provided will be one of the following:
+ * <ul>
+ * <li>{@link MediaRouter#UNSELECT_REASON_UNKNOWN}</li>
+ * <li>{@link MediaRouter#UNSELECT_REASON_DISCONNECTED}</li>
+ * <li>{@link MediaRouter#UNSELECT_REASON_STOPPED}</li>
+ * <li>{@link MediaRouter#UNSELECT_REASON_ROUTE_CHANGED}</li>
+ * </ul>
+ *
+ * @param router The media router reporting the event.
+ * @param route The route that has been unselected.
+ * @param reason The reason for unselecting the route.
+ */
+ public void onRouteUnselected(MediaRouter router, RouteInfo route, int reason) {
+ onRouteUnselected(router, route);
+ }
+
+ /**
+ * Called when a media route has been added.
+ *
+ * @param router The media router reporting the event.
+ * @param route The route that has become available for use.
+ */
+ public void onRouteAdded(MediaRouter router, RouteInfo route) {
+ }
+
+ /**
+ * Called when a media route has been removed.
+ *
+ * @param router The media router reporting the event.
+ * @param route The route that has been removed from availability.
+ */
+ public void onRouteRemoved(MediaRouter router, RouteInfo route) {
+ }
+
+ /**
+ * Called when a property of the indicated media route has changed.
+ *
+ * @param router The media router reporting the event.
+ * @param route The route that was changed.
+ */
+ public void onRouteChanged(MediaRouter router, RouteInfo route) {
+ }
+
+ /**
+ * Called when a media route's volume changes.
+ *
+ * @param router The media router reporting the event.
+ * @param route The route whose volume changed.
+ */
+ public void onRouteVolumeChanged(MediaRouter router, RouteInfo route) {
+ }
+
+ /**
+ * Called when a media route's presentation display changes.
+ * <p>
+ * This method is called whenever the route's presentation display becomes
+ * available, is removed or has changes to some of its properties (such as its size).
+ * </p>
+ *
+ * @param router The media router reporting the event.
+ * @param route The route whose presentation display changed.
+ *
+ * @see RouteInfo#getPresentationDisplay()
+ */
+ public void onRoutePresentationDisplayChanged(MediaRouter router, RouteInfo route) {
+ }
+
+ /**
+ * Called when a media route provider has been added.
+ *
+ * @param router The media router reporting the event.
+ * @param provider The provider that has become available for use.
+ */
+ public void onProviderAdded(MediaRouter router, ProviderInfo provider) {
+ }
+
+ /**
+ * Called when a media route provider has been removed.
+ *
+ * @param router The media router reporting the event.
+ * @param provider The provider that has been removed from availability.
+ */
+ public void onProviderRemoved(MediaRouter router, ProviderInfo provider) {
+ }
+
+ /**
+ * Called when a property of the indicated media route provider has changed.
+ *
+ * @param router The media router reporting the event.
+ * @param provider The provider that was changed.
+ */
+ public void onProviderChanged(MediaRouter router, ProviderInfo provider) {
+ }
+ }
+
+ /**
+ * Callback which is invoked with the result of a media control request.
+ *
+ * @see RouteInfo#sendControlRequest
+ */
+ public static abstract class ControlRequestCallback {
+ /**
+ * Called when a media control request succeeds.
+ *
+ * @param data Result data, or null if none.
+ * Contents depend on the {@link MediaControlIntent media control action}.
+ */
+ public void onResult(Bundle data) {
+ }
+
+ /**
+ * Called when a media control request fails.
+ *
+ * @param error A localized error message which may be shown to the user, or null
+ * if the cause of the error is unclear.
+ * @param data Error data, or null if none.
+ * Contents depend on the {@link MediaControlIntent media control action}.
+ */
+ public void onError(String error, Bundle data) {
+ }
+ }
+
+ private static final class CallbackRecord {
+ public final MediaRouter mRouter;
+ public final Callback mCallback;
+ public MediaRouteSelector mSelector;
+ public int mFlags;
+
+ public CallbackRecord(MediaRouter router, Callback callback) {
+ mRouter = router;
+ mCallback = callback;
+ mSelector = MediaRouteSelector.EMPTY;
+ }
+
+ public boolean filterRouteEvent(RouteInfo route) {
+ return (mFlags & CALLBACK_FLAG_UNFILTERED_EVENTS) != 0
+ || route.matchesSelector(mSelector);
+ }
+ }
+
+ /**
+ * Global state for the media router.
+ * <p>
+ * Media routes and media route providers are global to the process; their
+     * state and the bulk of the media router implementation live here.
+ * </p>
+ */
+ private static final class GlobalMediaRouter
+ implements SystemMediaRouteProvider.SyncCallback,
+ RegisteredMediaRouteProviderWatcher.Callback {
+ final Context mApplicationContext;
+ final ArrayList<WeakReference<MediaRouter>> mRouters = new ArrayList<>();
+ private final ArrayList<RouteInfo> mRoutes = new ArrayList<>();
+ private final Map<Pair<String, String>, String> mUniqueIdMap = new HashMap<>();
+ private final ArrayList<ProviderInfo> mProviders = new ArrayList<>();
+ private final ArrayList<RemoteControlClientRecord> mRemoteControlClients =
+ new ArrayList<>();
+ final RemoteControlClientCompat.PlaybackInfo mPlaybackInfo =
+ new RemoteControlClientCompat.PlaybackInfo();
+ private final ProviderCallback mProviderCallback = new ProviderCallback();
+ final CallbackHandler mCallbackHandler = new CallbackHandler();
+ private final DisplayManagerCompat mDisplayManager;
+ final SystemMediaRouteProvider mSystemProvider;
+ private final boolean mLowRam;
+
+ private RegisteredMediaRouteProviderWatcher mRegisteredProviderWatcher;
+ private RouteInfo mDefaultRoute;
+ private RouteInfo mBluetoothRoute;
+ RouteInfo mSelectedRoute;
+ private RouteController mSelectedRouteController;
+ // A map from route descriptor ID to RouteController for the member routes in the currently
+ // selected route group.
+ private final Map<String, RouteController> mRouteControllerMap = new HashMap<>();
+ private MediaRouteDiscoveryRequest mDiscoveryRequest;
+ private MediaSessionRecord mMediaSession;
+ MediaSessionCompat mRccMediaSession;
+ private MediaSessionCompat mCompatSession;
+ private MediaSessionCompat.OnActiveChangeListener mSessionActiveListener =
+ new MediaSessionCompat.OnActiveChangeListener() {
+ @Override
+ public void onActiveChanged() {
+                        if (mRccMediaSession != null) {
+ if (mRccMediaSession.isActive()) {
+ addRemoteControlClient(mRccMediaSession.getRemoteControlClient());
+ } else {
+ removeRemoteControlClient(mRccMediaSession.getRemoteControlClient());
+ }
+ }
+ }
+ };
+
+ GlobalMediaRouter(Context applicationContext) {
+ mApplicationContext = applicationContext;
+ mDisplayManager = DisplayManagerCompat.getInstance(applicationContext);
+ mLowRam = ActivityManagerCompat.isLowRamDevice(
+ (ActivityManager)applicationContext.getSystemService(
+ Context.ACTIVITY_SERVICE));
+
+ // Add the system media route provider for interoperating with
+ // the framework media router. This one is special and receives
+ // synchronization messages from the media router.
+ mSystemProvider = SystemMediaRouteProvider.obtain(applicationContext, this);
+ }
+
+ public void start() {
+ addProvider(mSystemProvider);
+
+ // Start watching for routes published by registered media route
+ // provider services.
+ mRegisteredProviderWatcher = new RegisteredMediaRouteProviderWatcher(
+ mApplicationContext, this);
+ mRegisteredProviderWatcher.start();
+ }
+
+ public MediaRouter getRouter(Context context) {
+ MediaRouter router;
+ for (int i = mRouters.size(); --i >= 0; ) {
+ router = mRouters.get(i).get();
+ if (router == null) {
+ mRouters.remove(i);
+ } else if (router.mContext == context) {
+ return router;
+ }
+ }
+ router = new MediaRouter(context);
+ mRouters.add(new WeakReference<MediaRouter>(router));
+ return router;
+ }
+
+ public ContentResolver getContentResolver() {
+ return mApplicationContext.getContentResolver();
+ }
+
+ public Context getProviderContext(String packageName) {
+ if (packageName.equals(SystemMediaRouteProvider.PACKAGE_NAME)) {
+ return mApplicationContext;
+ }
+ try {
+ return mApplicationContext.createPackageContext(
+ packageName, Context.CONTEXT_RESTRICTED);
+ } catch (NameNotFoundException ex) {
+ return null;
+ }
+ }
+
+ public Display getDisplay(int displayId) {
+ return mDisplayManager.getDisplay(displayId);
+ }
+
+ public void sendControlRequest(RouteInfo route,
+ Intent intent, ControlRequestCallback callback) {
+ if (route == mSelectedRoute && mSelectedRouteController != null) {
+ if (mSelectedRouteController.onControlRequest(intent, callback)) {
+ return;
+ }
+ }
+ if (callback != null) {
+ callback.onError(null, null);
+ }
+ }
+
+ public void requestSetVolume(RouteInfo route, int volume) {
+ if (route == mSelectedRoute && mSelectedRouteController != null) {
+ mSelectedRouteController.onSetVolume(volume);
+ } else if (!mRouteControllerMap.isEmpty()) {
+ RouteController controller = mRouteControllerMap.get(route.mDescriptorId);
+ if (controller != null) {
+ controller.onSetVolume(volume);
+ }
+ }
+ }
+
+ public void requestUpdateVolume(RouteInfo route, int delta) {
+ if (route == mSelectedRoute && mSelectedRouteController != null) {
+ mSelectedRouteController.onUpdateVolume(delta);
+ }
+ }
+
+ public RouteInfo getRoute(String uniqueId) {
+ for (RouteInfo info : mRoutes) {
+ if (info.mUniqueId.equals(uniqueId)) {
+ return info;
+ }
+ }
+ return null;
+ }
+
+ public List<RouteInfo> getRoutes() {
+ return mRoutes;
+ }
+
+ List<ProviderInfo> getProviders() {
+ return mProviders;
+ }
+
+ @NonNull RouteInfo getDefaultRoute() {
+ if (mDefaultRoute == null) {
+ // This should never happen once the media router has been fully
+ // initialized but it is good to check for the error in case there
+ // is a bug in provider initialization.
+ throw new IllegalStateException("There is no default route. "
+ + "The media router has not yet been fully initialized.");
+ }
+ return mDefaultRoute;
+ }
+
+ RouteInfo getBluetoothRoute() {
+ return mBluetoothRoute;
+ }
+
+ @NonNull RouteInfo getSelectedRoute() {
+ if (mSelectedRoute == null) {
+ // This should never happen once the media router has been fully
+ // initialized but it is good to check for the error in case there
+ // is a bug in provider initialization.
+ throw new IllegalStateException("There is no currently selected route. "
+ + "The media router has not yet been fully initialized.");
+ }
+ return mSelectedRoute;
+ }
+
+ void selectRoute(@NonNull RouteInfo route) {
+ selectRoute(route, MediaRouter.UNSELECT_REASON_ROUTE_CHANGED);
+ }
+
+ void selectRoute(@NonNull RouteInfo route, int unselectReason) {
+ if (!mRoutes.contains(route)) {
+ Log.w(TAG, "Ignoring attempt to select removed route: " + route);
+ return;
+ }
+ if (!route.mEnabled) {
+ Log.w(TAG, "Ignoring attempt to select disabled route: " + route);
+ return;
+ }
+ setSelectedRouteInternal(route, unselectReason);
+ }
+
+ public boolean isRouteAvailable(MediaRouteSelector selector, int flags) {
+ if (selector.isEmpty()) {
+ return false;
+ }
+
+ // On low-RAM devices, do not rely on actual discovery results unless asked to.
+ if ((flags & AVAILABILITY_FLAG_REQUIRE_MATCH) == 0 && mLowRam) {
+ return true;
+ }
+
+ // Check whether any existing routes match the selector.
+ final int routeCount = mRoutes.size();
+ for (int i = 0; i < routeCount; i++) {
+ RouteInfo route = mRoutes.get(i);
+ if ((flags & AVAILABILITY_FLAG_IGNORE_DEFAULT_ROUTE) != 0
+ && route.isDefaultOrBluetooth()) {
+ continue;
+ }
+ if (route.matchesSelector(selector)) {
+ return true;
+ }
+ }
+
+ // It doesn't look like we can find a matching route right now.
+ return false;
+ }
+
+ public void updateDiscoveryRequest() {
+ // Combine all of the callback selectors and active scan flags.
+ boolean discover = false;
+ boolean activeScan = false;
+ MediaRouteSelector.Builder builder = new MediaRouteSelector.Builder();
+ for (int i = mRouters.size(); --i >= 0; ) {
+ MediaRouter router = mRouters.get(i).get();
+ if (router == null) {
+ mRouters.remove(i);
+ } else {
+ final int count = router.mCallbackRecords.size();
+ for (int j = 0; j < count; j++) {
+ CallbackRecord callback = router.mCallbackRecords.get(j);
+ builder.addSelector(callback.mSelector);
+ if ((callback.mFlags & CALLBACK_FLAG_PERFORM_ACTIVE_SCAN) != 0) {
+ activeScan = true;
+                            discover = true; // CALLBACK_FLAG_PERFORM_ACTIVE_SCAN implies CALLBACK_FLAG_REQUEST_DISCOVERY
+ }
+ if ((callback.mFlags & CALLBACK_FLAG_REQUEST_DISCOVERY) != 0) {
+ if (!mLowRam) {
+ discover = true;
+ }
+ }
+ if ((callback.mFlags & CALLBACK_FLAG_FORCE_DISCOVERY) != 0) {
+ discover = true;
+ }
+ }
+ }
+ }
+ MediaRouteSelector selector = discover ? builder.build() : MediaRouteSelector.EMPTY;
+
+ // Create a new discovery request.
+ if (mDiscoveryRequest != null
+ && mDiscoveryRequest.getSelector().equals(selector)
+ && mDiscoveryRequest.isActiveScan() == activeScan) {
+ return; // no change
+ }
+ if (selector.isEmpty() && !activeScan) {
+ // Discovery is not needed.
+ if (mDiscoveryRequest == null) {
+ return; // no change
+ }
+ mDiscoveryRequest = null;
+ } else {
+ // Discovery is needed.
+ mDiscoveryRequest = new MediaRouteDiscoveryRequest(selector, activeScan);
+ }
+ if (DEBUG) {
+ Log.d(TAG, "Updated discovery request: " + mDiscoveryRequest);
+ }
+ if (discover && !activeScan && mLowRam) {
+ Log.i(TAG, "Forcing passive route discovery on a low-RAM device, "
+ + "system performance may be affected. Please consider using "
+ + "CALLBACK_FLAG_REQUEST_DISCOVERY instead of "
+ + "CALLBACK_FLAG_FORCE_DISCOVERY.");
+ }
+
+ // Notify providers.
+ final int providerCount = mProviders.size();
+ for (int i = 0; i < providerCount; i++) {
+ mProviders.get(i).mProviderInstance.setDiscoveryRequest(mDiscoveryRequest);
+ }
+ }
+
+ @Override
+ public void addProvider(MediaRouteProvider providerInstance) {
+ int index = findProviderInfo(providerInstance);
+ if (index < 0) {
+ // 1. Add the provider to the list.
+ ProviderInfo provider = new ProviderInfo(providerInstance);
+ mProviders.add(provider);
+ if (DEBUG) {
+ Log.d(TAG, "Provider added: " + provider);
+ }
+ mCallbackHandler.post(CallbackHandler.MSG_PROVIDER_ADDED, provider);
+ // 2. Create the provider's contents.
+ updateProviderContents(provider, providerInstance.getDescriptor());
+ // 3. Register the provider callback.
+ providerInstance.setCallback(mProviderCallback);
+ // 4. Set the discovery request.
+ providerInstance.setDiscoveryRequest(mDiscoveryRequest);
+ }
+ }
+
+ @Override
+ public void removeProvider(MediaRouteProvider providerInstance) {
+ int index = findProviderInfo(providerInstance);
+ if (index >= 0) {
+ // 1. Unregister the provider callback.
+ providerInstance.setCallback(null);
+ // 2. Clear the discovery request.
+ providerInstance.setDiscoveryRequest(null);
+ // 3. Delete the provider's contents.
+ ProviderInfo provider = mProviders.get(index);
+ updateProviderContents(provider, null);
+ // 4. Remove the provider from the list.
+ if (DEBUG) {
+ Log.d(TAG, "Provider removed: " + provider);
+ }
+ mCallbackHandler.post(CallbackHandler.MSG_PROVIDER_REMOVED, provider);
+ mProviders.remove(index);
+ }
+ }
+
+ void updateProviderDescriptor(MediaRouteProvider providerInstance,
+ MediaRouteProviderDescriptor descriptor) {
+ int index = findProviderInfo(providerInstance);
+ if (index >= 0) {
+ // Update the provider's contents.
+ ProviderInfo provider = mProviders.get(index);
+ updateProviderContents(provider, descriptor);
+ }
+ }
+
+ private int findProviderInfo(MediaRouteProvider providerInstance) {
+ final int count = mProviders.size();
+ for (int i = 0; i < count; i++) {
+ if (mProviders.get(i).mProviderInstance == providerInstance) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ private void updateProviderContents(ProviderInfo provider,
+ MediaRouteProviderDescriptor providerDescriptor) {
+ if (provider.updateDescriptor(providerDescriptor)) {
+ // Update all existing routes and reorder them to match
+ // the order of their descriptors.
+ int targetIndex = 0;
+ boolean selectedRouteDescriptorChanged = false;
+ if (providerDescriptor != null) {
+ if (providerDescriptor.isValid()) {
+ final List<MediaRouteDescriptor> routeDescriptors =
+ providerDescriptor.getRoutes();
+ final int routeCount = routeDescriptors.size();
+ // Updating route group's contents requires all member routes' information.
+ // Add the groups to the lists and update them later.
+ List<Pair<RouteInfo, MediaRouteDescriptor>> addedGroups = new ArrayList<>();
+ List<Pair<RouteInfo, MediaRouteDescriptor>> updatedGroups =
+ new ArrayList<>();
+ for (int i = 0; i < routeCount; i++) {
+ final MediaRouteDescriptor routeDescriptor = routeDescriptors.get(i);
+ final String id = routeDescriptor.getId();
+ final int sourceIndex = provider.findRouteByDescriptorId(id);
+ if (sourceIndex < 0) {
+ // 1. Add the route to the list.
+ String uniqueId = assignRouteUniqueId(provider, id);
+ boolean isGroup = routeDescriptor.getGroupMemberIds() != null;
+ RouteInfo route = isGroup ? new RouteGroup(provider, id, uniqueId) :
+ new RouteInfo(provider, id, uniqueId);
+ provider.mRoutes.add(targetIndex++, route);
+ mRoutes.add(route);
+ // 2. Create the route's contents.
+ if (isGroup) {
+ addedGroups.add(new Pair<>(route, routeDescriptor));
+ } else {
+ route.maybeUpdateDescriptor(routeDescriptor);
+ // 3. Notify clients about addition.
+ if (DEBUG) {
+ Log.d(TAG, "Route added: " + route);
+ }
+ mCallbackHandler.post(CallbackHandler.MSG_ROUTE_ADDED, route);
+ }
+ } else if (sourceIndex < targetIndex) {
+ Log.w(TAG, "Ignoring route descriptor with duplicate id: "
+ + routeDescriptor);
+ } else {
+ // 1. Reorder the route within the list.
+ RouteInfo route = provider.mRoutes.get(sourceIndex);
+ Collections.swap(provider.mRoutes,
+ sourceIndex, targetIndex++);
+ // 2. Update the route's contents.
+ if (route instanceof RouteGroup) {
+ updatedGroups.add(new Pair<>(route, routeDescriptor));
+ } else {
+ // 3. Notify clients about changes.
+ if (updateRouteDescriptorAndNotify(route, routeDescriptor)
+ != 0) {
+ if (route == mSelectedRoute) {
+ selectedRouteDescriptorChanged = true;
+ }
+ }
+ }
+ }
+ }
+ // Update the new and/or existing groups.
+ for (Pair<RouteInfo, MediaRouteDescriptor> pair : addedGroups) {
+ RouteInfo route = pair.first;
+ route.maybeUpdateDescriptor(pair.second);
+ if (DEBUG) {
+ Log.d(TAG, "Route added: " + route);
+ }
+ mCallbackHandler.post(CallbackHandler.MSG_ROUTE_ADDED, route);
+ }
+ for (Pair<RouteInfo, MediaRouteDescriptor> pair : updatedGroups) {
+ RouteInfo route = pair.first;
+ if (updateRouteDescriptorAndNotify(route, pair.second) != 0) {
+ if (route == mSelectedRoute) {
+ selectedRouteDescriptorChanged = true;
+ }
+ }
+ }
+ } else {
+ Log.w(TAG, "Ignoring invalid provider descriptor: " + providerDescriptor);
+ }
+ }
+
+ // Dispose all remaining routes that do not have matching descriptors.
+ for (int i = provider.mRoutes.size() - 1; i >= targetIndex; i--) {
+ // 1. Delete the route's contents.
+ RouteInfo route = provider.mRoutes.get(i);
+ route.maybeUpdateDescriptor(null);
+ // 2. Remove the route from the list.
+ mRoutes.remove(route);
+ }
+
+ // Update the selected route if needed.
+ updateSelectedRouteIfNeeded(selectedRouteDescriptorChanged);
+
+ // Now notify clients about routes that were removed.
+ // We do this after updating the selected route to ensure
+ // that the framework media router observes the new route
+ // selection before the removal since removing the currently
+ // selected route may have side-effects.
+ for (int i = provider.mRoutes.size() - 1; i >= targetIndex; i--) {
+ RouteInfo route = provider.mRoutes.remove(i);
+ if (DEBUG) {
+ Log.d(TAG, "Route removed: " + route);
+ }
+ mCallbackHandler.post(CallbackHandler.MSG_ROUTE_REMOVED, route);
+ }
+
+ // Notify provider changed.
+ if (DEBUG) {
+ Log.d(TAG, "Provider changed: " + provider);
+ }
+ mCallbackHandler.post(CallbackHandler.MSG_PROVIDER_CHANGED, provider);
+ }
+ }
+
+ private int updateRouteDescriptorAndNotify(RouteInfo route,
+ MediaRouteDescriptor routeDescriptor) {
+ int changes = route.maybeUpdateDescriptor(routeDescriptor);
+ if (changes != 0) {
+ if ((changes & RouteInfo.CHANGE_GENERAL) != 0) {
+ if (DEBUG) {
+ Log.d(TAG, "Route changed: " + route);
+ }
+ mCallbackHandler.post(
+ CallbackHandler.MSG_ROUTE_CHANGED, route);
+ }
+ if ((changes & RouteInfo.CHANGE_VOLUME) != 0) {
+ if (DEBUG) {
+ Log.d(TAG, "Route volume changed: " + route);
+ }
+ mCallbackHandler.post(
+ CallbackHandler.MSG_ROUTE_VOLUME_CHANGED, route);
+ }
+ if ((changes & RouteInfo.CHANGE_PRESENTATION_DISPLAY) != 0) {
+ if (DEBUG) {
+ Log.d(TAG, "Route presentation display changed: "
+ + route);
+ }
+                    mCallbackHandler.post(
+                            CallbackHandler.MSG_ROUTE_PRESENTATION_DISPLAY_CHANGED, route);
+ }
+ }
+ return changes;
+ }
+
+ private String assignRouteUniqueId(ProviderInfo provider, String routeDescriptorId) {
+ // Although route descriptor ids are unique within a provider, it's
+ // possible for there to be two providers with the same package name.
+ // Therefore we must dedupe the composite id.
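+            // For example, "com.example/.ExampleProvider:route1" may become
+            // "com.example/.ExampleProvider:route1_2" if the plain composite id is already taken.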
+ String componentName = provider.getComponentName().flattenToShortString();
+ String uniqueId = componentName + ":" + routeDescriptorId;
+ if (findRouteByUniqueId(uniqueId) < 0) {
+ mUniqueIdMap.put(new Pair<>(componentName, routeDescriptorId), uniqueId);
+ return uniqueId;
+ }
+ Log.w(TAG, "Either " + routeDescriptorId + " isn't unique in " + componentName
+ + " or we're trying to assign a unique ID for an already added route");
+ for (int i = 2; ; i++) {
+ String newUniqueId = String.format(Locale.US, "%s_%d", uniqueId, i);
+ if (findRouteByUniqueId(newUniqueId) < 0) {
+ mUniqueIdMap.put(new Pair<>(componentName, routeDescriptorId), newUniqueId);
+ return newUniqueId;
+ }
+ }
+ }
+
+ private int findRouteByUniqueId(String uniqueId) {
+ final int count = mRoutes.size();
+ for (int i = 0; i < count; i++) {
+ if (mRoutes.get(i).mUniqueId.equals(uniqueId)) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ private String getUniqueId(ProviderInfo provider, String routeDescriptorId) {
+ String componentName = provider.getComponentName().flattenToShortString();
+ return mUniqueIdMap.get(new Pair<>(componentName, routeDescriptorId));
+ }
+
+ private void updateSelectedRouteIfNeeded(boolean selectedRouteDescriptorChanged) {
+ // Update default route.
+ if (mDefaultRoute != null && !mDefaultRoute.isSelectable()) {
+ Log.i(TAG, "Clearing the default route because it "
+ + "is no longer selectable: " + mDefaultRoute);
+ mDefaultRoute = null;
+ }
+ if (mDefaultRoute == null && !mRoutes.isEmpty()) {
+ for (RouteInfo route : mRoutes) {
+ if (isSystemDefaultRoute(route) && route.isSelectable()) {
+ mDefaultRoute = route;
+ Log.i(TAG, "Found default route: " + mDefaultRoute);
+ break;
+ }
+ }
+ }
+
+ // Update bluetooth route.
+ if (mBluetoothRoute != null && !mBluetoothRoute.isSelectable()) {
+ Log.i(TAG, "Clearing the bluetooth route because it "
+ + "is no longer selectable: " + mBluetoothRoute);
+ mBluetoothRoute = null;
+ }
+ if (mBluetoothRoute == null && !mRoutes.isEmpty()) {
+ for (RouteInfo route : mRoutes) {
+ if (isSystemLiveAudioOnlyRoute(route) && route.isSelectable()) {
+ mBluetoothRoute = route;
+ Log.i(TAG, "Found bluetooth route: " + mBluetoothRoute);
+ break;
+ }
+ }
+ }
+
+ // Update selected route.
+ if (mSelectedRoute == null || !mSelectedRoute.isSelectable()) {
+ Log.i(TAG, "Unselecting the current route because it "
+ + "is no longer selectable: " + mSelectedRoute);
+ setSelectedRouteInternal(chooseFallbackRoute(),
+ MediaRouter.UNSELECT_REASON_UNKNOWN);
+ } else if (selectedRouteDescriptorChanged) {
+ // In case the selected route is a route group, select/unselect route controllers
+ // for the added/removed route members.
+ if (mSelectedRoute instanceof RouteGroup) {
+ List<RouteInfo> routes = ((RouteGroup) mSelectedRoute).getRoutes();
+ // Build a set of descriptor IDs for the new route group.
+ Set<String> idSet = new HashSet<>();
+ for (RouteInfo route : routes) {
+ idSet.add(route.mDescriptorId);
+ }
+ // Unselect route controllers for the removed routes.
+ Iterator<Map.Entry<String, RouteController>> iter =
+ mRouteControllerMap.entrySet().iterator();
+ while (iter.hasNext()) {
+ Map.Entry<String, RouteController> entry = iter.next();
+ if (!idSet.contains(entry.getKey())) {
+ RouteController controller = entry.getValue();
+ controller.onUnselect();
+ controller.onRelease();
+ iter.remove();
+ }
+ }
+ // Select route controllers for the added routes.
+ for (RouteInfo route : routes) {
+ if (!mRouteControllerMap.containsKey(route.mDescriptorId)) {
+ RouteController controller = route.getProviderInstance()
+ .onCreateRouteController(
+ route.mDescriptorId, mSelectedRoute.mDescriptorId);
+ controller.onSelect();
+ mRouteControllerMap.put(route.mDescriptorId, controller);
+ }
+ }
+ }
+ // Update the playback info because the properties of the route have changed.
+ updatePlaybackInfoFromSelectedRoute();
+ }
+ }
+
+ RouteInfo chooseFallbackRoute() {
+ // When the current route is removed or no longer selectable,
+ // we want to revert to a live audio route if there is
+ // one (usually Bluetooth A2DP). Failing that, use
+ // the default route.
+ for (RouteInfo route : mRoutes) {
+ if (route != mDefaultRoute
+ && isSystemLiveAudioOnlyRoute(route)
+ && route.isSelectable()) {
+ return route;
+ }
+ }
+ return mDefaultRoute;
+ }
+
+ private boolean isSystemLiveAudioOnlyRoute(RouteInfo route) {
+ return route.getProviderInstance() == mSystemProvider
+ && route.supportsControlCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO)
+ && !route.supportsControlCategory(MediaControlIntent.CATEGORY_LIVE_VIDEO);
+ }
+
+ private boolean isSystemDefaultRoute(RouteInfo route) {
+ return route.getProviderInstance() == mSystemProvider
+ && route.mDescriptorId.equals(
+ SystemMediaRouteProvider.DEFAULT_ROUTE_ID);
+ }
+
+ private void setSelectedRouteInternal(@NonNull RouteInfo route, int unselectReason) {
+ // TODO: Remove the following logging when no longer needed.
+ if (sGlobal == null || (mBluetoothRoute != null && route.isDefault())) {
+ final StackTraceElement[] callStack = Thread.currentThread().getStackTrace();
+ StringBuilder sb = new StringBuilder();
+ // callStack[3] is the caller of this method.
+ for (int i = 3; i < callStack.length; i++) {
+ StackTraceElement caller = callStack[i];
+ sb.append(caller.getClassName())
+ .append(".")
+ .append(caller.getMethodName())
+ .append(":")
+ .append(caller.getLineNumber())
+ .append(" ");
+ }
+ if (sGlobal == null) {
+ Log.w(TAG, "setSelectedRouteInternal is called while sGlobal is null: pkgName="
+ + mApplicationContext.getPackageName() + ", callers=" + sb.toString());
+ } else {
+ Log.w(TAG, "Default route is selected while a BT route is available: pkgName="
+ + mApplicationContext.getPackageName() + ", callers=" + sb.toString());
+ }
+ }
+
+ if (mSelectedRoute != route) {
+ if (mSelectedRoute != null) {
+ if (DEBUG) {
+ Log.d(TAG, "Route unselected: " + mSelectedRoute + " reason: "
+ + unselectReason);
+ }
+ mCallbackHandler.post(CallbackHandler.MSG_ROUTE_UNSELECTED, mSelectedRoute,
+ unselectReason);
+ if (mSelectedRouteController != null) {
+ mSelectedRouteController.onUnselect(unselectReason);
+ mSelectedRouteController.onRelease();
+ mSelectedRouteController = null;
+ }
+ if (!mRouteControllerMap.isEmpty()) {
+ for (RouteController controller : mRouteControllerMap.values()) {
+ controller.onUnselect(unselectReason);
+ controller.onRelease();
+ }
+ mRouteControllerMap.clear();
+ }
+ }
+
+ mSelectedRoute = route;
+ mSelectedRouteController = route.getProviderInstance().onCreateRouteController(
+ route.mDescriptorId);
+ if (mSelectedRouteController != null) {
+ mSelectedRouteController.onSelect();
+ }
+ if (DEBUG) {
+ Log.d(TAG, "Route selected: " + mSelectedRoute);
+ }
+ mCallbackHandler.post(CallbackHandler.MSG_ROUTE_SELECTED, mSelectedRoute);
+
+ if (mSelectedRoute instanceof RouteGroup) {
+ List<RouteInfo> routes = ((RouteGroup) mSelectedRoute).getRoutes();
+ mRouteControllerMap.clear();
+ for (RouteInfo r : routes) {
+ RouteController controller =
+ r.getProviderInstance().onCreateRouteController(
+ r.mDescriptorId, mSelectedRoute.mDescriptorId);
+ controller.onSelect();
+ mRouteControllerMap.put(r.mDescriptorId, controller);
+ }
+ }
+
+ updatePlaybackInfoFromSelectedRoute();
+ }
+ }
+
+ @Override
+ public void onSystemRouteSelectedByDescriptorId(String id) {
+ // System route is selected, do not sync the route we selected before.
+ mCallbackHandler.removeMessages(CallbackHandler.MSG_ROUTE_SELECTED);
+ int providerIndex = findProviderInfo(mSystemProvider);
+ if (providerIndex >= 0) {
+ ProviderInfo provider = mProviders.get(providerIndex);
+ int routeIndex = provider.findRouteByDescriptorId(id);
+ if (routeIndex >= 0) {
+ provider.mRoutes.get(routeIndex).select();
+ }
+ }
+ }
+
+ public void addRemoteControlClient(Object rcc) {
+ int index = findRemoteControlClientRecord(rcc);
+ if (index < 0) {
+ RemoteControlClientRecord record = new RemoteControlClientRecord(rcc);
+ mRemoteControlClients.add(record);
+ }
+ }
+
+ public void removeRemoteControlClient(Object rcc) {
+ int index = findRemoteControlClientRecord(rcc);
+ if (index >= 0) {
+ RemoteControlClientRecord record = mRemoteControlClients.remove(index);
+ record.disconnect();
+ }
+ }
+
+ public void setMediaSession(Object session) {
+ setMediaSessionRecord(session != null ? new MediaSessionRecord(session) : null);
+ }
+
+ public void setMediaSessionCompat(final MediaSessionCompat session) {
+ mCompatSession = session;
+ if (android.os.Build.VERSION.SDK_INT >= 21) {
+ setMediaSessionRecord(session != null ? new MediaSessionRecord(session) : null);
+ } else if (android.os.Build.VERSION.SDK_INT >= 14) {
+ if (mRccMediaSession != null) {
+ removeRemoteControlClient(mRccMediaSession.getRemoteControlClient());
+ mRccMediaSession.removeOnActiveChangeListener(mSessionActiveListener);
+ }
+ mRccMediaSession = session;
+ if (session != null) {
+ session.addOnActiveChangeListener(mSessionActiveListener);
+ if (session.isActive()) {
+ addRemoteControlClient(session.getRemoteControlClient());
+ }
+ }
+ }
+ }
+
+ private void setMediaSessionRecord(MediaSessionRecord mediaSessionRecord) {
+ if (mMediaSession != null) {
+ mMediaSession.clearVolumeHandling();
+ }
+ mMediaSession = mediaSessionRecord;
+ if (mediaSessionRecord != null) {
+ updatePlaybackInfoFromSelectedRoute();
+ }
+ }
+
+ public MediaSessionCompat.Token getMediaSessionToken() {
+ if (mMediaSession != null) {
+ return mMediaSession.getToken();
+ } else if (mCompatSession != null) {
+ return mCompatSession.getSessionToken();
+ }
+ return null;
+ }
+
+ private int findRemoteControlClientRecord(Object rcc) {
+ final int count = mRemoteControlClients.size();
+ for (int i = 0; i < count; i++) {
+ RemoteControlClientRecord record = mRemoteControlClients.get(i);
+ if (record.getRemoteControlClient() == rcc) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ private void updatePlaybackInfoFromSelectedRoute() {
+ if (mSelectedRoute != null) {
+ mPlaybackInfo.volume = mSelectedRoute.getVolume();
+ mPlaybackInfo.volumeMax = mSelectedRoute.getVolumeMax();
+ mPlaybackInfo.volumeHandling = mSelectedRoute.getVolumeHandling();
+ mPlaybackInfo.playbackStream = mSelectedRoute.getPlaybackStream();
+ mPlaybackInfo.playbackType = mSelectedRoute.getPlaybackType();
+
+ final int count = mRemoteControlClients.size();
+ for (int i = 0; i < count; i++) {
+ RemoteControlClientRecord record = mRemoteControlClients.get(i);
+ record.updatePlaybackInfo();
+ }
+ if (mMediaSession != null) {
+ if (mSelectedRoute == getDefaultRoute()
+ || mSelectedRoute == getBluetoothRoute()) {
+ // Local route
+ mMediaSession.clearVolumeHandling();
+ } else {
+ @VolumeProviderCompat.ControlType int controlType =
+ VolumeProviderCompat.VOLUME_CONTROL_FIXED;
+ if (mPlaybackInfo.volumeHandling
+ == MediaRouter.RouteInfo.PLAYBACK_VOLUME_VARIABLE) {
+ controlType = VolumeProviderCompat.VOLUME_CONTROL_ABSOLUTE;
+ }
+ mMediaSession.configureVolume(controlType, mPlaybackInfo.volumeMax,
+ mPlaybackInfo.volume);
+ }
+ }
+ } else {
+ if (mMediaSession != null) {
+ mMediaSession.clearVolumeHandling();
+ }
+ }
+ }
+
+ private final class ProviderCallback extends MediaRouteProvider.Callback {
+ ProviderCallback() {
+ }
+
+ @Override
+ public void onDescriptorChanged(MediaRouteProvider provider,
+ MediaRouteProviderDescriptor descriptor) {
+ updateProviderDescriptor(provider, descriptor);
+ }
+ }
+
+ private final class MediaSessionRecord {
+ private final MediaSessionCompat mMsCompat;
+
+ private @VolumeProviderCompat.ControlType int mControlType;
+ private int mMaxVolume;
+ private VolumeProviderCompat mVpCompat;
+
+ public MediaSessionRecord(Object mediaSession) {
+ mMsCompat = MediaSessionCompat.fromMediaSession(mApplicationContext, mediaSession);
+ }
+
+ public MediaSessionRecord(MediaSessionCompat mediaSessionCompat) {
+ mMsCompat = mediaSessionCompat;
+ }
+
+ public void configureVolume(@VolumeProviderCompat.ControlType int controlType,
+ int max, int current) {
+ if (mVpCompat != null && controlType == mControlType && max == mMaxVolume) {
+                    // If the control type and max volume are unchanged, just set the
+                    // new current volume.
+ mVpCompat.setCurrentVolume(current);
+ } else {
+                    // Otherwise, create a new volume provider and attach it to the session.
+ mVpCompat = new VolumeProviderCompat(controlType, max, current) {
+ @Override
+ public void onSetVolumeTo(final int volume) {
+ mCallbackHandler.post(new Runnable() {
+ @Override
+ public void run() {
+ if (mSelectedRoute != null) {
+ mSelectedRoute.requestSetVolume(volume);
+ }
+ }
+ });
+ }
+
+ @Override
+ public void onAdjustVolume(final int direction) {
+ mCallbackHandler.post(new Runnable() {
+ @Override
+ public void run() {
+ if (mSelectedRoute != null) {
+ mSelectedRoute.requestUpdateVolume(direction);
+ }
+ }
+ });
+ }
+ };
+ mMsCompat.setPlaybackToRemote(mVpCompat);
+ }
+ }
+
+ public void clearVolumeHandling() {
+ mMsCompat.setPlaybackToLocal(mPlaybackInfo.playbackStream);
+ mVpCompat = null;
+ }
+
+ public MediaSessionCompat.Token getToken() {
+ return mMsCompat.getSessionToken();
+ }
+ }
+
+ private final class RemoteControlClientRecord
+ implements RemoteControlClientCompat.VolumeCallback {
+ private final RemoteControlClientCompat mRccCompat;
+ private boolean mDisconnected;
+
+ public RemoteControlClientRecord(Object rcc) {
+ mRccCompat = RemoteControlClientCompat.obtain(mApplicationContext, rcc);
+ mRccCompat.setVolumeCallback(this);
+ updatePlaybackInfo();
+ }
+
+ public Object getRemoteControlClient() {
+ return mRccCompat.getRemoteControlClient();
+ }
+
+ public void disconnect() {
+ mDisconnected = true;
+ mRccCompat.setVolumeCallback(null);
+ }
+
+ public void updatePlaybackInfo() {
+ mRccCompat.setPlaybackInfo(mPlaybackInfo);
+ }
+
+ @Override
+ public void onVolumeSetRequest(int volume) {
+ if (!mDisconnected && mSelectedRoute != null) {
+ mSelectedRoute.requestSetVolume(volume);
+ }
+ }
+
+ @Override
+ public void onVolumeUpdateRequest(int direction) {
+ if (!mDisconnected && mSelectedRoute != null) {
+ mSelectedRoute.requestUpdateVolume(direction);
+ }
+ }
+ }
+
+ private final class CallbackHandler extends Handler {
+ private final ArrayList<CallbackRecord> mTempCallbackRecords =
+ new ArrayList<CallbackRecord>();
+
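+            // Messages encode the event kind in the low byte and the target type (route vs.
+            // provider) in the high byte; invokeCallback() switches on (what & MSG_TYPE_MASK).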
+ private static final int MSG_TYPE_MASK = 0xff00;
+ private static final int MSG_TYPE_ROUTE = 0x0100;
+ private static final int MSG_TYPE_PROVIDER = 0x0200;
+
+ public static final int MSG_ROUTE_ADDED = MSG_TYPE_ROUTE | 1;
+ public static final int MSG_ROUTE_REMOVED = MSG_TYPE_ROUTE | 2;
+ public static final int MSG_ROUTE_CHANGED = MSG_TYPE_ROUTE | 3;
+ public static final int MSG_ROUTE_VOLUME_CHANGED = MSG_TYPE_ROUTE | 4;
+ public static final int MSG_ROUTE_PRESENTATION_DISPLAY_CHANGED = MSG_TYPE_ROUTE | 5;
+ public static final int MSG_ROUTE_SELECTED = MSG_TYPE_ROUTE | 6;
+ public static final int MSG_ROUTE_UNSELECTED = MSG_TYPE_ROUTE | 7;
+
+ public static final int MSG_PROVIDER_ADDED = MSG_TYPE_PROVIDER | 1;
+ public static final int MSG_PROVIDER_REMOVED = MSG_TYPE_PROVIDER | 2;
+ public static final int MSG_PROVIDER_CHANGED = MSG_TYPE_PROVIDER | 3;
+
+ CallbackHandler() {
+ }
+
+ public void post(int msg, Object obj) {
+ obtainMessage(msg, obj).sendToTarget();
+ }
+
+ public void post(int msg, Object obj, int arg) {
+ Message message = obtainMessage(msg, obj);
+ message.arg1 = arg;
+ message.sendToTarget();
+ }
+
+ @Override
+ public void handleMessage(Message msg) {
+ final int what = msg.what;
+ final Object obj = msg.obj;
+ final int arg = msg.arg1;
+
+ if (what == MSG_ROUTE_CHANGED
+ && getSelectedRoute().getId().equals(((RouteInfo) obj).getId())) {
+ updateSelectedRouteIfNeeded(true);
+ }
+
+ // Synchronize state with the system media router.
+ syncWithSystemProvider(what, obj);
+
+ // Invoke all registered callbacks.
+ // Build a list of callbacks before invoking them in case callbacks
+ // are added or removed during dispatch.
+ try {
+ for (int i = mRouters.size(); --i >= 0; ) {
+ MediaRouter router = mRouters.get(i).get();
+ if (router == null) {
+ mRouters.remove(i);
+ } else {
+ mTempCallbackRecords.addAll(router.mCallbackRecords);
+ }
+ }
+
+ final int callbackCount = mTempCallbackRecords.size();
+ for (int i = 0; i < callbackCount; i++) {
+ invokeCallback(mTempCallbackRecords.get(i), what, obj, arg);
+ }
+ } finally {
+ mTempCallbackRecords.clear();
+ }
+ }
+
+ private void syncWithSystemProvider(int what, Object obj) {
+ switch (what) {
+ case MSG_ROUTE_ADDED:
+ mSystemProvider.onSyncRouteAdded((RouteInfo) obj);
+ break;
+ case MSG_ROUTE_REMOVED:
+ mSystemProvider.onSyncRouteRemoved((RouteInfo) obj);
+ break;
+ case MSG_ROUTE_CHANGED:
+ mSystemProvider.onSyncRouteChanged((RouteInfo) obj);
+ break;
+ case MSG_ROUTE_SELECTED:
+ mSystemProvider.onSyncRouteSelected((RouteInfo) obj);
+ break;
+ }
+ }
+
+ private void invokeCallback(CallbackRecord record, int what, Object obj, int arg) {
+ final MediaRouter router = record.mRouter;
+ final MediaRouter.Callback callback = record.mCallback;
+ switch (what & MSG_TYPE_MASK) {
+ case MSG_TYPE_ROUTE: {
+ final RouteInfo route = (RouteInfo)obj;
+ if (!record.filterRouteEvent(route)) {
+ break;
+ }
+ switch (what) {
+ case MSG_ROUTE_ADDED:
+ callback.onRouteAdded(router, route);
+ break;
+ case MSG_ROUTE_REMOVED:
+ callback.onRouteRemoved(router, route);
+ break;
+ case MSG_ROUTE_CHANGED:
+ callback.onRouteChanged(router, route);
+ break;
+ case MSG_ROUTE_VOLUME_CHANGED:
+ callback.onRouteVolumeChanged(router, route);
+ break;
+ case MSG_ROUTE_PRESENTATION_DISPLAY_CHANGED:
+ callback.onRoutePresentationDisplayChanged(router, route);
+ break;
+ case MSG_ROUTE_SELECTED:
+ callback.onRouteSelected(router, route);
+ break;
+ case MSG_ROUTE_UNSELECTED:
+ callback.onRouteUnselected(router, route, arg);
+ break;
+ }
+ break;
+ }
+ case MSG_TYPE_PROVIDER: {
+ final ProviderInfo provider = (ProviderInfo)obj;
+ switch (what) {
+ case MSG_PROVIDER_ADDED:
+ callback.onProviderAdded(router, provider);
+ break;
+ case MSG_PROVIDER_REMOVED:
+ callback.onProviderRemoved(router, provider);
+ break;
+ case MSG_PROVIDER_CHANGED:
+ callback.onProviderChanged(router, provider);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
new file mode 100644
index 0000000..3206596
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/MediaSessionStatus.java
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.app.PendingIntent;
+import android.os.Bundle;
+import android.os.SystemClock;
+import android.support.v4.util.TimeUtils;
+
+/**
+ * Describes the playback status of a media session.
+ * <p>
+ * This class is part of the remote playback protocol described by the
+ * {@link MediaControlIntent MediaControlIntent} class.
+ * </p><p>
+ * When a media session is created, it is initially in the
+ * {@link #SESSION_STATE_ACTIVE active} state. When the media session ends
+ * normally, it transitions to the {@link #SESSION_STATE_ENDED ended} state.
+ * If the media session is invalidated due to another session forcibly taking
+ * control of the route, then it transitions to the
+ * {@link #SESSION_STATE_INVALIDATED invalidated} state.
+ * Refer to the documentation of each state for an explanation of its meaning.
+ * </p><p>
+ * To monitor session status, the application should supply a {@link PendingIntent} to use as the
+ * {@link MediaControlIntent#EXTRA_SESSION_STATUS_UPDATE_RECEIVER session status update receiver}
+ * for a given {@link MediaControlIntent#ACTION_START_SESSION session start request}.
+ * </p><p>
+ * This object is immutable once created using a {@link Builder} instance.
+ * </p>
+ */
+public final class MediaSessionStatus {
+ static final String KEY_TIMESTAMP = "timestamp";
+ static final String KEY_SESSION_STATE = "sessionState";
+ static final String KEY_QUEUE_PAUSED = "queuePaused";
+ static final String KEY_EXTRAS = "extras";
+
+ final Bundle mBundle;
+
+ /**
+ * Session state: Active.
+ * <p>
+ * Indicates that the media session is active and in control of the route.
+ * </p>
+ */
+ public static final int SESSION_STATE_ACTIVE = 0;
+
+ /**
+ * Session state: Ended.
+ * <p>
+ * Indicates that the media session was ended normally using the
+ * {@link MediaControlIntent#ACTION_END_SESSION end session} action.
+ * </p><p>
+ * A terminated media session cannot be used anymore. To play more media, the
+ * application must start a new session.
+ * </p>
+ */
+ public static final int SESSION_STATE_ENDED = 1;
+
+ /**
+ * Session state: Invalidated.
+ * <p>
+ * Indicates that the media session was invalidated involuntarily due to
+ * another session taking control of the route.
+ * </p><p>
+ * An invalidated media session cannot be used anymore. To play more media, the
+ * application must start a new session.
+ * </p>
+ */
+ public static final int SESSION_STATE_INVALIDATED = 2;
+
+ MediaSessionStatus(Bundle bundle) {
+ mBundle = bundle;
+ }
+
+ /**
+ * Gets the timestamp associated with the status information in
+ * milliseconds since boot in the {@link SystemClock#elapsedRealtime} time base.
+ *
+ * @return The status timestamp in the {@link SystemClock#elapsedRealtime()} time base.
+ */
+ public long getTimestamp() {
+ return mBundle.getLong(KEY_TIMESTAMP);
+ }
+
+ /**
+ * Gets the session state.
+ *
+ * @return The session state. One of {@link #SESSION_STATE_ACTIVE},
+ * {@link #SESSION_STATE_ENDED}, or {@link #SESSION_STATE_INVALIDATED}.
+ */
+ public int getSessionState() {
+ return mBundle.getInt(KEY_SESSION_STATE, SESSION_STATE_INVALIDATED);
+ }
+
+ /**
+ * Returns true if the session's queue is paused.
+ *
+ * @return True if the session's queue is paused.
+ */
+ public boolean isQueuePaused() {
+ return mBundle.getBoolean(KEY_QUEUE_PAUSED);
+ }
+
+ /**
+ * Gets a bundle of extras for this status object.
+ * The extras will be ignored by the media router but they may be used
+ * by applications.
+ */
+ public Bundle getExtras() {
+ return mBundle.getBundle(KEY_EXTRAS);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("MediaSessionStatus{ ");
+ result.append("timestamp=");
+ TimeUtils.formatDuration(SystemClock.elapsedRealtime() - getTimestamp(), result);
+ result.append(" ms ago");
+ result.append(", sessionState=").append(sessionStateToString(getSessionState()));
+ result.append(", queuePaused=").append(isQueuePaused());
+ result.append(", extras=").append(getExtras());
+ result.append(" }");
+ return result.toString();
+ }
+
+ private static String sessionStateToString(int sessionState) {
+ switch (sessionState) {
+ case SESSION_STATE_ACTIVE:
+ return "active";
+ case SESSION_STATE_ENDED:
+ return "ended";
+ case SESSION_STATE_INVALIDATED:
+ return "invalidated";
+ }
+ return Integer.toString(sessionState);
+ }
+
+ /**
+ * Converts this object to a bundle for serialization.
+ *
+ * @return The contents of the object represented as a bundle.
+ */
+ public Bundle asBundle() {
+ return mBundle;
+ }
+
+ /**
+ * Creates an instance from a bundle.
+ *
+ * @param bundle The bundle, or null if none.
+ * @return The new instance, or null if the bundle was null.
+ */
+ public static MediaSessionStatus fromBundle(Bundle bundle) {
+ return bundle != null ? new MediaSessionStatus(bundle) : null;
+ }
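+
+ // Illustrative sketch (not part of this class): a status built with the Builder
+ // below can round-trip through a Bundle via asBundle()/fromBundle().
+ //
+ //   MediaSessionStatus status = new MediaSessionStatus.Builder(SESSION_STATE_ACTIVE)
+ //           .setQueuePaused(false)
+ //           .build();
+ //   Bundle bundle = status.asBundle();
+ //   MediaSessionStatus restored = MediaSessionStatus.fromBundle(bundle);
+ //   // restored.getSessionState() == SESSION_STATE_ACTIVE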
+
+ /**
+ * Builder for {@link MediaSessionStatus media session status objects}.
+ */
+ public static final class Builder {
+ private final Bundle mBundle;
+
+ /**
+ * Creates a media session status builder using the current time as the
+ * reference timestamp.
+ *
+ * @param sessionState The session state.
+ */
+ public Builder(int sessionState) {
+ mBundle = new Bundle();
+ setTimestamp(SystemClock.elapsedRealtime());
+ setSessionState(sessionState);
+ }
+
+ /**
+ * Creates a media session status builder whose initial contents are
+ * copied from an existing status.
+ */
+ public Builder(MediaSessionStatus status) {
+ if (status == null) {
+ throw new IllegalArgumentException("status must not be null");
+ }
+
+ mBundle = new Bundle(status.mBundle);
+ }
+
+ /**
+ * Sets the timestamp associated with the status information in
+ * milliseconds since boot in the {@link SystemClock#elapsedRealtime} time base.
+ */
+ public Builder setTimestamp(long elapsedRealtimeTimestamp) {
+ mBundle.putLong(KEY_TIMESTAMP, elapsedRealtimeTimestamp);
+ return this;
+ }
+
+ /**
+ * Sets the session state.
+ */
+ public Builder setSessionState(int sessionState) {
+ mBundle.putInt(KEY_SESSION_STATE, sessionState);
+ return this;
+ }
+
+ /**
+ * Sets whether the queue is paused.
+ */
+ public Builder setQueuePaused(boolean queuePaused) {
+ mBundle.putBoolean(KEY_QUEUE_PAUSED, queuePaused);
+ return this;
+ }
+
+ /**
+ * Sets a bundle of extras for this status object.
+ * The extras will be ignored by the media router but they may be used
+ * by applications.
+ */
+ public Builder setExtras(Bundle extras) {
+ mBundle.putBundle(KEY_EXTRAS, extras);
+ return this;
+ }
+
+ /**
+ * Builds the {@link MediaSessionStatus media session status object}.
+ */
+ public MediaSessionStatus build() {
+ return new MediaSessionStatus(mBundle);
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
new file mode 100644
index 0000000..98e4e28
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProvider.java
@@ -0,0 +1,741 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_DATA_ROUTE_ID;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_DATA_ROUTE_LIBRARY_GROUP;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_DATA_UNSELECT_REASON;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_DATA_VOLUME;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_CREATE_ROUTE_CONTROLLER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_REGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_RELEASE_ROUTE_CONTROLLER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_ROUTE_CONTROL_REQUEST;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_SELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_SET_DISCOVERY_REQUEST;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_SET_ROUTE_VOLUME;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_MSG_UNREGISTER;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_UNSELECT_ROUTE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .CLIENT_MSG_UPDATE_ROUTE_VOLUME;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.CLIENT_VERSION_CURRENT;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_DATA_ERROR;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_CONTROL_REQUEST_FAILED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_CONTROL_REQUEST_SUCCEEDED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_DESCRIPTOR_CHANGED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_GENERIC_FAILURE;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol
+ .SERVICE_MSG_GENERIC_SUCCESS;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_MSG_REGISTERED;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.SERVICE_VERSION_1;
+import static com.android.support.mediarouter.media.MediaRouteProviderProtocol.isValidRemoteMessenger;
+
+import android.annotation.NonNull;
+import android.content.ComponentName;
+import android.content.Context;
+import android.content.Intent;
+import android.content.ServiceConnection;
+import android.os.Bundle;
+import android.os.DeadObjectException;
+import android.os.Handler;
+import android.os.IBinder;
+import android.os.IBinder.DeathRecipient;
+import android.os.Message;
+import android.os.Messenger;
+import android.os.RemoteException;
+import android.util.Log;
+import android.util.SparseArray;
+
+import com.android.support.mediarouter.media.MediaRouter.ControlRequestCallback;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Maintains a connection to a particular media route provider service.
+ */
+final class RegisteredMediaRouteProvider extends MediaRouteProvider
+ implements ServiceConnection {
+ static final String TAG = "MediaRouteProviderProxy"; // max. 23 chars
+ static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
+
+ private final ComponentName mComponentName;
+ final PrivateHandler mPrivateHandler;
+ private final ArrayList<Controller> mControllers = new ArrayList<Controller>();
+
+ private boolean mStarted;
+ private boolean mBound;
+ private Connection mActiveConnection;
+ private boolean mConnectionReady;
+
+ public RegisteredMediaRouteProvider(Context context, ComponentName componentName) {
+ super(context, new ProviderMetadata(componentName));
+
+ mComponentName = componentName;
+ mPrivateHandler = new PrivateHandler();
+ }
+
+ @Override
+ public RouteController onCreateRouteController(@NonNull String routeId) {
+ if (routeId == null) {
+ throw new IllegalArgumentException("routeId cannot be null");
+ }
+ return createRouteController(routeId, null);
+ }
+
+ @Override
+ public RouteController onCreateRouteController(
+ @NonNull String routeId, @NonNull String routeGroupId) {
+ if (routeId == null) {
+ throw new IllegalArgumentException("routeId cannot be null");
+ }
+ if (routeGroupId == null) {
+ throw new IllegalArgumentException("routeGroupId cannot be null");
+ }
+ return createRouteController(routeId, routeGroupId);
+ }
+
+ @Override
+ public void onDiscoveryRequestChanged(MediaRouteDiscoveryRequest request) {
+ if (mConnectionReady) {
+ mActiveConnection.setDiscoveryRequest(request);
+ }
+ updateBinding();
+ }
+
+ @Override
+ public void onServiceConnected(ComponentName name, IBinder service) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Connected");
+ }
+
+ if (mBound) {
+ disconnect();
+
+ Messenger messenger = (service != null ? new Messenger(service) : null);
+ if (isValidRemoteMessenger(messenger)) {
+ Connection connection = new Connection(messenger);
+ if (connection.register()) {
+ mActiveConnection = connection;
+ } else {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Registration failed");
+ }
+ }
+ } else {
+ Log.e(TAG, this + ": Service returned invalid messenger binder");
+ }
+ }
+ }
+
+ @Override
+ public void onServiceDisconnected(ComponentName name) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Service disconnected");
+ }
+ disconnect();
+ }
+
+ @Override
+ public String toString() {
+ return "Service connection " + mComponentName.flattenToShortString();
+ }
+
+ public boolean hasComponentName(String packageName, String className) {
+ return mComponentName.getPackageName().equals(packageName)
+ && mComponentName.getClassName().equals(className);
+ }
+
+ public void start() {
+ if (!mStarted) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Starting");
+ }
+
+ mStarted = true;
+ updateBinding();
+ }
+ }
+
+ public void stop() {
+ if (mStarted) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Stopping");
+ }
+
+ mStarted = false;
+ updateBinding();
+ }
+ }
+
+ public void rebindIfDisconnected() {
+ if (mActiveConnection == null && shouldBind()) {
+ unbind();
+ bind();
+ }
+ }
+
+ private void updateBinding() {
+ if (shouldBind()) {
+ bind();
+ } else {
+ unbind();
+ }
+ }
+
+ private boolean shouldBind() {
+ if (mStarted) {
+ // Bind whenever there is a discovery request.
+ if (getDiscoveryRequest() != null) {
+ return true;
+ }
+
+ // Bind whenever the application has an active route controller.
+ // This means that one of this provider's routes is selected.
+ if (!mControllers.isEmpty()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private void bind() {
+ if (!mBound) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Binding");
+ }
+
+ Intent service = new Intent(MediaRouteProviderProtocol.SERVICE_INTERFACE);
+ service.setComponent(mComponentName);
+ try {
+ mBound = getContext().bindService(service, this, Context.BIND_AUTO_CREATE);
+ if (!mBound && DEBUG) {
+ Log.d(TAG, this + ": Bind failed");
+ }
+ } catch (SecurityException ex) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Bind failed", ex);
+ }
+ }
+ }
+ }
+
+ private void unbind() {
+ if (mBound) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Unbinding");
+ }
+
+ mBound = false;
+ disconnect();
+ getContext().unbindService(this);
+ }
+ }
+
+ private RouteController createRouteController(String routeId, String routeGroupId) {
+ MediaRouteProviderDescriptor descriptor = getDescriptor();
+ if (descriptor != null) {
+ List<MediaRouteDescriptor> routes = descriptor.getRoutes();
+ final int count = routes.size();
+ for (int i = 0; i < count; i++) {
+ final MediaRouteDescriptor route = routes.get(i);
+ if (route.getId().equals(routeId)) {
+ Controller controller = new Controller(routeId, routeGroupId);
+ mControllers.add(controller);
+ if (mConnectionReady) {
+ controller.attachConnection(mActiveConnection);
+ }
+ updateBinding();
+ return controller;
+ }
+ }
+ }
+ return null;
+ }
+
+ void onConnectionReady(Connection connection) {
+ if (mActiveConnection == connection) {
+ mConnectionReady = true;
+ attachControllersToConnection();
+
+ MediaRouteDiscoveryRequest request = getDiscoveryRequest();
+ if (request != null) {
+ mActiveConnection.setDiscoveryRequest(request);
+ }
+ }
+ }
+
+ void onConnectionDied(Connection connection) {
+ if (mActiveConnection == connection) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Service connection died");
+ }
+ disconnect();
+ }
+ }
+
+ void onConnectionError(Connection connection, String error) {
+ if (mActiveConnection == connection) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Service connection error - " + error);
+ }
+ unbind();
+ }
+ }
+
+ void onConnectionDescriptorChanged(Connection connection,
+ MediaRouteProviderDescriptor descriptor) {
+ if (mActiveConnection == connection) {
+ if (DEBUG) {
+ Log.d(TAG, this + ": Descriptor changed, descriptor=" + descriptor);
+ }
+ setDescriptor(descriptor);
+ }
+ }
+
+ private void disconnect() {
+ if (mActiveConnection != null) {
+ setDescriptor(null);
+ mConnectionReady = false;
+ detachControllersFromConnection();
+ mActiveConnection.dispose();
+ mActiveConnection = null;
+ }
+ }
+
+ void onControllerReleased(Controller controller) {
+ mControllers.remove(controller);
+ controller.detachConnection();
+ updateBinding();
+ }
+
+ private void attachControllersToConnection() {
+ int count = mControllers.size();
+ for (int i = 0; i < count; i++) {
+ mControllers.get(i).attachConnection(mActiveConnection);
+ }
+ }
+
+ private void detachControllersFromConnection() {
+ int count = mControllers.size();
+ for (int i = 0; i < count; i++) {
+ mControllers.get(i).detachConnection();
+ }
+ }
+
+ private final class Controller extends RouteController {
+ private final String mRouteId;
+ private final String mRouteGroupId;
+
+ private boolean mSelected;
+ private int mPendingSetVolume = -1;
+ private int mPendingUpdateVolumeDelta;
+
+ private Connection mConnection;
+ private int mControllerId;
+
+ public Controller(String routeId, String routeGroupId) {
+ mRouteId = routeId;
+ mRouteGroupId = routeGroupId;
+ }
+
+ public void attachConnection(Connection connection) {
+ mConnection = connection;
+ mControllerId = connection.createRouteController(mRouteId, mRouteGroupId);
+ if (mSelected) {
+ connection.selectRoute(mControllerId);
+ if (mPendingSetVolume >= 0) {
+ connection.setVolume(mControllerId, mPendingSetVolume);
+ mPendingSetVolume = -1;
+ }
+ if (mPendingUpdateVolumeDelta != 0) {
+ connection.updateVolume(mControllerId, mPendingUpdateVolumeDelta);
+ mPendingUpdateVolumeDelta = 0;
+ }
+ }
+ }
+
+ public void detachConnection() {
+ if (mConnection != null) {
+ mConnection.releaseRouteController(mControllerId);
+ mConnection = null;
+ mControllerId = 0;
+ }
+ }
+
+ @Override
+ public void onRelease() {
+ onControllerReleased(this);
+ }
+
+ @Override
+ public void onSelect() {
+ mSelected = true;
+ if (mConnection != null) {
+ mConnection.selectRoute(mControllerId);
+ }
+ }
+
+ @Override
+ public void onUnselect() {
+ onUnselect(MediaRouter.UNSELECT_REASON_UNKNOWN);
+ }
+
+ @Override
+ public void onUnselect(int reason) {
+ mSelected = false;
+ if (mConnection != null) {
+ mConnection.unselectRoute(mControllerId, reason);
+ }
+ }
+
+ @Override
+ public void onSetVolume(int volume) {
+ if (mConnection != null) {
+ mConnection.setVolume(mControllerId, volume);
+ } else {
+ mPendingSetVolume = volume;
+ mPendingUpdateVolumeDelta = 0;
+ }
+ }
+
+ @Override
+ public void onUpdateVolume(int delta) {
+ if (mConnection != null) {
+ mConnection.updateVolume(mControllerId, delta);
+ } else {
+ mPendingUpdateVolumeDelta += delta;
+ }
+ }
+
+ @Override
+ public boolean onControlRequest(Intent intent, ControlRequestCallback callback) {
+ if (mConnection != null) {
+ return mConnection.sendControlRequest(mControllerId, intent, callback);
+ }
+ return false;
+ }
+ }
+
+ private final class Connection implements DeathRecipient {
+ private final Messenger mServiceMessenger;
+ private final ReceiveHandler mReceiveHandler;
+ private final Messenger mReceiveMessenger;
+
+ private int mNextRequestId = 1;
+ private int mNextControllerId = 1;
+ private int mServiceVersion; // non-zero when registration complete
+
+ private int mPendingRegisterRequestId;
+ private final SparseArray<ControlRequestCallback> mPendingCallbacks =
+ new SparseArray<ControlRequestCallback>();
+
+ public Connection(Messenger serviceMessenger) {
+ mServiceMessenger = serviceMessenger;
+ mReceiveHandler = new ReceiveHandler(this);
+ mReceiveMessenger = new Messenger(mReceiveHandler);
+ }
+
+ public boolean register() {
+ mPendingRegisterRequestId = mNextRequestId++;
+ if (!sendRequest(CLIENT_MSG_REGISTER,
+ mPendingRegisterRequestId,
+ CLIENT_VERSION_CURRENT, null, null)) {
+ return false;
+ }
+
+ try {
+ mServiceMessenger.getBinder().linkToDeath(this, 0);
+ return true;
+ } catch (RemoteException ex) {
+ binderDied();
+ }
+ return false;
+ }
+
+ public void dispose() {
+ sendRequest(CLIENT_MSG_UNREGISTER, 0, 0, null, null);
+ mReceiveHandler.dispose();
+ mServiceMessenger.getBinder().unlinkToDeath(this, 0);
+
+ mPrivateHandler.post(new Runnable() {
+ @Override
+ public void run() {
+ failPendingCallbacks();
+ }
+ });
+ }
+
+ void failPendingCallbacks() {
+ for (int i = 0; i < mPendingCallbacks.size(); i++) {
+ mPendingCallbacks.valueAt(i).onError(null, null);
+ }
+ mPendingCallbacks.clear();
+ }
+
+ public boolean onGenericFailure(int requestId) {
+ if (requestId == mPendingRegisterRequestId) {
+ mPendingRegisterRequestId = 0;
+ onConnectionError(this, "Registration failed");
+ }
+ ControlRequestCallback callback = mPendingCallbacks.get(requestId);
+ if (callback != null) {
+ mPendingCallbacks.remove(requestId);
+ callback.onError(null, null);
+ }
+ return true;
+ }
+
+ public boolean onGenericSuccess(int requestId) {
+ return true;
+ }
+
+ public boolean onRegistered(int requestId, int serviceVersion,
+ Bundle descriptorBundle) {
+ if (mServiceVersion == 0
+ && requestId == mPendingRegisterRequestId
+ && serviceVersion >= SERVICE_VERSION_1) {
+ mPendingRegisterRequestId = 0;
+ mServiceVersion = serviceVersion;
+ onConnectionDescriptorChanged(this,
+ MediaRouteProviderDescriptor.fromBundle(descriptorBundle));
+ onConnectionReady(this);
+ return true;
+ }
+ return false;
+ }
+
+ public boolean onDescriptorChanged(Bundle descriptorBundle) {
+ if (mServiceVersion != 0) {
+ onConnectionDescriptorChanged(this,
+ MediaRouteProviderDescriptor.fromBundle(descriptorBundle));
+ return true;
+ }
+ return false;
+ }
+
+ public boolean onControlRequestSucceeded(int requestId, Bundle data) {
+ ControlRequestCallback callback = mPendingCallbacks.get(requestId);
+ if (callback != null) {
+ mPendingCallbacks.remove(requestId);
+ callback.onResult(data);
+ return true;
+ }
+ return false;
+ }
+
+ public boolean onControlRequestFailed(int requestId, String error, Bundle data) {
+ ControlRequestCallback callback = mPendingCallbacks.get(requestId);
+ if (callback != null) {
+ mPendingCallbacks.remove(requestId);
+ callback.onError(error, data);
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public void binderDied() {
+ mPrivateHandler.post(new Runnable() {
+ @Override
+ public void run() {
+ onConnectionDied(Connection.this);
+ }
+ });
+ }
+
+ public int createRouteController(String routeId, String routeGroupId) {
+ int controllerId = mNextControllerId++;
+ Bundle data = new Bundle();
+ data.putString(CLIENT_DATA_ROUTE_ID, routeId);
+ data.putString(CLIENT_DATA_ROUTE_LIBRARY_GROUP, routeGroupId);
+ sendRequest(CLIENT_MSG_CREATE_ROUTE_CONTROLLER,
+ mNextRequestId++, controllerId, null, data);
+ return controllerId;
+ }
+
+ public void releaseRouteController(int controllerId) {
+ sendRequest(CLIENT_MSG_RELEASE_ROUTE_CONTROLLER,
+ mNextRequestId++, controllerId, null, null);
+ }
+
+ public void selectRoute(int controllerId) {
+ sendRequest(CLIENT_MSG_SELECT_ROUTE,
+ mNextRequestId++, controllerId, null, null);
+ }
+
+ public void unselectRoute(int controllerId, int reason) {
+ Bundle extras = new Bundle();
+ extras.putInt(CLIENT_DATA_UNSELECT_REASON, reason);
+ sendRequest(CLIENT_MSG_UNSELECT_ROUTE,
+ mNextRequestId++, controllerId, null, extras);
+ }
+
+ public void setVolume(int controllerId, int volume) {
+ Bundle data = new Bundle();
+ data.putInt(CLIENT_DATA_VOLUME, volume);
+ sendRequest(CLIENT_MSG_SET_ROUTE_VOLUME,
+ mNextRequestId++, controllerId, null, data);
+ }
+
+ public void updateVolume(int controllerId, int delta) {
+ Bundle data = new Bundle();
+ data.putInt(CLIENT_DATA_VOLUME, delta);
+ sendRequest(CLIENT_MSG_UPDATE_ROUTE_VOLUME,
+ mNextRequestId++, controllerId, null, data);
+ }
+
+ public boolean sendControlRequest(int controllerId, Intent intent,
+ ControlRequestCallback callback) {
+ int requestId = mNextRequestId++;
+ if (sendRequest(CLIENT_MSG_ROUTE_CONTROL_REQUEST,
+ requestId, controllerId, intent, null)) {
+ if (callback != null) {
+ mPendingCallbacks.put(requestId, callback);
+ }
+ return true;
+ }
+ return false;
+ }
+
+ public void setDiscoveryRequest(MediaRouteDiscoveryRequest request) {
+ sendRequest(CLIENT_MSG_SET_DISCOVERY_REQUEST,
+ mNextRequestId++, 0, request != null ? request.asBundle() : null, null);
+ }
+
+ private boolean sendRequest(int what, int requestId, int arg, Object obj, Bundle data) {
+ Message msg = Message.obtain();
+ msg.what = what;
+ msg.arg1 = requestId;
+ msg.arg2 = arg;
+ msg.obj = obj;
+ msg.setData(data);
+ msg.replyTo = mReceiveMessenger;
+ try {
+ mServiceMessenger.send(msg);
+ return true;
+ } catch (DeadObjectException ex) {
+ // The service died.
+ } catch (RemoteException ex) {
+ if (what != CLIENT_MSG_UNREGISTER) {
+ Log.e(TAG, "Could not send message to service.", ex);
+ }
+ }
+ return false;
+ }
+ }
+
+ private static final class PrivateHandler extends Handler {
+ PrivateHandler() {
+ }
+ }
+
+ /**
+ * Handler that receives messages from the server.
+ * <p>
+ * This inner class is static and only retains a weak reference to the connection
+ * to prevent the client from being leaked in case the service is holding an
+ * active reference to the client's messenger.
+ * </p><p>
+ * This handler should not be used to handle any messages other than those
+ * that come from the service.
+ * </p>
+ */
+ private static final class ReceiveHandler extends Handler {
+ private final WeakReference<Connection> mConnectionRef;
+
+ public ReceiveHandler(Connection connection) {
+ mConnectionRef = new WeakReference<Connection>(connection);
+ }
+
+ public void dispose() {
+ mConnectionRef.clear();
+ }
+
+ @Override
+ public void handleMessage(Message msg) {
+ Connection connection = mConnectionRef.get();
+ if (connection != null) {
+ final int what = msg.what;
+ final int requestId = msg.arg1;
+ final int arg = msg.arg2;
+ final Object obj = msg.obj;
+ final Bundle data = msg.peekData();
+ if (!processMessage(connection, what, requestId, arg, obj, data)) {
+ if (DEBUG) {
+ Log.d(TAG, "Unhandled message from server: " + msg);
+ }
+ }
+ }
+ }
+
+ private boolean processMessage(Connection connection,
+ int what, int requestId, int arg, Object obj, Bundle data) {
+ switch (what) {
+ case SERVICE_MSG_GENERIC_FAILURE:
+ connection.onGenericFailure(requestId);
+ return true;
+
+ case SERVICE_MSG_GENERIC_SUCCESS:
+ connection.onGenericSuccess(requestId);
+ return true;
+
+ case SERVICE_MSG_REGISTERED:
+ if (obj == null || obj instanceof Bundle) {
+ return connection.onRegistered(requestId, arg, (Bundle)obj);
+ }
+ break;
+
+ case SERVICE_MSG_DESCRIPTOR_CHANGED:
+ if (obj == null || obj instanceof Bundle) {
+ return connection.onDescriptorChanged((Bundle)obj);
+ }
+ break;
+
+ case SERVICE_MSG_CONTROL_REQUEST_SUCCEEDED:
+ if (obj == null || obj instanceof Bundle) {
+ return connection.onControlRequestSucceeded(
+ requestId, (Bundle)obj);
+ }
+ break;
+
+ case SERVICE_MSG_CONTROL_REQUEST_FAILED:
+ if (obj == null || obj instanceof Bundle) {
+ String error = (data == null ? null :
+ data.getString(SERVICE_DATA_ERROR));
+ return connection.onControlRequestFailed(
+ requestId, error, (Bundle)obj);
+ }
+ break;
+ }
+ return false;
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProviderWatcher.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProviderWatcher.java
new file mode 100644
index 0000000..ba1f647
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RegisteredMediaRouteProviderWatcher.java
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.content.BroadcastReceiver;
+import android.content.ComponentName;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.content.pm.PackageManager;
+import android.content.pm.ResolveInfo;
+import android.content.pm.ServiceInfo;
+import android.os.Handler;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+/**
+ * Watches for media route provider services to be installed.
+ * Adds a provider to the media router for each registered service.
+ *
+ * @see RegisteredMediaRouteProvider
+ */
+final class RegisteredMediaRouteProviderWatcher {
+ private final Context mContext;
+ private final Callback mCallback;
+ private final Handler mHandler;
+ private final PackageManager mPackageManager;
+
+ private final ArrayList<RegisteredMediaRouteProvider> mProviders =
+ new ArrayList<RegisteredMediaRouteProvider>();
+ private boolean mRunning;
+
+ public RegisteredMediaRouteProviderWatcher(Context context, Callback callback) {
+ mContext = context;
+ mCallback = callback;
+ mHandler = new Handler();
+ mPackageManager = context.getPackageManager();
+ }
+
+ public void start() {
+ if (!mRunning) {
+ mRunning = true;
+
+ IntentFilter filter = new IntentFilter();
+ filter.addAction(Intent.ACTION_PACKAGE_ADDED);
+ filter.addAction(Intent.ACTION_PACKAGE_REMOVED);
+ filter.addAction(Intent.ACTION_PACKAGE_CHANGED);
+ filter.addAction(Intent.ACTION_PACKAGE_REPLACED);
+ filter.addAction(Intent.ACTION_PACKAGE_RESTARTED);
+ filter.addDataScheme("package");
+ mContext.registerReceiver(mScanPackagesReceiver, filter, null, mHandler);
+
+ // Scan packages.
+ // Also has the side-effect of restarting providers if needed.
+ mHandler.post(mScanPackagesRunnable);
+ }
+ }
+
+ public void stop() {
+ if (mRunning) {
+ mRunning = false;
+
+ mContext.unregisterReceiver(mScanPackagesReceiver);
+ mHandler.removeCallbacks(mScanPackagesRunnable);
+
+ // Stop all providers.
+ for (int i = mProviders.size() - 1; i >= 0; i--) {
+ mProviders.get(i).stop();
+ }
+ }
+ }
+
+ void scanPackages() {
+ if (!mRunning) {
+ return;
+ }
+
+ // Add providers for all new services.
+ // Reorder the list so that providers left at the end will be the ones to remove.
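+ // For example, with existing providers [A, B, C] and discovered services [C, A],
+ // the loop below swaps C and A to the front (targetIndex ends at 2) and the
+ // cleanup pass removes B.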
+ int targetIndex = 0;
+ Intent intent = new Intent(MediaRouteProviderService.SERVICE_INTERFACE);
+ for (ResolveInfo resolveInfo : mPackageManager.queryIntentServices(intent, 0)) {
+ ServiceInfo serviceInfo = resolveInfo.serviceInfo;
+ if (serviceInfo != null) {
+ int sourceIndex = findProvider(serviceInfo.packageName, serviceInfo.name);
+ if (sourceIndex < 0) {
+ RegisteredMediaRouteProvider provider =
+ new RegisteredMediaRouteProvider(mContext,
+ new ComponentName(serviceInfo.packageName, serviceInfo.name));
+ provider.start();
+ mProviders.add(targetIndex++, provider);
+ mCallback.addProvider(provider);
+ } else if (sourceIndex >= targetIndex) {
+ RegisteredMediaRouteProvider provider = mProviders.get(sourceIndex);
+ provider.start(); // restart the provider if needed
+ provider.rebindIfDisconnected();
+ Collections.swap(mProviders, sourceIndex, targetIndex++);
+ }
+ }
+ }
+
+ // Remove providers for missing services.
+ if (targetIndex < mProviders.size()) {
+ for (int i = mProviders.size() - 1; i >= targetIndex; i--) {
+ RegisteredMediaRouteProvider provider = mProviders.get(i);
+ mCallback.removeProvider(provider);
+ mProviders.remove(provider);
+ provider.stop();
+ }
+ }
+ }
+
+ private int findProvider(String packageName, String className) {
+ int count = mProviders.size();
+ for (int i = 0; i < count; i++) {
+ RegisteredMediaRouteProvider provider = mProviders.get(i);
+ if (provider.hasComponentName(packageName, className)) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ private final BroadcastReceiver mScanPackagesReceiver = new BroadcastReceiver() {
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ scanPackages();
+ }
+ };
+
+ private final Runnable mScanPackagesRunnable = new Runnable() {
+ @Override
+ public void run() {
+ scanPackages();
+ }
+ };
+
+ public interface Callback {
+ void addProvider(MediaRouteProvider provider);
+ void removeProvider(MediaRouteProvider provider);
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
new file mode 100644
index 0000000..826449b
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemoteControlClientCompat.java
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.support.mediarouter.media;
+
+import android.content.Context;
+import android.media.AudioManager;
+import android.os.Build;
+import android.support.annotation.RequiresApi;
+
+import java.lang.ref.WeakReference;
+
+/**
+ * Provides access to features of the remote control client.
+ *
+ * Hidden for now but we might want to make this available to applications
+ * in the future.
+ */
+abstract class RemoteControlClientCompat {
+ protected final Context mContext;
+ protected final Object mRcc;
+ protected VolumeCallback mVolumeCallback;
+
+ protected RemoteControlClientCompat(Context context, Object rcc) {
+ mContext = context;
+ mRcc = rcc;
+ }
+
+ public static RemoteControlClientCompat obtain(Context context, Object rcc) {
+ if (Build.VERSION.SDK_INT >= 16) {
+ return new JellybeanImpl(context, rcc);
+ }
+ return new LegacyImpl(context, rcc);
+ }
+
+ public Object getRemoteControlClient() {
+ return mRcc;
+ }
+
+ /**
+ * Sets the current playback information.
+ * Must be called at least once to attach to the remote control client.
+ *
+ * @param info The playback information. Must not be null.
+ */
+ public void setPlaybackInfo(PlaybackInfo info) {
+ }
+
+ /**
+ * Sets a callback to receive volume change requests from the remote control client.
+ *
+ * @param callback The volume callback to use or null if none.
+ */
+ public void setVolumeCallback(VolumeCallback callback) {
+ mVolumeCallback = callback;
+ }
+
+ /**
+ * Specifies information about the playback.
+ */
+ public static final class PlaybackInfo {
+ public int volume;
+ public int volumeMax;
+ public int volumeHandling = MediaRouter.RouteInfo.PLAYBACK_VOLUME_FIXED;
+ public int playbackStream = AudioManager.STREAM_MUSIC;
+ public int playbackType = MediaRouter.RouteInfo.PLAYBACK_TYPE_REMOTE;
+ }
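+
+ // Illustrative sketch only; context, rcc, and volumeCallback are placeholders for
+ // values a caller (such as MediaRouter's RemoteControlClientRecord) already holds.
+ //
+ //   RemoteControlClientCompat compat = RemoteControlClientCompat.obtain(context, rcc);
+ //   PlaybackInfo info = new PlaybackInfo();
+ //   info.volume = 5;
+ //   info.volumeMax = 15; // remaining fields keep their defaults
+ //   compat.setPlaybackInfo(info);
+ //   compat.setVolumeCallback(volumeCallback);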
+
+ /**
+ * Called when volume updates are requested by the remote control client.
+ */
+ public interface VolumeCallback {
+ /**
+ * Called when the volume should be increased or decreased.
+ *
+ * @param direction An integer indicating whether the volume is to be increased
+ * (positive value) or decreased (negative value).
+ * For bundled changes, the absolute value indicates the number of changes
+ * in the same direction, e.g. +3 corresponds to three "volume up" changes.
+ */
+ public void onVolumeUpdateRequest(int direction);
+
+ /**
+ * Called when the volume for the route should be set to the given value.
+ *
+ * @param volume An integer indicating the new volume value that should be used,
+ * always between 0 and the value set by {@link PlaybackInfo#volumeMax}.
+ */
+ public void onVolumeSetRequest(int volume);
+ }
+
+ /**
+ * Legacy implementation for platform versions prior to Jellybean.
+ * Does nothing.
+ */
+ static class LegacyImpl extends RemoteControlClientCompat {
+ public LegacyImpl(Context context, Object rcc) {
+ super(context, rcc);
+ }
+ }
+
+ /**
+ * Implementation for Jellybean.
+ *
+ * The basic idea of this implementation is to attach the RCC to a UserRouteInfo
+ * in order to hook up stream metadata and volume callbacks because there is no
+ * other API available to do so in this platform version. The UserRouteInfo itself
+ * is not attached to the MediaRouter so it is transparent to the user.
+ */
+ // @RequiresApi(16)
+ static class JellybeanImpl extends RemoteControlClientCompat {
+ private final Object mRouterObj;
+ private final Object mUserRouteCategoryObj;
+ private final Object mUserRouteObj;
+ private boolean mRegistered;
+
+ public JellybeanImpl(Context context, Object rcc) {
+ super(context, rcc);
+
+ mRouterObj = MediaRouterJellybean.getMediaRouter(context);
+ mUserRouteCategoryObj = MediaRouterJellybean.createRouteCategory(
+ mRouterObj, "", false);
+ mUserRouteObj = MediaRouterJellybean.createUserRoute(
+ mRouterObj, mUserRouteCategoryObj);
+ }
+
+ @Override
+ public void setPlaybackInfo(PlaybackInfo info) {
+ MediaRouterJellybean.UserRouteInfo.setVolume(
+ mUserRouteObj, info.volume);
+ MediaRouterJellybean.UserRouteInfo.setVolumeMax(
+ mUserRouteObj, info.volumeMax);
+ MediaRouterJellybean.UserRouteInfo.setVolumeHandling(
+ mUserRouteObj, info.volumeHandling);
+ MediaRouterJellybean.UserRouteInfo.setPlaybackStream(
+ mUserRouteObj, info.playbackStream);
+ MediaRouterJellybean.UserRouteInfo.setPlaybackType(
+ mUserRouteObj, info.playbackType);
+
+ if (!mRegistered) {
+ mRegistered = true;
+ MediaRouterJellybean.UserRouteInfo.setVolumeCallback(mUserRouteObj,
+ MediaRouterJellybean.createVolumeCallback(
+ new VolumeCallbackWrapper(this)));
+ MediaRouterJellybean.UserRouteInfo.setRemoteControlClient(mUserRouteObj, mRcc);
+ }
+ }
+
+ private static final class VolumeCallbackWrapper
+ implements MediaRouterJellybean.VolumeCallback {
+ // Unfortunately, the framework never unregisters its volume observer from
+ // the audio service so the UserRouteInfo object may leak along with
+ // any callbacks that we attach to it. Use a weak reference to prevent
+ // the volume callback from holding strong references to anything important.
+ private final WeakReference<JellybeanImpl> mImplWeak;
+
+ public VolumeCallbackWrapper(JellybeanImpl impl) {
+ mImplWeak = new WeakReference<JellybeanImpl>(impl);
+ }
+
+ @Override
+ public void onVolumeUpdateRequest(Object routeObj, int direction) {
+ JellybeanImpl impl = mImplWeak.get();
+ if (impl != null && impl.mVolumeCallback != null) {
+ impl.mVolumeCallback.onVolumeUpdateRequest(direction);
+ }
+ }
+
+ @Override
+ public void onVolumeSetRequest(Object routeObj, int volume) {
+ JellybeanImpl impl = mImplWeak.get();
+ if (impl != null && impl.mVolumeCallback != null) {
+ impl.mVolumeCallback.onVolumeSetRequest(volume);
+ }
+ }
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
new file mode 100644
index 0000000..f6e1497
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/RemotePlaybackClient.java
@@ -0,0 +1,1044 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.support.mediarouter.media;
+
+import android.app.PendingIntent;
+import android.content.BroadcastReceiver;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.net.Uri;
+import android.os.Bundle;
+import android.support.v4.util.ObjectsCompat;
+import android.util.Log;
+
+/**
+ * A helper class for playing media on remote routes using the remote playback protocol
+ * defined by {@link MediaControlIntent}.
+ * <p>
+ * The client maintains session state and offers a simplified interface for issuing
+ * remote playback media control intents to a single route.
+ * </p>
+ */
+public class RemotePlaybackClient {
+ static final String TAG = "RemotePlaybackClient";
+ static final boolean DEBUG = Log.isLoggable(TAG, Log.DEBUG);
+
+ private final Context mContext;
+ private final MediaRouter.RouteInfo mRoute;
+ private final ActionReceiver mActionReceiver;
+ private final PendingIntent mItemStatusPendingIntent;
+ private final PendingIntent mSessionStatusPendingIntent;
+ private final PendingIntent mMessagePendingIntent;
+
+ private boolean mRouteSupportsRemotePlayback;
+ private boolean mRouteSupportsQueuing;
+ private boolean mRouteSupportsSessionManagement;
+ private boolean mRouteSupportsMessaging;
+
+ String mSessionId;
+ StatusCallback mStatusCallback;
+ OnMessageReceivedListener mOnMessageReceivedListener;
+
+ /**
+ * Creates a remote playback client for a route.
+ *
+ * @param route The media route.
+ */
+ public RemotePlaybackClient(Context context, MediaRouter.RouteInfo route) {
+ if (context == null) {
+ throw new IllegalArgumentException("context must not be null");
+ }
+ if (route == null) {
+ throw new IllegalArgumentException("route must not be null");
+ }
+
+ mContext = context;
+ mRoute = route;
+
+ IntentFilter actionFilter = new IntentFilter();
+ actionFilter.addAction(ActionReceiver.ACTION_ITEM_STATUS_CHANGED);
+ actionFilter.addAction(ActionReceiver.ACTION_SESSION_STATUS_CHANGED);
+ actionFilter.addAction(ActionReceiver.ACTION_MESSAGE_RECEIVED);
+ mActionReceiver = new ActionReceiver();
+ context.registerReceiver(mActionReceiver, actionFilter);
+
+ Intent itemStatusIntent = new Intent(ActionReceiver.ACTION_ITEM_STATUS_CHANGED);
+ itemStatusIntent.setPackage(context.getPackageName());
+ mItemStatusPendingIntent = PendingIntent.getBroadcast(
+ context, 0, itemStatusIntent, 0);
+
+ Intent sessionStatusIntent = new Intent(ActionReceiver.ACTION_SESSION_STATUS_CHANGED);
+ sessionStatusIntent.setPackage(context.getPackageName());
+ mSessionStatusPendingIntent = PendingIntent.getBroadcast(
+ context, 0, sessionStatusIntent, 0);
+
+ Intent messageIntent = new Intent(ActionReceiver.ACTION_MESSAGE_RECEIVED);
+ messageIntent.setPackage(context.getPackageName());
+ mMessagePendingIntent = PendingIntent.getBroadcast(
+ context, 0, messageIntent, 0);
+ detectFeatures();
+ }
+
+ /**
+ * Releases resources owned by the client.
+ */
+ public void release() {
+ mContext.unregisterReceiver(mActionReceiver);
+ }
+
+ /**
+ * Returns true if the route supports remote playback.
+ * <p>
+ * If the route does not support remote playback, then none of the functionality
+ * offered by the client will be available.
+ * </p><p>
+ * This method returns true if the route supports all of the following
+ * actions: {@link MediaControlIntent#ACTION_PLAY play},
+ * {@link MediaControlIntent#ACTION_SEEK seek},
+ * {@link MediaControlIntent#ACTION_GET_STATUS get status},
+ * {@link MediaControlIntent#ACTION_PAUSE pause},
+ * {@link MediaControlIntent#ACTION_RESUME resume},
+ * {@link MediaControlIntent#ACTION_STOP stop}.
+ * </p>
+ *
+ * @return True if remote playback is supported.
+ */
+ public boolean isRemotePlaybackSupported() {
+ return mRouteSupportsRemotePlayback;
+ }
+
+ /**
+ * Returns true if the route supports queuing features.
+ * <p>
+ * If the route does not support queuing, then at most one media item can be played
+ * at a time and the {@link #enqueue} method will not be available.
+ * </p><p>
+ * This method returns true if the route supports all of the basic remote playback
+ * actions and all of the following actions:
+ * {@link MediaControlIntent#ACTION_ENQUEUE enqueue},
+ * {@link MediaControlIntent#ACTION_REMOVE remove}.
+ * </p>
+ *
+ * @return True if queuing is supported. Implies {@link #isRemotePlaybackSupported}
+ * is also true.
+ *
+ * @see #isRemotePlaybackSupported
+ */
+ public boolean isQueuingSupported() {
+ return mRouteSupportsQueuing;
+ }
+
+ /**
+ * Returns true if the route supports session management features.
+ * <p>
+ * If the route does not support session management, then the session will
+ * not be created until the first media item is played.
+ * </p><p>
+ * This method returns true if the route supports all of the basic remote playback
+ * actions and all of the following actions:
+ * {@link MediaControlIntent#ACTION_START_SESSION start session},
+ * {@link MediaControlIntent#ACTION_GET_SESSION_STATUS get session status},
+ * {@link MediaControlIntent#ACTION_END_SESSION end session}.
+ * </p>
+ *
+ * @return True if session management is supported.
+ * Implies {@link #isRemotePlaybackSupported} is also true.
+ *
+ * @see #isRemotePlaybackSupported
+ */
+ public boolean isSessionManagementSupported() {
+ return mRouteSupportsSessionManagement;
+ }
+
+ /**
+ * Returns true if the route supports messages.
+ * <p>
+ * This method returns true if the route supports all of the basic remote playback
+ * actions and all of the following actions:
+ * {@link MediaControlIntent#ACTION_START_SESSION start session},
+ * {@link MediaControlIntent#ACTION_SEND_MESSAGE send message},
+ * {@link MediaControlIntent#ACTION_END_SESSION end session}.
+ * </p>
+ *
+ * @return True if messaging is supported.
+ * Implies {@link #isRemotePlaybackSupported} is also true.
+ *
+ * @see #isRemotePlaybackSupported
+ */
+ public boolean isMessagingSupported() {
+ return mRouteSupportsMessaging;
+ }
+
+ /**
+ * Gets the current session id if there is one.
+ *
+ * @return The current session id, or null if none.
+ */
+ public String getSessionId() {
+ return mSessionId;
+ }
+
+ /**
+ * Sets the current session id.
+ * <p>
+ * It is usually not necessary to set the session id explicitly since
+ * it is created as a side-effect of other requests such as
+ * {@link #play}, {@link #enqueue}, and {@link #startSession}.
+ * </p>
+ *
+ * @param sessionId The new session id, or null if none.
+ */
+ public void setSessionId(String sessionId) {
+ if (!ObjectsCompat.equals(mSessionId, sessionId)) {
+ if (DEBUG) {
+ Log.d(TAG, "Session id is now: " + sessionId);
+ }
+ mSessionId = sessionId;
+ if (mStatusCallback != null) {
+ mStatusCallback.onSessionChanged(sessionId);
+ }
+ }
+ }
+
+ /**
+ * Returns true if the client currently has a session.
+ * <p>
+ * Equivalent to checking whether {@link #getSessionId} returns a non-null result.
+ * </p>
+ *
+ * @return True if there is a current session.
+ */
+ public boolean hasSession() {
+ return mSessionId != null;
+ }
+
+ /**
+ * Sets a callback that should receive status updates when the state of
+ * media sessions or media items created by this instance of the remote
+ * playback client changes.
+ * <p>
+ * The callback should be set before the session is created or any play
+ * commands are issued.
+ * </p>
+ *
+ * @param callback The callback to set. May be null to remove the previous callback.
+ */
+ public void setStatusCallback(StatusCallback callback) {
+ mStatusCallback = callback;
+ }
+
+ /**
+ * Sets a callback that should receive messages sent from media sessions
+ * created by this instance of the remote playback client.
+ * <p>
+ * The callback should be set before the session is created.
+ * </p>
+ *
+ * @param listener The callback to set. May be null to remove the previous callback.
+ */
+ public void setOnMessageReceivedListener(OnMessageReceivedListener listener) {
+ mOnMessageReceivedListener = listener;
+ }
+
+ /**
+ * Sends a request to play a media item.
+ * <p>
+ * Clears the queue and starts playing the new item immediately. If the queue
+ * was previously paused, then it is resumed as a side-effect of this request.
+ * </p><p>
+ * The request is issued in the current session. If no session is available, then
+ * one is created implicitly.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_PLAY ACTION_PLAY} for
+ * more information about the semantics of this request.
+ * </p>
+ *
+ * @param contentUri The content Uri to play.
+ * @param mimeType The mime type of the content, or null if unknown.
+ * @param positionMillis The initial content position for the item in milliseconds,
+ * or <code>0</code> to start at the beginning.
+ * @param metadata The media item metadata bundle, or null if none.
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_PLAY} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws UnsupportedOperationException if the route does not support remote playback.
+ *
+ * @see MediaControlIntent#ACTION_PLAY
+ * @see #isRemotePlaybackSupported
+ */
+ public void play(Uri contentUri, String mimeType, Bundle metadata,
+ long positionMillis, Bundle extras, ItemActionCallback callback) {
+ playOrEnqueue(contentUri, mimeType, metadata, positionMillis,
+ extras, callback, MediaControlIntent.ACTION_PLAY);
+ }
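+
+ // Illustrative sketch only; route, contentUri, and statusCallback are placeholders
+ // for values the caller already has. A typical caller creates the client for the
+ // selected route, registers a status callback before issuing commands, then plays.
+ //
+ //   RemotePlaybackClient client = new RemotePlaybackClient(context, route);
+ //   if (client.isRemotePlaybackSupported()) {
+ //       client.setStatusCallback(statusCallback);
+ //       client.play(contentUri, "video/mp4", null, 0, null, null);
+ //   }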
+
+ /**
+ * Sends a request to enqueue a media item.
+ * <p>
+ * Enqueues a new item to play. If the queue was previously paused, then it
+ * will remain paused.
+ * </p><p>
+ * The request is issued in the current session. If no session is available, then
+ * one is created implicitly.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_ENQUEUE ACTION_ENQUEUE} for
+ * more information about the semantics of this request.
+ * </p>
+ *
+ * @param contentUri The content Uri to enqueue.
+ * @param mimeType The mime type of the content, or null if unknown.
+ * @param positionMillis The initial content position for the item in milliseconds,
+ * or <code>0</code> to start at the beginning.
+ * @param metadata The media item metadata bundle, or null if none.
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_ENQUEUE} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws UnsupportedOperationException if the route does not support queuing.
+ *
+ * @see MediaControlIntent#ACTION_ENQUEUE
+ * @see #isRemotePlaybackSupported
+ * @see #isQueuingSupported
+ */
+ public void enqueue(Uri contentUri, String mimeType, Bundle metadata,
+ long positionMillis, Bundle extras, ItemActionCallback callback) {
+ playOrEnqueue(contentUri, mimeType, metadata, positionMillis,
+ extras, callback, MediaControlIntent.ACTION_ENQUEUE);
+ }
+
+ private void playOrEnqueue(Uri contentUri, String mimeType, Bundle metadata,
+ long positionMillis, Bundle extras,
+ final ItemActionCallback callback, String action) {
+ if (contentUri == null) {
+ throw new IllegalArgumentException("contentUri must not be null");
+ }
+ throwIfRemotePlaybackNotSupported();
+ if (action.equals(MediaControlIntent.ACTION_ENQUEUE)) {
+ throwIfQueuingNotSupported();
+ }
+
+ Intent intent = new Intent(action);
+ intent.setDataAndType(contentUri, mimeType);
+ intent.putExtra(MediaControlIntent.EXTRA_ITEM_STATUS_UPDATE_RECEIVER,
+ mItemStatusPendingIntent);
+ if (metadata != null) {
+ intent.putExtra(MediaControlIntent.EXTRA_ITEM_METADATA, metadata);
+ }
+ if (positionMillis != 0) {
+ intent.putExtra(MediaControlIntent.EXTRA_ITEM_CONTENT_POSITION, positionMillis);
+ }
+ performItemAction(intent, mSessionId, null, extras, callback);
+ }
+
+ /**
+ * Sends a request to seek to a new position in a media item.
+ * <p>
+ * Seeks to a new position. If the queue was previously paused then it
+ * remains paused but the item's new position is still remembered.
+ * </p><p>
+ * The request is issued in the current session.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_SEEK ACTION_SEEK} for
+ * more information about the semantics of this request.
+ * </p>
+ *
+ * @param itemId The item id.
+ * @param positionMillis The new content position for the item in milliseconds,
+ * or <code>0</code> to start at the beginning.
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_SEEK} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws IllegalStateException if there is no current session.
+ *
+ * @see MediaControlIntent#ACTION_SEEK
+ * @see #isRemotePlaybackSupported
+ */
+ public void seek(String itemId, long positionMillis, Bundle extras,
+ ItemActionCallback callback) {
+ if (itemId == null) {
+ throw new IllegalArgumentException("itemId must not be null");
+ }
+ throwIfNoCurrentSession();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_SEEK);
+ intent.putExtra(MediaControlIntent.EXTRA_ITEM_CONTENT_POSITION, positionMillis);
+ performItemAction(intent, mSessionId, itemId, extras, callback);
+ }
+
+ /**
+ * Sends a request to get the status of a media item.
+ * <p>
+ * The request is issued in the current session.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_GET_STATUS ACTION_GET_STATUS} for
+ * more information about the semantics of this request.
+ * </p>
+ *
+ * @param itemId The item id.
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_GET_STATUS} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws IllegalStateException if there is no current session.
+ *
+ * @see MediaControlIntent#ACTION_GET_STATUS
+ * @see #isRemotePlaybackSupported
+ */
+ public void getStatus(String itemId, Bundle extras, ItemActionCallback callback) {
+ if (itemId == null) {
+ throw new IllegalArgumentException("itemId must not be null");
+ }
+ throwIfNoCurrentSession();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_GET_STATUS);
+ performItemAction(intent, mSessionId, itemId, extras, callback);
+ }
+
+ /**
+ * Sends a request to remove a media item from the queue.
+ * <p>
+ * The request is issued in the current session.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_REMOVE ACTION_REMOVE} for
+ * more information about the semantics of this request.
+ * </p>
+ *
+ * @param itemId The item id.
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_REMOVE} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws IllegalStateException if there is no current session.
+ * @throws UnsupportedOperationException if the route does not support queuing.
+ *
+ * @see MediaControlIntent#ACTION_REMOVE
+ * @see #isRemotePlaybackSupported
+ * @see #isQueuingSupported
+ */
+ public void remove(String itemId, Bundle extras, ItemActionCallback callback) {
+ if (itemId == null) {
+ throw new IllegalArgumentException("itemId must not be null");
+ }
+ throwIfQueuingNotSupported();
+ throwIfNoCurrentSession();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_REMOVE);
+ performItemAction(intent, mSessionId, itemId, extras, callback);
+ }
+
+ /**
+ * Sends a request to pause media playback.
+ * <p>
+ * The request is issued in the current session. If playback is already paused
+ * then the request has no effect.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_PAUSE ACTION_PAUSE} for
+ * more information about the semantics of this request.
+ * </p>
+ *
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_PAUSE} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws IllegalStateException if there is no current session.
+ *
+ * @see MediaControlIntent#ACTION_PAUSE
+ * @see #isRemotePlaybackSupported
+ */
+ public void pause(Bundle extras, SessionActionCallback callback) {
+ throwIfNoCurrentSession();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_PAUSE);
+ performSessionAction(intent, mSessionId, extras, callback);
+ }
+
+ /**
+ * Sends a request to resume (unpause) media playback.
+ * <p>
+ * The request is issued in the current session. If playback is not paused
+ * then the request has no effect.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_RESUME ACTION_RESUME} for
+ * more information about the semantics of this request.
+ * </p>
+ *
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_RESUME} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws IllegalStateException if there is no current session.
+ *
+ * @see MediaControlIntent#ACTION_RESUME
+ * @see #isRemotePlaybackSupported
+ */
+ public void resume(Bundle extras, SessionActionCallback callback) {
+ throwIfNoCurrentSession();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_RESUME);
+ performSessionAction(intent, mSessionId, extras, callback);
+ }
+
+ /**
+ * Sends a request to stop media playback and clear the media playback queue.
+ * <p>
+ * The request is issued in the current session. If the queue is already
+ * empty then the request has no effect.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_STOP ACTION_STOP} for
+ * more information about the semantics of this request.
+ * </p>
+ *
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_STOP} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws IllegalStateException if there is no current session.
+ *
+ * @see MediaControlIntent#ACTION_STOP
+ * @see #isRemotePlaybackSupported
+ */
+ public void stop(Bundle extras, SessionActionCallback callback) {
+ throwIfNoCurrentSession();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_STOP);
+ performSessionAction(intent, mSessionId, extras, callback);
+ }
+
+ /**
+ * Sends a request to start a new media playback session.
+ * <p>
+ * The application must wait for the callback to indicate that this request
+ * is complete before issuing other requests that affect the session. If this
+ * request is successful then the previous session will be invalidated.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_START_SESSION ACTION_START_SESSION}
+ * for more information about the semantics of this request.
+ * </p>
+ *
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_START_SESSION} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws UnsupportedOperationException if the route does not support session management.
+ *
+ * @see MediaControlIntent#ACTION_START_SESSION
+ * @see #isRemotePlaybackSupported
+ * @see #isSessionManagementSupported
+ */
+ public void startSession(Bundle extras, SessionActionCallback callback) {
+ throwIfSessionManagementNotSupported();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_START_SESSION);
+ intent.putExtra(MediaControlIntent.EXTRA_SESSION_STATUS_UPDATE_RECEIVER,
+ mSessionStatusPendingIntent);
+ if (mRouteSupportsMessaging) {
+ intent.putExtra(MediaControlIntent.EXTRA_MESSAGE_RECEIVER, mMessagePendingIntent);
+ }
+ performSessionAction(intent, null, extras, callback);
+ }
+
+ /**
+ * Sends a message.
+ * <p>
+ * The request is issued in the current session.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_SEND_MESSAGE} for
+ * more information about the semantics of this request.
+ * </p>
+ *
+ * @param message A bundle message denoting {@link MediaControlIntent#EXTRA_MESSAGE}.
+ * @param callback A callback to invoke when the request has been processed, or null if none.
+ *
+ * @throws IllegalStateException if there is no current session.
+ * @throws UnsupportedOperationException if the route does not support messages.
+ *
+ * @see MediaControlIntent#ACTION_SEND_MESSAGE
+ * @see #isMessagingSupported
+ */
+ public void sendMessage(Bundle message, SessionActionCallback callback) {
+ throwIfNoCurrentSession();
+ throwIfMessageNotSupported();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_SEND_MESSAGE);
+ performSessionAction(intent, mSessionId, message, callback);
+ }
+
+ /**
+ * Sends a request to get the status of the media playback session.
+ * <p>
+ * The request is issued in the current session.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_GET_SESSION_STATUS
+ * ACTION_GET_SESSION_STATUS} for more information about the semantics of this request.
+ * </p>
+ *
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_GET_SESSION_STATUS} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws IllegalStateException if there is no current session.
+ * @throws UnsupportedOperationException if the route does not support session management.
+ *
+ * @see MediaControlIntent#ACTION_GET_SESSION_STATUS
+ * @see #isRemotePlaybackSupported
+ * @see #isSessionManagementSupported
+ */
+ public void getSessionStatus(Bundle extras, SessionActionCallback callback) {
+ throwIfSessionManagementNotSupported();
+ throwIfNoCurrentSession();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_GET_SESSION_STATUS);
+ performSessionAction(intent, mSessionId, extras, callback);
+ }
+
+ /**
+ * Sends a request to end the media playback session.
+ * <p>
+ * The request is issued in the current session. If this request is successful,
+ * the {@link #getSessionId session id property} will be set to null after
+ * the callback is invoked.
+ * </p><p>
+ * Please refer to {@link MediaControlIntent#ACTION_END_SESSION ACTION_END_SESSION}
+ * for more information about the semantics of this request.
+ * </p>
+ *
+ * @param extras A bundle of extra arguments to be added to the
+ * {@link MediaControlIntent#ACTION_END_SESSION} intent, or null if none.
+ * @param callback A callback to invoke when the request has been
+ * processed, or null if none.
+ *
+ * @throws IllegalStateException if there is no current session.
+ * @throws UnsupportedOperationException if the route does not support session management.
+ *
+ * @see MediaControlIntent#ACTION_END_SESSION
+ * @see #isRemotePlaybackSupported
+ * @see #isSessionManagementSupported
+ */
+ public void endSession(Bundle extras, SessionActionCallback callback) {
+ throwIfSessionManagementNotSupported();
+ throwIfNoCurrentSession();
+
+ Intent intent = new Intent(MediaControlIntent.ACTION_END_SESSION);
+ performSessionAction(intent, mSessionId, extras, callback);
+ }
+
+ private void performItemAction(final Intent intent,
+ final String sessionId, final String itemId,
+ Bundle extras, final ItemActionCallback callback) {
+ intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ if (sessionId != null) {
+ intent.putExtra(MediaControlIntent.EXTRA_SESSION_ID, sessionId);
+ }
+ if (itemId != null) {
+ intent.putExtra(MediaControlIntent.EXTRA_ITEM_ID, itemId);
+ }
+ if (extras != null) {
+ intent.putExtras(extras);
+ }
+ logRequest(intent);
+ mRoute.sendControlRequest(intent, new MediaRouter.ControlRequestCallback() {
+ @Override
+ public void onResult(Bundle data) {
+ if (data != null) {
+ String sessionIdResult = inferMissingResult(sessionId,
+ data.getString(MediaControlIntent.EXTRA_SESSION_ID));
+ MediaSessionStatus sessionStatus = MediaSessionStatus.fromBundle(
+ data.getBundle(MediaControlIntent.EXTRA_SESSION_STATUS));
+ String itemIdResult = inferMissingResult(itemId,
+ data.getString(MediaControlIntent.EXTRA_ITEM_ID));
+ MediaItemStatus itemStatus = MediaItemStatus.fromBundle(
+ data.getBundle(MediaControlIntent.EXTRA_ITEM_STATUS));
+ adoptSession(sessionIdResult);
+ if (sessionIdResult != null && itemIdResult != null && itemStatus != null) {
+ if (DEBUG) {
+ Log.d(TAG, "Received result from " + intent.getAction()
+ + ": data=" + bundleToString(data)
+ + ", sessionId=" + sessionIdResult
+ + ", sessionStatus=" + sessionStatus
+ + ", itemId=" + itemIdResult
+ + ", itemStatus=" + itemStatus);
+ }
+ callback.onResult(data, sessionIdResult, sessionStatus,
+ itemIdResult, itemStatus);
+ return;
+ }
+ }
+ handleInvalidResult(intent, callback, data);
+ }
+
+ @Override
+ public void onError(String error, Bundle data) {
+ handleError(intent, callback, error, data);
+ }
+ });
+ }
+
+ private void performSessionAction(final Intent intent, final String sessionId,
+ Bundle extras, final SessionActionCallback callback) {
+ intent.addCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ if (sessionId != null) {
+ intent.putExtra(MediaControlIntent.EXTRA_SESSION_ID, sessionId);
+ }
+ if (extras != null) {
+ intent.putExtras(extras);
+ }
+ logRequest(intent);
+ mRoute.sendControlRequest(intent, new MediaRouter.ControlRequestCallback() {
+ @Override
+ public void onResult(Bundle data) {
+ if (data != null) {
+ String sessionIdResult = inferMissingResult(sessionId,
+ data.getString(MediaControlIntent.EXTRA_SESSION_ID));
+ MediaSessionStatus sessionStatus = MediaSessionStatus.fromBundle(
+ data.getBundle(MediaControlIntent.EXTRA_SESSION_STATUS));
+ adoptSession(sessionIdResult);
+ if (sessionIdResult != null) {
+ if (DEBUG) {
+ Log.d(TAG, "Received result from " + intent.getAction()
+ + ": data=" + bundleToString(data)
+ + ", sessionId=" + sessionIdResult
+ + ", sessionStatus=" + sessionStatus);
+ }
+ try {
+ callback.onResult(data, sessionIdResult, sessionStatus);
+ } finally {
+ if (intent.getAction().equals(MediaControlIntent.ACTION_END_SESSION)
+ && sessionIdResult.equals(mSessionId)) {
+ setSessionId(null);
+ }
+ }
+ return;
+ }
+ }
+ handleInvalidResult(intent, callback, data);
+ }
+
+ @Override
+ public void onError(String error, Bundle data) {
+ handleError(intent, callback, error, data);
+ }
+ });
+ }
+
+ void adoptSession(String sessionId) {
+ if (sessionId != null) {
+ setSessionId(sessionId);
+ }
+ }
+
+ void handleInvalidResult(Intent intent, ActionCallback callback,
+ Bundle data) {
+ Log.w(TAG, "Received invalid result data from " + intent.getAction()
+ + ": data=" + bundleToString(data));
+ callback.onError(null, MediaControlIntent.ERROR_UNKNOWN, data);
+ }
+
+ void handleError(Intent intent, ActionCallback callback,
+ String error, Bundle data) {
+ final int code;
+ if (data != null) {
+ code = data.getInt(MediaControlIntent.EXTRA_ERROR_CODE,
+ MediaControlIntent.ERROR_UNKNOWN);
+ } else {
+ code = MediaControlIntent.ERROR_UNKNOWN;
+ }
+ if (DEBUG) {
+ Log.w(TAG, "Received error from " + intent.getAction()
+ + ": error=" + error
+ + ", code=" + code
+ + ", data=" + bundleToString(data));
+ }
+ callback.onError(error, code, data);
+ }
+
+ private void detectFeatures() {
+ mRouteSupportsRemotePlayback = routeSupportsAction(MediaControlIntent.ACTION_PLAY)
+ && routeSupportsAction(MediaControlIntent.ACTION_SEEK)
+ && routeSupportsAction(MediaControlIntent.ACTION_GET_STATUS)
+ && routeSupportsAction(MediaControlIntent.ACTION_PAUSE)
+ && routeSupportsAction(MediaControlIntent.ACTION_RESUME)
+ && routeSupportsAction(MediaControlIntent.ACTION_STOP);
+ mRouteSupportsQueuing = mRouteSupportsRemotePlayback
+ && routeSupportsAction(MediaControlIntent.ACTION_ENQUEUE)
+ && routeSupportsAction(MediaControlIntent.ACTION_REMOVE);
+ mRouteSupportsSessionManagement = mRouteSupportsRemotePlayback
+ && routeSupportsAction(MediaControlIntent.ACTION_START_SESSION)
+ && routeSupportsAction(MediaControlIntent.ACTION_GET_SESSION_STATUS)
+ && routeSupportsAction(MediaControlIntent.ACTION_END_SESSION);
+ mRouteSupportsMessaging = doesRouteSupportMessaging();
+ }
+
+ private boolean routeSupportsAction(String action) {
+ return mRoute.supportsControlAction(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK, action);
+ }
+
+ private boolean doesRouteSupportMessaging() {
+ for (IntentFilter filter : mRoute.getControlFilters()) {
+ if (filter.hasAction(MediaControlIntent.ACTION_SEND_MESSAGE)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private void throwIfRemotePlaybackNotSupported() {
+ if (!mRouteSupportsRemotePlayback) {
+ throw new UnsupportedOperationException("The route does not support remote playback.");
+ }
+ }
+
+ private void throwIfQueuingNotSupported() {
+ if (!mRouteSupportsQueuing) {
+ throw new UnsupportedOperationException("The route does not support queuing.");
+ }
+ }
+
+ private void throwIfSessionManagementNotSupported() {
+ if (!mRouteSupportsSessionManagement) {
+ throw new UnsupportedOperationException("The route does not support "
+ + "session management.");
+ }
+ }
+
+ private void throwIfMessageNotSupported() {
+ if (!mRouteSupportsMessaging) {
+ throw new UnsupportedOperationException("The route does not support message.");
+ }
+ }
+
+ private void throwIfNoCurrentSession() {
+ if (mSessionId == null) {
+ throw new IllegalStateException("There is no current session.");
+ }
+ }
+
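+ // Worked examples of the inference below (illustrative):
+ //   inferMissingResult("s1", null)  -> "s1"   result missing; fall back to the request
+ //   inferMissingResult(null, "s1")  -> "s1"   request did not specify a value
+ //   inferMissingResult("s1", "s1")  -> "s1"   result matches the request
+ //   inferMissingResult("s1", "s2")  -> null   result conflicts with the request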
+ static String inferMissingResult(String request, String result) {
+ if (result == null) {
+ // Result is missing.
+ return request;
+ }
+ if (request == null || request.equals(result)) {
+ // Request didn't specify a value or result matches request.
+ return result;
+ }
+ // Result conflicts with request.
+ return null;
+ }
+
+ private static void logRequest(Intent intent) {
+ if (DEBUG) {
+ Log.d(TAG, "Sending request: " + intent);
+ }
+ }
+
+ static String bundleToString(Bundle bundle) {
+ if (bundle != null) {
+ bundle.size(); // force bundle to be unparcelled
+ return bundle.toString();
+ }
+ return "null";
+ }
+
+ private final class ActionReceiver extends BroadcastReceiver {
+ public static final String ACTION_ITEM_STATUS_CHANGED =
+ "android.support.v7.media.actions.ACTION_ITEM_STATUS_CHANGED";
+ public static final String ACTION_SESSION_STATUS_CHANGED =
+ "android.support.v7.media.actions.ACTION_SESSION_STATUS_CHANGED";
+ public static final String ACTION_MESSAGE_RECEIVED =
+ "android.support.v7.media.actions.ACTION_MESSAGE_RECEIVED";
+
+ ActionReceiver() {
+ }
+
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ String sessionId = intent.getStringExtra(MediaControlIntent.EXTRA_SESSION_ID);
+ if (sessionId == null || !sessionId.equals(mSessionId)) {
+ Log.w(TAG, "Discarding spurious status callback "
+ + "with missing or invalid session id: sessionId=" + sessionId);
+ return;
+ }
+
+ MediaSessionStatus sessionStatus = MediaSessionStatus.fromBundle(
+ intent.getBundleExtra(MediaControlIntent.EXTRA_SESSION_STATUS));
+ String action = intent.getAction();
+ if (action.equals(ACTION_ITEM_STATUS_CHANGED)) {
+ String itemId = intent.getStringExtra(MediaControlIntent.EXTRA_ITEM_ID);
+ if (itemId == null) {
+ Log.w(TAG, "Discarding spurious status callback with missing item id.");
+ return;
+ }
+
+ MediaItemStatus itemStatus = MediaItemStatus.fromBundle(
+ intent.getBundleExtra(MediaControlIntent.EXTRA_ITEM_STATUS));
+ if (itemStatus == null) {
+ Log.w(TAG, "Discarding spurious status callback with missing item status.");
+ return;
+ }
+
+ if (DEBUG) {
+ Log.d(TAG, "Received item status callback: sessionId=" + sessionId
+ + ", sessionStatus=" + sessionStatus
+ + ", itemId=" + itemId
+ + ", itemStatus=" + itemStatus);
+ }
+
+ if (mStatusCallback != null) {
+ mStatusCallback.onItemStatusChanged(intent.getExtras(),
+ sessionId, sessionStatus, itemId, itemStatus);
+ }
+ } else if (action.equals(ACTION_SESSION_STATUS_CHANGED)) {
+ if (sessionStatus == null) {
+ Log.w(TAG, "Discarding spurious media status callback with "
+ +"missing session status.");
+ return;
+ }
+
+ if (DEBUG) {
+ Log.d(TAG, "Received session status callback: sessionId=" + sessionId
+ + ", sessionStatus=" + sessionStatus);
+ }
+
+ if (mStatusCallback != null) {
+ mStatusCallback.onSessionStatusChanged(intent.getExtras(),
+ sessionId, sessionStatus);
+ }
+ } else if (action.equals(ACTION_MESSAGE_RECEIVED)) {
+ if (DEBUG) {
+ Log.d(TAG, "Received message callback: sessionId=" + sessionId);
+ }
+
+ if (mOnMessageReceivedListener != null) {
+ mOnMessageReceivedListener.onMessageReceived(sessionId,
+ intent.getBundleExtra(MediaControlIntent.EXTRA_MESSAGE));
+ }
+ }
+ }
+ }
+
+ /**
+ * A callback that will receive media status updates.
+ */
+ public static abstract class StatusCallback {
+ /**
+ * Called when the status of a media item changes.
+ *
+ * @param data The result data bundle.
+ * @param sessionId The session id.
+ * @param sessionStatus The session status, or null if unknown.
+ * @param itemId The item id.
+ * @param itemStatus The item status.
+ */
+ public void onItemStatusChanged(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus,
+ String itemId, MediaItemStatus itemStatus) {
+ }
+
+ /**
+ * Called when the status of a media session changes.
+ *
+ * @param data The result data bundle.
+ * @param sessionId The session id.
+ * @param sessionStatus The session status, or null if unknown.
+ */
+ public void onSessionStatusChanged(Bundle data,
+ String sessionId, MediaSessionStatus sessionStatus) {
+ }
+
+ /**
+ * Called when the session of the remote playback client changes.
+ *
+ * @param sessionId The new session id.
+ */
+ public void onSessionChanged(String sessionId) {
+ }
+ }
+
+ /**
+ * Base callback type for remote playback requests.
+ */
+ public static abstract class ActionCallback {
+ /**
+ * Called when a media control request fails.
+ *
+ * @param error A localized error message which may be shown to the user, or null
+ * if the cause of the error is unclear.
+ * @param code The error code, or {@link MediaControlIntent#ERROR_UNKNOWN} if unknown.
+ * @param data The error data bundle, or null if none.
+ */
+ public void onError(String error, int code, Bundle data) {
+ }
+ }
+
+ /**
+ * Callback for remote playback requests that operate on items.
+ */
+ public static abstract class ItemActionCallback extends ActionCallback {
+ /**
+ * Called when the request succeeds.
+ *
+ * @param data The result data bundle.
+ * @param sessionId The session id.
+ * @param sessionStatus The session status, or null if unknown.
+ * @param itemId The item id.
+ * @param itemStatus The item status.
+ */
+ public void onResult(Bundle data, String sessionId, MediaSessionStatus sessionStatus,
+ String itemId, MediaItemStatus itemStatus) {
+ }
+ }
+
+ /**
+ * Callback for remote playback requests that operate on sessions.
+ */
+ public static abstract class SessionActionCallback extends ActionCallback {
+ /**
+ * Called when the request succeeds.
+ *
+ * @param data The result data bundle.
+ * @param sessionId The session id.
+ * @param sessionStatus The session status, or null if unknown.
+ */
+ public void onResult(Bundle data, String sessionId, MediaSessionStatus sessionStatus) {
+ }
+ }
+
+ /**
+ * A callback that will receive messages from media sessions.
+ */
+ public interface OnMessageReceivedListener {
+ /**
+ * Called when a message is received.
+ *
+ * @param sessionId The session id.
+ * @param message A bundle message denoting {@link MediaControlIntent#EXTRA_MESSAGE}.
+ */
+ void onMessageReceived(String sessionId, Bundle message);
+ }
+}
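The methods above form a small request/callback API: item-level requests (play, enqueue, seek, getStatus, remove) report back through ItemActionCallback, while session-level requests (pause, resume, stop, startSession, sendMessage, getSessionStatus, endSession) use SessionActionCallback. A rough usage sketch follows; it assumes a "client" instance of the client class above, already constructed for a route that supports remote playback (see isRemotePlaybackSupported), and is illustrative rather than part of the change itself.

    // Illustrative only: "client" is assumed to be an instance of the client class above.
    client.play(Uri.parse("http://example.com/video.mp4"), "video/mp4",
            null /* metadata */, 0 /* positionMillis */, null /* extras */,
            new ItemActionCallback() {
        @Override
        public void onResult(Bundle data, String sessionId, MediaSessionStatus sessionStatus,
                String itemId, MediaItemStatus itemStatus) {
            // A session now exists (ACTION_PLAY creates one implicitly), so
            // session-level requests are legal from here on.
            client.pause(null, new SessionActionCallback() {
                @Override
                public void onResult(Bundle data, String sessionId,
                        MediaSessionStatus sessionStatus) {
                    // resume(), stop(), getSessionStatus() and endSession()
                    // follow the same request/callback pattern.
                }
            });
        }

        @Override
        public void onError(String error, int code, Bundle data) {
            // code is ERROR_UNKNOWN when the route returned no usable result data.
        }
    });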
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java b/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
new file mode 100644
index 0000000..a38491f
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/SystemMediaRouteProvider.java
@@ -0,0 +1,883 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.support.mediarouter.media;
+
+import android.content.BroadcastReceiver;
+import android.content.ComponentName;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.content.res.Resources;
+import android.media.AudioManager;
+import android.os.Build;
+import android.support.annotation.RequiresApi;
+import android.view.Display;
+
+import com.android.media.update.ApiHelper;
+import com.android.media.update.R;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Provides routes for built-in system destinations such as the local display
+ * and speaker. On Jellybean and newer platform releases, queries the framework
+ * MediaRouter for framework-provided routes and registers non-framework-provided
+ * routes as user routes.
+ */
+abstract class SystemMediaRouteProvider extends MediaRouteProvider {
+ private static final String TAG = "SystemMediaRouteProvider";
+
+ public static final String PACKAGE_NAME = "android";
+ public static final String DEFAULT_ROUTE_ID = "DEFAULT_ROUTE";
+
+ protected SystemMediaRouteProvider(Context context) {
+ super(context, new ProviderMetadata(new ComponentName(PACKAGE_NAME,
+ SystemMediaRouteProvider.class.getName())));
+ }
+
+ public static SystemMediaRouteProvider obtain(Context context, SyncCallback syncCallback) {
+ if (Build.VERSION.SDK_INT >= 24) {
+ return new Api24Impl(context, syncCallback);
+ }
+ if (Build.VERSION.SDK_INT >= 18) {
+ return new JellybeanMr2Impl(context, syncCallback);
+ }
+ if (Build.VERSION.SDK_INT >= 17) {
+ return new JellybeanMr1Impl(context, syncCallback);
+ }
+ if (Build.VERSION.SDK_INT >= 16) {
+ return new JellybeanImpl(context, syncCallback);
+ }
+ return new LegacyImpl(context);
+ }
+
+ /**
+ * Called by the media router when a route is added to synchronize state with
+ * the framework media router.
+ */
+ public void onSyncRouteAdded(MediaRouter.RouteInfo route) {
+ }
+
+ /**
+ * Called by the media router when a route is removed to synchronize state with
+ * the framework media router.
+ */
+ public void onSyncRouteRemoved(MediaRouter.RouteInfo route) {
+ }
+
+ /**
+ * Called by the media router when a route is changed to synchronize state with
+ * the framework media router.
+ */
+ public void onSyncRouteChanged(MediaRouter.RouteInfo route) {
+ }
+
+ /**
+ * Called by the media router when a route is selected to synchronize state with
+ * the framework media router.
+ */
+ public void onSyncRouteSelected(MediaRouter.RouteInfo route) {
+ }
+
+ /**
+ * Callbacks into the media router to synchronize state with the framework media router.
+ */
+ public interface SyncCallback {
+ void onSystemRouteSelectedByDescriptorId(String id);
+ }
+
+ protected Object getDefaultRoute() {
+ return null;
+ }
+
+ protected Object getSystemRoute(MediaRouter.RouteInfo route) {
+ return null;
+ }
+
+ /**
+ * Legacy implementation for platform versions prior to Jellybean.
+ */
+ static class LegacyImpl extends SystemMediaRouteProvider {
+ static final int PLAYBACK_STREAM = AudioManager.STREAM_MUSIC;
+
+ private static final ArrayList<IntentFilter> CONTROL_FILTERS;
+ static {
+ IntentFilter f = new IntentFilter();
+ f.addCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO);
+ f.addCategory(MediaControlIntent.CATEGORY_LIVE_VIDEO);
+
+ CONTROL_FILTERS = new ArrayList<IntentFilter>();
+ CONTROL_FILTERS.add(f);
+ }
+
+ final AudioManager mAudioManager;
+ private final VolumeChangeReceiver mVolumeChangeReceiver;
+ int mLastReportedVolume = -1;
+
+ public LegacyImpl(Context context) {
+ super(context);
+ mAudioManager = (AudioManager)context.getSystemService(Context.AUDIO_SERVICE);
+ mVolumeChangeReceiver = new VolumeChangeReceiver();
+
+ context.registerReceiver(mVolumeChangeReceiver,
+ new IntentFilter(VolumeChangeReceiver.VOLUME_CHANGED_ACTION));
+ publishRoutes();
+ }
+
+ void publishRoutes() {
+ Resources r = getContext().getResources();
+ int maxVolume = mAudioManager.getStreamMaxVolume(PLAYBACK_STREAM);
+ mLastReportedVolume = mAudioManager.getStreamVolume(PLAYBACK_STREAM);
+ MediaRouteDescriptor defaultRoute = new MediaRouteDescriptor.Builder(
+ DEFAULT_ROUTE_ID, r.getString(R.string.mr_system_route_name))
+ .addControlFilters(CONTROL_FILTERS)
+ .setPlaybackStream(PLAYBACK_STREAM)
+ .setPlaybackType(MediaRouter.RouteInfo.PLAYBACK_TYPE_LOCAL)
+ .setVolumeHandling(MediaRouter.RouteInfo.PLAYBACK_VOLUME_VARIABLE)
+ .setVolumeMax(maxVolume)
+ .setVolume(mLastReportedVolume)
+ .build();
+
+ MediaRouteProviderDescriptor providerDescriptor =
+ new MediaRouteProviderDescriptor.Builder()
+ .addRoute(defaultRoute)
+ .build();
+ setDescriptor(providerDescriptor);
+ }
+
+ @Override
+ public RouteController onCreateRouteController(String routeId) {
+ if (routeId.equals(DEFAULT_ROUTE_ID)) {
+ return new DefaultRouteController();
+ }
+ return null;
+ }
+
+ final class DefaultRouteController extends RouteController {
+ @Override
+ public void onSetVolume(int volume) {
+ mAudioManager.setStreamVolume(PLAYBACK_STREAM, volume, 0);
+ publishRoutes();
+ }
+
+ @Override
+ public void onUpdateVolume(int delta) {
+ int volume = mAudioManager.getStreamVolume(PLAYBACK_STREAM);
+ int maxVolume = mAudioManager.getStreamMaxVolume(PLAYBACK_STREAM);
+ int newVolume = Math.min(maxVolume, Math.max(0, volume + delta));
+ if (newVolume != volume) {
+ mAudioManager.setStreamVolume(PLAYBACK_STREAM, newVolume, 0);
+ }
+ publishRoutes();
+ }
+ }
+
+ final class VolumeChangeReceiver extends BroadcastReceiver {
+ // These constants come from AudioManager.
+ public static final String VOLUME_CHANGED_ACTION =
+ "android.media.VOLUME_CHANGED_ACTION";
+ public static final String EXTRA_VOLUME_STREAM_TYPE =
+ "android.media.EXTRA_VOLUME_STREAM_TYPE";
+ public static final String EXTRA_VOLUME_STREAM_VALUE =
+ "android.media.EXTRA_VOLUME_STREAM_VALUE";
+
+ @Override
+ public void onReceive(Context context, Intent intent) {
+ if (intent.getAction().equals(VOLUME_CHANGED_ACTION)) {
+ final int streamType = intent.getIntExtra(EXTRA_VOLUME_STREAM_TYPE, -1);
+ if (streamType == PLAYBACK_STREAM) {
+ final int volume = intent.getIntExtra(EXTRA_VOLUME_STREAM_VALUE, -1);
+ if (volume >= 0 && volume != mLastReportedVolume) {
+ publishRoutes();
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Jellybean implementation.
+ */
+ // @RequiresApi(16)
+ static class JellybeanImpl extends SystemMediaRouteProvider
+ implements MediaRouterJellybean.Callback, MediaRouterJellybean.VolumeCallback {
+ private static final ArrayList<IntentFilter> LIVE_AUDIO_CONTROL_FILTERS;
+ static {
+ IntentFilter f = new IntentFilter();
+ f.addCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO);
+
+ LIVE_AUDIO_CONTROL_FILTERS = new ArrayList<IntentFilter>();
+ LIVE_AUDIO_CONTROL_FILTERS.add(f);
+ }
+
+ private static final ArrayList<IntentFilter> LIVE_VIDEO_CONTROL_FILTERS;
+ static {
+ IntentFilter f = new IntentFilter();
+ f.addCategory(MediaControlIntent.CATEGORY_LIVE_VIDEO);
+
+ LIVE_VIDEO_CONTROL_FILTERS = new ArrayList<IntentFilter>();
+ LIVE_VIDEO_CONTROL_FILTERS.add(f);
+ }
+
+ private final SyncCallback mSyncCallback;
+
+ protected final Object mRouterObj;
+ protected final Object mCallbackObj;
+ protected final Object mVolumeCallbackObj;
+ protected final Object mUserRouteCategoryObj;
+ protected int mRouteTypes;
+ protected boolean mActiveScan;
+ protected boolean mCallbackRegistered;
+
+ // Maintains an association from framework routes to support library routes.
+ // Note that we cannot use the tag field for this because an application may
+ // have published its own user routes to the framework media router and already
+ // used the tag for its own purposes.
+ protected final ArrayList<SystemRouteRecord> mSystemRouteRecords =
+ new ArrayList<SystemRouteRecord>();
+
+ // Maintains an association from support library routes to framework routes.
+ protected final ArrayList<UserRouteRecord> mUserRouteRecords =
+ new ArrayList<UserRouteRecord>();
+
+ private MediaRouterJellybean.SelectRouteWorkaround mSelectRouteWorkaround;
+ private MediaRouterJellybean.GetDefaultRouteWorkaround mGetDefaultRouteWorkaround;
+
+ public JellybeanImpl(Context context, SyncCallback syncCallback) {
+ super(context);
+ mSyncCallback = syncCallback;
+ mRouterObj = MediaRouterJellybean.getMediaRouter(context);
+ mCallbackObj = createCallbackObj();
+ mVolumeCallbackObj = createVolumeCallbackObj();
+
+ Resources r = ApiHelper.getLibResources(context);
+ mUserRouteCategoryObj = MediaRouterJellybean.createRouteCategory(
+ mRouterObj, r.getString(R.string.mr_user_route_category_name), false);
+
+ updateSystemRoutes();
+ }
+
+ @Override
+ public RouteController onCreateRouteController(String routeId) {
+ int index = findSystemRouteRecordByDescriptorId(routeId);
+ if (index >= 0) {
+ SystemRouteRecord record = mSystemRouteRecords.get(index);
+ return new SystemRouteController(record.mRouteObj);
+ }
+ return null;
+ }
+
+ @Override
+ public void onDiscoveryRequestChanged(MediaRouteDiscoveryRequest request) {
+ int newRouteTypes = 0;
+ boolean newActiveScan = false;
+ if (request != null) {
+ final MediaRouteSelector selector = request.getSelector();
+ final List<String> categories = selector.getControlCategories();
+ final int count = categories.size();
+ for (int i = 0; i < count; i++) {
+ String category = categories.get(i);
+ if (category.equals(MediaControlIntent.CATEGORY_LIVE_AUDIO)) {
+ newRouteTypes |= MediaRouterJellybean.ROUTE_TYPE_LIVE_AUDIO;
+ } else if (category.equals(MediaControlIntent.CATEGORY_LIVE_VIDEO)) {
+ newRouteTypes |= MediaRouterJellybean.ROUTE_TYPE_LIVE_VIDEO;
+ } else {
+ newRouteTypes |= MediaRouterJellybean.ROUTE_TYPE_USER;
+ }
+ }
+ newActiveScan = request.isActiveScan();
+ }
+
+ if (mRouteTypes != newRouteTypes || mActiveScan != newActiveScan) {
+ mRouteTypes = newRouteTypes;
+ mActiveScan = newActiveScan;
+ updateSystemRoutes();
+ }
+ }
+
+ @Override
+ public void onRouteAdded(Object routeObj) {
+ if (addSystemRouteNoPublish(routeObj)) {
+ publishRoutes();
+ }
+ }
+
+ private void updateSystemRoutes() {
+ updateCallback();
+ boolean changed = false;
+ for (Object routeObj : MediaRouterJellybean.getRoutes(mRouterObj)) {
+ changed |= addSystemRouteNoPublish(routeObj);
+ }
+ if (changed) {
+ publishRoutes();
+ }
+ }
+
+ private boolean addSystemRouteNoPublish(Object routeObj) {
+ if (getUserRouteRecord(routeObj) == null
+ && findSystemRouteRecord(routeObj) < 0) {
+ String id = assignRouteId(routeObj);
+ SystemRouteRecord record = new SystemRouteRecord(routeObj, id);
+ updateSystemRouteDescriptor(record);
+ mSystemRouteRecords.add(record);
+ return true;
+ }
+ return false;
+ }
+
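+ // For example, a route named "Living Room TV" might be assigned "ROUTE_5f3c2a81"
+ // (a hash of its name; the value here is illustrative). If a second route hashes
+ // to the same id, the loop below dedupes it to "ROUTE_5f3c2a81_2", then "_3", etc.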
+ private String assignRouteId(Object routeObj) {
+ // TODO: The framework media router should supply a unique route id that
+ // we can use here. For now we use a hash of the route name and take care
+ // to dedupe it.
+ boolean isDefault = (getDefaultRoute() == routeObj);
+ String id = isDefault ? DEFAULT_ROUTE_ID :
+ String.format(Locale.US, "ROUTE_%08x", getRouteName(routeObj).hashCode());
+ if (findSystemRouteRecordByDescriptorId(id) < 0) {
+ return id;
+ }
+ for (int i = 2; ; i++) {
+ String newId = String.format(Locale.US, "%s_%d", id, i);
+ if (findSystemRouteRecordByDescriptorId(newId) < 0) {
+ return newId;
+ }
+ }
+ }
+
+ @Override
+ public void onRouteRemoved(Object routeObj) {
+ if (getUserRouteRecord(routeObj) == null) {
+ int index = findSystemRouteRecord(routeObj);
+ if (index >= 0) {
+ mSystemRouteRecords.remove(index);
+ publishRoutes();
+ }
+ }
+ }
+
+ @Override
+ public void onRouteChanged(Object routeObj) {
+ if (getUserRouteRecord(routeObj) == null) {
+ int index = findSystemRouteRecord(routeObj);
+ if (index >= 0) {
+ SystemRouteRecord record = mSystemRouteRecords.get(index);
+ updateSystemRouteDescriptor(record);
+ publishRoutes();
+ }
+ }
+ }
+
+ @Override
+ public void onRouteVolumeChanged(Object routeObj) {
+ if (getUserRouteRecord(routeObj) == null) {
+ int index = findSystemRouteRecord(routeObj);
+ if (index >= 0) {
+ SystemRouteRecord record = mSystemRouteRecords.get(index);
+ int newVolume = MediaRouterJellybean.RouteInfo.getVolume(routeObj);
+ if (newVolume != record.mRouteDescriptor.getVolume()) {
+ record.mRouteDescriptor =
+ new MediaRouteDescriptor.Builder(record.mRouteDescriptor)
+ .setVolume(newVolume)
+ .build();
+ publishRoutes();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void onRouteSelected(int type, Object routeObj) {
+ if (routeObj != MediaRouterJellybean.getSelectedRoute(mRouterObj,
+ MediaRouterJellybean.ALL_ROUTE_TYPES)) {
+ // The currently selected route has already changed so this callback
+ // is stale. Drop it to prevent getting into sync loops.
+ return;
+ }
+
+ UserRouteRecord userRouteRecord = getUserRouteRecord(routeObj);
+ if (userRouteRecord != null) {
+ userRouteRecord.mRoute.select();
+ } else {
+ // Select the route if it already exists in the compat media router.
+ // If not, we will select it instead when the route is added.
+ int index = findSystemRouteRecord(routeObj);
+ if (index >= 0) {
+ SystemRouteRecord record = mSystemRouteRecords.get(index);
+ mSyncCallback.onSystemRouteSelectedByDescriptorId(record.mRouteDescriptorId);
+ }
+ }
+ }
+
+ @Override
+ public void onRouteUnselected(int type, Object routeObj) {
+ // Nothing to do when a route is unselected.
+ // We only need to handle when a route is selected.
+ }
+
+ @Override
+ public void onRouteGrouped(Object routeObj, Object groupObj, int index) {
+ // Route grouping is deprecated and no longer supported.
+ }
+
+ @Override
+ public void onRouteUngrouped(Object routeObj, Object groupObj) {
+ // Route grouping is deprecated and no longer supported.
+ }
+
+ @Override
+ public void onVolumeSetRequest(Object routeObj, int volume) {
+ UserRouteRecord record = getUserRouteRecord(routeObj);
+ if (record != null) {
+ record.mRoute.requestSetVolume(volume);
+ }
+ }
+
+ @Override
+ public void onVolumeUpdateRequest(Object routeObj, int direction) {
+ UserRouteRecord record = getUserRouteRecord(routeObj);
+ if (record != null) {
+ record.mRoute.requestUpdateVolume(direction);
+ }
+ }
+
+ @Override
+ public void onSyncRouteAdded(MediaRouter.RouteInfo route) {
+ if (route.getProviderInstance() != this) {
+ Object routeObj = MediaRouterJellybean.createUserRoute(
+ mRouterObj, mUserRouteCategoryObj);
+ UserRouteRecord record = new UserRouteRecord(route, routeObj);
+ MediaRouterJellybean.RouteInfo.setTag(routeObj, record);
+ MediaRouterJellybean.UserRouteInfo.setVolumeCallback(routeObj, mVolumeCallbackObj);
+ updateUserRouteProperties(record);
+ mUserRouteRecords.add(record);
+ MediaRouterJellybean.addUserRoute(mRouterObj, routeObj);
+ } else {
+ // If the newly added route is the counterpart of the currently selected
+ // route in the framework media router then ensure it is selected in
+ // the compat media router.
+ Object routeObj = MediaRouterJellybean.getSelectedRoute(
+ mRouterObj, MediaRouterJellybean.ALL_ROUTE_TYPES);
+ int index = findSystemRouteRecord(routeObj);
+ if (index >= 0) {
+ SystemRouteRecord record = mSystemRouteRecords.get(index);
+ if (record.mRouteDescriptorId.equals(route.getDescriptorId())) {
+ route.select();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void onSyncRouteRemoved(MediaRouter.RouteInfo route) {
+ if (route.getProviderInstance() != this) {
+ int index = findUserRouteRecord(route);
+ if (index >= 0) {
+ UserRouteRecord record = mUserRouteRecords.remove(index);
+ MediaRouterJellybean.RouteInfo.setTag(record.mRouteObj, null);
+ MediaRouterJellybean.UserRouteInfo.setVolumeCallback(record.mRouteObj, null);
+ MediaRouterJellybean.removeUserRoute(mRouterObj, record.mRouteObj);
+ }
+ }
+ }
+
+ @Override
+ public void onSyncRouteChanged(MediaRouter.RouteInfo route) {
+ if (route.getProviderInstance() != this) {
+ int index = findUserRouteRecord(route);
+ if (index >= 0) {
+ UserRouteRecord record = mUserRouteRecords.get(index);
+ updateUserRouteProperties(record);
+ }
+ }
+ }
+
+ @Override
+ public void onSyncRouteSelected(MediaRouter.RouteInfo route) {
+ if (!route.isSelected()) {
+ // The currently selected route has already changed so this callback
+ // is stale. Drop it to prevent getting into sync loops.
+ return;
+ }
+
+ if (route.getProviderInstance() != this) {
+ int index = findUserRouteRecord(route);
+ if (index >= 0) {
+ UserRouteRecord record = mUserRouteRecords.get(index);
+ selectRoute(record.mRouteObj);
+ }
+ } else {
+ int index = findSystemRouteRecordByDescriptorId(route.getDescriptorId());
+ if (index >= 0) {
+ SystemRouteRecord record = mSystemRouteRecords.get(index);
+ selectRoute(record.mRouteObj);
+ }
+ }
+ }
+
+ protected void publishRoutes() {
+ MediaRouteProviderDescriptor.Builder builder =
+ new MediaRouteProviderDescriptor.Builder();
+ int count = mSystemRouteRecords.size();
+ for (int i = 0; i < count; i++) {
+ builder.addRoute(mSystemRouteRecords.get(i).mRouteDescriptor);
+ }
+
+ setDescriptor(builder.build());
+ }
+
+ protected int findSystemRouteRecord(Object routeObj) {
+ final int count = mSystemRouteRecords.size();
+ for (int i = 0; i < count; i++) {
+ if (mSystemRouteRecords.get(i).mRouteObj == routeObj) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ protected int findSystemRouteRecordByDescriptorId(String id) {
+ final int count = mSystemRouteRecords.size();
+ for (int i = 0; i < count; i++) {
+ if (mSystemRouteRecords.get(i).mRouteDescriptorId.equals(id)) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ protected int findUserRouteRecord(MediaRouter.RouteInfo route) {
+ final int count = mUserRouteRecords.size();
+ for (int i = 0; i < count; i++) {
+ if (mUserRouteRecords.get(i).mRoute == route) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
+ protected UserRouteRecord getUserRouteRecord(Object routeObj) {
+ Object tag = MediaRouterJellybean.RouteInfo.getTag(routeObj);
+ return tag instanceof UserRouteRecord ? (UserRouteRecord)tag : null;
+ }
+
+ protected void updateSystemRouteDescriptor(SystemRouteRecord record) {
+ // We must always recreate the route descriptor when making any changes
+ // because they are intended to be immutable once published.
+ MediaRouteDescriptor.Builder builder = new MediaRouteDescriptor.Builder(
+ record.mRouteDescriptorId, getRouteName(record.mRouteObj));
+ onBuildSystemRouteDescriptor(record, builder);
+ record.mRouteDescriptor = builder.build();
+ }
+
+ protected String getRouteName(Object routeObj) {
+ // Routes should not have null names but it may happen for badly configured
+ // user routes. We tolerate this by using an empty name string here but
+ // such unnamed routes will be discarded by the media router upstream
+ // (with a log message so we can track down the problem).
+ CharSequence name = MediaRouterJellybean.RouteInfo.getName(routeObj, getContext());
+ return name != null ? name.toString() : "";
+ }
+
+ protected void onBuildSystemRouteDescriptor(SystemRouteRecord record,
+ MediaRouteDescriptor.Builder builder) {
+ int supportedTypes = MediaRouterJellybean.RouteInfo.getSupportedTypes(
+ record.mRouteObj);
+ if ((supportedTypes & MediaRouterJellybean.ROUTE_TYPE_LIVE_AUDIO) != 0) {
+ builder.addControlFilters(LIVE_AUDIO_CONTROL_FILTERS);
+ }
+ if ((supportedTypes & MediaRouterJellybean.ROUTE_TYPE_LIVE_VIDEO) != 0) {
+ builder.addControlFilters(LIVE_VIDEO_CONTROL_FILTERS);
+ }
+
+ builder.setPlaybackType(
+ MediaRouterJellybean.RouteInfo.getPlaybackType(record.mRouteObj));
+ builder.setPlaybackStream(
+ MediaRouterJellybean.RouteInfo.getPlaybackStream(record.mRouteObj));
+ builder.setVolume(
+ MediaRouterJellybean.RouteInfo.getVolume(record.mRouteObj));
+ builder.setVolumeMax(
+ MediaRouterJellybean.RouteInfo.getVolumeMax(record.mRouteObj));
+ builder.setVolumeHandling(
+ MediaRouterJellybean.RouteInfo.getVolumeHandling(record.mRouteObj));
+ }
+
+ protected void updateUserRouteProperties(UserRouteRecord record) {
+ MediaRouterJellybean.UserRouteInfo.setName(
+ record.mRouteObj, record.mRoute.getName());
+ MediaRouterJellybean.UserRouteInfo.setPlaybackType(
+ record.mRouteObj, record.mRoute.getPlaybackType());
+ MediaRouterJellybean.UserRouteInfo.setPlaybackStream(
+ record.mRouteObj, record.mRoute.getPlaybackStream());
+ MediaRouterJellybean.UserRouteInfo.setVolume(
+ record.mRouteObj, record.mRoute.getVolume());
+ MediaRouterJellybean.UserRouteInfo.setVolumeMax(
+ record.mRouteObj, record.mRoute.getVolumeMax());
+ MediaRouterJellybean.UserRouteInfo.setVolumeHandling(
+ record.mRouteObj, record.mRoute.getVolumeHandling());
+ }
+
+ protected void updateCallback() {
+ if (mCallbackRegistered) {
+ mCallbackRegistered = false;
+ MediaRouterJellybean.removeCallback(mRouterObj, mCallbackObj);
+ }
+
+ if (mRouteTypes != 0) {
+ mCallbackRegistered = true;
+ MediaRouterJellybean.addCallback(mRouterObj, mRouteTypes, mCallbackObj);
+ }
+ }
+
+ protected Object createCallbackObj() {
+ return MediaRouterJellybean.createCallback(this);
+ }
+
+ protected Object createVolumeCallbackObj() {
+ return MediaRouterJellybean.createVolumeCallback(this);
+ }
+
+ protected void selectRoute(Object routeObj) {
+ if (mSelectRouteWorkaround == null) {
+ mSelectRouteWorkaround = new MediaRouterJellybean.SelectRouteWorkaround();
+ }
+ mSelectRouteWorkaround.selectRoute(mRouterObj,
+ MediaRouterJellybean.ALL_ROUTE_TYPES, routeObj);
+ }
+
+ @Override
+ protected Object getDefaultRoute() {
+ if (mGetDefaultRouteWorkaround == null) {
+ mGetDefaultRouteWorkaround = new MediaRouterJellybean.GetDefaultRouteWorkaround();
+ }
+ return mGetDefaultRouteWorkaround.getDefaultRoute(mRouterObj);
+ }
+
+ @Override
+ protected Object getSystemRoute(MediaRouter.RouteInfo route) {
+ if (route == null) {
+ return null;
+ }
+ int index = findSystemRouteRecordByDescriptorId(route.getDescriptorId());
+ if (index >= 0) {
+ return mSystemRouteRecords.get(index).mRouteObj;
+ }
+ return null;
+ }
+
+ /**
+ * Represents a route that is provided by the framework media router
+ * and published by this route provider to the support library media router.
+ */
+ protected static final class SystemRouteRecord {
+ public final Object mRouteObj;
+ public final String mRouteDescriptorId;
+ public MediaRouteDescriptor mRouteDescriptor; // assigned immediately after creation
+
+ public SystemRouteRecord(Object routeObj, String id) {
+ mRouteObj = routeObj;
+ mRouteDescriptorId = id;
+ }
+ }
+
+ /**
+ * Represents a route that is provided by the support library media router
+ * and published by this route provider to the framework media router.
+ */
+ protected static final class UserRouteRecord {
+ public final MediaRouter.RouteInfo mRoute;
+ public final Object mRouteObj;
+
+ public UserRouteRecord(MediaRouter.RouteInfo route, Object routeObj) {
+ mRoute = route;
+ mRouteObj = routeObj;
+ }
+ }
+
+ protected static final class SystemRouteController extends RouteController {
+ private final Object mRouteObj;
+
+ public SystemRouteController(Object routeObj) {
+ mRouteObj = routeObj;
+ }
+
+ @Override
+ public void onSetVolume(int volume) {
+ MediaRouterJellybean.RouteInfo.requestSetVolume(mRouteObj, volume);
+ }
+
+ @Override
+ public void onUpdateVolume(int delta) {
+ MediaRouterJellybean.RouteInfo.requestUpdateVolume(mRouteObj, delta);
+ }
+ }
+ }
+
+ /**
+ * Jellybean MR1 implementation.
+ */
+ // @RequiresApi(17)
+ private static class JellybeanMr1Impl extends JellybeanImpl
+ implements MediaRouterJellybeanMr1.Callback {
+ private MediaRouterJellybeanMr1.ActiveScanWorkaround mActiveScanWorkaround;
+ private MediaRouterJellybeanMr1.IsConnectingWorkaround mIsConnectingWorkaround;
+
+ public JellybeanMr1Impl(Context context, SyncCallback syncCallback) {
+ super(context, syncCallback);
+ }
+
+ @Override
+ public void onRoutePresentationDisplayChanged(Object routeObj) {
+ int index = findSystemRouteRecord(routeObj);
+ if (index >= 0) {
+ SystemRouteRecord record = mSystemRouteRecords.get(index);
+ Display newPresentationDisplay =
+ MediaRouterJellybeanMr1.RouteInfo.getPresentationDisplay(routeObj);
+ int newPresentationDisplayId = (newPresentationDisplay != null
+ ? newPresentationDisplay.getDisplayId() : -1);
+ if (newPresentationDisplayId
+ != record.mRouteDescriptor.getPresentationDisplayId()) {
+ record.mRouteDescriptor =
+ new MediaRouteDescriptor.Builder(record.mRouteDescriptor)
+ .setPresentationDisplayId(newPresentationDisplayId)
+ .build();
+ publishRoutes();
+ }
+ }
+ }
+
+ @Override
+ protected void onBuildSystemRouteDescriptor(SystemRouteRecord record,
+ MediaRouteDescriptor.Builder builder) {
+ super.onBuildSystemRouteDescriptor(record, builder);
+
+ if (!MediaRouterJellybeanMr1.RouteInfo.isEnabled(record.mRouteObj)) {
+ builder.setEnabled(false);
+ }
+
+ if (isConnecting(record)) {
+ builder.setConnecting(true);
+ }
+
+ Display presentationDisplay =
+ MediaRouterJellybeanMr1.RouteInfo.getPresentationDisplay(record.mRouteObj);
+ if (presentationDisplay != null) {
+ builder.setPresentationDisplayId(presentationDisplay.getDisplayId());
+ }
+ }
+
+ @Override
+ protected void updateCallback() {
+ super.updateCallback();
+
+ if (mActiveScanWorkaround == null) {
+ mActiveScanWorkaround = new MediaRouterJellybeanMr1.ActiveScanWorkaround(
+ getContext(), getHandler());
+ }
+ mActiveScanWorkaround.setActiveScanRouteTypes(mActiveScan ? mRouteTypes : 0);
+ }
+
+ @Override
+ protected Object createCallbackObj() {
+ return MediaRouterJellybeanMr1.createCallback(this);
+ }
+
+ protected boolean isConnecting(SystemRouteRecord record) {
+ if (mIsConnectingWorkaround == null) {
+ mIsConnectingWorkaround = new MediaRouterJellybeanMr1.IsConnectingWorkaround();
+ }
+ return mIsConnectingWorkaround.isConnecting(record.mRouteObj);
+ }
+ }
+
+ /**
+ * Jellybean MR2 implementation.
+ */
+ // @RequiresApi(18)
+ private static class JellybeanMr2Impl extends JellybeanMr1Impl {
+ public JellybeanMr2Impl(Context context, SyncCallback syncCallback) {
+ super(context, syncCallback);
+ }
+
+ @Override
+ protected void onBuildSystemRouteDescriptor(SystemRouteRecord record,
+ MediaRouteDescriptor.Builder builder) {
+ super.onBuildSystemRouteDescriptor(record, builder);
+
+ CharSequence description =
+ MediaRouterJellybeanMr2.RouteInfo.getDescription(record.mRouteObj);
+ if (description != null) {
+ builder.setDescription(description.toString());
+ }
+ }
+
+ @Override
+ protected void selectRoute(Object routeObj) {
+ MediaRouterJellybean.selectRoute(mRouterObj,
+ MediaRouterJellybean.ALL_ROUTE_TYPES, routeObj);
+ }
+
+ @Override
+ protected Object getDefaultRoute() {
+ return MediaRouterJellybeanMr2.getDefaultRoute(mRouterObj);
+ }
+
+ @Override
+ protected void updateUserRouteProperties(UserRouteRecord record) {
+ super.updateUserRouteProperties(record);
+
+ MediaRouterJellybeanMr2.UserRouteInfo.setDescription(
+ record.mRouteObj, record.mRoute.getDescription());
+ }
+
+ @Override
+ protected void updateCallback() {
+ if (mCallbackRegistered) {
+ MediaRouterJellybean.removeCallback(mRouterObj, mCallbackObj);
+ }
+
+ mCallbackRegistered = true;
+ MediaRouterJellybeanMr2.addCallback(mRouterObj, mRouteTypes, mCallbackObj,
+ MediaRouter.CALLBACK_FLAG_UNFILTERED_EVENTS
+ | (mActiveScan ? MediaRouter.CALLBACK_FLAG_PERFORM_ACTIVE_SCAN : 0));
+ }
+
+ @Override
+ protected boolean isConnecting(SystemRouteRecord record) {
+ return MediaRouterJellybeanMr2.RouteInfo.isConnecting(record.mRouteObj);
+ }
+ }
+
+ /**
+ * Api24 implementation.
+ */
+ // @RequiresApi(24)
+ private static class Api24Impl extends JellybeanMr2Impl {
+ public Api24Impl(Context context, SyncCallback syncCallback) {
+ super(context, syncCallback);
+ }
+
+ @Override
+ protected void onBuildSystemRouteDescriptor(SystemRouteRecord record,
+ MediaRouteDescriptor.Builder builder) {
+ super.onBuildSystemRouteDescriptor(record, builder);
+
+ builder.setDeviceType(MediaRouterApi24.RouteInfo.getDeviceType(record.mRouteObj));
+ }
+ }
+}
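This provider is not used directly by applications; it is consumed through the (forked) compat MediaRouter, whose discovery requests drive onDiscoveryRequestChanged() above. As a rough sketch, assuming this fork keeps the support library MediaRouter surface (MediaRouter.getInstance, addCallback, MediaRouteSelector.Builder), a client interested in the live audio/video routes published here might register:

    // Illustrative only; API names are assumed to match the support library this code was forked from.
    MediaRouteSelector selector = new MediaRouteSelector.Builder()
            .addControlCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO)
            .addControlCategory(MediaControlIntent.CATEGORY_LIVE_VIDEO)
            .build();

    MediaRouter router = MediaRouter.getInstance(context);
    router.addCallback(selector, new MediaRouter.Callback() {
        @Override
        public void onRouteSelected(MediaRouter router, MediaRouter.RouteInfo route) {
            // The DEFAULT_ROUTE published by this provider is what gets selected
            // when no other route is chosen.
        }
    }, MediaRouter.CALLBACK_FLAG_REQUEST_DISCOVERY);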
diff --git a/packages/MediaComponents/src/com/android/support/mediarouter/media/package.html b/packages/MediaComponents/src/com/android/support/mediarouter/media/package.html
new file mode 100644
index 0000000..be2aaf2
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/support/mediarouter/media/package.html
@@ -0,0 +1,9 @@
+<html>
+
+<body>
+
+<p>Contains APIs that control the routing of media channels and streams from the current device
+ to external speakers and destination devices.</p>
+
+</body>
+</html>
diff --git a/packages/MediaComponents/src/com/android/widget/BaseLayout.java b/packages/MediaComponents/src/com/android/widget/BaseLayout.java
new file mode 100644
index 0000000..fb6471d
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/widget/BaseLayout.java
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.graphics.drawable.Drawable;
+import android.media.update.ViewGroupProvider;
+import android.util.AttributeSet;
+import android.view.View;
+import android.view.ViewGroup;
+import android.view.ViewGroup.LayoutParams;
+import android.view.ViewGroup.MarginLayoutParams;
+
+import java.util.ArrayList;
+
+public class BaseLayout extends ViewGroupImpl {
+ private final ViewGroup mInstance;
+ private final ViewGroupProvider mSuperProvider;
+ private final ViewGroupProvider mPrivateProvider;
+
+ private final ArrayList<View> mMatchParentChildren = new ArrayList<>(1);
+
+ public BaseLayout(ViewGroup instance,
+ ViewGroupProvider superProvider, ViewGroupProvider privateProvider) {
+ super(instance, superProvider, privateProvider);
+ mInstance = instance;
+ mSuperProvider = superProvider;
+ mPrivateProvider = privateProvider;
+ }
+
+ @Override
+ public boolean checkLayoutParams_impl(LayoutParams p) {
+ return p instanceof MarginLayoutParams;
+ }
+
+ @Override
+ public LayoutParams generateDefaultLayoutParams_impl() {
+ return new MarginLayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT);
+ }
+
+ @Override
+ public LayoutParams generateLayoutParams_impl(AttributeSet attrs) {
+ return new MarginLayoutParams(mInstance.getContext(), attrs);
+ }
+
+ @Override
+ public LayoutParams generateLayoutParams_impl(LayoutParams lp) {
+ if (lp instanceof MarginLayoutParams) {
+ return lp;
+ }
+ return new MarginLayoutParams(lp);
+ }
+
+ @Override
+ public void onMeasure_impl(int widthMeasureSpec, int heightMeasureSpec) {
+ int count = mInstance.getChildCount();
+
+ final boolean measureMatchParentChildren =
+ View.MeasureSpec.getMode(widthMeasureSpec) != View.MeasureSpec.EXACTLY ||
+ View.MeasureSpec.getMode(heightMeasureSpec) != View.MeasureSpec.EXACTLY;
+ mMatchParentChildren.clear();
+
+ int maxHeight = 0;
+ int maxWidth = 0;
+ int childState = 0;
+
+ for (int i = 0; i < count; i++) {
+ final View child = mInstance.getChildAt(i);
+ if (child.getVisibility() != View.GONE) {
+ mPrivateProvider.measureChildWithMargins_impl(
+ child, widthMeasureSpec, 0, heightMeasureSpec, 0);
+ final MarginLayoutParams lp = (MarginLayoutParams) child.getLayoutParams();
+ maxWidth = Math.max(maxWidth,
+ child.getMeasuredWidth() + lp.leftMargin + lp.rightMargin);
+ maxHeight = Math.max(maxHeight,
+ child.getMeasuredHeight() + lp.topMargin + lp.bottomMargin);
+ childState = childState | child.getMeasuredState();
+ if (measureMatchParentChildren) {
+ if (lp.width == LayoutParams.MATCH_PARENT ||
+ lp.height == LayoutParams.MATCH_PARENT) {
+ mMatchParentChildren.add(child);
+ }
+ }
+ }
+ }
+
+ // Account for padding too
+ maxWidth += getPaddingLeftWithForeground() + getPaddingRightWithForeground();
+ maxHeight += getPaddingTopWithForeground() + getPaddingBottomWithForeground();
+
+ // Check against our minimum height and width
+ maxHeight = Math.max(maxHeight, mPrivateProvider.getSuggestedMinimumHeight_impl());
+ maxWidth = Math.max(maxWidth, mPrivateProvider.getSuggestedMinimumWidth_impl());
+
+ // Check against our foreground's minimum height and width
+ final Drawable drawable = mInstance.getForeground();
+ if (drawable != null) {
+ maxHeight = Math.max(maxHeight, drawable.getMinimumHeight());
+ maxWidth = Math.max(maxWidth, drawable.getMinimumWidth());
+ }
+
+ mPrivateProvider.setMeasuredDimension_impl(
+ mInstance.resolveSizeAndState(maxWidth, widthMeasureSpec, childState),
+ mInstance.resolveSizeAndState(maxHeight, heightMeasureSpec,
+ childState << View.MEASURED_HEIGHT_STATE_SHIFT));
+
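+ // Second measure pass: children that requested MATCH_PARENT are re-measured with
+ // EXACTLY specs derived from this view's final measured size (minus padding and
+ // margins), mirroring FrameLayout's measure behavior.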
+ count = mMatchParentChildren.size();
+ if (count > 1) {
+ for (int i = 0; i < count; i++) {
+ final View child = mMatchParentChildren.get(i);
+ final MarginLayoutParams lp = (MarginLayoutParams) child.getLayoutParams();
+
+ final int childWidthMeasureSpec;
+ if (lp.width == LayoutParams.MATCH_PARENT) {
+ final int width = Math.max(0, mInstance.getMeasuredWidth()
+ - getPaddingLeftWithForeground() - getPaddingRightWithForeground()
+ - lp.leftMargin - lp.rightMargin);
+ childWidthMeasureSpec = View.MeasureSpec.makeMeasureSpec(
+ width, View.MeasureSpec.EXACTLY);
+ } else {
+ childWidthMeasureSpec = mInstance.getChildMeasureSpec(widthMeasureSpec,
+ getPaddingLeftWithForeground() + getPaddingRightWithForeground() +
+ lp.leftMargin + lp.rightMargin,
+ lp.width);
+ }
+
+ final int childHeightMeasureSpec;
+ if (lp.height == LayoutParams.MATCH_PARENT) {
+ final int height = Math.max(0, mInstance.getMeasuredHeight()
+ - getPaddingTopWithForeground() - getPaddingBottomWithForeground()
+ - lp.topMargin - lp.bottomMargin);
+ childHeightMeasureSpec = View.MeasureSpec.makeMeasureSpec(
+ height, View.MeasureSpec.EXACTLY);
+ } else {
+ childHeightMeasureSpec = mInstance.getChildMeasureSpec(heightMeasureSpec,
+ getPaddingTopWithForeground() + getPaddingBottomWithForeground() +
+ lp.topMargin + lp.bottomMargin,
+ lp.height);
+ }
+
+ child.measure(childWidthMeasureSpec, childHeightMeasureSpec);
+ }
+ }
+ }
+
+ @Override
+ public void onLayout_impl(boolean changed, int left, int top, int right, int bottom) {
+ final int count = mInstance.getChildCount();
+
+ final int parentLeft = getPaddingLeftWithForeground();
+ final int parentRight = right - left - getPaddingRightWithForeground();
+
+ final int parentTop = getPaddingTopWithForeground();
+ final int parentBottom = bottom - top - getPaddingBottomWithForeground();
+
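+ // Center each visible child within the padded bounds, offset by its margins.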
+ for (int i = 0; i < count; i++) {
+ final View child = mInstance.getChildAt(i);
+ if (child.getVisibility() != View.GONE) {
+ final MarginLayoutParams lp = (MarginLayoutParams) child.getLayoutParams();
+
+ final int width = child.getMeasuredWidth();
+ final int height = child.getMeasuredHeight();
+
+ int childLeft;
+ int childTop;
+
+ childLeft = parentLeft + (parentRight - parentLeft - width) / 2 +
+ lp.leftMargin - lp.rightMargin;
+
+ childTop = parentTop + (parentBottom - parentTop - height) / 2 +
+ lp.topMargin - lp.bottomMargin;
+
+ child.layout(childLeft, childTop, childLeft + width, childTop + height);
+ }
+ }
+ }
+
+ @Override
+ public boolean shouldDelayChildPressedState_impl() {
+ return false;
+ }
+
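+ // These helpers follow FrameLayout's padding-with-foreground logic; since there is no
+ // separate foreground padding here, both branches reduce to the view's own padding.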
+ private int getPaddingLeftWithForeground() {
+ return mInstance.isForegroundInsidePadding() ? Math.max(mInstance.getPaddingLeft(), 0) :
+ mInstance.getPaddingLeft() + 0;
+ }
+
+ private int getPaddingRightWithForeground() {
+ return mInstance.isForegroundInsidePadding() ? Math.max(mInstance.getPaddingRight(), 0) :
+ mInstance.getPaddingRight() + 0;
+ }
+
+ private int getPaddingTopWithForeground() {
+ return mInstance.isForegroundInsidePadding() ? Math.max(mInstance.getPaddingTop(), 0) :
+ mInstance.getPaddingTop() + 0;
+ }
+
+ private int getPaddingBottomWithForeground() {
+ return mInstance.isForegroundInsidePadding() ? Math.max(mInstance.getPaddingBottom(), 0) :
+ mInstance.getPaddingBottom() + 0;
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java b/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
new file mode 100644
index 0000000..3aff150
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/widget/MediaControlView2Impl.java
@@ -0,0 +1,1722 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.content.Context;
+import android.content.res.Resources;
+import android.graphics.Point;
+import android.media.MediaMetadata;
+import android.media.session.MediaController;
+import android.media.session.PlaybackState;
+import android.media.SessionToken2;
+import android.media.update.MediaControlView2Provider;
+import android.media.update.ViewGroupProvider;
+import android.os.Bundle;
+import android.support.annotation.Nullable;
+import android.util.AttributeSet;
+import android.util.Log;
+import android.view.Gravity;
+import android.view.MotionEvent;
+import android.view.View;
+import android.view.ViewGroup;
+import android.view.WindowManager;
+import android.widget.AdapterView;
+import android.widget.BaseAdapter;
+import android.widget.Button;
+import android.widget.ImageButton;
+import android.widget.ImageView;
+import android.widget.LinearLayout;
+import android.widget.ListView;
+import android.widget.MediaControlView2;
+import android.widget.ProgressBar;
+import android.widget.PopupWindow;
+import android.widget.RelativeLayout;
+import android.widget.SeekBar;
+import android.widget.SeekBar.OnSeekBarChangeListener;
+import android.widget.TextView;
+
+import com.android.media.update.ApiHelper;
+import com.android.media.update.R;
+import com.android.support.mediarouter.app.MediaRouteButton;
+import com.android.support.mediarouter.media.MediaRouter;
+import com.android.support.mediarouter.media.MediaRouteSelector;
+
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Formatter;
+import java.util.List;
+import java.util.Locale;
+
+public class MediaControlView2Impl extends BaseLayout implements MediaControlView2Provider {
+ private static final String TAG = "MediaControlView2";
+
+ private final MediaControlView2 mInstance;
+
+ static final String ARGUMENT_KEY_FULLSCREEN = "fullScreen";
+
+ // TODO: Move these constants to public api to support custom video view.
+ // TODO: Combine these constants into one regarding TrackInfo.
+ static final String KEY_VIDEO_TRACK_COUNT = "VideoTrackCount";
+ static final String KEY_AUDIO_TRACK_COUNT = "AudioTrackCount";
+ static final String KEY_SUBTITLE_TRACK_COUNT = "SubtitleTrackCount";
+ static final String KEY_PLAYBACK_SPEED = "PlaybackSpeed";
+ static final String KEY_SELECTED_AUDIO_INDEX = "SelectedAudioIndex";
+ static final String KEY_SELECTED_SUBTITLE_INDEX = "SelectedSubtitleIndex";
+ static final String EVENT_UPDATE_TRACK_STATUS = "UpdateTrackStatus";
+
+ // TODO: Remove this once integrating with MediaSession2 & MediaMetadata2
+ static final String KEY_STATE_IS_ADVERTISEMENT = "MediaTypeAdvertisement";
+ static final String EVENT_UPDATE_MEDIA_TYPE_STATUS = "UpdateMediaTypeStatus";
+
+ // String for sending command to show subtitle to MediaSession.
+ static final String COMMAND_SHOW_SUBTITLE = "showSubtitle";
+ // String for sending command to hide subtitle to MediaSession.
+ static final String COMMAND_HIDE_SUBTITLE = "hideSubtitle";
+ // TODO: remove once the implementation is revised
+ public static final String COMMAND_SET_FULLSCREEN = "setFullscreen";
+ // String for sending command to select audio track to MediaSession.
+ static final String COMMAND_SELECT_AUDIO_TRACK = "SelectTrack";
+ // String for sending command to set playback speed to MediaSession.
+ static final String COMMAND_SET_PLAYBACK_SPEED = "SetPlaybackSpeed";
+ // String for sending command to mute audio to MediaSession.
+ static final String COMMAND_MUTE = "Mute";
+ // String for sending command to unmute audio to MediaSession.
+ static final String COMMAND_UNMUTE = "Unmute";
+
+ private static final int SETTINGS_MODE_AUDIO_TRACK = 0;
+ private static final int SETTINGS_MODE_PLAYBACK_SPEED = 1;
+ private static final int SETTINGS_MODE_HELP = 2;
+ private static final int SETTINGS_MODE_SUBTITLE_TRACK = 3;
+ private static final int SETTINGS_MODE_VIDEO_QUALITY = 4;
+ private static final int SETTINGS_MODE_MAIN = 5;
+ private static final int PLAYBACK_SPEED_1x_INDEX = 3;
+
+ private static final int MEDIA_TYPE_DEFAULT = 0;
+ private static final int MEDIA_TYPE_MUSIC = 1;
+ private static final int MEDIA_TYPE_ADVERTISEMENT = 2;
+
+ private static final int SIZE_TYPE_EMBEDDED = 0;
+ private static final int SIZE_TYPE_FULL = 1;
+ // TODO: add support for Minimal size type.
+ private static final int SIZE_TYPE_MINIMAL = 2;
+
+ private static final int MAX_PROGRESS = 1000;
+ private static final int DEFAULT_PROGRESS_UPDATE_TIME_MS = 1000;
+ private static final int REWIND_TIME_MS = 10000;
+ private static final int FORWARD_TIME_MS = 30000;
+ private static final int AD_SKIP_WAIT_TIME_MS = 5000;
+ private static final int RESOURCE_NON_EXISTENT = -1;
+ private static final String RESOURCE_EMPTY = "";
+
+ private Resources mResources;
+ private MediaController mController;
+ private MediaController.TransportControls mControls;
+ private PlaybackState mPlaybackState;
+ private MediaMetadata mMetadata;
+ private int mDuration;
+ private int mPrevState;
+ private int mPrevWidth;
+ private int mPrevHeight;
+ private int mOriginalLeftBarWidth;
+ private int mVideoTrackCount;
+ private int mAudioTrackCount;
+ private int mSubtitleTrackCount;
+ private int mSettingsMode;
+ private int mSelectedSubtitleTrackIndex;
+ private int mSelectedAudioTrackIndex;
+ private int mSelectedVideoQualityIndex;
+ private int mSelectedSpeedIndex;
+ private int mEmbeddedSettingsItemWidth;
+ private int mFullSettingsItemWidth;
+ private int mEmbeddedSettingsItemHeight;
+ private int mFullSettingsItemHeight;
+ private int mSettingsWindowMargin;
+ private int mMediaType;
+ private int mSizeType;
+ private long mPlaybackActions;
+ private boolean mDragging;
+ private boolean mIsFullScreen;
+ private boolean mOverflowExpanded;
+ private boolean mIsStopped;
+ private boolean mSubtitleIsEnabled;
+ private boolean mSeekAvailable;
+ private boolean mIsAdvertisement;
+ private boolean mIsMute;
+ private boolean mNeedUXUpdate;
+
+ // Relating to Title Bar View
+ private ViewGroup mRoot;
+ private View mTitleBar;
+ private TextView mTitleView;
+ private View mAdExternalLink;
+ private ImageButton mBackButton;
+ private MediaRouteButton mRouteButton;
+ private MediaRouteSelector mRouteSelector;
+
+ // Relating to Center View
+ private ViewGroup mCenterView;
+ private View mTransportControls;
+ private ImageButton mPlayPauseButton;
+ private ImageButton mFfwdButton;
+ private ImageButton mRewButton;
+ private ImageButton mNextButton;
+ private ImageButton mPrevButton;
+
+ // Relating to Minimal Extra View
+ private LinearLayout mMinimalExtraView;
+
+ // Relating to Progress Bar View
+ private ProgressBar mProgress;
+ private View mProgressBuffer;
+
+ // Relating to Bottom Bar View
+ private ViewGroup mBottomBar;
+
+ // Relating to Bottom Bar Left View
+ private ViewGroup mBottomBarLeftView;
+ private ViewGroup mTimeView;
+ private TextView mEndTime;
+ private TextView mCurrentTime;
+ private TextView mAdSkipView;
+ private StringBuilder mFormatBuilder;
+ private Formatter mFormatter;
+
+ // Relating to Bottom Bar Right View
+ private ViewGroup mBottomBarRightView;
+ private ViewGroup mBasicControls;
+ private ViewGroup mExtraControls;
+ private ViewGroup mCustomButtons;
+ private ImageButton mSubtitleButton;
+ private ImageButton mFullScreenButton;
+ private ImageButton mOverflowButtonRight;
+ private ImageButton mOverflowButtonLeft;
+ private ImageButton mMuteButton;
+ private ImageButton mVideoQualityButton;
+ private ImageButton mSettingsButton;
+ private TextView mAdRemainingView;
+
+ // Relating to Settings List View
+ private ListView mSettingsListView;
+ private PopupWindow mSettingsWindow;
+ private SettingsAdapter mSettingsAdapter;
+ private SubSettingsAdapter mSubSettingsAdapter;
+ private List<String> mSettingsMainTextsList;
+ private List<String> mSettingsSubTextsList;
+ private List<Integer> mSettingsIconIdsList;
+ private List<String> mSubtitleDescriptionsList;
+ private List<String> mAudioTrackList;
+ private List<String> mVideoQualityList;
+ private List<String> mPlaybackSpeedTextList;
+ private List<Float> mPlaybackSpeedList;
+
+ public MediaControlView2Impl(MediaControlView2 instance,
+ ViewGroupProvider superProvider, ViewGroupProvider privateProvider) {
+ super(instance, superProvider, privateProvider);
+ mInstance = instance;
+ }
+
+ @Override
+ public void initialize(@Nullable AttributeSet attrs, int defStyleAttr, int defStyleRes) {
+ mResources = ApiHelper.getLibResources(mInstance.getContext());
+ // Inflate MediaControlView2 from XML
+ mRoot = makeControllerView();
+ mInstance.addView(mRoot);
+ }
+
+ @Override
+ public void setMediaSessionToken_impl(SessionToken2 token) {
+ // TODO: implement this
+ }
+
+ @Override
+ public void setOnFullScreenListener_impl(MediaControlView2.OnFullScreenListener l) {
+ // TODO: implement this
+ }
+
+ @Override
+ public void setController_impl(MediaController controller) {
+ mController = controller;
+ if (controller != null) {
+ mControls = controller.getTransportControls();
+ // Initialize mPlaybackState and mMetadata from the current MediaSession state, since
+ // they may be needed before the callback is invoked.
+ mPlaybackState = mController.getPlaybackState();
+ mMetadata = mController.getMetadata();
+ updateDuration();
+ updateTitle();
+
+ mController.registerCallback(new MediaControllerCallback());
+ }
+ }
+
+ @Override
+ public void setButtonVisibility_impl(int button, int visibility) {
+ // TODO: add member variables for Fast-Forward/Previous/Rewind buttons to save their
+ // visibility so that it is not overridden inside updateLayout().
+ switch (button) {
+ case MediaControlView2.BUTTON_PLAY_PAUSE:
+ if (mPlayPauseButton != null && canPause()) {
+ mPlayPauseButton.setVisibility(visibility);
+ }
+ break;
+ case MediaControlView2.BUTTON_FFWD:
+ if (mFfwdButton != null && canSeekForward()) {
+ mFfwdButton.setVisibility(visibility);
+ }
+ break;
+ case MediaControlView2.BUTTON_REW:
+ if (mRewButton != null && canSeekBackward()) {
+ mRewButton.setVisibility(visibility);
+ }
+ break;
+ case MediaControlView2.BUTTON_NEXT:
+ if (mNextButton != null) {
+ mNextButton.setVisibility(visibility);
+ }
+ break;
+ case MediaControlView2.BUTTON_PREV:
+ if (mPrevButton != null) {
+ mPrevButton.setVisibility(visibility);
+ }
+ break;
+ case MediaControlView2.BUTTON_SUBTITLE:
+ if (mSubtitleButton != null && mSubtitleTrackCount > 0) {
+ mSubtitleButton.setVisibility(visibility);
+ }
+ break;
+ case MediaControlView2.BUTTON_FULL_SCREEN:
+ if (mFullScreenButton != null) {
+ mFullScreenButton.setVisibility(visibility);
+ }
+ break;
+ case MediaControlView2.BUTTON_OVERFLOW:
+ if (mOverflowButtonRight != null) {
+ mOverflowButtonRight.setVisibility(visibility);
+ }
+ break;
+ case MediaControlView2.BUTTON_MUTE:
+ if (mMuteButton != null) {
+ mMuteButton.setVisibility(visibility);
+ }
+ break;
+ case MediaControlView2.BUTTON_SETTINGS:
+ if (mSettingsButton != null) {
+ mSettingsButton.setVisibility(visibility);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ @Override
+ public void requestPlayButtonFocus_impl() {
+ if (mPlayPauseButton != null) {
+ mPlayPauseButton.requestFocus();
+ }
+ }
+
+ @Override
+ public CharSequence getAccessibilityClassName_impl() {
+ return MediaControlView2.class.getName();
+ }
+
+ @Override
+ public boolean onTouchEvent_impl(MotionEvent ev) {
+ return false;
+ }
+
+ // TODO: Should this function be removed?
+ @Override
+ public boolean onTrackballEvent_impl(MotionEvent ev) {
+ return false;
+ }
+
+ @Override
+ public void onMeasure_impl(int widthMeasureSpec, int heightMeasureSpec) {
+ super.onMeasure_impl(widthMeasureSpec, heightMeasureSpec);
+
+ // Update layout when this view's width changes in order to avoid any UI overlap between
+ // transport controls.
+ if (mPrevWidth != mInstance.getMeasuredWidth()
+ || mPrevHeight != mInstance.getMeasuredHeight() || mNeedUXUpdate) {
+ // Dismiss SettingsWindow if it is showing.
+ mSettingsWindow.dismiss();
+
+ // These views may not have been initialized yet.
+ if (mTransportControls.getWidth() == 0 || mTimeView.getWidth() == 0) {
+ return;
+ }
+
+ int currWidth = mInstance.getMeasuredWidth();
+ int currHeight = mInstance.getMeasuredHeight();
+ WindowManager manager = (WindowManager) mInstance.getContext().getApplicationContext()
+ .getSystemService(Context.WINDOW_SERVICE);
+ Point screenSize = new Point();
+ manager.getDefaultDisplay().getSize(screenSize);
+ int screenWidth = screenSize.x;
+ int screenHeight = screenSize.y;
+ int fullIconSize = mResources.getDimensionPixelSize(R.dimen.mcv2_full_icon_size);
+ int embeddedIconSize = mResources.getDimensionPixelSize(
+ R.dimen.mcv2_embedded_icon_size);
+ int marginSize = mResources.getDimensionPixelSize(R.dimen.mcv2_icon_margin);
+
+ // TODO: add support for Advertisement Mode.
+ if (mMediaType == MEDIA_TYPE_DEFAULT) {
+ // Max number of icons inside BottomBarRightView for the default media type is 4.
+ int maxIconCount = 4;
+ updateLayout(maxIconCount, fullIconSize, embeddedIconSize, marginSize, currWidth,
+ currHeight, screenWidth, screenHeight);
+ } else if (mMediaType == MEDIA_TYPE_MUSIC) {
+ if (mNeedUXUpdate) {
+ // One-time operation for Music media type
+ mBasicControls.removeView(mMuteButton);
+ mExtraControls.addView(mMuteButton, 0);
+ mVideoQualityButton.setVisibility(View.GONE);
+ if (mFfwdButton != null) {
+ mFfwdButton.setVisibility(View.GONE);
+ }
+ if (mRewButton != null) {
+ mRewButton.setVisibility(View.GONE);
+ }
+ }
+ mNeedUXUpdate = false;
+
+ // Max number of icons inside BottomBarRightView for Music mode is 3.
+ int maxIconCount = 3;
+ updateLayout(maxIconCount, fullIconSize, embeddedIconSize, marginSize, currWidth,
+ currHeight, screenWidth, screenHeight);
+ }
+ mPrevWidth = currWidth;
+ mPrevHeight = currHeight;
+ }
+ // TODO: move this to a different location.
+ // Update title bar parameters in order to avoid overlap between title view and the right
+ // side of the title bar.
+ updateTitleBarLayout();
+ }
+
+ @Override
+ public void setEnabled_impl(boolean enabled) {
+ super.setEnabled_impl(enabled);
+
+ // TODO: Merge the below code with disableUnsupportedButtons().
+ if (mPlayPauseButton != null) {
+ mPlayPauseButton.setEnabled(enabled);
+ }
+ if (mFfwdButton != null) {
+ mFfwdButton.setEnabled(enabled);
+ }
+ if (mRewButton != null) {
+ mRewButton.setEnabled(enabled);
+ }
+ if (mNextButton != null) {
+ mNextButton.setEnabled(enabled);
+ }
+ if (mPrevButton != null) {
+ mPrevButton.setEnabled(enabled);
+ }
+ if (mProgress != null) {
+ mProgress.setEnabled(enabled);
+ }
+ disableUnsupportedButtons();
+ }
+
+ @Override
+ public void onVisibilityAggregated_impl(boolean isVisible) {
+ super.onVisibilityAggregated_impl(isVisible);
+
+ if (isVisible) {
+ disableUnsupportedButtons();
+ mInstance.removeCallbacks(mUpdateProgress);
+ mInstance.post(mUpdateProgress);
+ } else {
+ mInstance.removeCallbacks(mUpdateProgress);
+ }
+ }
+
+ public void setRouteSelector(MediaRouteSelector selector) {
+ mRouteSelector = selector;
+ if (mRouteSelector != null && !mRouteSelector.isEmpty()) {
+ mRouteButton.setRouteSelector(selector, MediaRouter.CALLBACK_FLAG_PERFORM_ACTIVE_SCAN);
+ mRouteButton.setVisibility(View.VISIBLE);
+ } else {
+ mRouteButton.setRouteSelector(MediaRouteSelector.EMPTY);
+ mRouteButton.setVisibility(View.GONE);
+ }
+ }
+
+ ///////////////////////////////////////////////////
+ // Protected or private methods
+ ///////////////////////////////////////////////////
+
+ private boolean isPlaying() {
+ if (mPlaybackState != null) {
+ return mPlaybackState.getState() == PlaybackState.STATE_PLAYING;
+ }
+ return false;
+ }
+
+ private int getCurrentPosition() {
+ mPlaybackState = mController.getPlaybackState();
+ if (mPlaybackState != null) {
+ return (int) mPlaybackState.getPosition();
+ }
+ return 0;
+ }
+
+ private int getBufferPercentage() {
+ if (mDuration == 0) {
+ return 0;
+ }
+ mPlaybackState = mController.getPlaybackState();
+ if (mPlaybackState != null) {
+ long bufferedPos = mPlaybackState.getBufferedPosition();
+ return (bufferedPos == -1) ? -1 : (int) (bufferedPos * 100 / mDuration);
+ }
+ return 0;
+ }
+
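+ // The capability checks below fall back to true when no PlaybackState is available yet.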
+ private boolean canPause() {
+ if (mPlaybackState != null) {
+ return (mPlaybackState.getActions() & PlaybackState.ACTION_PAUSE) != 0;
+ }
+ return true;
+ }
+
+ private boolean canSeekBackward() {
+ if (mPlaybackState != null) {
+ return (mPlaybackState.getActions() & PlaybackState.ACTION_REWIND) != 0;
+ }
+ return true;
+ }
+
+ private boolean canSeekForward() {
+ if (mPlaybackState != null) {
+ return (mPlaybackState.getActions() & PlaybackState.ACTION_FAST_FORWARD) != 0;
+ }
+ return true;
+ }
+
+ /**
+ * Create the view that holds the widgets that control playback.
+ * Derived classes can override this to create their own.
+ *
+ * @return The controller view.
+ * @hide This doesn't work as advertised
+ */
+ protected ViewGroup makeControllerView() {
+ ViewGroup root = (ViewGroup) ApiHelper.inflateLibLayout(mInstance.getContext(),
+ R.layout.media_controller);
+ initControllerView(root);
+ return root;
+ }
+
+ private void initControllerView(ViewGroup v) {
+ // Relating to Title Bar View
+ mTitleBar = v.findViewById(R.id.title_bar);
+ mTitleView = v.findViewById(R.id.title_text);
+ mAdExternalLink = v.findViewById(R.id.ad_external_link);
+ mBackButton = v.findViewById(R.id.back);
+ if (mBackButton != null) {
+ mBackButton.setOnClickListener(mBackListener);
+ mBackButton.setVisibility(View.GONE);
+ }
+ mRouteButton = v.findViewById(R.id.cast);
+
+ // Relating to Center View
+ mCenterView = v.findViewById(R.id.center_view);
+ mTransportControls = inflateTransportControls(R.layout.embedded_transport_controls);
+ mCenterView.addView(mTransportControls);
+
+ // Relating to Minimal Extra View
+ mMinimalExtraView = (LinearLayout) v.findViewById(R.id.minimal_extra_view);
+ LinearLayout.LayoutParams params =
+ (LinearLayout.LayoutParams) mMinimalExtraView.getLayoutParams();
+ int iconSize = mResources.getDimensionPixelSize(R.dimen.mcv2_embedded_icon_size);
+ int marginSize = mResources.getDimensionPixelSize(R.dimen.mcv2_icon_margin);
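+ // Negative top margin pulls the minimal extra view up by one icon row (icon size plus
+ // margins), presumably so it overlays the area just above it when made visible.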
+ params.setMargins(0, (iconSize + marginSize * 2) * (-1), 0, 0);
+ mMinimalExtraView.setLayoutParams(params);
+ mMinimalExtraView.setVisibility(View.GONE);
+
+ // Relating to Progress Bar View
+ mProgress = v.findViewById(R.id.progress);
+ if (mProgress != null) {
+ if (mProgress instanceof SeekBar) {
+ SeekBar seeker = (SeekBar) mProgress;
+ seeker.setOnSeekBarChangeListener(mSeekListener);
+ seeker.setProgressDrawable(mResources.getDrawable(R.drawable.custom_progress));
+ seeker.setThumb(mResources.getDrawable(R.drawable.custom_progress_thumb));
+ }
+ mProgress.setMax(MAX_PROGRESS);
+ }
+ mProgressBuffer = v.findViewById(R.id.progress_buffer);
+
+ // Relating to Bottom Bar View
+ mBottomBar = v.findViewById(R.id.bottom_bar);
+
+ // Relating to Bottom Bar Left View
+ mBottomBarLeftView = v.findViewById(R.id.bottom_bar_left);
+ mTimeView = v.findViewById(R.id.time);
+ mEndTime = v.findViewById(R.id.time_end);
+ mCurrentTime = v.findViewById(R.id.time_current);
+ mAdSkipView = v.findViewById(R.id.ad_skip_time);
+ mFormatBuilder = new StringBuilder();
+ mFormatter = new Formatter(mFormatBuilder, Locale.getDefault());
+
+ // Relating to Bottom Bar Right View
+ mBottomBarRightView = v.findViewById(R.id.bottom_bar_right);
+ mBasicControls = v.findViewById(R.id.basic_controls);
+ mExtraControls = v.findViewById(R.id.extra_controls);
+ mCustomButtons = v.findViewById(R.id.custom_buttons);
+ mSubtitleButton = v.findViewById(R.id.subtitle);
+ if (mSubtitleButton != null) {
+ mSubtitleButton.setOnClickListener(mSubtitleListener);
+ }
+ mFullScreenButton = v.findViewById(R.id.fullscreen);
+ if (mFullScreenButton != null) {
+ mFullScreenButton.setOnClickListener(mFullScreenListener);
+ // TODO: Show the Fullscreen button only when it is possible.
+ }
+ mOverflowButtonRight = v.findViewById(R.id.overflow_right);
+ if (mOverflowButtonRight != null) {
+ mOverflowButtonRight.setOnClickListener(mOverflowRightListener);
+ }
+ mOverflowButtonLeft = v.findViewById(R.id.overflow_left);
+ if (mOverflowButtonLeft != null) {
+ mOverflowButtonLeft.setOnClickListener(mOverflowLeftListener);
+ }
+ mMuteButton = v.findViewById(R.id.mute);
+ if (mMuteButton != null) {
+ mMuteButton.setOnClickListener(mMuteButtonListener);
+ }
+ mSettingsButton = v.findViewById(R.id.settings);
+ if (mSettingsButton != null) {
+ mSettingsButton.setOnClickListener(mSettingsButtonListener);
+ }
+ mVideoQualityButton = v.findViewById(R.id.video_quality);
+ if (mVideoQualityButton != null) {
+ mVideoQualityButton.setOnClickListener(mVideoQualityListener);
+ }
+ mAdRemainingView = v.findViewById(R.id.ad_remaining);
+
+ // Relating to Settings List View
+ initializeSettingsLists();
+ mSettingsListView = (ListView) ApiHelper.inflateLibLayout(mInstance.getContext(),
+ R.layout.settings_list);
+ mSettingsAdapter = new SettingsAdapter(mSettingsMainTextsList, mSettingsSubTextsList,
+ mSettingsIconIdsList);
+ mSubSettingsAdapter = new SubSettingsAdapter(null, 0);
+ mSettingsListView.setAdapter(mSettingsAdapter);
+ mSettingsListView.setChoiceMode(ListView.CHOICE_MODE_SINGLE);
+ mSettingsListView.setOnItemClickListener(mSettingsItemClickListener);
+
+ mEmbeddedSettingsItemWidth = mResources.getDimensionPixelSize(
+ R.dimen.mcv2_embedded_settings_width);
+ mFullSettingsItemWidth = mResources.getDimensionPixelSize(R.dimen.mcv2_full_settings_width);
+ mEmbeddedSettingsItemHeight = mResources.getDimensionPixelSize(
+ R.dimen.mcv2_embedded_settings_height);
+ mFullSettingsItemHeight = mResources.getDimensionPixelSize(
+ R.dimen.mcv2_full_settings_height);
+ mSettingsWindowMargin = (-1) * mResources.getDimensionPixelSize(
+ R.dimen.mcv2_settings_offset);
+ mSettingsWindow = new PopupWindow(mSettingsListView, mEmbeddedSettingsItemWidth,
+ ViewGroup.LayoutParams.WRAP_CONTENT, true);
+ }
+
+ /**
+ * Disable pause or seek buttons if the stream cannot be paused or seeked.
+ * This requires the control interface to be a MediaPlayerControlExt
+ */
+ private void disableUnsupportedButtons() {
+ try {
+ if (mPlayPauseButton != null && !canPause()) {
+ mPlayPauseButton.setEnabled(false);
+ }
+ if (mRewButton != null && !canSeekBackward()) {
+ mRewButton.setEnabled(false);
+ }
+ if (mFfwdButton != null && !canSeekForward()) {
+ mFfwdButton.setEnabled(false);
+ }
+ // TODO What we really should do is add a canSeek to the MediaPlayerControl interface;
+ // this scheme can break the case when applications want to allow seek through the
+ // progress bar but disable forward/backward buttons.
+ //
+ // However, currently the flags SEEK_BACKWARD_AVAILABLE, SEEK_FORWARD_AVAILABLE,
+ // and SEEK_AVAILABLE are all (un)set together; as such the aforementioned issue
+ // shouldn't arise in existing applications.
+ if (mProgress != null && !canSeekBackward() && !canSeekForward()) {
+ mProgress.setEnabled(false);
+ }
+ } catch (IncompatibleClassChangeError ex) {
+ // We were given an old version of the interface, that doesn't have
+ // the canPause/canSeekXYZ methods. This is OK, it just means we
+ // assume the media can be paused and seeked, and so we don't disable
+ // the buttons.
+ }
+ }
+
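+ // Refreshes the progress UI while playing; reposts itself so the next update lands on
+ // the next whole-second boundary of the playback position.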
+ private final Runnable mUpdateProgress = new Runnable() {
+ @Override
+ public void run() {
+ int pos = setProgress();
+ boolean isShowing = mInstance.getVisibility() == View.VISIBLE;
+ if (!mDragging && isShowing && isPlaying()) {
+ mInstance.postDelayed(mUpdateProgress,
+ DEFAULT_PROGRESS_UPDATE_TIME_MS - (pos % DEFAULT_PROGRESS_UPDATE_TIME_MS));
+ }
+ }
+ };
+
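+ // Formats a position in milliseconds as "H:MM:SS", or "MM:SS" when under an hour.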
+ private String stringForTime(int timeMs) {
+ int totalSeconds = timeMs / 1000;
+
+ int seconds = totalSeconds % 60;
+ int minutes = (totalSeconds / 60) % 60;
+ int hours = totalSeconds / 3600;
+
+ mFormatBuilder.setLength(0);
+ if (hours > 0) {
+ return mFormatter.format("%d:%02d:%02d", hours, minutes, seconds).toString();
+ } else {
+ return mFormatter.format("%02d:%02d", minutes, seconds).toString();
+ }
+ }
+
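+ // Pushes the current position, buffering state and time labels to the UI and returns the
+ // current position in milliseconds.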
+ private int setProgress() {
+ if (mController == null || mDragging) {
+ return 0;
+ }
+ int positionOnProgressBar = 0;
+ int currentPosition = getCurrentPosition();
+ if (mDuration > 0) {
+ positionOnProgressBar = (int) (MAX_PROGRESS * (long) currentPosition / mDuration);
+ }
+ if (mProgress != null && currentPosition != mDuration) {
+ mProgress.setProgress(positionOnProgressBar);
+ // If the media is a local file, there is no need to set a buffer, so set secondary
+ // progress to maximum.
+ if (getBufferPercentage() < 0) {
+ mProgress.setSecondaryProgress(MAX_PROGRESS);
+ } else {
+ mProgress.setSecondaryProgress(getBufferPercentage() * 10);
+ }
+ }
+
+ if (mEndTime != null) {
+ mEndTime.setText(stringForTime(mDuration));
+ }
+ if (mCurrentTime != null) {
+ mCurrentTime.setText(stringForTime(currentPosition));
+ }
+
+ if (mIsAdvertisement) {
+ // During the first 5 seconds of the advertisement, show the remaining time until
+ // the ad can be skipped.
+ if (mAdSkipView != null) {
+ if (currentPosition <= AD_SKIP_WAIT_TIME_MS) {
+ if (mAdSkipView.getVisibility() == View.GONE) {
+ mAdSkipView.setVisibility(View.VISIBLE);
+ }
+ String skipTimeText = mResources.getString(
+ R.string.MediaControlView2_ad_skip_wait_time,
+ ((AD_SKIP_WAIT_TIME_MS - currentPosition) / 1000 + 1));
+ mAdSkipView.setText(skipTimeText);
+ } else {
+ if (mAdSkipView.getVisibility() == View.VISIBLE) {
+ mAdSkipView.setVisibility(View.GONE);
+ mNextButton.setEnabled(true);
+ mNextButton.clearColorFilter();
+ }
+ }
+ }
+ // Update the remaining number of seconds of the advertisement.
+ if (mAdRemainingView != null) {
+ int remainingTime =
+ (mDuration - currentPosition < 0) ? 0 : (mDuration - currentPosition);
+ String remainingTimeText = mResources.getString(
+ R.string.MediaControlView2_ad_remaining_time,
+ stringForTime(remainingTime));
+ mAdRemainingView.setText(remainingTimeText);
+ }
+ }
+ return currentPosition;
+ }
+
+ private void togglePausePlayState() {
+ if (isPlaying()) {
+ mControls.pause();
+ mPlayPauseButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_play_circle_filled, null));
+ mPlayPauseButton.setContentDescription(
+ mResources.getString(R.string.mcv2_play_button_desc));
+ } else {
+ mControls.play();
+ mPlayPauseButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_pause_circle_filled, null));
+ mPlayPauseButton.setContentDescription(
+ mResources.getString(R.string.mcv2_pause_button_desc));
+ }
+ }
+
+ // There are two scenarios that can trigger the seekbar listener:
+ //
+ // The first is the user using the touchpad to adjust the position of the
+ // seekbar's thumb. In this case onStartTrackingTouch is called followed by
+ // a number of onProgressChanged notifications, concluded by onStopTrackingTouch.
+ // We're setting the field "mDragging" to true for the duration of the dragging
+ // session to avoid jumps in the position in case of ongoing playback.
+ //
+ // The second scenario involves the user operating the scroll ball; in this case
+ // there WON'T BE onStartTrackingTouch/onStopTrackingTouch notifications, and we
+ // simply apply the updated position without suspending regular updates.
+ private final OnSeekBarChangeListener mSeekListener = new OnSeekBarChangeListener() {
+ @Override
+ public void onStartTrackingTouch(SeekBar bar) {
+ if (!mSeekAvailable) {
+ return;
+ }
+
+ mDragging = true;
+
+ // By removing these pending progress messages we make sure
+ // that a) we won't update the progress while the user adjusts
+ // the seekbar and b) once the user is done dragging the thumb
+ // we will post one of these messages to the queue again and
+ // this ensures that there will be exactly one message queued up.
+ mInstance.removeCallbacks(mUpdateProgress);
+
+ // Check if playback is currently stopped. In this case, update the pause button to
+ // show the play image instead of the replay image.
+ if (mIsStopped) {
+ mPlayPauseButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_play_circle_filled, null));
+ mPlayPauseButton.setContentDescription(
+ mResources.getString(R.string.mcv2_play_button_desc));
+ mIsStopped = false;
+ }
+ }
+
+ @Override
+ public void onProgressChanged(SeekBar bar, int progress, boolean fromUser) {
+ if (!mSeekAvailable) {
+ return;
+ }
+ if (!fromUser) {
+ // We're not interested in programmatically generated changes to
+ // the progress bar's position.
+ return;
+ }
+ if (mDuration > 0) {
+ int position = (int) (((long) mDuration * progress) / MAX_PROGRESS);
+ mControls.seekTo(position);
+
+ if (mCurrentTime != null) {
+ mCurrentTime.setText(stringForTime(position));
+ }
+ }
+ }
+
+ @Override
+ public void onStopTrackingTouch(SeekBar bar) {
+ if (!mSeekAvailable) {
+ return;
+ }
+ mDragging = false;
+
+ setProgress();
+
+ // Ensure that progress keeps updating from now on; posting here restarts the update
+ // loop that was suspended while dragging.
+ mInstance.post(mUpdateProgress);
+ }
+ };
+
+ private final View.OnClickListener mPlayPauseListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ togglePausePlayState();
+ }
+ };
+
+ private final View.OnClickListener mRewListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ int pos = getCurrentPosition() - REWIND_TIME_MS;
+ mControls.seekTo(pos);
+ setProgress();
+ }
+ };
+
+ private final View.OnClickListener mFfwdListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ int pos = getCurrentPosition() + FORWARD_TIME_MS;
+ mControls.seekTo(pos);
+ setProgress();
+ }
+ };
+
+ private final View.OnClickListener mNextListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ mControls.skipToNext();
+ }
+ };
+
+ private final View.OnClickListener mPrevListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ mControls.skipToPrevious();
+ }
+ };
+
+ private final View.OnClickListener mBackListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ // TODO: implement
+ }
+ };
+
+ private final View.OnClickListener mSubtitleListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ mSettingsMode = SETTINGS_MODE_SUBTITLE_TRACK;
+ mSubSettingsAdapter.setTexts(mSubtitleDescriptionsList);
+ mSubSettingsAdapter.setCheckPosition(mSelectedSubtitleTrackIndex);
+ displaySettingsWindow(mSubSettingsAdapter);
+ }
+ };
+
+ private final View.OnClickListener mVideoQualityListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ mSettingsMode = SETTINGS_MODE_VIDEO_QUALITY;
+ mSubSettingsAdapter.setTexts(mVideoQualityList);
+ mSubSettingsAdapter.setCheckPosition(mSelectedVideoQualityIndex);
+ displaySettingsWindow(mSubSettingsAdapter);
+ }
+ };
+
+ private final View.OnClickListener mFullScreenListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ final boolean isEnteringFullScreen = !mIsFullScreen;
+ // TODO: Re-arrange the button layouts according to the UX.
+ if (isEnteringFullScreen) {
+ mFullScreenButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_fullscreen_exit, null));
+ } else {
+ mFullScreenButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_fullscreen, null));
+ }
+ Bundle args = new Bundle();
+ args.putBoolean(ARGUMENT_KEY_FULLSCREEN, isEnteringFullScreen);
+ mController.sendCommand(COMMAND_SET_FULLSCREEN, args, null);
+
+ mIsFullScreen = isEnteringFullScreen;
+ }
+ };
+
+ private final View.OnClickListener mOverflowRightListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ mBasicControls.setVisibility(View.GONE);
+ mExtraControls.setVisibility(View.VISIBLE);
+ }
+ };
+
+ private final View.OnClickListener mOverflowLeftListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ mBasicControls.setVisibility(View.VISIBLE);
+ mExtraControls.setVisibility(View.GONE);
+ }
+ };
+
+ private final View.OnClickListener mMuteButtonListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ if (!mIsMute) {
+ mMuteButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_mute, null));
+ mMuteButton.setContentDescription(
+ mResources.getString(R.string.mcv2_muted_button_desc));
+ mIsMute = true;
+ mController.sendCommand(COMMAND_MUTE, null, null);
+ } else {
+ mMuteButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_unmute, null));
+ mMuteButton.setContentDescription(
+ mResources.getString(R.string.mcv2_unmuted_button_desc));
+ mIsMute = false;
+ mController.sendCommand(COMMAND_UNMUTE, null, null);
+ }
+ }
+ };
+
+ private final View.OnClickListener mSettingsButtonListener = new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ mSettingsMode = SETTINGS_MODE_MAIN;
+ mSettingsAdapter.setSubTexts(mSettingsSubTextsList);
+ displaySettingsWindow(mSettingsAdapter);
+ }
+ };
+
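+ // Handles taps in the settings popup: the main list switches to a sub-list (audio track,
+ // playback speed, help), while sub-lists apply the selection via MediaController commands.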
+ private final AdapterView.OnItemClickListener mSettingsItemClickListener
+ = new AdapterView.OnItemClickListener() {
+ @Override
+ public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
+ switch (mSettingsMode) {
+ case SETTINGS_MODE_MAIN:
+ if (position == SETTINGS_MODE_AUDIO_TRACK) {
+ mSubSettingsAdapter.setTexts(mAudioTrackList);
+ mSubSettingsAdapter.setCheckPosition(mSelectedAudioTrackIndex);
+ mSettingsMode = SETTINGS_MODE_AUDIO_TRACK;
+ } else if (position == SETTINGS_MODE_PLAYBACK_SPEED) {
+ mSubSettingsAdapter.setTexts(mPlaybackSpeedTextList);
+ mSubSettingsAdapter.setCheckPosition(mSelectedSpeedIndex);
+ mSettingsMode = SETTINGS_MODE_PLAYBACK_SPEED;
+ } else if (position == SETTINGS_MODE_HELP) {
+ // TODO: implement this.
+ mSettingsWindow.dismiss();
+ return;
+ }
+ displaySettingsWindow(mSubSettingsAdapter);
+ break;
+ case SETTINGS_MODE_AUDIO_TRACK:
+ if (position != mSelectedAudioTrackIndex) {
+ mSelectedAudioTrackIndex = position;
+ if (mAudioTrackCount > 0) {
+ Bundle extra = new Bundle();
+ extra.putInt(KEY_SELECTED_AUDIO_INDEX, position);
+ mController.sendCommand(COMMAND_SELECT_AUDIO_TRACK, extra, null);
+ }
+ mSettingsSubTextsList.set(SETTINGS_MODE_AUDIO_TRACK,
+ mSubSettingsAdapter.getMainText(position));
+ }
+ mSettingsWindow.dismiss();
+ break;
+ case SETTINGS_MODE_PLAYBACK_SPEED:
+ if (position != mSelectedSpeedIndex) {
+ mSelectedSpeedIndex = position;
+ Bundle extra = new Bundle();
+ extra.putFloat(KEY_PLAYBACK_SPEED, mPlaybackSpeedList.get(position));
+ mController.sendCommand(COMMAND_SET_PLAYBACK_SPEED, extra, null);
+ mSettingsSubTextsList.set(SETTINGS_MODE_PLAYBACK_SPEED,
+ mSubSettingsAdapter.getMainText(position));
+ }
+ mSettingsWindow.dismiss();
+ break;
+ case SETTINGS_MODE_HELP:
+ // TODO: implement this.
+ break;
+ case SETTINGS_MODE_SUBTITLE_TRACK:
+ if (position != mSelectedSubtitleTrackIndex) {
+ mSelectedSubtitleTrackIndex = position;
+ if (position > 0) {
+ Bundle extra = new Bundle();
+ extra.putInt(KEY_SELECTED_SUBTITLE_INDEX, position - 1);
+ mController.sendCommand(
+ MediaControlView2Impl.COMMAND_SHOW_SUBTITLE, extra, null);
+ mSubtitleButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_subtitle_on, null));
+ mSubtitleButton.setContentDescription(
+ mResources.getString(R.string.mcv2_cc_is_on));
+ mSubtitleIsEnabled = true;
+ } else {
+ mController.sendCommand(
+ MediaControlView2Impl.COMMAND_HIDE_SUBTITLE, null, null);
+ mSubtitleButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_subtitle_off, null));
+ mSubtitleButton.setContentDescription(
+ mResources.getString(R.string.mcv2_cc_is_off));
+
+ mSubtitleIsEnabled = false;
+ }
+ }
+ mSettingsWindow.dismiss();
+ break;
+ case SETTINGS_MODE_VIDEO_QUALITY:
+ // TODO: add support for video quality
+ mSelectedVideoQualityIndex = position;
+ mSettingsWindow.dismiss();
+ break;
+ }
+ }
+ };
+
+ private void updateDuration() {
+ if (mMetadata != null) {
+ if (mMetadata.containsKey(MediaMetadata.METADATA_KEY_DURATION)) {
+ mDuration = (int) mMetadata.getLong(MediaMetadata.METADATA_KEY_DURATION);
+ // update progress bar
+ setProgress();
+ }
+ }
+ }
+
+ private void updateTitle() {
+ if (mMetadata != null) {
+ if (mMetadata.containsKey(MediaMetadata.METADATA_KEY_TITLE)) {
+ mTitleView.setText(mMetadata.getString(MediaMetadata.METADATA_KEY_TITLE));
+ }
+ }
+ }
+
+ // The title bar is made up of two separate LinearLayouts. If the sum of their widths is
+ // greater than the width of the title bar, reduce the width of the left bar (which makes
+ // the TextView that contains the title of the media file shrink).
+ private void updateTitleBarLayout() {
+ if (mTitleBar != null) {
+ int titleBarWidth = mTitleBar.getWidth();
+
+ View leftBar = mTitleBar.findViewById(R.id.title_bar_left);
+ View rightBar = mTitleBar.findViewById(R.id.title_bar_right);
+ int leftBarWidth = leftBar.getWidth();
+ int rightBarWidth = rightBar.getWidth();
+
+ RelativeLayout.LayoutParams params =
+ (RelativeLayout.LayoutParams) leftBar.getLayoutParams();
+ if (leftBarWidth + rightBarWidth > titleBarWidth) {
+ params.width = titleBarWidth - rightBarWidth;
+ mOriginalLeftBarWidth = leftBarWidth;
+ } else if (leftBarWidth + rightBarWidth < titleBarWidth && mOriginalLeftBarWidth != 0) {
+ params.width = mOriginalLeftBarWidth;
+ mOriginalLeftBarWidth = 0;
+ }
+ leftBar.setLayoutParams(params);
+ }
+ }
+
+ private void updateAudioMetadata() {
+ if (mMediaType != MEDIA_TYPE_MUSIC) {
+ return;
+ }
+
+ if (mMetadata != null) {
+ String titleText = "";
+ String artistText = "";
+ if (mMetadata.containsKey(MediaMetadata.METADATA_KEY_TITLE)) {
+ titleText = mMetadata.getString(MediaMetadata.METADATA_KEY_TITLE);
+ } else {
+ titleText = mResources.getString(R.string.mcv2_music_title_unknown_text);
+ }
+
+ if (mMetadata.containsKey(MediaMetadata.METADATA_KEY_ARTIST)) {
+ artistText = mMetadata.getString(MediaMetadata.METADATA_KEY_ARTIST);
+ } else {
+ artistText = mResources.getString(R.string.mcv2_music_artist_unknown_text);
+ }
+
+ // Update title for Embedded size type
+ mTitleView.setText(titleText + " - " + artistText);
+
+ // Set to true to update layout inside onMeasure()
+ mNeedUXUpdate = true;
+ }
+ }
+
+ private void updateLayout() {
+ if (mIsAdvertisement) {
+ mRewButton.setVisibility(View.GONE);
+ mFfwdButton.setVisibility(View.GONE);
+ mPrevButton.setVisibility(View.GONE);
+ mTimeView.setVisibility(View.GONE);
+
+ mAdSkipView.setVisibility(View.VISIBLE);
+ mAdRemainingView.setVisibility(View.VISIBLE);
+ mAdExternalLink.setVisibility(View.VISIBLE);
+
+ mProgress.setEnabled(false);
+ mNextButton.setEnabled(false);
+ mNextButton.setColorFilter(R.color.gray);
+ } else {
+ mRewButton.setVisibility(View.VISIBLE);
+ mFfwdButton.setVisibility(View.VISIBLE);
+ mPrevButton.setVisibility(View.VISIBLE);
+ mTimeView.setVisibility(View.VISIBLE);
+
+ mAdSkipView.setVisibility(View.GONE);
+ mAdRemainingView.setVisibility(View.GONE);
+ mAdExternalLink.setVisibility(View.GONE);
+
+ mProgress.setEnabled(true);
+ mNextButton.setEnabled(true);
+ mNextButton.clearColorFilter();
+ disableUnsupportedButtons();
+ }
+ }
+
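+ // Chooses between the FULL, EMBEDDED and MINIMAL size types by comparing the widths needed
+ // for the transport controls, time view and bottom-bar icons against the current view size.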
+ private void updateLayout(int maxIconCount, int fullIconSize, int embeddedIconSize,
+ int marginSize, int currWidth, int currHeight, int screenWidth, int screenHeight) {
+ int fullBottomBarRightWidthMax = fullIconSize * maxIconCount
+ + marginSize * (maxIconCount * 2);
+ int embeddedBottomBarRightWidthMax = embeddedIconSize * maxIconCount
+ + marginSize * (maxIconCount * 2);
+ int fullWidth = mTransportControls.getWidth() + mTimeView.getWidth()
+ + fullBottomBarRightWidthMax;
+ int embeddedWidth = mTimeView.getWidth() + embeddedBottomBarRightWidthMax;
+ int screenMaxLength = Math.max(screenWidth, screenHeight);
+
+ if (fullWidth > screenMaxLength) {
+ // TODO: screen may be smaller than the length needed for Full size.
+ }
+
+ boolean isFullSize = (mMediaType == MEDIA_TYPE_DEFAULT) ? (currWidth == screenMaxLength) :
+ (currWidth == screenWidth && currHeight == screenHeight);
+
+ if (isFullSize) {
+ if (mSizeType != SIZE_TYPE_FULL) {
+ updateLayoutForSizeChange(SIZE_TYPE_FULL);
+ if (mMediaType == MEDIA_TYPE_MUSIC) {
+ mTitleView.setVisibility(View.GONE);
+ }
+ }
+ } else if (embeddedWidth <= currWidth) {
+ if (mSizeType != SIZE_TYPE_EMBEDDED) {
+ updateLayoutForSizeChange(SIZE_TYPE_EMBEDDED);
+ if (mMediaType == MEDIA_TYPE_MUSIC) {
+ mTitleView.setVisibility(View.VISIBLE);
+ }
+ }
+ } else {
+ if (mSizeType != SIZE_TYPE_MINIMAL) {
+ updateLayoutForSizeChange(SIZE_TYPE_MINIMAL);
+ if (mMediaType == MEDIA_TYPE_MUSIC) {
+ mTitleView.setVisibility(View.GONE);
+ }
+ }
+ }
+ }
+
+ private void updateLayoutForSizeChange(int sizeType) {
+ mSizeType = sizeType;
+ RelativeLayout.LayoutParams timeViewParams =
+ (RelativeLayout.LayoutParams) mTimeView.getLayoutParams();
+ SeekBar seeker = (SeekBar) mProgress;
+ switch (mSizeType) {
+ case SIZE_TYPE_EMBEDDED:
+ // Relating to Title Bar
+ mTitleBar.setVisibility(View.VISIBLE);
+ mBackButton.setVisibility(View.GONE);
+
+ // Relating to Full Screen Button
+ mMinimalExtraView.setVisibility(View.GONE);
+ mFullScreenButton = mBottomBarRightView.findViewById(R.id.fullscreen);
+ mFullScreenButton.setOnClickListener(mFullScreenListener);
+
+ // Relating to Center View
+ mCenterView.removeAllViews();
+ mBottomBarLeftView.removeView(mTransportControls);
+ mBottomBarLeftView.setVisibility(View.GONE);
+ mTransportControls = inflateTransportControls(R.layout.embedded_transport_controls);
+ mCenterView.addView(mTransportControls);
+
+ // Relating to Progress Bar
+ seeker.setThumb(mResources.getDrawable(R.drawable.custom_progress_thumb));
+ mProgressBuffer.setVisibility(View.VISIBLE);
+
+ // Relating to Bottom Bar
+ mBottomBar.setVisibility(View.VISIBLE);
+ if (timeViewParams.getRule(RelativeLayout.LEFT_OF) != 0) {
+ timeViewParams.removeRule(RelativeLayout.LEFT_OF);
+ timeViewParams.addRule(RelativeLayout.RIGHT_OF, R.id.bottom_bar_left);
+ }
+ break;
+ case SIZE_TYPE_FULL:
+ // Relating to Title Bar
+ mTitleBar.setVisibility(View.VISIBLE);
+ mBackButton.setVisibility(View.VISIBLE);
+
+ // Relating to Full Screen Button
+ mMinimalExtraView.setVisibility(View.GONE);
+ mFullScreenButton = mBottomBarRightView.findViewById(R.id.fullscreen);
+ mFullScreenButton.setOnClickListener(mFullScreenListener);
+
+ // Relating to Center View
+ mCenterView.removeAllViews();
+ mBottomBarLeftView.removeView(mTransportControls);
+ mTransportControls = inflateTransportControls(R.layout.full_transport_controls);
+ mBottomBarLeftView.addView(mTransportControls, 0);
+ mBottomBarLeftView.setVisibility(View.VISIBLE);
+
+ // Relating to Progress Bar
+ seeker.setThumb(mResources.getDrawable(R.drawable.custom_progress_thumb));
+ mProgressBuffer.setVisibility(View.VISIBLE);
+
+ // Relating to Bottom Bar
+ mBottomBar.setVisibility(View.VISIBLE);
+ if (timeViewParams.getRule(RelativeLayout.RIGHT_OF) != 0) {
+ timeViewParams.removeRule(RelativeLayout.RIGHT_OF);
+ timeViewParams.addRule(RelativeLayout.LEFT_OF, R.id.bottom_bar_right);
+ }
+ break;
+ case SIZE_TYPE_MINIMAL:
+ // Relating to Title Bar
+ mTitleBar.setVisibility(View.GONE);
+ mBackButton.setVisibility(View.GONE);
+
+ // Relating to Full Screen Button
+ mMinimalExtraView.setVisibility(View.VISIBLE);
+ mFullScreenButton = mMinimalExtraView.findViewById(R.id.fullscreen);
+ mFullScreenButton.setOnClickListener(mFullScreenListener);
+
+ // Relating to Center View
+ mCenterView.removeAllViews();
+ mBottomBarLeftView.removeView(mTransportControls);
+ mTransportControls = inflateTransportControls(R.layout.minimal_transport_controls);
+ mCenterView.addView(mTransportControls);
+
+ // Relating to Progress Bar
+ seeker.setThumb(null);
+ mProgressBuffer.setVisibility(View.GONE);
+
+ // Relating to Bottom Bar
+ mBottomBar.setVisibility(View.GONE);
+ break;
+ }
+ mTimeView.setLayoutParams(timeViewParams);
+
+ if (isPlaying()) {
+ mPlayPauseButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_pause_circle_filled, null));
+ mPlayPauseButton.setContentDescription(
+ mResources.getString(R.string.mcv2_pause_button_desc));
+ } else {
+ mPlayPauseButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_play_circle_filled, null));
+ mPlayPauseButton.setContentDescription(
+ mResources.getString(R.string.mcv2_play_button_desc));
+ }
+
+ if (mIsFullScreen) {
+ mFullScreenButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_fullscreen_exit, null));
+ } else {
+ mFullScreenButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_fullscreen, null));
+ }
+ }
+
+ private View inflateTransportControls(int layoutId) {
+ View v = ApiHelper.inflateLibLayout(mInstance.getContext(), layoutId);
+ mPlayPauseButton = v.findViewById(R.id.pause);
+ if (mPlayPauseButton != null) {
+ mPlayPauseButton.requestFocus();
+ mPlayPauseButton.setOnClickListener(mPlayPauseListener);
+ }
+ mFfwdButton = v.findViewById(R.id.ffwd);
+ if (mFfwdButton != null) {
+ mFfwdButton.setOnClickListener(mFfwdListener);
+ if (mMediaType == MEDIA_TYPE_MUSIC) {
+ mFfwdButton.setVisibility(View.GONE);
+ }
+ }
+ mRewButton = v.findViewById(R.id.rew);
+ if (mRewButton != null) {
+ mRewButton.setOnClickListener(mRewListener);
+ if (mMediaType == MEDIA_TYPE_MUSIC) {
+ mRewButton.setVisibility(View.GONE);
+ }
+ }
+ // TODO: Add support for Next and Previous buttons
+ mNextButton = v.findViewById(R.id.next);
+ if (mNextButton != null) {
+ mNextButton.setOnClickListener(mNextListener);
+ mNextButton.setVisibility(View.GONE);
+ }
+ mPrevButton = v.findViewById(R.id.prev);
+ if (mPrevButton != null) {
+ mPrevButton.setOnClickListener(mPrevListener);
+ mPrevButton.setVisibility(View.GONE);
+ }
+ return v;
+ }
+
+ private void initializeSettingsLists() {
+ mSettingsMainTextsList = new ArrayList<String>();
+ mSettingsMainTextsList.add(
+ mResources.getString(R.string.MediaControlView2_audio_track_text));
+ mSettingsMainTextsList.add(
+ mResources.getString(R.string.MediaControlView2_playback_speed_text));
+ mSettingsMainTextsList.add(
+ mResources.getString(R.string.MediaControlView2_help_text));
+
+ mSettingsSubTextsList = new ArrayList<String>();
+ mSettingsSubTextsList.add(
+ mResources.getString(R.string.MediaControlView2_audio_track_none_text));
+ mSettingsSubTextsList.add(
+ mResources.getStringArray(
+ R.array.MediaControlView2_playback_speeds)[PLAYBACK_SPEED_1x_INDEX]);
+ mSettingsSubTextsList.add(RESOURCE_EMPTY);
+
+ mSettingsIconIdsList = new ArrayList<Integer>();
+ mSettingsIconIdsList.add(R.drawable.ic_audiotrack);
+ mSettingsIconIdsList.add(R.drawable.ic_play_circle_filled);
+ mSettingsIconIdsList.add(R.drawable.ic_help);
+
+ mAudioTrackList = new ArrayList<String>();
+ mAudioTrackList.add(
+ mResources.getString(R.string.MediaControlView2_audio_track_none_text));
+
+ mVideoQualityList = new ArrayList<String>();
+ mVideoQualityList.add(
+ mResources.getString(R.string.MediaControlView2_video_quality_auto_text));
+
+ mPlaybackSpeedTextList = new ArrayList<String>(Arrays.asList(
+ mResources.getStringArray(R.array.MediaControlView2_playback_speeds)));
+ // Select the "1x" speed as the default value.
+ mSelectedSpeedIndex = PLAYBACK_SPEED_1x_INDEX;
+
+ mPlaybackSpeedList = new ArrayList<Float>();
+ int[] speeds = mResources.getIntArray(R.array.speed_multiplied_by_100);
+ for (int i = 0; i < speeds.length; i++) {
+ float speed = (float) speeds[i] / 100.0f;
+ mPlaybackSpeedList.add(speed);
+ }
+ }
+
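+ // Shows the settings PopupWindow anchored to the bottom-right corner of this view, sized
+ // to fit every item of the given adapter.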
+ private void displaySettingsWindow(BaseAdapter adapter) {
+ // Set Adapter
+ mSettingsListView.setAdapter(adapter);
+
+ // Set width of window
+ int itemWidth = (mSizeType == SIZE_TYPE_EMBEDDED)
+ ? mEmbeddedSettingsItemWidth : mFullSettingsItemWidth;
+ mSettingsWindow.setWidth(itemWidth);
+
+ // Calculate height of window and show
+ int itemHeight = (mSizeType == SIZE_TYPE_EMBEDDED)
+ ? mEmbeddedSettingsItemHeight : mFullSettingsItemHeight;
+ int totalHeight = adapter.getCount() * itemHeight;
+ mSettingsWindow.dismiss();
+ mSettingsWindow.showAsDropDown(mInstance, mSettingsWindowMargin,
+ mSettingsWindowMargin - totalHeight, Gravity.BOTTOM | Gravity.RIGHT);
+ }
+
+ private class MediaControllerCallback extends MediaController.Callback {
+ @Override
+ public void onPlaybackStateChanged(PlaybackState state) {
+ mPlaybackState = state;
+
+ // Update the pause button depending on the playback state, for two reasons:
+ // 1) the app may customize playback state behavior when its activity is resumed, and
+ // 2) the media file may reach the end of its duration.
+ if (mPlaybackState.getState() != mPrevState) {
+ switch (mPlaybackState.getState()) {
+ case PlaybackState.STATE_PLAYING:
+ mPlayPauseButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_pause_circle_filled, null));
+ mPlayPauseButton.setContentDescription(
+ mResources.getString(R.string.mcv2_pause_button_desc));
+ mInstance.removeCallbacks(mUpdateProgress);
+ mInstance.post(mUpdateProgress);
+ break;
+ case PlaybackState.STATE_PAUSED:
+ mPlayPauseButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_play_circle_filled, null));
+ mPlayPauseButton.setContentDescription(
+ mResources.getString(R.string.mcv2_play_button_desc));
+ break;
+ case PlaybackState.STATE_STOPPED:
+ mPlayPauseButton.setImageDrawable(
+ mResources.getDrawable(R.drawable.ic_replay_circle_filled, null));
+ mPlayPauseButton.setContentDescription(
+ mResources.getString(R.string.mcv2_replay_button_desc));
+ mIsStopped = true;
+ break;
+ default:
+ break;
+ }
+ mPrevState = mPlaybackState.getState();
+ }
+
+ if (mPlaybackActions != mPlaybackState.getActions()) {
+ long newActions = mPlaybackState.getActions();
+ if ((newActions & PlaybackState.ACTION_PAUSE) != 0) {
+ mPlayPauseButton.setVisibility(View.VISIBLE);
+ }
+ if ((newActions & PlaybackState.ACTION_REWIND) != 0
+ && mMediaType != MEDIA_TYPE_MUSIC) {
+ if (mRewButton != null) {
+ mRewButton.setVisibility(View.VISIBLE);
+ }
+ }
+ if ((newActions & PlaybackState.ACTION_FAST_FORWARD) != 0
+ && mMediaType != MEDIA_TYPE_MUSIC) {
+ if (mFfwdButton != null) {
+ mFfwdButton.setVisibility(View.VISIBLE);
+ }
+ }
+ if ((newActions & PlaybackState.ACTION_SEEK_TO) != 0) {
+ mSeekAvailable = true;
+ } else {
+ mSeekAvailable = false;
+ }
+ mPlaybackActions = newActions;
+ }
+
+ // Add buttons if custom actions are present.
+ List<PlaybackState.CustomAction> customActions = mPlaybackState.getCustomActions();
+ mCustomButtons.removeAllViews();
+ if (customActions.size() > 0) {
+ for (PlaybackState.CustomAction action : customActions) {
+ ImageButton button = new ImageButton(mInstance.getContext(),
+ null /* AttributeSet */, 0 /* Style */);
+ // TODO: Apply R.style.BottomBarButton to this button using library context.
+ // Refer Constructor with argument (int defStyleRes) of View.java
+ button.setImageResource(action.getIcon());
+ button.setTooltipText(action.getName());
+ final String actionString = action.getAction().toString();
+ button.setOnClickListener(new View.OnClickListener() {
+ @Override
+ public void onClick(View v) {
+ // TODO: Currently, we are just sending extras that came from session.
+ // Is it the right behavior?
+ mControls.sendCustomAction(actionString, action.getExtras());
+ mInstance.setVisibility(View.VISIBLE);
+ }
+ });
+ mCustomButtons.addView(button);
+ }
+ }
+ }
+
+ @Override
+ public void onMetadataChanged(MediaMetadata metadata) {
+ mMetadata = metadata;
+ updateDuration();
+ updateTitle();
+ updateAudioMetadata();
+ }
+
+ @Override
+ public void onSessionEvent(String event, Bundle extras) {
+ switch (event) {
+ case EVENT_UPDATE_TRACK_STATUS:
+ mVideoTrackCount = extras.getInt(KEY_VIDEO_TRACK_COUNT);
+ // If there are one or more audio tracks, and this information has not been
+ // reflected in the Settings window yet, automatically check the first track.
+ // Otherwise, the Audio Track selection defaults to "None".
+ mAudioTrackCount = extras.getInt(KEY_AUDIO_TRACK_COUNT);
+ mAudioTrackList = new ArrayList<String>();
+ if (mAudioTrackCount > 0) {
+ // TODO: add more text about track info.
+ for (int i = 0; i < mAudioTrackCount; i++) {
+ String track = mResources.getString(
+ R.string.MediaControlView2_audio_track_number_text, i + 1);
+ mAudioTrackList.add(track);
+ }
+ // Change sub text inside the Settings window.
+ mSettingsSubTextsList.set(SETTINGS_MODE_AUDIO_TRACK,
+ mAudioTrackList.get(0));
+ } else {
+ mAudioTrackList.add(mResources.getString(
+ R.string.MediaControlView2_audio_track_none_text));
+ }
+ if (mVideoTrackCount == 0 && mAudioTrackCount > 0) {
+ mMediaType = MEDIA_TYPE_MUSIC;
+ }
+
+ mSubtitleTrackCount = extras.getInt(KEY_SUBTITLE_TRACK_COUNT);
+ mSubtitleDescriptionsList = new ArrayList<String>();
+ if (mSubtitleTrackCount > 0) {
+ mSubtitleButton.setVisibility(View.VISIBLE);
+ mSubtitleButton.setEnabled(true);
+ mSubtitleDescriptionsList.add(mResources.getString(
+ R.string.MediaControlView2_subtitle_off_text));
+ for (int i = 0; i < mSubtitleTrackCount; i++) {
+ String track = mResources.getString(
+ R.string.MediaControlView2_subtitle_track_number_text, i + 1);
+ mSubtitleDescriptionsList.add(track);
+ }
+ } else {
+ mSubtitleButton.setVisibility(View.GONE);
+ mSubtitleButton.setEnabled(false);
+ }
+ break;
+ case EVENT_UPDATE_MEDIA_TYPE_STATUS:
+ boolean newStatus = extras.getBoolean(KEY_STATE_IS_ADVERTISEMENT);
+ if (newStatus != mIsAdvertisement) {
+ mIsAdvertisement = newStatus;
+ updateLayout();
+ }
+ break;
+ }
+ }
+ }
+
+ private class SettingsAdapter extends BaseAdapter {
+ private List<Integer> mIconIds;
+ private List<String> mMainTexts;
+ private List<String> mSubTexts;
+
+ public SettingsAdapter(List<String> mainTexts, @Nullable List<String> subTexts,
+ @Nullable List<Integer> iconIds) {
+ mMainTexts = mainTexts;
+ mSubTexts = subTexts;
+ mIconIds = iconIds;
+ }
+
+ public void updateSubTexts(List<String> subTexts) {
+ mSubTexts = subTexts;
+ notifyDataSetChanged();
+ }
+
+ public String getMainText(int position) {
+ if (mMainTexts != null) {
+ if (position < mMainTexts.size()) {
+ return mMainTexts.get(position);
+ }
+ }
+ return RESOURCE_EMPTY;
+ }
+
+ @Override
+ public int getCount() {
+ return (mMainTexts == null) ? 0 : mMainTexts.size();
+ }
+
+ @Override
+ public long getItemId(int position) {
+ // Auto-generated method stub--does not have any purpose here
+ // TODO: implement this.
+ return 0;
+ }
+
+ @Override
+ public Object getItem(int position) {
+ // Auto-generated method stub--does not have any purpose here
+ // TODO: implement this.
+ return null;
+ }
+
+ @Override
+ public View getView(int position, View convertView, ViewGroup container) {
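+ // Inflate the row layout that matches the current size mode (full vs. embedded settings list).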
+ View row;
+ if (mSizeType == SIZE_TYPE_FULL) {
+ row = ApiHelper.inflateLibLayout(mInstance.getContext(),
+ R.layout.full_settings_list_item);
+ } else {
+ row = ApiHelper.inflateLibLayout(mInstance.getContext(),
+ R.layout.embedded_settings_list_item);
+ }
+ TextView mainTextView = (TextView) row.findViewById(R.id.main_text);
+ TextView subTextView = (TextView) row.findViewById(R.id.sub_text);
+ ImageView iconView = (ImageView) row.findViewById(R.id.icon);
+
+ // Set main text
+ mainTextView.setText(mMainTexts.get(position));
+
+ // Remove sub text and center the main text if sub texts do not exist at all or the sub
+ // text at this particular position is empty.
+ if (mSubTexts == null || mSubTexts.get(position) == RESOURCE_EMPTY) {
+ subTextView.setVisibility(View.GONE);
+ } else {
+ // Otherwise, set sub text.
+ subTextView.setText(mSubTexts.get(position));
+ }
+
+ // Remove main icon and set visibility to gone if icons are set to null or the icon at
+ // this particular position is set to RESOURCE_NON_EXISTENT.
+ if (mIconIds == null || mIconIds.get(position) == RESOURCE_NON_EXISTENT) {
+ iconView.setVisibility(View.GONE);
+ } else {
+ // Otherwise, set main icon.
+ iconView.setImageDrawable(mResources.getDrawable(mIconIds.get(position), null));
+ }
+ return row;
+ }
+
+ public void setSubTexts(List<String> subTexts) {
+ mSubTexts = subTexts;
+ }
+ }
+
+ // TODO: extend this class from SettingsAdapter
+ private class SubSettingsAdapter extends BaseAdapter {
+ private List<String> mTexts;
+ private int mCheckPosition;
+
+ public SubSettingsAdapter(List<String> texts, int checkPosition) {
+ mTexts = texts;
+ mCheckPosition = checkPosition;
+ }
+
+ public String getMainText(int position) {
+ if (mTexts != null) {
+ if (position < mTexts.size()) {
+ return mTexts.get(position);
+ }
+ }
+ return RESOURCE_EMPTY;
+ }
+
+ @Override
+ public int getCount() {
+ return (mTexts == null) ? 0 : mTexts.size();
+ }
+
+ @Override
+ public long getItemId(int position) {
+ // Auto-generated method stub--does not have any purpose here
+ // TODO: implement this.
+ return 0;
+ }
+
+ @Override
+ public Object getItem(int position) {
+ // Auto-generated method stub--does not have any purpose here
+ // TODO: implement this.
+ return null;
+ }
+
+ @Override
+ public View getView(int position, View convertView, ViewGroup container) {
+ View row;
+ if (mSizeType == SIZE_TYPE_FULL) {
+ row = ApiHelper.inflateLibLayout(mInstance.getContext(),
+ R.layout.full_sub_settings_list_item);
+ } else {
+ row = ApiHelper.inflateLibLayout(mInstance.getContext(),
+ R.layout.embedded_sub_settings_list_item);
+ }
+ TextView textView = (TextView) row.findViewById(R.id.text);
+ ImageView checkView = (ImageView) row.findViewById(R.id.check);
+
+ textView.setText(mTexts.get(position));
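+ // Show the check mark only for the currently selected position.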
+ if (position != mCheckPosition) {
+ checkView.setVisibility(View.INVISIBLE);
+ }
+ return row;
+ }
+
+ public void setTexts(List<String> texts) {
+ mTexts = texts;
+ }
+
+ public void setCheckPosition(int checkPosition) {
+ mCheckPosition = checkPosition;
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/widget/SubtitleView.java b/packages/MediaComponents/src/com/android/widget/SubtitleView.java
new file mode 100644
index 0000000..67b2cd1
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/widget/SubtitleView.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.content.Context;
+import android.graphics.Canvas;
+import android.os.Looper;
+import android.support.annotation.Nullable;
+import android.util.AttributeSet;
+import android.widget.FrameLayout;
+
+import com.android.media.subtitle.SubtitleController.Anchor;
+import com.android.media.subtitle.SubtitleTrack.RenderingWidget;
+
+class SubtitleView extends FrameLayout implements Anchor {
+ private static final String TAG = "SubtitleView";
+
+ private RenderingWidget mSubtitleWidget;
+ private RenderingWidget.OnChangedListener mSubtitlesChangedListener;
+
+ public SubtitleView(Context context) {
+ this(context, null);
+ }
+
+ public SubtitleView(Context context, @Nullable AttributeSet attrs) {
+ this(context, attrs, 0);
+ }
+
+ public SubtitleView(Context context, @Nullable AttributeSet attrs, int defStyleAttr) {
+ this(context, attrs, defStyleAttr, 0);
+ }
+
+ public SubtitleView(
+ Context context, @Nullable AttributeSet attrs, int defStyleAttr, int defStyleRes) {
+ super(context, attrs, defStyleAttr, defStyleRes);
+ }
+
+ @Override
+ public void setSubtitleWidget(RenderingWidget subtitleWidget) {
+ if (mSubtitleWidget == subtitleWidget) {
+ return;
+ }
+
+ final boolean attachedToWindow = isAttachedToWindow();
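+ // Detach the previous widget and clear its change listener before swapping in the new one.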
+ if (mSubtitleWidget != null) {
+ if (attachedToWindow) {
+ mSubtitleWidget.onDetachedFromWindow();
+ }
+
+ mSubtitleWidget.setOnChangedListener(null);
+ }
+ mSubtitleWidget = subtitleWidget;
+
+ if (subtitleWidget != null) {
+ if (mSubtitlesChangedListener == null) {
+ mSubtitlesChangedListener = new RenderingWidget.OnChangedListener() {
+ @Override
+ public void onChanged(RenderingWidget renderingWidget) {
+ invalidate();
+ }
+ };
+ }
+
+ setWillNotDraw(false);
+ subtitleWidget.setOnChangedListener(mSubtitlesChangedListener);
+
+ if (attachedToWindow) {
+ subtitleWidget.onAttachedToWindow();
+ requestLayout();
+ }
+ } else {
+ setWillNotDraw(true);
+ }
+
+ invalidate();
+ }
+
+ @Override
+ public Looper getSubtitleLooper() {
+ return Looper.getMainLooper();
+ }
+
+ @Override
+ public void onAttachedToWindow() {
+ super.onAttachedToWindow();
+
+ if (mSubtitleWidget != null) {
+ mSubtitleWidget.onAttachedToWindow();
+ }
+ }
+
+ @Override
+ public void onDetachedFromWindow() {
+ super.onDetachedFromWindow();
+
+ if (mSubtitleWidget != null) {
+ mSubtitleWidget.onDetachedFromWindow();
+ }
+ }
+
+ @Override
+ public void onLayout(boolean changed, int left, int top, int right, int bottom) {
+ super.onLayout(changed, left, top, right, bottom);
+
+ if (mSubtitleWidget != null) {
+ final int width = getWidth() - getPaddingLeft() - getPaddingRight();
+ final int height = getHeight() - getPaddingTop() - getPaddingBottom();
+
+ mSubtitleWidget.setSize(width, height);
+ }
+ }
+
+ @Override
+ public void draw(Canvas canvas) {
+ super.draw(canvas);
+
+ if (mSubtitleWidget != null) {
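+ // Offset by the padding so the widget draws within the content area, then restore the canvas.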
+ final int saveCount = canvas.save();
+ canvas.translate(getPaddingLeft(), getPaddingTop());
+ mSubtitleWidget.draw(canvas);
+ canvas.restoreToCount(saveCount);
+ }
+ }
+
+ @Override
+ public CharSequence getAccessibilityClassName() {
+ return SubtitleView.class.getName();
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
new file mode 100644
index 0000000..fc92e85
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.content.Context;
+import android.graphics.Rect;
+import android.media.MediaPlayer2;
+import android.support.annotation.NonNull;
+import android.util.AttributeSet;
+import android.util.Log;
+import android.view.SurfaceHolder;
+import android.view.SurfaceView;
+import android.view.View;
+
+import static android.widget.VideoView2.VIEW_TYPE_SURFACEVIEW;
+
+class VideoSurfaceView extends SurfaceView implements VideoViewInterface, SurfaceHolder.Callback {
+ private static final String TAG = "VideoSurfaceView";
+ private static final boolean DEBUG = true; // STOPSHIP: Log.isLoggable(TAG, Log.DEBUG);
+ private SurfaceHolder mSurfaceHolder = null;
+ private SurfaceListener mSurfaceListener = null;
+ private MediaPlayer2 mMediaPlayer;
+ // A flag indicating that this view should take over the surface from the old view once ready.
+ private boolean mIsTakingOverOldView;
+ private VideoViewInterface mOldView;
+
+ public VideoSurfaceView(Context context) {
+ this(context, null);
+ }
+
+ public VideoSurfaceView(Context context, AttributeSet attrs) {
+ this(context, attrs, 0);
+ }
+
+ public VideoSurfaceView(Context context, AttributeSet attrs, int defStyleAttr) {
+ this(context, attrs, defStyleAttr, 0);
+ }
+
+ public VideoSurfaceView(Context context, AttributeSet attrs, int defStyleAttr,
+ int defStyleRes) {
+ super(context, attrs, defStyleAttr, defStyleRes);
+ getHolder().addCallback(this);
+ }
+
+ ////////////////////////////////////////////////////
+ // implements VideoViewInterface
+ ////////////////////////////////////////////////////
+
+ @Override
+ public boolean assignSurfaceToMediaPlayer(MediaPlayer2 mp) {
+ Log.d(TAG, "assignSurfaceToMediaPlayer(): mSurfaceHolder: " + mSurfaceHolder);
+ if (mp == null || !hasAvailableSurface()) {
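+ // Surface is not ready.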
+ return false;
+ }
+ mp.setDisplay(mSurfaceHolder);
+ return true;
+ }
+
+ @Override
+ public void setSurfaceListener(SurfaceListener l) {
+ mSurfaceListener = l;
+ }
+
+ @Override
+ public int getViewType() {
+ return VIEW_TYPE_SURFACEVIEW;
+ }
+
+ @Override
+ public void setMediaPlayer(MediaPlayer2 mp) {
+ mMediaPlayer = mp;
+ if (mIsTakingOverOldView) {
+ takeOver(mOldView);
+ }
+ }
+
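+ // Hands the surface over to this view from the old view. If the surface is not available
+ // yet, the hand-over is retried from surfaceCreated() or setMediaPlayer().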
+ @Override
+ public void takeOver(@NonNull VideoViewInterface oldView) {
+ if (assignSurfaceToMediaPlayer(mMediaPlayer)) {
+ ((View) oldView).setVisibility(GONE);
+ mIsTakingOverOldView = false;
+ mOldView = null;
+ if (mSurfaceListener != null) {
+ mSurfaceListener.onSurfaceTakeOverDone(this);
+ }
+ } else {
+ mIsTakingOverOldView = true;
+ mOldView = oldView;
+ }
+ }
+
+ @Override
+ public boolean hasAvailableSurface() {
+ return (mSurfaceHolder != null && mSurfaceHolder.getSurface() != null);
+ }
+
+ ////////////////////////////////////////////////////
+ // implements SurfaceHolder.Callback
+ ////////////////////////////////////////////////////
+
+ @Override
+ public void surfaceCreated(SurfaceHolder holder) {
+ Log.d(TAG, "surfaceCreated: mSurfaceHolder: " + mSurfaceHolder + ", new holder: " + holder);
+ mSurfaceHolder = holder;
+ if (mIsTakingOverOldView) {
+ takeOver(mOldView);
+ } else {
+ assignSurfaceToMediaPlayer(mMediaPlayer);
+ }
+
+ if (mSurfaceListener != null) {
+ Rect rect = mSurfaceHolder.getSurfaceFrame();
+ mSurfaceListener.onSurfaceCreated(this, rect.width(), rect.height());
+ }
+ }
+
+ @Override
+ public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
+ if (mSurfaceListener != null) {
+ mSurfaceListener.onSurfaceChanged(this, width, height);
+ }
+ }
+
+ @Override
+ public void surfaceDestroyed(SurfaceHolder holder) {
+ // After this returns, the surface can no longer be used.
+ mSurfaceHolder = null;
+ if (mSurfaceListener != null) {
+ mSurfaceListener.onSurfaceDestroyed(this);
+ }
+ }
+
+ // TODO: Investigate the way to move onMeasure() code into FrameLayout.
+ @Override
+ protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
+ int videoWidth = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoWidth();
+ int videoHeight = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoHeight();
+ if (DEBUG) {
+ Log.d(TAG, "onMeasure(" + MeasureSpec.toString(widthMeasureSpec) + ", "
+ + MeasureSpec.toString(heightMeasureSpec) + ")");
+ Log.i(TAG, " measuredSize: " + getMeasuredWidth() + "/" + getMeasuredHeight());
+ Log.i(TAG, " viewSize: " + getWidth() + "/" + getHeight());
+ Log.i(TAG, " mVideoWidth/height: " + videoWidth + ", " + videoHeight);
+ }
+
+ int width = getDefaultSize(videoWidth, widthMeasureSpec);
+ int height = getDefaultSize(videoHeight, heightMeasureSpec);
+
+ if (videoWidth > 0 && videoHeight > 0) {
+ int widthSpecSize = MeasureSpec.getSize(widthMeasureSpec);
+ int heightSpecSize = MeasureSpec.getSize(heightMeasureSpec);
+
+ width = widthSpecSize;
+ height = heightSpecSize;
+
+ // for compatibility, we adjust size based on aspect ratio
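+ // The comparisons below cross-multiply the aspect ratios to keep the math in integers.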
+ if (videoWidth * height < width * videoHeight) {
+ width = height * videoWidth / videoHeight;
+ if (DEBUG) {
+ Log.d(TAG, "image too wide, correcting. width: " + width);
+ }
+ } else if (videoWidth * height > width * videoHeight) {
+ height = width * videoHeight / videoWidth;
+ if (DEBUG) {
+ Log.d(TAG, "image too tall, correcting. height: " + height);
+ }
+ }
+ } else {
+ // no size yet, just adopt the given spec sizes
+ }
+ setMeasuredDimension(width, height);
+ if (DEBUG) {
+ Log.i(TAG, "end of onMeasure()");
+ Log.i(TAG, " measuredSize: " + getMeasuredWidth() + "/" + getMeasuredHeight());
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "ViewType: SurfaceView / Visibility: " + getVisibility()
+ + " / surfaceHolder: " + mSurfaceHolder;
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/widget/VideoTextureView.java b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
new file mode 100644
index 0000000..024a3aa
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.content.Context;
+import android.graphics.SurfaceTexture;
+import android.media.MediaPlayer2;
+import android.support.annotation.NonNull;
+import android.support.annotation.RequiresApi;
+import android.util.AttributeSet;
+import android.util.Log;
+import android.view.Surface;
+import android.view.TextureView;
+import android.view.View;
+
+import static android.widget.VideoView2.VIEW_TYPE_TEXTUREVIEW;
+
+@RequiresApi(26)
+class VideoTextureView extends TextureView
+ implements VideoViewInterface, TextureView.SurfaceTextureListener {
+ private static final String TAG = "VideoTextureView";
+ private static final boolean DEBUG = true; // STOPSHIP: Log.isLoggable(TAG, Log.DEBUG);
+
+ private SurfaceTexture mSurfaceTexture;
+ private Surface mSurface;
+ private SurfaceListener mSurfaceListener;
+ private MediaPlayer2 mMediaPlayer;
+ // A flag indicating that this view should take over the surface from the old view once ready.
+ private boolean mIsTakingOverOldView;
+ private VideoViewInterface mOldView;
+
+ public VideoTextureView(Context context) {
+ this(context, null);
+ }
+
+ public VideoTextureView(Context context, AttributeSet attrs) {
+ this(context, attrs, 0);
+ }
+
+ public VideoTextureView(Context context, AttributeSet attrs, int defStyleAttr) {
+ this(context, attrs, defStyleAttr, 0);
+ }
+
+ public VideoTextureView(
+ Context context, AttributeSet attrs, int defStyleAttr, int defStyleRes) {
+ super(context, attrs, defStyleAttr, defStyleRes);
+ setSurfaceTextureListener(this);
+ }
+
+ ////////////////////////////////////////////////////
+ // implements VideoViewInterface
+ ////////////////////////////////////////////////////
+
+ @Override
+ public boolean assignSurfaceToMediaPlayer(MediaPlayer2 mp) {
+ Log.d(TAG, "assignSurfaceToMediaPlayer(): mSurfaceTexture: " + mSurfaceTexture);
+ if (mp == null || !hasAvailableSurface()) {
+ // Surface is not ready.
+ return false;
+ }
+ mp.setSurface(mSurface);
+ return true;
+ }
+
+ @Override
+ public void setSurfaceListener(SurfaceListener l) {
+ mSurfaceListener = l;
+ }
+
+ @Override
+ public int getViewType() {
+ return VIEW_TYPE_TEXTUREVIEW;
+ }
+
+ @Override
+ public void setMediaPlayer(MediaPlayer2 mp) {
+ mMediaPlayer = mp;
+ if (mIsTakingOverOldView) {
+ takeOver(mOldView);
+ }
+ }
+
+ @Override
+ public void takeOver(@NonNull VideoViewInterface oldView) {
+ if (assignSurfaceToMediaPlayer(mMediaPlayer)) {
+ ((View) oldView).setVisibility(GONE);
+ mIsTakingOverOldView = false;
+ mOldView = null;
+ if (mSurfaceListener != null) {
+ mSurfaceListener.onSurfaceTakeOverDone(this);
+ }
+ } else {
+ mIsTakingOverOldView = true;
+ mOldView = oldView;
+ }
+ }
+
+ @Override
+ public boolean hasAvailableSurface() {
+ return (mSurfaceTexture != null && !mSurfaceTexture.isReleased() && mSurface != null);
+ }
+
+ ////////////////////////////////////////////////////
+ // implements TextureView.SurfaceTextureListener
+ ////////////////////////////////////////////////////
+
+ @Override
+ public void onSurfaceTextureAvailable(SurfaceTexture surfaceTexture, int width, int height) {
+ Log.d(TAG, "onSurfaceTextureAvailable: mSurfaceTexture: " + mSurfaceTexture
+ + ", new surface: " + surfaceTexture);
+ mSurfaceTexture = surfaceTexture;
+ mSurface = new Surface(mSurfaceTexture);
+ if (mIsTakingOverOldView) {
+ takeOver(mOldView);
+ } else {
+ assignSurfaceToMediaPlayer(mMediaPlayer);
+ }
+ if (mSurfaceListener != null) {
+ mSurfaceListener.onSurfaceCreated(this, width, height);
+ }
+ }
+
+ @Override
+ public void onSurfaceTextureSizeChanged(SurfaceTexture surfaceTexture, int width, int height) {
+ if (mSurfaceListener != null) {
+ mSurfaceListener.onSurfaceChanged(this, width, height);
+ }
+ // requestLayout(); // TODO: figure out whether requestLayout() should be called here.
+ }
+
+ @Override
+ public void onSurfaceTextureUpdated(SurfaceTexture surface) {
+ // no-op
+ }
+
+ @Override
+ public boolean onSurfaceTextureDestroyed(SurfaceTexture surfaceTexture) {
+ if (mSurfaceListener != null) {
+ mSurfaceListener.onSurfaceDestroyed(this);
+ }
+ mSurfaceTexture = null;
+ mSurface = null;
+ return true;
+ }
+
+ @Override
+ protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
+ int videoWidth = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoWidth();
+ int videoHeight = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoHeight();
+ if (DEBUG) {
+ Log.d(TAG, "onMeasure(" + MeasureSpec.toString(widthMeasureSpec) + ", "
+ + MeasureSpec.toString(heightMeasureSpec) + ")");
+ Log.i(TAG, " measuredSize: " + getMeasuredWidth() + "/" + getMeasuredHeight());
+ Log.i(TAG, " viewSize: " + getWidth() + "/" + getHeight());
+ Log.i(TAG, " mVideoWidth/height: " + videoWidth + ", " + videoHeight);
+ }
+
+ int width = getDefaultSize(videoWidth, widthMeasureSpec);
+ int height = getDefaultSize(videoHeight, heightMeasureSpec);
+
+ if (videoWidth > 0 && videoHeight > 0) {
+ int widthSpecSize = MeasureSpec.getSize(widthMeasureSpec);
+ int heightSpecSize = MeasureSpec.getSize(heightMeasureSpec);
+
+ width = widthSpecSize;
+ height = heightSpecSize;
+
+ // for compatibility, we adjust size based on aspect ratio
+ if (videoWidth * height < width * videoHeight) {
+ width = height * videoWidth / videoHeight;
+ if (DEBUG) {
+ Log.d(TAG, "image too wide, correcting. width: " + width);
+ }
+ } else if (videoWidth * height > width * videoHeight) {
+ height = width * videoHeight / videoWidth;
+ if (DEBUG) {
+ Log.d(TAG, "image too tall, correcting. height: " + height);
+ }
+ }
+ } else {
+ // no size yet, just adopt the given spec sizes
+ }
+ setMeasuredDimension(width, height);
+ if (DEBUG) {
+ Log.i(TAG, "end of onMeasure()");
+ Log.i(TAG, " measuredSize: " + getMeasuredWidth() + "/" + getMeasuredHeight());
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "ViewType: TextureView / Visibility: " + getVisibility()
+ + " / surfaceTexture: " + mSurfaceTexture;
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
new file mode 100644
index 0000000..97279d6
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
@@ -0,0 +1,1405 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.content.Context;
+import android.content.pm.ActivityInfo;
+import android.content.res.Resources;
+import android.graphics.Bitmap;
+import android.graphics.BitmapFactory;
+import android.graphics.Point;
+import android.graphics.drawable.BitmapDrawable;
+import android.graphics.drawable.Drawable;
+import android.media.AudioAttributes;
+import android.media.AudioFocusRequest;
+import android.media.AudioManager;
+import android.media.DataSourceDesc;
+import android.media.MediaMetadata;
+import android.media.MediaPlayer2;
+import android.media.MediaPlayer2.MediaPlayer2EventCallback;
+import android.media.MediaPlayer2.OnSubtitleDataListener;
+import android.media.MediaPlayer2Impl;
+import android.media.SubtitleData;
+import android.media.MediaItem2;
+import android.media.MediaMetadata2;
+import android.media.MediaMetadataRetriever;
+import android.media.Metadata;
+import android.media.PlaybackParams;
+import android.media.TimedText;
+import android.media.session.MediaController;
+import android.media.session.MediaController.PlaybackInfo;
+import android.media.session.MediaSession;
+import android.media.session.PlaybackState;
+import android.media.SessionToken2;
+import android.media.update.VideoView2Provider;
+import android.media.update.ViewGroupProvider;
+import android.net.Uri;
+import android.os.AsyncTask;
+import android.os.Bundle;
+import android.os.ResultReceiver;
+import android.support.annotation.Nullable;
+import android.util.AttributeSet;
+import android.util.DisplayMetrics;
+import android.util.Log;
+import android.util.Pair;
+import android.view.MotionEvent;
+import android.view.View;
+import android.view.ViewGroup.LayoutParams;
+import android.view.WindowManager;
+import android.view.accessibility.AccessibilityManager;
+import android.widget.ImageView;
+import android.widget.MediaControlView2;
+import android.widget.TextView;
+import android.widget.VideoView2;
+
+import com.android.internal.graphics.palette.Palette;
+import com.android.media.RoutePlayer;
+import com.android.media.subtitle.ClosedCaptionRenderer;
+import com.android.media.subtitle.SubtitleController;
+import com.android.media.subtitle.SubtitleTrack;
+import com.android.media.update.ApiHelper;
+import com.android.media.update.R;
+import com.android.support.mediarouter.media.MediaItemStatus;
+import com.android.support.mediarouter.media.MediaControlIntent;
+import com.android.support.mediarouter.media.MediaRouter;
+import com.android.support.mediarouter.media.MediaRouteSelector;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Executor;
+
+public class VideoView2Impl extends BaseLayout
+ implements VideoView2Provider, VideoViewInterface.SurfaceListener {
+ private static final String TAG = "VideoView2";
+ private static final boolean DEBUG = true; // STOPSHIP: Log.isLoggable(TAG, Log.DEBUG);
+ private static final long DEFAULT_SHOW_CONTROLLER_INTERVAL_MS = 2000;
+
+ private final VideoView2 mInstance;
+
+ private static final int STATE_ERROR = -1;
+ private static final int STATE_IDLE = 0;
+ private static final int STATE_PREPARING = 1;
+ private static final int STATE_PREPARED = 2;
+ private static final int STATE_PLAYING = 3;
+ private static final int STATE_PAUSED = 4;
+ private static final int STATE_PLAYBACK_COMPLETED = 5;
+
+ private static final int INVALID_TRACK_INDEX = -1;
+ private static final float INVALID_SPEED = 0f;
+
+ private static final int SIZE_TYPE_EMBEDDED = 0;
+ private static final int SIZE_TYPE_FULL = 1;
+ // TODO: add support for Minimal size type.
+ private static final int SIZE_TYPE_MINIMAL = 2;
+
+ private AccessibilityManager mAccessibilityManager;
+ private AudioManager mAudioManager;
+ private AudioAttributes mAudioAttributes;
+ private int mAudioFocusType = AudioManager.AUDIOFOCUS_GAIN; // legacy focus gain
+
+ private Pair<Executor, VideoView2.OnCustomActionListener> mCustomActionListenerRecord;
+ private VideoView2.OnViewTypeChangedListener mViewTypeChangedListener;
+ private VideoView2.OnFullScreenRequestListener mFullScreenRequestListener;
+
+ private VideoViewInterface mCurrentView;
+ private VideoTextureView mTextureView;
+ private VideoSurfaceView mSurfaceView;
+
+ private MediaPlayer2 mMediaPlayer;
+ private DataSourceDesc mDsd;
+ private MediaControlView2 mMediaControlView;
+ private MediaSession mMediaSession;
+ private MediaController mMediaController;
+ private Metadata mMetadata;
+ private MediaMetadata2 mMediaMetadata;
+ private MediaMetadataRetriever mRetriever;
+ private boolean mNeedUpdateMediaType;
+ private Bundle mMediaTypeData;
+ private String mTitle;
+
+ // TODO: move music view inside SurfaceView/TextureView or implement VideoViewInterface.
+ private WindowManager mManager;
+ private Resources mResources;
+ private View mMusicView;
+ private Drawable mMusicAlbumDrawable;
+ private String mMusicTitleText;
+ private String mMusicArtistText;
+ private boolean mIsMusicMediaType;
+ private int mPrevWidth;
+ private int mPrevHeight;
+ private int mDominantColor;
+ private int mSizeType;
+
+ private PlaybackState.Builder mStateBuilder;
+ private List<PlaybackState.CustomAction> mCustomActionList;
+ private int mTargetState = STATE_IDLE;
+ private int mCurrentState = STATE_IDLE;
+ private int mCurrentBufferPercentage;
+ private long mSeekWhenPrepared; // recording the seek position while preparing
+
+ private int mVideoWidth;
+ private int mVideoHeight;
+
+ private ArrayList<Integer> mVideoTrackIndices;
+ private ArrayList<Integer> mAudioTrackIndices;
+ private ArrayList<Pair<Integer, SubtitleTrack>> mSubtitleTrackIndices;
+ private SubtitleController mSubtitleController;
+
+ // Selected video/audio/subtitle track indices, as returned by MediaPlayer2.
+ private int mSelectedVideoTrackIndex;
+ private int mSelectedAudioTrackIndex;
+ private int mSelectedSubtitleTrackIndex;
+
+ private SubtitleView mSubtitleView;
+ private boolean mSubtitleEnabled;
+
+ private float mSpeed;
+ // TODO: Remove mFallbackSpeed once integrated with MediaPlayer2's new setPlaybackParams().
+ // Refer: https://docs.google.com/document/d/1nzAfns6i2hJ3RkaUre3QMT6wsDedJ5ONLiA_OOBFFX8/edit
+ private float mFallbackSpeed; // keep the original speed before 'pause' is called.
+ private float mVolumeLevelFloat;
+ private int mVolumeLevel;
+
+ private long mShowControllerIntervalMs;
+
+ private MediaRouter mMediaRouter;
+ private MediaRouteSelector mRouteSelector;
+ private MediaRouter.RouteInfo mRoute;
+ private RoutePlayer mRoutePlayer;
+
+ private final MediaRouter.Callback mRouterCallback = new MediaRouter.Callback() {
+ @Override
+ public void onRouteSelected(MediaRouter router, MediaRouter.RouteInfo route) {
+ if (route.supportsControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK)) {
+ // Stop local playback (if necessary)
+ resetPlayer();
+ mRoute = route;
+ mRoutePlayer = new RoutePlayer(mInstance.getContext(), route);
+ mRoutePlayer.setPlayerEventCallback(new RoutePlayer.PlayerEventCallback() {
+ @Override
+ public void onPlayerStateChanged(MediaItemStatus itemStatus) {
+ PlaybackState.Builder psBuilder = new PlaybackState.Builder();
+ psBuilder.setActions(RoutePlayer.PLAYBACK_ACTIONS);
+ long position = itemStatus.getContentPosition();
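+ // Translate the route's MediaItemStatus into the framework PlaybackState reported
+ // to the session.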
+ switch (itemStatus.getPlaybackState()) {
+ case MediaItemStatus.PLAYBACK_STATE_PENDING:
+ psBuilder.setState(PlaybackState.STATE_NONE, position, 0);
+ mCurrentState = STATE_IDLE;
+ break;
+ case MediaItemStatus.PLAYBACK_STATE_PLAYING:
+ psBuilder.setState(PlaybackState.STATE_PLAYING, position, 1);
+ mCurrentState = STATE_PLAYING;
+ break;
+ case MediaItemStatus.PLAYBACK_STATE_PAUSED:
+ psBuilder.setState(PlaybackState.STATE_PAUSED, position, 0);
+ mCurrentState = STATE_PAUSED;
+ break;
+ case MediaItemStatus.PLAYBACK_STATE_BUFFERING:
+ psBuilder.setState(PlaybackState.STATE_BUFFERING, position, 0);
+ mCurrentState = STATE_PAUSED;
+ break;
+ case MediaItemStatus.PLAYBACK_STATE_FINISHED:
+ psBuilder.setState(PlaybackState.STATE_STOPPED, position, 0);
+ mCurrentState = STATE_PLAYBACK_COMPLETED;
+ break;
+ }
+
+ PlaybackState pbState = psBuilder.build();
+ mMediaSession.setPlaybackState(pbState);
+
+ MediaMetadata.Builder mmBuilder = new MediaMetadata.Builder();
+ mmBuilder.putLong(MediaMetadata.METADATA_KEY_DURATION,
+ itemStatus.getContentDuration());
+ mMediaSession.setMetadata(mmBuilder.build());
+ }
+ });
+ // Start remote playback (if necessary)
+ mRoutePlayer.openVideo(mDsd);
+ }
+ }
+
+ @Override
+ public void onRouteUnselected(MediaRouter router, MediaRouter.RouteInfo route, int reason) {
+ if (mRoute != null && mRoutePlayer != null) {
+ mRoutePlayer.release();
+ mRoutePlayer = null;
+ }
+ if (mRoute == route) {
+ mRoute = null;
+ }
+ if (reason != MediaRouter.UNSELECT_REASON_ROUTE_CHANGED) {
+ // TODO: Resume local playback (if necessary)
+ openVideo(mDsd);
+ }
+ }
+ };
+
+ public VideoView2Impl(VideoView2 instance,
+ ViewGroupProvider superProvider, ViewGroupProvider privateProvider) {
+ super(instance, superProvider, privateProvider);
+ mInstance = instance;
+ }
+
+ @Override
+ public void initialize(@Nullable AttributeSet attrs, int defStyleAttr, int defStyleRes) {
+ mVideoWidth = 0;
+ mVideoHeight = 0;
+ mSpeed = 1.0f;
+ mFallbackSpeed = mSpeed;
+ mSelectedSubtitleTrackIndex = INVALID_TRACK_INDEX;
+ // TODO: add attributes to get this value.
+ mShowControllerIntervalMs = DEFAULT_SHOW_CONTROLLER_INTERVAL_MS;
+
+ mAccessibilityManager = AccessibilityManager.getInstance(mInstance.getContext());
+
+ mAudioManager = (AudioManager) mInstance.getContext()
+ .getSystemService(Context.AUDIO_SERVICE);
+ mAudioAttributes = new AudioAttributes.Builder().setUsage(AudioAttributes.USAGE_MEDIA)
+ .setContentType(AudioAttributes.CONTENT_TYPE_MOVIE).build();
+ mInstance.setFocusable(true);
+ mInstance.setFocusableInTouchMode(true);
+ mInstance.requestFocus();
+
+ // TODO: try to keep a single child at a time rather than always having both.
+ mTextureView = new VideoTextureView(mInstance.getContext());
+ mSurfaceView = new VideoSurfaceView(mInstance.getContext());
+ LayoutParams params = new LayoutParams(LayoutParams.MATCH_PARENT,
+ LayoutParams.MATCH_PARENT);
+ mTextureView.setLayoutParams(params);
+ mSurfaceView.setLayoutParams(params);
+ mTextureView.setSurfaceListener(this);
+ mSurfaceView.setSurfaceListener(this);
+ mInstance.addView(mTextureView);
+ mInstance.addView(mSurfaceView);
+
+ mSubtitleView = new SubtitleView(mInstance.getContext());
+ mSubtitleView.setLayoutParams(params);
+ mSubtitleView.setBackgroundColor(0);
+ mInstance.addView(mSubtitleView);
+
+ boolean enableControlView = (attrs == null) || attrs.getAttributeBooleanValue(
+ "http://schemas.android.com/apk/res/android",
+ "enableControlView", true);
+ if (enableControlView) {
+ mMediaControlView = new MediaControlView2(mInstance.getContext());
+ }
+
+ mSubtitleEnabled = (attrs == null) || attrs.getAttributeBooleanValue(
+ "http://schemas.android.com/apk/res/android",
+ "enableSubtitle", false);
+
+ // TODO: Choose TextureView when SurfaceView cannot be created.
+ // Choose surface view by default
+ int viewType = (attrs == null) ? VideoView2.VIEW_TYPE_SURFACEVIEW
+ : attrs.getAttributeIntValue(
+ "http://schemas.android.com/apk/res/android",
+ "viewType", VideoView2.VIEW_TYPE_SURFACEVIEW);
+ if (viewType == VideoView2.VIEW_TYPE_SURFACEVIEW) {
+ Log.d(TAG, "viewType attribute is surfaceView.");
+ mTextureView.setVisibility(View.GONE);
+ mSurfaceView.setVisibility(View.VISIBLE);
+ mCurrentView = mSurfaceView;
+ } else if (viewType == VideoView2.VIEW_TYPE_TEXTUREVIEW) {
+ Log.d(TAG, "viewType attribute is textureView.");
+ mTextureView.setVisibility(View.VISIBLE);
+ mSurfaceView.setVisibility(View.GONE);
+ mCurrentView = mTextureView;
+ }
+
+ MediaRouteSelector.Builder builder = new MediaRouteSelector.Builder();
+ builder.addControlCategory(MediaControlIntent.CATEGORY_REMOTE_PLAYBACK);
+ builder.addControlCategory(MediaControlIntent.CATEGORY_LIVE_AUDIO);
+ builder.addControlCategory(MediaControlIntent.CATEGORY_LIVE_VIDEO);
+ mRouteSelector = builder.build();
+ }
+
+ @Override
+ public void setMediaControlView2_impl(MediaControlView2 mediaControlView, long intervalMs) {
+ mMediaControlView = mediaControlView;
+ mShowControllerIntervalMs = intervalMs;
+ // TODO: Call MediaControlView2.setRouteSelector only when cast is available.
+ ((MediaControlView2Impl) mMediaControlView.getProvider()).setRouteSelector(mRouteSelector);
+
+ if (mInstance.isAttachedToWindow()) {
+ attachMediaControlView();
+ }
+ }
+
+ @Override
+ public MediaController getMediaController_impl() {
+ if (mMediaSession == null) {
+ throw new IllegalStateException("MediaSession instance is not available.");
+ }
+ return mMediaController;
+ }
+
+ @Override
+ public SessionToken2 getMediaSessionToken_impl() {
+ // TODO: implement this
+ return null;
+ }
+
+ @Override
+ public MediaControlView2 getMediaControlView2_impl() {
+ return mMediaControlView;
+ }
+
+ @Override
+ public MediaMetadata2 getMediaMetadata_impl() {
+ return mMediaMetadata;
+ }
+
+ @Override
+ public void setMediaMetadata_impl(MediaMetadata2 metadata) {
+ // TODO: integrate this with MediaSession2#MediaItem2
+ mMediaMetadata = metadata;
+
+ // TODO: add support for handling website link
+ mMediaTypeData = new Bundle();
+ boolean isAd = metadata != null
+ && metadata.getLong(MediaMetadata2.METADATA_KEY_ADVERTISEMENT) != 0;
+ mMediaTypeData.putBoolean(
+ MediaControlView2Impl.KEY_STATE_IS_ADVERTISEMENT, isAd);
+
+ if (mMediaSession != null) {
+ mMediaSession.sendSessionEvent(
+ MediaControlView2Impl.EVENT_UPDATE_MEDIA_TYPE_STATUS, mMediaTypeData);
+ } else {
+ // Update later inside OnPreparedListener after MediaSession is initialized.
+ mNeedUpdateMediaType = true;
+ }
+ }
+
+ @Override
+ public void setSubtitleEnabled_impl(boolean enable) {
+ if (enable != mSubtitleEnabled) {
+ selectOrDeselectSubtitle(enable);
+ }
+ mSubtitleEnabled = enable;
+ }
+
+ @Override
+ public boolean isSubtitleEnabled_impl() {
+ return mSubtitleEnabled;
+ }
+
+ // TODO: remove setSpeed_impl once MediaController2 is ready.
+ @Override
+ public void setSpeed_impl(float speed) {
+ if (speed <= 0.0f) {
+ Log.e(TAG, "Unsupported speed (" + speed + ") is ignored.");
+ return;
+ }
+ mSpeed = speed;
+ if (mMediaPlayer != null && mMediaPlayer.isPlaying()) {
+ applySpeed();
+ }
+ updatePlaybackState();
+ }
+
+ @Override
+ public void setAudioFocusRequest_impl(int focusGain) {
+ if (focusGain != AudioManager.AUDIOFOCUS_NONE
+ && focusGain != AudioManager.AUDIOFOCUS_GAIN
+ && focusGain != AudioManager.AUDIOFOCUS_GAIN_TRANSIENT
+ && focusGain != AudioManager.AUDIOFOCUS_GAIN_TRANSIENT_MAY_DUCK
+ && focusGain != AudioManager.AUDIOFOCUS_GAIN_TRANSIENT_EXCLUSIVE) {
+ throw new IllegalArgumentException("Illegal audio focus type " + focusGain);
+ }
+ mAudioFocusType = focusGain;
+ }
+
+ @Override
+ public void setAudioAttributes_impl(AudioAttributes attributes) {
+ if (attributes == null) {
+ throw new IllegalArgumentException("Illegal null AudioAttributes");
+ }
+ mAudioAttributes = attributes;
+ }
+
+ @Override
+ public void setVideoPath_impl(String path) {
+ mInstance.setVideoUri(Uri.parse(path));
+ }
+
+ @Override
+ public void setVideoUri_impl(Uri uri) {
+ mInstance.setVideoUri(uri, null);
+ }
+
+ @Override
+ public void setVideoUri_impl(Uri uri, Map<String, String> headers) {
+ DataSourceDesc.Builder builder = new DataSourceDesc.Builder();
+ builder.setDataSource(mInstance.getContext(), uri, headers, null);
+ mInstance.setDataSource(builder.build());
+ }
+
+ @Override
+ public void setMediaItem_impl(MediaItem2 mediaItem) {
+ // TODO: implement this
+ }
+
+ @Override
+ public void setDataSource_impl(DataSourceDesc dsd) {
+ mDsd = dsd;
+ mSeekWhenPrepared = 0;
+ openVideo(dsd);
+ }
+
+ @Override
+ public void setViewType_impl(int viewType) {
+ if (viewType == mCurrentView.getViewType()) {
+ return;
+ }
+ VideoViewInterface targetView;
+ if (viewType == VideoView2.VIEW_TYPE_TEXTUREVIEW) {
+ Log.d(TAG, "switching to TextureView");
+ targetView = mTextureView;
+ } else if (viewType == VideoView2.VIEW_TYPE_SURFACEVIEW) {
+ Log.d(TAG, "switching to SurfaceView");
+ targetView = mSurfaceView;
+ } else {
+ throw new IllegalArgumentException("Unknown view type: " + viewType);
+ }
+ ((View) targetView).setVisibility(View.VISIBLE);
+ targetView.takeOver(mCurrentView);
+ mInstance.requestLayout();
+ }
+
+ @Override
+ public int getViewType_impl() {
+ return mCurrentView.getViewType();
+ }
+
+ @Override
+ public void setCustomActions_impl(
+ List<PlaybackState.CustomAction> actionList,
+ Executor executor, VideoView2.OnCustomActionListener listener) {
+ mCustomActionList = actionList;
+ mCustomActionListenerRecord = new Pair<>(executor, listener);
+
+ // Create a new playback state builder in order to clear the existing custom actions.
+ mStateBuilder = null;
+ updatePlaybackState();
+ }
+
+ @Override
+ public void setOnViewTypeChangedListener_impl(VideoView2.OnViewTypeChangedListener l) {
+ mViewTypeChangedListener = l;
+ }
+
+ @Override
+ public void setFullScreenRequestListener_impl(VideoView2.OnFullScreenRequestListener l) {
+ mFullScreenRequestListener = l;
+ }
+
+ @Override
+ public void onAttachedToWindow_impl() {
+ super.onAttachedToWindow_impl();
+
+ // Create MediaSession
+ mMediaSession = new MediaSession(mInstance.getContext(), "VideoView2MediaSession");
+ mMediaSession.setCallback(new MediaSessionCallback());
+ mMediaSession.setActive(true);
+ mMediaController = mMediaSession.getController();
+ mMediaRouter = MediaRouter.getInstance(mInstance.getContext());
+ mMediaRouter.setMediaSession(mMediaSession);
+ mMediaRouter.addCallback(mRouteSelector, mRouterCallback);
+ attachMediaControlView();
+ // TODO: remove this after moving the MediaSession creation code into VideoView2 initialization
+ if (mCurrentState == STATE_PREPARED) {
+ extractTracks();
+ extractMetadata();
+ extractAudioMetadata();
+ if (mNeedUpdateMediaType) {
+ mMediaSession.sendSessionEvent(
+ MediaControlView2Impl.EVENT_UPDATE_MEDIA_TYPE_STATUS,
+ mMediaTypeData);
+ mNeedUpdateMediaType = false;
+ }
+ }
+ }
+
+ @Override
+ public void onDetachedFromWindow_impl() {
+ super.onDetachedFromWindow_impl();
+
+ mMediaSession.release();
+ mMediaSession = null;
+ mMediaController = null;
+ }
+
+ @Override
+ public CharSequence getAccessibilityClassName_impl() {
+ return VideoView2.class.getName();
+ }
+
+ @Override
+ public boolean onTouchEvent_impl(MotionEvent ev) {
+ if (DEBUG) {
+ Log.d(TAG, "onTouchEvent(). mCurrentState=" + mCurrentState
+ + ", mTargetState=" + mTargetState);
+ }
+ if (ev.getAction() == MotionEvent.ACTION_UP && mMediaControlView != null) {
+ if (!mIsMusicMediaType || mSizeType != SIZE_TYPE_FULL) {
+ toggleMediaControlViewVisibility();
+ }
+ }
+
+ return super.onTouchEvent_impl(ev);
+ }
+
+ @Override
+ public boolean onTrackballEvent_impl(MotionEvent ev) {
+ if (ev.getAction() == MotionEvent.ACTION_UP && mMediaControlView != null) {
+ if (!mIsMusicMediaType || mSizeType != SIZE_TYPE_FULL) {
+ toggleMediaControlViewVisibility();
+ }
+ }
+
+ return super.onTrackballEvent_impl(ev);
+ }
+
+ @Override
+ public boolean dispatchTouchEvent_impl(MotionEvent ev) {
+ // TODO: Test touch event handling logic thoroughly and simplify the logic.
+ return super.dispatchTouchEvent_impl(ev);
+ }
+
+ @Override
+ public void onMeasure_impl(int widthMeasureSpec, int heightMeasureSpec) {
+ super.onMeasure_impl(widthMeasureSpec, heightMeasureSpec);
+
+ if (mIsMusicMediaType) {
+ if (mPrevWidth != mInstance.getMeasuredWidth()
+ || mPrevHeight != mInstance.getMeasuredHeight()) {
+ int currWidth = mInstance.getMeasuredWidth();
+ int currHeight = mInstance.getMeasuredHeight();
+ Point screenSize = new Point();
+ mManager.getDefaultDisplay().getSize(screenSize);
+ int screenWidth = screenSize.x;
+ int screenHeight = screenSize.y;
+
+ if (currWidth == screenWidth && currHeight == screenHeight) {
+ int orientation = retrieveOrientation();
+ if (orientation == ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE) {
+ inflateMusicView(R.layout.full_landscape_music);
+ } else {
+ inflateMusicView(R.layout.full_portrait_music);
+ }
+
+ if (mSizeType != SIZE_TYPE_FULL) {
+ mSizeType = SIZE_TYPE_FULL;
+ // Remove existing mFadeOut callback
+ mMediaControlView.removeCallbacks(mFadeOut);
+ mMediaControlView.setVisibility(View.VISIBLE);
+ }
+ } else {
+ if (mSizeType != SIZE_TYPE_EMBEDDED) {
+ mSizeType = SIZE_TYPE_EMBEDDED;
+ inflateMusicView(R.layout.embedded_music);
+ // Add new mFadeOut callback
+ mMediaControlView.postDelayed(mFadeOut, mShowControllerIntervalMs);
+ }
+ }
+ mPrevWidth = currWidth;
+ mPrevHeight = currHeight;
+ }
+ }
+ }
+
+ ///////////////////////////////////////////////////
+ // Implements VideoViewInterface.SurfaceListener
+ ///////////////////////////////////////////////////
+
+ @Override
+ public void onSurfaceCreated(View view, int width, int height) {
+ if (DEBUG) {
+ Log.d(TAG, "onSurfaceCreated(). mCurrentState=" + mCurrentState
+ + ", mTargetState=" + mTargetState + ", width/height: " + width + "/" + height
+ + ", " + view.toString());
+ }
+ if (needToStart()) {
+ mMediaController.getTransportControls().play();
+ }
+ }
+
+ @Override
+ public void onSurfaceDestroyed(View view) {
+ if (DEBUG) {
+ Log.d(TAG, "onSurfaceDestroyed(). mCurrentState=" + mCurrentState
+ + ", mTargetState=" + mTargetState + ", " + view.toString());
+ }
+ }
+
+ @Override
+ public void onSurfaceChanged(View view, int width, int height) {
+ // TODO: Do we need to call requestLayout here?
+ if (DEBUG) {
+ Log.d(TAG, "onSurfaceChanged(). width/height: " + width + "/" + height
+ + ", " + view.toString());
+ }
+ }
+
+ @Override
+ public void onSurfaceTakeOverDone(VideoViewInterface view) {
+ if (DEBUG) {
+ Log.d(TAG, "onSurfaceTakeOverDone(). Now current view is: " + view);
+ }
+ mCurrentView = view;
+ if (mViewTypeChangedListener != null) {
+ mViewTypeChangedListener.onViewTypeChanged(mInstance, view.getViewType());
+ }
+ if (needToStart()) {
+ mMediaController.getTransportControls().play();
+ }
+ }
+
+ ///////////////////////////////////////////////////
+ // Protected or private methods
+ ///////////////////////////////////////////////////
+
+ private void attachMediaControlView() {
+ // Get MediaController from MediaSession and set it inside MediaControlView
+ mMediaControlView.setController(mMediaSession.getController());
+
+ LayoutParams params =
+ new LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT);
+ mInstance.addView(mMediaControlView, params);
+ }
+
+ private boolean isInPlaybackState() {
+ return (mMediaPlayer != null || mRoutePlayer != null)
+ && mCurrentState != STATE_ERROR
+ && mCurrentState != STATE_IDLE
+ && mCurrentState != STATE_PREPARING;
+ }
+
+ private boolean needToStart() {
+ return (mMediaPlayer != null || mRoutePlayer != null)
+ && mCurrentState != STATE_PLAYING
+ && mTargetState == STATE_PLAYING;
+ }
+
+ // Creates a MediaPlayer2 instance and prepares playback.
+ private void openVideo(DataSourceDesc dsd) {
+ Uri uri = dsd.getUri();
+ Map<String, String> headers = dsd.getUriHeaders();
+ resetPlayer();
+ if (isRemotePlayback()) {
+ mRoutePlayer.openVideo(dsd);
+ return;
+ }
+ if (mAudioFocusType != AudioManager.AUDIOFOCUS_NONE) {
+ // TODO: this should have a focus listener
+ AudioFocusRequest focusRequest;
+ focusRequest = new AudioFocusRequest.Builder(mAudioFocusType)
+ .setAudioAttributes(mAudioAttributes)
+ .build();
+ mAudioManager.requestAudioFocus(focusRequest);
+ }
+
+ try {
+ Log.d(TAG, "openVideo(): creating new MediaPlayer2 instance.");
+ mMediaPlayer = new MediaPlayer2Impl();
+ mSurfaceView.setMediaPlayer(mMediaPlayer);
+ mTextureView.setMediaPlayer(mMediaPlayer);
+ mCurrentView.assignSurfaceToMediaPlayer(mMediaPlayer);
+
+ final Context context = mInstance.getContext();
+ // TODO: Add timely firing logic for more accurate sync between CC and video frame
+ mSubtitleController = new SubtitleController(context);
+ mSubtitleController.registerRenderer(new ClosedCaptionRenderer(context));
+ mSubtitleController.setAnchor((SubtitleController.Anchor) mSubtitleView);
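+ // This Executor runs callbacks inline on whichever thread invokes execute(), so
+ // MediaPlayer2 event callbacks are delivered synchronously.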
+ Executor executor = new Executor() {
+ @Override
+ public void execute(Runnable runnable) {
+ runnable.run();
+ }
+ };
+ mMediaPlayer.setMediaPlayer2EventCallback(executor, mMediaPlayer2Callback);
+
+ mCurrentBufferPercentage = -1;
+ mMediaPlayer.setDataSource(dsd);
+ mMediaPlayer.setAudioAttributes(mAudioAttributes);
+ mMediaPlayer.setOnSubtitleDataListener(mSubtitleListener);
+ // we don't set the target state here either, but preserve the
+ // target state that was there before.
+ mCurrentState = STATE_PREPARING;
+ mMediaPlayer.prepare();
+
+ // Save file name as title since the file may not have a title Metadata.
+ mTitle = uri.getPath();
+ String scheme = uri.getScheme();
+ if (scheme != null && scheme.equals("file")) {
+ mTitle = uri.getLastPathSegment();
+ }
+ mRetriever = new MediaMetadataRetriever();
+ mRetriever.setDataSource(mInstance.getContext(), uri);
+
+ if (DEBUG) {
+ Log.d(TAG, "openVideo(). mCurrentState=" + mCurrentState
+ + ", mTargetState=" + mTargetState);
+ }
+ } catch (IllegalArgumentException ex) {
+ Log.w(TAG, "Unable to open content: " + uri, ex);
+ mCurrentState = STATE_ERROR;
+ mTargetState = STATE_ERROR;
+ mMediaPlayer2Callback.onError(mMediaPlayer, dsd,
+ MediaPlayer2.MEDIA_ERROR_UNKNOWN, MediaPlayer2.MEDIA_ERROR_IO);
+ }
+ }
+
+ /*
+ * Reset the media player in any state
+ */
+ private void resetPlayer() {
+ if (mMediaPlayer != null) {
+ final MediaPlayer2 player = mMediaPlayer;
+ new AsyncTask<MediaPlayer2, Void, Void>() {
+ @Override
+ protected Void doInBackground(MediaPlayer2... players) {
+ // TODO: Fix NPE while MediaPlayer2.close()
+ //players[0].close();
+ return null;
+ }
+ }.executeOnExecutor(AsyncTask.SERIAL_EXECUTOR, player);
+ mMediaPlayer = null;
+ mTextureView.setMediaPlayer(null);
+ mSurfaceView.setMediaPlayer(null);
+ //mPendingSubtitleTracks.clear();
+ mCurrentState = STATE_IDLE;
+ mTargetState = STATE_IDLE;
+ if (mAudioFocusType != AudioManager.AUDIOFOCUS_NONE) {
+ mAudioManager.abandonAudioFocus(null);
+ }
+ }
+ mVideoWidth = 0;
+ mVideoHeight = 0;
+ }
+
+ private void updatePlaybackState() {
+ if (mStateBuilder == null) {
+ // Get the capabilities of the player for this stream
+ mMetadata = mMediaPlayer.getMetadata(MediaPlayer2.METADATA_ALL,
+ MediaPlayer2.BYPASS_METADATA_FILTER);
+
+ // Add Play action as default
+ long playbackActions = PlaybackState.ACTION_PLAY;
+ if (mMetadata != null) {
+ if (!mMetadata.has(Metadata.PAUSE_AVAILABLE)
+ || mMetadata.getBoolean(Metadata.PAUSE_AVAILABLE)) {
+ playbackActions |= PlaybackState.ACTION_PAUSE;
+ }
+ if (!mMetadata.has(Metadata.SEEK_BACKWARD_AVAILABLE)
+ || mMetadata.getBoolean(Metadata.SEEK_BACKWARD_AVAILABLE)) {
+ playbackActions |= PlaybackState.ACTION_REWIND;
+ }
+ if (!mMetadata.has(Metadata.SEEK_FORWARD_AVAILABLE)
+ || mMetadata.getBoolean(Metadata.SEEK_FORWARD_AVAILABLE)) {
+ playbackActions |= PlaybackState.ACTION_FAST_FORWARD;
+ }
+ if (!mMetadata.has(Metadata.SEEK_AVAILABLE)
+ || mMetadata.getBoolean(Metadata.SEEK_AVAILABLE)) {
+ playbackActions |= PlaybackState.ACTION_SEEK_TO;
+ }
+ } else {
+ playbackActions |= (PlaybackState.ACTION_PAUSE |
+ PlaybackState.ACTION_REWIND | PlaybackState.ACTION_FAST_FORWARD |
+ PlaybackState.ACTION_SEEK_TO);
+ }
+ mStateBuilder = new PlaybackState.Builder();
+ mStateBuilder.setActions(playbackActions);
+
+ if (mCustomActionList != null) {
+ for (PlaybackState.CustomAction action : mCustomActionList) {
+ mStateBuilder.addCustomAction(action);
+ }
+ }
+ }
+ mStateBuilder.setState(getCorrespondingPlaybackState(),
+ mMediaPlayer.getCurrentPosition(), mSpeed);
+ if (mCurrentState != STATE_ERROR
+ && mCurrentState != STATE_IDLE
+ && mCurrentState != STATE_PREPARING) {
+ // TODO: this should be replaced with MediaPlayer2.getBufferedPosition() once it is
+ // implemented.
+ if (mCurrentBufferPercentage == -1) {
+ mStateBuilder.setBufferedPosition(-1);
+ } else {
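+ // Convert the buffered percentage into an absolute buffered position in milliseconds.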
+ mStateBuilder.setBufferedPosition(
+ (long) (mCurrentBufferPercentage / 100.0 * mMediaPlayer.getDuration()));
+ }
+ }
+
+ // Set PlaybackState for MediaSession
+ if (mMediaSession != null) {
+ PlaybackState state = mStateBuilder.build();
+ mMediaSession.setPlaybackState(state);
+ }
+ }
+
+ private int getCorrespondingPlaybackState() {
+ switch (mCurrentState) {
+ case STATE_ERROR:
+ return PlaybackState.STATE_ERROR;
+ case STATE_IDLE:
+ return PlaybackState.STATE_NONE;
+ case STATE_PREPARING:
+ return PlaybackState.STATE_CONNECTING;
+ case STATE_PREPARED:
+ return PlaybackState.STATE_PAUSED;
+ case STATE_PLAYING:
+ return PlaybackState.STATE_PLAYING;
+ case STATE_PAUSED:
+ return PlaybackState.STATE_PAUSED;
+ case STATE_PLAYBACK_COMPLETED:
+ return PlaybackState.STATE_STOPPED;
+ default:
+ return -1;
+ }
+ }
+
+ private final Runnable mFadeOut = new Runnable() {
+ @Override
+ public void run() {
+ if (mCurrentState == STATE_PLAYING) {
+ mMediaControlView.setVisibility(View.GONE);
+ }
+ }
+ };
+
+ private void showController() {
+ // TODO: Decide what to show when not in a playback state.
+ if (mMediaControlView == null || !isInPlaybackState()
+ || (mIsMusicMediaType && mSizeType == SIZE_TYPE_FULL)) {
+ return;
+ }
+ mMediaControlView.removeCallbacks(mFadeOut);
+ mMediaControlView.setVisibility(View.VISIBLE);
+ if (mShowControllerIntervalMs != 0
+ && !mAccessibilityManager.isTouchExplorationEnabled()) {
+ mMediaControlView.postDelayed(mFadeOut, mShowControllerIntervalMs);
+ }
+ }
+
+ private void toggleMediaControlViewVisibility() {
+ if (mMediaControlView.getVisibility() == View.VISIBLE) {
+ mMediaControlView.removeCallbacks(mFadeOut);
+ mMediaControlView.setVisibility(View.GONE);
+ } else {
+ showController();
+ }
+ }
+
+ private void applySpeed() {
+ PlaybackParams params = mMediaPlayer.getPlaybackParams().allowDefaults();
+ if (mSpeed != params.getSpeed()) {
+ try {
+ params.setSpeed(mSpeed);
+ mMediaPlayer.setPlaybackParams(params);
+ mFallbackSpeed = mSpeed;
+ } catch (IllegalArgumentException e) {
+ Log.e(TAG, "PlaybackParams has unsupported value: " + e);
+ // TODO: should revise this part after integrating with MP2.
+ // If mSpeed has an illegal value, the system determines the best handling
+ // (see PlaybackParams.AUDIO_FALLBACK_MODE_DEFAULT).
+ // Note: the pre-MP2 player returns 0.0f while paused. In that case, VideoView2
+ // uses mFallbackSpeed instead.
+ float fallbackSpeed = mMediaPlayer.getPlaybackParams().allowDefaults().getSpeed();
+ if (fallbackSpeed > 0.0f) {
+ mFallbackSpeed = fallbackSpeed;
+ }
+ mSpeed = mFallbackSpeed;
+ }
+ }
+ }
+
+ private boolean isRemotePlayback() {
+ if (mMediaController == null) {
+ return false;
+ }
+ PlaybackInfo playbackInfo = mMediaController.getPlaybackInfo();
+ return playbackInfo != null
+ && playbackInfo.getPlaybackType() == PlaybackInfo.PLAYBACK_TYPE_REMOTE;
+ }
+
+ private void selectOrDeselectSubtitle(boolean select) {
+ if (!isInPlaybackState()) {
+ return;
+ }
+ if (select) {
+ if (mSubtitleTrackIndices.size() > 0) {
+ // TODO: make this selection dynamic
+ mSelectedSubtitleTrackIndex = mSubtitleTrackIndices.get(0).first;
+ mSubtitleController.selectTrack(mSubtitleTrackIndices.get(0).second);
+ mMediaPlayer.selectTrack(mSelectedSubtitleTrackIndex);
+ mSubtitleView.setVisibility(View.VISIBLE);
+ }
+ } else {
+ if (mSelectedSubtitleTrackIndex != INVALID_TRACK_INDEX) {
+ mMediaPlayer.deselectTrack(mSelectedSubtitleTrackIndex);
+ mSelectedSubtitleTrackIndex = INVALID_TRACK_INDEX;
+ mSubtitleView.setVisibility(View.GONE);
+ }
+ }
+ }
+
+ private void extractTracks() {
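+ // Partition the MediaPlayer2 track list into video, audio, and subtitle indices and
+ // register subtitle tracks with the SubtitleController.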
+ List<MediaPlayer2.TrackInfo> trackInfos = mMediaPlayer.getTrackInfo();
+ mVideoTrackIndices = new ArrayList<>();
+ mAudioTrackIndices = new ArrayList<>();
+ mSubtitleTrackIndices = new ArrayList<>();
+ mSubtitleController.reset();
+ for (int i = 0; i < trackInfos.size(); ++i) {
+ int trackType = trackInfos.get(i).getTrackType();
+ if (trackType == MediaPlayer2.TrackInfo.MEDIA_TRACK_TYPE_VIDEO) {
+ mVideoTrackIndices.add(i);
+ } else if (trackType == MediaPlayer2.TrackInfo.MEDIA_TRACK_TYPE_AUDIO) {
+ mAudioTrackIndices.add(i);
+ } else if (trackType == MediaPlayer2.TrackInfo.MEDIA_TRACK_TYPE_SUBTITLE
+ || trackType == MediaPlayer2.TrackInfo.MEDIA_TRACK_TYPE_TIMEDTEXT) {
+ SubtitleTrack track = mSubtitleController.addTrack(trackInfos.get(i).getFormat());
+ if (track != null) {
+ mSubtitleTrackIndices.add(new Pair<>(i, track));
+ }
+ }
+ }
+ // Select first tracks as default
+ if (mVideoTrackIndices.size() > 0) {
+ mSelectedVideoTrackIndex = 0;
+ }
+ if (mAudioTrackIndices.size() > 0) {
+ mSelectedAudioTrackIndex = 0;
+ }
+ if (mVideoTrackIndices.size() == 0 && mAudioTrackIndices.size() > 0) {
+ mIsMusicMediaType = true;
+ }
+
+ Bundle data = new Bundle();
+ data.putInt(MediaControlView2Impl.KEY_VIDEO_TRACK_COUNT, mVideoTrackIndices.size());
+ data.putInt(MediaControlView2Impl.KEY_AUDIO_TRACK_COUNT, mAudioTrackIndices.size());
+ data.putInt(MediaControlView2Impl.KEY_SUBTITLE_TRACK_COUNT, mSubtitleTrackIndices.size());
+ if (mSubtitleTrackIndices.size() > 0) {
+ selectOrDeselectSubtitle(mSubtitleEnabled);
+ }
+ mMediaSession.sendSessionEvent(MediaControlView2Impl.EVENT_UPDATE_TRACK_STATUS, data);
+ }
+
+ private void extractMetadata() {
+ // Get and set duration and title values as MediaMetadata for MediaControlView2
+ MediaMetadata.Builder builder = new MediaMetadata.Builder();
+ if (mMetadata != null && mMetadata.has(Metadata.TITLE)) {
+ mTitle = mMetadata.getString(Metadata.TITLE);
+ }
+ builder.putString(MediaMetadata.METADATA_KEY_TITLE, mTitle);
+ builder.putLong(
+ MediaMetadata.METADATA_KEY_DURATION, mMediaPlayer.getDuration());
+
+ if (mMediaSession != null) {
+ mMediaSession.setMetadata(builder.build());
+ }
+ }
+
+ private void extractAudioMetadata() {
+ if (!mIsMusicMediaType) {
+ return;
+ }
+
+ mResources = ApiHelper.getLibResources(mInstance.getContext());
+ mManager = (WindowManager) mInstance.getContext().getApplicationContext()
+ .getSystemService(Context.WINDOW_SERVICE);
+
+ byte[] album = mRetriever.getEmbeddedPicture();
+ if (album != null) {
+ Bitmap bitmap = BitmapFactory.decodeByteArray(album, 0, album.length);
+ mMusicAlbumDrawable = new BitmapDrawable(bitmap);
+
+ // TODO: replace with visualizer
+ Palette.generateAsync(bitmap, new Palette.PaletteAsyncListener() {
+ public void onGenerated(Palette palette) {
+ // TODO: add dominant color for default album image.
+ mDominantColor = palette.getDominantColor(0);
+ if (mMusicView != null) {
+ mMusicView.setBackgroundColor(mDominantColor);
+ }
+ }
+ });
+ } else {
+ mMusicAlbumDrawable = mResources.getDrawable(R.drawable.ic_default_album_image);
+ }
+
+ String title = mRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_TITLE);
+ if (title != null) {
+ mMusicTitleText = title;
+ } else {
+ mMusicTitleText = mResources.getString(R.string.mcv2_music_title_unknown_text);
+ }
+
+ String artist = mRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_ARTIST);
+ if (artist != null) {
+ mMusicArtistText = artist;
+ } else {
+ mMusicArtistText = mResources.getString(R.string.mcv2_music_artist_unknown_text);
+ }
+
+ // Send title and artist string to MediaControlView2
+ MediaMetadata.Builder builder = new MediaMetadata.Builder();
+ builder.putString(MediaMetadata.METADATA_KEY_TITLE, mMusicTitleText);
+ builder.putString(MediaMetadata.METADATA_KEY_ARTIST, mMusicArtistText);
+ mMediaSession.setMetadata(builder.build());
+
+ // Display embedded mode by default
+ mInstance.removeView(mSurfaceView);
+ mInstance.removeView(mTextureView);
+ inflateMusicView(R.layout.embedded_music);
+ }
+
+ private int retrieveOrientation() {
+ DisplayMetrics dm = Resources.getSystem().getDisplayMetrics();
+ int width = dm.widthPixels;
+ int height = dm.heightPixels;
+
+ return (height > width) ?
+ ActivityInfo.SCREEN_ORIENTATION_PORTRAIT :
+ ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE;
+ }
+
+ private void inflateMusicView(int layoutId) {
+ mInstance.removeView(mMusicView);
+
+ View v = ApiHelper.inflateLibLayout(mInstance.getContext(), layoutId);
+ v.setBackgroundColor(mDominantColor);
+
+ ImageView albumView = v.findViewById(R.id.album);
+ if (albumView != null) {
+ albumView.setImageDrawable(mMusicAlbumDrawable);
+ }
+
+ TextView titleView = v.findViewById(R.id.title);
+ if (titleView != null) {
+ titleView.setText(mMusicTitleText);
+ }
+
+ TextView artistView = v.findViewById(R.id.artist);
+ if (artistView != null) {
+ artistView.setText(mMusicArtistText);
+ }
+
+ mMusicView = v;
+ mInstance.addView(mMusicView, 0);
+ }
+
+ OnSubtitleDataListener mSubtitleListener =
+ new OnSubtitleDataListener() {
+ @Override
+ public void onSubtitleData(MediaPlayer2 mp, SubtitleData data) {
+ if (DEBUG) {
+ Log.d(TAG, "onSubtitleData(): getTrackIndex: " + data.getTrackIndex()
+ + ", getCurrentPosition: " + mp.getCurrentPosition()
+ + ", getStartTimeUs(): " + data.getStartTimeUs()
+ + ", diff: "
+ + (data.getStartTimeUs()/1000 - mp.getCurrentPosition())
+ + "ms, getDurationUs(): " + data.getDurationUs()
+ );
+
+ }
+ final int index = data.getTrackIndex();
+ if (index != mSelectedSubtitleTrackIndex) {
+ Log.d(TAG, "onSubtitleData(): getTrackIndex: " + data.getTrackIndex()
+ + ", selected track index: " + mSelectedSubtitleTrackIndex);
+ return;
+ }
+ for (Pair<Integer, SubtitleTrack> p : mSubtitleTrackIndices) {
+ if (p.first == index) {
+ SubtitleTrack track = p.second;
+ track.onData(data);
+ }
+ }
+ }
+ };
+
+ MediaPlayer2EventCallback mMediaPlayer2Callback =
+ new MediaPlayer2EventCallback() {
+ @Override
+ public void onVideoSizeChanged(
+ MediaPlayer2 mp, DataSourceDesc dsd, int width, int height) {
+ if (DEBUG) {
+ Log.d(TAG, "onVideoSizeChanged(): size: " + width + "/" + height);
+ }
+ mVideoWidth = mp.getVideoWidth();
+ mVideoHeight = mp.getVideoHeight();
+ if (DEBUG) {
+ Log.d(TAG, "onVideoSizeChanged(): mVideoSize:" + mVideoWidth + "/"
+ + mVideoHeight);
+ }
+ if (mVideoWidth != 0 && mVideoHeight != 0) {
+ mInstance.requestLayout();
+ }
+ }
+
+ // TODO: Remove timed text related code later once relevant Renderer is defined.
+ // This is just for debugging purposes.
+ @Override
+ public void onTimedText(
+ MediaPlayer2 mp, DataSourceDesc dsd, TimedText text) {
+ Log.d(TAG, "TimedText: " + text.getText());
+ }
+
+ @Override
+ public void onInfo(
+ MediaPlayer2 mp, DataSourceDesc dsd, int what, int extra) {
+ if (what == MediaPlayer2.MEDIA_INFO_METADATA_UPDATE) {
+ extractTracks();
+ } else if (what == MediaPlayer2.MEDIA_INFO_PREPARED) {
+ this.onPrepared(mp, dsd);
+ } else if (what == MediaPlayer2.MEDIA_INFO_PLAYBACK_COMPLETE) {
+ this.onCompletion(mp, dsd);
+ } else if (what == MediaPlayer2.MEDIA_INFO_BUFFERING_UPDATE) {
+ this.onBufferingUpdate(mp, dsd, extra);
+ }
+ }
+
+ @Override
+ public void onError(
+ MediaPlayer2 mp, DataSourceDesc dsd, int frameworkErr, int implErr) {
+ if (DEBUG) {
+ Log.d(TAG, "Error: " + frameworkErr + "," + implErr);
+ }
+ mCurrentState = STATE_ERROR;
+ mTargetState = STATE_ERROR;
+ updatePlaybackState();
+
+ if (mMediaControlView != null) {
+ mMediaControlView.setVisibility(View.GONE);
+ }
+ }
+
+ @Override
+ public void onCallCompleted(MediaPlayer2 mp, DataSourceDesc dsd, int what,
+ int status) {
+ if (what == MediaPlayer2.CALL_COMPLETED_SEEK_TO && status == 0) {
+ updatePlaybackState();
+ }
+ }
+
+ private void onPrepared(MediaPlayer2 mp, DataSourceDesc dsd) {
+ if (DEBUG) {
+ Log.d(TAG, "OnPreparedListener(). mCurrentState=" + mCurrentState
+ + ", mTargetState=" + mTargetState);
+ }
+ mCurrentState = STATE_PREPARED;
+ // Create and set playback state for MediaControlView2
+ updatePlaybackState();
+
+ // TODO: change this to send TrackInfos to MediaControlView2
+ // TODO: create MediaSession when initializing VideoView2
+ if (mMediaSession != null) {
+ extractTracks();
+ extractMetadata();
+ extractAudioMetadata();
+ }
+
+ if (mMediaControlView != null) {
+ mMediaControlView.setEnabled(true);
+ }
+ int videoWidth = mp.getVideoWidth();
+ int videoHeight = mp.getVideoHeight();
+
+ // mSeekWhenPrepared may be changed after seekTo() call
+ long seekToPosition = mSeekWhenPrepared;
+ if (seekToPosition != 0) {
+ mMediaController.getTransportControls().seekTo(seekToPosition);
+ }
+
+ if (videoWidth != 0 && videoHeight != 0) {
+ if (videoWidth != mVideoWidth || videoHeight != mVideoHeight) {
+ if (DEBUG) {
+ Log.i(TAG, "OnPreparedListener() : ");
+ Log.i(TAG, " video size: " + videoWidth + "/" + videoHeight);
+ Log.i(TAG, " measuredSize: " + mInstance.getMeasuredWidth() + "/"
+ + mInstance.getMeasuredHeight());
+ Log.i(TAG, " viewSize: " + mInstance.getWidth() + "/"
+ + mInstance.getHeight());
+ }
+ mVideoWidth = videoWidth;
+ mVideoHeight = videoHeight;
+ mInstance.requestLayout();
+ }
+
+ if (needToStart()) {
+ mMediaController.getTransportControls().play();
+ }
+ } else {
+ // We don't know the video size yet, but should start anyway.
+ // The video size might be reported to us later.
+ if (needToStart()) {
+ mMediaController.getTransportControls().play();
+ }
+ }
+ }
+
+ private void onCompletion(MediaPlayer2 mp, DataSourceDesc dsd) {
+ mCurrentState = STATE_PLAYBACK_COMPLETED;
+ mTargetState = STATE_PLAYBACK_COMPLETED;
+ updatePlaybackState();
+ if (mAudioFocusType != AudioManager.AUDIOFOCUS_NONE) {
+ mAudioManager.abandonAudioFocus(null);
+ }
+ }
+
+ private void onBufferingUpdate(MediaPlayer2 mp, DataSourceDesc dsd, int percent) {
+ mCurrentBufferPercentage = percent;
+ updatePlaybackState();
+ }
+ };
+
+ private class MediaSessionCallback extends MediaSession.Callback {
+ @Override
+ public void onCommand(String command, Bundle args, ResultReceiver receiver) {
+ if (isRemotePlayback()) {
+ mRoutePlayer.onCommand(command, args, receiver);
+ } else {
+ switch (command) {
+ case MediaControlView2Impl.COMMAND_SHOW_SUBTITLE:
+ int subtitleIndex = args.getInt(
+ MediaControlView2Impl.KEY_SELECTED_SUBTITLE_INDEX,
+ INVALID_TRACK_INDEX);
+ if (subtitleIndex != INVALID_TRACK_INDEX) {
+ int subtitleTrackIndex = mSubtitleTrackIndices.get(subtitleIndex).first;
+ if (subtitleTrackIndex != mSelectedSubtitleTrackIndex) {
+ mSelectedSubtitleTrackIndex = subtitleTrackIndex;
+ mInstance.setSubtitleEnabled(true);
+ }
+ }
+ break;
+ case MediaControlView2Impl.COMMAND_HIDE_SUBTITLE:
+ mInstance.setSubtitleEnabled(false);
+ break;
+ case MediaControlView2Impl.COMMAND_SET_FULLSCREEN:
+ if (mFullScreenRequestListener != null) {
+ mFullScreenRequestListener.onFullScreenRequest(
+ mInstance,
+ args.getBoolean(MediaControlView2Impl.ARGUMENT_KEY_FULLSCREEN));
+ }
+ break;
+ case MediaControlView2Impl.COMMAND_SELECT_AUDIO_TRACK:
+ int audioIndex = args.getInt(MediaControlView2Impl.KEY_SELECTED_AUDIO_INDEX,
+ INVALID_TRACK_INDEX);
+ if (audioIndex != INVALID_TRACK_INDEX) {
+ int audioTrackIndex = mAudioTrackIndices.get(audioIndex);
+ if (audioTrackIndex != mSelectedAudioTrackIndex) {
+ mSelectedAudioTrackIndex = audioTrackIndex;
+ mMediaPlayer.selectTrack(mSelectedAudioTrackIndex);
+ }
+ }
+ break;
+ case MediaControlView2Impl.COMMAND_SET_PLAYBACK_SPEED:
+ float speed = args.getFloat(
+ MediaControlView2Impl.KEY_PLAYBACK_SPEED, INVALID_SPEED);
+ if (speed != INVALID_SPEED && speed != mSpeed) {
+ mInstance.setSpeed(speed);
+ mSpeed = speed;
+ }
+ break;
+ case MediaControlView2Impl.COMMAND_MUTE:
+ mVolumeLevel = mAudioManager.getStreamVolume(AudioManager.STREAM_MUSIC);
+ mAudioManager.setStreamVolume(AudioManager.STREAM_MUSIC, 0, 0);
+ break;
+ case MediaControlView2Impl.COMMAND_UNMUTE:
+ mAudioManager.setStreamVolume(AudioManager.STREAM_MUSIC, mVolumeLevel, 0);
+ break;
+ }
+ }
+ showController();
+ }
+
+ @Override
+ public void onCustomAction(String action, Bundle extras) {
+ mCustomActionListenerRecord.first.execute(() ->
+ mCustomActionListenerRecord.second.onCustomAction(action, extras));
+ showController();
+ }
+
+ @Override
+ public void onPlay() {
+ if (isInPlaybackState() && (mCurrentView.hasAvailableSurface() || mIsMusicMediaType)) {
+ if (isRemotePlayback()) {
+ mRoutePlayer.onPlay();
+ } else {
+ applySpeed();
+ mMediaPlayer.play();
+ mCurrentState = STATE_PLAYING;
+ updatePlaybackState();
+ }
+ mCurrentState = STATE_PLAYING;
+ }
+ mTargetState = STATE_PLAYING;
+ if (DEBUG) {
+ Log.d(TAG, "onPlay(). mCurrentState=" + mCurrentState
+ + ", mTargetState=" + mTargetState);
+ }
+ showController();
+ }
+
+ @Override
+ public void onPause() {
+ if (isInPlaybackState()) {
+ if (isRemotePlayback()) {
+ mRoutePlayer.onPause();
+ mCurrentState = STATE_PAUSED;
+ } else if (mMediaPlayer.isPlaying()) {
+ mMediaPlayer.pause();
+ mCurrentState = STATE_PAUSED;
+ updatePlaybackState();
+ }
+ }
+ mTargetState = STATE_PAUSED;
+ if (DEBUG) {
+ Log.d(TAG, "onPause(). mCurrentState=" + mCurrentState
+ + ", mTargetState=" + mTargetState);
+ }
+ showController();
+ }
+
+ @Override
+ public void onSeekTo(long pos) {
+ if (isInPlaybackState()) {
+ if (isRemotePlayback()) {
+ mRoutePlayer.onSeekTo(pos);
+ } else {
+ mMediaPlayer.seekTo(pos, MediaPlayer2.SEEK_PREVIOUS_SYNC);
+ mSeekWhenPrepared = 0;
+ }
+ } else {
+ mSeekWhenPrepared = pos;
+ }
+ showController();
+ }
+
+ @Override
+ public void onStop() {
+ if (isRemotePlayback()) {
+ mRoutePlayer.onStop();
+ } else {
+ resetPlayer();
+ }
+ showController();
+ }
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/widget/VideoViewInterface.java b/packages/MediaComponents/src/com/android/widget/VideoViewInterface.java
new file mode 100644
index 0000000..854d47e
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/widget/VideoViewInterface.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.annotation.NonNull;
+import android.media.MediaPlayer2;
+import android.view.View;
+
+interface VideoViewInterface {
+ /**
+ * Assigns the view's surface to the given MediaPlayer2 instance.
+ *
+ * @param mp MediaPlayer2
+ * @return true if the surface was successfully assigned, false otherwise. The assignment
+ * fails if either the MediaPlayer2 instance or the surface is unavailable.
+ */
+ boolean assignSurfaceToMediaPlayer(MediaPlayer2 mp);
+ void setSurfaceListener(SurfaceListener l);
+ int getViewType();
+ void setMediaPlayer(MediaPlayer2 mp);
+
+ /**
+ * Takes over rendering from oldView, meaning that the MediaPlayer2 will start rendering on
+ * this view. The visibility of oldView will be set to {@link View#GONE}. If this view does
+ * not yet have a MediaPlayer2 instance or its surface is not available, the actual takeover
+ * is deferred until a MediaPlayer2 instance is set via {@link #setMediaPlayer} or the surface
+ * becomes available. {@link SurfaceListener#onSurfaceTakeOverDone} will be called when the
+ * takeover is complete.
+ *
+ * @param oldView The view that MediaPlayer2 is currently rendering on.
+ */
+ void takeOver(@NonNull VideoViewInterface oldView);
+
+ /**
+ * Indicates if the view's surface is available.
+ *
+ * @return true if the surface is available.
+ */
+ boolean hasAvailableSurface();
+
+ /**
+ * A VideoViewInterface implementation calls these surface notification methods on the
+ * listener registered via {@link #setSurfaceListener(SurfaceListener)}.
+ */
+ interface SurfaceListener {
+ void onSurfaceCreated(View view, int width, int height);
+ void onSurfaceDestroyed(View view);
+ void onSurfaceChanged(View view, int width, int height);
+ void onSurfaceTakeOverDone(VideoViewInterface view);
+ }
+}
diff --git a/packages/MediaComponents/src/com/android/widget/ViewGroupImpl.java b/packages/MediaComponents/src/com/android/widget/ViewGroupImpl.java
new file mode 100644
index 0000000..5a06826
--- /dev/null
+++ b/packages/MediaComponents/src/com/android/widget/ViewGroupImpl.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.widget;
+
+import android.media.update.ViewGroupProvider;
+import android.util.AttributeSet;
+import android.view.MotionEvent;
+import android.view.View;
+import android.view.ViewGroup;
+
+public abstract class ViewGroupImpl implements ViewGroupProvider {
+ private final ViewGroupProvider mSuperProvider;
+
+ public ViewGroupImpl(ViewGroup instance,
+ ViewGroupProvider superProvider, ViewGroupProvider privateProvider) {
+ mSuperProvider = superProvider;
+ }
+
+ @Override
+ public void onAttachedToWindow_impl() {
+ mSuperProvider.onAttachedToWindow_impl();
+ }
+
+ @Override
+ public void onDetachedFromWindow_impl() {
+ mSuperProvider.onDetachedFromWindow_impl();
+ }
+
+ @Override
+ public CharSequence getAccessibilityClassName_impl() {
+ return mSuperProvider.getAccessibilityClassName_impl();
+ }
+
+ @Override
+ public boolean onTouchEvent_impl(MotionEvent ev) {
+ return mSuperProvider.onTouchEvent_impl(ev);
+ }
+
+ @Override
+ public boolean onTrackballEvent_impl(MotionEvent ev) {
+ return mSuperProvider.onTrackballEvent_impl(ev);
+ }
+
+ @Override
+ public void onFinishInflate_impl() {
+ mSuperProvider.onFinishInflate_impl();
+ }
+
+ @Override
+ public void setEnabled_impl(boolean enabled) {
+ mSuperProvider.setEnabled_impl(enabled);
+ }
+
+ @Override
+ public void onVisibilityAggregated_impl(boolean isVisible) {
+ mSuperProvider.onVisibilityAggregated_impl(isVisible);
+ }
+
+ @Override
+ public void onLayout_impl(boolean changed, int left, int top, int right, int bottom) {
+ mSuperProvider.onLayout_impl(changed, left, top, right, bottom);
+ }
+
+ @Override
+ public void onMeasure_impl(int widthMeasureSpec, int heightMeasureSpec) {
+ mSuperProvider.onMeasure_impl(widthMeasureSpec, heightMeasureSpec);
+ }
+
+ @Override
+ public int getSuggestedMinimumWidth_impl() {
+ return mSuperProvider.getSuggestedMinimumWidth_impl();
+ }
+
+ @Override
+ public int getSuggestedMinimumHeight_impl() {
+ return mSuperProvider.getSuggestedMinimumHeight_impl();
+ }
+
+ @Override
+ public void setMeasuredDimension_impl(int measuredWidth, int measuredHeight) {
+ mSuperProvider.setMeasuredDimension_impl(measuredWidth, measuredHeight);
+ }
+
+ @Override
+ public boolean dispatchTouchEvent_impl(MotionEvent ev) {
+ return mSuperProvider.dispatchTouchEvent_impl(ev);
+ }
+
+ @Override
+ public boolean checkLayoutParams_impl(ViewGroup.LayoutParams p) {
+ return mSuperProvider.checkLayoutParams_impl(p);
+ }
+
+ @Override
+ public ViewGroup.LayoutParams generateDefaultLayoutParams_impl() {
+ return mSuperProvider.generateDefaultLayoutParams_impl();
+ }
+
+ @Override
+ public ViewGroup.LayoutParams generateLayoutParams_impl(AttributeSet attrs) {
+ return mSuperProvider.generateLayoutParams_impl(attrs);
+ }
+
+ @Override
+ public ViewGroup.LayoutParams generateLayoutParams_impl(ViewGroup.LayoutParams lp) {
+ return mSuperProvider.generateLayoutParams_impl(lp);
+ }
+
+ @Override
+ public boolean shouldDelayChildPressedState_impl() {
+ return mSuperProvider.shouldDelayChildPressedState_impl();
+ }
+
+ @Override
+ public void measureChildWithMargins_impl(View child,
+ int parentWidthMeasureSpec, int widthUsed, int parentHeightMeasureSpec, int heightUsed) {
+ mSuperProvider.measureChildWithMargins_impl(child,
+ parentWidthMeasureSpec, widthUsed, parentHeightMeasureSpec, heightUsed);
+ }
+}
diff --git a/packages/MediaComponents/tests/Android.mk b/packages/MediaComponents/tests/Android.mk
new file mode 100644
index 0000000..dddfd2a
--- /dev/null
+++ b/packages/MediaComponents/tests/Android.mk
@@ -0,0 +1,36 @@
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_STATIC_JAVA_LIBRARIES := \
+ android.test.runner.stubs \
+ android.test.base.stubs \
+ mockito-target-minus-junit4 \
+ junit
+
+LOCAL_SRC_FILES := $(call all-java-files-under, src)
+
+LOCAL_PACKAGE_NAME := MediaComponentsTest
+
+LOCAL_INSTRUMENTATION_FOR := MediaComponents
+
+LOCAL_PRIVATE_PLATFORM_APIS := true
+
+LOCAL_CERTIFICATE := platform
+
+include $(BUILD_PACKAGE)
diff --git a/packages/MediaComponents/tests/AndroidManifest.xml b/packages/MediaComponents/tests/AndroidManifest.xml
new file mode 100644
index 0000000..7255265
--- /dev/null
+++ b/packages/MediaComponents/tests/AndroidManifest.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.media.tests">
+
+ <application android:label="Media API Test">
+ <uses-library android:name="android.test.runner" />
+ </application>
+
+ <!--
+ To run the tests use the command:
+ "adb shell am instrument -w com.android.media.tests/android.test.InstrumentationTestRunner"
+ -->
+ <instrumentation
+ android:name="android.test.InstrumentationTestRunner"
+ android:targetPackage="com.android.media.update"
+ android:label="Media API test" />
+
+</manifest>
diff --git a/packages/MediaComponents/tests/src/com/android/media/SessionPlaylistAgentTest.java b/packages/MediaComponents/tests/src/com/android/media/SessionPlaylistAgentTest.java
new file mode 100644
index 0000000..beb0848
--- /dev/null
+++ b/packages/MediaComponents/tests/src/com/android/media/SessionPlaylistAgentTest.java
@@ -0,0 +1,642 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media;
+
+import static org.mockito.Mockito.*;
+
+import android.content.Context;
+import android.media.AudioAttributes;
+import android.media.DataSourceDesc;
+import android.media.MediaItem2;
+import android.media.MediaMetadata2;
+import android.media.MediaPlayerBase;
+import android.media.MediaPlayerBase.PlayerEventCallback;
+import android.media.MediaPlaylistAgent;
+import android.media.MediaSession2;
+import android.media.MediaSession2.OnDataSourceMissingHelper;
+import android.net.Uri;
+import android.os.Handler;
+import android.os.HandlerThread;
+import android.test.AndroidTestCase;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Matchers;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Executor;
+
+/**
+ * Tests {@link SessionPlaylistAgent}.
+ */
+public class SessionPlaylistAgentTest extends AndroidTestCase {
+ private static final String TAG = "SessionPlaylistAgentTest";
+ private static final int WAIT_TIME_MS = 1000;
+ private static final int INVALID_REPEAT_MODE = -100;
+ private static final int INVALID_SHUFFLE_MODE = -100;
+
+ private Handler mHandler;
+ private Executor mHandlerExecutor;
+
+ private Object mWaitLock = new Object();
+ private Context mContext;
+ private MediaSession2Impl mSessionImpl;
+ private MediaPlayerBase mPlayer;
+ private PlayerEventCallback mPlayerEventCallback;
+ private SessionPlaylistAgent mAgent;
+ private OnDataSourceMissingHelper mDataSourceHelper;
+ private MyPlaylistEventCallback mEventCallback;
+
+ public class MyPlaylistEventCallback extends MediaPlaylistAgent.PlaylistEventCallback {
+ boolean onPlaylistChangedCalled;
+ boolean onPlaylistMetadataChangedCalled;
+ boolean onRepeatModeChangedCalled;
+ boolean onShuffleModeChangedCalled;
+
+ private Object mWaitLock;
+
+ public MyPlaylistEventCallback(Object waitLock) {
+ mWaitLock = waitLock;
+ }
+
+ public void clear() {
+ onPlaylistChangedCalled = false;
+ onPlaylistMetadataChangedCalled = false;
+ onRepeatModeChangedCalled = false;
+ onShuffleModeChangedCalled = false;
+ }
+
+ public void onPlaylistChanged(MediaPlaylistAgent playlistAgent, List<MediaItem2> list,
+ MediaMetadata2 metadata) {
+ synchronized (mWaitLock) {
+ onPlaylistChangedCalled = true;
+ mWaitLock.notify();
+ }
+ }
+
+ public void onPlaylistMetadataChanged(MediaPlaylistAgent playlistAgent,
+ MediaMetadata2 metadata) {
+ synchronized (mWaitLock) {
+ onPlaylistMetadataChangedCalled = true;
+ mWaitLock.notify();
+ }
+ }
+
+ public void onRepeatModeChanged(MediaPlaylistAgent playlistAgent, int repeatMode) {
+ synchronized (mWaitLock) {
+ onRepeatModeChangedCalled = true;
+ mWaitLock.notify();
+ }
+ }
+
+ public void onShuffleModeChanged(MediaPlaylistAgent playlistAgent, int shuffleMode) {
+ synchronized (mWaitLock) {
+ onShuffleModeChangedCalled = true;
+ mWaitLock.notify();
+ }
+ }
+ }
+
+ public class MyDataSourceHelper implements OnDataSourceMissingHelper {
+ @Override
+ public DataSourceDesc onDataSourceMissing(MediaSession2 session, MediaItem2 item) {
+ if (item.getMediaId().contains("WITHOUT_DSD")) {
+ return null;
+ }
+ return new DataSourceDesc.Builder()
+ .setDataSource(getContext(), Uri.parse("dsd://test"))
+ .setMediaId(item.getMediaId())
+ .build();
+ }
+ }
+
+ public class MockPlayer extends MediaPlayerBase {
+ @Override
+ public void play() {
+ }
+
+ @Override
+ public void prepare() {
+ }
+
+ @Override
+ public void pause() {
+ }
+
+ @Override
+ public void reset() {
+ }
+
+ @Override
+ public void skipToNext() {
+ }
+
+ @Override
+ public void seekTo(long pos) {
+ }
+
+ @Override
+ public int getPlayerState() {
+ return 0;
+ }
+
+ @Override
+ public int getBufferingState() {
+ return 0;
+ }
+
+ @Override
+ public void setAudioAttributes(AudioAttributes attributes) {
+ }
+
+ @Override
+ public AudioAttributes getAudioAttributes() {
+ return null;
+ }
+
+ @Override
+ public void setDataSource(DataSourceDesc dsd) {
+ }
+
+ @Override
+ public void setNextDataSource(DataSourceDesc dsd) {
+ }
+
+ @Override
+ public void setNextDataSources(List<DataSourceDesc> dsds) {
+ }
+
+ @Override
+ public DataSourceDesc getCurrentDataSource() {
+ return null;
+ }
+
+ @Override
+ public void loopCurrent(boolean loop) {
+ }
+
+ @Override
+ public void setPlaybackSpeed(float speed) {
+ }
+
+ @Override
+ public void setPlayerVolume(float volume) {
+ }
+
+ @Override
+ public float getPlayerVolume() {
+ return 0;
+ }
+
+ @Override
+ public void registerPlayerEventCallback(Executor e, PlayerEventCallback cb) {
+ }
+
+ @Override
+ public void unregisterPlayerEventCallback(PlayerEventCallback cb) {
+ }
+
+ @Override
+ public void close() throws Exception {
+ }
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ mContext = getContext();
+ // Workaround for dexmaker bug: https://code.google.com/p/dexmaker/issues/detail?id=2
+ // Dexmaker is used by mockito.
+ System.setProperty("dexmaker.dexcache", mContext.getCacheDir().getPath());
+
+ HandlerThread handlerThread = new HandlerThread("SessionPlaylistAgent");
+ handlerThread.start();
+ mHandler = new Handler(handlerThread.getLooper());
+ mHandlerExecutor = (runnable) -> {
+ mHandler.post(runnable);
+ };
+
+ mPlayer = mock(MockPlayer.class);
+ doAnswer(invocation -> {
+ Object[] args = invocation.getArguments();
+ mPlayerEventCallback = (PlayerEventCallback) args[1];
+ return null;
+ }).when(mPlayer).registerPlayerEventCallback(Matchers.any(), Matchers.any());
+
+ mSessionImpl = mock(MediaSession2Impl.class);
+ mDataSourceHelper = new MyDataSourceHelper();
+ mAgent = new SessionPlaylistAgent(mSessionImpl, mPlayer);
+ mAgent.setOnDataSourceMissingHelper(mDataSourceHelper);
+ mEventCallback = new MyPlaylistEventCallback(mWaitLock);
+ mAgent.registerPlaylistEventCallback(mHandlerExecutor, mEventCallback);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ mHandler.getLooper().quitSafely();
+ mHandler = null;
+ mHandlerExecutor = null;
+ }
+
+ @Test
+ public void testSetAndGetShuffleMode() throws Exception {
+ int shuffleMode = mAgent.getShuffleMode();
+ if (shuffleMode != MediaPlaylistAgent.SHUFFLE_MODE_NONE) {
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setShuffleMode(MediaPlaylistAgent.SHUFFLE_MODE_NONE);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onShuffleModeChangedCalled);
+ }
+ assertEquals(MediaPlaylistAgent.SHUFFLE_MODE_NONE, mAgent.getShuffleMode());
+ }
+
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setShuffleMode(MediaPlaylistAgent.SHUFFLE_MODE_ALL);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onShuffleModeChangedCalled);
+ }
+ assertEquals(MediaPlaylistAgent.SHUFFLE_MODE_ALL, mAgent.getShuffleMode());
+
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setShuffleMode(MediaPlaylistAgent.SHUFFLE_MODE_GROUP);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onShuffleModeChangedCalled);
+ }
+ assertEquals(MediaPlaylistAgent.SHUFFLE_MODE_GROUP, mAgent.getShuffleMode());
+
+ // INVALID_SHUFFLE_MODE will not change the shuffle mode.
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setShuffleMode(INVALID_SHUFFLE_MODE);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertFalse(mEventCallback.onShuffleModeChangedCalled);
+ }
+ assertEquals(MediaPlaylistAgent.SHUFFLE_MODE_GROUP, mAgent.getShuffleMode());
+ }
+
+ @Test
+ public void testSetAndGetRepeatMode() throws Exception {
+ int repeatMode = mAgent.getRepeatMode();
+ if (repeatMode != MediaPlaylistAgent.REPEAT_MODE_NONE) {
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setRepeatMode(MediaPlaylistAgent.REPEAT_MODE_NONE);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onRepeatModeChangedCalled);
+ }
+ assertEquals(MediaPlaylistAgent.REPEAT_MODE_NONE, mAgent.getRepeatMode());
+ }
+
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setRepeatMode(MediaPlaylistAgent.REPEAT_MODE_ONE);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onRepeatModeChangedCalled);
+ }
+ assertEquals(MediaPlaylistAgent.REPEAT_MODE_ONE, mAgent.getRepeatMode());
+
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setRepeatMode(MediaPlaylistAgent.REPEAT_MODE_ALL);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onRepeatModeChangedCalled);
+ }
+ assertEquals(MediaPlaylistAgent.REPEAT_MODE_ALL, mAgent.getRepeatMode());
+
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setRepeatMode(MediaPlaylistAgent.REPEAT_MODE_GROUP);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onRepeatModeChangedCalled);
+ }
+ assertEquals(MediaPlaylistAgent.REPEAT_MODE_GROUP, mAgent.getRepeatMode());
+
+ // INVALID_REPEAT_MODE will not change the repeat mode.
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setRepeatMode(INVALID_REPEAT_MODE);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertFalse(mEventCallback.onRepeatModeChangedCalled);
+ }
+ assertEquals(MediaPlaylistAgent.REPEAT_MODE_GROUP, mAgent.getRepeatMode());
+ }
+
+ @Test
+ public void testSetPlaylist() throws Exception {
+ int listSize = 10;
+ createAndSetPlaylist(listSize);
+ assertEquals(listSize, mAgent.getPlaylist().size());
+ assertEquals(0, mAgent.getCurShuffledIndex());
+ }
+
+ @Test
+ public void testSkipItems() throws Exception {
+ int listSize = 5;
+ List<MediaItem2> playlist = createAndSetPlaylist(listSize);
+
+ mAgent.setRepeatMode(MediaPlaylistAgent.REPEAT_MODE_NONE);
+ // Test skipToPlaylistItem
+ for (int i = listSize - 1; i >= 0; --i) {
+ mAgent.skipToPlaylistItem(playlist.get(i));
+ assertEquals(i, mAgent.getCurShuffledIndex());
+ }
+
+ // Test skipToNextItem
+ // curPlayPos = 0
+ for (int curPlayPos = 0; curPlayPos < listSize - 1; ++curPlayPos) {
+ mAgent.skipToNextItem();
+ assertEquals(curPlayPos + 1, mAgent.getCurShuffledIndex());
+ }
+ mAgent.skipToNextItem();
+ assertEquals(listSize - 1, mAgent.getCurShuffledIndex());
+
+ // Test skipToPrevious
+ // curPlayPos = listSize - 1
+ for (int curPlayPos = listSize - 1; curPlayPos > 0; --curPlayPos) {
+ mAgent.skipToPreviousItem();
+ assertEquals(curPlayPos - 1, mAgent.getCurShuffledIndex());
+ }
+ mAgent.skipToPreviousItem();
+ assertEquals(0, mAgent.getCurShuffledIndex());
+
+ mAgent.setRepeatMode(MediaPlaylistAgent.REPEAT_MODE_ALL);
+ // Test skipToPrevious with repeat mode all
+ // curPlayPos = 0
+ mAgent.skipToPreviousItem();
+ assertEquals(listSize - 1, mAgent.getCurShuffledIndex());
+
+ // Test skipToNext with repeat mode all
+ // curPlayPos = listSize - 1
+ mAgent.skipToNextItem();
+ assertEquals(0, mAgent.getCurShuffledIndex());
+
+ mAgent.skipToPreviousItem();
+ // curPlayPos = listSize - 1, nextPlayPos = 0
+ // Test next play pos after setting repeat mode none.
+ mAgent.setRepeatMode(MediaPlaylistAgent.REPEAT_MODE_NONE);
+ assertEquals(listSize - 1, mAgent.getCurShuffledIndex());
+ }
+
+ @Test
+ public void testEditPlaylist() throws Exception {
+ int listSize = 5;
+ List<MediaItem2> playlist = createAndSetPlaylist(listSize);
+
+ // Test add item: [0 (cur), 1, 2, 3, 4] -> [0 (cur), 1, 5, 2, 3, 4]
+ mEventCallback.clear();
+ MediaItem2 item_5 = generateMediaItem(5);
+ synchronized (mWaitLock) {
+ playlist.add(2, item_5);
+ mAgent.addPlaylistItem(2, item_5);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+
+ mEventCallback.clear();
+ // Move current: [0 (cur), 1, 5, 2, 3, 4] -> [0, 1, 5 (cur), 2, 3, 4]
+ mAgent.skipToPlaylistItem(item_5);
+ // Remove current item: [0, 1, 5 (cur), 2, 3, 4] -> [0, 1, 2 (cur), 3, 4]
+ synchronized (mWaitLock) {
+ playlist.remove(item_5);
+ mAgent.removePlaylistItem(item_5);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(2, mAgent.getCurShuffledIndex());
+
+ // Remove previous item: [0, 1, 2 (cur), 3, 4] -> [0, 2 (cur), 3, 4]
+ mEventCallback.clear();
+ MediaItem2 previousItem = playlist.get(1);
+ synchronized (mWaitLock) {
+ playlist.remove(previousItem);
+ mAgent.removePlaylistItem(previousItem);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(1, mAgent.getCurShuffledIndex());
+
+ // Remove next item: [0, 2 (cur), 3, 4] -> [0, 2 (cur), 4]
+ mEventCallback.clear();
+ MediaItem2 nextItem = playlist.get(2);
+ synchronized (mWaitLock) {
+ playlist.remove(nextItem);
+ mAgent.removePlaylistItem(nextItem);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(1, mAgent.getCurShuffledIndex());
+
+ // Replace item: [0, 2 (cur), 4] -> [0, 2 (cur), 5]
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ playlist.set(2, item_5);
+ mAgent.replacePlaylistItem(2, item_5);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(1, mAgent.getCurShuffledIndex());
+
+ // Move last and remove the last item: [0, 2 (cur), 5] -> [0, 2, 5 (cur)] -> [0, 2 (cur)]
+ MediaItem2 lastItem = playlist.get(1);
+ mAgent.skipToPlaylistItem(lastItem);
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ playlist.remove(lastItem);
+ mAgent.removePlaylistItem(lastItem);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(1, mAgent.getCurShuffledIndex());
+
+ // Remove all items
+ for (int i = playlist.size() - 1; i >= 0; --i) {
+ MediaItem2 item = playlist.get(i);
+ mAgent.skipToPlaylistItem(item);
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ playlist.remove(item);
+ mAgent.removePlaylistItem(item);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ }
+ assertEquals(SessionPlaylistAgent.NO_VALID_ITEMS, mAgent.getCurShuffledIndex());
+ }
+
+
+ @Test
+ public void testPlaylistWithInvalidItem() throws Exception {
+ int listSize = 2;
+ List<MediaItem2> playlist = createAndSetPlaylist(listSize);
+
+ // Add item: [0 (cur), 1] -> [0 (cur), 2 (no_dsd), 1]
+ mEventCallback.clear();
+ MediaItem2 invalidItem2 = generateMediaItemWithoutDataSourceDesc(2);
+ synchronized (mWaitLock) {
+ playlist.add(1, invalidItem2);
+ mAgent.addPlaylistItem(1, invalidItem2);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(0, mAgent.getCurShuffledIndex());
+
+ // Test skip to next item: [0 (cur), 2 (no_dsd), 1] -> [0, 2 (no_dsd), 1 (cur)]
+ mAgent.skipToNextItem();
+ assertEquals(2, mAgent.getCurShuffledIndex());
+
+ // Test skip to previous item: [0, 2 (no_dsd), 1 (cur)] -> [0 (cur), 2 (no_dsd), 1]
+ mAgent.skipToPreviousItem();
+ assertEquals(0, mAgent.getCurShuffledIndex());
+
+ // Remove current item: [0 (cur), 2 (no_dsd), 1] -> [2 (no_dsd), 1 (cur)]
+ mEventCallback.clear();
+ MediaItem2 item = playlist.get(0);
+ synchronized (mWaitLock) {
+ playlist.remove(item);
+ mAgent.removePlaylistItem(item);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(1, mAgent.getCurShuffledIndex());
+
+ // Remove current item: [2 (no_dsd), 1 (cur)] -> [2 (no_dsd)]
+ mEventCallback.clear();
+ item = playlist.get(1);
+ synchronized (mWaitLock) {
+ playlist.remove(item);
+ mAgent.removePlaylistItem(item);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(SessionPlaylistAgent.NO_VALID_ITEMS, mAgent.getCurShuffledIndex());
+
+ // Add invalid item: [2 (no_dsd)] -> [0 (no_dsd), 2 (no_dsd)]
+ MediaItem2 invalidItem0 = generateMediaItemWithoutDataSourceDesc(0);
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ playlist.add(0, invalidItem0);
+ mAgent.addPlaylistItem(0, invalidItem0);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(SessionPlaylistAgent.NO_VALID_ITEMS, mAgent.getCurShuffledIndex());
+
+ // Add valid item: [0 (no_dsd), 2 (no_dsd)] -> [0 (no_dsd), 1, 2 (no_dsd)]
+ MediaItem2 validItem1 = generateMediaItem(1);
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ playlist.add(1, validItem1);
+ mAgent.addPlaylistItem(1, validItem1);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(1, mAgent.getCurShuffledIndex());
+
+ // Replace the valid item with an invalid item:
+ // [0 (no_dsd), 1 (cur), 2 (no_dsd)] -> [0 (no_dsd), 3 (no_dsd), 2 (no_dsd)]
+ MediaItem2 invalidItem3 = generateMediaItemWithoutDataSourceDesc(3);
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ playlist.set(1, invalidItem3);
+ mAgent.replacePlaylistItem(1, invalidItem3);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ assertPlaylistEquals(playlist, mAgent.getPlaylist());
+ assertEquals(SessionPlaylistAgent.END_OF_PLAYLIST, mAgent.getCurShuffledIndex());
+ }
+
+ @Test
+ public void testPlaylistAfterOnCurrentDataSourceChanged() throws Exception {
+ int listSize = 2;
+ verify(mPlayer).registerPlayerEventCallback(Matchers.any(), Matchers.any());
+
+ createAndSetPlaylist(listSize);
+ assertEquals(0, mAgent.getCurShuffledIndex());
+
+ mPlayerEventCallback.onCurrentDataSourceChanged(mPlayer, null);
+ assertEquals(1, mAgent.getCurShuffledIndex());
+ mPlayerEventCallback.onCurrentDataSourceChanged(mPlayer, null);
+ assertEquals(SessionPlaylistAgent.END_OF_PLAYLIST, mAgent.getCurShuffledIndex());
+
+ mAgent.skipToNextItem();
+ assertEquals(SessionPlaylistAgent.END_OF_PLAYLIST, mAgent.getCurShuffledIndex());
+
+ mAgent.setRepeatMode(MediaPlaylistAgent.REPEAT_MODE_ONE);
+ assertEquals(SessionPlaylistAgent.END_OF_PLAYLIST, mAgent.getCurShuffledIndex());
+
+ mAgent.setRepeatMode(MediaPlaylistAgent.REPEAT_MODE_ALL);
+ assertEquals(0, mAgent.getCurShuffledIndex());
+ mPlayerEventCallback.onCurrentDataSourceChanged(mPlayer, null);
+ assertEquals(1, mAgent.getCurShuffledIndex());
+ mPlayerEventCallback.onCurrentDataSourceChanged(mPlayer, null);
+ assertEquals(0, mAgent.getCurShuffledIndex());
+ }
+
+ private List<MediaItem2> createAndSetPlaylist(int listSize) throws Exception {
+ List<MediaItem2> items = new ArrayList<>();
+ for (int i = 0; i < listSize; ++i) {
+ items.add(generateMediaItem(i));
+ }
+ mEventCallback.clear();
+ synchronized (mWaitLock) {
+ mAgent.setPlaylist(items, null);
+ mWaitLock.wait(WAIT_TIME_MS);
+ assertTrue(mEventCallback.onPlaylistChangedCalled);
+ }
+ return items;
+ }
+
+ private void assertPlaylistEquals(List<MediaItem2> expected, List<MediaItem2> actual) {
+ if (expected == actual) {
+ return;
+ }
+ assertTrue(expected != null && actual != null);
+ assertEquals(expected.size(), actual.size());
+ for (int i = 0; i < expected.size(); ++i) {
+ assertTrue(expected.get(i).equals(actual.get(i)));
+ }
+ }
+
+ private MediaItem2 generateMediaItemWithoutDataSourceDesc(int key) {
+ return new MediaItem2.Builder(0)
+ .setMediaId("TEST_MEDIA_ID_WITHOUT_DSD_" + key)
+ .build();
+ }
+
+ private MediaItem2 generateMediaItem(int key) {
+ return new MediaItem2.Builder(0)
+ .setMediaId("TEST_MEDIA_ID_" + key)
+ .build();
+ }
+}
diff --git a/packages/OWNERS b/packages/OWNERS
new file mode 100644
index 0000000..bbc4cef
--- /dev/null
+++ b/packages/OWNERS
@@ -0,0 +1,6 @@
+akersten@google.com
+dwkang@google.com
+jaewan@google.com
+marcone@google.com
+sungsoo@google.com
+wjia@google.com
diff --git a/services/OWNERS b/services/OWNERS
index d500dce..d5d00da 100644
--- a/services/OWNERS
+++ b/services/OWNERS
@@ -1,4 +1,4 @@
elaurent@google.com
etalvala@google.com
-gkasten@android.com
+gkasten@google.com
hunga@google.com
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index d0454d4..7419e64 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -51,6 +51,7 @@
libmedialogservice \
libmediautils \
libnbaio \
+ libnblog \
libpowermanager \
libserviceutility \
libmediautils \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index fbc17c8..bdd39c6 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -28,6 +28,7 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
+#include <cutils/multiuser.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <binder/Parcel.h>
@@ -55,6 +56,7 @@
#include <system/audio_effects/effect_aec.h>
#include <audio_utils/primitives.h>
+#include <audio_utils/string.h>
#include <powermanager/PowerManager.h>
@@ -155,6 +157,8 @@
mBtNrecIsOff(false),
mIsLowRamDevice(true),
mIsDeviceTypeKnown(false),
+ mTotalMemory(0),
+ mClientSharedHeapSize(kMinimumClientSharedHeapSizeBytes),
mGlobalEffectEnableTime(0),
mSystemReady(false)
{
@@ -262,6 +266,7 @@
audio_config_base_t *config,
const AudioClient& client,
audio_port_handle_t *deviceId,
+ audio_session_t *sessionId,
const sp<MmapStreamCallback>& callback,
sp<MmapStreamInterface>& interface,
audio_port_handle_t *handle)
@@ -274,7 +279,8 @@
status_t ret = NO_INIT;
if (af != 0) {
ret = af->openMmapStream(
- direction, attr, config, client, deviceId, callback, interface, handle);
+ direction, attr, config, client, deviceId,
+ sessionId, callback, interface, handle);
}
return ret;
}
@@ -284,6 +290,7 @@
audio_config_base_t *config,
const AudioClient& client,
audio_port_handle_t *deviceId,
+ audio_session_t *sessionId,
const sp<MmapStreamCallback>& callback,
sp<MmapStreamInterface>& interface,
audio_port_handle_t *handle)
@@ -292,8 +299,10 @@
if (ret != NO_ERROR) {
return ret;
}
-
- audio_session_t sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ audio_session_t actualSessionId = *sessionId;
+ if (actualSessionId == AUDIO_SESSION_ALLOCATE) {
+ actualSessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ }
audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT;
audio_io_handle_t io = AUDIO_IO_HANDLE_NONE;
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
@@ -303,17 +312,18 @@
fullConfig.channel_mask = config->channel_mask;
fullConfig.format = config->format;
ret = AudioSystem::getOutputForAttr(attr, &io,
- sessionId,
- &streamType, client.clientUid,
+ actualSessionId,
+ &streamType, client.clientPid, client.clientUid,
&fullConfig,
(audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ |
AUDIO_OUTPUT_FLAG_DIRECT),
deviceId, &portId);
} else {
ret = AudioSystem::getInputForAttr(attr, &io,
- sessionId,
+ actualSessionId,
client.clientPid,
client.clientUid,
+ client.packageName,
config,
AUDIO_INPUT_FLAG_MMAP_NOIRQ, deviceId, &portId);
}
@@ -326,13 +336,14 @@
sp<MmapThread> thread = mMmapThreads.valueFor(io);
if (thread != 0) {
interface = new MmapThreadHandle(thread);
- thread->configure(attr, streamType, sessionId, callback, *deviceId, portId);
+ thread->configure(attr, streamType, actualSessionId, callback, *deviceId, portId);
*handle = portId;
+ *sessionId = actualSessionId;
} else {
if (direction == MmapStreamInterface::DIRECTION_OUTPUT) {
- AudioSystem::releaseOutput(io, streamType, sessionId);
+ AudioSystem::releaseOutput(io, streamType, actualSessionId);
} else {
- AudioSystem::releaseInput(io, sessionId);
+ AudioSystem::releaseInput(portId);
}
ret = NO_INIT;
}
@@ -641,38 +652,56 @@
// IAudioFlinger interface
-
-sp<IAudioTrack> AudioFlinger::createTrack(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t *frameCount,
- audio_output_flags_t *flags,
- const sp<IMemory>& sharedBuffer,
- audio_io_handle_t output,
- pid_t pid,
- pid_t tid,
- audio_session_t *sessionId,
- int clientUid,
- status_t *status,
- audio_port_handle_t portId)
+sp<IAudioTrack> AudioFlinger::createTrack(const CreateTrackInput& input,
+ CreateTrackOutput& output,
+ status_t *status)
{
sp<PlaybackThread::Track> track;
sp<TrackHandle> trackHandle;
sp<Client> client;
status_t lStatus;
- audio_session_t lSessionId;
+ audio_stream_type_t streamType;
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+ bool updatePid = (input.clientInfo.clientPid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (pid == -1 || !isTrustedCallingUid(callingUid)) {
+ uid_t clientUid = input.clientInfo.clientUid;
+ if (!isTrustedCallingUid(callingUid)) {
+ ALOGW_IF(clientUid != callingUid,
+ "%s uid %d tried to pass itself off as %d",
+ __FUNCTION__, callingUid, clientUid);
+ clientUid = callingUid;
+ updatePid = true;
+ }
+ pid_t clientPid = input.clientInfo.clientPid;
+ if (updatePid) {
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
- ALOGW_IF(pid != -1 && pid != callingPid,
+ ALOGW_IF(clientPid != -1 && clientPid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
- __func__, callingUid, callingPid, pid);
- pid = callingPid;
+ __func__, callingUid, callingPid, clientPid);
+ clientPid = callingPid;
}
+ audio_session_t sessionId = input.sessionId;
+ if (sessionId == AUDIO_SESSION_ALLOCATE) {
+ sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+
+ output.sessionId = sessionId;
+ output.outputId = AUDIO_IO_HANDLE_NONE;
+ output.selectedDeviceId = input.selectedDeviceId;
+
+ lStatus = AudioSystem::getOutputForAttr(&input.attr, &output.outputId, sessionId, &streamType,
+ clientPid, clientUid, &input.config, input.flags,
+ &output.selectedDeviceId, &portId);
+
+ if (lStatus != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
+ ALOGE("createTrack() getOutputForAttr() return error %d or invalid output handle", lStatus);
+ goto Exit;
+ }
// client AudioTrack::set already implements AUDIO_STREAM_DEFAULT => AUDIO_STREAM_MUSIC,
// but if someone uses binder directly they could bypass that and cause us to crash
if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
@@ -681,91 +710,76 @@
goto Exit;
}
- // further sample rate checks are performed by createTrack_l() depending on the thread type
- if (sampleRate == 0) {
- ALOGE("createTrack() invalid sample rate %u", sampleRate);
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
// further channel mask checks are performed by createTrack_l() depending on the thread type
- if (!audio_is_output_channel(channelMask)) {
- ALOGE("createTrack() invalid channel mask %#x", channelMask);
+ if (!audio_is_output_channel(input.config.channel_mask)) {
+ ALOGE("createTrack() invalid channel mask %#x", input.config.channel_mask);
lStatus = BAD_VALUE;
goto Exit;
}
// further format checks are performed by createTrack_l() depending on the thread type
- if (!audio_is_valid_format(format)) {
- ALOGE("createTrack() invalid format %#x", format);
- lStatus = BAD_VALUE;
- goto Exit;
- }
-
- if (sharedBuffer != 0 && sharedBuffer->pointer() == NULL) {
- ALOGE("createTrack() sharedBuffer is non-0 but has NULL pointer()");
+ if (!audio_is_valid_format(input.config.format)) {
+ ALOGE("createTrack() invalid format %#x", input.config.format);
lStatus = BAD_VALUE;
goto Exit;
}
{
Mutex::Autolock _l(mLock);
- PlaybackThread *thread = checkPlaybackThread_l(output);
+ PlaybackThread *thread = checkPlaybackThread_l(output.outputId);
if (thread == NULL) {
- ALOGE("no playback thread found for output handle %d", output);
+ ALOGE("no playback thread found for output handle %d", output.outputId);
lStatus = BAD_VALUE;
goto Exit;
}
- client = registerPid(pid);
+ client = registerPid(clientPid);
PlaybackThread *effectThread = NULL;
- if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
- if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
- ALOGE("createTrack() invalid session ID %d", *sessionId);
- lStatus = BAD_VALUE;
- goto Exit;
- }
- lSessionId = *sessionId;
- // check if an effect chain with the same session ID is present on another
- // output thread and move it here.
- for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
- if (mPlaybackThreads.keyAt(i) != output) {
- uint32_t sessions = t->hasAudioSession(lSessionId);
- if (sessions & ThreadBase::EFFECT_SESSION) {
- effectThread = t.get();
- break;
- }
+ // check if an effect chain with the same session ID is present on another
+ // output thread and move it here.
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
+ if (mPlaybackThreads.keyAt(i) != output.outputId) {
+ uint32_t sessions = t->hasAudioSession(sessionId);
+ if (sessions & ThreadBase::EFFECT_SESSION) {
+ effectThread = t.get();
+ break;
}
}
- } else {
- // if no audio session id is provided, create one here
- lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
- if (sessionId != NULL) {
- *sessionId = lSessionId;
- }
}
- ALOGV("createTrack() lSessionId: %d", lSessionId);
+ ALOGV("createTrack() sessionId: %d", sessionId);
- track = thread->createTrack_l(client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, lSessionId, flags, tid,
- clientUid, &lStatus, portId);
+ output.sampleRate = input.config.sample_rate;
+ output.frameCount = input.frameCount;
+ output.notificationFrameCount = input.notificationFrameCount;
+ output.flags = input.flags;
+
+ track = thread->createTrack_l(client, streamType, input.attr, &output.sampleRate,
+ input.config.format, input.config.channel_mask,
+ &output.frameCount, &output.notificationFrameCount,
+ input.notificationsPerBuffer, input.speed,
+ input.sharedBuffer, sessionId, &output.flags,
+ input.clientInfo.clientTid, clientUid, &lStatus, portId);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
// we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
+ output.afFrameCount = thread->frameCount();
+ output.afSampleRate = thread->sampleRate();
+ output.afLatencyMs = thread->latency();
+
// move effect chain to this output thread if an effect on same session was waiting
// for a track to be created
if (lStatus == NO_ERROR && effectThread != NULL) {
// no risk of deadlock because AudioFlinger::mLock is held
Mutex::Autolock _dl(thread->mLock);
Mutex::Autolock _sl(effectThread->mLock);
- moveEffectChain_l(lSessionId, effectThread, thread, true);
+ moveEffectChain_l(sessionId, effectThread, thread, true);
}
// Look for sync events awaiting for a session to be used.
for (size_t i = 0; i < mPendingSyncEvents.size(); i++) {
- if (mPendingSyncEvents[i]->triggerSession() == lSessionId) {
+ if (mPendingSyncEvents[i]->triggerSession() == sessionId) {
if (thread->isValidSyncEvent(mPendingSyncEvents[i])) {
if (lStatus == NO_ERROR) {
(void) track->setSyncEvent(mPendingSyncEvents[i]);
@@ -778,7 +792,7 @@
}
}
- setAudioHwSyncForSession_l(thread, lSessionId);
+ setAudioHwSyncForSession_l(thread, sessionId);
}
if (lStatus != NO_ERROR) {
@@ -798,6 +812,9 @@
trackHandle = new TrackHandle(track);
Exit:
+ if (lStatus != NO_ERROR && output.outputId != AUDIO_IO_HANDLE_NONE) {
+ AudioSystem::releaseOutput(output.outputId, streamType, sessionId);
+ }
*status = lStatus;
return trackHandle;
}
@@ -981,6 +998,19 @@
return mute;
}
+void AudioFlinger::setRecordSilenced(uid_t uid, bool silenced)
+{
+ ALOGV("AudioFlinger::setRecordSilenced(uid:%d, silenced:%d)", uid, silenced);
+
+ AutoMutex lock(mLock);
+ for (size_t i = 0; i < mRecordThreads.size(); i++) {
+ mRecordThreads[i]->setRecordSilenced(uid, silenced);
+ }
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ mMmapThreads[i]->setRecordSilenced(uid, silenced);
+ }
+}
+
status_t AudioFlinger::setMasterMute(bool muted)
{
status_t ret = initCheck();
@@ -1069,26 +1099,17 @@
if (status != NO_ERROR) {
return status;
}
+ if (output == AUDIO_IO_HANDLE_NONE) {
+ return BAD_VALUE;
+ }
ALOG_ASSERT(stream != AUDIO_STREAM_PATCH, "attempt to change AUDIO_STREAM_PATCH volume");
AutoMutex lock(mLock);
- Vector<VolumeInterface *> volumeInterfaces;
- if (output != AUDIO_IO_HANDLE_NONE) {
- VolumeInterface *volumeInterface = getVolumeInterface_l(output);
- if (volumeInterface == NULL) {
- return BAD_VALUE;
- }
- volumeInterfaces.add(volumeInterface);
+ VolumeInterface *volumeInterface = getVolumeInterface_l(output);
+ if (volumeInterface == NULL) {
+ return BAD_VALUE;
}
-
- mStreamTypes[stream].volume = value;
-
- if (volumeInterfaces.size() == 0) {
- volumeInterfaces = getAllVolumeInterfaces_l();
- }
- for (size_t i = 0; i < volumeInterfaces.size(); i++) {
- volumeInterfaces[i]->setStreamVolume(stream, value);
- }
+ volumeInterface->setStreamVolume(stream, value);
return NO_ERROR;
}
@@ -1127,21 +1148,17 @@
if (status != NO_ERROR) {
return 0.0f;
}
-
- AutoMutex lock(mLock);
- float volume;
- if (output != AUDIO_IO_HANDLE_NONE) {
- VolumeInterface *volumeInterface = getVolumeInterface_l(output);
- if (volumeInterface != NULL) {
- volume = volumeInterface->streamVolume(stream);
- } else {
- volume = 0.0f;
- }
- } else {
- volume = streamVolume_l(stream);
+ if (output == AUDIO_IO_HANDLE_NONE) {
+ return 0.0f;
}
- return volume;
+ AutoMutex lock(mLock);
+ VolumeInterface *volumeInterface = getVolumeInterface_l(output);
+ if (volumeInterface == NULL) {
+ return 0.0f;
+ }
+
+ return volumeInterface->streamVolume(stream);
}
bool AudioFlinger::streamMute(audio_stream_type_t stream) const
@@ -1163,16 +1180,59 @@
}
}
+// Filter reserved keys from setParameters() before forwarding them to the audio HAL or acting
+// upon them. Some keys are used for audio routing and audio path configuration and should be
+// reserved for use by audio policy and audio flinger for functional, privacy and security reasons.
+void AudioFlinger::filterReservedParameters(String8& keyValuePairs, uid_t callingUid)
+{
+ static const String8 kReservedParameters[] = {
+ String8(AudioParameter::keyRouting),
+ String8(AudioParameter::keySamplingRate),
+ String8(AudioParameter::keyFormat),
+ String8(AudioParameter::keyChannels),
+ String8(AudioParameter::keyFrameCount),
+ String8(AudioParameter::keyInputSource),
+ String8(AudioParameter::keyMonoOutput),
+ String8(AudioParameter::keyStreamConnect),
+ String8(AudioParameter::keyStreamDisconnect),
+ String8(AudioParameter::keyStreamSupportedFormats),
+ String8(AudioParameter::keyStreamSupportedChannels),
+ String8(AudioParameter::keyStreamSupportedSamplingRates),
+ };
+
+ // multi-user aware app ID check: requests coming from audioserver itself are not filtered
+ if (multiuser_get_app_id(callingUid) == AID_AUDIOSERVER) {
+ return;
+ }
+
+ AudioParameter param = AudioParameter(keyValuePairs);
+ String8 value;
+ for (auto& key : kReservedParameters) {
+ if (param.get(key, value) == NO_ERROR) {
+ ALOGW("%s: filtering key %s value %s from uid %d",
+ __func__, key.string(), value.string(), callingUid);
+ param.remove(key);
+ }
+ }
+ keyValuePairs = param.toString();
+}
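For illustration only, a minimal standalone sketch of the same filtering idea: parse a "key=value;key=value" string, drop reserved keys unless the caller is trusted, and rebuild the string. It uses std::string/std::set rather than the framework's String8/AudioParameter, and the reserved key names and trusted-caller flag are placeholders, not the AOSP API.

// Illustrative sketch (not AOSP code): drop reserved keys from a "k=v;k=v" string
// unless the caller is trusted. Key names below are placeholders.
#include <iostream>
#include <set>
#include <sstream>
#include <string>

static std::string filterReserved(const std::string& kvPairs, bool trustedCaller) {
    static const std::set<std::string> kReserved = {"routing", "input_source"}; // placeholder keys
    if (trustedCaller) {
        return kvPairs;                       // audioserver-originated requests pass through
    }
    std::ostringstream out;
    std::istringstream in(kvPairs);
    std::string pair;
    bool first = true;
    while (std::getline(in, pair, ';')) {
        const std::string key = pair.substr(0, pair.find('='));
        if (kReserved.count(key)) {
            continue;                         // silently drop reserved keys
        }
        if (!first) out << ';';
        out << pair;
        first = false;
    }
    return out.str();
}

int main() {
    std::cout << filterReserved("routing=2;volume=5", /*trustedCaller=*/false) << "\n"; // "volume=5"
}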
+
status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
{
- ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d",
- ioHandle, keyValuePairs.string(), IPCThreadState::self()->getCallingPid());
+ ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d calling uid %d",
+ ioHandle, keyValuePairs.string(),
+ IPCThreadState::self()->getCallingPid(), IPCThreadState::self()->getCallingUid());
// check calling permissions
if (!settingsAllowed()) {
return PERMISSION_DENIED;
}
+ String8 filteredKeyValuePairs = keyValuePairs;
+ filterReservedParameters(filteredKeyValuePairs, IPCThreadState::self()->getCallingUid());
+
+ ALOGV("%s: filtered keyvalue %s", __func__, filteredKeyValuePairs.string());
+
// AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface
if (ioHandle == AUDIO_IO_HANDLE_NONE) {
Mutex::Autolock _l(mLock);
@@ -1183,7 +1243,7 @@
mHardwareStatus = AUDIO_HW_SET_PARAMETER;
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
- status_t result = dev->setParameters(keyValuePairs);
+ status_t result = dev->setParameters(filteredKeyValuePairs);
// return success if at least one audio device accepts the parameters as not all
// HALs are requested to support all parameters. If no audio device supports the
// requested parameters, the last error is reported.
@@ -1194,7 +1254,7 @@
mHardwareStatus = AUDIO_HW_IDLE;
}
// disable AEC and NS if the device is a BT SCO headset supporting those pre processings
- AudioParameter param = AudioParameter(keyValuePairs);
+ AudioParameter param = AudioParameter(filteredKeyValuePairs);
String8 value;
if (param.get(String8(AudioParameter::keyBtNrec), value) == NO_ERROR) {
bool btNrecIsOff = (value == AudioParameter::valueOff);
@@ -1227,16 +1287,16 @@
}
} else if (thread == primaryPlaybackThread_l()) {
// indicate output device change to all input threads for pre processing
- AudioParameter param = AudioParameter(keyValuePairs);
+ AudioParameter param = AudioParameter(filteredKeyValuePairs);
int value;
if ((param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) &&
(value != 0)) {
- broacastParametersToRecordThreads_l(keyValuePairs);
+ broacastParametersToRecordThreads_l(filteredKeyValuePairs);
}
}
}
if (thread != 0) {
- return thread->setParameters(keyValuePairs);
+ return thread->setParameters(filteredKeyValuePairs);
}
return BAD_VALUE;
}
@@ -1485,17 +1545,9 @@
mAudioFlinger(audioFlinger),
mPid(pid)
{
- size_t heapSize = property_get_int32("ro.af.client_heap_size_kbyte", 0);
- heapSize *= 1024;
- if (!heapSize) {
- heapSize = kClientSharedHeapSizeBytes;
- // Increase heap size on non low ram devices to limit risk of reconnection failure for
- // invalidated tracks
- if (!audioFlinger->isLowRamDevice()) {
- heapSize *= kClientSharedHeapSizeMultiplier;
- }
- }
- mMemoryDealer = new MemoryDealer(heapSize, "AudioFlinger::Client");
+ mMemoryDealer = new MemoryDealer(
+ audioFlinger->getClientSharedHeapSize(),
+ (std::string("AudioFlinger::Client(") + std::to_string(pid) + ")").c_str());
}
// Client destructor must be called with AudioFlinger::mClientLock held
@@ -1566,120 +1618,144 @@
// ----------------------------------------------------------------------------
-sp<IAudioRecord> AudioFlinger::openRecord(
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& opPackageName,
- size_t *frameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid,
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers,
- status_t *status,
- audio_port_handle_t portId)
+sp<media::IAudioRecord> AudioFlinger::createRecord(const CreateRecordInput& input,
+ CreateRecordOutput& output,
+ status_t *status)
{
sp<RecordThread::RecordTrack> recordTrack;
sp<RecordHandle> recordHandle;
sp<Client> client;
status_t lStatus;
- audio_session_t lSessionId;
+ audio_session_t sessionId = input.sessionId;
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
- cblk.clear();
- buffers.clear();
+ output.cblk.clear();
+ output.buffers.clear();
+ output.inputId = AUDIO_IO_HANDLE_NONE;
- bool updatePid = (pid == -1);
+ bool updatePid = (input.clientInfo.clientPid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+ uid_t clientUid = input.clientInfo.clientUid;
if (!isTrustedCallingUid(callingUid)) {
- ALOGW_IF((uid_t)clientUid != callingUid,
- "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
+ ALOGW_IF(clientUid != callingUid,
+ "%s uid %d tried to pass itself off as %d",
+ __FUNCTION__, callingUid, clientUid);
clientUid = callingUid;
updatePid = true;
}
-
+ pid_t clientPid = input.clientInfo.clientPid;
if (updatePid) {
const pid_t callingPid = IPCThreadState::self()->getCallingPid();
- ALOGW_IF(pid != -1 && pid != callingPid,
+ ALOGW_IF(clientPid != -1 && clientPid != callingPid,
"%s uid %d pid %d tried to pass itself off as pid %d",
- __func__, callingUid, callingPid, pid);
- pid = callingPid;
- }
-
- // check calling permissions
- if (!recordingAllowed(opPackageName, tid, clientUid)) {
- ALOGE("openRecord() permission denied: recording not allowed");
- lStatus = PERMISSION_DENIED;
- goto Exit;
- }
-
- // further sample rate checks are performed by createRecordTrack_l()
- if (sampleRate == 0) {
- ALOGE("openRecord() invalid sample rate %u", sampleRate);
- lStatus = BAD_VALUE;
- goto Exit;
+ __func__, callingUid, callingPid, clientPid);
+ clientPid = callingPid;
}
// we don't yet support anything other than linear PCM
- if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
- ALOGE("openRecord() invalid format %#x", format);
+ if (!audio_is_valid_format(input.config.format) || !audio_is_linear_pcm(input.config.format)) {
+ ALOGE("createRecord() invalid format %#x", input.config.format);
lStatus = BAD_VALUE;
goto Exit;
}
// further channel mask checks are performed by createRecordTrack_l()
- if (!audio_is_input_channel(channelMask)) {
- ALOGE("openRecord() invalid channel mask %#x", channelMask);
+ if (!audio_is_input_channel(input.config.channel_mask)) {
+ ALOGE("createRecord() invalid channel mask %#x", input.config.channel_mask);
lStatus = BAD_VALUE;
goto Exit;
}
+ if (sessionId == AUDIO_SESSION_ALLOCATE) {
+ sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+
+ output.sessionId = sessionId;
+ output.selectedDeviceId = input.selectedDeviceId;
+ output.flags = input.flags;
+
+ client = registerPid(clientPid);
+
+ // Not a conventional loop, but a retry loop for at most two iterations total.
+ // Try first maybe with FAST flag then try again without FAST flag if that fails.
+ // Exits the loop via break on success, or via goto Exit on error.
+ // The sp<> references will be dropped when re-entering scope.
+ // The lack of indentation is deliberate, to reduce code churn and ease merges.
+ for (;;) {
+ // release previously opened input if retrying.
+ if (output.inputId != AUDIO_IO_HANDLE_NONE) {
+ recordTrack.clear();
+ AudioSystem::releaseInput(portId);
+ output.inputId = AUDIO_IO_HANDLE_NONE;
+ output.selectedDeviceId = input.selectedDeviceId;
+ portId = AUDIO_PORT_HANDLE_NONE;
+ }
+ lStatus = AudioSystem::getInputForAttr(&input.attr, &output.inputId,
+ sessionId,
+ // FIXME compare to AudioTrack
+ clientPid,
+ clientUid,
+ input.opPackageName,
+ &input.config,
+ output.flags, &output.selectedDeviceId, &portId);
+
{
Mutex::Autolock _l(mLock);
- RecordThread *thread = checkRecordThread_l(input);
+ RecordThread *thread = checkRecordThread_l(output.inputId);
if (thread == NULL) {
- ALOGE("openRecord() checkRecordThread_l failed");
+ ALOGE("createRecord() checkRecordThread_l failed");
lStatus = BAD_VALUE;
goto Exit;
}
- client = registerPid(pid);
+ ALOGV("createRecord() lSessionId: %d input %d", sessionId, output.inputId);
- if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
- if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
- lStatus = BAD_VALUE;
- goto Exit;
- }
- lSessionId = *sessionId;
- } else {
- // if no audio session id is provided, create one here
- lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
- if (sessionId != NULL) {
- *sessionId = lSessionId;
- }
- }
- ALOGV("openRecord() lSessionId: %d input %d", lSessionId, input);
+ output.sampleRate = input.config.sample_rate;
+ output.frameCount = input.frameCount;
+ output.notificationFrameCount = input.notificationFrameCount;
- recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
- frameCount, lSessionId, notificationFrames,
- clientUid, flags, tid, &lStatus, portId);
+ recordTrack = thread->createRecordTrack_l(client, input.attr, &output.sampleRate,
+ input.config.format, input.config.channel_mask,
+ &output.frameCount, sessionId,
+ &output.notificationFrameCount,
+ clientUid, &output.flags,
+ input.clientInfo.clientTid,
+ &lStatus, portId);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
- if (lStatus == NO_ERROR) {
- // Check if one effect chain was awaiting for an AudioRecord to be created on this
- // session and move it to this thread.
- sp<EffectChain> chain = getOrphanEffectChain_l(lSessionId);
- if (chain != 0) {
- Mutex::Autolock _l(thread->mLock);
- thread->addEffectChain_l(chain);
- }
+ // lStatus == BAD_TYPE means FAST flag was rejected: request a new input from
+ // audio policy manager without FAST constraint
+ if (lStatus == BAD_TYPE) {
+ continue;
}
+
+ if (lStatus != NO_ERROR) {
+ goto Exit;
+ }
+
+ // Check if one effect chain was waiting for an AudioRecord to be created on this
+ // session and move it to this thread.
+ sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
+ if (chain != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ thread->addEffectChain_l(chain);
+ }
+ break;
+ }
+ // End of retry loop.
+ // The lack of indentation is deliberate, to reduce code churn and ease merges.
}
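For illustration, a minimal sketch of the retry pattern used in createRecord() above: try once with the preferred flag, retry once without it if the flag itself was the reason for rejection. openWithFlags() is a hypothetical stand-in, not the AudioSystem API.

// Illustrative sketch (not AOSP code): try once with the preferred flag,
// then retry once without it if the flag was the reason for rejection.
#include <cstdio>

enum class Status { Ok, RejectedFlag, Error };

// Hypothetical helper standing in for the real input-opening call.
static Status openWithFlags(bool fast) {
    return fast ? Status::RejectedFlag : Status::Ok; // pretend FAST is always rejected
}

int main() {
    bool fast = true;
    for (;;) {                       // at most two iterations
        Status s = openWithFlags(fast);
        if (s == Status::RejectedFlag && fast) {
            fast = false;            // drop the flag and retry once
            continue;
        }
        std::printf("result=%d fast=%d\n", static_cast<int>(s), fast);
        break;
    }
}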
+ output.cblk = recordTrack->getCblk();
+ output.buffers = recordTrack->getBuffers();
+
+ // return handle to client
+ recordHandle = new RecordHandle(recordTrack);
+
+Exit:
if (lStatus != NO_ERROR) {
// remove local strong reference to Client before deleting the RecordTrack so that the
// Client destructor is called by the TrackBase destructor with mClientLock held
@@ -1690,16 +1766,11 @@
client.clear();
}
recordTrack.clear();
- goto Exit;
+ if (output.inputId != AUDIO_IO_HANDLE_NONE) {
+ AudioSystem::releaseInput(portId);
+ }
}
- cblk = recordTrack->getCblk();
- buffers = recordTrack->getBuffers();
-
- // return handle to client
- recordHandle = new RecordHandle(recordTrack);
-
-Exit:
*status = lStatus;
return recordHandle;
}
@@ -1811,7 +1882,7 @@
// ----------------------------------------------------------------------------
-status_t AudioFlinger::setLowRamDevice(bool isLowRamDevice)
+status_t AudioFlinger::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory)
{
uid_t uid = IPCThreadState::self()->getCallingUid();
if (uid != AID_SYSTEM) {
@@ -1822,10 +1893,43 @@
return INVALID_OPERATION;
}
mIsLowRamDevice = isLowRamDevice;
+ mTotalMemory = totalMemory;
+ // mIsLowRamDevice and mTotalMemory are obtained through ActivityManager;
+ // see ActivityManager.isLowRamDevice() and ActivityManager.getMemoryInfo().
+ // mIsLowRamDevice generally represents devices with less than 1GB of memory,
+ // though the actual setting is determined through device configuration.
+ constexpr int64_t GB = 1024 * 1024 * 1024;
+ mClientSharedHeapSize =
+ isLowRamDevice ? kMinimumClientSharedHeapSizeBytes
+ : mTotalMemory < 2 * GB ? 4 * kMinimumClientSharedHeapSizeBytes
+ : mTotalMemory < 3 * GB ? 8 * kMinimumClientSharedHeapSizeBytes
+ : mTotalMemory < 4 * GB ? 16 * kMinimumClientSharedHeapSizeBytes
+ : 32 * kMinimumClientSharedHeapSizeBytes;
mIsDeviceTypeKnown = true;
+
+ // TODO: Cache the client shared heap size in a persistent property.
+ // It's possible that a native process or Java service or app accesses audioserver
+ // after it is registered by system server, but before AudioService updates
+ // the memory info. This would occur immediately after boot or an audioserver
+ // crash and restore. Before update from AudioService, the client would get the
+ // minimum heap size.
+
+ ALOGD("isLowRamDevice:%s totalMemory:%lld mClientSharedHeapSize:%zu",
+ (isLowRamDevice ? "true" : "false"),
+ (long long)mTotalMemory,
+ mClientSharedHeapSize.load());
return NO_ERROR;
}
+size_t AudioFlinger::getClientSharedHeapSize() const
+{
+ size_t heapSizeInBytes = property_get_int32("ro.af.client_heap_size_kbyte", 0) * 1024;
+ if (heapSizeInBytes != 0) { // read-only property overrides all.
+ return heapSizeInBytes;
+ }
+ return mClientSharedHeapSize;
+}
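For illustration, the heap-size ladder above evaluated standalone (assuming kMinimumClientSharedHeapSizeBytes is 1 MB as declared in AudioFlinger.h, and treating anything below 1 GB as low-RAM purely for this example):

// Illustrative sketch (not AOSP code): the heap-size ladder above, evaluated standalone.
#include <cstdint>
#include <cstdio>

int main() {
    constexpr int64_t GB = 1024LL * 1024 * 1024;
    constexpr size_t kMin = 1024 * 1024;                 // 1 MB minimum client heap
    const int64_t totals[] = {GB / 2, 3 * GB / 2, 5 * GB / 2, 7 * GB / 2, 6 * GB};
    for (int64_t totalMemory : totals) {
        const bool lowRam = totalMemory < GB;            // assumption for this example only
        const size_t heap =
                lowRam                 ? kMin
                : totalMemory < 2 * GB ? 4 * kMin        // 4 MB
                : totalMemory < 3 * GB ? 8 * kMin        // 8 MB
                : totalMemory < 4 * GB ? 16 * kMin       // 16 MB
                :                        32 * kMin;      // 32 MB
        std::printf("total=%lld MB -> client heap=%zu MB\n",
                (long long)(totalMemory / (1024 * 1024)), heap / (1024 * 1024));
    }
}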
+
audio_hw_sync_t AudioFlinger::getAudioHwSyncForSession(audio_session_t sessionId)
{
Mutex::Autolock _l(mLock);
@@ -1900,6 +2004,14 @@
return NO_ERROR;
}
+status_t AudioFlinger::getMicrophones(std::vector<media::MicrophoneInfo> *microphones)
+{
+ AutoMutex lock(mHardwareLock);
+ sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
+ status_t status = dev->getMicrophones(microphones);
+ return status;
+}
+
// setAudioHwSyncForSession_l() must be called with AudioFlinger::mLock held
void AudioFlinger::setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId)
{
@@ -2014,8 +2126,8 @@
uint32_t *latencyMs,
audio_output_flags_t flags)
{
- ALOGI("openOutput() this %p, module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, "
- "flags %x",
+ ALOGI("openOutput() this %p, module %d Device %#x, SamplingRate %d, Format %#08x, "
+ "Channels %#x, flags %#x",
this, module,
(devices != NULL) ? *devices : 0,
config->sample_rate,
@@ -2257,8 +2369,8 @@
sp<StreamInHalInterface> inStream;
status_t status = inHwHal->openInputStream(
*input, devices, &halconfig, flags, address.string(), source, &inStream);
- ALOGV("openInput_l() openInputStream returned input %p, devices %x, SamplingRate %d"
- ", Format %#x, Channels %x, flags %#x, status %d addr %s",
+ ALOGV("openInput_l() openInputStream returned input %p, devices %#x, SamplingRate %d"
+ ", Format %#x, Channels %#x, flags %#x, status %d addr %s",
inStream.get(),
devices,
halconfig.sample_rate,
@@ -2954,6 +3066,7 @@
// check recording permission for visualizer
if ((memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) &&
+ // TODO: Do we need to start/stop op - i.e. is there recording being performed?
!recordingAllowed(opPackageName, pid, IPCThreadState::self()->getCallingUid())) {
lStatus = PERMISSION_DENIED;
goto Exit;
@@ -3277,8 +3390,7 @@
// They would both traverse the directory, but the result would simply be
// failures at unlink() which are ignored. It's also unlikely since
// normally dumpsys is only done by bugreport or from the command line.
- char teePath[32+256];
- strcpy(teePath, "/data/misc/audioserver");
+ char teePath[PATH_MAX] = "/data/misc/audioserver";
size_t teePathLen = strlen(teePath);
DIR *dir = opendir(teePath);
teePath[teePathLen++] = '/';
@@ -3288,27 +3400,19 @@
struct Entry entries[TEE_MAX_SORT];
size_t entryCount = 0;
while (entryCount < TEE_MAX_SORT) {
- struct dirent de;
- struct dirent *result = NULL;
- int rc = readdir_r(dir, &de, &result);
- if (rc != 0) {
- ALOGW("readdir_r failed %d", rc);
- break;
- }
- if (result == NULL) {
- break;
- }
- if (result != &de) {
- ALOGW("readdir_r returned unexpected result %p != %p", result, &de);
+ errno = 0; // clear errno before readdir() to track potential errors.
+ const struct dirent *result = readdir(dir);
+ if (result == nullptr) {
+ ALOGW_IF(errno != 0, "tee readdir() failure %s", strerror(errno));
break;
}
// ignore non .wav file entries
- size_t nameLen = strlen(de.d_name);
+ const size_t nameLen = strlen(result->d_name);
if (nameLen <= 4 || nameLen >= TEE_MAX_FILENAME ||
- strcmp(&de.d_name[nameLen - 4], ".wav")) {
+ strcmp(&result->d_name[nameLen - 4], ".wav")) {
continue;
}
- strcpy(entries[entryCount++].mFileName, de.d_name);
+ (void)audio_utils_strlcpy(entries[entryCount++].mFileName, result->d_name);
}
(void) closedir(dir);
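For illustration, the readdir()-plus-errno pattern adopted above in a standalone form; the directory path is a placeholder.

// Illustrative sketch (not AOSP code): readdir() returns nullptr both at end of
// directory and on error, so errno is cleared first to tell the two apart.
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <dirent.h>

int main() {
    DIR *dir = opendir("/tmp");                // placeholder path
    if (dir == nullptr) return 1;
    for (;;) {
        errno = 0;                             // clear before readdir() to detect errors
        const struct dirent *entry = readdir(dir);
        if (entry == nullptr) {
            if (errno != 0) std::fprintf(stderr, "readdir failed: %s\n", std::strerror(errno));
            break;                             // end of directory or error
        }
        std::printf("%s\n", entry->d_name);
    }
    closedir(dir);
    return 0;
}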
if (entryCount > TEE_MAX_KEEP) {
@@ -3379,8 +3483,13 @@
// FIXME not big-endian safe
write(teeFd, &temp, sizeof(temp));
close(teeFd);
- if (fd >= 0) {
- dprintf(fd, "tee copied to %s\n", teePath);
+ // TODO Should create file with temporary name and then rename to final if non-empty.
+ if (total > 0) {
+ if (fd >= 0) {
+ dprintf(fd, "tee copied to %s\n", teePath);
+ }
+ } else {
+ unlink(teePath);
}
} else {
if (fd >= 0) {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index c64752f..963a87d 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -19,8 +19,11 @@
#define ANDROID_AUDIO_FLINGER_H
#include "Configuration.h"
+#include <atomic>
+#include <mutex>
#include <deque>
#include <map>
+#include <vector>
#include <stdint.h>
#include <sys/types.h>
#include <limits.h>
@@ -34,7 +37,6 @@
#include <media/IAudioFlinger.h>
#include <media/IAudioFlingerClient.h>
#include <media/IAudioTrack.h>
-#include <media/IAudioRecord.h>
#include <media/AudioSystem.h>
#include <media/AudioTrack.h>
#include <media/MmapStreamInterface.h>
@@ -72,10 +74,12 @@
#include <powermanager/IPowerManager.h>
-#include <media/nbaio/NBLog.h>
+#include <media/nblog/NBLog.h>
#include <private/media/AudioEffectShared.h>
#include <private/media/AudioTrackShared.h>
+#include "android/media/BnAudioRecord.h"
+
namespace android {
class AudioMixer;
@@ -93,12 +97,6 @@
static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);
-
-// Max shared memory size for audio tracks and audio records per client process
-static const size_t kClientSharedHeapSizeBytes = 1024*1024;
-// Shared memory size multiplier for non low ram devices
-static const size_t kClientSharedHeapSizeMultiplier = 4;
-
#define INCLUDING_FROM_AUDIOFLINGER_H
class AudioFlinger :
@@ -113,39 +111,13 @@
virtual status_t dump(int fd, const Vector<String16>& args);
// IAudioFlinger interface, in binder opcode order
- virtual sp<IAudioTrack> createTrack(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t *pFrameCount,
- audio_output_flags_t *flags,
- const sp<IMemory>& sharedBuffer,
- audio_io_handle_t output,
- pid_t pid,
- pid_t tid,
- audio_session_t *sessionId,
- int clientUid,
- status_t *status /*non-NULL*/,
- audio_port_handle_t portId);
+ virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
+ CreateTrackOutput& output,
+ status_t *status);
- virtual sp<IAudioRecord> openRecord(
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& opPackageName,
- size_t *pFrameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid,
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers,
- status_t *status /*non-NULL*/,
- audio_port_handle_t portId);
+ virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
+ CreateRecordOutput& output,
+ status_t *status);
virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const;
virtual audio_format_t format(audio_io_handle_t output) const;
@@ -172,6 +144,8 @@
virtual status_t setMicMute(bool state);
virtual bool getMicMute() const;
+ virtual void setRecordSilenced(uid_t uid, bool silenced);
+
virtual status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const;
@@ -250,7 +224,7 @@
virtual uint32_t getPrimaryOutputSamplingRate();
virtual size_t getPrimaryOutputFrameCount();
- virtual status_t setLowRamDevice(bool isLowRamDevice);
+ virtual status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) override;
/* List available audio ports and their attributes */
virtual status_t listAudioPorts(unsigned int *num_ports,
@@ -279,6 +253,8 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
virtual status_t systemReady();
+ virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+
virtual status_t onTransact(
uint32_t code,
const Parcel& data,
@@ -296,6 +272,7 @@
audio_config_base_t *config,
const AudioClient& client,
audio_port_handle_t *deviceId,
+ audio_session_t *sessionId,
const sp<MmapStreamCallback>& callback,
sp<MmapStreamInterface>& interface,
audio_port_handle_t *handle);
@@ -537,6 +514,13 @@
};
// --- PlaybackThread ---
+#ifdef FLOAT_EFFECT_CHAIN
+#define EFFECT_BUFFER_FORMAT AUDIO_FORMAT_PCM_FLOAT
+using effect_buffer_t = float;
+#else
+#define EFFECT_BUFFER_FORMAT AUDIO_FORMAT_PCM_16_BIT
+using effect_buffer_t = int16_t;
+#endif
#include "Threads.h"
@@ -556,10 +540,10 @@
virtual void pause();
virtual status_t attachAuxEffect(int effectId);
virtual status_t setParameters(const String8& keyValuePairs);
- virtual VolumeShaper::Status applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation) override;
- virtual sp<VolumeShaper::State> getVolumeShaperState(int id) override;
+ virtual media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation) override;
+ virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id) override;
virtual status_t getTimestamp(AudioTimestamp& timestamp);
virtual void signal(); // signal playback thread for a change in control block
@@ -571,15 +555,15 @@
};
// server side of the client's IAudioRecord
- class RecordHandle : public android::BnAudioRecord {
+ class RecordHandle : public android::media::BnAudioRecord {
public:
explicit RecordHandle(const sp<RecordThread::RecordTrack>& recordTrack);
virtual ~RecordHandle();
- virtual status_t start(int /*AudioSystem::sync_event_t*/ event,
- audio_session_t triggerSession);
- virtual void stop();
- virtual status_t onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
+ virtual binder::Status start(int /*AudioSystem::sync_event_t*/ event,
+ int /*audio_session_t*/ triggerSession);
+ virtual binder::Status stop();
+ virtual binder::Status getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones);
private:
const sp<RecordThread::RecordTrack> mRecordTrack;
@@ -635,9 +619,6 @@
// no range check, AudioFlinger::mLock held
bool streamMute_l(audio_stream_type_t stream) const
{ return mStreamTypes[stream].mute; }
- // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held
- float streamVolume_l(audio_stream_type_t stream) const
- { return mStreamTypes[stream].volume; }
void ioConfigChanged(audio_io_config_event event,
const sp<AudioIoDescriptor>& ioDesc,
pid_t pid = 0);
@@ -817,6 +798,8 @@
status_t checkStreamType(audio_stream_type_t stream) const;
+ void filterReservedParameters(String8& keyValuePairs, uid_t callingUid);
+
#ifdef TEE_SINK
// all record threads serially share a common tee sink, which is re-created on format change
sp<NBAIO_Sink> mRecordTeeSink;
@@ -846,15 +829,18 @@
static const size_t kTeeSinkTrackFramesDefault = 0x200000;
#endif
- // This method reads from a variable without mLock, but the variable is updated under mLock. So
- // we might read a stale value, or a value that's inconsistent with respect to other variables.
- // In this case, it's safe because the return value isn't used for making an important decision.
- // The reason we don't want to take mLock is because it could block the caller for a long time.
+ // These methods read variables atomically without mLock,
+ // though the variables are updated with mLock.
bool isLowRamDevice() const { return mIsLowRamDevice; }
+ size_t getClientSharedHeapSize() const;
private:
- bool mIsLowRamDevice;
+ std::atomic<bool> mIsLowRamDevice;
bool mIsDeviceTypeKnown;
+ int64_t mTotalMemory;
+ std::atomic<size_t> mClientSharedHeapSize;
+ static constexpr size_t kMinimumClientSharedHeapSizeBytes = 1024 * 1024; // 1MB
+
nsecs_t mGlobalEffectEnableTime; // when a global effect was last enabled
sp<PatchPanel> mPatchPanel;
diff --git a/services/audioflinger/BufLog.cpp b/services/audioflinger/BufLog.cpp
index 9680eb5..ae96036 100644
--- a/services/audioflinger/BufLog.cpp
+++ b/services/audioflinger/BufLog.cpp
@@ -24,6 +24,7 @@
#include <pthread.h>
#include <stdio.h>
#include <string.h>
+#include <audio_utils/string.h>
#define MIN(a, b) ((a) < (b) ? (a) : (b))
@@ -117,11 +118,11 @@
mByteCount = 0l;
mPaused = false;
if (tag != NULL) {
- strncpy(mTag, tag, BUFLOGSTREAM_MAX_TAGSIZE);
+ (void)audio_utils_strlcpy(mTag, tag);
} else {
mTag[0] = 0;
}
- ALOGV("Creating BufLogStream id:%d tag:%s format:%d ch:%d sr:%d maxbytes:%zu", mId, mTag,
+ ALOGV("Creating BufLogStream id:%d tag:%s format:%#x ch:%d sr:%d maxbytes:%zu", mId, mTag,
mFormat, mChannels, mSamplingRate, mMaxBytes);
//open file (s), info about tag, format, etc.
diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h
index 845697a..ede8e3f 100644
--- a/services/audioflinger/Configuration.h
+++ b/services/audioflinger/Configuration.h
@@ -41,4 +41,15 @@
// uncomment to log CPU statistics every n wall clock seconds
//#define DEBUG_CPU_USAGE 10
+// define FLOAT_EFFECT_CHAIN to request float effects (falls back to int16_t if unavailable)
+#define FLOAT_EFFECT_CHAIN
+
+#ifdef FLOAT_EFFECT_CHAIN
+// define FLOAT_AUX to process aux effect buffers in float (FLOAT_EFFECT_CHAIN must be defined)
+#define FLOAT_AUX
+
+// define MULTICHANNEL_EFFECT_CHAIN to allow multichannel effects (FLOAT_EFFECT_CHAIN defined)
+#define MULTICHANNEL_EFFECT_CHAIN
+#endif
+
#endif // ANDROID_AUDIOFLINGER_CONFIGURATION_H
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index bd5f146..2047dfd 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -19,11 +19,14 @@
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
+#include <algorithm>
+
#include "Configuration.h"
#include <utils/Log.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_visualizer.h>
+#include <audio_utils/channels.h>
#include <audio_utils/primitives.h>
#include <media/AudioEffect.h>
#include <media/audiohal/EffectHalInterface.h>
@@ -47,8 +50,6 @@
#define ALOGVV(a...) do { } while(0)
#endif
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
namespace android {
// ----------------------------------------------------------------------------
@@ -67,12 +68,19 @@
: mPinned(pinned),
mThread(thread), mChain(chain), mId(id), mSessionId(sessionId),
mDescriptor(*desc),
- // mConfig is set by configure() and not used before then
+ // clear mConfig to ensure consistent initial value of buffer framecount
+ // in case buffers are associated by setInBuffer() or setOutBuffer()
+ // prior to configure().
+ mConfig{{}, {}},
mStatus(NO_INIT), mState(IDLE),
- // mMaxDisableWaitCnt is set by configure() and not used before then
- // mDisableWaitCnt is set by process() and updateState() and not used before then
+ mMaxDisableWaitCnt(1), // set by configure(), should be >= 1
+ mDisableWaitCnt(0), // set by process() and updateState()
mSuspended(false),
+ mOffloaded(false),
mAudioFlinger(thread->mAudioFlinger)
+#ifdef FLOAT_EFFECT_CHAIN
+ , mSupportsFloat(false)
+#endif
{
ALOGV("Constructor %p pinned %d", this, pinned);
int lStatus;
@@ -285,56 +293,190 @@
return;
}
+ const uint32_t inChannelCount =
+ audio_channel_count_from_out_mask(mConfig.inputCfg.channels);
+ const uint32_t outChannelCount =
+ audio_channel_count_from_out_mask(mConfig.outputCfg.channels);
+ const bool auxType =
+ (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY;
+
+ // safeInputOutputSampleCount is 0 if the channel count between input and output
+ // buffers do not match. This prevents automatic accumulation or copying between the
+ // input and output effect buffers without an intermediary effect process.
+ // TODO: consider implementing channel conversion.
+ const size_t safeInputOutputSampleCount =
+ inChannelCount != outChannelCount ? 0
+ : outChannelCount * std::min(
+ mConfig.inputCfg.buffer.frameCount,
+ mConfig.outputCfg.buffer.frameCount);
+ const auto accumulateInputToOutput = [this, safeInputOutputSampleCount]() {
+#ifdef FLOAT_EFFECT_CHAIN
+ accumulate_float(
+ mConfig.outputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.f32,
+ safeInputOutputSampleCount);
+#else
+ accumulate_i16(
+ mConfig.outputCfg.buffer.s16,
+ mConfig.inputCfg.buffer.s16,
+ safeInputOutputSampleCount);
+#endif
+ };
+ const auto copyInputToOutput = [this, safeInputOutputSampleCount]() {
+#ifdef FLOAT_EFFECT_CHAIN
+ memcpy(
+ mConfig.outputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.f32,
+ safeInputOutputSampleCount * sizeof(*mConfig.outputCfg.buffer.f32));
+
+#else
+ memcpy(
+ mConfig.outputCfg.buffer.s16,
+ mConfig.inputCfg.buffer.s16,
+ safeInputOutputSampleCount * sizeof(*mConfig.outputCfg.buffer.s16));
+#endif
+ };
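For illustration, the sample-count guard and the accumulate-versus-copy split above, reduced to a standalone float-only form with example channel and frame counts:

// Illustrative sketch (not AOSP code): when input and output channel counts differ,
// the safe sample count collapses to 0 so no blind copy/accumulate happens; otherwise
// it is capped by the smaller of the two frame counts.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static void accumulate(float *out, const float *in, size_t samples) {
    for (size_t i = 0; i < samples; ++i) out[i] += in[i];
}

int main() {
    const size_t inCh = 2, outCh = 2, inFrames = 4, outFrames = 3;
    const size_t safeSamples =
            (inCh != outCh) ? 0 : outCh * std::min(inFrames, outFrames); // 6 samples
    float in[8]  = {1, 1, 1, 1, 1, 1, 1, 1};
    float out[8] = {};
    accumulate(out, in, safeSamples);
    std::printf("safeSamples=%zu out[0]=%g out[7]=%g\n", safeSamples, out[0], out[7]); // 6 1 0
    return 0;
}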
+
if (isProcessEnabled()) {
- // do 32 bit to 16 bit conversion for auxiliary effect input buffer
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- ditherAndClamp(mConfig.inputCfg.buffer.s32,
- mConfig.inputCfg.buffer.s32,
- mConfig.inputCfg.buffer.frameCount/2);
- }
int ret;
if (isProcessImplemented()) {
- // do the actual processing in the effect engine
- ret = mEffectInterface->process();
- } else {
- if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
- size_t frameCnt = mConfig.inputCfg.buffer.frameCount * FCC_2; //always stereo here
- int16_t *in = mConfig.inputCfg.buffer.s16;
- int16_t *out = mConfig.outputCfg.buffer.s16;
+ if (auxType) {
+ // We overwrite the aux input buffer here and clear after processing.
+ // aux input is always mono.
+#ifdef FLOAT_EFFECT_CHAIN
+ if (mSupportsFloat) {
+#ifndef FLOAT_AUX
+ // Do in-place float conversion for auxiliary effect input buffer.
+ static_assert(sizeof(float) <= sizeof(int32_t),
+ "in-place conversion requires sizeof(float) <= sizeof(int32_t)");
- if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
- for (size_t i = 0; i < frameCnt; i++) {
- out[i] = clamp16((int32_t)out[i] + (int32_t)in[i]);
+ memcpy_to_float_from_q4_27(
+ mConfig.inputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.s32,
+ mConfig.inputCfg.buffer.frameCount);
+#endif // !FLOAT_AUX
+ } else
+#endif // FLOAT_EFFECT_CHAIN
+ {
+#ifdef FLOAT_AUX
+ memcpy_to_i16_from_float(
+ mConfig.inputCfg.buffer.s16,
+ mConfig.inputCfg.buffer.f32,
+ mConfig.inputCfg.buffer.frameCount);
+#else
+ memcpy_to_i16_from_q4_27(
+ mConfig.inputCfg.buffer.s16,
+ mConfig.inputCfg.buffer.s32,
+ mConfig.inputCfg.buffer.frameCount);
+#endif
+ }
+ }
+#ifdef FLOAT_EFFECT_CHAIN
+ sp<EffectBufferHalInterface> inBuffer = mInBuffer;
+ sp<EffectBufferHalInterface> outBuffer = mOutBuffer;
+
+ if (!auxType && mInChannelCountRequested != inChannelCount) {
+ adjust_channels(
+ inBuffer->audioBuffer()->f32, mInChannelCountRequested,
+ mInConversionBuffer->audioBuffer()->f32, inChannelCount,
+ sizeof(float),
+ sizeof(float)
+ * mInChannelCountRequested * mConfig.inputCfg.buffer.frameCount);
+ inBuffer = mInConversionBuffer;
+ }
+ if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE
+ && mOutChannelCountRequested != outChannelCount) {
+ adjust_selected_channels(
+ outBuffer->audioBuffer()->f32, mOutChannelCountRequested,
+ mOutConversionBuffer->audioBuffer()->f32, outChannelCount,
+ sizeof(float),
+ sizeof(float)
+ * mOutChannelCountRequested * mConfig.outputCfg.buffer.frameCount);
+ outBuffer = mOutConversionBuffer;
+ }
+ if (!mSupportsFloat) { // convert input to int16_t as effect doesn't support float.
+ if (!auxType) {
+ if (mInConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mInConversionBuffer is null, bypassing", __func__);
+ goto data_bypass;
}
+ memcpy_to_i16_from_float(
+ mInConversionBuffer->audioBuffer()->s16,
+ inBuffer->audioBuffer()->f32,
+ inChannelCount * mConfig.inputCfg.buffer.frameCount);
+ inBuffer = mInConversionBuffer;
+ }
+ if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ if (mOutConversionBuffer.get() == nullptr) {
+ ALOGW("%s: mOutConversionBuffer is null, bypassing", __func__);
+ goto data_bypass;
+ }
+ memcpy_to_i16_from_float(
+ mOutConversionBuffer->audioBuffer()->s16,
+ outBuffer->audioBuffer()->f32,
+ outChannelCount * mConfig.outputCfg.buffer.frameCount);
+ outBuffer = mOutConversionBuffer;
+ }
+ }
+#endif
+ ret = mEffectInterface->process();
+#ifdef FLOAT_EFFECT_CHAIN
+ if (!mSupportsFloat) { // convert output int16_t back to float.
+ sp<EffectBufferHalInterface> target =
+ mOutChannelCountRequested != outChannelCount
+ ? mOutConversionBuffer : mOutBuffer;
+
+ memcpy_to_float_from_i16(
+ target->audioBuffer()->f32,
+ mOutConversionBuffer->audioBuffer()->s16,
+ outChannelCount * mConfig.outputCfg.buffer.frameCount);
+ }
+ if (mOutChannelCountRequested != outChannelCount) {
+ adjust_selected_channels(mOutConversionBuffer->audioBuffer()->f32, outChannelCount,
+ mOutBuffer->audioBuffer()->f32, mOutChannelCountRequested,
+ sizeof(float),
+ sizeof(float) * outChannelCount * mConfig.outputCfg.buffer.frameCount);
+ }
+#endif
+ } else {
+#ifdef FLOAT_EFFECT_CHAIN
+ data_bypass:
+#endif
+ if (!auxType /* aux effects do not require data bypass */
+ && mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
+ if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ accumulateInputToOutput();
} else {
- memcpy(mConfig.outputCfg.buffer.raw, mConfig.inputCfg.buffer.raw,
- frameCnt * sizeof(int16_t));
+ copyInputToOutput();
}
}
ret = -ENODATA;
}
+
// force transition to IDLE state when engine is ready
if (mState == STOPPED && ret == -ENODATA) {
mDisableWaitCnt = 1;
}
// clear auxiliary effect input buffer for next accumulation
- if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- memset(mConfig.inputCfg.buffer.raw, 0,
- mConfig.inputCfg.buffer.frameCount*sizeof(int32_t));
+ if (auxType) {
+#ifdef FLOAT_AUX
+ const size_t size =
+ mConfig.inputCfg.buffer.frameCount * inChannelCount * sizeof(float);
+#else
+ const size_t size =
+ mConfig.inputCfg.buffer.frameCount * inChannelCount * sizeof(int32_t);
+#endif
+ memset(mConfig.inputCfg.buffer.raw, 0, size);
}
} else if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_INSERT &&
+ // mInBuffer->audioBuffer()->raw != mOutBuffer->audioBuffer()->raw
mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
// If an insert effect is idle and input buffer is different from output buffer,
// accumulate input onto output
sp<EffectChain> chain = mChain.promote();
- if (chain != 0 && chain->activeTrackCnt() != 0) {
- size_t frameCnt = mConfig.inputCfg.buffer.frameCount * FCC_2; //always stereo here
- int16_t *in = mConfig.inputCfg.buffer.s16;
- int16_t *out = mConfig.outputCfg.buffer.s16;
- for (size_t i = 0; i < frameCnt; i++) {
- out[i] = clamp16((int32_t)out[i] + (int32_t)in[i]);
- }
+ if (chain.get() != nullptr && chain->activeTrackCnt() != 0) {
+ accumulateInputToOutput();
}
}
}
@@ -349,6 +491,7 @@
status_t AudioFlinger::EffectModule::configure()
{
+ ALOGVV("configure() started");
status_t status;
sp<ThreadBase> thread;
uint32_t size;
@@ -366,15 +509,28 @@
}
// TODO: handle configuration of effects replacing track process
+ // TODO: handle configuration of input (record) SW effects above the HAL,
+ // similar to output EFFECT_FLAG_TYPE_INSERT/REPLACE,
+ // in which case input channel masks should be used here.
channelMask = thread->channelMask();
+ mConfig.inputCfg.channels = channelMask;
mConfig.outputCfg.channels = channelMask;
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
- mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
- mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
- ALOGV("Overriding auxiliary effect input as MONO and output as STEREO");
+ if (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_MONO) {
+ mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
+ ALOGV("Overriding auxiliary effect input channels %#x as MONO",
+ mConfig.inputCfg.channels);
+ }
+#ifndef MULTICHANNEL_EFFECT_CHAIN
+ if (mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO) {
+ mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ ALOGV("Overriding auxiliary effect output channels %#x as STEREO",
+ mConfig.outputCfg.channels);
+ }
+#endif
} else {
- mConfig.inputCfg.channels = channelMask;
+#ifndef MULTICHANNEL_EFFECT_CHAIN
// TODO: Update this logic when multichannel effects are implemented.
// For offloaded tracks consider mono output as stereo for proper effect initialization
if (channelMask == AUDIO_CHANNEL_OUT_MONO) {
@@ -382,10 +538,15 @@
mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
ALOGV("Overriding effect input and output as STEREO");
}
+#endif
}
+ mInChannelCountRequested =
+ audio_channel_count_from_out_mask(mConfig.inputCfg.channels);
+ mOutChannelCountRequested =
+ audio_channel_count_from_out_mask(mConfig.outputCfg.channels);
- mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
- mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ mConfig.inputCfg.format = EFFECT_BUFFER_FORMAT;
+ mConfig.outputCfg.format = EFFECT_BUFFER_FORMAT;
mConfig.inputCfg.samplingRate = thread->sampleRate();
mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate;
mConfig.inputCfg.bufferProvider.cookie = NULL;
@@ -413,12 +574,6 @@
mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.inputCfg.buffer.frameCount = thread->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
- if (mInBuffer != 0) {
- mInBuffer->setFrameCount(mConfig.inputCfg.buffer.frameCount);
- }
- if (mOutBuffer != 0) {
- mOutBuffer->setFrameCount(mConfig.outputCfg.buffer.frameCount);
- }
ALOGV("configure() %p thread %p buffer %p framecount %zu",
this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
@@ -426,43 +581,108 @@
status_t cmdStatus;
size = sizeof(int);
status = mEffectInterface->command(EFFECT_CMD_SET_CONFIG,
- sizeof(effect_config_t),
+ sizeof(mConfig),
&mConfig,
&size,
&cmdStatus);
- if (status == 0) {
+ if (status == NO_ERROR) {
status = cmdStatus;
}
- if (status == 0 &&
- (memcmp(&mDescriptor.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0)) {
- uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
- effect_param_t *p = (effect_param_t *)buf32;
-
- p->psize = sizeof(uint32_t);
- p->vsize = sizeof(uint32_t);
- size = sizeof(int);
- *(int32_t *)p->data = VISUALIZER_PARAM_LATENCY;
-
- uint32_t latency = 0;
- PlaybackThread *pbt = thread->mAudioFlinger->checkPlaybackThread_l(thread->mId);
- if (pbt != NULL) {
- latency = pbt->latency_l();
+#ifdef MULTICHANNEL_EFFECT_CHAIN
+ if (status != NO_ERROR &&
+ thread->isOutput() &&
+ (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
+ || mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO)) {
+ // Older effects may require exact STEREO position mask.
+ if (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
+ && (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_AUXILIARY) {
+ ALOGV("Overriding effect input channels %#x as STEREO", mConfig.inputCfg.channels);
+ mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
}
+ if (mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO) {
+ ALOGV("Overriding effect output channels %#x as STEREO", mConfig.outputCfg.channels);
+ mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ }
+ size = sizeof(int);
+ status = mEffectInterface->command(EFFECT_CMD_SET_CONFIG,
+ sizeof(mConfig),
+ &mConfig,
+ &size,
+ &cmdStatus);
+ if (status == NO_ERROR) {
+ status = cmdStatus;
+ }
+ }
+#endif
- *((int32_t *)p->data + 1)= latency;
- mEffectInterface->command(EFFECT_CMD_SET_PARAM,
- sizeof(effect_param_t) + 8,
- &buf32,
- &size,
- &cmdStatus);
+#ifdef FLOAT_EFFECT_CHAIN
+ if (status == NO_ERROR) {
+ mSupportsFloat = true;
}
- mMaxDisableWaitCnt = (MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate) /
- (1000 * mConfig.outputCfg.buffer.frameCount);
+ if (status != NO_ERROR) {
+ ALOGV("EFFECT_CMD_SET_CONFIG failed with float format, retry with int16_t.");
+ mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+ size = sizeof(int);
+ status = mEffectInterface->command(EFFECT_CMD_SET_CONFIG,
+ sizeof(mConfig),
+ &mConfig,
+ &size,
+ &cmdStatus);
+ if (status == NO_ERROR) {
+ status = cmdStatus;
+ }
+ if (status == NO_ERROR) {
+ mSupportsFloat = false;
+ ALOGVV("config worked with 16 bit");
+ } else {
+ ALOGE("%s failed %d with int16_t (as well as float)", __func__, status);
+ }
+ }
+#endif
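For illustration, the negotiate-down idea used above (offer the preferred float configuration first, fall back to int16 if the engine rejects it), with a stub standing in for the EFFECT_CMD_SET_CONFIG command:

// Illustrative sketch (not AOSP code): try the preferred format first, then fall
// back if the engine rejects it. setConfig() is a stub, not the effect HAL API.
#include <cstdio>

enum class Format { Float32, Int16 };

// Hypothetical stand-in for sending a set-config command to an effect engine.
static int setConfig(Format f) {
    return f == Format::Float32 ? -1 : 0;   // pretend the engine only supports int16
}

int main() {
    Format fmt = Format::Float32;
    bool supportsFloat = true;
    if (setConfig(fmt) != 0) {
        fmt = Format::Int16;                // retry with the legacy format
        supportsFloat = false;
        if (setConfig(fmt) != 0) {
            std::fprintf(stderr, "effect rejected both formats\n");
            return 1;
        }
    }
    std::printf("configured, supportsFloat=%d\n", supportsFloat);
}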
+
+ if (status == NO_ERROR) {
+ // Establish Buffer strategy
+ setInBuffer(mInBuffer);
+ setOutBuffer(mOutBuffer);
+
+ // Update visualizer latency
+ if (memcmp(&mDescriptor.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) {
+ uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
+ effect_param_t *p = (effect_param_t *)buf32;
+
+ p->psize = sizeof(uint32_t);
+ p->vsize = sizeof(uint32_t);
+ size = sizeof(int);
+ *(int32_t *)p->data = VISUALIZER_PARAM_LATENCY;
+
+ uint32_t latency = 0;
+ PlaybackThread *pbt = thread->mAudioFlinger->checkPlaybackThread_l(thread->mId);
+ if (pbt != NULL) {
+ latency = pbt->latency_l();
+ }
+
+ *((int32_t *)p->data + 1)= latency;
+ mEffectInterface->command(EFFECT_CMD_SET_PARAM,
+ sizeof(effect_param_t) + 8,
+ &buf32,
+ &size,
+ &cmdStatus);
+ }
+ }
+
+ // mConfig.outputCfg.buffer.frameCount cannot be zero.
+ mMaxDisableWaitCnt = (uint32_t)std::max(
+ (uint64_t)1, // mMaxDisableWaitCnt must be greater than zero.
+ (uint64_t)MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate
+ / ((uint64_t)1000 * mConfig.outputCfg.buffer.frameCount));
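For illustration, the wait-count arithmetic above with example values (48 kHz output, 960-frame buffers, and 10000 ms taken as MAX_DISABLE_TIME_MS purely as an assumption for this sketch):

// Illustrative arithmetic (not AOSP code): number of process() calls to wait before
// considering an effect fully disabled. Values below are example assumptions.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t maxDisableTimeMs = 10000; // example value only
    const uint64_t sampleRate = 48000;       // Hz
    const uint64_t frameCount = 960;         // frames per process() call (20 ms)
    const uint64_t waitCnt = std::max<uint64_t>(
            1, maxDisableTimeMs * sampleRate / (1000 * frameCount));
    std::printf("mMaxDisableWaitCnt would be %llu process() calls\n",
            (unsigned long long)waitCnt);    // 500 calls of 20 ms buffers = 10 s
}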
exit:
+ // TODO: consider clearing mConfig on error.
mStatus = status;
+ ALOGVV("configure ended");
return status;
}
@@ -774,6 +994,9 @@
}
void AudioFlinger::EffectModule::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ ALOGVV("setInBuffer %p",(&buffer));
+
+ // mConfig.inputCfg.buffer.frameCount may be zero if configure() is not called yet.
if (buffer != 0) {
mConfig.inputCfg.buffer.raw = buffer->audioBuffer()->raw;
buffer->setFrameCount(mConfig.inputCfg.buffer.frameCount);
@@ -782,9 +1005,48 @@
}
mInBuffer = buffer;
mEffectInterface->setInBuffer(buffer);
+
+#ifdef FLOAT_EFFECT_CHAIN
+ // aux effects do in-place conversion to float - we don't allocate mInConversionBuffer.
+ // Theoretically insert effects can also do in-place conversions (destroying
+ // the original buffer) when the output buffer is identical to the input buffer,
+ // but we don't optimize for it here.
+ const bool auxType = (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY;
+ const uint32_t inChannelCount =
+ audio_channel_count_from_out_mask(mConfig.inputCfg.channels);
+ const bool formatMismatch = !mSupportsFloat || mInChannelCountRequested != inChannelCount;
+ if (!auxType && formatMismatch && mInBuffer.get() != nullptr) {
+ // we need to translate - create hidl shared buffer and intercept
+ const size_t inFrameCount = mConfig.inputCfg.buffer.frameCount;
+ // Use FCC_2 in case mInChannelCountRequested is mono and the effect is stereo.
+ const uint32_t inChannels = std::max((uint32_t)FCC_2, mInChannelCountRequested);
+ const size_t size = inChannels * inFrameCount * std::max(sizeof(int16_t), sizeof(float));
+
+ ALOGV("%s: setInBuffer updating for inChannels:%d inFrameCount:%zu total size:%zu",
+ __func__, inChannels, inFrameCount, size);
+
+ if (size > 0 && (mInConversionBuffer.get() == nullptr
+ || size > mInConversionBuffer->getSize())) {
+ mInConversionBuffer.clear();
+ ALOGV("%s: allocating mInConversionBuffer %zu", __func__, size);
+ sp<AudioFlinger> audioFlinger = mAudioFlinger.promote();
+ LOG_ALWAYS_FATAL_IF(audioFlinger == nullptr, "EM could not retrieve audioFlinger");
+ (void)audioFlinger->mEffectsFactoryHal->allocateBuffer(size, &mInConversionBuffer);
+ }
+ if (mInConversionBuffer.get() != nullptr) {
+ mInConversionBuffer->setFrameCount(inFrameCount);
+ mEffectInterface->setInBuffer(mInConversionBuffer);
+ } else if (size > 0) {
+ ALOGE("%s cannot create mInConversionBuffer", __func__);
+ }
+ }
+#endif
}
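For illustration, the conversion-buffer sizing above with example values: FCC_2 guards against a mono request feeding a stereo effect, and max(sizeof(int16_t), sizeof(float)) lets one allocation serve both formats.

// Illustrative arithmetic (not AOSP code): size of the intermediate conversion buffer.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t requestedChannels = 1;      // mono request, example only
    const uint32_t FCC_2 = 2;                  // fixed channel count of 2
    const size_t frameCount = 960;             // example frames per buffer
    const uint32_t channels = std::max(FCC_2, requestedChannels);
    const size_t size = channels * frameCount * std::max(sizeof(int16_t), sizeof(float));
    std::printf("conversion buffer = %zu bytes\n", size); // 2 * 960 * 4 = 7680
}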
void AudioFlinger::EffectModule::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ ALOGVV("setOutBuffer %p",(&buffer));
+
+ // mConfig.outputCfg.buffer.frameCount may be zero if configure() is not called yet.
if (buffer != 0) {
mConfig.outputCfg.buffer.raw = buffer->audioBuffer()->raw;
buffer->setFrameCount(mConfig.outputCfg.buffer.frameCount);
@@ -793,6 +1055,38 @@
}
mOutBuffer = buffer;
mEffectInterface->setOutBuffer(buffer);
+
+#ifdef FLOAT_EFFECT_CHAIN
+ // Note: Any effect that does not accumulate does not need mOutConversionBuffer and
+ // can do in-place conversion from int16_t to float. We don't optimize here.
+ const uint32_t outChannelCount =
+ audio_channel_count_from_out_mask(mConfig.outputCfg.channels);
+ const bool formatMismatch = !mSupportsFloat || mOutChannelCountRequested != outChannelCount;
+ if (formatMismatch && mOutBuffer.get() != nullptr) {
+ const size_t outFrameCount = mConfig.outputCfg.buffer.frameCount;
+ // Use FCC_2 in case mOutChannelCountRequested is mono and the effect is stereo.
+ const uint32_t outChannels = std::max((uint32_t)FCC_2, mOutChannelCountRequested);
+ const size_t size = outChannels * outFrameCount * std::max(sizeof(int16_t), sizeof(float));
+
+ ALOGV("%s: setOutBuffer updating for outChannels:%d outFrameCount:%zu total size:%zu",
+ __func__, outChannels, outFrameCount, size);
+
+ if (size > 0 && (mOutConversionBuffer.get() == nullptr
+ || size > mOutConversionBuffer->getSize())) {
+ mOutConversionBuffer.clear();
+ ALOGV("%s: allocating mOutConversionBuffer %zu", __func__, size);
+ sp<AudioFlinger> audioFlinger = mAudioFlinger.promote();
+ LOG_ALWAYS_FATAL_IF(audioFlinger == nullptr, "EM could not retrieve audioFlinger");
+ (void)audioFlinger->mEffectsFactoryHal->allocateBuffer(size, &mOutConversionBuffer);
+ }
+ if (mOutConversionBuffer.get() != nullptr) {
+ mOutConversionBuffer->setFrameCount(outFrameCount);
+ mEffectInterface->setOutBuffer(mOutConversionBuffer);
+ } else if (size > 0) {
+ ALOGE("%s cannot create mOutConversionBuffer", __func__);
+ }
+ }
+#endif
}
status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
@@ -1063,15 +1357,26 @@
return s;
}
+static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
+ std::stringstream ss;
+
+ if (buffer.get() == nullptr) {
+ return "nullptr"; // make different than below
+ } else if (buffer->externalData() != nullptr) {
+ ss << (isInput ? buffer->externalData() : buffer->audioBuffer()->raw)
+ << " -> "
+ << (isInput ? buffer->audioBuffer()->raw : buffer->externalData());
+ } else {
+ ss << buffer->audioBuffer()->raw;
+ }
+ return ss.str();
+}
void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused)
{
- const size_t SIZE = 256;
- char buffer[SIZE];
String8 result;
- snprintf(buffer, SIZE, "\tEffect ID %d:\n", mId);
- result.append(buffer);
+ result.appendFormat("\tEffect ID %d:\n", mId);
bool locked = AudioFlinger::dumpTryLock(mLock);
// failed to lock - AudioFlinger is probably deadlocked
@@ -1080,59 +1385,64 @@
}
result.append("\t\tSession Status State Engine:\n");
- snprintf(buffer, SIZE, "\t\t%05d %03d %03d %p\n",
+ result.appendFormat("\t\t%05d %03d %03d %p\n",
mSessionId, mStatus, mState, mEffectInterface.get());
- result.append(buffer);
result.append("\t\tDescriptor:\n");
char uuidStr[64];
AudioEffect::guidToString(&mDescriptor.uuid, uuidStr, sizeof(uuidStr));
- snprintf(buffer, SIZE, "\t\t- UUID: %s\n", uuidStr);
- result.append(buffer);
+ result.appendFormat("\t\t- UUID: %s\n", uuidStr);
AudioEffect::guidToString(&mDescriptor.type, uuidStr, sizeof(uuidStr));
- snprintf(buffer, SIZE, "\t\t- TYPE: %s\n", uuidStr);
- result.append(buffer);
- snprintf(buffer, SIZE, "\t\t- apiVersion: %08X\n\t\t- flags: %08X (%s)\n",
+ result.appendFormat("\t\t- TYPE: %s\n", uuidStr);
+ result.appendFormat("\t\t- apiVersion: %08X\n\t\t- flags: %08X (%s)\n",
mDescriptor.apiVersion,
mDescriptor.flags,
effectFlagsToString(mDescriptor.flags).string());
- result.append(buffer);
- snprintf(buffer, SIZE, "\t\t- name: %s\n",
+ result.appendFormat("\t\t- name: %s\n",
mDescriptor.name);
- result.append(buffer);
- snprintf(buffer, SIZE, "\t\t- implementor: %s\n",
+
+ result.appendFormat("\t\t- implementor: %s\n",
mDescriptor.implementor);
- result.append(buffer);
+
+ result.appendFormat("\t\t- data: %s\n", mSupportsFloat ? "float" : "int16");
result.append("\t\t- Input configuration:\n");
- result.append("\t\t\tFrames Smp rate Channels Format Buffer\n");
- snprintf(buffer, SIZE, "\t\t\t%05zu %05d %08x %6d (%s) %p\n",
+ result.append("\t\t\tBuffer Frames Smp rate Channels Format\n");
+ result.appendFormat("\t\t\t%p %05zu %05d %08x %6d (%s)\n",
+ mConfig.inputCfg.buffer.raw,
mConfig.inputCfg.buffer.frameCount,
mConfig.inputCfg.samplingRate,
mConfig.inputCfg.channels,
mConfig.inputCfg.format,
- formatToString((audio_format_t)mConfig.inputCfg.format).c_str(),
- mConfig.inputCfg.buffer.raw);
- result.append(buffer);
+ formatToString((audio_format_t)mConfig.inputCfg.format).c_str());
result.append("\t\t- Output configuration:\n");
result.append("\t\t\tBuffer Frames Smp rate Channels Format\n");
- snprintf(buffer, SIZE, "\t\t\t%p %05zu %05d %08x %d (%s)\n",
+ result.appendFormat("\t\t\t%p %05zu %05d %08x %6d (%s)\n",
mConfig.outputCfg.buffer.raw,
mConfig.outputCfg.buffer.frameCount,
mConfig.outputCfg.samplingRate,
mConfig.outputCfg.channels,
mConfig.outputCfg.format,
formatToString((audio_format_t)mConfig.outputCfg.format).c_str());
- result.append(buffer);
- snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
- result.append(buffer);
+#ifdef FLOAT_EFFECT_CHAIN
+
+ result.appendFormat("\t\t- HAL buffers:\n"
+ "\t\t\tIn(%s) InConversion(%s) Out(%s) OutConversion(%s)\n",
+ dumpInOutBuffer(true /* isInput */, mInBuffer).c_str(),
+ dumpInOutBuffer(true /* isInput */, mInConversionBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutBuffer).c_str(),
+ dumpInOutBuffer(false /* isInput */, mOutConversionBuffer).c_str());
+#endif
+
+ result.appendFormat("\t\t%zu Clients:\n", mHandles.size());
result.append("\t\t\t Pid Priority Ctrl Locked client server\n");
+ char buffer[256];
for (size_t i = 0; i < mHandles.size(); ++i) {
EffectHandle *handle = mHandles[i];
if (handle != NULL && !handle->disconnected()) {
- handle->dumpToBuffer(buffer, SIZE);
+ handle->dumpToBuffer(buffer, sizeof(buffer));
result.append(buffer);
}
}
@@ -1598,12 +1908,9 @@
if (mInBuffer == NULL) {
return;
}
- // TODO: This will change in the future, depending on multichannel
- // and sample format changes for effects.
- // Currently effects processing is only available for stereo, AUDIO_FORMAT_PCM_16_BIT
- // (4 bytes frame size)
const size_t frameSize =
- audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) * min(FCC_2, thread->channelCount());
+ audio_bytes_per_sample(EFFECT_BUFFER_FORMAT) * thread->channelCount();
+
memset(mInBuffer->audioBuffer()->raw, 0, thread->frameCount() * frameSize);
mInBuffer->commit();
}
@@ -1718,8 +2025,13 @@
// calling the process in effect engine
size_t numSamples = thread->frameCount();
sp<EffectBufferHalInterface> halBuffer;
- status_t result = EffectBufferHalInterface::allocate(
+#ifdef FLOAT_EFFECT_CHAIN
+ status_t result = thread->mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
+ numSamples * sizeof(float), &halBuffer);
+#else
+ status_t result = thread->mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
numSamples * sizeof(int32_t), &halBuffer);
+#endif
if (result != OK) return result;
effect->setInBuffer(halBuffer);
// auxiliary effects output samples to chain input buffer for further processing
@@ -1959,19 +2271,6 @@
}
}
-static void dumpInOutBuffer(
- char *dump, size_t dumpSize, bool isInput, EffectBufferHalInterface *buffer) {
- if (buffer == nullptr) {
- snprintf(dump, dumpSize, "%p", buffer);
- } else if (buffer->externalData() != nullptr) {
- snprintf(dump, dumpSize, "%p -> %p",
- isInput ? buffer->externalData() : buffer->audioBuffer()->raw,
- isInput ? buffer->audioBuffer()->raw : buffer->externalData());
- } else {
- snprintf(dump, dumpSize, "%p", buffer->audioBuffer()->raw);
- }
-}
-
void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
@@ -1989,15 +2288,13 @@
result.append("\tCould not lock mutex:\n");
}
- char inBufferStr[64], outBufferStr[64];
- dumpInOutBuffer(inBufferStr, sizeof(inBufferStr), true, mInBuffer.get());
- dumpInOutBuffer(outBufferStr, sizeof(outBufferStr), false, mOutBuffer.get());
- snprintf(buffer, SIZE, "\t%-*s%-*s Active tracks:\n",
- (int)strlen(inBufferStr), "In buffer ",
- (int)strlen(outBufferStr), "Out buffer ");
- result.append(buffer);
- snprintf(buffer, SIZE, "\t%s %s %d\n", inBufferStr, outBufferStr, mActiveTrackCnt);
- result.append(buffer);
+ const std::string inBufferStr = dumpInOutBuffer(true /* isInput */, mInBuffer);
+ const std::string outBufferStr = dumpInOutBuffer(false /* isInput */, mOutBuffer);
+ result.appendFormat("\t%-*s%-*s Active tracks:\n",
+ (int)inBufferStr.size(), "In buffer ",
+ (int)outBufferStr.size(), "Out buffer ");
+ result.appendFormat("\t%s %s %d\n",
+ inBufferStr.c_str(), outBufferStr.c_str(), mActiveTrackCnt);
write(fd, result.string(), result.size());
for (size_t i = 0; i < numEffects; ++i) {
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index e29798b..2327bb9 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -168,6 +168,14 @@
bool mSuspended; // effect is suspended: temporarily disabled by framework
bool mOffloaded; // effect is currently offloaded to the audio DSP
wp<AudioFlinger> mAudioFlinger;
+
+#ifdef FLOAT_EFFECT_CHAIN
+ bool mSupportsFloat; // effect supports float processing
+ sp<EffectBufferHalInterface> mInConversionBuffer; // Buffers for HAL conversion if needed.
+ sp<EffectBufferHalInterface> mOutConversionBuffer;
+ uint32_t mInChannelCountRequested;
+ uint32_t mOutChannelCountRequested;
+#endif
};
// The EffectHandle class implements the IEffect interface. It provides resources
@@ -308,14 +316,14 @@
void setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
mInBuffer = buffer;
}
- int16_t *inBuffer() const {
- return mInBuffer != 0 ? reinterpret_cast<int16_t*>(mInBuffer->ptr()) : NULL;
+ effect_buffer_t *inBuffer() const {
+ return mInBuffer != 0 ? reinterpret_cast<effect_buffer_t*>(mInBuffer->ptr()) : NULL;
}
void setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
mOutBuffer = buffer;
}
- int16_t *outBuffer() const {
- return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
+ effect_buffer_t *outBuffer() const {
+ return mOutBuffer != 0 ? reinterpret_cast<effect_buffer_t*>(mOutBuffer->ptr()) : NULL;
}
void incTrackCnt() { android_atomic_inc(&mTrackCnt); }
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index c10fa05..79bb9fe 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -79,7 +79,6 @@
unsigned i;
for (i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
- mFastTrackNames[i] = -1;
mGenerations[i] = 0;
}
#ifdef FAST_THREAD_STATISTICS
@@ -138,8 +137,6 @@
void FastMixer::onStateChange()
{
- // log that audio was turned on/off
- LOG_AUDIO_STATE();
const FastMixerState * const current = (const FastMixerState *) mCurrent;
const FastMixerState * const previous = (const FastMixerState *) mPrevious;
FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
@@ -192,7 +189,7 @@
// FIXME new may block for unbounded time at internal mutex of the heap
// implementation; it would be better to have normal mixer allocate for us
// to avoid blocking here and to prevent possible priority inversion
- mMixer = new AudioMixer(frameCount, mSampleRate, FastMixerState::sMaxFastTracks);
+ mMixer = new AudioMixer(frameCount, mSampleRate);
// FIXME See the other FIXME at FastMixer::setNBLogWriter()
const size_t mixerFrameSize = mSinkChannelCount
* audio_bytes_per_sample(mMixerBufferFormat);
@@ -219,11 +216,6 @@
mWarmupNsMax = LONG_MAX;
}
mMixerBufferState = UNDEFINED;
-#if !LOG_NDEBUG
- for (unsigned i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
- mFastTrackNames[i] = -1;
- }
-#endif
// we need to reconfigure all active tracks
previousTrackMask = 0;
mFastTracksGen = current->mFastTracksGen - 1;
@@ -237,7 +229,6 @@
dumpState->mTrackMask = currentTrackMask;
if (current->mFastTracksGen != mFastTracksGen) {
ALOG_ASSERT(mMixerBuffer != NULL);
- int name;
// process removed tracks first to avoid running out of track names
unsigned removedTracks = previousTrackMask & ~currentTrackMask;
@@ -247,13 +238,8 @@
const FastTrack* fastTrack = &current->mFastTracks[i];
ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
if (mMixer != NULL) {
- name = mFastTrackNames[i];
- ALOG_ASSERT(name >= 0);
- mMixer->deleteTrackName(name);
+ mMixer->destroy(i);
}
-#if !LOG_NDEBUG
- mFastTrackNames[i] = -1;
-#endif
// don't reset track dump state, since other side is ignoring it
mGenerations[i] = fastTrack->mGeneration;
}
@@ -265,12 +251,17 @@
addedTracks &= ~(1 << i);
const FastTrack* fastTrack = &current->mFastTracks[i];
AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
- ALOG_ASSERT(bufferProvider != NULL && mFastTrackNames[i] == -1);
if (mMixer != NULL) {
- name = mMixer->getTrackName(fastTrack->mChannelMask,
+ const int name = i; // for clarity, choose name as fast track index.
+ status_t status = mMixer->create(
+ name,
+ fastTrack->mChannelMask,
fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX);
- ALOG_ASSERT(name >= 0);
- mFastTrackNames[i] = name;
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "%s: cannot create track name"
+ " %d, mask %#x, format %#x, sessionId %d in AudioMixer",
+ __func__, name,
+ fastTrack->mChannelMask, fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX);
mMixer->setBufferProvider(name, bufferProvider);
mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
(void *)mMixerBuffer);
@@ -302,8 +293,7 @@
AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
ALOG_ASSERT(bufferProvider != NULL);
if (mMixer != NULL) {
- name = mFastTrackNames[i];
- ALOG_ASSERT(name >= 0);
+ const int name = i;
mMixer->setBufferProvider(name, bufferProvider);
if (fastTrack->mVolumeProvider == NULL) {
float f = AudioMixer::UNITY_GAIN_FLOAT;
@@ -336,7 +326,13 @@
void FastMixer::onWork()
{
- LOG_HIST_TS();
+ // TODO: pass an ID parameter to indicate which time series we want to write to in NBLog.cpp
+ // Or: pass both of these into a single call with a boolean
+ if (mIsWarm) {
+ LOG_HIST_TS();
+ } else {
+ LOG_AUDIO_STATE();
+ }
const FastMixerState * const current = (const FastMixerState *) mCurrent;
FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
const FastMixerState::Command command = mCommand;
@@ -374,8 +370,7 @@
perTrackTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = trackFramesWritten;
fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp);
- int name = mFastTrackNames[i];
- ALOG_ASSERT(name >= 0);
+ const int name = i;
if (fastTrack->mVolumeProvider != NULL) {
gain_minifloat_packed_t vlr = fastTrack->mVolumeProvider->getVolumeLR();
float vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 930fa8d..235d23f 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -57,8 +57,6 @@
static const FastMixerState sInitial;
FastMixerState mPreIdle; // copy of state before we went into idle
- int mFastTrackNames[FastMixerState::kMaxFastTracks];
- // handles used by mixer to identify tracks
int mGenerations[FastMixerState::kMaxFastTracks];
// last observed mFastTracks[i].mGeneration
NBAIO_Sink* mOutputSink;
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 5a55c7a..2be1e91 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -21,7 +21,7 @@
#include <system/audio.h>
#include <media/ExtendedAudioBufferProvider.h>
#include <media/nbaio/NBAIO.h>
-#include <media/nbaio/NBLog.h>
+#include <media/nblog/NBLog.h>
#include "FastThreadState.h"
namespace android {
diff --git a/services/audioflinger/FastThreadState.h b/services/audioflinger/FastThreadState.h
index f18f846..54c0dc6 100644
--- a/services/audioflinger/FastThreadState.h
+++ b/services/audioflinger/FastThreadState.h
@@ -19,7 +19,7 @@
#include "Configuration.h"
#include <stdint.h>
-#include <media/nbaio/NBLog.h>
+#include <media/nblog/NBLog.h>
namespace android {
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
index 366a164..6f546c3 100644
--- a/services/audioflinger/MmapTracks.h
+++ b/services/audioflinger/MmapTracks.h
@@ -23,6 +23,7 @@
class MmapTrack : public TrackBase {
public:
MmapTrack(ThreadBase *thread,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -42,6 +43,15 @@
static void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
+ // protected by MMapThread::mLock
+ void setSilenced_l(bool silenced) { mSilenced = silenced;
+ mSilencedNotified = false;}
+ // protected by MMapThread::mLock
+ bool isSilenced_l() const { return mSilenced; }
+ // protected by MMapThread::mLock
+ bool getAndSetSilencedNotified_l() { bool silencedNotified = mSilencedNotified;
+ mSilencedNotified = true;
+ return silencedNotified; }
private:
friend class MmapThread;
@@ -57,5 +67,7 @@
virtual void onTimestamp(const ExtendedTimestamp &timestamp);
pid_t mPid;
+ bool mSilenced; // protected by MMapThread::mLock
+ bool mSilencedNotified; // protected by MMapThread::mLock
}; // end of Track
diff --git a/services/audioflinger/OWNERS b/services/audioflinger/OWNERS
index 703e4d2..d02d9e0 100644
--- a/services/audioflinger/OWNERS
+++ b/services/audioflinger/OWNERS
@@ -1,3 +1,4 @@
hunga@google.com
jmtrivi@google.com
mnaganov@google.com
+gkasten@google.com
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 27c6d35..e5cb8a2 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -497,9 +497,6 @@
patch->mPatchRecord->buffer(),
patch->mPatchRecord->bufferSize(),
AUDIO_OUTPUT_FLAG_NONE);
- if (patch->mPatchTrack == 0) {
- return NO_MEMORY;
- }
status = patch->mPatchTrack->initCheck();
if (status != NO_ERROR) {
return status;
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 1c1a989..a78be99 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -25,6 +25,7 @@
Track( PlaybackThread *thread,
const sp<Client>& client,
audio_stream_type_t streamType,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -51,6 +52,11 @@
void flush();
void destroy();
int name() const { return mName; }
+ void setName(int name) {
+ LOG_ALWAYS_FATAL_IF(mName >= 0 && name >= 0,
+ "%s both old name %d and new name %d are valid", __func__, mName, name);
+ mName = name;
+ }
virtual uint32_t sampleRate() const;
@@ -68,8 +74,8 @@
status_t attachAuxEffect(int EffectId);
void setAuxBuffer(int EffectId, int32_t *buffer);
int32_t *auxBuffer() const { return mAuxBuffer; }
- void setMainBuffer(int16_t *buffer) { mMainBuffer = buffer; }
- int16_t *mainBuffer() const { return mMainBuffer; }
+ void setMainBuffer(effect_buffer_t *buffer) { mMainBuffer = buffer; }
+ effect_buffer_t *mainBuffer() const { return mMainBuffer; }
int auxEffectId() const { return mAuxEffectId; }
virtual status_t getTimestamp(AudioTimestamp& timestamp);
void signal();
@@ -82,11 +88,28 @@
virtual bool isFastTrack() const { return (mFlags & AUDIO_OUTPUT_FLAG_FAST) != 0; }
// implement volume handling.
- VolumeShaper::Status applyVolumeShaper(
- const sp<VolumeShaper::Configuration>& configuration,
- const sp<VolumeShaper::Operation>& operation);
- sp<VolumeShaper::State> getVolumeShaperState(int id);
- sp<VolumeHandler> getVolumeHandler() { return mVolumeHandler; }
+ media::VolumeShaper::Status applyVolumeShaper(
+ const sp<media::VolumeShaper::Configuration>& configuration,
+ const sp<media::VolumeShaper::Operation>& operation);
+ sp<media::VolumeShaper::State> getVolumeShaperState(int id);
+ sp<media::VolumeHandler> getVolumeHandler() { return mVolumeHandler; }
+ /** Set the computed normalized final volume of the track.
+ * !masterMute * masterVolume * streamVolume * averageLRVolume */
+ void setFinalVolume(float volume);
+ float getFinalVolume() const { return mFinalVolume; }
+
+ /** @return true if the track has changed (metadata or volume) since
+ * the last time this function was called,
+ * true if this function was never called since the track creation,
+ * false otherwise.
+ * Thread safe.
+ */
+ bool readAndClearHasChanged() { return !mChangeNotified.test_and_set(); }
+
+ using SourceMetadatas = std::vector<playback_track_metadata_t>;
+ using MetadataInserter = std::back_insert_iterator<SourceMetadatas>;
+ /** Copy the track metadata in the provided iterator. Thread safe. */
+ virtual void copyMetadataTo(MetadataInserter& backInserter) const;
protected:
// for numerous
@@ -127,6 +150,8 @@
bool presentationComplete(int64_t framesWritten, size_t audioHalFrames);
void signalClientFlag(int32_t flag);
+ /** Set that a metadata has changed and needs to be notified to backend. Thread safe. */
+ void setMetadataHasChanged() { mChangeNotified.clear(); }
public:
void triggerEvents(AudioSystem::sync_event_t type);
virtual void invalidate();
@@ -146,11 +171,9 @@
bool mResetDone;
const audio_stream_type_t mStreamType;
- int mName; // track name on the normal mixer,
- // allocated statically at track creation time,
- // and is even allocated (though unused) for fast tracks
- // FIXME don't allocate track name for fast tracks
- int16_t *mMainBuffer;
+ int mName;
+ effect_buffer_t *mMainBuffer;
+
int32_t *mAuxBuffer;
int mAuxEffectId;
bool mHasVolumeController;
@@ -163,7 +186,7 @@
ExtendedTimestamp mSinkTimestamp;
- sp<VolumeHandler> mVolumeHandler; // handles multiple VolumeShaper configs and operations
+ sp<media::VolumeHandler> mVolumeHandler; // handles multiple VolumeShaper configs and operations
private:
// The following fields are only for fast tracks, and should be in a subclass
@@ -178,10 +201,13 @@
volatile float mCachedVolume; // combined master volume and stream type volume;
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
+ float mFinalVolume; // combine master volume, stream type volume and track volume
sp<AudioTrackServerProxy> mAudioTrackServerProxy;
bool mResumeToStopping; // track was paused in stopping state.
bool mFlushHwPending; // track requests for thread flush
audio_output_flags_t mFlags;
+ // If the last track change was notified to the client with readAndClearHasChanged
+ std::atomic_flag mChangeNotified = ATOMIC_FLAG_INIT;
}; // end of Track
@@ -212,8 +238,11 @@
bool isActive() const { return mActive; }
const wp<ThreadBase>& thread() const { return mThread; }
-private:
+ void copyMetadataTo(MetadataInserter& backInserter) const override;
+ /** Set the metadatas of the upstream tracks. Thread safe. */
+ void setMetadatas(const SourceMetadatas& metadatas);
+private:
status_t obtainBuffer(AudioBufferProvider::Buffer* buffer,
uint32_t waitTimeMs);
void clearBufferQueue();
@@ -228,6 +257,20 @@
bool mActive;
DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
sp<AudioTrackClientProxy> mClientProxy;
+ /** Attributes of the source tracks.
+ *
+ * This member must be accessed with mTrackMetadatasMutex taken.
+ * There is one writer (duplicating thread) and one reader (downstream mixer).
+ *
+ * That means that the duplicating thread can block the downstream mixer
+ * thread and vice versa for the time of the copy.
+ * If this becomes an issue, the metadata could be stored in an atomic raw pointer,
+ * and an exchange with nullptr and delete can be used.
+ * Alternatively a read-copy-update might be implemented.
+ */
+ SourceMetadatas mTrackMetadatas;
+ /** Protects mTrackMetadatas against concurrent access. */
+ mutable std::mutex mTrackMetadatasMutex;
}; // end of OutputTrack
// playback track, used by PatchPanel
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index f8da780..fc2dbbb 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -24,6 +24,7 @@
public:
RecordTrack(RecordThread *thread,
const sp<Client>& client,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -63,6 +64,11 @@
virtual bool isFastTrack() const { return (mFlags & AUDIO_INPUT_FLAG_FAST) != 0; }
+ void setSilenced(bool silenced) { if (!isPatchTrack()) mSilenced = silenced; }
+ bool isSilenced() const { return mSilenced; }
+
+ status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+
private:
friend class AudioFlinger; // for mState
@@ -91,6 +97,8 @@
// used by the record thread to convert frames to proper destination format
RecordBufferConverter *mRecordBufferConverter;
audio_input_flags_t mFlags;
+
+ bool mSilenced;
};
// playback track, used by PatchPanel
diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp
index c1044ef..aa267ea 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/services/audioflinger/ServiceUtilities.cpp
@@ -30,6 +30,8 @@
namespace android {
+static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
+
// Not valid until initialized by AudioFlinger constructor. It would have to be
// re-initialized if the process containing AudioFlinger service forks (which it doesn't).
// This is often used to validate binder interface calls within audioserver
@@ -48,26 +50,11 @@
}
}
-bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid) {
- // we're always OK.
- if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
-
- static const String16 sRecordAudio("android.permission.RECORD_AUDIO");
-
- // We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
- // may open a record track on behalf of a client. Note that pid may be a tid.
- // IMPORTANT: Don't use PermissionCache - a runtime permission and may change.
- const bool ok = checkPermission(sRecordAudio, pid, uid);
- if (!ok) {
- ALOGE("Request requires android.permission.RECORD_AUDIO");
- return false;
+static String16 resolveCallingPackage(PermissionController& permissionController,
+ const String16& opPackageName, uid_t uid) {
+ if (opPackageName.size() > 0) {
+ return opPackageName;
}
-
- // To permit command-line native tests
- if (uid == AID_ROOT) return true;
-
- String16 checkedOpPackageName = opPackageName;
-
// In some cases the calling code has no access to the package it runs under.
// For example, code using the wilhelm framework's OpenSL-ES APIs. In this
// case we will get the packages for the calling UID and pick the first one
@@ -75,40 +62,89 @@
// as for legacy apps we will toggle the app op for all packages in the UID.
// The caveat is that the operation may be attributed to the wrong package and
// stats based on app ops may be slightly off.
- if (checkedOpPackageName.size() <= 0) {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("permission"));
- if (binder == 0) {
- ALOGE("Cannot get permission service");
- return false;
- }
+ Vector<String16> packages;
+ permissionController.getPackagesForUid(uid, packages);
+ if (packages.isEmpty()) {
+ ALOGE("No packages for uid %d", uid);
+ return opPackageName; // empty string
+ }
+ return packages[0];
+}
- sp<IPermissionController> permCtrl = interface_cast<IPermissionController>(binder);
- Vector<String16> packages;
+static inline bool isAudioServerOrRoot(uid_t uid) {
+ // AID_ROOT is OK for command-line tests. Native unforked audioserver always OK.
+ return uid == AID_ROOT || uid == AID_AUDIOSERVER ;
+}
- permCtrl->getPackagesForUid(uid, packages);
+static bool checkRecordingInternal(const String16& opPackageName, pid_t pid,
+ uid_t uid, bool start) {
+ // Okay to not track in app ops as audio server is us and if
+ // device is rooted security model is considered compromised.
+ if (isAudioServerOrRoot(uid)) return true;
- if (packages.isEmpty()) {
- ALOGE("No packages for calling UID");
- return false;
- }
- checkedOpPackageName = packages[0];
+ // We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
+ // may open a record track on behalf of a client. Note that pid may be a tid.
+ // IMPORTANT: DON'T USE PermissionCache - RUNTIME PERMISSIONS CHANGE.
+ PermissionController permissionController;
+ const bool ok = permissionController.checkPermission(sAndroidPermissionRecordAudio, pid, uid);
+ if (!ok) {
+ ALOGE("Request requires %s", String8(sAndroidPermissionRecordAudio).c_str());
+ return false;
+ }
+
+ String16 resolvedOpPackageName = resolveCallingPackage(
+ permissionController, opPackageName, uid);
+ if (resolvedOpPackageName.size() == 0) {
+ return false;
}
AppOpsManager appOps;
- if (appOps.noteOp(AppOpsManager::OP_RECORD_AUDIO, uid, checkedOpPackageName)
- != AppOpsManager::MODE_ALLOWED) {
- ALOGE("Request denied by app op OP_RECORD_AUDIO");
- return false;
+ const int32_t op = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
+ if (start) {
+ if (appOps.startOpNoThrow(op, uid, resolvedOpPackageName, /*startIfModeDefault*/ false)
+ != AppOpsManager::MODE_ALLOWED) {
+ ALOGE("Request denied by app op: %d", op);
+ return false;
+ }
+ } else {
+ if (appOps.noteOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
+ ALOGE("Request denied by app op: %d", op);
+ return false;
+ }
}
return true;
}
+bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid) {
+ return checkRecordingInternal(opPackageName, pid, uid, /*start*/ false);
+}
+
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid) {
+ return checkRecordingInternal(opPackageName, pid, uid, /*start*/ true);
+}
+
+void finishRecording(const String16& opPackageName, uid_t uid) {
+ // Okay to not track in app ops as audio server is us and if
+ // device is rooted security model is considered compromised.
+ if (isAudioServerOrRoot(uid)) return;
+
+ PermissionController permissionController;
+ String16 resolvedOpPackageName = resolveCallingPackage(
+ permissionController, opPackageName, uid);
+ if (resolvedOpPackageName.size() == 0) {
+ return;
+ }
+
+ AppOpsManager appOps;
+ const int32_t op = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
+ appOps.finishOp(op, uid, resolvedOpPackageName);
+}
+
bool captureAudioOutputAllowed(pid_t pid, uid_t uid) {
if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
- bool ok = checkPermission(sCaptureAudioOutput, pid, uid);
+ bool ok = PermissionCache::checkPermission(sCaptureAudioOutput, pid, uid);
if (!ok) ALOGE("Request requires android.permission.CAPTURE_AUDIO_OUTPUT");
return ok;
}
@@ -153,4 +189,11 @@
return ok;
}
+bool modifyPhoneStateAllowed(pid_t pid, uid_t uid) {
+ static const String16 sModifyPhoneState("android.permission.MODIFY_PHONE_STATE");
+ bool ok = PermissionCache::checkPermission(sModifyPhoneState, pid, uid);
+ if (!ok) ALOGE("Request requires android.permission.MODIFY_PHONE_STATE");
+ return ok;
+}
+
} // namespace android
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
index 04cb9cd..f45ada1 100644
--- a/services/audioflinger/ServiceUtilities.h
+++ b/services/audioflinger/ServiceUtilities.h
@@ -16,14 +16,19 @@
#include <unistd.h>
+#include <binder/PermissionController.h>
+
namespace android {
extern pid_t getpid_cached;
bool isTrustedCallingUid(uid_t uid);
bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
+void finishRecording(const String16& opPackageName, uid_t uid);
bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
bool captureHotwordAllowed(pid_t pid, uid_t uid);
bool settingsAllowed();
bool modifyAudioRoutingAllowed();
bool dumpAllowed();
+bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e78445e..dcad866 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -56,6 +56,9 @@
#include <powermanager/PowerManager.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/audiohal/StreamHalInterface.h>
+
#include "AudioFlinger.h"
#include "FastMixer.h"
#include "FastCapture.h"
@@ -1176,6 +1179,7 @@
switch (mType) {
case MIXER: {
+#ifndef MULTICHANNEL_EFFECT_CHAIN
// Reject any effect on mixer multichannel sinks.
// TODO: fix both format and multichannel issues with effects.
if (mChannelCount != FCC_2) {
@@ -1183,6 +1187,7 @@
" thread %s", desc->name, mChannelCount, mThreadName);
return BAD_VALUE;
}
+#endif
audio_output_flags_t flags = mOutput->flags;
if (hasFastMixer() || (flags & AUDIO_OUTPUT_FLAG_FAST)) {
if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
@@ -1229,6 +1234,7 @@
desc->name, mThreadName);
return BAD_VALUE;
case DUPLICATING:
+#ifndef MULTICHANNEL_EFFECT_CHAIN
// Reject any effect on mixer multichannel sinks.
// TODO: fix both format and multichannel issues with effects.
if (mChannelCount != FCC_2) {
@@ -1236,6 +1242,7 @@
" on DUPLICATING thread %s", desc->name, mChannelCount, mThreadName);
return BAD_VALUE;
}
+#endif
if ((sessionId == AUDIO_SESSION_OUTPUT_STAGE) || (sessionId == AUDIO_SESSION_OUTPUT_MIX)) {
ALOGW("checkEffectCompatibility_l(): global effect %s on DUPLICATING"
" thread %s", desc->name, mThreadName);
@@ -1412,7 +1419,7 @@
bool chainCreated = false;
ALOGD_IF((mType == OFFLOAD) && !effect->isOffloadable(),
- "addEffect_l() on offloaded thread %p: effect %s does not support offload flags %x",
+ "addEffect_l() on offloaded thread %p: effect %s does not support offload flags %#x",
this, effect->desc().name, effect->desc().flags);
if (chain == 0) {
@@ -1548,6 +1555,7 @@
mActiveTracksGeneration++;
mLatestActiveTrack = track;
++mBatteryCounter[track->uid()].second;
+ mHasChanged = true;
return mActiveTracks.add(track);
}
@@ -1562,6 +1570,7 @@
mActiveTracksGeneration++;
--mBatteryCounter[track->uid()].second;
// mLatestActiveTrack is not cleared even if is the same as track.
+ mHasChanged = true;
return index;
}
@@ -1572,6 +1581,7 @@
logTrack("clear", track);
}
mLastActiveTracksGeneration = mActiveTracksGeneration;
+ if (!mActiveTracks.empty()) { mHasChanged = true; }
mActiveTracks.clear();
mLatestActiveTrack.clear();
mBatteryCounter.clear();
@@ -1609,6 +1619,13 @@
}
template <typename T>
+bool AudioFlinger::ThreadBase::ActiveTracks<T>::readAndClearHasChanged() {
+ const bool hasChanged = mHasChanged;
+ mHasChanged = false;
+ return hasChanged;
+}
+
+template <typename T>
void AudioFlinger::ThreadBase::ActiveTracks<T>::logTrack(
const char *funcName, const sp<T> &track) const {
if (mLocalLog != nullptr) {
@@ -1655,6 +1672,7 @@
mSuspendedFrames(0),
mActiveTracks(&this->mLocalLog),
// mStreamTypes[] initialized in constructor body
+ mTracks(type == MIXER),
mOutput(output),
mLastWriteTime(-1), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
mMixerStatus(MIXER_IDLE),
@@ -1696,11 +1714,14 @@
readOutputParameters_l();
// ++ operator does not compile
- for (audio_stream_type_t stream = AUDIO_STREAM_MIN; stream < AUDIO_STREAM_CNT;
+ for (audio_stream_type_t stream = AUDIO_STREAM_MIN; stream < AUDIO_STREAM_FOR_POLICY_CNT;
stream = (audio_stream_type_t) (stream + 1)) {
- mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
+ mStreamTypes[stream].volume = 0.0f;
mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
}
+ // Audio patch volume is always max
+ mStreamTypes[AUDIO_STREAM_PATCH].volume = 1.0f;
+ mStreamTypes[AUDIO_STREAM_PATCH].mute = false;
}
AudioFlinger::PlaybackThread::~PlaybackThread()
@@ -1837,10 +1858,14 @@
sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
const sp<AudioFlinger::Client>& client,
audio_stream_type_t streamType,
- uint32_t sampleRate,
+ const audio_attributes_t& attr,
+ uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
+ size_t *pNotificationFrameCount,
+ uint32_t notificationsPerBuffer,
+ float speed,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
audio_output_flags_t *flags,
@@ -1850,9 +1875,16 @@
audio_port_handle_t portId)
{
size_t frameCount = *pFrameCount;
+ size_t notificationFrameCount = *pNotificationFrameCount;
sp<Track> track;
status_t lStatus;
audio_output_flags_t outputFlags = mOutput->flags;
+ audio_output_flags_t requestedFlags = *flags;
+
+ if (*pSampleRate == 0) {
+ *pSampleRate = mSampleRate;
+ }
+ uint32_t sampleRate = *pSampleRate;
// special case for FAST flag considered OK if fast mixer is present
if (hasFastMixer()) {
@@ -1929,36 +1961,114 @@
*flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
}
}
- // For normal PCM streaming tracks, update minimum frame count.
- // For compatibility with AudioTrack calculation, buffer depth is forced
- // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
- // This is probably too conservative, but legacy application code may depend on it.
- // If you change this calculation, also review the start threshold which is related.
- if (!(*flags & AUDIO_OUTPUT_FLAG_FAST)
- && audio_has_proportional_frames(format) && sharedBuffer == 0) {
- // this must match AudioTrack.cpp calculateMinFrameCount().
- // TODO: Move to a common library
- uint32_t latencyMs = 0;
- lStatus = mOutput->stream->getLatency(&latencyMs);
- if (lStatus != OK) {
- ALOGE("Error when retrieving output stream latency: %d", lStatus);
+
+ if (!audio_has_proportional_frames(format)) {
+ if (sharedBuffer != 0) {
+ // Same comment as below about ignoring frameCount parameter for set()
+ frameCount = sharedBuffer->size();
+ } else if (frameCount == 0) {
+ frameCount = mNormalFrameCount;
+ }
+ if (notificationFrameCount != frameCount) {
+ notificationFrameCount = frameCount;
+ }
+ } else if (sharedBuffer != 0) {
+ // FIXME: Ensure client side memory buffers need
+ // not have additional alignment beyond sample
+ // (e.g. 16 bit stereo accessed as 32 bit frame).
+ size_t alignment = audio_bytes_per_sample(format);
+ if (alignment & 1) {
+ // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
+ alignment = 1;
+ }
+ uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+ size_t frameSize = channelCount * audio_bytes_per_sample(format);
+ if (channelCount > 1) {
+ // More than 2 channels does not require stronger alignment than stereo
+ alignment <<= 1;
+ }
+ if (((uintptr_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
+ ALOGE("Invalid buffer alignment: address %p, channel count %u",
+ sharedBuffer->pointer(), channelCount);
+ lStatus = BAD_VALUE;
goto Exit;
}
- uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
- if (minBufCount < 2) {
- minBufCount = 2;
+
+ // When initializing a shared buffer AudioTrack via constructors,
+ // there's no frameCount parameter.
+ // But when initializing a shared buffer AudioTrack via set(),
+ // there _is_ a frameCount parameter. We silently ignore it.
+ frameCount = sharedBuffer->size() / frameSize;
+ } else {
+ size_t minFrameCount = 0;
+ // For fast tracks we try to respect the application's request for notifications per buffer.
+ if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
+ if (notificationsPerBuffer > 0) {
+ // Avoid possible arithmetic overflow during multiplication.
+ if (notificationsPerBuffer > SIZE_MAX / mFrameCount) {
+ ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
+ notificationsPerBuffer, mFrameCount);
+ } else {
+ minFrameCount = mFrameCount * notificationsPerBuffer;
+ }
+ }
+ } else {
+ // For normal PCM streaming tracks, update minimum frame count.
+ // Buffer depth is forced to be at least 2 x the normal mixer frame count and
+ // cover audio hardware latency.
+ // This is probably too conservative, but legacy application code may depend on it.
+ // If you change this calculation, also review the start threshold which is related.
+ uint32_t latencyMs = latency_l();
+ if (latencyMs == 0) {
+ ALOGE("Error when retrieving output stream latency");
+ lStatus = UNKNOWN_ERROR;
+ goto Exit;
+ }
+
+ minFrameCount = AudioSystem::calculateMinFrameCount(latencyMs, mNormalFrameCount,
+ mSampleRate, sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
+
}
- // For normal mixing tracks, if speed is > 1.0f (normal), AudioTrack
- // or the client should compute and pass in a larger buffer request.
- size_t minFrameCount =
- minBufCount * sourceFramesNeededWithTimestretch(
- sampleRate, mNormalFrameCount,
- mSampleRate, AUDIO_TIMESTRETCH_SPEED_NORMAL /*speed*/);
- if (frameCount < minFrameCount) { // including frameCount == 0
+ if (frameCount < minFrameCount) {
frameCount = minFrameCount;
}
}
+
+ // Make sure that application is notified with sufficient margin before underrun.
+ // The client can divide the AudioTrack buffer into sub-buffers,
+ // and expresses its desire to the server as the notification frame count.
+ if (sharedBuffer == 0 && audio_is_linear_pcm(format)) {
+ size_t maxNotificationFrames;
+ if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
+ // notify every HAL buffer, regardless of the size of the track buffer
+ maxNotificationFrames = mFrameCount;
+ } else {
+ // For normal tracks, use at least double-buffering if no sample rate conversion,
+ // or at least triple-buffering if there is sample rate conversion
+ const int nBuffering = sampleRate == mSampleRate ? 2 : 3;
+ maxNotificationFrames = frameCount / nBuffering;
+ // If client requested a fast track but this was denied, then use the smaller maximum.
+ if (requestedFlags & AUDIO_OUTPUT_FLAG_FAST) {
+ size_t maxNotificationFramesFastDenied = FMS_20 * sampleRate / 1000;
+ if (maxNotificationFrames > maxNotificationFramesFastDenied) {
+ maxNotificationFrames = maxNotificationFramesFastDenied;
+ }
+ }
+ }
+ if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
+ if (notificationFrameCount == 0) {
+ ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
+ maxNotificationFrames, frameCount);
+ } else {
+ ALOGW("Client adjusted notificationFrames from %zu to %zu for frameCount %zu",
+ notificationFrameCount, maxNotificationFrames, frameCount);
+ }
+ notificationFrameCount = maxNotificationFrames;
+ }
+ }
+
*pFrameCount = frameCount;
+ *pNotificationFrameCount = notificationFrameCount;
switch (mType) {
@@ -2027,7 +2137,7 @@
}
}
- track = new Track(this, client, streamType, sampleRate, format,
+ track = new Track(this, client, streamType, attr, sampleRate, format,
channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
sessionId, uid, *flags, TrackBase::TYPE_DEFAULT, portId);
@@ -2063,6 +2173,53 @@
return track;
}
+template<typename T>
+ssize_t AudioFlinger::PlaybackThread::Tracks<T>::add(const sp<T> &track)
+{
+ const ssize_t index = mTracks.add(track);
+ if (index >= 0) {
+ // set name for track when adding.
+ int name;
+ if (mUnusedTrackNames.empty()) {
+ name = mTracks.size() - 1; // new name {0 ... size-1}.
+ } else {
+ // reuse smallest name for deleted track.
+ auto it = mUnusedTrackNames.begin();
+ name = *it;
+ (void)mUnusedTrackNames.erase(it);
+ }
+ track->setName(name);
+ } else {
+ LOG_ALWAYS_FATAL("cannot add track");
+ }
+ return index;
+}
+
+template<typename T>
+ssize_t AudioFlinger::PlaybackThread::Tracks<T>::remove(const sp<T> &track)
+{
+ const int name = track->name();
+ const ssize_t index = mTracks.remove(track);
+ if (index >= 0) {
+ // invalidate name when removing from mTracks.
+ LOG_ALWAYS_FATAL_IF(name < 0, "invalid name %d for track on mTracks", name);
+
+ if (mSaveDeletedTrackNames) {
+ // We can't directly access mAudioMixer since the caller may be outside of threadLoop.
+ // Instead, we add to mDeletedTrackNames which is solely used for mAudioMixer update,
+ // to be handled when MixerThread::prepareTracks_l() next changes mAudioMixer.
+ mDeletedTrackNames.emplace(name);
+ }
+
+ mUnusedTrackNames.emplace(name);
+ track->setName(T::TRACK_NAME_PENDING);
+ } else {
+ LOG_ALWAYS_FATAL_IF(name >= 0,
+ "valid name %d for track not in mTracks (returned %zd)", name, index);
+ }
+ return index;
+}
+
uint32_t AudioFlinger::PlaybackThread::correctLatency_l(uint32_t latency) const
{
return latency;
@@ -2219,9 +2376,6 @@
mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.string());
mTracks.remove(track);
- deleteTrackName_l(track->name());
- // redundant as track is about to be destroyed, for dumpsys only
- track->mName = -1;
if (track->isFastTrack()) {
int index = track->mFastIndex;
ALOG_ASSERT(0 < index && index < (int)FastMixerState::sMaxFastTracks);
@@ -2449,7 +2603,7 @@
free(mEffectBuffer);
mEffectBuffer = NULL;
if (mEffectBufferEnabled) {
- mEffectBufferFormat = AUDIO_FORMAT_PCM_16_BIT; // Note: Effects support 16b only
+ mEffectBufferFormat = EFFECT_BUFFER_FORMAT;
mEffectBufferSize = mNormalFrameCount * mChannelCount
* audio_bytes_per_sample(mEffectBufferFormat);
(void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
@@ -2467,6 +2621,34 @@
}
}
+void AudioFlinger::PlaybackThread::updateMetadata_l()
+{
+ if (mOutput == nullptr || mOutput->stream == nullptr ) {
+ return; // That should not happen
+ }
+ bool hasChanged = mActiveTracks.readAndClearHasChanged();
+ for (const sp<Track> &track : mActiveTracks) {
+ // Do not short-circuit as all hasChanged states must be reset
+ // as all the metadata are going to be sent
+ hasChanged |= track->readAndClearHasChanged();
+ }
+ if (!hasChanged) {
+ return; // nothing to do
+ }
+ StreamOutHalInterface::SourceMetadata metadata;
+ auto backInserter = std::back_inserter(metadata.tracks);
+ for (const sp<Track> &track : mActiveTracks) {
+ // No track is invalid as this is called after prepareTrack_l in the same critical section
+ track->copyMetadataTo(backInserter);
+ }
+ sendMetadataToBackend_l(metadata);
+}
+
+void AudioFlinger::PlaybackThread::sendMetadataToBackend_l(
+ const StreamOutHalInterface::SourceMetadata& metadata)
+{
+ mOutput->stream->updateSourceMetadata(metadata);
+};
status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
{
@@ -2638,6 +2820,7 @@
// shared by MIXER and DIRECT, overridden by DUPLICATING
ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
{
+ LOG_HIST_TS();
mInWrite = true;
ssize_t bytesWritten;
const size_t offset = mCurrentWriteLength - mBytesRemaining;
@@ -2789,25 +2972,28 @@
{
audio_session_t session = chain->sessionId();
sp<EffectBufferHalInterface> halInBuffer, halOutBuffer;
- status_t result = EffectBufferHalInterface::mirror(
+ status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
&halInBuffer);
if (result != OK) return result;
halOutBuffer = halInBuffer;
- int16_t *buffer = reinterpret_cast<int16_t*>(halInBuffer->externalData());
-
+ effect_buffer_t *buffer = reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData());
ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
if (session > AUDIO_SESSION_OUTPUT_MIX) {
// Only one effect chain can be present in direct output thread and it uses
// the sink buffer as input
if (mType != DIRECT) {
size_t numSamples = mNormalFrameCount * mChannelCount;
- status_t result = EffectBufferHalInterface::allocate(
- numSamples * sizeof(int16_t),
+ status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
+ numSamples * sizeof(effect_buffer_t),
&halInBuffer);
if (result != OK) return result;
+#ifdef FLOAT_EFFECT_CHAIN
+ buffer = halInBuffer->audioBuffer()->f32;
+#else
buffer = halInBuffer->audioBuffer()->s16;
+#endif
ALOGV("addEffectChain_l() creating new input buffer %p session %d",
buffer, session);
}
@@ -2882,7 +3068,7 @@
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (session == track->sessionId()) {
- track->setMainBuffer(reinterpret_cast<int16_t*>(mSinkBuffer));
+ track->setMainBuffer(reinterpret_cast<effect_buffer_t*>(mSinkBuffer));
chain->decTrackCnt();
}
}
@@ -3116,6 +3302,10 @@
threadLoop_standby();
+ // This is where we go into standby
+ if (!mStandby) {
+ LOG_AUDIO_STATE();
+ }
mStandby = true;
}
@@ -3156,6 +3346,8 @@
mActiveTracks.updatePowerState(this);
+ updateMetadata_l();
+
// prevent any changes in effect chain list and in each effect chain
// during mixing and effect process as the audio buffers could be deleted
// or modified if an effect is created or deleted
@@ -3331,7 +3523,8 @@
if (diff > 0) {
// notify of throttle end on debug log
// but prevent spamming for bluetooth
- ALOGD_IF(!audio_is_a2dp_out_device(outDevice()),
+ ALOGD_IF(!audio_is_a2dp_out_device(outDevice()) &&
+ !audio_is_hearing_aid_out_device(outDevice()),
"mixer(%p) throttle end: throttle time(%u)", this, diff);
mThreadThrottleEndMs = mThreadThrottleTimeMs;
}
@@ -3601,7 +3794,7 @@
// mNormalSink below
{
ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
- ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%zu, "
+ ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
"mFrameCount=%zu, mNormalFrameCount=%zu",
mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
mNormalFrameCount);
@@ -3669,7 +3862,7 @@
NBAIO_Format origformat = format;
#endif
// adjust format to match that of the Fast Mixer
- ALOGV("format changed from %d to %d", format.mFormat, fastMixerFormat);
+ ALOGV("format changed from %#x to %#x", format.mFormat, fastMixerFormat);
format.mFormat = fastMixerFormat;
format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount;
@@ -3976,16 +4169,31 @@
// buffer size, then write 0s to the output
if (mSleepTimeUs == 0) {
if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
- if (mSleepTimeUs < kMinThreadSleepTimeUs) {
- mSleepTimeUs = kMinThreadSleepTimeUs;
- }
- // reduce sleep time in case of consecutive application underruns to avoid
- // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
- // duration we would end up writing less data than needed by the audio HAL if
- // the condition persists.
- if (sleepTimeShift < kMaxThreadSleepTimeShift) {
- sleepTimeShift++;
+ if (mPipeSink.get() != nullptr && mPipeSink == mNormalSink) {
+ // Using the Monopipe availableToWrite, we estimate the
+ // sleep time to retry for more data (before we underrun).
+ MonoPipe *monoPipe = static_cast<MonoPipe *>(mPipeSink.get());
+ const ssize_t availableToWrite = mPipeSink->availableToWrite();
+ const size_t pipeFrames = monoPipe->maxFrames();
+ const size_t framesLeft = pipeFrames - max(availableToWrite, 0);
+ // HAL_framecount <= framesDelay ~ framesLeft / 2 <= Normal_Mixer_framecount
+ const size_t framesDelay = std::min(
+ mNormalFrameCount, max(framesLeft / 2, mFrameCount));
+ ALOGV("pipeFrames:%zu framesLeft:%zu framesDelay:%zu",
+ pipeFrames, framesLeft, framesDelay);
+ mSleepTimeUs = framesDelay * MICROS_PER_SECOND / mSampleRate;
+ } else {
+ mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
+ if (mSleepTimeUs < kMinThreadSleepTimeUs) {
+ mSleepTimeUs = kMinThreadSleepTimeUs;
+ }
+ // reduce sleep time in case of consecutive application underruns to avoid
+ // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
+ // duration we would end up writing less data than needed by the audio HAL if
+ // the condition persists.
+ if (sleepTimeShift < kMaxThreadSleepTimeShift) {
+ sleepTimeShift++;
+ }
}
} else {
mSleepTimeUs = mIdleSleepTimeUs;
@@ -4009,6 +4217,14 @@
AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
Vector< sp<Track> > *tracksToRemove)
{
+ // clean up deleted track names in AudioMixer before allocating new tracks
+ (void)mTracks.processDeletedTrackNames([this](int name) {
+ // for each name, destroy it in the AudioMixer
+ if (mAudioMixer->exists(name)) {
+ mAudioMixer->destroy(name);
+ }
+ });
+ mTracks.clearDeletedTrackNames();
mixer_state mixerStatus = MIXER_IDLE;
// find out which tracks need to be processed
@@ -4186,13 +4402,19 @@
didModify = true;
// no acknowledgement required for newly active tracks
}
+ sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
// cache the combined master volume and stream type volume for fast mixer; this
// lacks any synchronization or barrier so VolumeProvider may read a stale value
const float vh = track->getVolumeHandler()->getVolume(
- track->mAudioTrackServerProxy->framesReleased()).first;
- track->mCachedVolume = masterVolume
+ proxy->framesReleased()).first;
+ float volume = masterVolume
* mStreamTypes[track->streamType()].volume
* vh;
+ track->mCachedVolume = volume;
+ gain_minifloat_packed_t vlr = proxy->getVolumeLR();
+ float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
+ float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+ track->setFinalVolume((vlf + vrf) / 2.f);
++fastTracks;
} else {
// was it previously active?
@@ -4205,10 +4427,16 @@
// because we're about to decrement the last sp<> on those tracks.
block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
} else {
- LOG_ALWAYS_FATAL("fast track %d should have been active; "
+ // ALOGW rather than LOG_ALWAYS_FATAL because it seems there are cases where an
+ // AudioTrack may start (which may not be with a start() but with a write()
+ // after underrun) and immediately paused or released. In that case the
+ // FastTrack state hasn't had time to update.
+ // TODO Remove the ALOGW when this theory is confirmed.
+ ALOGW("fast track %d should have been active; "
"mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
j, track->mState, state->mTrackMask, recentUnderruns,
track->sharedBuffer() != 0);
+ // Since the FastMixer state already has the track inactive, do nothing here.
}
tracksToRemove->add(track);
// Avoids a misleading display in dumpsys
@@ -4224,6 +4452,24 @@
// The first time a track is added we wait
// for all its buffers to be filled before processing it
int name = track->name();
+
+ // if an active track doesn't exist in the AudioMixer, create it.
+ if (!mAudioMixer->exists(name)) {
+ status_t status = mAudioMixer->create(
+ name,
+ track->mChannelMask,
+ track->mFormat,
+ track->mSessionId);
+ if (status != OK) {
+ ALOGW("%s: cannot create track name"
+ " %d, mask %#x, format %#x, sessionId %d in AudioMixer",
+ __func__, name, track->mChannelMask, track->mFormat, track->mSessionId);
+ tracksToRemove->add(track);
+ track->invalidate(); // consider it dead.
+ continue;
+ }
+ }
+
// make sure that we have enough frames to mix one full buffer.
// enforce this condition only once to enable draining the buffer in case the client
// app does not call stop() and relies on underrun to stop:
@@ -4249,20 +4495,9 @@
size_t framesReady = track->framesReady();
if (ATRACE_ENABLED()) {
// I wish we had formatted trace names
- char traceName[16];
- strcpy(traceName, "nRdy");
- int name = track->name();
- if (AudioMixer::TRACK0 <= name &&
- name < (int) (AudioMixer::TRACK0 + AudioMixer::MAX_NUM_TRACKS)) {
- name -= AudioMixer::TRACK0;
- traceName[4] = (name / 10) + '0';
- traceName[5] = (name % 10) + '0';
- } else {
- traceName[4] = '?';
- traceName[5] = '?';
- }
- traceName[6] = '\0';
- ATRACE_INT(traceName, framesReady);
+ std::string traceName("nRdy");
+ traceName += std::to_string(track->name());
+ ATRACE_INT(traceName.c_str(), framesReady);
}
if ((framesReady >= minFrames) && track->isReady() &&
!track->isPaused() && !track->isTerminated())
@@ -4356,6 +4591,8 @@
vaf = v * sendLevel * (1. / MAX_GAIN_INT);
}
+ track->setFinalVolume((vrf + vlf) / 2.f);
+
// Delegate volume control to effect in track effect chain if needed
if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
// Do not ramp volume if volume is controlled by effect
@@ -4461,7 +4698,7 @@
mAudioMixer->setParameter(
name,
AudioMixer::TRACK,
- AudioMixer::MIXER_FORMAT, (void *)AUDIO_FORMAT_PCM_16_BIT);
+ AudioMixer::MIXER_FORMAT, (void *)EFFECT_BUFFER_FORMAT);
mAudioMixer->setParameter(
name,
AudioMixer::TRACK,
@@ -4585,6 +4822,18 @@
track->reset();
}
+ // Track destruction may occur outside of threadLoop once it is removed from active tracks.
+ // Ensure the AudioMixer doesn't have a raw "buffer provider" pointer to the track if
+ // it ceases to be active, to allow safe removal from the AudioMixer at the start
+ // of prepareTracks_l(); this releases any outstanding buffer back to the track.
+ // See also the implementation of destroyTrack_l().
+ for (const auto &track : *tracksToRemove) {
+ const int name = track->name();
+ if (mAudioMixer->exists(name)) { // Normal tracks here, fast tracks in FastMixer.
+ mAudioMixer->setBufferProvider(name, nullptr /* bufferProvider */);
+ }
+ }
+
// remove all the tracks that need to be...
removeTracks_l(*tracksToRemove);
@@ -4628,7 +4877,7 @@
}
// trackCountForUid_l() must be called with ThreadBase::mLock held
-uint32_t AudioFlinger::PlaybackThread::trackCountForUid_l(uid_t uid)
+uint32_t AudioFlinger::PlaybackThread::trackCountForUid_l(uid_t uid) const
{
uint32_t trackCount = 0;
for (size_t i = 0; i < mTracks.size() ; i++) {
@@ -4639,21 +4888,24 @@
return trackCount;
}
-// getTrackName_l() must be called with ThreadBase::mLock held
-int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask,
- audio_format_t format, audio_session_t sessionId, uid_t uid)
+// isTrackAllowed_l() must be called with ThreadBase::mLock held
+bool AudioFlinger::MixerThread::isTrackAllowed_l(
+ audio_channel_mask_t channelMask, audio_format_t format,
+ audio_session_t sessionId, uid_t uid) const
{
- if (trackCountForUid_l(uid) > (PlaybackThread::kMaxTracksPerUid - 1)) {
- return -1;
+ if (!PlaybackThread::isTrackAllowed_l(channelMask, format, sessionId, uid)) {
+ return false;
}
- return mAudioMixer->getTrackName(channelMask, format, sessionId);
-}
-
-// deleteTrackName_l() must be called with ThreadBase::mLock held
-void AudioFlinger::MixerThread::deleteTrackName_l(int name)
-{
- ALOGV("remove track (%d) and delete from mixer", name);
- mAudioMixer->deleteTrackName(name);
+ // Check validity as we don't call AudioMixer::create() here.
+ if (!AudioMixer::isValidFormat(format)) {
+ ALOGW("%s: invalid format: %#x", __func__, format);
+ return false;
+ }
+ if (!AudioMixer::isValidChannelMask(channelMask)) {
+ ALOGW("%s: invalid channelMask: %#x", __func__, channelMask);
+ return false;
+ }
+ return true;
}
// checkForNewParameter_l() must be called with ThreadBase::mLock held
@@ -4746,13 +4998,18 @@
readOutputParameters_l();
delete mAudioMixer;
mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
- for (size_t i = 0; i < mTracks.size() ; i++) {
- int name = getTrackName_l(mTracks[i]->mChannelMask,
- mTracks[i]->mFormat, mTracks[i]->mSessionId, mTracks[i]->uid());
- if (name < 0) {
- break;
- }
- mTracks[i]->mName = name;
+ for (const auto &track : mTracks) {
+ const int name = track->name();
+ status_t status = mAudioMixer->create(
+ name,
+ track->mChannelMask,
+ track->mFormat,
+ track->mSessionId);
+ ALOGW_IF(status != NO_ERROR,
+ "%s: cannot create track name"
+ " %d, mask %#x, format %#x, sessionId %d in AudioMixer",
+ __func__,
+ name, track->mChannelMask, track->mFormat, track->mSessionId);
}
sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
}
@@ -4766,7 +5023,7 @@
{
PlaybackThread::dumpInternals(fd, args);
dprintf(fd, " Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
- dprintf(fd, " AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
+ dprintf(fd, " AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
dprintf(fd, " Master mono: %s\n", mMasterMono ? "on" : "off");
if (hasFastMixer()) {
@@ -4880,6 +5137,7 @@
}
if (lastTrack) {
+ track->setFinalVolume((left + right) / 2.f);
if (left != mLeftVolFloat || right != mRightVolFloat) {
mLeftVolFloat = left;
mRightVolFloat = right;
@@ -5201,21 +5459,6 @@
return !mStandby && !(trackPaused || (mHwPaused && !trackStopped));
}
-// getTrackName_l() must be called with ThreadBase::mLock held
-int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask __unused,
- audio_format_t format __unused, audio_session_t sessionId __unused, uid_t uid)
-{
- if (trackCountForUid_l(uid) > (PlaybackThread::kMaxTracksPerUid - 1)) {
- return -1;
- }
- return 0;
-}
-
-// deleteTrackName_l() must be called with ThreadBase::mLock held
-void AudioFlinger::DirectOutputThread::deleteTrackName_l(int name __unused)
-{
-}
-
// checkForNewParameter_l() must be called with ThreadBase::mLock held
bool AudioFlinger::DirectOutputThread::checkForNewParameter_l(const String8& keyValuePair,
status_t& status)
@@ -5453,7 +5696,7 @@
mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true),
mOffloadUnderrunPosition(~0LL)
{
- //FIXME: mStandby should be set to true by ThreadBase constructor
+ //FIXME: mStandby should be set to true by ThreadBase constructor
mStandby = true;
mKeepWakeLock = property_get_bool("ro.audio.offload_wakelock", true /* default_value */);
}
@@ -5833,6 +6076,31 @@
}
}
+void AudioFlinger::DuplicatingThread::dumpInternals(int fd, const Vector<String16>& args __unused)
+{
+ MixerThread::dumpInternals(fd, args);
+
+ std::stringstream ss;
+ const size_t numTracks = mOutputTracks.size();
+ ss << " " << numTracks << " OutputTracks";
+ if (numTracks > 0) {
+ ss << ":";
+ for (const auto &track : mOutputTracks) {
+ const sp<ThreadBase> thread = track->thread().promote();
+ ss << " (" << track->name() << " : ";
+ if (thread.get() != nullptr) {
+ ss << thread.get() << ", " << thread->id();
+ } else {
+ ss << "null";
+ }
+ ss << ")";
+ }
+ }
+ ss << "\n";
+ std::string result = ss.str();
+ write(fd, result.c_str(), result.size());
+}
+
void AudioFlinger::DuplicatingThread::saveOutputTracks()
{
outputTracks = mOutputTracks;
@@ -5927,6 +6195,14 @@
return true;
}
+void AudioFlinger::DuplicatingThread::sendMetadataToBackend_l(
+ const StreamOutHalInterface::SourceMetadata& metadata)
+{
+ for (auto& outputTrack : outputTracks) { // not mOutputTracks
+ outputTrack->setMetadatas(metadata.tracks);
+ }
+}
+
uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
{
return (mWaitTimeMs * 1000) / 2;
@@ -6254,6 +6530,8 @@
mActiveTracks.updatePowerState(this);
+ updateMetadata_l();
+
if (allStopped) {
standbyIfNotAlreadyInStandby();
}
@@ -6340,13 +6618,27 @@
if (mPipeSource != 0) {
size_t framesToRead = mBufferSize / mFrameSize;
framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
- framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
- framesToRead);
- // since pipe is non-blocking, simulate blocking input by waiting for 1/2 of
- // buffer size or at least for 20ms.
- size_t sleepFrames = max(
- min(mPipeFramesP2, mRsmpInFramesP2) / 2, FMS_20 * mSampleRate / 1000);
- if (framesRead <= (ssize_t) sleepFrames) {
+
+ // The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
+ // to the full buffer point (clearing the overflow condition). Upon OVERRUN error,
+ // we immediately retry the read() to get data and prevent another overflow.
+ for (int retries = 0; retries <= 2; ++retries) {
+ ALOGW_IF(retries > 0, "overrun on read from pipe, retry #%d", retries);
+ framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
+ framesToRead);
+ if (framesRead != OVERRUN) break;
+ }
+
+ const ssize_t availableToRead = mPipeSource->availableToRead();
+ if (availableToRead >= 0) {
+ // PipeSource is the master clock. It is up to the AudioRecord client to keep up.
+ LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
+ "more frames to read than fifo size, %zd > %zu",
+ availableToRead, mPipeFramesP2);
+ const size_t pipeFramesFree = mPipeFramesP2 - availableToRead;
+ const size_t sleepFrames = min(pipeFramesFree, mRsmpInFramesP2) / 2;
+ ALOGVV("mPipeFramesP2:%zu mRsmpInFramesP2:%zu sleepFrames:%zu availableToRead:%zd",
+ mPipeFramesP2, mRsmpInFramesP2, sleepFrames, availableToRead);
sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
}
if (framesRead < 0) {
@@ -6426,6 +6718,7 @@
rear = mRsmpInRear += framesRead;
size = activeTracks.size();
+
// loop over each active track
for (size_t i = 0; i < size; i++) {
activeTrack = activeTracks[i];
@@ -6482,6 +6775,11 @@
if (activeTrack->mFramesToDrop == 0) {
if (framesOut > 0) {
activeTrack->mSink.frameCount = framesOut;
+ // Sanitize before releasing if the track has no access to the source data
+ // An idle UID receives silence from non virtual devices until active
+ if (activeTrack->isSilenced()) {
+ memset(activeTrack->mSink.raw, 0, framesOut * mFrameSize);
+ }
activeTrack->releaseBuffer(&activeTrack->mSink);
}
} else {
@@ -6612,12 +6910,13 @@
// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
- uint32_t sampleRate,
+ const audio_attributes_t& attr,
+ uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
audio_session_t sessionId,
- size_t *notificationFrames,
+ size_t *pNotificationFrameCount,
uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
@@ -6625,16 +6924,30 @@
audio_port_handle_t portId)
{
size_t frameCount = *pFrameCount;
+ size_t notificationFrameCount = *pNotificationFrameCount;
sp<RecordTrack> track;
status_t lStatus;
audio_input_flags_t inputFlags = mInput->flags;
+ audio_input_flags_t requestedFlags = *flags;
+ uint32_t sampleRate;
+
+ lStatus = initCheck();
+ if (lStatus != NO_ERROR) {
+ ALOGE("createRecordTrack_l() audio driver not initialized");
+ goto Exit;
+ }
+
+ if (*pSampleRate == 0) {
+ *pSampleRate = mSampleRate;
+ }
+ sampleRate = *pSampleRate;
// special case for FAST flag considered OK if fast capture is present
if (hasFastCapture()) {
inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
}
- // Check if requested flags are compatible with output stream flags
+ // Check if requested flags are compatible with input stream flags
if ((*flags & inputFlags) != *flags) {
ALOGW("createRecordTrack_l(): mismatch between requested flags (%08x) and"
" input flags (%08x)",
@@ -6689,12 +7002,20 @@
}
}
+ // If FAST or RAW flags were corrected, ask caller to request new input from audio policy
+ if ((*flags & AUDIO_INPUT_FLAG_FAST) !=
+ (requestedFlags & AUDIO_INPUT_FLAG_FAST)) {
+ *flags = (audio_input_flags_t) (*flags & ~(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW));
+ lStatus = BAD_TYPE;
+ goto Exit;
+ }
+
// compute track buffer size in frames, and suggest the notification frame count
if (*flags & AUDIO_INPUT_FLAG_FAST) {
// fast track: frame count is exactly the pipe depth
frameCount = mPipeFramesP2;
// ignore requested notificationFrames, and always notify exactly once every HAL buffer
- *notificationFrames = mFrameCount;
+ notificationFrameCount = mFrameCount;
} else {
// not fast track: max notification period is resampled equivalent of one HAL buffer time
// or 20 ms if there is a fast capture
@@ -6713,22 +7034,17 @@
const size_t minFrameCount = maxNotificationFrames *
max(kMinNotifications, minNotificationsByMs);
frameCount = max(frameCount, minFrameCount);
- if (*notificationFrames == 0 || *notificationFrames > maxNotificationFrames) {
- *notificationFrames = maxNotificationFrames;
+ if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
+ notificationFrameCount = maxNotificationFrames;
}
}
*pFrameCount = frameCount;
-
- lStatus = initCheck();
- if (lStatus != NO_ERROR) {
- ALOGE("createRecordTrack_l() audio driver not initialized");
- goto Exit;
- }
+ *pNotificationFrameCount = notificationFrameCount;
{ // scope for mLock
Mutex::Autolock _l(mLock);
- track = new RecordTrack(this, client, sampleRate,
+ track = new RecordTrack(this, client, attr, sampleRate,
format, channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, uid,
*flags, TrackBase::TYPE_DEFAULT, portId);
@@ -6804,7 +7120,8 @@
status_t status = NO_ERROR;
if (recordTrack->isExternalTrack()) {
mLock.unlock();
- status = AudioSystem::startInput(mId, recordTrack->sessionId());
+ bool silenced;
+ status = AudioSystem::startInput(recordTrack->portId(), &silenced);
mLock.lock();
// FIXME should verify that recordTrack is still in mActiveTracks
if (status != NO_ERROR) {
@@ -6813,6 +7130,7 @@
ALOGV("RecordThread::start error %d", status);
return status;
}
+ recordTrack->setSilenced(silenced);
}
// Catch up with current buffer indices if thread is already running.
// This is what makes a new client discard all buffered data. If the track's mRsmpInFront
@@ -6835,7 +7153,7 @@
startError:
if (recordTrack->isExternalTrack()) {
- AudioSystem::stopInput(mId, recordTrack->sessionId());
+ AudioSystem::stopInput(recordTrack->portId());
}
recordTrack->clearSyncStartEvent();
// FIXME I wonder why we do not reset the state here?
@@ -6909,6 +7227,32 @@
#endif
}
+status_t AudioFlinger::RecordThread::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones)
+{
+ ALOGV("RecordThread::getActiveMicrophones");
+ AutoMutex _l(mLock);
+ status_t status = mInput->stream->getActiveMicrophones(activeMicrophones);
+ return status;
+}
+
+void AudioFlinger::RecordThread::updateMetadata_l()
+{
+ if (mInput == nullptr || mInput->stream == nullptr ||
+ !mActiveTracks.readAndClearHasChanged()) {
+ return;
+ }
+ StreamInHalInterface::SinkMetadata metadata;
+ for (const sp<RecordTrack> &track : mActiveTracks) {
+ // No track is invalid as this is called after prepareTrack_l in the same critical section
+ metadata.tracks.push_back({
+ .source = track->attributes().source,
+ .gain = 1, // capture tracks do not have volumes
+ });
+ }
+ mInput->stream->updateSinkMetadata(metadata);
+}
+
// destroyTrack_l() must be called with ThreadBase::mLock held
void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track)
{
@@ -7016,6 +7360,16 @@
write(fd, result.string(), result.size());
}
+void AudioFlinger::RecordThread::setRecordSilenced(uid_t uid, bool silenced)
+{
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mTracks.size() ; i++) {
+ sp<RecordTrack> track = mTracks[i];
+ if (track != 0 && track->uid() == uid) {
+ track->setSilenced(silenced);
+ }
+ }
+}
void AudioFlinger::RecordThread::ResamplerBufferProvider::reset()
{
@@ -7573,7 +7927,9 @@
mSessionId(AUDIO_SESSION_NONE),
mDeviceId(AUDIO_PORT_HANDLE_NONE), mPortId(AUDIO_PORT_HANDLE_NONE),
mHalStream(stream), mHalDevice(hwDev->hwDevice()), mAudioHwDev(hwDev),
- mActiveTracks(&this->mLocalLog)
+ mActiveTracks(&this->mLocalLog),
+ mHalVolFloat(-1.0f), // Initialize to illegal value so it always gets set properly later.
+ mNoCallbackWarningCount(0)
{
mStandby = true;
readHalParameters_l();
@@ -7591,14 +7947,21 @@
void AudioFlinger::MmapThread::disconnect()
{
- for (const sp<MmapTrack> &t : mActiveTracks) {
+ ActiveTracks<MmapTrack> activeTracks;
+ {
+ Mutex::Autolock _l(mLock);
+ for (const sp<MmapTrack> &t : mActiveTracks) {
+ activeTracks.add(t);
+ }
+ }
+ for (const sp<MmapTrack> &t : activeTracks) {
stop(t->portId());
}
// This will decrement references and may cause the destruction of this thread.
if (isOutput()) {
AudioSystem::releaseOutput(mId, streamType(), mSessionId);
} else {
- AudioSystem::releaseInput(mId, mSessionId);
+ AudioSystem::releaseInput(mPortId);
}
}
@@ -7636,6 +7999,17 @@
return mHalStream->getMmapPosition(position);
}
+status_t AudioFlinger::MmapThread::exitStandby()
+{
+ status_t ret = mHalStream->start();
+ if (ret != NO_ERROR) {
+ ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
+ return ret;
+ }
+ mStandby = false;
+ return NO_ERROR;
+}
+
status_t AudioFlinger::MmapThread::start(const AudioClient& client,
audio_port_handle_t *handle)
{
@@ -7649,17 +8023,7 @@
if (*handle == mPortId) {
// for the first track, reuse portId and session allocated when the stream was opened
- ret = mHalStream->start();
- if (ret != NO_ERROR) {
- ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
- return ret;
- }
- mStandby = false;
- return NO_ERROR;
- }
-
- if (!isOutput() && !recordingAllowed(client.packageName, client.clientPid, client.clientUid)) {
- return PERMISSION_DENIED;
+ return exitStandby();
}
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
@@ -7677,6 +8041,7 @@
ret = AudioSystem::getOutputForAttr(&mAttr, &io,
mSessionId,
&stream,
+ client.clientPid,
client.clientUid,
&config,
flags,
@@ -7692,6 +8057,7 @@
mSessionId,
client.clientPid,
client.clientUid,
+ client.packageName,
&config,
AUDIO_INPUT_FLAG_MMAP_NOIRQ,
&deviceId,
@@ -7705,30 +8071,46 @@
return BAD_VALUE;
}
+ bool silenced = false;
if (isOutput()) {
ret = AudioSystem::startOutput(mId, streamType(), mSessionId);
} else {
- ret = AudioSystem::startInput(mId, mSessionId);
+ ret = AudioSystem::startInput(portId, &silenced);
}
+ Mutex::Autolock _l(mLock);
// abort if start is rejected by audio policy manager
if (ret != NO_ERROR) {
ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
if (mActiveTracks.size() != 0) {
+ mLock.unlock();
if (isOutput()) {
AudioSystem::releaseOutput(mId, streamType(), mSessionId);
} else {
- AudioSystem::releaseInput(mId, mSessionId);
+ AudioSystem::releaseInput(portId);
}
+ mLock.lock();
} else {
mHalStream->stop();
}
return PERMISSION_DENIED;
}
- sp<MmapTrack> track = new MmapTrack(this, mSampleRate, mFormat, mChannelMask, mSessionId,
+ if (isOutput()) {
+ // force volume update when a new track is added
+ mHalVolFloat = -1.0f;
+ } else if (!silenced) {
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ if (track->isSilenced_l() && track->uid() != client.clientUid)
+ track->invalidate();
+ }
+ }
+
+ // Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes?
+ sp<MmapTrack> track = new MmapTrack(this, mAttr, mSampleRate, mFormat, mChannelMask, mSessionId,
client.clientUid, client.clientPid, portId);
+ track->setSilenced_l(silenced);
mActiveTracks.add(track);
sp<EffectChain> chain = getEffectChain_l(mSessionId);
if (chain != 0) {
@@ -7758,6 +8140,8 @@
return NO_ERROR;
}
+ Mutex::Autolock _l(mLock);
+
sp<MmapTrack> track;
for (const sp<MmapTrack> &t : mActiveTracks) {
if (handle == t->portId()) {
@@ -7771,13 +8155,15 @@
mActiveTracks.remove(track);
+ mLock.unlock();
if (isOutput()) {
AudioSystem::stopOutput(mId, streamType(), track->sessionId());
AudioSystem::releaseOutput(mId, streamType(), track->sessionId());
} else {
- AudioSystem::stopInput(mId, track->sessionId());
- AudioSystem::releaseInput(mId, track->sessionId());
+ AudioSystem::stopInput(track->portId());
+ AudioSystem::releaseInput(track->portId());
}
+ mLock.lock();
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
@@ -7862,6 +8248,8 @@
mActiveTracks.updatePowerState(this);
+ updateMetadata_l();
+
lockEffectChains_l(effectChains);
for (size_t i = 0; i < effectChains.size(); i ++) {
effectChains[i]->process_l();
@@ -8027,7 +8415,9 @@
sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
sp<MmapStreamCallback> callback = mCallback.promote();
if (mDeviceId != deviceId && callback != 0) {
+ mLock.unlock();
callback->onRoutingChanged(deviceId);
+ mLock.lock();
}
mDeviceId = deviceId;
}
@@ -8036,7 +8426,9 @@
sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
sp<MmapStreamCallback> callback = mCallback.promote();
if (mDeviceId != deviceId && callback != 0) {
+ mLock.unlock();
callback->onRoutingChanged(deviceId);
+ mLock.lock();
}
mDeviceId = deviceId;
}
@@ -8202,9 +8594,13 @@
if (track->isInvalid()) {
sp<MmapStreamCallback> callback = mCallback.promote();
if (callback != 0) {
- callback->onTearDown();
+ mLock.unlock();
+ callback->onTearDown(track->portId());
+ mLock.lock();
+ } else if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
+ ALOGW("Could not notify MMAP stream tear down: no onTearDown callback!");
+ mNoCallbackWarningCount++;
}
- break;
}
}
}
@@ -8256,7 +8652,9 @@
audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady)
: MmapThread(audioFlinger, id, hwDev, output->stream, outDevice, inDevice, systemReady),
mStreamType(AUDIO_STREAM_MUSIC),
- mStreamVolume(1.0), mStreamMute(false), mOutput(output)
+ mStreamVolume(1.0),
+ mStreamMute(false),
+ mOutput(output)
{
snprintf(mThreadName, kThreadNameLength, "AudioMmapOut_%X", id);
mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
@@ -8364,7 +8762,6 @@
}
if (volume != mHalVolFloat) {
- mHalVolFloat = volume;
// Convert volumes from float to 8.24
uint32_t vol = (uint32_t)(volume * (1 << 24));
@@ -8377,7 +8774,10 @@
volume = (float)vol / (1 << 24);
}
// Try to use HW volume control and fall back to SW control if not implemented
- if (mOutput->stream->setVolume(volume, volume) != NO_ERROR) {
+ if (mOutput->stream->setVolume(volume, volume) == NO_ERROR) {
+ mHalVolFloat = volume; // HW volume control worked, so update value.
+ mNoCallbackWarningCount = 0;
+ } else {
sp<MmapStreamCallback> callback = mCallback.promote();
if (callback != 0) {
int channelCount;
@@ -8390,14 +8790,39 @@
for (int i = 0; i < channelCount; i++) {
values.add(volume);
}
+ mHalVolFloat = volume; // SW volume control worked, so update value.
+ mNoCallbackWarningCount = 0;
+ mLock.unlock();
callback->onVolumeChanged(mChannelMask, values);
+ mLock.lock();
} else {
- ALOGW("Could not set MMAP stream volume: no volume callback!");
+ if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
+ ALOGW("Could not set MMAP stream volume: no volume callback!");
+ mNoCallbackWarningCount++;
+ }
}
}
}
}
+void AudioFlinger::MmapPlaybackThread::updateMetadata_l()
+{
+ if (mOutput == nullptr || mOutput->stream == nullptr ||
+ !mActiveTracks.readAndClearHasChanged()) {
+ return;
+ }
+ StreamOutHalInterface::SourceMetadata metadata;
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ // No track is invalid as this is called after prepareTrack_l in the same critical section
+ metadata.tracks.push_back({
+ .usage = track->attributes().usage,
+ .content_type = track->attributes().content_type,
+ .gain = mHalVolFloat, // TODO: propagate from aaudio pre-mix volume
+ });
+ }
+ mOutput->stream->updateSourceMetadata(metadata);
+}
+
void AudioFlinger::MmapPlaybackThread::checkSilentMode_l()
{
if (!mMasterMute) {
@@ -8435,6 +8860,12 @@
mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
}
+status_t AudioFlinger::MmapCaptureThread::exitStandby()
+{
+ mInput->stream->setGain(1.0f);
+ return MmapThread::exitStandby();
+}
+
AudioFlinger::AudioStreamIn* AudioFlinger::MmapCaptureThread::clearInput()
{
Mutex::Autolock _l(mLock);
@@ -8442,4 +8873,61 @@
mInput = NULL;
return input;
}
+
+
+void AudioFlinger::MmapCaptureThread::processVolume_l()
+{
+ bool changed = false;
+ bool silenced = false;
+
+ sp<MmapStreamCallback> callback = mCallback.promote();
+ if (callback == 0) {
+ if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
+ ALOGW("Could not set MMAP stream silenced: no onStreamSilenced callback!");
+ mNoCallbackWarningCount++;
+ }
+ }
+
+ // After a change in any track's silenced state, mute capture in the audio DSP if at least one
+ // track is silenced, and unmute it otherwise
+ for (size_t i = 0; i < mActiveTracks.size() && !silenced; i++) {
+ if (!mActiveTracks[i]->getAndSetSilencedNotified_l()) {
+ changed = true;
+ silenced = mActiveTracks[i]->isSilenced_l();
+ }
+ }
+
+ if (changed) {
+ mInput->stream->setGain(silenced ? 0.0f: 1.0f);
+ }
+}
+
+void AudioFlinger::MmapCaptureThread::updateMetadata_l()
+{
+ if (mInput == nullptr || mInput->stream == nullptr ||
+ !mActiveTracks.readAndClearHasChanged()) {
+ return;
+ }
+ StreamInHalInterface::SinkMetadata metadata;
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ // No track is invalid as this is called after prepareTrack_l in the same critical section
+ metadata.tracks.push_back({
+ .source = track->attributes().source,
+ .gain = 1, // capture tracks do not have volumes
+ });
+ }
+ mInput->stream->updateSinkMetadata(metadata);
+}
+
+void AudioFlinger::MmapCaptureThread::setRecordSilenced(uid_t uid, bool silenced)
+{
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mActiveTracks.size() ; i++) {
+ if (mActiveTracks[i]->uid() == uid) {
+ mActiveTracks[i]->setSilenced_l(silenced);
+ broadcast_l();
+ }
+ }
+}
+
} // namespace android
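The updateMetadata_l() overrides introduced above share one pattern: do nothing unless the set of active tracks has changed, then rebuild the complete per-track metadata vector from each track's audio_attributes_t and hand it to the HAL stream in a single call. A minimal, self-contained sketch of that pattern follows; TrackMeta, SinkMeta and StreamSink are simplified stand-ins, not the real AudioFlinger or HAL types.

    // Illustrative sketch only -- simplified stand-ins for the HAL metadata types.
    #include <vector>

    struct TrackMeta { int source; float gain; };          // one entry per active capture track
    struct SinkMeta  { std::vector<TrackMeta> tracks; };   // full snapshot sent to the stream

    struct StreamSink {
        void updateSinkMetadata(const SinkMeta&) { /* push to HAL */ }
    };

    struct ActiveTrack { int source; };

    // Mirrors the shape of RecordThread::updateMetadata_l(): send a full snapshot, but only
    // when the "has changed" flag (readAndClearHasChanged() in the patch) reports a change.
    void updateMetadata(bool& hasChanged, const std::vector<ActiveTrack>& active, StreamSink& stream) {
        if (!hasChanged) {
            return;
        }
        hasChanged = false;
        SinkMeta meta;
        for (const ActiveTrack& t : active) {
            meta.tracks.push_back({t.source, 1.0f});       // capture tracks carry no volume, so gain is 1
        }
        stream.updateSinkMetadata(meta);
    }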
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index dd2b89b..28d4482 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -425,6 +425,9 @@
// check if some effects must be suspended when an effect chain is added
void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
+ // sends the metadata of the active tracks to the HAL
+ virtual void updateMetadata_l() = 0;
+
String16 getWakeLockTag();
virtual void preExit() { }
@@ -485,6 +488,7 @@
// Updated by updateSuspendedSessions_l() only.
KeyedVector< audio_session_t, KeyedVector< int, sp<SuspendedSessionDesc> > >
mSuspendedSessions;
+ // TODO: add comment and adjust size as needed
static const size_t kLogSize = 4 * 1024;
sp<NBLog::Writer> mNBLogWriter;
bool mSystemReady;
@@ -562,6 +566,10 @@
// periodically called in the threadLoop() to update power state uids.
void updatePowerState(sp<ThreadBase> thread, bool force = false);
+ /** @return true if one or more active tracks were added or removed since the
+ * last time this function was called or the vector was created. */
+ bool readAndClearHasChanged();
+
private:
void logTrack(const char *funcName, const sp<T> &track) const;
@@ -580,6 +588,8 @@
int mLastActiveTracksGeneration;
wp<T> mLatestActiveTrack; // latest track added to ActiveTracks
SimpleLog * const mLocalLog;
+ // If the vector has changed since last call to readAndClearHasChanged
+ bool mHasChanged = false;
};
SimpleLog mLocalLog;
@@ -622,8 +632,8 @@
static const int8_t kMaxTrackRetriesOffload = 20;
static const int8_t kMaxTrackStartupRetriesOffload = 100;
static const int8_t kMaxTrackStopRetriesOffload = 2;
- // 14 tracks max per client allows for 2 misbehaving application leaving 4 available tracks.
- static const uint32_t kMaxTracksPerUid = 14;
+ static constexpr uint32_t kMaxTracksPerUid = 40;
+ static constexpr size_t kMaxTracks = 256;
// Maximum delay (in nanoseconds) for upcoming buffers in suspend mode, otherwise
// if delay is greater, the estimated time for timeLoopNextNs is reset.
@@ -705,10 +715,14 @@
sp<Track> createTrack_l(
const sp<AudioFlinger::Client>& client,
audio_stream_type_t streamType,
- uint32_t sampleRate,
+ const audio_attributes_t& attr,
+ uint32_t *sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
+ size_t *pNotificationFrameCount,
+ uint32_t notificationsPerBuffer,
+ float speed,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
audio_output_flags_t *flags,
@@ -737,11 +751,10 @@
virtual String8 getParameters(const String8& keys);
virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0);
status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
- // FIXME rename mixBuffer() to sinkBuffer() and remove int16_t* dependency.
// Consider also removing and passing an explicit mMainBuffer initialization
// parameter to AF::PlaybackThread::Track::Track().
- int16_t *mixBuffer() const {
- return reinterpret_cast<int16_t *>(mSinkBuffer); };
+ effect_buffer_t *sinkBuffer() const {
+ return reinterpret_cast<effect_buffer_t *>(mSinkBuffer); };
virtual void detachAuxEffect_l(int effectId);
status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track>& track,
@@ -776,6 +789,16 @@
virtual bool isOutput() const override { return true; }
+ // returns true if the track is allowed to be added to the thread.
+ virtual bool isTrackAllowed_l(
+ audio_channel_mask_t channelMask __unused,
+ audio_format_t format __unused,
+ audio_session_t sessionId __unused,
+ uid_t uid) const {
+ return trackCountForUid_l(uid) < PlaybackThread::kMaxTracksPerUid
+ && mTracks.size() < PlaybackThread::kMaxTracks;
+ }
+
protected:
// updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
@@ -864,12 +887,6 @@
protected:
ActiveTracks<Track> mActiveTracks;
- // Allocate a track name for a given channel mask.
- // Returns name >= 0 if successful, -1 on failure.
- virtual int getTrackName_l(audio_channel_mask_t channelMask, audio_format_t format,
- audio_session_t sessionId, uid_t uid) = 0;
- virtual void deleteTrackName_l(int name) = 0;
-
// Time to sleep between cycles when:
virtual uint32_t activeSleepTimeUs() const; // mixer state MIXER_TRACKS_ENABLED
virtual uint32_t idleSleepTimeUs() const = 0; // mixer state MIXER_IDLE
@@ -897,7 +914,7 @@
&& mHwSupportsPause
&& (mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC); }
- uint32_t trackCountForUid_l(uid_t uid);
+ uint32_t trackCountForUid_l(uid_t uid) const;
private:
@@ -910,11 +927,70 @@
void removeTrack_l(const sp<Track>& track);
void readOutputParameters_l();
+ void updateMetadata_l() final;
+ virtual void sendMetadataToBackend_l(const StreamOutHalInterface::SourceMetadata& metadata);
virtual void dumpInternals(int fd, const Vector<String16>& args);
void dumpTracks(int fd, const Vector<String16>& args);
- SortedVector< sp<Track> > mTracks;
+ // The Tracks class manages names for all tracks
+ // added and removed from the Thread.
+ template <typename T>
+ class Tracks {
+ public:
+ Tracks(bool saveDeletedTrackNames) :
+ mSaveDeletedTrackNames(saveDeletedTrackNames) { }
+
+ // SortedVector methods
+ ssize_t add(const sp<T> &track);
+ ssize_t remove(const sp<T> &track);
+ size_t size() const {
+ return mTracks.size();
+ }
+ bool isEmpty() const {
+ return mTracks.isEmpty();
+ }
+ ssize_t indexOf(const sp<T> &item) {
+ return mTracks.indexOf(item);
+ }
+ sp<T> operator[](size_t index) const {
+ return mTracks[index];
+ }
+ typename SortedVector<sp<T>>::iterator begin() {
+ return mTracks.begin();
+ }
+ typename SortedVector<sp<T>>::iterator end() {
+ return mTracks.end();
+ }
+
+ size_t processDeletedTrackNames(std::function<void(int)> f) {
+ const size_t size = mDeletedTrackNames.size();
+ if (size > 0) {
+ for (const int name : mDeletedTrackNames) {
+ f(name);
+ }
+ }
+ return size;
+ }
+
+ void clearDeletedTrackNames() { mDeletedTrackNames.clear(); }
+
+ private:
+ // Track names pending deletion for MIXER type threads
+ const bool mSaveDeletedTrackNames; // true to enable tracking
+ std::set<int> mDeletedTrackNames;
+
+ // Fast lookup of previously deleted track names for reuse.
+ // This is an arbitrary decision (actually any non-negative
+ // integer that isn't in mTracks[*]->names() could be used) - we attempt
+ // to use the smallest possible available name.
+ std::set<int> mUnusedTrackNames;
+
+ SortedVector<sp<T>> mTracks; // wrapped SortedVector.
+ };
+
+ Tracks<Track> mTracks;
+
stream_type_t mStreamTypes[AUDIO_STREAM_CNT];
AudioStreamOut *mOutput;
@@ -984,6 +1060,7 @@
sp<NBAIO_Source> mTeeSource;
#endif
uint32_t mScreenState; // cached copy of gScreenState
+ // TODO: add comment and adjust size as needed
static const size_t kFastMixerLogSize = 8 * 1024;
sp<NBLog::Writer> mFastMixerNBLogWriter;
@@ -1020,11 +1097,11 @@
status_t& status);
virtual void dumpInternals(int fd, const Vector<String16>& args);
+ virtual bool isTrackAllowed_l(
+ audio_channel_mask_t channelMask, audio_format_t format,
+ audio_session_t sessionId, uid_t uid) const override;
protected:
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
- virtual int getTrackName_l(audio_channel_mask_t channelMask, audio_format_t format,
- audio_session_t sessionId, uid_t uid);
- virtual void deleteTrackName_l(int name);
virtual uint32_t idleSleepTimeUs() const;
virtual uint32_t suspendSleepTimeUs() const;
virtual void cacheParameters_l();
@@ -1102,9 +1179,6 @@
virtual void flushHw_l();
protected:
- virtual int getTrackName_l(audio_channel_mask_t channelMask, audio_format_t format,
- audio_session_t sessionId, uid_t uid);
- virtual void deleteTrackName_l(int name);
virtual uint32_t activeSleepTimeUs() const;
virtual uint32_t idleSleepTimeUs() const;
virtual uint32_t suspendSleepTimeUs() const;
@@ -1208,9 +1282,14 @@
virtual ~DuplicatingThread();
// Thread virtuals
+ virtual void dumpInternals(int fd, const Vector<String16>& args) override;
+
void addOutputTrack(MixerThread* thread);
void removeOutputTrack(MixerThread* thread);
uint32_t waitTimeMs() const { return mWaitTimeMs; }
+
+ void sendMetadataToBackend_l(
+ const StreamOutHalInterface::SourceMetadata& metadata) override;
protected:
virtual uint32_t activeSleepTimeUs() const;
@@ -1323,12 +1402,13 @@
sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
- uint32_t sampleRate,
+ const audio_attributes_t& attr,
+ uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
audio_session_t sessionId,
- size_t *notificationFrames,
+ size_t *pNotificationFrameCount,
uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
@@ -1392,6 +1472,13 @@
void checkBtNrec();
+ // Sets whether record tracks for the given UID are silenced
+ void setRecordSilenced(uid_t uid, bool silenced);
+
+ status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+
+ void updateMetadata_l() override;
+
private:
// Enter standby if not already in standby, and set mStandby flag
void standbyIfNotAlreadyInStandby();
@@ -1456,6 +1543,7 @@
// If a fast capture is present, the Pipe as IMemory, otherwise clear
sp<IMemory> mPipeMemory;
+ // TODO: add comment and adjust size as needed
static const size_t kFastCaptureLogSize = 4 * 1024;
sp<NBLog::Writer> mFastCaptureNBLogWriter;
@@ -1501,6 +1589,7 @@
virtual void threadLoop_exit();
virtual void threadLoop_standby();
virtual bool shouldStandby_l() { return false; }
+ virtual status_t exitStandby();
virtual status_t initCheck() const { return (mHalStream == 0) ? NO_INIT : NO_ERROR; }
virtual size_t frameCount() const { return mFrameCount; }
@@ -1533,6 +1622,9 @@
virtual void invalidateTracks(audio_stream_type_t streamType __unused) {}
+ // Sets whether record tracks for the given UID are silenced
+ virtual void setRecordSilenced(uid_t uid __unused, bool silenced __unused) {}
+
void dump(int fd, const Vector<String16>& args);
virtual void dumpInternals(int fd, const Vector<String16>& args);
void dumpTracks(int fd, const Vector<String16>& args);
@@ -1549,6 +1641,10 @@
sp<DeviceHalInterface> mHalDevice;
AudioHwDevice* const mAudioHwDev;
ActiveTracks<MmapTrack> mActiveTracks;
+ float mHalVolFloat;
+
+ int32_t mNoCallbackWarningCount;
+ static constexpr int32_t kMaxNoCallbackWarnings = 5;
};
class MmapPlaybackThread : public MmapThread, public VolumeInterface
@@ -1582,12 +1678,14 @@
virtual audio_stream_type_t streamType() { return mStreamType; }
virtual void checkSilentMode_l();
- virtual void processVolume_l();
+ void processVolume_l() override;
virtual void dumpInternals(int fd, const Vector<String16>& args);
virtual bool isOutput() const override { return true; }
+ void updateMetadata_l() override;
+
protected:
audio_stream_type_t mStreamType;
@@ -1595,7 +1693,6 @@
float mStreamVolume;
bool mMasterMute;
bool mStreamMute;
- float mHalVolFloat;
AudioStreamOut* mOutput;
};
@@ -1610,8 +1707,13 @@
AudioStreamIn* clearInput();
+ status_t exitStandby() override;
virtual bool isOutput() const override { return false; }
+ void updateMetadata_l() override;
+ void processVolume_l() override;
+ void setRecordSilenced(uid_t uid, bool silenced) override;
+
protected:
AudioStreamIn* mInput;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index d4ce0b4..ccfb69f 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -54,8 +54,14 @@
TYPE_PATCH,
};
+ enum {
+ TRACK_NAME_PENDING = -1,
+ TRACK_NAME_FAILURE = -2,
+ };
+
TrackBase(ThreadBase *thread,
const sp<Client>& client,
+ const audio_attributes_t& mAttr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -92,6 +98,7 @@
virtual void invalidate() { mIsInvalid = true; }
bool isInvalid() const { return mIsInvalid; }
+ audio_attributes_t attributes() const { return mAttr; }
protected:
DISALLOW_COPY_AND_ASSIGN(TrackBase);
@@ -183,6 +190,7 @@
size_t mBufferSize; // size of mBuffer in bytes
// we don't really need a lock for these
track_state mState;
+ const audio_attributes_t mAttr;
const uint32_t mSampleRate; // initial sample rate only; for tracks which
// support dynamic rates, the current value is in control block
const audio_format_t mFormat;
@@ -192,7 +200,7 @@
// where for AudioTrack (but not AudioRecord),
// 8-bit PCM samples are stored as 16-bit
const size_t mFrameCount;// size of track buffer given at createTrack() or
- // openRecord(), and then adjusted as needed
+ // createRecord(), and then adjusted as needed
const audio_session_t mSessionId;
uid_t mUid;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index fe93367..a7c4253 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -52,6 +52,7 @@
namespace android {
+using media::VolumeShaper;
// ----------------------------------------------------------------------------
// TrackBase
// ----------------------------------------------------------------------------
@@ -62,6 +63,7 @@
AudioFlinger::ThreadBase::TrackBase::TrackBase(
ThreadBase *thread,
const sp<Client>& client,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -80,6 +82,7 @@
mCblk(NULL),
// mBuffer, mBufferSize
mState(IDLE),
+ mAttr(attr),
mSampleRate(sampleRate),
mFormat(format),
mChannelMask(channelMask),
@@ -371,6 +374,7 @@
PlaybackThread *thread,
const sp<Client>& client,
audio_stream_type_t streamType,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -383,7 +387,7 @@
audio_output_flags_t flags,
track_type type,
audio_port_handle_t portId)
- : TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
+ : TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
(sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
(sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
sessionId, uid, true /*isOut*/,
@@ -393,16 +397,19 @@
// mRetryCount initialized later when needed
mSharedBuffer(sharedBuffer),
mStreamType(streamType),
- mName(-1), // see note below
- mMainBuffer(thread->mixBuffer()),
+ mName(TRACK_NAME_FAILURE), // set to TRACK_NAME_PENDING on constructor success.
+ mMainBuffer(thread->sinkBuffer()),
mAuxBuffer(NULL),
mAuxEffectId(0), mHasVolumeController(false),
mPresentationCompleteFrames(0),
mFrameMap(16 /* sink-frame-to-track-frame map memory */),
- mVolumeHandler(new VolumeHandler(sampleRate)),
+ mVolumeHandler(new media::VolumeHandler(sampleRate)),
// mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
+ /* The track might not play immediately after becoming active, similar to the case where its volume is 0.
+ * When the track starts playing, its volume will be computed. */
+ mFinalVolume(0.f),
mResumeToStopping(false),
mFlushHwPending(false),
mFlags(flags)
@@ -426,9 +433,8 @@
}
mServerProxy = mAudioTrackServerProxy;
- mName = thread->getTrackName_l(channelMask, format, sessionId, uid);
- if (mName < 0) {
- ALOGE("no more track names available");
+ if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
+ ALOGE("no more tracks available");
return;
}
// only allocate a fast track index if we were able to allocate a normal track name
@@ -447,6 +453,7 @@
mFastIndex = i;
thread->mFastTrackAvailMask &= ~(1 << i);
}
+ mName = TRACK_NAME_PENDING;
}
AudioFlinger::PlaybackThread::Track::~Track()
@@ -465,7 +472,7 @@
status_t AudioFlinger::PlaybackThread::Track::initCheck() const
{
status_t status = TrackBase::initCheck();
- if (status == NO_ERROR && mName < 0) {
+ if (status == NO_ERROR && mName == TRACK_NAME_FAILURE) {
status = NO_MEMORY;
}
return status;
@@ -526,10 +533,12 @@
if (isFastTrack()) {
result.appendFormat("F%c %3d", trackType, mFastIndex);
- } else if (mName >= AudioMixer::TRACK0) {
- result.appendFormat("%c %4d", trackType, mName - AudioMixer::TRACK0);
+ } else if (mName == TRACK_NAME_PENDING) {
+ result.appendFormat("%c pend", trackType);
+ } else if (mName == TRACK_NAME_FAILURE) {
+ result.appendFormat("%c fail", trackType);
} else {
- result.appendFormat("%c none", trackType);
+ result.appendFormat("%c %4d", trackType, mName);
}
char nowInUnderrun;
@@ -758,6 +767,12 @@
mState = state;
}
}
+
+ if (status == NO_ERROR || status == ALREADY_EXISTS) {
+ // for streaming tracks, remove the buffer read stop limit.
+ mAudioTrackServerProxy->start();
+ }
+
// track was already in the active list, not a problem
if (status == ALREADY_EXISTS) {
status = NO_ERROR;
@@ -985,6 +1000,23 @@
return mVolumeHandler->getVolumeShaperState(id);
}
+void AudioFlinger::PlaybackThread::Track::setFinalVolume(float volume)
+{
+ if (mFinalVolume != volume) { // Compare against an epsilon if this produces too many meaningless updates
+ mFinalVolume = volume;
+ setMetadataHasChanged();
+ }
+}
+
+void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
+{
+ *backInserter++ = {
+ .usage = mAttr.usage,
+ .content_type = mAttr.content_type,
+ .gain = mFinalVolume,
+ };
+}
+
status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
if (!isOffloaded() && !isDirect()) {
@@ -1256,6 +1288,7 @@
size_t frameCount,
uid_t uid)
: Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
+ audio_attributes_t{} /* currently unused for output track */,
sampleRate, format, channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
AUDIO_SESSION_NONE, uid, AUDIO_OUTPUT_FLAG_NONE,
@@ -1414,6 +1447,21 @@
return outputBufferFull;
}
+void AudioFlinger::PlaybackThread::OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
+{
+ std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+ backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
+}
+
+void AudioFlinger::PlaybackThread::OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
+ {
+ std::lock_guard<std::mutex> lock(mTrackMetadatasMutex);
+ mTrackMetadatas = metadatas;
+ }
+ // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
+ setMetadataHasChanged();
+}
+
status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
{
@@ -1458,6 +1506,7 @@
size_t bufferSize,
audio_output_flags_t flags)
: Track(playbackThread, NULL, streamType,
+ audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, nullptr /* sharedBuffer */,
AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
@@ -1522,9 +1571,11 @@
status_t status = NO_ERROR;
static const int32_t kMaxTries = 5;
int32_t tryCounter = kMaxTries;
+ const size_t originalFrameCount = buffer->mFrameCount;
do {
if (status == NOT_ENOUGH_DATA) {
restartIfDisabled();
+ buffer->mFrameCount = originalFrameCount; // cleared on error, must be restored.
}
status = mProxy->obtainBuffer(buffer, timeOut);
} while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
@@ -1562,14 +1613,16 @@
mRecordTrack->destroy();
}
-status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
- audio_session_t triggerSession) {
+binder::Status AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
+ int /*audio_session_t*/ triggerSession) {
ALOGV("RecordHandle::start()");
- return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
+ return binder::Status::fromStatusT(
+ mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
}
-void AudioFlinger::RecordHandle::stop() {
+binder::Status AudioFlinger::RecordHandle::stop() {
stop_nonvirtual();
+ return binder::Status::ok();
}
void AudioFlinger::RecordHandle::stop_nonvirtual() {
@@ -1577,10 +1630,11 @@
mRecordTrack->stop();
}
-status_t AudioFlinger::RecordHandle::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- return BnAudioRecord::onTransact(code, data, reply, flags);
+binder::Status AudioFlinger::RecordHandle::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones) {
+ ALOGV("RecordHandle::getActiveMicrophones()");
+ return binder::Status::fromStatusT(
+ mRecordTrack->getActiveMicrophones(activeMicrophones));
}
// ----------------------------------------------------------------------------
@@ -1589,6 +1643,7 @@
AudioFlinger::RecordThread::RecordTrack::RecordTrack(
RecordThread *thread,
const sp<Client>& client,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -1600,7 +1655,7 @@
audio_input_flags_t flags,
track_type type,
audio_port_handle_t portId)
- : TrackBase(thread, client, sampleRate, format,
+ : TrackBase(thread, client, attr, sampleRate, format,
channelMask, frameCount, buffer, bufferSize, sessionId, uid, false /*isOut*/,
(type == TYPE_DEFAULT) ?
((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
@@ -1610,7 +1665,8 @@
mFramesToDrop(0),
mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
mRecordBufferConverter(NULL),
- mFlags(flags)
+ mFlags(flags),
+ mSilenced(false)
{
if (mCblk == NULL) {
return;
@@ -1690,7 +1746,7 @@
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
if (recordThread->stop(this) && isExternalTrack()) {
- AudioSystem::stopInput(mThreadIoHandle, mSessionId);
+ AudioSystem::stopInput(mPortId);
}
}
}
@@ -1702,9 +1758,9 @@
{
if (isExternalTrack()) {
if (mState == ACTIVE || mState == RESUMING) {
- AudioSystem::stopInput(mThreadIoHandle, mSessionId);
+ AudioSystem::stopInput(mPortId);
}
- AudioSystem::releaseInput(mThreadIoHandle, mSessionId);
+ AudioSystem::releaseInput(mPortId);
}
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
@@ -1729,14 +1785,14 @@
/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
- result.append("Active Client Session S Flags Format Chn mask SRate Server FrmCnt\n");
+ result.append("Active Client Session S Flags Format Chn mask SRate Server FrmCnt Sil\n");
}
void AudioFlinger::RecordThread::RecordTrack::appendDump(String8& result, bool active)
{
result.appendFormat("%c%5s %6u %7u %2s 0x%03X "
"%08X %08X %6u "
- "%08X %6zu\n",
+ "%08X %6zu %3c\n",
isFastTrack() ? 'F' : ' ',
active ? "yes" : "no",
(mClient == 0) ? getpid_cached : mClient->pid(),
@@ -1749,7 +1805,8 @@
mSampleRate,
mCblk->mServer,
- mFrameCount
+ mFrameCount,
+ isSilenced() ? 's' : 'n'
);
}
@@ -1795,6 +1852,18 @@
mServerProxy->setTimestamp(local);
}
+status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo>* activeMicrophones)
+{
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ RecordThread *recordThread = (RecordThread *)thread.get();
+ return recordThread->getActiveMicrophones(activeMicrophones);
+ } else {
+ return BAD_VALUE;
+ }
+}
+
AudioFlinger::RecordThread::PatchRecord::PatchRecord(RecordThread *recordThread,
uint32_t sampleRate,
audio_channel_mask_t channelMask,
@@ -1803,7 +1872,9 @@
void *buffer,
size_t bufferSize,
audio_input_flags_t flags)
- : RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
+ : RecordTrack(recordThread, NULL,
+ audio_attributes_t{} /* currently unused for patch track */,
+ sampleRate, format, channelMask, frameCount,
buffer, bufferSize, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
{
@@ -1864,6 +1935,7 @@
AudioFlinger::MmapThread::MmapTrack::MmapTrack(ThreadBase *thread,
+ const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
@@ -1871,13 +1943,13 @@
uid_t uid,
pid_t pid,
audio_port_handle_t portId)
- : TrackBase(thread, NULL, sampleRate, format,
+ : TrackBase(thread, NULL, attr, sampleRate, format,
channelMask, (size_t)0 /* frameCount */,
nullptr /* buffer */, (size_t)0 /* bufferSize */,
sessionId, uid, false /* isOut */,
ALLOC_NONE,
TYPE_DEFAULT, portId),
- mPid(pid)
+ mPid(pid), mSilenced(false), mSilencedNotified(false)
{
}
diff --git a/services/audioflinger/TypedLogger.h b/services/audioflinger/TypedLogger.h
index 7e77e89..38c3c02 100644
--- a/services/audioflinger/TypedLogger.h
+++ b/services/audioflinger/TypedLogger.h
@@ -18,7 +18,9 @@
#ifndef ANDROID_TYPED_LOGGER_H
#define ANDROID_TYPED_LOGGER_H
-#include <media/nbaio/NBLog.h>
+// This is the client API for the typed logger.
+
+#include <media/nblog/NBLog.h>
#include <algorithm>
/*
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 65571f9..d29cae1 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -25,6 +25,7 @@
libserviceutility \
libaudiopolicymanager \
libmedia_helper \
+ libmediametrics \
libeffectsconfig
LOCAL_STATIC_LIBRARIES := \
@@ -60,6 +61,7 @@
audio_policy_criteria.conf \
LOCAL_C_INCLUDES += frameworks/av/services/audiopolicy/engineconfigurable/include
+LOCAL_C_INCLUDES += frameworks/av/include
LOCAL_SHARED_LIBRARIES += libaudiopolicyengineconfigurable
@@ -78,6 +80,7 @@
libaudiopolicycomponents
LOCAL_SHARED_LIBRARIES += libmedia_helper
+LOCAL_SHARED_LIBRARIES += libmediametrics
ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
LOCAL_SHARED_LIBRARIES += libicuuc libxml2
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 7b19f58..4812b1f 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -65,15 +65,19 @@
API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
} input_type_t;
- enum {
+ enum {
API_INPUT_CONCURRENCY_NONE = 0,
API_INPUT_CONCURRENCY_CALL = (1 << 0), // Concurrency with a call
API_INPUT_CONCURRENCY_CAPTURE = (1 << 1), // Concurrency with another capture
+ API_INPUT_CONCURRENCY_HOTWORD = (1 << 2), // Concurrency with a hotword
+ API_INPUT_CONCURRENCY_PREEMPT = (1 << 3), // pre-empted someone
+ // NB: preempt is marked on a successful return, others are on failing calls
+ API_INPUT_CONCURRENCY_LAST = (1 << 4),
- API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_CALL | API_INPUT_CONCURRENCY_CAPTURE),
- };
+ API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_LAST - 1),
+ };
- typedef uint32_t concurrency_type__mask_t;
+ typedef uint32_t concurrency_type__mask_t;
public:
virtual ~AudioPolicyInterface() {}
@@ -109,19 +113,14 @@
//
// request an output appropriate for playback of the supplied stream type and parameters
- virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo) = 0;
+ virtual audio_io_handle_t getOutput(audio_stream_type_t stream) = 0;
virtual status_t getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
const audio_config_t *config,
- audio_output_flags_t flags,
+ audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId) = 0;
// indicates to the audio policy manager that the output starts being used by corresponding stream.
@@ -150,6 +149,7 @@
// indicates to the audio policy manager that the input starts being used.
virtual status_t startInput(audio_io_handle_t input,
audio_session_t session,
+ bool silenced,
concurrency_type__mask_t *concurrency) = 0;
// indicates to the audio policy manager that the input stops being used.
virtual status_t stopInput(audio_io_handle_t input,
@@ -244,6 +244,14 @@
virtual float getStreamVolumeDB(
audio_stream_type_t stream, int index, audio_devices_t device) = 0;
+
+ virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+ audio_format_t *surroundFormats,
+ bool *surroundFormatsEnabled,
+ bool reported) = 0;
+ virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) = 0;
+
+ virtual void setRecordSilenced(uid_t uid, bool silenced);
};
@@ -360,6 +368,6 @@
extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface);
-}; // namespace android
+} // namespace android
#endif // ANDROID_AUDIOPOLICY_INTERFACE_H
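With each concurrency flag defined as a distinct power of two and API_INPUT_CONCURRENCY_LAST as the next unused bit, the new API_INPUT_CONCURRENCY_ALL = (LAST - 1) automatically covers every defined bit without enumerating them. A small sketch of the idiom, using illustrative names rather than the real constants:

    // Illustrative only: the (LAST - 1) idiom for an "all bits" mask.
    enum {
        FLAG_CALL    = 1 << 0,
        FLAG_CAPTURE = 1 << 1,
        FLAG_HOTWORD = 1 << 2,
        FLAG_PREEMPT = 1 << 3,
        FLAG_LAST    = 1 << 4,            // always the next unused bit
        FLAG_ALL     = FLAG_LAST - 1,     // 0b1111, i.e. every flag below FLAG_LAST
    };
    static_assert(FLAG_ALL == (FLAG_CALL | FLAG_CAPTURE | FLAG_HOTWORD | FLAG_PREEMPT),
                  "LAST - 1 covers exactly the defined flags");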
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 1239fe0..fc012a2 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -38,6 +38,7 @@
DEVICE_CATEGORY_SPEAKER,
DEVICE_CATEGORY_EARPIECE,
DEVICE_CATEGORY_EXT_MEDIA,
+ DEVICE_CATEGORY_HEARING_AID,
DEVICE_CATEGORY_CNT
};
@@ -126,6 +127,8 @@
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
case AUDIO_DEVICE_OUT_USB_HEADSET:
return DEVICE_CATEGORY_HEADSET;
+ case AUDIO_DEVICE_OUT_HEARING_AID:
+ return DEVICE_CATEGORY_HEARING_AID;
case AUDIO_DEVICE_OUT_LINE:
case AUDIO_DEVICE_OUT_AUX_DIGITAL:
case AUDIO_DEVICE_OUT_USB_DEVICE:
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 31f0550..9bd68e1 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -35,7 +35,8 @@
* A device mask for all audio input devices that are considered "virtual" when evaluating
* active inputs in getActiveInputs()
*/
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX)
+#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|\
+ AUDIO_DEVICE_IN_BUS|AUDIO_DEVICE_IN_FM_TUNER)
/**
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index e263c0c..e69e687 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -36,6 +36,7 @@
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy \
frameworks/av/services/audiopolicy/utilities \
+ system/media/audio_utils/include \
ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h b/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
index 8f00d22..f86e75a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
@@ -41,4 +41,4 @@
status_t dump(int fd, int spaces) const;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
index cea5c0b..4ac508f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
@@ -66,4 +66,4 @@
bool mUseInChannelMask;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index b169bac..b25d6d4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -34,8 +34,8 @@
class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
{
public:
- explicit AudioInputDescriptor(const sp<IOProfile>& profile);
- void setIoHandle(audio_io_handle_t ioHandle);
+ explicit AudioInputDescriptor(const sp<IOProfile>& profile,
+ AudioPolicyClientInterface *clientInterface);
audio_port_handle_t getId() const;
audio_module_handle_t getModuleHandle() const;
uint32_t getOpenRefCount() const;
@@ -73,6 +73,20 @@
void setPatchHandle(audio_patch_handle_t handle);
+ status_t open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_source_t source,
+ audio_input_flags_t flags,
+ audio_io_handle_t *input);
+ // Called when a stream is about to be started.
+ // Note: called after AudioSession::changeActiveCount(1)
+ status_t start();
+ // Called after a stream is stopped
+ // Note: called after AudioSession::changeActiveCount(-1)
+ void stop();
+ void close();
+
private:
audio_patch_handle_t mPatchHandle;
audio_port_handle_t mId;
@@ -85,6 +99,7 @@
// a particular input started and prevent preemption of this active input by this session.
// We also inherit sessions from the preempted input to avoid a 3 way preemption loop etc...
SortedVector<audio_session_t> mPreemptedSessions;
+ AudioPolicyClientInterface *mClientInterface;
};
class AudioInputCollection :
@@ -112,4 +127,4 @@
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 3726c06..5e5d38b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -101,8 +101,6 @@
status_t dump(int fd);
- void setIoHandle(audio_io_handle_t ioHandle);
-
virtual audio_devices_t device() const;
virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
virtual audio_devices_t supportedDevices();
@@ -122,6 +120,23 @@
const struct audio_port_config *srcConfig = NULL) const;
virtual void toAudioPort(struct audio_port *port) const;
+ status_t open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_stream_type_t stream,
+ audio_output_flags_t flags,
+ audio_io_handle_t *output);
+ // Called when a stream is about to be started
+ // Note: called before changeRefCount(1);
+ status_t start();
+ // Called after a stream is stopped.
+ // Note: called after changeRefCount(-1);
+ void stop();
+ void close();
+ status_t openDuplicating(const sp<SwAudioOutputDescriptor>& output1,
+ const sp<SwAudioOutputDescriptor>& output2,
+ audio_io_handle_t *ioHandle);
+
const sp<IOProfile> mProfile; // I/O profile this output derives from
audio_io_handle_t mIoHandle; // output handle
uint32_t mLatency; //
@@ -175,6 +190,15 @@
bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
/**
+ * return whether a stream is playing, but not on a "remote" device.
+ * Override to change the definition of a local/remote playback.
+ * Used for instance by policy manager to alter the speaker playback ("speaker safe" behavior)
+ * when media plays or not locally.
+ * For the base implementation, "remotely" means playing during screen mirroring.
+ */
+ bool isStreamActiveLocally(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+
+ /**
* returns the A2DP output handle if it is open or 0 otherwise
*/
audio_io_handle_t getA2dpOutput() const;
@@ -218,4 +242,4 @@
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
index 385f257..c1c3f3c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
@@ -16,6 +16,7 @@
#pragma once
+#include "HandleGenerator.h"
#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
@@ -24,7 +25,7 @@
namespace android {
-class AudioPatch : public RefBase
+class AudioPatch : public RefBase, private HandleGenerator<audio_patch_handle_t>
{
public:
AudioPatch(const struct audio_patch *patch, uid_t uid);
@@ -35,9 +36,6 @@
struct audio_patch mPatch;
uid_t mUid;
audio_patch_handle_t mAfPatchHandle;
-
-private:
- static volatile int32_t mNextUniqueId;
};
class AudioPatchCollection : public DefaultKeyedVector<audio_patch_handle_t, sp<AudioPatch> >
@@ -52,4 +50,4 @@
status_t dump(int fd) const;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index f2756b5..43f6ed6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -39,14 +39,13 @@
DeviceVector &availableOutputDevices,
DeviceVector &availableInputDevices,
sp<DeviceDescriptor> &defaultOutputDevices,
- bool &isSpeakerDrcEnabled,
VolumeCurvesCollection *volumes = nullptr)
: mHwModules(hwModules),
mAvailableOutputDevices(availableOutputDevices),
mAvailableInputDevices(availableInputDevices),
mDefaultOutputDevices(defaultOutputDevices),
mVolumeCurves(volumes),
- mIsSpeakerDrcEnabled(isSpeakerDrcEnabled)
+ mIsSpeakerDrcEnabled(false)
{}
void setVolumes(const VolumeCurvesCollection &volumes)
@@ -80,6 +79,8 @@
mAvailableOutputDevices.add(availableOutputDevices);
}
+ bool isSpeakerDrcEnabled() const { return mIsSpeakerDrcEnabled; }
+
void setSpeakerDrcEnabled(bool isSpeakerDrcEnabled)
{
mIsSpeakerDrcEnabled = isSpeakerDrcEnabled;
@@ -112,7 +113,7 @@
mAvailableOutputDevices.add(mDefaultOutputDevices);
mAvailableInputDevices.add(defaultInputDevice);
- module = new HwModule("primary");
+ module = new HwModule(AUDIO_HARDWARE_MODULE_ID_PRIMARY);
sp<OutputProfile> outProfile;
outProfile = new OutputProfile(String8("primary"));
@@ -121,7 +122,7 @@
new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 44100));
outProfile->addSupportedDevice(mDefaultOutputDevices);
outProfile->setFlags(AUDIO_OUTPUT_FLAG_PRIMARY);
- module->mOutputProfiles.add(outProfile);
+ module->addOutputProfile(outProfile);
sp<InputProfile> inProfile;
inProfile = new InputProfile(String8("primary"));
@@ -129,7 +130,7 @@
inProfile->addAudioProfile(
new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000));
inProfile->addSupportedDevice(defaultInputDevice);
- module->mInputProfiles.add(inProfile);
+ module->addInputProfile(inProfile);
mHwModules.add(module);
}
@@ -140,7 +141,10 @@
DeviceVector &mAvailableInputDevices;
sp<DeviceDescriptor> &mDefaultOutputDevices;
VolumeCurvesCollection *mVolumeCurves;
- bool &mIsSpeakerDrcEnabled;
+ // TODO: remove when legacy conf file is removed. true on devices that use DRC on the
+ // DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
+ // Note: also remove speaker_drc_enabled from the global configuration of the XML config file.
+ bool mIsSpeakerDrcEnabled;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index 0bacef7..8fc6fe9 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -83,4 +83,4 @@
status_t dump(int fd) const;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index 4f79ed2..09a86dd 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -18,6 +18,7 @@
#include "AudioCollections.h"
#include "AudioProfile.h"
+#include "HandleGenerator.h"
#include <utils/String8.h>
#include <utils/Vector.h>
#include <utils/RefBase.h>
@@ -32,7 +33,7 @@
class AudioRoute;
typedef Vector<sp<AudioGain> > AudioGainCollection;
-class AudioPort : public virtual RefBase
+class AudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
{
public:
AudioPort(const String8& name, audio_port_type_t type, audio_port_role_t role) :
@@ -51,7 +52,7 @@
void setGains(const AudioGainCollection &gains) { mGains = gains; }
const AudioGainCollection &getGains() const { return mGains; }
- void setFlags(uint32_t flags)
+ virtual void setFlags(uint32_t flags)
{
//force direct flag if offload flag is set: offloading implies a direct output stream
// and all common behaviors are driven by checking only the direct flag
@@ -83,12 +84,7 @@
bool hasDynamicAudioProfile() const { return mProfiles.hasDynamicProfile(); }
// searches for an exact match
- status_t checkExactAudioProfile(uint32_t samplingRate,
- audio_channel_mask_t channelMask,
- audio_format_t format) const
- {
- return mProfiles.checkExactProfile(samplingRate, channelMask, format);
- }
+ virtual status_t checkExactAudioProfile(const struct audio_port_config *config) const;
// searches for a compatible match, currently implemented for input
// parameters are input|output, returned value is the best match.
@@ -152,7 +148,6 @@
uint32_t mFlags; // attribute flags mask (e.g primary output, direct output...).
AudioProfileVector mProfiles; // AudioProfiles supported by this port (format, Rates, Channels)
AudioRouteVector mRoutes; // Routes involving this port
- static volatile int32_t mNextUniqueId;
};
class AudioPortConfig : public virtual RefBase
@@ -176,4 +171,4 @@
struct audio_gain_config mGain;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
index 404e27d..8741c66 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
@@ -349,4 +349,4 @@
bool operator == (const AudioProfile &left, const AudioProfile &right);
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
index df54f48..6b24fde 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
@@ -55,4 +55,4 @@
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index cedf22d..dd5247d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -55,6 +55,8 @@
void setUid(uid_t uid) { mRecordClientInfo.uid = uid; }
bool matches(const sp<AudioSession> &other) const;
bool isSoundTrigger() const { return mIsSoundTrigger; }
+ void setSilenced(bool silenced) { mSilenced = silenced; }
+ bool isSilenced() const { return mSilenced; }
uint32_t openCount() const { return mOpenCount; } ;
uint32_t activeCount() const { return mActiveCount; } ;
@@ -70,6 +72,7 @@
const struct audio_config_base mConfig;
const audio_input_flags_t mFlags;
bool mIsSoundTrigger;
+ bool mSilenced;
uint32_t mOpenCount;
uint32_t mActiveCount;
AudioMix* mPolicyMix; // non NULL when used by a dynamic policy
@@ -102,4 +105,4 @@
status_t dump(int fd, int spaces) const;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
index 7e1e24d..0d90f42 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
@@ -56,4 +56,4 @@
status_t dump(int fd) const;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
index ee95ceb..a007854 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
@@ -56,4 +56,4 @@
static status_t loadHwModule(cnode *root, sp<HwModule> &module, AudioPolicyConfig &config);
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 1a644d7..92a4c3e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -91,4 +91,4 @@
audio_devices_t mDeviceTypes;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index 9ea0aea..04831c6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -69,4 +69,4 @@
static const uint32_t MAX_EFFECTS_MEMORY = 512;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/Gains.h b/services/audiopolicy/common/managerdefinitions/include/Gains.h
index 34afc8c..cb229a4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/Gains.h
+++ b/services/audiopolicy/common/managerdefinitions/include/Gains.h
@@ -52,8 +52,9 @@
static const VolumeCurvePoint sLinearVolumeCurve[Volume::VOLCNT];
static const VolumeCurvePoint sSilentVolumeCurve[Volume::VOLCNT];
static const VolumeCurvePoint sFullScaleVolumeCurve[Volume::VOLCNT];
+ static const VolumeCurvePoint sHearingAidVolumeCurve[Volume::VOLCNT];
// default volume curves per stream and device category. See initializeVolumeCurves()
static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/HandleGenerator.h b/services/audiopolicy/common/managerdefinitions/include/HandleGenerator.h
new file mode 100644
index 0000000..737a2e0
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/HandleGenerator.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <atomic>
+#include <limits>
+
+namespace android {
+
+template<typename T>
+class HandleGenerator {
+ protected:
+ static T getNextHandle();
+};
+
+template<typename T>
+T HandleGenerator<T>::getNextHandle() {
+ static std::atomic<uint32_t> mNextUniqueId(1);
+ uint32_t id = mNextUniqueId++;
+ while (id > std::numeric_limits<T>::max()) {
+ id -= std::numeric_limits<T>::max();
+ }
+ return static_cast<T>(id);
+}
+
+} // namespace android
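
For illustration (not part of the patch), a minimal sketch of how a class can use the new HandleGenerator to mint typed handles; this mirrors the pattern AudioPatch adopts further down in this patch, and everything below except getNextHandle() is hypothetical:

    // Hypothetical sketch only; include path and names are assumed.
    #include "HandleGenerator.h"

    namespace android {

    using fake_handle_t = uint16_t;  // small handle type, to exercise the wrap-around logic

    class FakeSession : private HandleGenerator<fake_handle_t> {
    public:
        FakeSession() : mHandle(getNextHandle()) {}       // handles are allocated atomically
        fake_handle_t handle() const { return mHandle; }
    private:
        const fake_handle_t mHandle;
    };

    } // namespace android
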
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 29b6b9c..cb9f49e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -44,12 +44,10 @@
const char *getName() const { return mName.string(); }
-
const DeviceVector &getDeclaredDevices() const { return mDeclaredDevices; }
void setDeclaredDevices(const DeviceVector &devices);
const InputProfileCollection &getInputProfiles() const { return mInputProfiles; }
-
const OutputProfileCollection &getOutputProfiles() const { return mOutputProfiles; }
void setProfiles(const IOProfileCollection &profiles);
@@ -76,6 +74,7 @@
status_t removeInputProfile(const String8& name);
audio_module_handle_t getHandle() const { return mHandle; }
+ void setHandle(audio_module_handle_t handle);
sp<AudioPort> findPortByTagName(const String8 &tagName) const
{
@@ -85,14 +84,13 @@
// TODO remove from here (split serialization)
void dump(int fd);
+private:
+ void refreshSupportedDevices();
+
const String8 mName; // base name of the audio HW module (primary, a2dp ...)
audio_module_handle_t mHandle;
OutputProfileCollection mOutputProfiles; // output profiles exposed by this module
InputProfileCollection mInputProfiles; // input profiles exposed by this module
-
-private:
- void refreshSupportedDevices();
-
uint32_t mHalVersion; // audio HAL API version
DeviceVector mDeclaredDevices; // devices declared in audio_policy configuration file.
AudioRouteVector mRoutes;
@@ -114,4 +112,4 @@
status_t dump(int fd) const;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index ec04ef7..67ac9bc 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -34,11 +34,29 @@
{
public:
IOProfile(const String8 &name, audio_port_role_t role)
- : AudioPort(name, AUDIO_PORT_TYPE_MIX, role) {}
+ : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
+ maxOpenCount((role == AUDIO_PORT_ROLE_SOURCE) ? 1 : 0),
+ curOpenCount(0),
+ maxActiveCount(1),
+ curActiveCount(0) {}
// For a Profile aka MixPort, tag name and name are equivalent.
virtual const String8 getTagName() const { return getName(); }
+ // FIXME: this is needed because shared MMAP stream clients use the same audio session.
+ // Once capture clients are tracked individually and not per session this can be removed
+ // MMAP no IRQ input streams do not have the default limitation of one active client
+ // max as they can be used in shared mode by the same application.
+ // NOTE: this works for explicit values set in audio_policy_configuration.xml because
+ // flags are parsed before maxActiveCount by the serializer.
+ void setFlags(uint32_t flags) override
+ {
+ AudioPort::setFlags(flags);
+ if (getRole() == AUDIO_PORT_ROLE_SINK && (flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
+ maxActiveCount = 0;
+ }
+ }
+
// This method is used for input and direct output, and is not used for other output.
// If parameter updatedSamplingRate is non-NULL, it is assigned the actual sample rate.
// For input, flags is interpreted as audio_input_flags_t.
@@ -51,7 +69,9 @@
audio_format_t *updatedFormat,
audio_channel_mask_t channelMask,
audio_channel_mask_t *updatedChannelMask,
- uint32_t flags) const;
+ // FIXME parameter type
+ uint32_t flags,
+ bool exactMatchRequiredForInputFlags = false) const;
void dump(int fd);
void log();
@@ -103,6 +123,34 @@
const DeviceVector &getSupportedDevices() const { return mSupportedDevices; }
+ bool canOpenNewIo() {
+ if (maxOpenCount == 0 || curOpenCount < maxOpenCount) {
+ return true;
+ }
+ return false;
+ }
+
+ bool canStartNewIo() {
+ if (maxActiveCount == 0 || curActiveCount < maxActiveCount) {
+ return true;
+ }
+ return false;
+ }
+
+ // Maximum number of input or output streams that can be simultaneously opened for this profile.
+ // By convention 0 means no limit. To respect legacy behavior, initialized to 1 for output
+ // profiles and 0 for input profiles
+ uint32_t maxOpenCount;
+ // Number of streams currently opened for this profile.
+ uint32_t curOpenCount;
+ // Maximum number of input or output streams that can be simultaneously active for this profile.
+ // By convention 0 means no limit. Initialized to 1 for both output and input profiles
+ // (see the constructor above); setFlags() resets it to 0 for MMAP no-IRQ inputs.
+ uint32_t maxActiveCount;
+ // Number of streams currently active for this profile. This is not the number of active clients
+ // (AudioTrack or AudioRecord) but the number of active HAL streams.
+ uint32_t curActiveCount;
+
private:
DeviceVector mSupportedDevices; // supported devices: this input/output can be routed from/to
};
@@ -119,4 +167,4 @@
explicit OutputProfile(const String8 &name) : IOProfile(name, AUDIO_PORT_ROLE_SOURCE) {}
};
-}; // namespace android
+} // namespace android
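
For illustration (not part of the patch), a sketch of the bookkeeping the new counters expect from their callers; the same pattern appears in the AudioInputDescriptor and SwAudioOutputDescriptor open()/start() methods added later in this patch. The function name and includes are assumed, and error-path cleanup is omitted:

    // Hypothetical sketch only.
    #include "IOProfile.h"
    #include <utils/Errors.h>

    namespace android {

    static status_t openThenStart(const sp<IOProfile>& profile)
    {
        if (!profile->canOpenNewIo()) {        // maxOpenCount == 0 means no limit
            return INVALID_OPERATION;
        }
        // ... open the HAL stream here ...
        profile->curOpenCount++;               // increment only after a successful open

        if (!profile->canStartNewIo()) {       // maxActiveCount == 0 means no limit
            return INVALID_OPERATION;          // (closing the just-opened stream is omitted)
        }
        // ... start the HAL stream here ...
        profile->curActiveCount++;             // increment only after a successful start
        return NO_ERROR;
    }

    } // namespace android
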
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
index a3de686..e1f6b08 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
@@ -25,6 +25,8 @@
class IVolumeCurvesCollection
{
public:
+ virtual ~IVolumeCurvesCollection() = default;
+
virtual void clearCurrentVolumeIndex(audio_stream_type_t stream) = 0;
virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device,
int index) = 0;
@@ -46,9 +48,6 @@
audio_devices_t device) const = 0;
virtual status_t dump(int fd) const = 0;
-
-protected:
- virtual ~IVolumeCurvesCollection() {}
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/Serializer.h b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
index 078b582..29de848 100644
--- a/services/audiopolicy/common/managerdefinitions/include/Serializer.h
+++ b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
@@ -92,6 +92,8 @@
static const char name[];
static const char role[];
static const char flags[];
+ static const char maxOpenCount[];
+ static const char maxActiveCount[];
};
typedef IOProfile Element;
@@ -234,4 +236,4 @@
// Children are: ModulesTraits, VolumeTraits
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h b/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
index 75bfd9d..32b4440 100644
--- a/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
+++ b/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
@@ -24,6 +24,7 @@
namespace android {
class DeviceDescriptor;
+class DeviceVector;
class SessionRoute : public RefBase
{
@@ -54,7 +55,7 @@
void log(const char* prefix);
- bool isActive() {
+ bool isActiveOrChanged() {
return (mDeviceDescriptor != 0) && (mChanged || (mActivityCount > 0));
}
@@ -96,9 +97,10 @@
int incRouteActivity(audio_session_t session);
int decRouteActivity(audio_session_t session);
- bool hasRouteChanged(audio_session_t session); // also clears the changed flag
+ bool getAndClearRouteChanged(audio_session_t session); // also clears the changed flag
void log(const char* caption);
-
+ audio_devices_t getActiveDeviceForStream(audio_stream_type_t streamType,
+ const DeviceVector& availableDevices);
// Specify an Output(Sink) route by passing SessionRoute::SOURCE_TYPE_NA in the
// source argument.
// Specify an Input(Source) route by passing SessionRoute::AUDIO_STREAM_DEFAULT
@@ -115,4 +117,4 @@
const session_route_map_type_t mMapType;
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h b/services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h
index 420e6d7..f895599 100644
--- a/services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h
@@ -30,4 +30,4 @@
status_t acquireSession(audio_session_t session, audio_io_handle_t ioHandle);
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
index 8822927..50b1037 100644
--- a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
@@ -107,4 +107,4 @@
void setVolumeIndexMax(audio_stream_type_t stream,int volIndexMax);
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index fc95eb9..63c19d1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -58,4 +58,4 @@
template <>
const RuleTypeConverter::Table RuleTypeConverter::mTable[];
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
index e7fcefc..3e6b2b4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
+++ b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
@@ -135,7 +135,13 @@
float volIndexToDb(device_category deviceCat, int indexInUi) const
{
- return getCurvesFor(deviceCat)->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
+ sp<VolumeCurve> vc = getCurvesFor(deviceCat);
+ if (vc != 0) {
+ return vc->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
+ } else {
+ ALOGE("Invalid device category %d for Volume Curve", deviceCat);
+ return 0.0f;
+ }
}
void dump(int fd, int spaces, bool curvePoints = false) const;
@@ -230,4 +236,4 @@
}
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index 635fe4d..ca67b87 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -27,14 +27,12 @@
sp<AudioPort> AudioPortVector::findByTagName(const String8 &tagName) const
{
- sp<AudioPort> port = 0;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getTagName() == tagName) {
- port = itemAt(i);
- break;
+ for (const auto& port : *this) {
+ if (port->getTagName() == tagName) {
+ return port;
}
}
- return port;
+ return nullptr;
}
status_t AudioRouteVector::dump(int fd, int spaces) const
@@ -55,4 +53,4 @@
return NO_ERROR;
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
index e454941..193d4a6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
@@ -126,4 +126,4 @@
write(fd, result.string(), result.size());
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 2492ed6..92332fb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "APM::AudioInputDescriptor"
//#define LOG_NDEBUG 0
+#include <AudioPolicyInterface.h>
#include "AudioInputDescriptor.h"
#include "IOProfile.h"
#include "AudioGain.h"
@@ -26,10 +27,12 @@
namespace android {
-AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
+AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile,
+ AudioPolicyClientInterface *clientInterface)
: mIoHandle(0),
mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
- mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0)
+ mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0),
+ mClientInterface(clientInterface)
{
if (profile != NULL) {
profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -39,12 +42,6 @@
}
}
-void AudioInputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
- mId = AudioPort::getNextUniqueId();
- mIoHandle = ioHandle;
-}
-
audio_module_handle_t AudioInputDescriptor::getModuleHandle() const
{
if (mProfile == 0) {
@@ -192,6 +189,93 @@
return config;
}
+status_t AudioInputDescriptor::open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_source_t source,
+ audio_input_flags_t flags,
+ audio_io_handle_t *input)
+{
+ audio_config_t lConfig;
+ if (config == nullptr) {
+ lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = mSamplingRate;
+ lConfig.channel_mask = mChannelMask;
+ lConfig.format = mFormat;
+ } else {
+ lConfig = *config;
+ }
+
+ mDevice = device;
+
+ ALOGV("opening input for device %08x address %s profile %p name %s",
+ mDevice, address.string(), mProfile.get(), mProfile->getName().string());
+
+ status_t status = mClientInterface->openInput(mProfile->getModuleHandle(),
+ input,
+ &lConfig,
+ &mDevice,
+ address,
+ source,
+ flags);
+ LOG_ALWAYS_FATAL_IF(mDevice != device,
+ "%s openInput returned device %08x when given device %08x",
+ __FUNCTION__, mDevice, device);
+
+ if (status == NO_ERROR) {
+ LOG_ALWAYS_FATAL_IF(*input == AUDIO_IO_HANDLE_NONE,
+ "%s openInput returned input handle %d for device %08x",
+ __FUNCTION__, *input, device);
+ mSamplingRate = lConfig.sample_rate;
+ mChannelMask = lConfig.channel_mask;
+ mFormat = lConfig.format;
+ mId = AudioPort::getNextUniqueId();
+ mIoHandle = *input;
+ mProfile->curOpenCount++;
+ }
+
+ return status;
+}
+
+status_t AudioInputDescriptor::start()
+{
+ if (getAudioSessionCount(true/*activeOnly*/) == 1) {
+ if (!mProfile->canStartNewIo()) {
+ ALOGI("%s mProfile->curActiveCount %d", __func__, mProfile->curActiveCount);
+ return INVALID_OPERATION;
+ }
+ mProfile->curActiveCount++;
+ }
+ return NO_ERROR;
+}
+
+void AudioInputDescriptor::stop()
+{
+ if (!isActive()) {
+ LOG_ALWAYS_FATAL_IF(mProfile->curActiveCount < 1,
+ "%s invalid profile active count %u",
+ __func__, mProfile->curActiveCount);
+ mProfile->curActiveCount--;
+ }
+}
+
+void AudioInputDescriptor::close()
+{
+ if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ mClientInterface->closeInput(mIoHandle);
+ LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
+ __FUNCTION__, mProfile->curOpenCount);
+ // do not call stop() here as stop() is supposed to be called after
+ // AudioSession::changeActiveCount(-1) and we don't know how many sessions
+ // are still active at this time
+ if (isActive()) {
+ mProfile->curActiveCount--;
+ }
+ mProfile->curOpenCount--;
+ mIoHandle = AUDIO_IO_HANDLE_NONE;
+ }
+}
+
status_t AudioInputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
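
A note on the sequencing above, inferred from the code rather than stated by the patch: start() bumps the profile's curActiveCount only when the first session of the input becomes active (getAudioSessionCount(true) == 1), and stop() decrements it only once isActive() is already false, i.e. after the caller has dropped the last session's active count; close() reconciles curActiveCount and curOpenCount itself because, as its in-code comment says, it cannot know how many sessions are still active at that point.
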
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 3819af8..294a2a6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -23,6 +23,7 @@
#include "AudioGain.h"
#include "Volume.h"
#include "HwModule.h"
+#include <media/AudioParameter.h>
#include <media/AudioPolicy.h>
// A device mask for all audio output devices that are considered "remote" when evaluating
@@ -46,17 +47,17 @@
for (int i = 0; i < NUM_STRATEGIES; i++) {
mStrategyMutedByDevice[i] = false;
}
- if (port != NULL) {
- port->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
- if (port->mGains.size() > 0) {
- port->mGains[0]->getDefaultConfig(&mGain);
+ if (mPort.get() != nullptr) {
+ mPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
+ if (mPort->mGains.size() > 0) {
+ mPort->mGains[0]->getDefaultConfig(&mGain);
}
}
}
audio_module_handle_t AudioOutputDescriptor::getModuleHandle() const
{
- return mPort->getModuleHandle();
+ return mPort.get() != nullptr ? mPort->getModuleHandle() : AUDIO_MODULE_HANDLE_NONE;
}
audio_port_handle_t AudioOutputDescriptor::getId() const
@@ -175,9 +176,9 @@
dstConfig->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
}
-void AudioOutputDescriptor::toAudioPort(
- struct audio_port *port) const
+void AudioOutputDescriptor::toAudioPort(struct audio_port *port) const
{
+ // Should not be called for duplicated ports, see SwAudioOutputDescriptor::toAudioPortConfig.
mPort->toAudioPort(port);
port->id = mId;
port->ext.mix.hw_module = getModuleHandle();
@@ -221,7 +222,7 @@
SwAudioOutputDescriptor::SwAudioOutputDescriptor(const sp<IOProfile>& profile,
AudioPolicyClientInterface *clientInterface)
: AudioOutputDescriptor(profile, clientInterface),
- mProfile(profile), mIoHandle(0), mLatency(0),
+ mProfile(profile), mIoHandle(AUDIO_IO_HANDLE_NONE), mLatency(0),
mFlags((audio_output_flags_t)0), mPolicyMix(NULL),
mOutput1(0), mOutput2(0), mDirectOpenCount(0),
mDirectClientSession(AUDIO_SESSION_NONE), mGlobalRefCount(0)
@@ -231,13 +232,6 @@
}
}
-void SwAudioOutputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
- mId = AudioPort::getNextUniqueId();
- mIoHandle = ioHandle;
-}
-
-
status_t SwAudioOutputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
@@ -339,6 +333,10 @@
return true;
}
}
+ if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX) {
+ ALOGV("max gain when output device is telephony tx");
+ return true;
+ }
return false;
}
@@ -387,6 +385,154 @@
return changed;
}
+status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
+ audio_devices_t device,
+ const String8& address,
+ audio_stream_type_t stream,
+ audio_output_flags_t flags,
+ audio_io_handle_t *output)
+{
+ audio_config_t lConfig;
+ if (config == nullptr) {
+ lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = mSamplingRate;
+ lConfig.channel_mask = mChannelMask;
+ lConfig.format = mFormat;
+ } else {
+ lConfig = *config;
+ }
+
+ mDevice = device;
+ // if the selected profile is offloaded and no offload info was specified,
+ // create a default one
+ if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
+ lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
+ flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+ lConfig.offload_info = AUDIO_INFO_INITIALIZER;
+ lConfig.offload_info.sample_rate = lConfig.sample_rate;
+ lConfig.offload_info.channel_mask = lConfig.channel_mask;
+ lConfig.offload_info.format = lConfig.format;
+ lConfig.offload_info.stream_type = stream;
+ lConfig.offload_info.duration_us = -1;
+ lConfig.offload_info.has_video = true; // conservative
+ lConfig.offload_info.is_streaming = true; // likely
+ }
+
+ mFlags = (audio_output_flags_t)(mFlags | flags);
+
+ ALOGV("opening output for device %08x address %s profile %p name %s",
+ mDevice, address.string(), mProfile.get(), mProfile->getName().string());
+
+ status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
+ output,
+ &lConfig,
+ &mDevice,
+ address,
+ &mLatency,
+ mFlags);
+ LOG_ALWAYS_FATAL_IF(mDevice != device,
+ "%s openOutput returned device %08x when given device %08x",
+ __FUNCTION__, mDevice, device);
+
+ if (status == NO_ERROR) {
+ LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
+ "%s openOutput returned output handle %d for device %08x",
+ __FUNCTION__, *output, device);
+ mSamplingRate = lConfig.sample_rate;
+ mChannelMask = lConfig.channel_mask;
+ mFormat = lConfig.format;
+ mId = AudioPort::getNextUniqueId();
+ mIoHandle = *output;
+ mProfile->curOpenCount++;
+ }
+
+ return status;
+}
+
+status_t SwAudioOutputDescriptor::start()
+{
+ if (isDuplicated()) {
+ status_t status = mOutput1->start();
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = mOutput2->start();
+ if (status != NO_ERROR) {
+ mOutput1->stop();
+ return status;
+ }
+ return NO_ERROR;
+ }
+ if (!isActive()) {
+ if (!mProfile->canStartNewIo()) {
+ return INVALID_OPERATION;
+ }
+ mProfile->curActiveCount++;
+ }
+ return NO_ERROR;
+}
+
+void SwAudioOutputDescriptor::stop()
+{
+ if (isDuplicated()) {
+ mOutput1->stop();
+ mOutput2->stop();
+ return;
+ }
+
+ if (!isActive()) {
+ LOG_ALWAYS_FATAL_IF(mProfile->curActiveCount < 1,
+ "%s invalid profile active count %u",
+ __func__, mProfile->curActiveCount);
+ mProfile->curActiveCount--;
+ }
+}
+
+void SwAudioOutputDescriptor::close()
+{
+ if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ AudioParameter param;
+ param.add(String8("closing"), String8("true"));
+ mClientInterface->setParameters(mIoHandle, param.toString());
+
+ mClientInterface->closeOutput(mIoHandle);
+
+ LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
+ __FUNCTION__, mProfile->curOpenCount);
+ // do not call stop() here as stop() is supposed to be called after changeRefCount(-1)
+ // and we don't know how many streams are still active at this time
+ if (isActive()) {
+ mProfile->curActiveCount--;
+ }
+ mProfile->curOpenCount--;
+ mIoHandle = AUDIO_IO_HANDLE_NONE;
+ }
+}
+
+status_t SwAudioOutputDescriptor::openDuplicating(const sp<SwAudioOutputDescriptor>& output1,
+ const sp<SwAudioOutputDescriptor>& output2,
+ audio_io_handle_t *ioHandle)
+{
+ // open a duplicating output thread for the new output and the primary output
+ // Note: openDuplicateOutput() API expects the output handles in the reverse order from the
+ // numbering in SwAudioOutputDescriptor mOutput1 and mOutput2
+ *ioHandle = mClientInterface->openDuplicateOutput(output2->mIoHandle, output1->mIoHandle);
+ if (*ioHandle == AUDIO_IO_HANDLE_NONE) {
+ return INVALID_OPERATION;
+ }
+
+ mId = AudioPort::getNextUniqueId();
+ mIoHandle = *ioHandle;
+ mOutput1 = output1;
+ mOutput2 = output2;
+ mSamplingRate = output2->mSamplingRate;
+ mFormat = output2->mFormat;
+ mChannelMask = output2->mChannelMask;
+ mLatency = output2->mLatency;
+
+ return NO_ERROR;
+}
+
// HwAudioOutputDescriptor implementation
HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
AudioPolicyClientInterface *clientInterface)
@@ -457,6 +603,19 @@
return false;
}
+bool SwAudioOutputCollection::isStreamActiveLocally(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+ nsecs_t sysTime = systemTime();
+ for (size_t i = 0; i < this->size(); i++) {
+ const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
+ if (outputDesc->isStreamActive(stream, inPastMs, sysTime)
+ && ((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) == 0)) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool SwAudioOutputCollection::isStreamActiveRemotely(audio_stream_type_t stream,
uint32_t inPastMs) const
{
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index 32606ea..a9fe48d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -22,15 +22,12 @@
#include "TypeConverter.h"
#include <log/log.h>
-#include <cutils/atomic.h>
#include <utils/String8.h>
namespace android {
-int32_t volatile AudioPatch::mNextUniqueId = 1;
-
AudioPatch::AudioPatch(const struct audio_patch *patch, uid_t uid) :
- mHandle(static_cast<audio_patch_handle_t>(android_atomic_inc(&mNextUniqueId))),
+ mHandle(HandleGenerator<audio_patch_handle_t>::getNextHandle()),
mPatch(*patch),
mUid(uid),
mAfPatchHandle(AUDIO_PATCH_HANDLE_NONE)
@@ -176,4 +173,4 @@
return NO_ERROR;
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index fcf9070..d85562e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -21,7 +21,6 @@
#include "HwModule.h"
#include "AudioGain.h"
#include <policy.h>
-#include <cutils/atomic.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
@@ -29,8 +28,6 @@
namespace android {
-int32_t volatile AudioPort::mNextUniqueId = 1;
-
// --- AudioPort class implementation
void AudioPort::attach(const sp<HwModule>& module)
{
@@ -40,31 +37,22 @@
// Note that this is a different namespace than AudioFlinger unique IDs
audio_port_handle_t AudioPort::getNextUniqueId()
{
- return static_cast<audio_port_handle_t>(android_atomic_inc(&mNextUniqueId));
+ return getNextHandle();
}
audio_module_handle_t AudioPort::getModuleHandle() const
{
- if (mModule == 0) {
- return AUDIO_MODULE_HANDLE_NONE;
- }
- return mModule->mHandle;
+ return mModule != 0 ? mModule->getHandle() : AUDIO_MODULE_HANDLE_NONE;
}
uint32_t AudioPort::getModuleVersionMajor() const
{
- if (mModule == 0) {
- return 0;
- }
- return mModule->getHalVersionMajor();
+ return mModule != 0 ? mModule->getHalVersionMajor() : 0;
}
const char *AudioPort::getModuleName() const
{
- if (mModule == 0) {
- return "invalid module";
- }
- return mModule->getName();
+ return mModule != 0 ? mModule->getName() : "invalid module";
}
void AudioPort::toAudioPort(struct audio_port *port) const
@@ -74,11 +62,11 @@
SortedVector<audio_format_t> flatenedFormats;
SampleRateVector flatenedRates;
ChannelsVector flatenedChannels;
- for (size_t profileIndex = 0; profileIndex < mProfiles.size(); profileIndex++) {
- if (mProfiles[profileIndex]->isValid()) {
- audio_format_t formatToExport = mProfiles[profileIndex]->getFormat();
- const SampleRateVector &ratesToExport = mProfiles[profileIndex]->getSampleRates();
- const ChannelsVector &channelsToExport = mProfiles[profileIndex]->getChannels();
+ for (const auto& profile : mProfiles) {
+ if (profile->isValid()) {
+ audio_format_t formatToExport = profile->getFormat();
+ const SampleRateVector &ratesToExport = profile->getSampleRates();
+ const ChannelsVector &channelsToExport = profile->getChannels();
if (flatenedFormats.indexOf(formatToExport) < 0) {
flatenedFormats.add(formatToExport);
@@ -130,14 +118,12 @@
void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
{
- size_t indexToImport;
- for (indexToImport = 0; indexToImport < port->mProfiles.size(); indexToImport++) {
- const sp<AudioProfile> &profileToImport = port->mProfiles[indexToImport];
+ for (const auto& profileToImport : port->mProfiles) {
if (profileToImport->isValid()) {
// Import only valid port, i.e. valid format, non empty rates and channels masks
bool hasSameProfile = false;
- for (size_t profileIndex = 0; profileIndex < mProfiles.size(); profileIndex++) {
- if (*mProfiles[profileIndex] == *profileToImport) {
+ for (const auto& profile : mProfiles) {
+ if (*profile == *profileToImport) {
// never import a profile twice
hasSameProfile = true;
break;
@@ -151,6 +137,26 @@
}
}
+status_t AudioPort::checkExactAudioProfile(const struct audio_port_config *config) const
+{
+ status_t status = NO_ERROR;
+ auto config_mask = config->config_mask;
+ if (config_mask & AUDIO_PORT_CONFIG_GAIN) {
+ config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
+ status = checkGain(&config->gain, config->gain.index);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ }
+ if (config_mask != 0) {
+ // TODO should we check sample_rate / channel_mask / format separately?
+ status = mProfiles.checkExactProfile(config->sample_rate,
+ config->channel_mask,
+ config->format);
+ }
+ return status;
+}
+
void AudioPort::pickSamplingRate(uint32_t &pickedRate,const SampleRateVector &samplingRates) const
{
pickedRate = 0;
@@ -385,6 +391,7 @@
mSamplingRate = 0;
mChannelMask = AUDIO_CHANNEL_NONE;
mFormat = AUDIO_FORMAT_INVALID;
+ memset(&mGain, 0, sizeof(struct audio_gain_config));
mGain.index = -1;
}
@@ -402,9 +409,7 @@
status = NO_INIT;
goto exit;
}
- status = audioport->checkExactAudioProfile(config->sample_rate,
- config->channel_mask,
- config->format);
+ status = audioport->checkExactAudioProfile(config);
if (status != NO_ERROR) {
goto exit;
}
@@ -418,10 +423,6 @@
mFormat = config->format;
}
if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
- status = audioport->checkGain(&config->gain, config->gain.index);
- if (status != NO_ERROR) {
- goto exit;
- }
mGain = config->gain;
}
@@ -479,4 +480,4 @@
}
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index 98f7a94..fd6fc1c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -233,8 +233,7 @@
return NO_ERROR;
}
- for (size_t i = 0; i < size(); i++) {
- const sp<AudioProfile> profile = itemAt(i);
+ for (const auto& profile : *this) {
if (profile->checkExact(samplingRate, channelMask, format) == NO_ERROR) {
return NO_ERROR;
}
@@ -288,4 +287,4 @@
return AudioPort::compareFormats((*profile1)->getFormat(), (*profile2)->getFormat());
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index 5b57d3d..7cda46b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -290,4 +290,4 @@
return NO_ERROR;
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
index e5888e2..1e105f5 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -416,4 +416,4 @@
return NO_ERROR;
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index a2c1165..19c2062 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "APM::Devices"
//#define LOG_NDEBUG 0
+#include <audio_utils/string.h>
#include "DeviceDescriptor.h"
#include "TypeConverter.h"
#include "AudioGain.h"
@@ -60,7 +61,7 @@
void DeviceVector::refreshTypes()
{
mDeviceTypes = AUDIO_DEVICE_NONE;
- for(size_t i = 0; i < size(); i++) {
+ for (size_t i = 0; i < size(); i++) {
mDeviceTypes |= itemAt(i)->type();
}
ALOGV("DeviceVector::refreshTypes() mDeviceTypes %08x", mDeviceTypes);
@@ -68,7 +69,7 @@
ssize_t DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const
{
- for(size_t i = 0; i < size(); i++) {
+ for (size_t i = 0; i < size(); i++) {
if (item->equals(itemAt(i))) {
return i;
}
@@ -78,12 +79,15 @@
void DeviceVector::add(const DeviceVector &devices)
{
- for (size_t i = 0; i < devices.size(); i++) {
- sp<DeviceDescriptor> device = devices.itemAt(i);
+ bool added = false;
+ for (const auto& device : devices) {
if (indexOf(device) < 0 && SortedVector::add(device) >= 0) {
- refreshTypes();
+ added = true;
}
}
+ if (added) {
+ refreshTypes();
+ }
}
ssize_t DeviceVector::add(const sp<DeviceDescriptor>& item)
@@ -148,14 +152,12 @@
sp<DeviceDescriptor> DeviceVector::getDeviceFromId(audio_port_handle_t id) const
{
- sp<DeviceDescriptor> device;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getId() == id) {
- device = itemAt(i);
- break;
+ for (const auto& device : *this) {
+ if (device->getId() == id) {
+ return device;
}
}
- return device;
+ return nullptr;
}
DeviceVector DeviceVector::getDevicesFromType(audio_devices_t type) const
@@ -180,11 +182,9 @@
audio_devices_t type, const String8& address) const
{
DeviceVector devices;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->type() == type) {
- if (itemAt(i)->mAddress == address) {
- devices.add(itemAt(i));
- }
+ for (const auto& device : *this) {
+ if (device->type() == type && device->mAddress == address) {
+ devices.add(device);
}
}
return devices;
@@ -192,14 +192,12 @@
sp<DeviceDescriptor> DeviceVector::getDeviceFromTagName(const String8 &tagName) const
{
- sp<DeviceDescriptor> device;
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getTagName() == tagName) {
- device = itemAt(i);
- break;
+ for (const auto& device : *this) {
+ if (device->getTagName() == tagName) {
+ return device;
}
}
- return device;
+ return nullptr;
}
status_t DeviceVector::dump(int fd, const String8 &tag, int spaces, bool verbose) const
@@ -248,8 +246,9 @@
// without the test?
// This has been demonstrated to NOT be true (at start up)
// ALOG_ASSERT(mModule != NULL);
- dstConfig->ext.device.hw_module = mModule != 0 ? mModule->mHandle : AUDIO_MODULE_HANDLE_NONE;
- strncpy(dstConfig->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
+ dstConfig->ext.device.hw_module =
+ mModule != 0 ? mModule->getHandle() : AUDIO_MODULE_HANDLE_NONE;
+ (void)audio_utils_strlcpy_zerofill(dstConfig->ext.device.address, mAddress.string());
}
void DeviceDescriptor::toAudioPort(struct audio_port *port) const
@@ -259,8 +258,8 @@
port->id = mId;
toAudioPortConfig(&port->active_config);
port->ext.device.type = mDeviceType;
- port->ext.device.hw_module = mModule->mHandle;
- strncpy(port->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
+ port->ext.device.hw_module = mModule->getHandle();
+ (void)audio_utils_strlcpy_zerofill(port->ext.device.address, mAddress.string());
}
void DeviceDescriptor::importAudioPort(const sp<AudioPort>& port, bool force) {
@@ -312,4 +311,4 @@
AudioPort::log(" ");
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/Gains.cpp b/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
index e3fc9a8..6407a17 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Gains.cpp
@@ -113,86 +113,104 @@
{0, 0.0f}, {1, 0.0f}, {2, 0.0f}, {100, 0.0f}
};
+const VolumeCurvePoint
+Gains::sHearingAidVolumeCurve[Volume::VOLCNT] = {
+ {1, -128.0f}, {20, -80.0f}, {60, -40.0f}, {100, 0.0f}
+};
+
const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT]
[DEVICE_CATEGORY_CNT] = {
{ // AUDIO_STREAM_VOICE_CALL
Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_SYSTEM
Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_RING
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_MUSIC
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_ALARM
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_NOTIFICATION
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_BLUETOOTH_SCO
Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_ENFORCED_AUDIBLE
Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_DTMF
Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sExtMediaSystemVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_TTS
// "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sSilentVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_ACCESSIBILITY
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sHearingAidVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_REROUTING
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
{ // AUDIO_STREAM_PATCH
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
- Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EXT_MEDIA
+ Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_HEARING_AID
},
};
@@ -235,4 +253,4 @@
return decibels;
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index cc56fb8..aef7dbe 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -154,10 +154,9 @@
DeviceVector HwModule::getRouteSourceDevices(const sp<AudioRoute> &route) const
{
DeviceVector sourceDevices;
- Vector <sp<AudioPort> > sources = route->getSources();
- for (size_t i = 0; i < sources.size(); i++) {
- if (sources[i]->getType() == AUDIO_PORT_TYPE_DEVICE) {
- sourceDevices.add(mDeclaredDevices.getDeviceFromTagName(sources[i]->getTagName()));
+ for (const auto& source : route->getSources()) {
+ if (source->getType() == AUDIO_PORT_TYPE_DEVICE) {
+ sourceDevices.add(mDeclaredDevices.getDeviceFromTagName(source->getTagName()));
}
}
return sourceDevices;
@@ -173,17 +172,15 @@
void HwModule::refreshSupportedDevices()
{
// Now updating the streams (aka IOProfile until now) supported devices
- for (size_t i = 0; i < mInputProfiles.size(); i++) {
- sp<IOProfile> stream = mInputProfiles[i];
+ for (const auto& stream : mInputProfiles) {
DeviceVector sourceDevices;
- const AudioRouteVector &routes = stream->getRoutes();
- for (size_t j = 0; j < routes.size(); j++) {
- sp<AudioPort> sink = routes[j]->getSink();
+ for (const auto& route : stream->getRoutes()) {
+ sp<AudioPort> sink = route->getSink();
if (sink == 0 || stream != sink) {
ALOGE("%s: Invalid route attached to input stream", __FUNCTION__);
continue;
}
- DeviceVector sourceDevicesForRoute = getRouteSourceDevices(routes[j]);
+ DeviceVector sourceDevicesForRoute = getRouteSourceDevices(route);
if (sourceDevicesForRoute.isEmpty()) {
ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().string());
continue;
@@ -196,17 +193,15 @@
}
stream->setSupportedDevices(sourceDevices);
}
- for (size_t i = 0; i < mOutputProfiles.size(); i++) {
- sp<IOProfile> stream = mOutputProfiles[i];
+ for (const auto& stream : mOutputProfiles) {
DeviceVector sinkDevices;
- const AudioRouteVector &routes = stream->getRoutes();
- for (size_t j = 0; j < routes.size(); j++) {
- sp<AudioPort> source = routes[j]->getSources().findByTagName(stream->getTagName());
+ for (const auto& route : stream->getRoutes()) {
+ sp<AudioPort> source = route->getSources().findByTagName(stream->getTagName());
if (source == 0 || stream != source) {
ALOGE("%s: Invalid route attached to output stream", __FUNCTION__);
continue;
}
- sp<DeviceDescriptor> sinkDevice = getRouteSinkDevice(routes[j]);
+ sp<DeviceDescriptor> sinkDevice = getRouteSinkDevice(route);
if (sinkDevice == 0) {
ALOGE("%s: invalid sink device for %s", __FUNCTION__, stream->getName().string());
continue;
@@ -217,6 +212,12 @@
}
}
+void HwModule::setHandle(audio_module_handle_t handle) {
+ ALOGW_IF(mHandle != AUDIO_MODULE_HANDLE_NONE,
+ "HwModule handle is changing from %d to %d", mHandle, handle);
+ mHandle = handle;
+}
+
void HwModule::dump(int fd)
{
const size_t SIZE = 256;
@@ -252,60 +253,40 @@
sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
{
- sp <HwModule> module;
-
- for (size_t i = 0; i < size(); i++)
- {
- if (strcmp(itemAt(i)->getName(), name) == 0) {
- return itemAt(i);
+ for (const auto& module : *this) {
+ if (strcmp(module->getName(), name) == 0) {
+ return module;
}
}
- return module;
+ return nullptr;
}
-
sp <HwModule> HwModuleCollection::getModuleForDevice(audio_devices_t device) const
{
- sp <HwModule> module;
-
- for (size_t i = 0; i < size(); i++) {
- if (itemAt(i)->getHandle() == 0) {
- continue;
- }
- if (audio_is_output_device(device)) {
- for (size_t j = 0; j < itemAt(i)->mOutputProfiles.size(); j++)
- {
- if (itemAt(i)->mOutputProfiles[j]->supportDevice(device)) {
- return itemAt(i);
- }
- }
- } else {
- for (size_t j = 0; j < itemAt(i)->mInputProfiles.size(); j++) {
- if (itemAt(i)->mInputProfiles[j]->supportDevice(device)) {
- return itemAt(i);
- }
+ for (const auto& module : *this) {
+ const auto& profiles = audio_is_output_device(device) ?
+ module->getOutputProfiles() : module->getInputProfiles();
+ for (const auto& profile : profiles) {
+ if (profile->supportDevice(device)) {
+ return module;
}
}
}
- return module;
+ return nullptr;
}
-sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
- const char *device_address,
- const char *device_name,
- bool matchAdress) const
+sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
+ const char *device_address,
+ const char *device_name,
+ bool matchAdress) const
{
- String8 address = (device_address == NULL) ? String8("") : String8(device_address);
+ String8 address = (device_address == nullptr) ? String8("") : String8(device_address);
// handle legacy remote submix case where the address was not always specified
if (device_distinguishes_on_address(device) && (address.length() == 0)) {
address = String8("0");
}
- for (size_t i = 0; i < size(); i++) {
- const sp<HwModule> hwModule = itemAt(i);
- if (hwModule->mHandle == 0) {
- continue;
- }
+ for (const auto& hwModule : *this) {
DeviceVector declaredDevices = hwModule->getDeclaredDevices();
DeviceVector deviceList = declaredDevices.getDevicesFromTypeAddr(device, address);
if (!deviceList.isEmpty()) {
@@ -340,4 +321,5 @@
return NO_ERROR;
}
+
} //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 74ef4ec..fbc2384 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -35,7 +35,9 @@
audio_format_t *updatedFormat,
audio_channel_mask_t channelMask,
audio_channel_mask_t *updatedChannelMask,
- uint32_t flags) const
+ // FIXME type punning here
+ uint32_t flags,
+ bool exactMatchRequiredForInputFlags) const
{
const bool isPlaybackThread =
getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SOURCE;
@@ -71,7 +73,13 @@
return false;
}
} else {
- if (checkExactAudioProfile(samplingRate, channelMask, format) != NO_ERROR) {
+ const struct audio_port_config config = {
+ .config_mask = AUDIO_PORT_CONFIG_ALL & ~AUDIO_PORT_CONFIG_GAIN,
+ .sample_rate = samplingRate,
+ .channel_mask = channelMask,
+ .format = format,
+ };
+ if (checkExactAudioProfile(&config) != NO_ERROR) {
return false;
}
}
@@ -84,7 +92,7 @@
// An existing normal stream is compatible with a fast track request,
// but the fast request will be denied by AudioFlinger and converted to normal track.
if (isRecordThread && ((getFlags() ^ flags) &
- ~AUDIO_INPUT_FLAG_FAST)) {
+ ~(exactMatchRequiredForInputFlags ? AUDIO_INPUT_FLAG_NONE : AUDIO_INPUT_FLAG_FAST))) {
return false;
}
@@ -122,6 +130,16 @@
result.append("\n");
write(fd, result.string(), result.size());
mSupportedDevices.dump(fd, String8("Supported"), 4, false);
+
+ result.clear();
+ snprintf(buffer, SIZE, "\n - maxOpenCount: %u - curOpenCount: %u\n",
+ maxOpenCount, curOpenCount);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " - maxActiveCount: %u - curActiveCount: %u\n",
+ maxActiveCount, curActiveCount);
+ result.append(buffer);
+
+ write(fd, result.string(), result.size());
}
void IOProfile::log()
@@ -129,4 +147,4 @@
// @TODO: forward log to AudioPort
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index a224004..a253113 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -140,19 +140,19 @@
}
string minValueMBLiteral = getXmlAttribute(root, Attributes::minValueMB);
- uint32_t minValueMB;
+ int32_t minValueMB;
if (!minValueMBLiteral.empty() && convertTo(minValueMBLiteral, minValueMB)) {
gain->setMinValueInMb(minValueMB);
}
string maxValueMBLiteral = getXmlAttribute(root, Attributes::maxValueMB);
- uint32_t maxValueMB;
+ int32_t maxValueMB;
if (!maxValueMBLiteral.empty() && convertTo(maxValueMBLiteral, maxValueMB)) {
gain->setMaxValueInMb(maxValueMB);
}
string defaultValueMBLiteral = getXmlAttribute(root, Attributes::defaultValueMB);
- uint32_t defaultValueMB;
+ int32_t defaultValueMB;
if (!defaultValueMBLiteral.empty() && convertTo(defaultValueMBLiteral, defaultValueMB)) {
gain->setDefaultValueInMb(defaultValueMB);
}
@@ -217,6 +217,8 @@
const char MixPortTraits::Attributes::name[] = "name";
const char MixPortTraits::Attributes::role[] = "role";
const char MixPortTraits::Attributes::flags[] = "flags";
+const char MixPortTraits::Attributes::maxOpenCount[] = "maxOpenCount";
+const char MixPortTraits::Attributes::maxActiveCount[] = "maxActiveCount";
status_t MixPortTraits::deserialize(_xmlDoc *doc, const _xmlNode *child, PtrElement &mixPort,
PtrSerializingCtx /*serializingContext*/)
@@ -259,6 +261,14 @@
mixPort->setFlags(InputFlagConverter::maskFromString(flags));
}
}
+ string maxOpenCount = getXmlAttribute(child, Attributes::maxOpenCount);
+ if (!maxOpenCount.empty()) {
+ convertTo(maxOpenCount, mixPort->maxOpenCount);
+ }
+ string maxActiveCount = getXmlAttribute(child, Attributes::maxActiveCount);
+ if (!maxActiveCount.empty()) {
+ convertTo(maxActiveCount, mixPort->maxActiveCount);
+ }
// Deserialize children
AudioGainTraits::Collection gains;
deserializeCollection<AudioGainTraits>(doc, child, gains, NULL);
@@ -638,4 +648,4 @@
return android::OK;
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
index 689f4e6..d34214b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
@@ -40,7 +40,7 @@
return indexOfKey(session) >= 0 && valueFor(session)->mDeviceDescriptor != 0;
}
-bool SessionRouteMap::hasRouteChanged(audio_session_t session)
+bool SessionRouteMap::getAndClearRouteChanged(audio_session_t session)
{
if (indexOfKey(session) >= 0) {
if (valueFor(session)->mChanged) {
@@ -82,7 +82,7 @@
void SessionRouteMap::log(const char* caption)
{
ALOGI("%s ----", caption);
- for(size_t index = 0; index < size(); index++) {
+ for (size_t index = 0; index < size(); index++) {
valueAt(index)->log(" ");
}
}
@@ -104,9 +104,7 @@
sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
if (route != 0) {
- if (((route->mDeviceDescriptor == 0) && (descriptor != 0)) ||
- ((route->mDeviceDescriptor != 0) &&
- ((descriptor == 0) || (!route->mDeviceDescriptor->equals(descriptor))))) {
+ if (descriptor != 0 || route->mDeviceDescriptor != 0) {
route->mChanged = true;
}
route->mRefCount++;
@@ -114,11 +112,29 @@
} else {
route = new SessionRoute(session, streamType, source, descriptor, uid);
route->mRefCount++;
- add(session, route);
if (descriptor != 0) {
route->mChanged = true;
}
+ add(session, route);
}
}
+audio_devices_t SessionRouteMap::getActiveDeviceForStream(audio_stream_type_t streamType,
+ const DeviceVector& availableDevices)
+{
+ audio_devices_t device = AUDIO_DEVICE_NONE;
+
+ for (size_t index = 0; index < size(); index++) {
+ sp<SessionRoute> route = valueAt(index);
+ if (streamType == route->mStreamType && route->isActiveOrChanged()
+ && route->mDeviceDescriptor != 0) {
+ device = route->mDeviceDescriptor->type();
+ if (!availableDevices.getDevicesFromType(device).isEmpty()) {
+ break;
+ }
+ }
+ }
+ return device;
+}
+
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
index b3019e1..65649fb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
@@ -223,4 +223,4 @@
return NO_ERROR;
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 0362037..6f48eae 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -29,6 +29,7 @@
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_SPEAKER),
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EARPIECE),
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA),
+ MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_HEARING_AID),
TERMINATOR
};
@@ -65,4 +66,4 @@
template class TypeConverter<RouteFlagTraits>;
template class TypeConverter<RuleTraits>;
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
index 14caf7c..ac3f1bc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
@@ -29,6 +29,13 @@
size_t nbCurvePoints = mCurvePoints.size();
// the volume index in the UI is relative to the min and max volume indices for this stream
int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
+ if (indexInUi < volIndexMin) {
+ ALOGV("VOLUME remapping index from %d to min index %d", indexInUi, volIndexMin);
+ indexInUi = volIndexMin;
+ } else if (indexInUi > volIndexMax) {
+ ALOGV("VOLUME remapping index from %d to max index %d", indexInUi, volIndexMax);
+ indexInUi = volIndexMax;
+ }
int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
// Where would this volume index been inserted in the curve point
@@ -138,4 +145,4 @@
return NO_ERROR;
}
-}; // namespace android
+} // namespace android
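
The VolumeCurve change above clamps the UI index into [volIndexMin, volIndexMax] before converting it to a position on the curve, so an out-of-range index can no longer produce a negative or overshooting volIdx. A small self-contained sketch of the same arithmetic (the min/max values in main() are example numbers, not taken from the patch; the curve point lookup that follows in volIndexToDb() is omitted):

#include <algorithm>
#include <cstdio>

// Maps a UI volume index onto the [0, nbSteps] range spanned by the curve,
// after clamping it into the legal [volIndexMin, volIndexMax] window,
// matching the remapping added in VolumeCurve::volIndexToDb().
int uiIndexToCurveIndex(int indexInUi, int volIndexMin, int volIndexMax, int nbSteps) {
    indexInUi = std::max(volIndexMin, std::min(indexInUi, volIndexMax));
    return (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
}

int main() {
    const int nbSteps = 101;                       // curve points span indices 0..100
    const int volIndexMin = 0, volIndexMax = 15;   // e.g. a 0..15 UI volume range
    std::printf("%d\n", uiIndexToCurveIndex(7,  volIndexMin, volIndexMax, nbSteps)); // 47
    std::printf("%d\n", uiIndexToCurveIndex(-1, volIndexMin, volIndexMax, nbSteps)); // 0, clamped to min
    std::printf("%d\n", uiIndexToCurveIndex(99, volIndexMin, volIndexMax, nbSteps)); // 101, clamped to max
    return 0;
}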
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index 7af2f81..a75f1cb 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -163,37 +163,16 @@
sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
<route type="mix" sink="Wired Headphones"
sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
- <route type="mix" sink="Telephony Tx"
- sources="voice_tx"/>
<route type="mix" sink="primary input"
sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
<route type="mix" sink="Telephony Tx"
- sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
+ sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic, voice_tx"/>
<route type="mix" sink="voice_rx"
sources="Telephony Rx"/>
</routes>
</module>
- <!-- HDMI Audio HAL -->
- <module description="HDMI Audio HAL" name="hdmi" version="2.0">
- <mixPorts>
- <mixPort name="hdmi output" role="source">
- <profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000"/>
- </mixPort>
- </mixPorts>
- <devicePorts>
- <devicePort tagName="HDMI Out" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink">
- <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
- samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
- </devicePort>
- </devicePorts>
- <routes>
- <route type="mix" sink="HDMI Out"
- sources="hdmi output"/>
- </routes>
- </module>
-
<!-- A2dp Audio HAL -->
<xi:include href="a2dp_audio_policy_configuration.xml"/>
@@ -203,6 +182,9 @@
<!-- Remote Submix Audio HAL -->
<xi:include href="r_submix_audio_policy_configuration.xml"/>
+ <!-- Hearing aid Audio HAL -->
+ <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+
</modules>
<!-- End of Modules section -->
diff --git a/services/audiopolicy/config/audio_policy_volumes.xml b/services/audiopolicy/config/audio_policy_volumes.xml
index 43a47b0..ec64a7c 100644
--- a/services/audiopolicy/config/audio_policy_volumes.xml
+++ b/services/audiopolicy/config/audio_policy_volumes.xml
@@ -43,6 +43,8 @@
</volume>
<volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
@@ -55,6 +57,8 @@
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_SPEAKER">
@@ -67,6 +71,8 @@
ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -75,18 +81,22 @@
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_HEADSET"
- ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_SPEAKER">
- <point>1,-2970</point>
+ <point>0,-2970</point>
<point>33,-2010</point>
<point>66,-1020</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EARPIECE"
- ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
- ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_SPEAKER">
@@ -99,6 +109,8 @@
ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>0,-4200</point>
<point>33,-2800</point>
@@ -119,6 +131,8 @@
</volume>
<volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
@@ -131,6 +145,8 @@
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
@@ -143,6 +159,8 @@
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="SILENT_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -151,14 +169,18 @@
ref="SILENT_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="SILENT_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="SILENT_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_HEADSET"
- ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_SPEAKER"
- ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EARPIECE"
- ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
- ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -167,6 +189,8 @@
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_SPEAKER"
@@ -175,5 +199,7 @@
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
+ ref="FULL_SCALE_VOLUME_CURVE"/>
</volumes>
diff --git a/services/audiopolicy/config/default_volume_tables.xml b/services/audiopolicy/config/default_volume_tables.xml
index 9a22b1d..207be41 100644
--- a/services/audiopolicy/config/default_volume_tables.xml
+++ b/services/audiopolicy/config/default_volume_tables.xml
@@ -67,4 +67,63 @@
<point>60,-2100</point>
<point>100,-1000</point>
</reference>
+ <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+ <!-- Default Hearing Aid Volume Curve -->
+ <point>1,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+ <!-- **************************************************************** -->
+ <!-- Non-mutable default volume curves: -->
+ <!-- * first point is always for index 0 -->
+ <!-- * attenuation is small enough that stream can still be heard -->
+ <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+ <!-- Default non-mutable reference Volume Curve -->
+ <!-- based on DEFAULT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve for headset -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+ <!-- Default non-mutable Speaker Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+ <!-- Default non-mutable Ext Media System Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+ <!-- Default non-mutable Hearing Aid Volume Curve -->
+ <!-- based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+ <point>0,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
</volumes>
diff --git a/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml b/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
new file mode 100644
index 0000000..3c48e88
--- /dev/null
+++ b/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Hearing aid Audio HAL Audio Policy Configuration file -->
+<module name="hearing_aid" halVersion="2.0">
+ <mixPorts>
+ <mixPort name="hearing aid output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="24000,16000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="BT Hearing Aid Out" sources="hearing aid output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
index 567ff9e..04594f5 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
@@ -137,4 +137,4 @@
virtual ~AudioPolicyManagerInterface() {}
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index 846fa48..b7902cf 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -59,4 +59,4 @@
virtual ~AudioPolicyManagerObserver() {}
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h b/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
index 6c4be2c..a597e87 100644
--- a/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
+++ b/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
@@ -76,6 +76,6 @@
template <>
AudioPolicyPluginInterface *EngineInstance::queryInterface() const;
-}; // namespace audio_policy
+} // namespace audio_policy
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
index 759d0c9..2e29a9b 100644
--- a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
+++ b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
@@ -143,4 +143,4 @@
virtual ~AudioPolicyPluginInterface() {}
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index bc5e035..328d23d 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -194,7 +194,7 @@
AudioPolicyManagerObserver *mApmObserver;
};
-}; // namespace audio_policy
+} // namespace audio_policy
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h b/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
index 31b7e0f..e4fd176 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
@@ -62,7 +62,8 @@
[AUDIO_POLICY_FORCE_FOR_DOCK] = "ForceUseForDock",
[AUDIO_POLICY_FORCE_FOR_SYSTEM] = "ForceUseForSystem",
[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] = "ForceUseForHdmiSystemAudio",
- [AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND] = "ForceUseForEncodedSurround"
+ [AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND] = "ForceUseForEncodedSurround",
+ [AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING] = "ForceUseForVibrateRinging"
};
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 43205a2..3e13e50 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -148,12 +148,20 @@
case AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND:
if (config != AUDIO_POLICY_FORCE_NONE &&
config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER &&
- config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+ config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS &&
+ config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
ALOGW("setForceUse() invalid config %d for ENCODED_SURROUND", config);
return BAD_VALUE;
}
mForceUse[usage] = config;
break;
+ case AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING:
+ if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_NONE) {
+ ALOGW("setForceUse() invalid config %d for FOR_VIBRATE_RINGING", config);
+ return BAD_VALUE;
+ }
+ mForceUse[usage] = config;
+ break;
default:
ALOGW("setForceUse() invalid usage %d", usage);
break; // TODO return BAD_VALUE?
@@ -238,18 +246,19 @@
const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
return getDeviceForStrategyInt(strategy, availableOutputDevices,
- availableInputDevices, outputs);
+ availableInputDevices, outputs, (uint32_t)AUDIO_DEVICE_NONE);
}
-
audio_devices_t Engine::getDeviceForStrategyInt(routing_strategy strategy,
- DeviceVector availableOutputDevices,
- DeviceVector availableInputDevices,
- const SwAudioOutputCollection &outputs) const
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
+ const SwAudioOutputCollection &outputs,
+ uint32_t outputDeviceTypesToIgnore) const
{
uint32_t device = AUDIO_DEVICE_NONE;
- uint32_t availableOutputDevicesType = availableOutputDevices.types();
+ uint32_t availableOutputDevicesType =
+ availableOutputDevices.types() & ~outputDeviceTypesToIgnore;
switch (strategy) {
@@ -258,40 +267,26 @@
break;
case STRATEGY_SONIFICATION_RESPECTFUL:
- if (isInCall()) {
+ if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
device = getDeviceForStrategyInt(
- STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
- } else if (outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
- SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
- // while media is playing on a remote device, use the the sonification behavior.
- // Note that we test this usecase before testing if media is playing because
- // the isStreamActive() method only informs about the activity of a stream, not
- // if it's for local playback. Note also that we use the same delay between both tests
- device = getDeviceForStrategyInt(
- STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
- //user "safe" speaker if available instead of normal speaker to avoid triggering
- //other acoustic safety mechanisms for notification
- if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
- (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
- device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
- device &= ~AUDIO_DEVICE_OUT_SPEAKER;
- }
- } else if (outputs.isStreamActive(
- AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)
- || outputs.isStreamActive(
- AUDIO_STREAM_ACCESSIBILITY, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY))
- {
- // while media/a11y is playing (or has recently played), use the same device
- device = getDeviceForStrategyInt(
- STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs);
+ STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
+ outputDeviceTypesToIgnore);
} else {
- // when media is not playing anymore, fall back on the sonification behavior
- device = getDeviceForStrategyInt(
- STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
- //user "safe" speaker if available instead of normal speaker to avoid triggering
- //other acoustic safety mechanisms for notification
- if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
- (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
+ bool media_active_locally =
+ outputs.isStreamActiveLocally(
+ AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)
+ || outputs.isStreamActiveLocally(
+ AUDIO_STREAM_ACCESSIBILITY, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
+ // routing is same as media without the "remote" device
+ device = getDeviceForStrategyInt(STRATEGY_MEDIA,
+ availableOutputDevices,
+ availableInputDevices, outputs,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX | outputDeviceTypesToIgnore);
+ // if no media is playing on the device, check for mandatory use of "safe" speaker
+ // when media would have played on speaker, and the safe speaker path is available
+ if (!media_active_locally
+ && (device & AUDIO_DEVICE_OUT_SPEAKER)
+ && (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
device &= ~AUDIO_DEVICE_OUT_SPEAKER;
}
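
The rewritten STRATEGY_SONIFICATION_RESPECTFUL case above now routes like STRATEGY_MEDIA minus the remote-submix device, and only swaps the regular speaker for the "safe" speaker when no media or accessibility stream is playing locally. A minimal sketch of that substitution on plain device bitmasks, not part of the patch (the bit values are stand-ins, not the real AUDIO_DEVICE_OUT_* constants):

#include <cstdint>

using audio_devices_t = uint32_t;
// Stand-in bit values for illustration only.
constexpr audio_devices_t SPEAKER      = 1u << 0;
constexpr audio_devices_t SPEAKER_SAFE = 1u << 1;

// When nothing is playing locally and the safe speaker path exists,
// notifications move from the regular speaker to the safe one.
audio_devices_t applySafeSpeaker(audio_devices_t device,
                                 audio_devices_t availableDevices,
                                 bool mediaActiveLocally) {
    if (!mediaActiveLocally &&
        (device & SPEAKER) &&
        (availableDevices & SPEAKER_SAFE)) {
        device |= SPEAKER_SAFE;
        device &= ~SPEAKER;
    }
    return device;
}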
@@ -302,7 +297,8 @@
if (!isInCall()) {
// when off call, DTMF strategy follows the same rules as MEDIA strategy
device = getDeviceForStrategyInt(
- STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs);
+ STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs,
+ outputDeviceTypesToIgnore);
break;
}
// when in call, DTMF and PHONE strategies follow the same rules
@@ -318,8 +314,14 @@
sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
audio_devices_t availPrimaryInputDevices =
availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
+
+ // TODO: getPrimaryOutput returns only devices from the first module in
+ // audio_policy_configuration.xml, hearing aid is not there, but it's
+ // a primary device
+ // FIXME: this is not the right way of solving this problem
audio_devices_t availPrimaryOutputDevices =
- primaryOutput->supportedDevices() & availableOutputDevices.types();
+ (primaryOutput->supportedDevices() | AUDIO_DEVICE_OUT_HEARING_AID) &
+ availableOutputDevices.types();
if (((availableInputDevices.types() &
AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
@@ -344,6 +346,8 @@
// FALL THROUGH
default: // FORCE_NONE
+ device = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
+ if (device) break;
// when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
if (!isInCall() &&
(mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
@@ -406,9 +410,10 @@
// If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
// handleIncallSonification().
- if (isInCall()) {
+ if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
device = getDeviceForStrategyInt(
- STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
+ STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
+ outputDeviceTypesToIgnore);
break;
}
// FALL THROUGH
@@ -426,8 +431,7 @@
// if SCO headset is connected and we are told to use it, play ringtone over
// speaker and BT SCO
- if (((availableOutputDevicesType & AUDIO_DEVICE_OUT_ALL_SCO) != 0) &&
- (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO)) {
+ if ((availableOutputDevicesType & AUDIO_DEVICE_OUT_ALL_SCO) != 0) {
uint32_t device2 = AUDIO_DEVICE_NONE;
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
if (device2 == AUDIO_DEVICE_NONE) {
@@ -436,10 +440,29 @@
if (device2 == AUDIO_DEVICE_NONE) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
}
-
- if (device2 != AUDIO_DEVICE_NONE) {
- device |= device2;
- break;
+ // Use ONLY Bluetooth SCO output when ringing in vibration mode
+ if (!((mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
+ && (strategy == STRATEGY_ENFORCED_AUDIBLE))) {
+ if (mForceUse[AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING]
+ == AUDIO_POLICY_FORCE_BT_SCO) {
+ if (device2 != AUDIO_DEVICE_NONE) {
+ device = device2;
+ break;
+ }
+ }
+ }
+ // Use both Bluetooth SCO and phone default output when ringing in normal mode
+ if (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) {
+ if ((strategy == STRATEGY_SONIFICATION) &&
+ (device & AUDIO_DEVICE_OUT_SPEAKER) &&
+ (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
+ device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+ device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+ }
+ if (device2 != AUDIO_DEVICE_NONE) {
+ device |= device2;
+ break;
+ }
}
}
// The second device used for sonification is the same as the device used by media strategy
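
The new ringtone handling above distinguishes two force-use settings: FOR_VIBRATE_RINGING set to BT_SCO routes the ring exclusively to the SCO device, while FOR_COMMUNICATION set to BT_SCO keeps the earlier behaviour of ringing on both SCO and the default sink. A compact sketch of that decision, not part of the patch; the ENFORCED_AUDIBLE exception and the speaker-safe handling shown in the diff are omitted, and the parameters are plain stand-ins:

#include <cstdint>

using audio_devices_t = uint32_t;
constexpr audio_devices_t NONE = 0;

// forceVibrateRingingToSco / forceCommunicationToSco model the two
// AUDIO_POLICY_FORCE_FOR_* settings checked in the engine.
audio_devices_t pickRingtoneDevice(audio_devices_t defaultDevice,
                                   audio_devices_t scoDevice,
                                   bool forceVibrateRingingToSco,
                                   bool forceCommunicationToSco) {
    if (scoDevice != NONE && forceVibrateRingingToSco) {
        return scoDevice;                    // vibrate mode: SCO only
    }
    if (scoDevice != NONE && forceCommunicationToSco) {
        return defaultDevice | scoDevice;    // normal mode: SCO plus default sink
    }
    return defaultDevice;
}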
@@ -463,11 +486,13 @@
if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
return getDeviceForStrategyInt(
- STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
+ STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
+ outputDeviceTypesToIgnore);
}
if (isInCall()) {
return getDeviceForStrategyInt(
- STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
+ STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
+ outputDeviceTypesToIgnore);
}
}
// For other cases, STRATEGY_ACCESSIBILITY behaves like STRATEGY_MEDIA
@@ -486,9 +511,13 @@
}
if (isInCall() && (strategy == STRATEGY_MEDIA)) {
device = getDeviceForStrategyInt(
- STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
+ STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
+ outputDeviceTypesToIgnore);
break;
}
+ if (device2 == AUDIO_DEVICE_NONE) {
+ device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
+ }
if ((device2 == AUDIO_DEVICE_NONE) &&
(mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
outputs.isA2dpSupported()) {
@@ -591,6 +620,23 @@
uint32_t device = AUDIO_DEVICE_NONE;
+ // when a call is active, force device selection to match source VOICE_COMMUNICATION
+ // for most other input sources to avoid rerouting call TX audio
+ if (isInCall()) {
+ switch (inputSource) {
+ case AUDIO_SOURCE_DEFAULT:
+ case AUDIO_SOURCE_MIC:
+ case AUDIO_SOURCE_VOICE_RECOGNITION:
+ case AUDIO_SOURCE_UNPROCESSED:
+ case AUDIO_SOURCE_HOTWORD:
+ case AUDIO_SOURCE_CAMCORDER:
+ inputSource = AUDIO_SOURCE_VOICE_COMMUNICATION;
+ break;
+ default:
+ break;
+ }
+ }
+
switch (inputSource) {
case AUDIO_SOURCE_VOICE_UPLINK:
if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
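
The in-call block added at the top of getDeviceForInputSource() above folds most generic capture use cases into AUDIO_SOURCE_VOICE_COMMUNICATION while a call is active, so their device choice cannot reroute the call TX path. A standalone sketch of that remapping, not part of the patch (the enum values are illustrative stand-ins for audio_source_t; only the names matter):

// Simplified copy of the source values used in the remapping.
enum Source {
    SOURCE_DEFAULT,
    SOURCE_MIC,
    SOURCE_VOICE_RECOGNITION,
    SOURCE_UNPROCESSED,
    SOURCE_HOTWORD,
    SOURCE_CAMCORDER,
    SOURCE_VOICE_COMMUNICATION,
    SOURCE_REMOTE_SUBMIX,  // example of a source that is left untouched
};

// While a call is active, generic capture sources are treated as
// voice-communication capture so they follow the call routing.
Source remapForCall(Source inputSource, bool inCall) {
    if (!inCall) {
        return inputSource;
    }
    switch (inputSource) {
    case SOURCE_DEFAULT:
    case SOURCE_MIC:
    case SOURCE_VOICE_RECOGNITION:
    case SOURCE_UNPROCESSED:
    case SOURCE_HOTWORD:
    case SOURCE_CAMCORDER:
        return SOURCE_VOICE_COMMUNICATION;
    default:
        return inputSource;
    }
}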
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 57538c4..06186c1 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -126,9 +126,10 @@
routing_strategy getStrategyForUsage(audio_usage_t usage);
audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
audio_devices_t getDeviceForStrategyInt(routing_strategy strategy,
- DeviceVector availableOutputDevices,
- DeviceVector availableInputDevices,
- const SwAudioOutputCollection &outputs) const;
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
+ const SwAudioOutputCollection &outputs,
+ uint32_t outputDeviceTypesToIgnore) const;
audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
audio_mode_t mPhoneState; /**< current phone state. */
diff --git a/services/audiopolicy/manager/AudioPolicyFactory.cpp b/services/audiopolicy/manager/AudioPolicyFactory.cpp
index 9910a1f..3efa1b0 100644
--- a/services/audiopolicy/manager/AudioPolicyFactory.cpp
+++ b/services/audiopolicy/manager/AudioPolicyFactory.cpp
@@ -29,4 +29,4 @@
delete interface;
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index b646c8e..0318ffe 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -34,7 +34,6 @@
#include <AudioPolicyManagerInterface.h>
#include <AudioPolicyEngineInstance.h>
-#include <cutils/atomic.h>
#include <cutils/properties.h>
#include <utils/Log.h>
#include <media/AudioParameter.h>
@@ -61,6 +60,26 @@
// media / notification / system volume.
constexpr float IN_CALL_EARPIECE_HEADROOM_DB = 3.f;
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+// Array of all surround formats.
+static const audio_format_t SURROUND_FORMATS[] = {
+ AUDIO_FORMAT_AC3,
+ AUDIO_FORMAT_E_AC3,
+ AUDIO_FORMAT_DTS,
+ AUDIO_FORMAT_DTS_HD,
+ AUDIO_FORMAT_AAC_LC,
+ AUDIO_FORMAT_DOLBY_TRUEHD,
+ AUDIO_FORMAT_E_AC3_JOC,
+};
+// Array of all AAC formats. When AAC is enabled by users, all AAC formats should be enabled.
+static const audio_format_t AAC_FORMATS[] = {
+ AUDIO_FORMAT_AAC_LC,
+ AUDIO_FORMAT_AAC_HE_V1,
+ AUDIO_FORMAT_AAC_HE_V2,
+ AUDIO_FORMAT_AAC_ELD,
+ AUDIO_FORMAT_AAC_XHE,
+};
+
// ----------------------------------------------------------------------------
// AudioPolicyInterface implementation
// ----------------------------------------------------------------------------
@@ -70,7 +89,9 @@
const char *device_address,
const char *device_name)
{
- return setDeviceConnectionStateInt(device, state, device_address, device_name);
+ status_t status = setDeviceConnectionStateInt(device, state, device_address, device_name);
+ nextAudioPortGeneration();
+ return status;
}
void AudioPolicyManager::broadcastDeviceConnectionState(audio_devices_t device,
@@ -90,7 +111,7 @@
const char *device_name)
{
ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s",
-- device, state, device_address, device_name);
+ device, state, device_address, device_name);
// connect/disconnect only 1 device at a time
if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
@@ -185,14 +206,14 @@
checkOutputForAllStrategies();
// outputs must be closed after checkOutputForAllStrategies() is executed
if (!outputs.isEmpty()) {
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
// close unused outputs after device disconnection or direct outputs that have been
// opened by checkOutputsForDevice() to query dynamic parameters
if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
(((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
(desc->mDirectOpenCount == 0))) {
- closeOutput(outputs[i]);
+ closeOutput(output);
}
}
// check again after closing A2DP output to reset mA2dpSuspended if needed
@@ -347,6 +368,9 @@
const char *device_name)
{
status_t status;
+ String8 reply;
+ AudioParameter param;
+ int isReconfigA2dpSupported = 0;
ALOGV("handleDeviceConfigChange(() device: 0x%X, address %s name %s",
device, device_address, device_name);
@@ -363,6 +387,26 @@
return NO_ERROR;
}
+ // For offloaded A2DP, Hw modules may have the capability to
+ // configure codecs. Check if any of the loaded hw modules
+ // supports this.
+ // If supported, send a set parameter to configure A2DP codecs
+ // and return. No need to toggle device state.
+ if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
+ reply = mpClientInterface->getParameters(
+ AUDIO_IO_HANDLE_NONE,
+ String8(AudioParameter::keyReconfigA2dpSupported));
+ AudioParameter repliedParameters(reply);
+ repliedParameters.getInt(
+ String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported);
+ if (isReconfigA2dpSupported) {
+ const String8 key(AudioParameter::keyReconfigA2dp);
+ param.add(key, String8("true"));
+ mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+ return NO_ERROR;
+ }
+ }
+
// Toggle the device state: UNAVAILABLE -> AVAILABLE
// This will force reading again the device configuration
status = setDeviceConnectionState(device,
@@ -389,9 +433,6 @@
uint32_t AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs)
{
bool createTxPatch = false;
- status_t status;
- audio_patch_handle_t afPatchHandle;
- DeviceVector deviceList;
uint32_t muteWaitMs = 0;
if(!hasPrimaryOutput() || mPrimaryOutput->device() == AUDIO_DEVICE_OUT_STUB) {
@@ -423,87 +464,53 @@
createTxPatch = true;
}
} else { // create RX path audio patch
- struct audio_patch patch;
-
- patch.num_sources = 1;
- patch.num_sinks = 1;
- deviceList = mAvailableOutputDevices.getDevicesFromType(rxDevice);
- ALOG_ASSERT(!deviceList.isEmpty(),
- "updateCallRouting() selected device not in output device list");
- sp<DeviceDescriptor> rxSinkDeviceDesc = deviceList.itemAt(0);
- deviceList = mAvailableInputDevices.getDevicesFromType(AUDIO_DEVICE_IN_TELEPHONY_RX);
- ALOG_ASSERT(!deviceList.isEmpty(),
- "updateCallRouting() no telephony RX device");
- sp<DeviceDescriptor> rxSourceDeviceDesc = deviceList.itemAt(0);
-
- rxSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
- rxSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
-
- // request to reuse existing output stream if one is already opened to reach the RX device
- SortedVector<audio_io_handle_t> outputs =
- getOutputsForDevice(rxDevice, mOutputs);
- audio_io_handle_t output = selectOutput(outputs,
- AUDIO_OUTPUT_FLAG_NONE,
- AUDIO_FORMAT_INVALID);
- if (output != AUDIO_IO_HANDLE_NONE) {
- sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
- ALOG_ASSERT(!outputDesc->isDuplicated(),
- "updateCallRouting() RX device output is duplicated");
- outputDesc->toAudioPortConfig(&patch.sources[1]);
- patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
- patch.num_sources = 2;
- }
-
- afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs);
- ALOGW_IF(status != NO_ERROR, "updateCallRouting() error %d creating RX audio patch",
- status);
- if (status == NO_ERROR) {
- mCallRxPatch = new AudioPatch(&patch, mUidCached);
- mCallRxPatch->mAfPatchHandle = afPatchHandle;
- mCallRxPatch->mUid = mUidCached;
- }
+ mCallRxPatch = createTelephonyPatch(true /*isRx*/, rxDevice, delayMs);
createTxPatch = true;
}
if (createTxPatch) { // create TX path audio patch
- struct audio_patch patch;
+ mCallTxPatch = createTelephonyPatch(false /*isRx*/, txDevice, delayMs);
+ }
- patch.num_sources = 1;
- patch.num_sinks = 1;
- deviceList = mAvailableInputDevices.getDevicesFromType(txDevice);
- ALOG_ASSERT(!deviceList.isEmpty(),
- "updateCallRouting() selected device not in input device list");
- sp<DeviceDescriptor> txSourceDeviceDesc = deviceList.itemAt(0);
- txSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
- deviceList = mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_TELEPHONY_TX);
- ALOG_ASSERT(!deviceList.isEmpty(),
- "updateCallRouting() no telephony TX device");
- sp<DeviceDescriptor> txSinkDeviceDesc = deviceList.itemAt(0);
- txSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
+ return muteWaitMs;
+}
- SortedVector<audio_io_handle_t> outputs =
- getOutputsForDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, mOutputs);
- audio_io_handle_t output = selectOutput(outputs,
- AUDIO_OUTPUT_FLAG_NONE,
- AUDIO_FORMAT_INVALID);
- // request to reuse existing output stream if one is already opened to reach the TX
- // path output device
- if (output != AUDIO_IO_HANDLE_NONE) {
- sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
- ALOG_ASSERT(!outputDesc->isDuplicated(),
- "updateCallRouting() RX device output is duplicated");
- outputDesc->toAudioPortConfig(&patch.sources[1]);
- patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
- patch.num_sources = 2;
- }
+sp<AudioPatch> AudioPolicyManager::createTelephonyPatch(
+ bool isRx, audio_devices_t device, uint32_t delayMs) {
+ struct audio_patch patch;
+ patch.num_sources = 1;
+ patch.num_sinks = 1;
+ sp<DeviceDescriptor> txSourceDeviceDesc;
+ if (isRx) {
+ fillAudioPortConfigForDevice(mAvailableOutputDevices, device, &patch.sinks[0]);
+ fillAudioPortConfigForDevice(
+ mAvailableInputDevices, AUDIO_DEVICE_IN_TELEPHONY_RX, &patch.sources[0]);
+ } else {
+ txSourceDeviceDesc = fillAudioPortConfigForDevice(
+ mAvailableInputDevices, device, &patch.sources[0]);
+ fillAudioPortConfigForDevice(
+ mAvailableOutputDevices, AUDIO_DEVICE_OUT_TELEPHONY_TX, &patch.sinks[0]);
+ }
+
+ audio_devices_t outputDevice = isRx ? device : AUDIO_DEVICE_OUT_TELEPHONY_TX;
+ SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(outputDevice, mOutputs);
+ audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
+ // request to reuse existing output stream if one is already opened to reach the target device
+ if (output != AUDIO_IO_HANDLE_NONE) {
+ sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+ ALOG_ASSERT(!outputDesc->isDuplicated(),
+ "%s() %#x device output %d is duplicated", __func__, outputDevice, output);
+ outputDesc->toAudioPortConfig(&patch.sources[1]);
+ patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
+ patch.num_sources = 2;
+ }
+
+ if (!isRx) {
// terminate active capture if on the same HW module as the call TX source device
// FIXME: would be better to refine to only inputs whose profile connects to the
// call TX device but this information is not in the audio patch and logic here must be
// symmetric to the one in startInput()
- Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ for (const auto& activeDesc : mInputs.getActiveInputs()) {
if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
AudioSessionCollection activeSessions =
activeDesc->getAudioSessions(true /*activeOnly*/);
@@ -514,19 +521,29 @@
}
}
}
-
- afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs);
- ALOGW_IF(status != NO_ERROR, "setPhoneState() error %d creating TX audio patch",
- status);
- if (status == NO_ERROR) {
- mCallTxPatch = new AudioPatch(&patch, mUidCached);
- mCallTxPatch->mAfPatchHandle = afPatchHandle;
- mCallTxPatch->mUid = mUidCached;
- }
}
- return muteWaitMs;
+ audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ status_t status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs);
+ ALOGW_IF(status != NO_ERROR,
+ "%s() error %d creating %s audio patch", __func__, status, isRx ? "RX" : "TX");
+ sp<AudioPatch> audioPatch;
+ if (status == NO_ERROR) {
+ audioPatch = new AudioPatch(&patch, mUidCached);
+ audioPatch->mAfPatchHandle = afPatchHandle;
+ audioPatch->mUid = mUidCached;
+ }
+ return audioPatch;
+}
+
+sp<DeviceDescriptor> AudioPolicyManager::fillAudioPortConfigForDevice(
+ const DeviceVector& devices, audio_devices_t device, audio_port_config *config) {
+ DeviceVector deviceList = devices.getDevicesFromType(device);
+ ALOG_ASSERT(!deviceList.isEmpty(),
+ "%s() selected device type %#x is not in devices list", __func__, device);
+ sp<DeviceDescriptor> deviceDesc = deviceList.itemAt(0);
+ deviceDesc->toAudioPortConfig(config);
+ return deviceDesc;
}
void AudioPolicyManager::setPhoneState(audio_mode_t state)
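
The createTelephonyPatch() helper above factors the previously duplicated RX/TX patch construction into one path whose only direction-dependent step is which endpoint becomes the device source and which becomes the device sink. A rough standalone model of that direction handling, not part of the patch, with the patch reduced to its two device endpoints and the device names used as stand-ins:

#include <string>

// Reduced model of an audio patch: one device source feeding one device sink.
struct TelephonyPatch {
    std::string source;
    std::string sink;
};

// For RX the selected device is the sink and the telephony RX device is the
// source; for TX the roles are mirrored, as in createTelephonyPatch().
TelephonyPatch buildTelephonyPatch(bool isRx, const std::string& selectedDevice) {
    if (isRx) {
        return {"TELEPHONY_RX", selectedDevice};
    }
    return {selectedDevice, "TELEPHONY_TX"};
}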
@@ -616,6 +633,16 @@
setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
}
}
+
+ // reevaluate routing on all outputs in case tracks have been started during the call
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+ audio_devices_t newDevice = getNewOutputDevice(desc, true /*fromCache*/);
+ if (state != AUDIO_MODE_IN_CALL || desc != mPrimaryOutput) {
+ setOutputDevice(desc, newDevice, (newDevice != AUDIO_DEVICE_NONE), 0 /*delayMs*/);
+ }
+ }
+
// if entering in call state, handle special case of active streams
// pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(state)) {
@@ -685,9 +712,7 @@
}
}
- Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ for (const auto& activeDesc : mInputs.getActiveInputs()) {
audio_devices_t newDevice = getNewInputDevice(activeDesc);
// Force new input selection if the new device can not be reached via current input
if (activeDesc->mProfile->getSupportedDevices().types() &
@@ -723,12 +748,8 @@
sp<IOProfile> profile;
- for (size_t i = 0; i < mHwModules.size(); i++) {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
- sp<IOProfile> curProfile = mHwModules[i]->mOutputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& curProfile : hwModule->getOutputProfiles()) {
if (!curProfile->isCompatibleProfile(device, String8(""),
samplingRate, NULL /*updatedSamplingRate*/,
format, NULL /*updatedFormat*/,
@@ -753,20 +774,22 @@
return profile;
}
-audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
+audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
{
routing_strategy strategy = getStrategy(stream);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
- ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x",
- device, stream, samplingRate, format, channelMask, flags);
- return getOutputForDevice(device, AUDIO_SESSION_ALLOCATE, stream, samplingRate, format,
- channelMask, flags, offloadInfo);
+ // Note that related method getOutputForAttr() uses getOutputForDevice() not selectOutput().
+ // We use selectOutput() here since we don't have the desired AudioTrack sample rate,
+ // format, flags, etc. This may result in some discrepancy for functions that utilize
+ // getOutput() solely on audio_stream_type such as AudioSystem::getOutputFrameCount()
+ // and AudioSystem::getOutputSamplingRate().
+
+ SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
+ audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
+
+ ALOGV("getOutput() stream %d selected device %08x, output %d", stream, device, output);
+ return output;
}
status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
@@ -775,7 +798,7 @@
audio_stream_type_t *stream,
uid_t uid,
const audio_config_t *config,
- audio_output_flags_t flags,
+ audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId)
{
@@ -827,12 +850,7 @@
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
- for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
- if (mAvailableOutputDevices[i]->getId() == *selectedDeviceId) {
- deviceDesc = mAvailableOutputDevices[i];
- break;
- }
- }
+ deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
}
mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
@@ -840,15 +858,14 @@
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
if ((attributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
- flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
+ *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
}
- ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
- device, config->sample_rate, config->format, config->channel_mask, flags);
+ ALOGV("getOutputForAttr() device 0x%x, sampling rate %d, format %#x, channel mask %#x, "
+ "flags %#x",
+ device, config->sample_rate, config->format, config->channel_mask, *flags);
- *output = getOutputForDevice(device, session, *stream,
- config->sample_rate, config->format, config->channel_mask,
- flags, &config->offload_info);
+ *output = getOutputForDevice(device, session, *stream, config, flags);
if (*output == AUDIO_IO_HANDLE_NONE) {
mOutputRoutes.removeRoute(session);
return INVALID_OPERATION;
@@ -867,92 +884,53 @@
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
+ const audio_config_t *config,
+ audio_output_flags_t *flags)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status;
-#ifdef AUDIO_POLICY_TEST
- if (mCurOutput != 0) {
- ALOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelMask %x, mDirectOutput %d",
- mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput);
-
- if (mTestOutputs[mCurOutput] == 0) {
- ALOGV("getOutput() opening test output");
- sp<AudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(NULL,
- mpClientInterface);
- outputDesc->mDevice = mTestDevice;
- outputDesc->mLatency = mTestLatencyMs;
- outputDesc->mFlags =
- (audio_output_flags_t)(mDirectOutput ? AUDIO_OUTPUT_FLAG_DIRECT : 0);
- outputDesc->mRefCount[stream] = 0;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = mTestSamplingRate;
- config.channel_mask = mTestChannels;
- config.format = mTestFormat;
- if (offloadInfo != NULL) {
- config.offload_info = *offloadInfo;
- }
- status = mpClientInterface->openOutput(0,
- &mTestOutputs[mCurOutput],
- &config,
- &outputDesc->mDevice,
- String8(""),
- &outputDesc->mLatency,
- outputDesc->mFlags);
- if (status == NO_ERROR) {
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mFormat = config.format;
- outputDesc->mChannelMask = config.channel_mask;
- AudioParameter outputCmd = AudioParameter();
- outputCmd.addInt(String8("set_id"),mCurOutput);
- mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
- addOutput(mTestOutputs[mCurOutput], outputDesc);
- }
- }
- return mTestOutputs[mCurOutput];
- }
-#endif //AUDIO_POLICY_TEST
-
// open a direct output if required by specified parameters
//force direct flag if offload flag is set: offloading implies a direct output stream
// and all common behaviors are driven by checking only the direct flag
// this should normally be set appropriately in the policy configuration file
- if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
- flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
+ if ((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
- if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
- flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
+ if ((*flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
+ *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
// only allow deep buffering for music stream type
if (stream != AUDIO_STREAM_MUSIC) {
- flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
+ *flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
} else if (/* stream == AUDIO_STREAM_MUSIC && */
- flags == AUDIO_OUTPUT_FLAG_NONE &&
+ *flags == AUDIO_OUTPUT_FLAG_NONE &&
property_get_bool("audio.deep_buffer.media", false /* default_value */)) {
// use DEEP_BUFFER as default output for music stream type
- flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
}
if (stream == AUDIO_STREAM_TTS) {
- flags = AUDIO_OUTPUT_FLAG_TTS;
+ *flags = AUDIO_OUTPUT_FLAG_TTS;
} else if (stream == AUDIO_STREAM_VOICE_CALL &&
- audio_is_linear_pcm(format)) {
- flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
+ audio_is_linear_pcm(config->format)) {
+ *flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
AUDIO_OUTPUT_FLAG_DIRECT);
ALOGV("Set VoIP and Direct output flags for PCM format");
+ } else if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
+ stream == AUDIO_STREAM_MUSIC &&
+ audio_is_linear_pcm(config->format) &&
+ isInCall()) {
+ *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
}
+
sp<IOProfile> profile;
// skip direct output selection if the request can obviously be attached to a mixed output
// and not explicitly requested
- if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
- audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX &&
- audio_channel_count_from_out_mask(channelMask) <= 2) {
+ if (((*flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
+ audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX &&
+ audio_channel_count_from_out_mask(config->channel_mask) <= 2) {
goto non_direct_output;
}
@@ -963,105 +941,66 @@
// This may prevent offloading in rare situations where effects are left active by apps
// in the background.
- if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
+ if (((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
!(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
profile = getProfileForDirectOutput(device,
- samplingRate,
- format,
- channelMask,
- (audio_output_flags_t)flags);
+ config->sample_rate,
+ config->format,
+ config->channel_mask,
+ (audio_output_flags_t)*flags);
}
if (profile != 0) {
- sp<SwAudioOutputDescriptor> outputDesc = NULL;
-
+ // exclusive outputs for MMAP and Offload are enforced by different session ids.
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
- outputDesc = desc;
// reuse direct output if currently open by the same client
// and configured with same parameters
- if ((samplingRate == outputDesc->mSamplingRate) &&
- audio_formats_match(format, outputDesc->mFormat) &&
- (channelMask == outputDesc->mChannelMask)) {
- if (session == outputDesc->mDirectClientSession) {
- outputDesc->mDirectOpenCount++;
- ALOGV("getOutput() reusing direct output %d for session %d",
- mOutputs.keyAt(i), session);
- return mOutputs.keyAt(i);
- } else {
- ALOGV("getOutput() do not reuse direct output because current client (%d) "
- "is not the same as requesting client (%d)",
- outputDesc->mDirectClientSession, session);
- goto non_direct_output;
- }
+ if ((config->sample_rate == desc->mSamplingRate) &&
+ (config->format == desc->mFormat) &&
+ (config->channel_mask == desc->mChannelMask) &&
+ (session == desc->mDirectClientSession)) {
+ desc->mDirectOpenCount++;
+ ALOGI("getOutputForDevice() reusing direct output %d for session %d",
+ mOutputs.keyAt(i), session);
+ return mOutputs.keyAt(i);
}
}
}
- // close direct output if currently open and configured with different parameters
- if (outputDesc != NULL) {
- closeOutput(outputDesc->mIoHandle);
+
+ if (!profile->canOpenNewIo()) {
+ goto non_direct_output;
}
- // if the selected profile is offloaded and no offload info was specified,
- // create a default one
- audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
- if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
- flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
- defaultOffloadInfo.sample_rate = samplingRate;
- defaultOffloadInfo.channel_mask = channelMask;
- defaultOffloadInfo.format = format;
- defaultOffloadInfo.stream_type = stream;
- defaultOffloadInfo.bit_rate = 0;
- defaultOffloadInfo.duration_us = -1;
- defaultOffloadInfo.has_video = true; // conservative
- defaultOffloadInfo.is_streaming = true; // likely
- offloadInfo = &defaultOffloadInfo;
- }
+ sp<SwAudioOutputDescriptor> outputDesc =
+ new SwAudioOutputDescriptor(profile, mpClientInterface);
- outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
- outputDesc->mDevice = device;
- outputDesc->mLatency = 0;
- outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = samplingRate;
- config.channel_mask = channelMask;
- config.format = format;
- if (offloadInfo != NULL) {
- config.offload_info = *offloadInfo;
- }
DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
: String8("");
- status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &outputDesc->mDevice,
- address,
- &outputDesc->mLatency,
- outputDesc->mFlags);
+
+ status = outputDesc->open(config, device, address, stream, *flags, &output);
// only accept an output with the requested parameters
if (status != NO_ERROR ||
- (samplingRate != 0 && samplingRate != config.sample_rate) ||
- (format != AUDIO_FORMAT_DEFAULT && !audio_formats_match(format, config.format)) ||
- (channelMask != 0 && channelMask != config.channel_mask)) {
- ALOGV("getOutput() failed opening direct output: output %d samplingRate %d %d,"
- "format %d %d, channelMask %04x %04x", output, samplingRate,
- outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
- outputDesc->mChannelMask);
+ (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
+ (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
+ (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
+ ALOGV("getOutputForDevice() failed opening direct output: output %d sample rate %d %d,"
+ "format %d %d, channel mask %04x %04x", output, config->sample_rate,
+ outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
+ config->channel_mask, outputDesc->mChannelMask);
if (output != AUDIO_IO_HANDLE_NONE) {
- mpClientInterface->closeOutput(output);
+ outputDesc->close();
}
// fall back to mixer output if possible when the direct output could not be open
- if (audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX) {
+ if (audio_is_linear_pcm(config->format) &&
+ config->sample_rate <= SAMPLE_RATE_HZ_MAX) {
goto non_direct_output;
}
return AUDIO_IO_HANDLE_NONE;
}
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mChannelMask = config.channel_mask;
- outputDesc->mFormat = config.format;
outputDesc->mRefCount[stream] = 0;
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
@@ -1069,7 +1008,7 @@
addOutput(output, outputDesc);
mPreviousOutputs = mOutputs;
- ALOGV("getOutput() returns new direct output %d", output);
+ ALOGV("getOutputForDevice() returns new direct output %d", output);
mpClientInterface->onAudioPortListUpdate();
return output;
}
@@ -1078,7 +1017,7 @@
// A request for HW A/V sync cannot fallback to a mixed output because time
// stamps are embedded in audio data
- if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
+ if ((*flags & (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ)) != 0) {
return AUDIO_IO_HANDLE_NONE;
}
@@ -1087,17 +1026,18 @@
// open a non direct output
// for non direct outputs, only PCM is supported
- if (audio_is_linear_pcm(format)) {
+ if (audio_is_linear_pcm(config->format)) {
// get which output is suitable for the specified stream. The actual
// routing change will happen when startOutput() will be called
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
- flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
- output = selectOutput(outputs, flags, format);
+ *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
+ output = selectOutput(outputs, *flags, config->format);
}
- ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d,"
- "format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags);
+ ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
+ "sampling rate %d, format %#x, channels %#x, flags %#x",
+ stream, config->sample_rate, config->format, config->channel_mask, *flags);
return output;
}
@@ -1115,26 +1055,26 @@
// 4: the first output in the list
if (outputs.size() == 0) {
- return 0;
+ return AUDIO_IO_HANDLE_NONE;
}
if (outputs.size() == 1) {
return outputs[0];
}
int maxCommonFlags = 0;
- audio_io_handle_t outputForFlags = 0;
- audio_io_handle_t outputForPrimary = 0;
- audio_io_handle_t outputForFormat = 0;
+ audio_io_handle_t outputForFlags = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputForPrimary = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputForFormat = AUDIO_IO_HANDLE_NONE;
audio_format_t bestFormat = AUDIO_FORMAT_INVALID;
audio_format_t bestFormatForFlags = AUDIO_FORMAT_INVALID;
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (!outputDesc->isDuplicated()) {
// if a valid format is specified, skip output if not compatible
if (format != AUDIO_FORMAT_INVALID) {
if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
- if (!audio_formats_match(format, outputDesc->mFormat)) {
+ if (format != outputDesc->mFormat) {
continue;
}
} else if (!audio_is_linear_pcm(format)) {
@@ -1142,7 +1082,7 @@
}
if (AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormat, format)) {
- outputForFormat = outputs[i];
+ outputForFormat = output;
bestFormat = outputDesc->mFormat;
}
}
@@ -1150,31 +1090,32 @@
int commonFlags = popcount(outputDesc->mProfile->getFlags() & flags);
if (commonFlags >= maxCommonFlags) {
if (commonFlags == maxCommonFlags) {
- if (AudioPort::isBetterFormatMatch(
- outputDesc->mFormat, bestFormatForFlags, format)) {
- outputForFlags = outputs[i];
+ if (format != AUDIO_FORMAT_INVALID
+ && AudioPort::isBetterFormatMatch(
+ outputDesc->mFormat, bestFormatForFlags, format)) {
+ outputForFlags = output;
bestFormatForFlags = outputDesc->mFormat;
}
} else {
- outputForFlags = outputs[i];
+ outputForFlags = output;
maxCommonFlags = commonFlags;
bestFormatForFlags = outputDesc->mFormat;
}
- ALOGV("selectOutput() commonFlags for output %d, %04x", outputs[i], commonFlags);
+ ALOGV("selectOutput() commonFlags for output %d, %04x", output, commonFlags);
}
if (outputDesc->mProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
- outputForPrimary = outputs[i];
+ outputForPrimary = output;
}
}
}
- if (outputForFlags != 0) {
+ if (outputForFlags != AUDIO_IO_HANDLE_NONE) {
return outputForFlags;
}
- if (outputForFormat != 0) {
+ if (outputForFormat != AUDIO_IO_HANDLE_NONE) {
return outputForFormat;
}
- if (outputForPrimary != 0) {
+ if (outputForPrimary != AUDIO_IO_HANDLE_NONE) {
return outputForPrimary;
}
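// Illustrative sketch (not the AOSP code): selectOutput() above applies a fixed
// priority -- best flags match, then best format match, then the primary output,
// then the first candidate. The struct fields below are simplified stand-ins for
// the descriptor state used in the real implementation.
#include <vector>

struct Candidate {
    int handle;          // stand-in for audio_io_handle_t
    int commonFlagCount; // popcount of flags shared with the request
    bool formatMatches;  // best format match for the request
    bool isPrimary;      // has AUDIO_OUTPUT_FLAG_PRIMARY
};

int selectByPriority(const std::vector<Candidate>& outputs) {
    constexpr int kNone = 0;  // stand-in for AUDIO_IO_HANDLE_NONE
    int forFlags = kNone, forFormat = kNone, forPrimary = kNone;
    int maxCommonFlags = 0;
    for (const Candidate& c : outputs) {
        if (c.commonFlagCount > maxCommonFlags) {
            forFlags = c.handle;
            maxCommonFlags = c.commonFlagCount;
        }
        if (c.formatMatches && forFormat == kNone) forFormat = c.handle;
        if (c.isPrimary) forPrimary = c.handle;
    }
    if (forFlags != kNone) return forFlags;
    if (forFormat != kNone) return forFormat;
    if (forPrimary != kNone) return forPrimary;
    return outputs.empty() ? kNone : outputs.front().handle;
}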
@@ -1195,6 +1136,11 @@
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+ status_t status = outputDesc->start();
+ if (status != NO_ERROR) {
+ return status;
+ }
+
// Routing?
mOutputRoutes.incRouteActivity(session);
@@ -1209,19 +1155,22 @@
} else {
newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
}
- } else if (mOutputRoutes.hasRouteChanged(session)) {
+ } else if (mOutputRoutes.getAndClearRouteChanged(session)) {
newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
- checkStrategyRoute(getStrategy(stream), output);
+ if (newDevice != outputDesc->device()) {
+ checkStrategyRoute(getStrategy(stream), output);
+ }
} else {
newDevice = AUDIO_DEVICE_NONE;
}
uint32_t delayMs = 0;
- status_t status = startSource(outputDesc, stream, newDevice, address, &delayMs);
+ status = startSource(outputDesc, stream, newDevice, address, &delayMs);
if (status != NO_ERROR) {
mOutputRoutes.decRouteActivity(session);
+ outputDesc->stop();
return status;
}
// Automatically enable the remote submix input when output is started on a re routing mix
@@ -1268,6 +1217,12 @@
bool force = !outputDesc->isActive() &&
(outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE);
+ // requiresMuteCheck is false when we can bypass mute strategy.
+ // It covers a common case when there is no materially active audio
+ // and muting would result in unnecessary delay and dropped audio.
+ const uint32_t outputLatencyMs = outputDesc->latency();
+ bool requiresMuteCheck = outputDesc->isActive(outputLatencyMs * 2); // account for drain
+
// increment usage count for this stream on the requested output:
// NOTE that the usage count is the same for duplicated output and hardware output which is
// necessary for a correct control of hardware output routing by startOutput() and stopOutput()
@@ -1291,29 +1246,44 @@
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (desc != outputDesc) {
+ // An output has a shared device if
+ // - managed by the same hw module
+ // - supports the currently selected device
+ const bool sharedDevice = outputDesc->sharesHwModuleWith(desc)
+ && (desc->supportedDevices() & device) != AUDIO_DEVICE_NONE;
+
// force a device change if any other output is:
// - managed by the same hw module
- // - has a current device selection that differs from selected device.
// - supports currently selected device
+ // - has a current device selection that differs from selected device.
// - has an active audio patch
// In this case, the audio HAL must receive the new device selection so that it can
- // change the device currently selected by the other active output.
- if (outputDesc->sharesHwModuleWith(desc) &&
+ // change the device currently selected by the other output.
+ if (sharedDevice &&
desc->device() != device &&
- desc->supportedDevices() & device &&
desc->getPatchHandle() != AUDIO_PATCH_HANDLE_NONE) {
force = true;
}
// wait for audio on other active outputs to be presented when starting
// a notification so that audio focus effect can propagate, or that a mute/unmute
// event occurred for beacon
- uint32_t latency = desc->latency();
- if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) {
- waitMs = latency;
+ const uint32_t latencyMs = desc->latency();
+ const bool isActive = desc->isActive(latencyMs * 2); // account for drain
+
+ if (shouldWait && isActive && (waitMs < latencyMs)) {
+ waitMs = latencyMs;
}
+
+ // Require mute check if another output is on a shared device
+ // and currently active to have proper drain and avoid pops.
+ // Note restoring AudioTracks onto this output needs to invoke
+ // a volume ramp if there is no mute.
+ requiresMuteCheck |= sharedDevice && isActive;
}
}
- uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force, 0, NULL, address);
+
+ const uint32_t muteWaitMs =
+ setOutputDevice(outputDesc, device, force, 0, NULL, address, requiresMuteCheck);
// handle special case for sonification while in call
if (isInCall()) {
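// Illustrative sketch (not the AOSP code): a condensed model of the routing
// decision made in startSource() above. The mute check is required only when this
// output, or another active output sharing the selected device, may still be
// draining audio; "force" is raised when a sharing output is routed to a different
// device and holds an audio patch. Field names below are assumptions.
#include <cstdint>
#include <vector>

struct OutputState {
    bool sharesHwModule;
    bool supportsSelectedDevice;
    bool onDifferentDevice;
    bool hasAudioPatch;
    bool activeWithinTwoLatencies;  // models isActive(latency * 2)
    uint32_t latencyMs;
};

struct RoutingDecision {
    bool force = false;
    bool requiresMuteCheck = false;
    uint32_t waitMs = 0;
};

RoutingDecision decideRouting(bool thisOutputDraining, bool shouldWait,
                              const std::vector<OutputState>& others) {
    RoutingDecision d;
    d.requiresMuteCheck = thisOutputDraining;
    for (const OutputState& o : others) {
        const bool sharedDevice = o.sharesHwModule && o.supportsSelectedDevice;
        if (sharedDevice && o.onDifferentDevice && o.hasAudioPatch) d.force = true;
        if (shouldWait && o.activeWithinTwoLatencies && d.waitMs < o.latencyMs)
            d.waitMs = o.latencyMs;
        d.requiresMuteCheck |= sharedDevice && o.activeWithinTwoLatencies;
    }
    return d;
}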
@@ -1338,6 +1308,19 @@
if (waitMs > muteWaitMs) {
*delayMs = waitMs - muteWaitMs;
}
+
+ // FIXME: A device change (muteWaitMs > 0) likely introduces a volume change.
+ // A volume change enacted by APM with 0 delay is not synchronous, as it goes
+ // via AudioCommandThread to AudioFlinger. Hence it is possible that the volume
+ // change occurs after the MixerThread starts and causes a stream volume
+ // glitch.
+ //
+ // We do not introduce additional delay here.
+ }
+
+ if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
+ mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
+ setStrategyMute(STRATEGY_SONIFICATION, true, outputDesc);
}
if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
@@ -1386,7 +1369,12 @@
}
}
- return stopSource(outputDesc, stream, forceDeviceUpdate);
+ status_t status = stopSource(outputDesc, stream, forceDeviceUpdate);
+
+ if (status == NO_ERROR) {
+ outputDesc->stop();
+ }
+ return status;
}
status_t AudioPolicyManager::stopSource(const sp<AudioOutputDescriptor>& outputDesc,
@@ -1427,6 +1415,7 @@
(newDevice != desc->device())) {
audio_devices_t newDevice2 = getNewOutputDevice(desc, false /*fromCache*/);
bool force = desc->device() != newDevice2;
+
setOutputDevice(desc,
newDevice2,
force,
@@ -1467,19 +1456,6 @@
return;
}
-#ifdef AUDIO_POLICY_TEST
- int testIndex = testOutputIndex(output);
- if (testIndex != 0) {
- sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
- if (outputDesc->isActive()) {
- mpClientInterface->closeOutput(output);
- removeOutput(output);
- mTestOutputs[testIndex] = 0;
- }
- return;
- }
-#endif //AUDIO_POLICY_TEST
-
// Routing
mOutputRoutes.removeRoute(session);
@@ -1508,7 +1484,7 @@
input_type_t *inputType,
audio_port_handle_t *portId)
{
- ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
+ ALOGV("getInputForAttr() source %d, sampling rate %d, format %#x, channel mask %#x,"
"session %d, flags %#x",
attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
@@ -1520,15 +1496,14 @@
AudioMix *policyMix = NULL;
DeviceVector inputDevices;
+ if (inputSource == AUDIO_SOURCE_DEFAULT) {
+ inputSource = AUDIO_SOURCE_MIC;
+ }
+
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
- for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
- if (mAvailableInputDevices[i]->getId() == *selectedDeviceId) {
- deviceDesc = mAvailableInputDevices[i];
- break;
- }
- }
+ deviceDesc = mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
}
mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);
@@ -1551,14 +1526,19 @@
}
// For MMAP mode, the first call to getInputForAttr() is made on behalf of audioflinger.
// The second call is for the first active client and sets the UID. Any further call
- // corresponds to a new client and is only permitted from the same UId.
+ // corresponds to a new client and is only permitted from the same UID.
+ // If the first UID is silenced, allow a new UID connection and replace it with the new UID
if (audioSession->openCount() == 1) {
audioSession->setUid(uid);
} else if (audioSession->uid() != uid) {
- ALOGW("getInputForAttr() bad uid %d for session %d uid %d",
- uid, session, audioSession->uid());
- status = INVALID_OPERATION;
- goto error;
+ if (!audioSession->isSilenced()) {
+ ALOGW("getInputForAttr() bad uid %d for session %d uid %d",
+ uid, session, audioSession->uid());
+ status = INVALID_OPERATION;
+ goto error;
+ }
+ audioSession->setUid(uid);
+ audioSession->setSilenced(false);
}
audioSession->changeOpenCount(1);
*inputType = API_INPUT_LEGACY;
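// Illustrative sketch (not the AOSP code): the MMAP session ownership rule
// described above. The first open is made on behalf of audioflinger, the second
// open claims the UID, and later opens from a different UID are rejected unless
// the current owner is silenced, in which case the new UID takes over.
#include <cstdint>

struct Session {
    int openCount = 0;
    uint32_t uid = 0;
    bool silenced = false;
};

enum class ClaimResult { Ok, Rejected };

ClaimResult claimSession(Session& s, uint32_t uid) {
    s.openCount++;
    if (s.openCount == 2) {           // second open: first real client sets the UID
        s.uid = uid;
        return ClaimResult::Ok;
    }
    if (s.openCount > 2 && s.uid != uid) {
        if (!s.silenced) return ClaimResult::Rejected;
        s.uid = uid;                  // a silenced owner is replaced by the new UID
        s.silenced = false;
    }
    return ClaimResult::Ok;
}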
@@ -1576,9 +1556,6 @@
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
- if (inputSource == AUDIO_SOURCE_DEFAULT) {
- inputSource = AUDIO_SOURCE_MIC;
- }
halInputSource = inputSource;
// TODO: check for existing client for this port ID
@@ -1628,7 +1605,7 @@
}
*input = getInputForDevice(device, address, session, uid, inputSource,
- config->sample_rate, config->format, config->channel_mask, flags,
+ config, flags,
policyMix);
if (*input == AUDIO_IO_HANDLE_NONE) {
status = INVALID_OPERATION;
@@ -1655,9 +1632,7 @@
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix)
{
@@ -1676,18 +1651,20 @@
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
} else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
- audio_is_linear_pcm(format)) {
+ audio_is_linear_pcm(config->format)) {
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
}
// find a compatible input profile (not necessarily identical in parameters)
sp<IOProfile> profile;
- // samplingRate and flags may be updated by getInputProfile
- uint32_t profileSamplingRate = (samplingRate == 0) ? SAMPLE_RATE_HZ_DEFAULT : samplingRate;
- audio_format_t profileFormat = format;
- audio_channel_mask_t profileChannelMask = channelMask;
+ // sampling rate and flags may be updated by getInputProfile
+ uint32_t profileSamplingRate = (config->sample_rate == 0) ?
+ SAMPLE_RATE_HZ_DEFAULT : config->sample_rate;
+ audio_format_t profileFormat;
+ audio_channel_mask_t profileChannelMask = config->channel_mask;
audio_input_flags_t profileFlags = flags;
for (;;) {
+ profileFormat = config->format; // reset each time through loop, in case it is updated
profile = getInputProfile(device, address,
profileSamplingRate, profileFormat, profileChannelMask,
profileFlags);
@@ -1698,13 +1675,14 @@
} else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
} else { // fail
- ALOGW("getInputForDevice() could not find profile for device 0x%X,"
- "samplingRate %u, format %#x, channelMask 0x%X, flags %#x",
- device, samplingRate, format, channelMask, flags);
+ ALOGW("getInputForDevice() could not find profile for device 0x%X, "
+ "sampling rate %u, format %#x, channel mask 0x%X, flags %#x",
+ device, config->sample_rate, config->format, config->channel_mask, flags);
return input;
}
}
// Pick input sampling rate if not specified by client
+ uint32_t samplingRate = config->sample_rate;
if (samplingRate == 0) {
samplingRate = profileSamplingRate;
}
@@ -1715,14 +1693,14 @@
}
sp<AudioSession> audioSession = new AudioSession(session,
- inputSource,
- format,
- samplingRate,
- channelMask,
- flags,
- uid,
- isSoundTrigger,
- policyMix, mpClientInterface);
+ inputSource,
+ config->format,
+ samplingRate,
+ config->channel_mask,
+ flags,
+ uid,
+ isSoundTrigger,
+ policyMix, mpClientInterface);
// FIXME: disable concurrent capture until UI is ready
#if 0
@@ -1766,8 +1744,8 @@
// can be selected.
if (!isConcurrentSource(inputSource) &&
((desc->mSamplingRate != samplingRate ||
- desc->mChannelMask != channelMask ||
- !audio_formats_match(desc->mFormat, format)) &&
+ desc->mChannelMask != config->channel_mask ||
+ !audio_formats_match(desc->mFormat, config->format)) &&
(source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
source_priority(inputSource)))) {
reusedInputDesc = desc;
@@ -1790,44 +1768,40 @@
}
#endif
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = profileSamplingRate;
- config.channel_mask = profileChannelMask;
- config.format = profileFormat;
+ if (!profile->canOpenNewIo()) {
+ return AUDIO_IO_HANDLE_NONE;
+ }
+
+ sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface);
+
+ audio_config_t lConfig = AUDIO_CONFIG_INITIALIZER;
+ lConfig.sample_rate = profileSamplingRate;
+ lConfig.channel_mask = profileChannelMask;
+ lConfig.format = profileFormat;
if (address == "") {
DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
- // the inputs vector must be of size 1, but we don't want to crash here
+ // the inputs vector must be of size >= 1, but we don't want to crash here
address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
}
- status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
- &input,
- &config,
- &device,
- address,
- halInputSource,
- profileFlags);
+ status_t status = inputDesc->open(&lConfig, device, address,
+ halInputSource, profileFlags, &input);
// only accept input with the exact requested set of parameters
if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
- (profileSamplingRate != config.sample_rate) ||
- !audio_formats_match(profileFormat, config.format) ||
- (profileChannelMask != config.channel_mask)) {
- ALOGW("getInputForAttr() failed opening input: samplingRate %d"
- ", format %d, channelMask %x",
- samplingRate, format, channelMask);
+ (profileSamplingRate != lConfig.sample_rate) ||
+ !audio_formats_match(profileFormat, lConfig.format) ||
+ (profileChannelMask != lConfig.channel_mask)) {
+ ALOGW("getInputForAttr() failed opening input: sampling rate %d"
+ ", format %#x, channel mask %#x",
+ profileSamplingRate, profileFormat, profileChannelMask);
if (input != AUDIO_IO_HANDLE_NONE) {
- mpClientInterface->closeInput(input);
+ inputDesc->close();
}
return AUDIO_IO_HANDLE_NONE;
}
- sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
- inputDesc->mSamplingRate = profileSamplingRate;
- inputDesc->mFormat = profileFormat;
- inputDesc->mChannelMask = profileChannelMask;
- inputDesc->mDevice = device;
inputDesc->mPolicyMix = policyMix;
inputDesc->addAudioSession(session, audioSession);
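// Illustrative sketch (not the AOSP code): the acceptance rule used above after
// opening the input. The HAL may adjust the requested configuration; the policy
// keeps the input only if the returned configuration matches the requested profile
// exactly, otherwise it closes the stream and gives up. Types are stand-ins.
#include <cstdint>

struct InputConfig {
    uint32_t sampleRate;
    uint32_t channelMask;
    int format;              // stand-in for audio_format_t
};

bool acceptOpenedInput(bool openSucceeded, bool validHandle,
                       const InputConfig& requested, const InputConfig& returned) {
    return openSucceeded && validHandle &&
           requested.sampleRate == returned.sampleRate &&
           requested.channelMask == returned.channelMask &&
           requested.format == returned.format;  // audio_formats_match() in the real code
}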
@@ -1868,9 +1842,7 @@
return true;
}
- Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeInput = activeInputs[i];
+ for (const auto& activeInput : mInputs.getActiveInputs()) {
if (!isConcurrentSource(activeInput->inputSource(true)) &&
!is_virtual_input_device(activeInput->mDevice)) {
return false;
@@ -1917,10 +1889,15 @@
status_t AudioPolicyManager::startInput(audio_io_handle_t input,
audio_session_t session,
+ bool silenced,
concurrency_type__mask_t *concurrency)
{
- ALOGV("startInput() input %d", input);
+
+ ALOGV("AudioPolicyManager::startInput(input:%d, session:%d, silenced:%d, concurrency:%d)",
+ input, session, silenced, *concurrency);
+
*concurrency = API_INPUT_CONCURRENCY_NONE;
+
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
ALOGW("startInput() unknown input %d", input);
@@ -1952,17 +1929,37 @@
if (mCallTxPatch != 0 &&
inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
ALOGW("startInput(%d) failed: call in progress", input);
+ *concurrency |= API_INPUT_CONCURRENCY_CALL;
return INVALID_OPERATION;
}
- Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ Vector<sp<AudioInputDescriptor>> activeInputs = mInputs.getActiveInputs();
- if (is_virtual_input_device(activeDesc->mDevice)) {
- continue;
+ // If a UID is idle and recording silence, and a non-silenced recording starts
+ // from another UID (idle or active), we stop the current idle UID's recording
+ // in favor of the new one - "There can be only one" TM
+ if (!silenced) {
+ for (const auto& activeDesc : activeInputs) {
+ if ((audioSession->flags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0 &&
+ activeDesc->getId() == inputDesc->getId()) {
+ continue;
+ }
+
+ AudioSessionCollection activeSessions = activeDesc->getAudioSessions(
+ true /*activeOnly*/);
+ sp<AudioSession> activeSession = activeSessions.valueAt(0);
+ if (activeSession->isSilenced()) {
+ audio_io_handle_t activeInput = activeDesc->mIoHandle;
+ audio_session_t activeSessionId = activeSession->session();
+ stopInput(activeInput, activeSessionId);
+ releaseInput(activeInput, activeSessionId);
+ ALOGV("startInput(%d) stopping silenced input %d", input, activeInput);
+ activeInputs = mInputs.getActiveInputs();
+ }
}
+ }
+ for (const auto& activeDesc : activeInputs) {
if ((audioSession->flags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0 &&
activeDesc->getId() == inputDesc->getId()) {
continue;
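// Illustrative sketch (not the AOSP code): the "there can be only one" rule above.
// When a non-silenced capture starts, any currently active capture whose owning
// UID is silenced (idle in the background) is stopped and released so the new
// client can record. stopAndRelease stands in for stopInput() + releaseInput().
#include <vector>

struct ActiveCapture {
    int ioHandle;
    int sessionId;
    bool silenced;
};

void preemptSilencedCaptures(std::vector<ActiveCapture>& active,
                             void (*stopAndRelease)(int ioHandle, int sessionId)) {
    for (auto it = active.begin(); it != active.end();) {
        if (it->silenced) {
            stopAndRelease(it->ioHandle, it->sessionId);
            it = active.erase(it);   // the silenced capture gives way to the new one
        } else {
            ++it;
        }
    }
}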
@@ -1975,17 +1972,20 @@
ALOGW("startInput(%d) failed for HOTWORD: "
"other input %d already started for HOTWORD",
input, activeDesc->mIoHandle);
+ *concurrency |= API_INPUT_CONCURRENCY_HOTWORD;
return INVALID_OPERATION;
}
} else {
ALOGV("startInput(%d) failed for HOTWORD: other input %d already started",
input, activeDesc->mIoHandle);
+ *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
return INVALID_OPERATION;
}
} else {
if (activeSource != AUDIO_SOURCE_HOTWORD) {
ALOGW("startInput(%d) failed: other input %d already started",
input, activeDesc->mIoHandle);
+ *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
return INVALID_OPERATION;
}
}
@@ -1998,13 +1998,7 @@
inputDesc->isSoundTrigger() ? soundTriggerSupportsConcurrentCapture() : false;
// if capture is allowed, preempt currently active HOTWORD captures
- for (size_t i = 0; i < activeInputs.size(); i++) {
- sp<AudioInputDescriptor> activeDesc = activeInputs[i];
-
- if (is_virtual_input_device(activeDesc->mDevice)) {
- continue;
- }
-
+ for (const auto& activeDesc : activeInputs) {
if (allowConcurrentWithSoundTrigger && activeDesc->isSoundTrigger()) {
continue;
}
@@ -2016,6 +2010,7 @@
audio_session_t activeSession = activeSessions.keyAt(0);
audio_io_handle_t activeHandle = activeDesc->mIoHandle;
SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
+ *concurrency |= API_INPUT_CONCURRENCY_PREEMPT;
sessions.add(activeSession);
inputDesc->setPreemptedSessions(sessions);
stopInput(activeHandle, activeSession);
@@ -2027,6 +2022,9 @@
}
#endif
+ // Make sure we start with the correct silence state
+ audioSession->setSilenced(silenced);
+
// increment activity count before calling getNewInputDevice() below as only active sessions
// are considered for device selection
audioSession->changeActiveCount(1);
@@ -2034,12 +2032,19 @@
// Routing?
mInputRoutes.incRouteActivity(session);
- if (audioSession->activeCount() == 1 || mInputRoutes.hasRouteChanged(session)) {
+ if (audioSession->activeCount() == 1 || mInputRoutes.getAndClearRouteChanged(session)) {
// indicate active capture to sound trigger service if starting capture from a mic on
// primary HW module
audio_devices_t device = getNewInputDevice(inputDesc);
setInputDevice(input, device, true /* force */);
+ status_t status = inputDesc->start();
+ if (status != NO_ERROR) {
+ mInputRoutes.decRouteActivity(session);
+ audioSession->changeActiveCount(-1);
+ return status;
+ }
+
if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
@@ -2106,7 +2111,7 @@
mInputRoutes.decRouteActivity(session);
if (audioSession->activeCount() == 0) {
-
+ inputDesc->stop();
if (inputDesc->isActive()) {
setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
} else {
@@ -2152,7 +2157,6 @@
void AudioPolicyManager::releaseInput(audio_io_handle_t input,
audio_session_t session)
{
-
ALOGV("releaseInput() %d", input);
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
@@ -2195,7 +2199,7 @@
void AudioPolicyManager::closeAllInputs() {
bool patchRemoved = false;
- for(size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+ for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (patch_index >= 0) {
@@ -2204,8 +2208,9 @@
mAudioPatches.removeItemsAt(patch_index);
patchRemoved = true;
}
- mpClientInterface->closeInput(mInputs.keyAt(input_index));
+ inputDesc->close();
}
+ mInputRoutes.clear();
mInputs.clear();
SoundTrigger::setCaptureState(false);
nextAudioPortGeneration();
@@ -2236,7 +2241,10 @@
audio_devices_t device)
{
- if ((index < mVolumeCurves->getVolumeIndexMin(stream)) ||
+ // VOICE_CALL stream has minVolumeIndex > 0 but can be muted directly by an
+ // app that has MODIFY_PHONE_STATE permission.
+ if (((index < mVolumeCurves->getVolumeIndexMin(stream)) &&
+ !(stream == AUDIO_STREAM_VOICE_CALL && index == 0)) ||
(index > mVolumeCurves->getVolumeIndexMax(stream))) {
return BAD_VALUE;
}
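// Illustrative sketch (not the AOSP code): the index check above. VOICE_CALL
// normally has a minimum index greater than zero, but an app holding the
// MODIFY_PHONE_STATE permission may mute it outright, so index 0 is accepted for
// that stream even though it is below the configured minimum.
bool isValidVolumeIndex(int index, int minIndex, int maxIndex, bool isVoiceCall) {
    if (index > maxIndex) return false;
    if (index < minIndex) {
        return isVoiceCall && index == 0;  // privileged mute of the call stream
    }
    return true;
}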
@@ -2359,21 +2367,21 @@
audio_io_handle_t outputDeepBuffer = AUDIO_IO_HANDLE_NONE;
audio_io_handle_t outputPrimary = AUDIO_IO_HANDLE_NONE;
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
if (activeOnly && !desc->isStreamActive(AUDIO_STREAM_MUSIC)) {
continue;
}
- ALOGV("selectOutputForMusicEffects activeOnly %d outputs[%zu] flags 0x%08x",
- activeOnly, i, desc->mFlags);
+ ALOGV("selectOutputForMusicEffects activeOnly %d output %d flags 0x%08x",
+ activeOnly, output, desc->mFlags);
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
- outputOffloaded = outputs[i];
+ outputOffloaded = output;
}
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
- outputDeepBuffer = outputs[i];
+ outputDeepBuffer = output;
}
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) != 0) {
- outputPrimary = outputs[i];
+ outputPrimary = output;
}
}
if (outputOffloaded != AUDIO_IO_HANDLE_NONE) {
@@ -2484,23 +2492,16 @@
break;
}
if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
- // Loop back through "remote submix"
- if (rSubmixModule == 0) {
- for (size_t j = 0; i < mHwModules.size(); j++) {
- if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
- && mHwModules[j]->mHandle != 0) {
- rSubmixModule = mHwModules[j];
- break;
- }
- }
- }
-
ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK", i, mixes.size());
-
if (rSubmixModule == 0) {
- ALOGE(" Unable to find audio module for submix, aborting mix %zu registration", i);
- res = INVALID_OPERATION;
- break;
+ rSubmixModule = mHwModules.getModuleFromName(
+ AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
+ if (rSubmixModule == 0) {
+ ALOGE(" Unable to find audio module for submix, aborting mix %zu registration",
+ i);
+ res = INVALID_OPERATION;
+ break;
+ }
}
String8 address = mixes[i].mDeviceAddress;
@@ -2579,24 +2580,19 @@
status_t res = NO_ERROR;
sp<HwModule> rSubmixModule;
// examine each mix's route type
- for (size_t i = 0; i < mixes.size(); i++) {
- if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
+ for (const auto& mix : mixes) {
+ if ((mix.mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
if (rSubmixModule == 0) {
- for (size_t j = 0; i < mHwModules.size(); j++) {
- if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
- && mHwModules[j]->mHandle != 0) {
- rSubmixModule = mHwModules[j];
- break;
- }
+ rSubmixModule = mHwModules.getModuleFromName(
+ AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
+ if (rSubmixModule == 0) {
+ res = INVALID_OPERATION;
+ continue;
}
}
- if (rSubmixModule == 0) {
- res = INVALID_OPERATION;
- continue;
- }
- String8 address = mixes[i].mDeviceAddress;
+ String8 address = mix.mDeviceAddress;
if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
res = INVALID_OPERATION;
@@ -2618,8 +2614,8 @@
rSubmixModule->removeOutputProfile(address);
rSubmixModule->removeInputProfile(address);
- } if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
- if (mPolicyMixes.unregisterMix(mixes[i].mDeviceAddress) != NO_ERROR) {
+ } if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+ if (mPolicyMixes.unregisterMix(mix.mDeviceAddress) != NO_ERROR) {
res = INVALID_OPERATION;
continue;
}
@@ -2671,7 +2667,7 @@
mAvailableOutputDevices.dump(fd, String8("Available output"));
mAvailableInputDevices.dump(fd, String8("Available input"));
- mHwModules.dump(fd);
+ mHwModulesAll.dump(fd);
mOutputs.dump(fd);
mInputs.dump(fd);
mVolumeCurves->dump(fd);
@@ -2776,23 +2772,23 @@
// do not report devices with type AUDIO_DEVICE_IN_STUB or AUDIO_DEVICE_OUT_STUB
// as they are used by stub HALs by convention
if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
- for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
- if (mAvailableOutputDevices[i]->type() == AUDIO_DEVICE_OUT_STUB) {
+ for (const auto& dev : mAvailableOutputDevices) {
+ if (dev->type() == AUDIO_DEVICE_OUT_STUB) {
continue;
}
if (portsWritten < portsMax) {
- mAvailableOutputDevices[i]->toAudioPort(&ports[portsWritten++]);
+ dev->toAudioPort(&ports[portsWritten++]);
}
(*num_ports)++;
}
}
if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
- for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
- if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_STUB) {
+ for (const auto& dev : mAvailableInputDevices) {
+ if (dev->type() == AUDIO_DEVICE_IN_STUB) {
continue;
}
if (portsWritten < portsMax) {
- mAvailableInputDevices[i]->toAudioPort(&ports[portsWritten++]);
+ dev->toAudioPort(&ports[portsWritten++]);
}
(*num_ports)++;
}
@@ -2823,9 +2819,32 @@
return NO_ERROR;
}
-status_t AudioPolicyManager::getAudioPort(struct audio_port *port __unused)
+status_t AudioPolicyManager::getAudioPort(struct audio_port *port)
{
- return NO_ERROR;
+ if (port == nullptr || port->id == AUDIO_PORT_HANDLE_NONE) {
+ return BAD_VALUE;
+ }
+ sp<DeviceDescriptor> dev = mAvailableOutputDevices.getDeviceFromId(port->id);
+ if (dev != 0) {
+ dev->toAudioPort(port);
+ return NO_ERROR;
+ }
+ dev = mAvailableInputDevices.getDeviceFromId(port->id);
+ if (dev != 0) {
+ dev->toAudioPort(port);
+ return NO_ERROR;
+ }
+ sp<SwAudioOutputDescriptor> out = mOutputs.getOutputFromId(port->id);
+ if (out != 0) {
+ out->toAudioPort(port);
+ return NO_ERROR;
+ }
+ sp<AudioInputDescriptor> in = mInputs.getInputFromId(port->id);
+ if (in != 0) {
+ in->toAudioPort(port);
+ return NO_ERROR;
+ }
+ return BAD_VALUE;
}
status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
@@ -3031,7 +3050,7 @@
sinkDeviceDesc->toAudioPortConfig(&newPatch.sinks[i], &patch->sinks[i]);
// create a software bridge in PatchPanel if:
- // - source and sink devices are on differnt HW modules OR
+ // - source and sink devices are on different HW modules OR
// - audio HAL version is < 3.0
if (!srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) ||
(srcDeviceDesc->mModule->getHalVersionMajor() < 3)) {
@@ -3287,8 +3306,8 @@
}
}
// reroute outputs if necessary
- for (size_t i = 0; i < affectedStrategies.size(); i++) {
- checkStrategyRoute(affectedStrategies[i], AUDIO_IO_HANDLE_NONE);
+ for (const auto& strategy : affectedStrategies) {
+ checkStrategyRoute(strategy, AUDIO_IO_HANDLE_NONE);
}
// remove input routes associated with this uid
@@ -3310,8 +3329,8 @@
inputsToClose.add(inputDesc->mIoHandle);
}
}
- for (size_t i = 0; i < inputsToClose.size(); i++) {
- closeInput(inputsToClose[i]);
+ for (const auto& input : inputsToClose) {
+ closeInput(input);
}
}
@@ -3414,6 +3433,11 @@
ALOGV("%s output for device %08x is duplicated", __FUNCTION__, sinkDevice);
return INVALID_OPERATION;
}
+ status_t status = outputDesc->start();
+ if (status != NO_ERROR) {
+ return status;
+ }
+
// create a special patch with no sink and two sources:
// - the second source indicates to PatchPanel through which output mix this patch should
// be connected as well as the stream type for volume control
@@ -3424,7 +3448,7 @@
srcDeviceDesc->toAudioPortConfig(&patch->sources[0], NULL);
outputDesc->toAudioPortConfig(&patch->sources[1], NULL);
patch->sources[1].ext.mix.usecase.stream = stream;
- status_t status = mpClientInterface->createAudioPatch(patch,
+ status = mpClientInterface->createAudioPatch(patch,
&afPatchHandle,
0);
ALOGV("%s patch panel returned %d patchHandle %d", __FUNCTION__,
@@ -3488,8 +3512,8 @@
offloaded.push(desc->mIoHandle);
}
}
- for (size_t i = 0; i < offloaded.size(); ++i) {
- closeOutput(offloaded[i]);
+ for (const auto& handle : offloaded) {
+ closeOutput(handle);
}
}
// update master mono for all remaining outputs
@@ -3511,6 +3535,292 @@
return computeVolume(stream, index, device);
}
+status_t AudioPolicyManager::getSupportedFormats(audio_io_handle_t ioHandle,
+ FormatVector& formats) {
+ if (ioHandle == AUDIO_IO_HANDLE_NONE) {
+ return BAD_VALUE;
+ }
+ String8 reply;
+ reply = mpClientInterface->getParameters(
+ ioHandle, String8(AudioParameter::keyStreamSupportedFormats));
+ ALOGV("%s: supported formats %s", __FUNCTION__, reply.string());
+ AudioParameter repliedParameters(reply);
+ if (repliedParameters.get(
+ String8(AudioParameter::keyStreamSupportedFormats), reply) != NO_ERROR) {
+ ALOGE("%s: failed to retrieve format, bailing out", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ for (auto format : formatsFromString(reply.string())) {
+ // Only AUDIO_FORMAT_AAC_LC will be used in Settings UI for all AAC formats.
+ for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+ if (format == AAC_FORMATS[i]) {
+ format = AUDIO_FORMAT_AAC_LC;
+ break;
+ }
+ }
+ bool exist = false;
+ for (size_t i = 0; i < formats.size(); i++) {
+ if (format == formats[i]) {
+ exist = true;
+ break;
+ }
+ }
+ bool isSurroundFormat = false;
+ for (size_t i = 0; i < ARRAY_SIZE(SURROUND_FORMATS); i++) {
+ if (SURROUND_FORMATS[i] == format) {
+ isSurroundFormat = true;
+ break;
+ }
+ }
+ if (!exist && isSurroundFormat) {
+ formats.add(format);
+ }
+ }
+ return NO_ERROR;
+}
+
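// Illustrative sketch (not the AOSP code): the filtering done by
// getSupportedFormats() above. Every AAC variant reported by the HAL is collapsed
// to a single AAC_LC entry for the Settings UI, duplicates are dropped, and only
// formats from the known surround list are kept. Strings stand in for
// audio_format_t values; the container choices are assumptions.
#include <algorithm>
#include <set>
#include <string>
#include <vector>

std::vector<std::string> filterSurroundFormats(
        const std::vector<std::string>& reported,
        const std::set<std::string>& aacVariants,
        const std::set<std::string>& surroundFormats) {
    std::vector<std::string> out;
    for (std::string f : reported) {
        if (aacVariants.count(f)) f = "AAC_LC";  // collapse all AAC variants
        const bool isSurround = surroundFormats.count(f) != 0;
        const bool seen = std::find(out.begin(), out.end(), f) != out.end();
        if (isSurround && !seen) out.push_back(f);
    }
    return out;
}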
+status_t AudioPolicyManager::getSurroundFormats(unsigned int *numSurroundFormats,
+ audio_format_t *surroundFormats,
+ bool *surroundFormatsEnabled,
+ bool reported)
+{
+ if (numSurroundFormats == NULL || (*numSurroundFormats != 0 &&
+ (surroundFormats == NULL || surroundFormatsEnabled == NULL))) {
+ return BAD_VALUE;
+ }
+ ALOGV("getSurroundFormats() numSurroundFormats %d surroundFormats %p surroundFormatsEnabled %p",
+ *numSurroundFormats, surroundFormats, surroundFormatsEnabled);
+
+ // Only return value if there is HDMI output.
+ if ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_HDMI) == 0) {
+ return INVALID_OPERATION;
+ }
+
+ size_t formatsWritten = 0;
+ size_t formatsMax = *numSurroundFormats;
+ *numSurroundFormats = 0;
+ FormatVector formats;
+ if (reported) {
+ // Only get surround formats which are reported by device.
+ // First list already open outputs that can be routed to this device
+ audio_devices_t device = AUDIO_DEVICE_OUT_HDMI;
+ SortedVector<audio_io_handle_t> outputs;
+ bool reportedFormatFound = false;
+ status_t status;
+ sp<SwAudioOutputDescriptor> desc;
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ desc = mOutputs.valueAt(i);
+ if (!desc->isDuplicated() && (desc->supportedDevices() & device)) {
+ outputs.add(mOutputs.keyAt(i));
+ }
+ }
+ // Open an output to query dynamic parameters.
+ DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromType(
+ AUDIO_DEVICE_OUT_HDMI);
+ for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
+ String8 address = hdmiOutputDevices[i]->mAddress;
+ for (const auto& hwModule : mHwModules) {
+ for (size_t i = 0; i < hwModule->getOutputProfiles().size(); i++) {
+ sp<IOProfile> profile = hwModule->getOutputProfiles()[i];
+ if (profile->supportDevice(AUDIO_DEVICE_OUT_HDMI) &&
+ profile->supportDeviceAddress(address)) {
+ size_t j;
+ for (j = 0; j < outputs.size(); j++) {
+ desc = mOutputs.valueFor(outputs.itemAt(j));
+ if (!desc->isDuplicated() && desc->mProfile == profile) {
+ break;
+ }
+ }
+ if (j != outputs.size()) {
+ status = getSupportedFormats(outputs.itemAt(j), formats);
+ reportedFormatFound |= (status == NO_ERROR);
+ continue;
+ }
+
+ if (!profile->canOpenNewIo()) {
+ ALOGW("Max Output number %u already opened for this profile %s",
+ profile->maxOpenCount, profile->getTagName().c_str());
+ continue;
+ }
+
+ ALOGV("opening output for device %08x with params %s profile %p name %s",
+ device, address.string(), profile.get(), profile->getName().string());
+ desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
+ audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+ status_t status = desc->open(nullptr, device, address,
+ AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE,
+ &output);
+
+ if (status == NO_ERROR) {
+ status = getSupportedFormats(output, formats);
+ reportedFormatFound |= (status == NO_ERROR);
+ desc->close();
+ output = AUDIO_IO_HANDLE_NONE;
+ }
+ }
+ }
+ }
+ }
+
+ if (!reportedFormatFound) {
+ return UNKNOWN_ERROR;
+ }
+ } else {
+ for (size_t i = 0; i < ARRAY_SIZE(SURROUND_FORMATS); i++) {
+ formats.add(SURROUND_FORMATS[i]);
+ }
+ }
+ for (size_t i = 0; i < formats.size(); i++) {
+ if (formatsWritten < formatsMax) {
+ surroundFormats[formatsWritten] = formats[i];
+ bool formatEnabled = false;
+ if (formats[i] == AUDIO_FORMAT_AAC_LC) {
+ for (size_t j = 0; j < ARRAY_SIZE(AAC_FORMATS); j++) {
+ formatEnabled =
+ mSurroundFormats.find(AAC_FORMATS[j]) != mSurroundFormats.end();
+ break;
+ }
+ } else {
+ formatEnabled = mSurroundFormats.find(formats[i]) != mSurroundFormats.end();
+ }
+ surroundFormatsEnabled[formatsWritten++] = formatEnabled;
+ }
+ (*numSurroundFormats)++;
+ }
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
+{
+ // Check if the audio format is a surround format.
+ bool isSurroundFormat = false;
+ for (size_t i = 0; i < ARRAY_SIZE(SURROUND_FORMATS); i++) {
+ if (audioFormat == SURROUND_FORMATS[i]) {
+ isSurroundFormat = true;
+ break;
+ }
+ }
+ if (!isSurroundFormat) {
+ return BAD_VALUE;
+ }
+
+ // Should only be called when MANUAL.
+ audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
+ AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
+ if (forceUse != AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
+ return INVALID_OPERATION;
+ }
+
+ if ((mSurroundFormats.find(audioFormat) != mSurroundFormats.end() && enabled)
+ || (mSurroundFormats.find(audioFormat) == mSurroundFormats.end() && !enabled)) {
+ return NO_ERROR;
+ }
+
+ // The operation is valid only when there is HDMI output available.
+ if ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_HDMI) == 0) {
+ return INVALID_OPERATION;
+ }
+
+ if (enabled) {
+ if (audioFormat == AUDIO_FORMAT_AAC_LC) {
+ for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+ mSurroundFormats.insert(AAC_FORMATS[i]);
+ }
+ } else {
+ mSurroundFormats.insert(audioFormat);
+ }
+ } else {
+ if (audioFormat == AUDIO_FORMAT_AAC_LC) {
+ for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+ mSurroundFormats.erase(AAC_FORMATS[i]);
+ }
+ } else {
+ mSurroundFormats.erase(audioFormat);
+ }
+ }
+
+ sp<SwAudioOutputDescriptor> outputDesc;
+ bool profileUpdated = false;
+ DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromType(
+ AUDIO_DEVICE_OUT_HDMI);
+ for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
+ // Simulate reconnection to update enabled surround sound formats.
+ String8 address = hdmiOutputDevices[i]->mAddress;
+ String8 name = hdmiOutputDevices[i]->getName();
+ status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
+ AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address.c_str(),
+ name.c_str());
+ if (status != NO_ERROR) {
+ continue;
+ }
+ status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address.c_str(),
+ name.c_str());
+ profileUpdated |= (status == NO_ERROR);
+ }
+ DeviceVector hdmiInputDevices = mAvailableInputDevices.getDevicesFromType(
+ AUDIO_DEVICE_IN_HDMI);
+ for (size_t i = 0; i < hdmiInputDevices.size(); i++) {
+ // Simulate reconnection to update enabled surround sound formats.
+ String8 address = hdmiInputDevices[i]->mAddress;
+ String8 name = hdmiInputDevices[i]->getName();
+ status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
+ AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address.c_str(),
+ name.c_str());
+ if (status != NO_ERROR) {
+ continue;
+ }
+ status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address.c_str(),
+ name.c_str());
+ profileUpdated |= (status == NO_ERROR);
+ }
+
+ // Undo the surround formats change if no audio profile was updated.
+ if (!profileUpdated) {
+ if (enabled) {
+ if (audioFormat == AUDIO_FORMAT_AAC_LC) {
+ for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+ mSurroundFormats.erase(AAC_FORMATS[i]);
+ }
+ } else {
+ mSurroundFormats.erase(audioFormat);
+ }
+ } else {
+ if (audioFormat == AUDIO_FORMAT_AAC_LC) {
+ for (size_t i = 0; i < ARRAY_SIZE(AAC_FORMATS); i++) {
+ mSurroundFormats.insert(AAC_FORMATS[i]);
+ }
+ } else {
+ mSurroundFormats.insert(audioFormat);
+ }
+ }
+ }
+
+ return profileUpdated ? NO_ERROR : INVALID_OPERATION;
+}
+
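// Illustrative sketch (not the AOSP code): the enable/disable flow of
// setSurroundFormatEnabled() above. The format (expanded to all AAC variants for
// AAC_LC) is added to or removed from the enabled set, every HDMI device is
// "reconnected" so the profiles pick up the change, and the change is rolled back
// if no profile was updated. The callback and int format codes are stand-ins.
#include <functional>
#include <set>
#include <vector>

bool setSurroundEnabled(std::set<int>& enabled, const std::vector<int>& expandedFormats,
                        bool enable, const std::function<bool()>& reconnectHdmiDevices) {
    // Apply the requested change.
    for (int f : expandedFormats) {
        if (enable) enabled.insert(f); else enabled.erase(f);
    }
    // Simulate unplug/replug of HDMI so the new format list is re-read.
    const bool profileUpdated = reconnectHdmiDevices();
    if (!profileUpdated) {
        // Undo the change: no audio profile actually picked it up.
        for (int f : expandedFormats) {
            if (enable) enabled.erase(f); else enabled.insert(f);
        }
    }
    return profileUpdated;
}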
+void AudioPolicyManager::setRecordSilenced(uid_t uid, bool silenced)
+{
+ ALOGV("AudioPolicyManager:setRecordSilenced(uid:%d, silenced:%d)", uid, silenced);
+
+ Vector<sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ AudioSessionCollection activeSessions = activeDesc->getAudioSessions(true);
+ for (size_t j = 0; j < activeSessions.size(); j++) {
+ sp<AudioSession> activeSession = activeSessions.valueAt(j);
+ if (activeSession->uid() == uid) {
+ activeSession->setSilenced(silenced);
+ }
+ }
+ }
+}
+
status_t AudioPolicyManager::disconnectAudioSource(const sp<AudioSourceDescriptor>& sourceDesc)
{
ALOGV("%s handle %d", __FUNCTION__, sourceDesc->getHandle());
@@ -3526,7 +3836,10 @@
audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
sp<SwAudioOutputDescriptor> swOutputDesc = sourceDesc->mSwOutput.promote();
if (swOutputDesc != 0) {
- stopSource(swOutputDesc, stream, false);
+ status_t status = stopSource(swOutputDesc, stream, false);
+ if (status == NO_ERROR) {
+ swOutputDesc->stop();
+ }
mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
} else {
sp<HwAudioOutputDescriptor> hwOutputDesc = sourceDesc->mHwOutput.promote();
@@ -3562,7 +3875,7 @@
// ----------------------------------------------------------------------------
uint32_t AudioPolicyManager::nextAudioPortGeneration()
{
- return android_atomic_inc(&mAudioPortGeneration);
+ return mAudioPortGeneration++;
}
#ifdef USE_XML_AUDIO_POLICY_CONF
@@ -3599,13 +3912,22 @@
}
#endif
-AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
+AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface,
+ bool /*forTesting*/)
:
-#ifdef AUDIO_POLICY_TEST
- Thread(false),
-#endif //AUDIO_POLICY_TEST
+ mUidCached(getuid()),
+ mpClientInterface(clientInterface),
mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
mA2dpSuspended(false),
+#ifdef USE_XML_AUDIO_POLICY_CONF
+ mVolumeCurves(new VolumeCurvesCollection()),
+ mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
+ mDefaultOutputDevice, static_cast<VolumeCurvesCollection*>(mVolumeCurves.get())),
+#else
+ mVolumeCurves(new StreamDescriptorCollection()),
+ mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
+ mDefaultOutputDevice),
+#endif
mAudioPortGeneration(1),
mBeaconMuteRefCount(0),
mBeaconPlayingRefCount(0),
@@ -3615,70 +3937,72 @@
mMusicEffectOutput(AUDIO_IO_HANDLE_NONE),
mHasComputedSoundTriggerSupportsConcurrentCapture(false)
{
- mUidCached = getuid();
- mpClientInterface = clientInterface;
+}
- // TODO: remove when legacy conf file is removed. true on devices that use DRC on the
- // DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
- // Note: remove also speaker_drc_enabled from global configuration of XML config file.
- bool speakerDrcEnabled = false;
+AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
+ : AudioPolicyManager(clientInterface, false /*forTesting*/)
+{
+ loadConfig();
+ initialize();
+}
+void AudioPolicyManager::loadConfig() {
#ifdef USE_XML_AUDIO_POLICY_CONF
- mVolumeCurves = new VolumeCurvesCollection();
- AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
- mDefaultOutputDevice, speakerDrcEnabled,
- static_cast<VolumeCurvesCollection *>(mVolumeCurves));
- if (deserializeAudioPolicyXmlConfig(config) != NO_ERROR) {
+ if (deserializeAudioPolicyXmlConfig(getConfig()) != NO_ERROR) {
#else
- mVolumeCurves = new StreamDescriptorCollection();
- AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
- mDefaultOutputDevice, speakerDrcEnabled);
- if ((ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, config) != NO_ERROR) &&
- (ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, config) != NO_ERROR)) {
+ if ((ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, getConfig()) != NO_ERROR)
+ && (ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, getConfig()) != NO_ERROR)) {
#endif
ALOGE("could not load audio policy configuration file, setting defaults");
- config.setDefault();
+ getConfig().setDefault();
}
- // must be done after reading the policy (since conditionned by Speaker Drc Enabling)
- mVolumeCurves->initializeVolumeCurves(speakerDrcEnabled);
+}
+
+status_t AudioPolicyManager::initialize() {
+ mVolumeCurves->initializeVolumeCurves(getConfig().isSpeakerDrcEnabled());
// Once policy config has been parsed, retrieve an instance of the engine and initialize it.
audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
if (!engineInstance) {
ALOGE("%s: Could not get an instance of policy engine", __FUNCTION__);
- return;
+ return NO_INIT;
}
// Retrieve the Policy Manager Interface
mEngine = engineInstance->queryInterface<AudioPolicyManagerInterface>();
if (mEngine == NULL) {
ALOGE("%s: Failed to get Policy Engine Interface", __FUNCTION__);
- return;
+ return NO_INIT;
}
mEngine->setObserver(this);
status_t status = mEngine->initCheck();
- (void) status;
- ALOG_ASSERT(status == NO_ERROR, "Policy engine not initialized(err=%d)", status);
+ if (status != NO_ERROR) {
+ LOG_FATAL("Policy engine not initialized(err=%d)", status);
+ return status;
+ }
// mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
// open all output streams needed to access attached devices
audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
- for (size_t i = 0; i < mHwModules.size(); i++) {
- mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->getName());
- if (mHwModules[i]->mHandle == 0) {
- ALOGW("could not open HW module %s", mHwModules[i]->getName());
+ for (const auto& hwModule : mHwModulesAll) {
+ hwModule->setHandle(mpClientInterface->loadHwModule(hwModule->getName()));
+ if (hwModule->getHandle() == AUDIO_MODULE_HANDLE_NONE) {
+ ALOGW("could not open HW module %s", hwModule->getName());
continue;
}
+ mHwModules.push_back(hwModule);
// open all output streams needed to access attached devices
// except for direct output streams that are only opened when they are actually
// required by an app.
// This also validates mAvailableOutputDevices list
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
- {
- const sp<IOProfile> outProfile = mHwModules[i]->mOutputProfiles[j];
-
+ for (const auto& outProfile : hwModule->getOutputProfiles()) {
+ if (!outProfile->canOpenNewIo()) {
+ ALOGE("Invalid Output profile max open count %u for profile %s",
+ outProfile->maxOpenCount, outProfile->getTagName().c_str());
+ continue;
+ }
if (!outProfile->hasSupportedDevices()) {
- ALOGW("Output profile contains no device on module %s", mHwModules[i]->getName());
+ ALOGW("Output profile contains no device on module %s", hwModule->getName());
continue;
}
if ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_TTS) != 0) {
@@ -3705,35 +4029,20 @@
const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
: String8("");
-
- outputDesc->mDevice = profileType;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = outputDesc->mSamplingRate;
- config.channel_mask = outputDesc->mChannelMask;
- config.format = outputDesc->mFormat;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openOutput(outProfile->getModuleHandle(),
- &output,
- &config,
- &outputDesc->mDevice,
- address,
- &outputDesc->mLatency,
- outputDesc->mFlags);
+ status_t status = outputDesc->open(nullptr, profileType, address,
+ AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
ALOGW("Cannot open output stream for device %08x on hw module %s",
outputDesc->mDevice,
- mHwModules[i]->getName());
+ hwModule->getName());
} else {
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mChannelMask = config.channel_mask;
- outputDesc->mFormat = config.format;
-
- for (size_t k = 0; k < supportedDevices.size(); k++) {
- ssize_t index = mAvailableOutputDevices.indexOf(supportedDevices[k]);
+ for (const auto& dev : supportedDevices) {
+ ssize_t index = mAvailableOutputDevices.indexOf(dev);
// give a valid ID to an attached device once confirmed it is reachable
if (index >= 0 && !mAvailableOutputDevices[index]->isAttached()) {
- mAvailableOutputDevices[index]->attach(mHwModules[i]);
+ mAvailableOutputDevices[index]->attach(hwModule);
}
}
if (mPrimaryOutput == 0 &&
@@ -3742,21 +4051,23 @@
}
addOutput(output, outputDesc);
setOutputDevice(outputDesc,
- outputDesc->mDevice,
+ profileType,
true,
0,
NULL,
- address.string());
+ address);
}
}
// open input streams needed to access attached devices to validate
// mAvailableInputDevices list
- for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
- {
- const sp<IOProfile> inProfile = mHwModules[i]->mInputProfiles[j];
-
+ for (const auto& inProfile : hwModule->getInputProfiles()) {
+ if (!inProfile->canOpenNewIo()) {
+ ALOGE("Invalid Input profile max open count %u for profile %s",
+ inProfile->maxOpenCount, inProfile->getTagName().c_str());
+ continue;
+ }
if (!inProfile->hasSupportedDevices()) {
- ALOGW("Input profile contains no device on module %s", mHwModules[i]->getName());
+ ALOGW("Input profile contains no device on module %s", hwModule->getName());
continue;
}
// chose first device present in profile's SupportedDevices also part of
@@ -3767,49 +4078,40 @@
continue;
}
sp<AudioInputDescriptor> inputDesc =
- new AudioInputDescriptor(inProfile);
+ new AudioInputDescriptor(inProfile, mpClientInterface);
- inputDesc->mDevice = profileType;
-
- // find the address
DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
- // the inputs vector must be of size 1, but we don't want to crash here
+ // the inputs vector must be of size >= 1, but we don't want to crash here
String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
: String8("");
ALOGV(" for input device 0x%x using address %s", profileType, address.string());
ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = inputDesc->mSamplingRate;
- config.channel_mask = inputDesc->mChannelMask;
- config.format = inputDesc->mFormat;
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openInput(inProfile->getModuleHandle(),
- &input,
- &config,
- &inputDesc->mDevice,
- address,
- AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE);
+ status_t status = inputDesc->open(nullptr,
+ profileType,
+ address,
+ AUDIO_SOURCE_MIC,
+ AUDIO_INPUT_FLAG_NONE,
+ &input);
if (status == NO_ERROR) {
- const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
- for (size_t k = 0; k < supportedDevices.size(); k++) {
- ssize_t index = mAvailableInputDevices.indexOf(supportedDevices[k]);
+ for (const auto& dev : inProfile->getSupportedDevices()) {
+ ssize_t index = mAvailableInputDevices.indexOf(dev);
// give a valid ID to an attached device once confirmed it is reachable
if (index >= 0) {
sp<DeviceDescriptor> devDesc = mAvailableInputDevices[index];
if (!devDesc->isAttached()) {
- devDesc->attach(mHwModules[i]);
+ devDesc->attach(hwModule);
devDesc->importAudioPort(inProfile, true);
}
}
}
- mpClientInterface->closeInput(input);
+ inputDesc->close();
} else {
ALOGW("Cannot open input stream for device %08x on hw module %s",
- inputDesc->mDevice,
- mHwModules[i]->getName());
+ profileType,
+ hwModule->getName());
}
}
}
@@ -3839,53 +4141,43 @@
// make sure default device is reachable
if (mDefaultOutputDevice == 0 || mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) {
ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->type());
+ status = NO_INIT;
+ }
+ // If a microphone's address is empty, set it according to the device type
+ for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+ if (mAvailableInputDevices[i]->mAddress.isEmpty()) {
+ if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
+ mAvailableInputDevices[i]->mAddress = String8(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
+ } else if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
+ mAvailableInputDevices[i]->mAddress = String8(AUDIO_BACK_MICROPHONE_ADDRESS);
+ }
+ }
}
- ALOGE_IF((mPrimaryOutput == 0), "Failed to open primary output");
+ if (mPrimaryOutput == 0) {
+ ALOGE("Failed to open primary output");
+ status = NO_INIT;
+ }
updateDevicesAndOutputs();
-
-#ifdef AUDIO_POLICY_TEST
- if (mPrimaryOutput != 0) {
- AudioParameter outputCmd = AudioParameter();
- outputCmd.addInt(String8("set_id"), 0);
- mpClientInterface->setParameters(mPrimaryOutput->mIoHandle, outputCmd.toString());
-
- mTestDevice = AUDIO_DEVICE_OUT_SPEAKER;
- mTestSamplingRate = 44100;
- mTestFormat = AUDIO_FORMAT_PCM_16_BIT;
- mTestChannels = AUDIO_CHANNEL_OUT_STEREO;
- mTestLatencyMs = 0;
- mCurOutput = 0;
- mDirectOutput = false;
- for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
- mTestOutputs[i] = 0;
- }
-
- const size_t SIZE = 256;
- char buffer[SIZE];
- snprintf(buffer, SIZE, "AudioPolicyManagerTest");
- run(buffer, ANDROID_PRIORITY_AUDIO);
- }
-#endif //AUDIO_POLICY_TEST
+ return status;
}
AudioPolicyManager::~AudioPolicyManager()
{
-#ifdef AUDIO_POLICY_TEST
- exit();
-#endif //AUDIO_POLICY_TEST
for (size_t i = 0; i < mOutputs.size(); i++) {
- mpClientInterface->closeOutput(mOutputs.keyAt(i));
+ mOutputs.valueAt(i)->close();
}
for (size_t i = 0; i < mInputs.size(); i++) {
- mpClientInterface->closeInput(mInputs.keyAt(i));
+ mInputs.valueAt(i)->close();
}
mAvailableOutputDevices.clear();
mAvailableInputDevices.clear();
mOutputs.clear();
mInputs.clear();
mHwModules.clear();
+ mHwModulesAll.clear();
+ mSurroundFormats.clear();
}
status_t AudioPolicyManager::initCheck()
@@ -3893,170 +4185,13 @@
return hasPrimaryOutput() ? NO_ERROR : NO_INIT;
}
-#ifdef AUDIO_POLICY_TEST
-bool AudioPolicyManager::threadLoop()
-{
- ALOGV("entering threadLoop()");
- while (!exitPending())
- {
- String8 command;
- int valueInt;
- String8 value;
-
- Mutex::Autolock _l(mLock);
- mWaitWorkCV.waitRelative(mLock, milliseconds(50));
-
- command = mpClientInterface->getParameters(0, String8("test_cmd_policy"));
- AudioParameter param = AudioParameter(command);
-
- if (param.getInt(String8("test_cmd_policy"), valueInt) == NO_ERROR &&
- valueInt != 0) {
- ALOGV("Test command %s received", command.string());
- String8 target;
- if (param.get(String8("target"), target) != NO_ERROR) {
- target = "Manager";
- }
- if (param.getInt(String8("test_cmd_policy_output"), valueInt) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_output"));
- mCurOutput = valueInt;
- }
- if (param.get(String8("test_cmd_policy_direct"), value) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_direct"));
- if (value == "false") {
- mDirectOutput = false;
- } else if (value == "true") {
- mDirectOutput = true;
- }
- }
- if (param.getInt(String8("test_cmd_policy_input"), valueInt) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_input"));
- mTestInput = valueInt;
- }
-
- if (param.get(String8("test_cmd_policy_format"), value) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_format"));
- int format = AUDIO_FORMAT_INVALID;
- if (value == "PCM 16 bits") {
- format = AUDIO_FORMAT_PCM_16_BIT;
- } else if (value == "PCM 8 bits") {
- format = AUDIO_FORMAT_PCM_8_BIT;
- } else if (value == "Compressed MP3") {
- format = AUDIO_FORMAT_MP3;
- }
- if (format != AUDIO_FORMAT_INVALID) {
- if (target == "Manager") {
- mTestFormat = format;
- } else if (mTestOutputs[mCurOutput] != 0) {
- AudioParameter outputParam = AudioParameter();
- outputParam.addInt(String8(AudioParameter::keyStreamSupportedFormats), format);
- mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
- }
- }
- }
- if (param.get(String8("test_cmd_policy_channels"), value) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_channels"));
- int channels = 0;
-
- if (value == "Channels Stereo") {
- channels = AUDIO_CHANNEL_OUT_STEREO;
- } else if (value == "Channels Mono") {
- channels = AUDIO_CHANNEL_OUT_MONO;
- }
- if (channels != 0) {
- if (target == "Manager") {
- mTestChannels = channels;
- } else if (mTestOutputs[mCurOutput] != 0) {
- AudioParameter outputParam = AudioParameter();
- outputParam.addInt(String8(AudioParameter::keyStreamSupportedChannels), channels);
- mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
- }
- }
- }
- if (param.getInt(String8("test_cmd_policy_sampleRate"), valueInt) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_sampleRate"));
- if (valueInt >= 0 && valueInt <= 96000) {
- int samplingRate = valueInt;
- if (target == "Manager") {
- mTestSamplingRate = samplingRate;
- } else if (mTestOutputs[mCurOutput] != 0) {
- AudioParameter outputParam = AudioParameter();
- outputParam.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), samplingRate);
- mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
- }
- }
- }
-
- if (param.get(String8("test_cmd_policy_reopen"), value) == NO_ERROR) {
- param.remove(String8("test_cmd_policy_reopen"));
-
- mpClientInterface->closeOutput(mpClientInterface->closeOutput(mPrimaryOutput););
-
- audio_module_handle_t moduleHandle = mPrimaryOutput->getModuleHandle();
-
- removeOutput(mPrimaryOutput->mIoHandle);
- sp<SwAudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(NULL,
- mpClientInterface);
- outputDesc->mDevice = AUDIO_DEVICE_OUT_SPEAKER;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = outputDesc->mSamplingRate;
- config.channel_mask = outputDesc->mChannelMask;
- config.format = outputDesc->mFormat;
- audio_io_handle_t handle;
- status_t status = mpClientInterface->openOutput(moduleHandle,
- &handle,
- &config,
- &outputDesc->mDevice,
- String8(""),
- &outputDesc->mLatency,
- outputDesc->mFlags);
- if (status != NO_ERROR) {
- ALOGE("Failed to reopen hardware output stream, "
- "samplingRate: %d, format %d, channels %d",
- outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannelMask);
- } else {
- outputDesc->mSamplingRate = config.sample_rate;
- outputDesc->mChannelMask = config.channel_mask;
- outputDesc->mFormat = config.format;
- mPrimaryOutput = outputDesc;
- AudioParameter outputCmd = AudioParameter();
- outputCmd.addInt(String8("set_id"), 0);
- mpClientInterface->setParameters(handle, outputCmd.toString());
- addOutput(handle, outputDesc);
- }
- }
-
-
- mpClientInterface->setParameters(0, String8("test_cmd_policy="));
- }
- }
- return false;
-}
-
-void AudioPolicyManager::exit()
-{
- {
- AutoMutex _l(mLock);
- requestExit();
- mWaitWorkCV.signal();
- }
- requestExitAndWait();
-}
-
-int AudioPolicyManager::testOutputIndex(audio_io_handle_t output)
-{
- for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
- if (output == mTestOutputs[i]) return i;
- }
- return 0;
-}
-#endif //AUDIO_POLICY_TEST
-
// ---
-void AudioPolicyManager::addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc)
+void AudioPolicyManager::addOutput(audio_io_handle_t output,
+ const sp<SwAudioOutputDescriptor>& outputDesc)
{
- outputDesc->setIoHandle(output);
mOutputs.add(output, outputDesc);
+ applyStreamVolumes(outputDesc, AUDIO_DEVICE_NONE, 0 /* delayMs */, true /* force */);
updateMono(output); // update mono status when adding to output list
selectOutputForMusicEffects();
nextAudioPortGeneration();
@@ -4068,9 +4203,9 @@
selectOutputForMusicEffects();
}
-void AudioPolicyManager::addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc)
+void AudioPolicyManager::addInput(audio_io_handle_t input,
+ const sp<AudioInputDescriptor>& inputDesc)
{
- inputDesc->setIoHandle(input);
mInputs.add(input, inputDesc);
nextAudioPortGeneration();
}
@@ -4117,19 +4252,15 @@
}
// then look for output profiles that can be routed to this device
SortedVector< sp<IOProfile> > profiles;
- for (size_t i = 0; i < mHwModules.size(); i++)
- {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
- {
- sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (size_t j = 0; j < hwModule->getOutputProfiles().size(); j++) {
+ sp<IOProfile> profile = hwModule->getOutputProfiles()[j];
if (profile->supportDevice(device)) {
if (!device_distinguishes_on_address(device) ||
profile->supportDeviceAddress(address)) {
profiles.add(profile);
- ALOGV("checkOutputsForDevice(): adding profile %zu from module %zu", j, i);
+ ALOGV("checkOutputsForDevice(): adding profile %zu from module %s",
+ j, hwModule->getName());
}
}
}
@@ -4164,30 +4295,20 @@
continue;
}
+ if (!profile->canOpenNewIo()) {
+ ALOGW("Max Output number %u already opened for this profile %s",
+ profile->maxOpenCount, profile->getTagName().c_str());
+ continue;
+ }
+
ALOGV("opening output for device %08x with params %s profile %p name %s",
device, address.string(), profile.get(), profile->getName().string());
desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
- desc->mDevice = device;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = desc->mSamplingRate;
- config.channel_mask = desc->mChannelMask;
- config.format = desc->mFormat;
- config.offload_info.sample_rate = desc->mSamplingRate;
- config.offload_info.channel_mask = desc->mChannelMask;
- config.offload_info.format = desc->mFormat;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &desc->mDevice,
- address,
- &desc->mLatency,
- desc->mFlags);
- if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
+ status_t status = desc->open(nullptr, device, address,
+ AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
+ if (status == NO_ERROR) {
// Here is where the out_set_parameters() for card & device gets called
if (!address.isEmpty()) {
char *param = audio_device_address_to_parameter(device, address);
@@ -4197,27 +4318,21 @@
updateAudioProfiles(device, output, profile->getAudioProfiles());
if (!profile->hasValidAudioProfile()) {
ALOGW("checkOutputsForDevice() missing param");
- mpClientInterface->closeOutput(output);
+ desc->close();
output = AUDIO_IO_HANDLE_NONE;
} else if (profile->hasDynamicAudioProfile()) {
- mpClientInterface->closeOutput(output);
+ desc->close();
output = AUDIO_IO_HANDLE_NONE;
- profile->pickAudioProfile(config.sample_rate, config.channel_mask, config.format);
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ profile->pickAudioProfile(
+ config.sample_rate, config.channel_mask, config.format);
config.offload_info.sample_rate = config.sample_rate;
config.offload_info.channel_mask = config.channel_mask;
config.offload_info.format = config.format;
- status = mpClientInterface->openOutput(profile->getModuleHandle(),
- &output,
- &config,
- &desc->mDevice,
- address,
- &desc->mLatency,
- desc->mFlags);
- if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
- } else {
+
+ status_t status = desc->open(&config, device, address, AUDIO_STREAM_DEFAULT,
+ AUDIO_OUTPUT_FLAG_NONE, &output);
+ if (status != NO_ERROR) {
output = AUDIO_IO_HANDLE_NONE;
}
}
@@ -4239,31 +4354,20 @@
// outputs used by dynamic policy mixes
audio_io_handle_t duplicatedOutput = AUDIO_IO_HANDLE_NONE;
- // set initial stream volume for device
- applyStreamVolumes(desc, device, 0, true);
-
//TODO: configure audio effect output stage here
// open a duplicating output thread for the new output and the primary output
- duplicatedOutput =
- mpClientInterface->openDuplicateOutput(output,
- mPrimaryOutput->mIoHandle);
- if (duplicatedOutput != AUDIO_IO_HANDLE_NONE) {
+ sp<SwAudioOutputDescriptor> dupOutputDesc =
+ new SwAudioOutputDescriptor(NULL, mpClientInterface);
+ status_t status = dupOutputDesc->openDuplicating(mPrimaryOutput, desc,
+ &duplicatedOutput);
+ if (status == NO_ERROR) {
// add duplicated output descriptor
- sp<SwAudioOutputDescriptor> dupOutputDesc =
- new SwAudioOutputDescriptor(NULL, mpClientInterface);
- dupOutputDesc->mOutput1 = mPrimaryOutput;
- dupOutputDesc->mOutput2 = desc;
- dupOutputDesc->mSamplingRate = desc->mSamplingRate;
- dupOutputDesc->mFormat = desc->mFormat;
- dupOutputDesc->mChannelMask = desc->mChannelMask;
- dupOutputDesc->mLatency = desc->mLatency;
addOutput(duplicatedOutput, dupOutputDesc);
- applyStreamVolumes(dupOutputDesc, device, 0, true);
} else {
ALOGW("checkOutputsForDevice() could not open dup output for %d and %d",
mPrimaryOutput->mIoHandle, output);
- mpClientInterface->closeOutput(output);
+ desc->close();
removeOutput(output);
nextAudioPortGeneration();
output = AUDIO_IO_HANDLE_NONE;
@@ -4315,17 +4419,13 @@
}
}
// Clear any profiles associated with the disconnected device.
- for (size_t i = 0; i < mHwModules.size(); i++)
- {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
- {
- sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
+ for (const auto& hwModule : mHwModules) {
+ for (size_t j = 0; j < hwModule->getOutputProfiles().size(); j++) {
+ sp<IOProfile> profile = hwModule->getOutputProfiles()[j];
if (profile->supportDevice(device)) {
ALOGV("checkOutputsForDevice(): "
- "clearing direct output profile %zu on module %zu", j, i);
+ "clearing direct output profile %zu on module %s",
+ j, hwModule->getName());
profile->clearAudioProfiles();
}
}
@@ -4359,23 +4459,18 @@
// then look for input profiles that can be routed to this device
SortedVector< sp<IOProfile> > profiles;
- for (size_t module_idx = 0; module_idx < mHwModules.size(); module_idx++)
- {
- if (mHwModules[module_idx]->mHandle == 0) {
- continue;
- }
+ for (const auto& hwModule : mHwModules) {
for (size_t profile_index = 0;
- profile_index < mHwModules[module_idx]->mInputProfiles.size();
- profile_index++)
- {
- sp<IOProfile> profile = mHwModules[module_idx]->mInputProfiles[profile_index];
+ profile_index < hwModule->getInputProfiles().size();
+ profile_index++) {
+ sp<IOProfile> profile = hwModule->getInputProfiles()[profile_index];
if (profile->supportDevice(device)) {
if (!device_distinguishes_on_address(device) ||
profile->supportDeviceAddress(address)) {
profiles.add(profile);
- ALOGV("checkInputsForDevice(): adding profile %zu from module %zu",
- profile_index, module_idx);
+ ALOGV("checkInputsForDevice(): adding profile %zu from module %s",
+ profile_index, hwModule->getName());
}
}
}
@@ -4391,6 +4486,7 @@
for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
sp<IOProfile> profile = profiles[profile_index];
+
// nothing to do if one input is already opened for this profile
size_t input_index;
for (input_index = 0; input_index < mInputs.size(); input_index++) {
@@ -4406,31 +4502,22 @@
continue;
}
- ALOGV("opening input for device 0x%X with params %s", device, address.string());
- desc = new AudioInputDescriptor(profile);
- desc->mDevice = device;
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = desc->mSamplingRate;
- config.channel_mask = desc->mChannelMask;
- config.format = desc->mFormat;
+ if (!profile->canOpenNewIo()) {
+ ALOGW("Max Input number %u already opened for this profile %s",
+ profile->maxOpenCount, profile->getTagName().c_str());
+ continue;
+ }
+
+ desc = new AudioInputDescriptor(profile, mpClientInterface);
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-
- ALOGV("opening inputput for device %08x with params %s profile %p name %s",
- desc->mDevice, address.string(), profile.get(), profile->getName().string());
-
- status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
- &input,
- &config,
- &desc->mDevice,
- address,
- AUDIO_SOURCE_MIC,
- AUDIO_INPUT_FLAG_NONE /*FIXME*/);
+ status_t status = desc->open(nullptr,
+ device,
+ address,
+ AUDIO_SOURCE_MIC,
+ AUDIO_INPUT_FLAG_NONE,
+ &input);
if (status == NO_ERROR) {
- desc->mSamplingRate = config.sample_rate;
- desc->mChannelMask = config.channel_mask;
- desc->mFormat = config.format;
-
if (!address.isEmpty()) {
char *param = audio_device_address_to_parameter(device, address);
mpClientInterface->setParameters(input, String8(param));
@@ -4439,7 +4526,7 @@
updateAudioProfiles(device, input, profile->getAudioProfiles());
if (!profile->hasValidAudioProfile()) {
ALOGW("checkInputsForDevice() direct input missing param");
- mpClientInterface->closeInput(input);
+ desc->close();
input = AUDIO_IO_HANDLE_NONE;
}
@@ -4477,17 +4564,14 @@
}
}
// Clear any profiles associated with the disconnected device.
- for (size_t module_index = 0; module_index < mHwModules.size(); module_index++) {
- if (mHwModules[module_index]->mHandle == 0) {
- continue;
- }
+ for (const auto& hwModule : mHwModules) {
for (size_t profile_index = 0;
- profile_index < mHwModules[module_index]->mInputProfiles.size();
+ profile_index < hwModule->getInputProfiles().size();
profile_index++) {
- sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index];
+ sp<IOProfile> profile = hwModule->getInputProfiles()[profile_index];
if (profile->supportDevice(device)) {
- ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu",
- profile_index, module_index);
+ ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %s",
+ profile_index, hwModule->getName());
profile->clearAudioProfiles();
}
}
@@ -4515,7 +4599,7 @@
if (dupOutputDesc->isDuplicated() &&
(dupOutputDesc->mOutput1 == outputDesc ||
dupOutputDesc->mOutput2 == outputDesc)) {
- sp<AudioOutputDescriptor> outputDesc2;
+ sp<SwAudioOutputDescriptor> outputDesc2;
if (dupOutputDesc->mOutput1 == outputDesc) {
outputDesc2 = dupOutputDesc->mOutput2;
} else {
@@ -4525,10 +4609,16 @@
// and as they were also referenced on the other output, the reference
// count for their stream type must be adjusted accordingly on
// the other output.
+ bool wasActive = outputDesc2->isActive();
for (int j = 0; j < AUDIO_STREAM_CNT; j++) {
int refCount = dupOutputDesc->mRefCount[j];
outputDesc2->changeRefCount((audio_stream_type_t)j,-refCount);
}
+ // stop() will be a no-op if the output is still active, but is needed in case all
+ // active streams refcounts were cleared above
+ if (wasActive) {
+ outputDesc2->stop();
+ }
audio_io_handle_t duplicatedOutput = mOutputs.keyAt(i);
ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput);
@@ -4547,11 +4637,8 @@
mpClientInterface->onAudioPatchListUpdate();
}
- AudioParameter param;
- param.add(String8("closing"), String8("true"));
- mpClientInterface->setParameters(output, param.toString());
+ outputDesc->close();
- mpClientInterface->closeOutput(output);
removeOutput(output);
mPreviousOutputs = mOutputs;
}
@@ -4576,7 +4663,7 @@
mpClientInterface->onAudioPatchListUpdate();
}
- mpClientInterface->closeInput(input);
+ inputDesc->close();
mInputs.removeItem(input);
}
@@ -4639,17 +4726,26 @@
}
if (!vectorsEqual(srcOutputs,dstOutputs)) {
+ // get maximum latency of all source outputs to determine the minimum mute time guaranteeing
+ // audio from invalidated tracks will be rendered when unmuting
+ uint32_t maxLatency = 0;
+ for (audio_io_handle_t srcOut : srcOutputs) {
+ sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
+ if (desc != 0 && maxLatency < desc->latency()) {
+ maxLatency = desc->latency();
+ }
+ }
ALOGV("checkOutputForStrategy() strategy %d, moving from output %d to output %d",
strategy, srcOutputs[0], dstOutputs[0]);
// mute strategy while moving tracks from one output to another
- for (size_t i = 0; i < srcOutputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(srcOutputs[i]);
- if (isStrategyActive(desc, strategy)) {
+ for (audio_io_handle_t srcOut : srcOutputs) {
+ sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
+ if (desc != 0 && isStrategyActive(desc, strategy)) {
setStrategyMute(strategy, true, desc);
- setStrategyMute(strategy, false, desc, MUTE_TIME_MS, newDevice);
+ setStrategyMute(strategy, false, desc, maxLatency * LATENCY_MUTE_FACTOR, newDevice);
}
sp<AudioSourceDescriptor> source =
- getSourceForStrategyOnOutput(srcOutputs[i], strategy);
+ getSourceForStrategyOnOutput(srcOut, strategy);
if (source != 0){
connectAudioSource(source);
}
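The hunk above replaces the fixed MUTE_TIME_MS (2000 ms, defined in AudioPolicyManager.h) with a mute window scaled to the slowest source output, using the LATENCY_MUTE_FACTOR of 4 that this patch adds to the header further below. A minimal sketch of the resulting computation; the 40 ms latency is only an illustrative value, not taken from the patch:

    // Sketch: take the largest latency among the outputs the strategy is moving away from,
    // then keep the strategy muted for a multiple of it so audio already queued on the old
    // outputs can render before the tracks are unmuted on the new one.
    uint32_t maxLatencyMs = 0;
    for (audio_io_handle_t srcOut : srcOutputs) {
        sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
        if (desc != 0 && desc->latency() > maxLatencyMs) {
            maxLatencyMs = desc->latency();
        }
    }
    uint32_t unmuteDelayMs = maxLatencyMs * LATENCY_MUTE_FACTOR; // e.g. 40 ms * 4 = 160 ms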
@@ -4748,6 +4844,20 @@
}
}
+ // Check if an explicit routing request exists for an active stream on this output and
+ // use it in priority before any other rule
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
+ if (outputDesc->isStreamActive((audio_stream_type_t)stream)) {
+ audio_devices_t forcedDevice =
+ mOutputRoutes.getActiveDeviceForStream(
+ (audio_stream_type_t)stream, mAvailableOutputDevices);
+
+ if (forcedDevice != AUDIO_DEVICE_NONE) {
+ return forcedDevice;
+ }
+ }
+ }
+
// check the following by order of priority to request a routing change if necessary:
// 1: the strategy enforced audible is active and enforced on the output:
// use device for strategy enforced audible
@@ -4809,10 +4919,13 @@
}
}
+ // If we are not in call and no client is active on this input, this method returns
+ // AUDIO_DEVICE_NONE, causing the patch on the input stream to be released.
audio_source_t source = inputDesc->getHighestPrioritySource(true /*activeOnly*/);
- if (isInCall()) {
- device = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
- } else if (source != AUDIO_SOURCE_DEFAULT) {
+ if (source == AUDIO_SOURCE_DEFAULT && isInCall()) {
+ source = AUDIO_SOURCE_VOICE_COMMUNICATION;
+ }
+ if (source != AUDIO_SOURCE_DEFAULT) {
device = getDeviceAndMixForInputSource(source);
}
@@ -4843,9 +4956,8 @@
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
audio_devices_t curDevices =
getDeviceForStrategy((routing_strategy)curStrategy, false /*fromCache*/);
- SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(curDevices, mOutputs);
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+ for (audio_io_handle_t output : getOutputsForDevice(curDevices, mOutputs)) {
+ sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
curDevices |= outputDesc->device();
}
@@ -4953,19 +5065,16 @@
audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
bool fromCache)
{
- // Routing
- // see if we have an explicit route
- // scan the whole RouteMap, for each entry, convert the stream type to a strategy
- // (getStrategy(stream)).
- // if the strategy from the stream type in the RouteMap is the same as the argument above,
- // and activity count is non-zero and the device in the route descriptor is available
- // then select this device.
- for (size_t routeIndex = 0; routeIndex < mOutputRoutes.size(); routeIndex++) {
- sp<SessionRoute> route = mOutputRoutes.valueAt(routeIndex);
- routing_strategy routeStrategy = getStrategy(route->mStreamType);
- if ((routeStrategy == strategy) && route->isActive() &&
- (mAvailableOutputDevices.indexOf(route->mDeviceDescriptor) >= 0)) {
- return route->mDeviceDescriptor->type();
+ // Check if an explicit routing request exists for a stream type corresponding to the
+ // specified strategy and use it in priority over default routing rules.
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
+ if (getStrategy((audio_stream_type_t)stream) == strategy) {
+ audio_devices_t forcedDevice =
+ mOutputRoutes.getActiveDeviceForStream(
+ (audio_stream_type_t)stream, mAvailableOutputDevices);
+ if (forcedDevice != AUDIO_DEVICE_NONE) {
+ return forcedDevice;
+ }
}
}
@@ -5075,21 +5184,24 @@
bool force,
int delayMs,
audio_patch_handle_t *patchHandle,
- const char* address)
+ const char *address,
+ bool requiresMuteCheck)
{
ALOGV("setOutputDevice() device %04x delayMs %d", device, delayMs);
AudioParameter param;
uint32_t muteWaitMs;
if (outputDesc->isDuplicated()) {
- muteWaitMs = setOutputDevice(outputDesc->subOutput1(), device, force, delayMs);
- muteWaitMs += setOutputDevice(outputDesc->subOutput2(), device, force, delayMs);
+ muteWaitMs = setOutputDevice(outputDesc->subOutput1(), device, force, delayMs,
+ nullptr /* patchHandle */, nullptr /* address */, requiresMuteCheck);
+ muteWaitMs += setOutputDevice(outputDesc->subOutput2(), device, force, delayMs,
+ nullptr /* patchHandle */, nullptr /* address */, requiresMuteCheck);
return muteWaitMs;
}
// no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
// output profile
if ((device != AUDIO_DEVICE_NONE) &&
- ((device & outputDesc->supportedDevices()) == 0)) {
+ ((device & outputDesc->supportedDevices()) == AUDIO_DEVICE_NONE)) {
return 0;
}
@@ -5103,7 +5215,14 @@
if (device != AUDIO_DEVICE_NONE) {
outputDesc->mDevice = device;
}
- muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevice, delayMs);
+
+ // if the outputs are not materially active, there is no need to mute.
+ if (requiresMuteCheck) {
+ muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevice, delayMs);
+ } else {
+ ALOGV("%s: suppressing checkDeviceMuteStrategies", __func__);
+ muteWaitMs = 0;
+ }
// Do not change the routing if:
// the requested device is AUDIO_DEVICE_NONE
@@ -5319,27 +5438,46 @@
// TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
// the best matching profile, not the first one.
- for (size_t i = 0; i < mHwModules.size(); i++)
- {
- if (mHwModules[i]->mHandle == 0) {
- continue;
- }
- for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
- {
- sp<IOProfile> profile = mHwModules[i]->mInputProfiles[j];
+ sp<IOProfile> firstInexact;
+ uint32_t updatedSamplingRate = 0;
+ audio_format_t updatedFormat = AUDIO_FORMAT_INVALID;
+ audio_channel_mask_t updatedChannelMask = AUDIO_CHANNEL_INVALID;
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& profile : hwModule->getInputProfiles()) {
// profile->log();
+ //updatedFormat = format;
if (profile->isCompatibleProfile(device, address, samplingRate,
- &samplingRate /*updatedSamplingRate*/,
+ &samplingRate /*updatedSamplingRate*/,
format,
- &format /*updatedFormat*/,
+ &format, /*updatedFormat*/
channelMask,
- &channelMask /*updatedChannelMask*/,
- (audio_output_flags_t) flags)) {
-
+ &channelMask /*updatedChannelMask*/,
+ // FIXME ugly cast
+ (audio_output_flags_t) flags,
+ true /*exactMatchRequiredForInputFlags*/)) {
return profile;
}
+ if (firstInexact == nullptr && profile->isCompatibleProfile(device, address,
+ samplingRate,
+ &updatedSamplingRate,
+ format,
+ &updatedFormat,
+ channelMask,
+ &updatedChannelMask,
+ // FIXME ugly cast
+ (audio_output_flags_t) flags,
+ false /*exactMatchRequiredForInputFlags*/)) {
+ firstInexact = profile;
+ }
+
}
}
+ if (firstInexact != nullptr) {
+ samplingRate = updatedSamplingRate;
+ format = updatedFormat;
+ channelMask = updatedChannelMask;
+ return firstInexact;
+ }
return NULL;
}
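The rewritten profile lookup above now runs two passes: the first call to isCompatibleProfile() requires the input flags to match exactly and returns immediately on success, while the second pass remembers the first profile that matches once the flag requirement is relaxed; only in that fallback case are the caller's sampling rate, format and channel mask overwritten with the values the profile can actually provide. A condensed sketch of that control flow; findProfile() is a hypothetical wrapper around the isCompatibleProfile() calls shown above:

    // Sketch only: exact flag match wins, otherwise fall back to the first inexact match
    // and report the parameters that the inexact profile really supports.
    sp<IOProfile> exact = findProfile(true /*exactMatchRequiredForInputFlags*/);
    if (exact != nullptr) {
        return exact;                        // caller's requested parameters kept as-is
    }
    sp<IOProfile> inexact = findProfile(false /*exactMatchRequiredForInputFlags*/);
    if (inexact != nullptr) {
        samplingRate = updatedSamplingRate;  // values filled in by isCompatibleProfile()
        format = updatedFormat;
        channelMask = updatedChannelMask;
    }
    return inexact;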
@@ -5366,7 +5504,7 @@
// then select this device.
for (size_t routeIndex = 0; routeIndex < mInputRoutes.size(); routeIndex++) {
sp<SessionRoute> route = mInputRoutes.valueAt(routeIndex);
- if ((inputSource == route->mSource) && route->isActive() &&
+ if ((inputSource == route->mSource) && route->isActiveOrChanged() &&
(mAvailableInputDevices.indexOf(route->mDeviceDescriptor) >= 0)) {
return route->mDeviceDescriptor->type();
}
@@ -5393,7 +5531,8 @@
}
// in-call: always cap earpiece volume by voice volume + some low headroom
- if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) && isInCall()) {
+ if ((stream != AUDIO_STREAM_VOICE_CALL) && (device & AUDIO_DEVICE_OUT_EARPIECE) &&
+ (isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
switch (stream) {
case AUDIO_STREAM_SYSTEM:
case AUDIO_STREAM_RING:
@@ -5403,8 +5542,11 @@
case AUDIO_STREAM_ENFORCED_AUDIBLE:
case AUDIO_STREAM_DTMF:
case AUDIO_STREAM_ACCESSIBILITY: {
- const float maxVoiceVolDb = computeVolume(AUDIO_STREAM_VOICE_CALL, index, device)
- + IN_CALL_EARPIECE_HEADROOM_DB;
+ int voiceVolumeIndex =
+ mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, AUDIO_DEVICE_OUT_EARPIECE);
+ const float maxVoiceVolDb =
+ computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, AUDIO_DEVICE_OUT_EARPIECE)
+ + IN_CALL_EARPIECE_HEADROOM_DB;
if (volumeDB > maxVoiceVolDb) {
ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
stream, volumeDB, AUDIO_STREAM_VOICE_CALL, maxVoiceVolDb);
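In effect, the change above caps non-voice streams on the earpiece by the voice-call volume computed at the voice stream's own current index on the earpiece, plus IN_CALL_EARPIECE_HEADROOM_DB, instead of reusing the caller's stream index as before. A worked illustration with made-up dB values that are not from the patch:

    // Illustration only: suppose voice call currently computes to -20 dB on the earpiece
    // and the headroom constant is +6 dB, while a ringtone asks for -10 dB:
    //     maxVoiceVolDb = -20 dB + 6 dB = -14 dB
    //     -10 dB > -14 dB, so volumeDB is clamped down to -14 dB and the override is logged.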
@@ -5428,7 +5570,8 @@
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
AUDIO_DEVICE_OUT_WIRED_HEADSET |
AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
- AUDIO_DEVICE_OUT_USB_HEADSET)) &&
+ AUDIO_DEVICE_OUT_USB_HEADSET |
+ AUDIO_DEVICE_OUT_HEARING_AID)) &&
((stream_strategy == STRATEGY_SONIFICATION)
|| (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)
|| (stream == AUDIO_STREAM_SYSTEM)
@@ -5501,7 +5644,10 @@
}
float volumeDb = computeVolume(stream, index, device);
- if (outputDesc->isFixedVolume(device)) {
+ if (outputDesc->isFixedVolume(device) ||
+ // Force VoIP volume to max for bluetooth SCO
+ ((stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) &&
+ (device & AUDIO_DEVICE_OUT_ALL_SCO) != 0)) {
volumeDb = 0.0f;
}
@@ -5802,77 +5948,110 @@
AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
ALOGD("%s: forced use = %d", __FUNCTION__, forceUse);
- // Analyze original support for various formats.
- bool supportsAC3 = false;
- bool supportsOtherSurround = false;
- bool supportsIEC61937 = false;
- for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
- audio_format_t format = formats[formatIndex];
- switch (format) {
- case AUDIO_FORMAT_AC3:
- supportsAC3 = true;
- break;
- case AUDIO_FORMAT_E_AC3:
- case AUDIO_FORMAT_DTS:
- case AUDIO_FORMAT_DTS_HD:
- supportsOtherSurround = true;
- break;
- case AUDIO_FORMAT_IEC61937:
- supportsIEC61937 = true;
- break;
- default:
- break;
+ // If MANUAL, keep the supported surround sound formats as current enabled ones.
+ if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
+ formats.clear();
+ for (auto it = mSurroundFormats.begin(); it != mSurroundFormats.end(); it++) {
+ formats.add(*it);
}
- }
+ // Always enable IEC61937 when in MANUAL mode.
+ formats.add(AUDIO_FORMAT_IEC61937);
+ } else { // NEVER, AUTO or ALWAYS
+ // Analyze original support for various formats.
+ bool supportsAC3 = false;
+ bool supportsOtherSurround = false;
+ bool supportsIEC61937 = false;
+ mSurroundFormats.clear();
+ for (ssize_t formatIndex = 0; formatIndex < (ssize_t)formats.size(); formatIndex++) {
+ audio_format_t format = formats[formatIndex];
+ switch (format) {
+ case AUDIO_FORMAT_AC3:
+ supportsAC3 = true;
+ break;
+ case AUDIO_FORMAT_E_AC3:
+ case AUDIO_FORMAT_DTS:
+ case AUDIO_FORMAT_DTS_HD:
+ // If ALWAYS, remove all other surround formats here
+ // since we will add them later.
+ if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+ formats.removeAt(formatIndex);
+ formatIndex--;
+ }
+ supportsOtherSurround = true;
+ break;
+ case AUDIO_FORMAT_IEC61937:
+ supportsIEC61937 = true;
+ break;
+ default:
+ break;
+ }
+ }
- // Modify formats based on surround preferences.
- // If NEVER, remove support for surround formats.
- if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
- if (supportsAC3 || supportsOtherSurround || supportsIEC61937) {
- // Remove surround sound related formats.
- for (size_t formatIndex = 0; formatIndex < formats.size(); ) {
+ // Modify formats based on surround preferences.
+ // If NEVER, remove support for surround formats.
+ if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
+ if (supportsAC3 || supportsOtherSurround || supportsIEC61937) {
+ // Remove surround sound related formats.
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ) {
+ audio_format_t format = formats[formatIndex];
+ switch(format) {
+ case AUDIO_FORMAT_AC3:
+ case AUDIO_FORMAT_E_AC3:
+ case AUDIO_FORMAT_DTS:
+ case AUDIO_FORMAT_DTS_HD:
+ case AUDIO_FORMAT_IEC61937:
+ formats.removeAt(formatIndex);
+ break;
+ default:
+ formatIndex++; // keep it
+ break;
+ }
+ }
+ supportsAC3 = false;
+ supportsOtherSurround = false;
+ supportsIEC61937 = false;
+ }
+ } else { // AUTO or ALWAYS
+ // Most TVs support AC3 even if they do not report it in the EDID.
+ if ((alwaysForceAC3 || (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS))
+ && !supportsAC3) {
+ formats.add(AUDIO_FORMAT_AC3);
+ supportsAC3 = true;
+ }
+
+ // If ALWAYS, add support for raw surround formats if all are missing.
+ // This assumes that if any of these formats are reported by the HAL
+ // then the report is valid and should not be modified.
+ if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+ formats.add(AUDIO_FORMAT_E_AC3);
+ formats.add(AUDIO_FORMAT_DTS);
+ formats.add(AUDIO_FORMAT_DTS_HD);
+ supportsOtherSurround = true;
+ }
+
+ // Add support for IEC61937 if any raw surround supported.
+ // The HAL could do this but add it here, just in case.
+ if ((supportsAC3 || supportsOtherSurround) && !supportsIEC61937) {
+ formats.add(AUDIO_FORMAT_IEC61937);
+ supportsIEC61937 = true;
+ }
+
+ // Add reported surround sound formats to enabled surround formats.
+ for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
audio_format_t format = formats[formatIndex];
switch(format) {
case AUDIO_FORMAT_AC3:
case AUDIO_FORMAT_E_AC3:
case AUDIO_FORMAT_DTS:
case AUDIO_FORMAT_DTS_HD:
- case AUDIO_FORMAT_IEC61937:
- formats.removeAt(formatIndex);
- break;
+ case AUDIO_FORMAT_AAC_LC:
+ case AUDIO_FORMAT_DOLBY_TRUEHD:
+ case AUDIO_FORMAT_E_AC3_JOC:
+ mSurroundFormats.insert(format);
default:
- formatIndex++; // keep it
break;
}
}
- supportsAC3 = false;
- supportsOtherSurround = false;
- supportsIEC61937 = false;
- }
- } else { // AUTO or ALWAYS
- // Most TVs support AC3 even if they do not report it in the EDID.
- if ((alwaysForceAC3 || (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS))
- && !supportsAC3) {
- formats.add(AUDIO_FORMAT_AC3);
- supportsAC3 = true;
- }
-
- // If ALWAYS, add support for raw surround formats if all are missing.
- // This assumes that if any of these formats are reported by the HAL
- // then the report is valid and should not be modified.
- if ((forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS)
- && !supportsOtherSurround) {
- formats.add(AUDIO_FORMAT_E_AC3);
- formats.add(AUDIO_FORMAT_DTS);
- formats.add(AUDIO_FORMAT_DTS_HD);
- supportsOtherSurround = true;
- }
-
- // Add support for IEC61937 if any raw surround supported.
- // The HAL could do this but add it here, just in case.
- if ((supportsAC3 || supportsOtherSurround) && !supportsIEC61937) {
- formats.add(AUDIO_FORMAT_IEC61937);
- supportsIEC61937 = true;
}
}
}
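Taken together with the NEVER/AUTO/ALWAYS handling above, the behaviour of filterSurroundFormats() after this change can be summarised as follows (a condensed restatement, not additional code from the patch):

    // NEVER  - strip AC3, E-AC3, DTS, DTS-HD and IEC61937 from the reported list
    // AUTO   - keep the reported list; force-add AC3 only when the platform sets alwaysForceAC3,
    //          since many TVs support AC3 without reporting it in the EDID
    // ALWAYS - force-add AC3 plus the raw surround formats E-AC3, DTS and DTS-HD
    // MANUAL - replace the list with the user-enabled mSurroundFormats plus IEC61937
    // In AUTO and ALWAYS, IEC61937 is also added whenever any raw surround format is present.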
@@ -5894,12 +6073,12 @@
maskIndex++;
}
}
- // If ALWAYS, then make sure we at least support 5.1
- } else if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+ // If ALWAYS or MANUAL, then make sure we at least support 5.1
+ } else if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS
+ || forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL) {
bool supports5dot1 = false;
// Are there any channel masks that can be considered "surround"?
- for (size_t maskIndex = 0; maskIndex < channelMasks.size(); maskIndex++) {
- audio_channel_mask_t channelMask = channelMasks[maskIndex];
+ for (audio_channel_mask_t channelMask : channelMasks) {
if ((channelMask & AUDIO_CHANNEL_OUT_5POINT1) == AUDIO_CHANNEL_OUT_5POINT1) {
supports5dot1 = true;
break;
@@ -5923,7 +6102,7 @@
if (profiles.hasDynamicFormat()) {
reply = mpClientInterface->getParameters(
ioHandle, String8(AudioParameter::keyStreamSupportedFormats));
- ALOGV("%s: supported formats %s", __FUNCTION__, reply.string());
+ ALOGV("%s: supported formats %d, %s", __FUNCTION__, ioHandle, reply.string());
AudioParameter repliedParameters(reply);
if (repliedParameters.get(
String8(AudioParameter::keyStreamSupportedFormats), reply) != NO_ERROR) {
@@ -5936,10 +6115,8 @@
}
profiles.setFormats(formats);
}
- const FormatVector &supportedFormats = profiles.getSupportedFormats();
- for (size_t formatIndex = 0; formatIndex < supportedFormats.size(); formatIndex++) {
- audio_format_t format = supportedFormats[formatIndex];
+ for (audio_format_t format : profiles.getSupportedFormats()) {
ChannelsVector channelMasks;
SampleRateVector samplingRates;
AudioParameter requestedParameters;
@@ -5975,4 +6152,4 @@
}
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 82c4c35..b954714 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -16,6 +16,10 @@
#pragma once
+#include <atomic>
+#include <memory>
+#include <unordered_set>
+
#include <stdint.h>
#include <sys/types.h>
#include <cutils/config_utils.h>
@@ -31,6 +35,7 @@
#include <AudioPolicyManagerInterface.h>
#include <AudioPolicyManagerObserver.h>
#include <AudioGain.h>
+#include <AudioPolicyConfig.h>
#include <AudioPort.h>
#include <AudioPatch.h>
#include <DeviceDescriptor.h>
@@ -63,6 +68,10 @@
// is switched
#define MUTE_TIME_MS 2000
+// multiplication factor applied to output latency when calculating a safe mute delay when
+// invalidating tracks
+#define LATENCY_MUTE_FACTOR 4
+
#define NUM_TEST_OUTPUTS 5
#define NUM_VOL_CURVE_KNEES 2
@@ -76,10 +85,6 @@
// ----------------------------------------------------------------------------
class AudioPolicyManager : public AudioPolicyInterface, public AudioPolicyManagerObserver
-
-#ifdef AUDIO_POLICY_TEST
- , public Thread
-#endif //AUDIO_POLICY_TEST
{
public:
@@ -103,19 +108,14 @@
virtual void setSystemProperty(const char* property, const char* value);
virtual status_t initCheck();
- virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo);
+ virtual audio_io_handle_t getOutput(audio_stream_type_t stream);
virtual status_t getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
const audio_config_t *config,
- audio_output_flags_t flags,
+ audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId);
virtual status_t startOutput(audio_io_handle_t output,
@@ -140,6 +140,7 @@
// indicates to the audio policy manager that the input starts being used.
virtual status_t startInput(audio_io_handle_t input,
audio_session_t session,
+ bool silenced,
concurrency_type__mask_t *concurrency);
// indicates to the audio policy manager that the input stops being used.
@@ -237,10 +238,33 @@
virtual float getStreamVolumeDB(
audio_stream_type_t stream, int index, audio_devices_t device);
+ virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+ audio_format_t *surroundFormats,
+ bool *surroundFormatsEnabled,
+ bool reported);
+ virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
+
// return the strategy corresponding to a given stream type
routing_strategy getStrategy(audio_stream_type_t stream) const;
+ virtual void setRecordSilenced(uid_t uid, bool silenced);
+
protected:
+ // A constructor that allows more fine-grained control over initialization process,
+ // used in automatic tests.
+ AudioPolicyManager(AudioPolicyClientInterface *clientInterface, bool forTesting);
+
+ // These methods should be used when finer control over APM initialization
+ // is needed, e.g. in tests. Must be used in conjunction with the constructor
+ // that only performs fields initialization. The public constructor comprises
+ // these steps in the following sequence:
+ // - field initializing constructor;
+ // - loadConfig;
+ // - initialize.
+ AudioPolicyConfig& getConfig() { return mConfig; }
+ void loadConfig();
+ status_t initialize();
+
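A minimal sketch of how a test could drive this split initialization; the subclass name and the alternative of populating the config directly are hypothetical, while the two-argument constructor, getConfig(), loadConfig() and initialize() come from the declarations above:

    // Hypothetical test helper, not part of the patch.
    class TestableAudioPolicyManager : public AudioPolicyManager {
      public:
        explicit TestableAudioPolicyManager(AudioPolicyClientInterface *clientInterface)
            : AudioPolicyManager(clientInterface, true /*forTesting*/) {}

        status_t initForTest() {
            loadConfig();         // or fill in getConfig() with a synthetic configuration instead
            return initialize();  // mirrors the tail of the public constructor
        }
    };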
// From AudioPolicyManagerObserver
virtual const AudioPatchCollection &getAudioPatches() const
{
@@ -275,7 +299,7 @@
{
return mDefaultOutputDevice;
}
-protected:
+
void addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc);
void removeOutput(audio_io_handle_t output);
void addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc);
@@ -304,7 +328,8 @@
bool force = false,
int delayMs = 0,
audio_patch_handle_t *patchHandle = NULL,
- const char* address = NULL);
+ const char *address = nullptr,
+ bool requiresMuteCheck = true);
status_t resetOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
int delayMs = 0,
audio_patch_handle_t *patchHandle = NULL);
@@ -419,11 +444,6 @@
{
return mEffects.getMaxEffectsMemory();
}
-#ifdef AUDIO_POLICY_TEST
- virtual bool threadLoop();
- void exit();
- int testOutputIndex(audio_io_handle_t output);
-#endif //AUDIO_POLICY_TEST
SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device,
const SwAudioOutputCollection& openOutputs);
@@ -481,6 +501,9 @@
}
uint32_t updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs = 0);
+ sp<AudioPatch> createTelephonyPatch(bool isRx, audio_devices_t device, uint32_t delayMs);
+ sp<DeviceDescriptor> fillAudioPortConfigForDevice(
+ const DeviceVector& devices, audio_devices_t device, audio_port_config *config);
// if argument "device" is different from AUDIO_DEVICE_NONE, startSource() will force
// the re-evaluation of the output device.
@@ -534,18 +557,20 @@
SessionRouteMap mOutputRoutes = SessionRouteMap(SessionRouteMap::MAPTYPE_OUTPUT);
SessionRouteMap mInputRoutes = SessionRouteMap(SessionRouteMap::MAPTYPE_INPUT);
- IVolumeCurvesCollection *mVolumeCurves; // Volume Curves per use case and device category
-
bool mLimitRingtoneVolume; // limit ringtone volume to music volume if headset connected
audio_devices_t mDeviceForStrategy[NUM_STRATEGIES];
float mLastVoiceVolume; // last voice volume value sent to audio HAL
-
- EffectDescriptorCollection mEffects; // list of registered audio effects
bool mA2dpSuspended; // true if A2DP output is suspended
- sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
- HwModuleCollection mHwModules;
- volatile int32_t mAudioPortGeneration;
+ std::unique_ptr<IVolumeCurvesCollection> mVolumeCurves; // Volume Curves per use case and device category
+ EffectDescriptorCollection mEffects; // list of registered audio effects
+ sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
+ HwModuleCollection mHwModules; // contains only modules that have been loaded successfully
+ HwModuleCollection mHwModulesAll; // normally not needed, used during construction and for
+ // dumps
+ AudioPolicyConfig mConfig;
+
+ std::atomic<uint32_t> mAudioPortGeneration;
AudioPatchCollection mAudioPatches;
@@ -574,31 +599,20 @@
AudioPolicyMixCollection mPolicyMixes; // list of registered mixes
audio_io_handle_t mMusicEffectOutput; // output selected for music effects
-
-#ifdef AUDIO_POLICY_TEST
- Mutex mLock;
- Condition mWaitWorkCV;
-
- int mCurOutput;
- bool mDirectOutput;
- audio_io_handle_t mTestOutputs[NUM_TEST_OUTPUTS];
- int mTestInput;
- uint32_t mTestDevice;
- uint32_t mTestSamplingRate;
- uint32_t mTestFormat;
- uint32_t mTestChannels;
- uint32_t mTestLatencyMs;
-#endif //AUDIO_POLICY_TEST
-
uint32_t nextAudioPortGeneration();
// Audio Policy Engine Interface.
AudioPolicyManagerInterface *mEngine;
+
+ // Surround formats that are enabled.
+ std::unordered_set<audio_format_t> mSurroundFormats;
private:
// Add or remove AC3 DTS encodings based on user preferences.
void filterSurroundFormats(FormatVector *formatsPtr);
void filterSurroundChannelMasks(ChannelsVector *channelMasksPtr);
+ status_t getSupportedFormats(audio_io_handle_t ioHandle, FormatVector& formats);
+
// If any, resolve any "dynamic" fields of an Audio Profiles collection
void updateAudioProfiles(audio_devices_t device, audio_io_handle_t ioHandle,
AudioProfileVector &profiles);
@@ -630,20 +644,15 @@
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo);
+ const audio_config_t *config,
+ audio_output_flags_t *flags);
// internal method to return the input handle for the given device and format
audio_io_handle_t getInputForDevice(audio_devices_t device,
String8 address,
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix);
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 31c9575..b064f8c 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -233,4 +233,4 @@
return AudioSystem::newAudioUniqueId(use);
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 84b1073..c7dfe0f 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -399,11 +399,12 @@
while (pos + size > *totSize) {
*totSize += ((*totSize + 7) / 8) * 4;
}
- *param = (char *)realloc(*param, *totSize);
- if (*param == NULL) {
+ char *newParam = (char *)realloc(*param, *totSize);
+ if (newParam == NULL) {
ALOGE("%s realloc error for size %zu", __func__, *totSize);
return 0;
}
+ *param = newParam;
}
*curSize = pos + size;
return pos;
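The fix above is the standard safe-realloc idiom: when realloc() fails it returns NULL but leaves the original block allocated, so writing the result straight back into *param would lose the only pointer to it and leak. A standalone sketch of the pattern, independent of this file:

    // Sketch: keep the old pointer valid until realloc() is known to have succeeded.
    char *grown = (char *)realloc(buf, newSize);
    if (grown == NULL) {
        // buf still points to the old, valid allocation; it can be freed or kept in use
        return 0;
    }
    buf = grown;
    size = newSize;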
@@ -744,4 +745,4 @@
}
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index 59d5d14..623180e 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -192,6 +192,6 @@
KeyedVector< audio_session_t, EffectVector* > mOutputSessions;
};
-}; // namespace android
+} // namespace android
#endif // ANDROID_AUDIOPOLICYEFFECTS_H
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index b7bce55..7337f04 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -18,8 +18,11 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <media/MediaAnalyticsItem.h>
+
#include "AudioPolicyService.h"
#include "ServiceUtilities.h"
+#include "TypeConverter.h"
namespace android {
@@ -44,6 +47,7 @@
ALOGV("setDeviceConnectionState()");
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->setDeviceConnectionState(device, state,
device_address, device_name);
}
@@ -55,6 +59,7 @@
if (mAudioPolicyManager == NULL) {
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
+ AutoCallerClear acc;
return mAudioPolicyManager->getDeviceConnectionState(device,
device_address);
}
@@ -72,6 +77,7 @@
ALOGV("handleDeviceConfigChange()");
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->handleDeviceConfigChange(device, device_address,
device_name);
}
@@ -94,10 +100,10 @@
// operation from policy manager standpoint (no other operation (e.g track start or stop)
// can be interleaved).
Mutex::Autolock _l(mLock);
-
// TODO: check if it is more appropriate to do it in platform specific policy manager
AudioSystem::setMode(state);
+ AutoCallerClear acc;
mAudioPolicyManager->setPhoneState(state);
mPhoneState = state;
return NO_ERROR;
@@ -115,9 +121,11 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
- if (!settingsAllowed()) {
+
+ if (!modifyAudioRoutingAllowed()) {
return PERMISSION_DENIED;
}
+
if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
return BAD_VALUE;
}
@@ -126,6 +134,7 @@
}
ALOGV("setForceUse()");
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
mAudioPolicyManager->setForceUse(usage, config);
return NO_ERROR;
}
@@ -138,15 +147,11 @@
if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
return AUDIO_POLICY_FORCE_NONE;
}
+ AutoCallerClear acc;
return mAudioPolicyManager->getForceUse(usage);
}
-audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
+audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream)
{
if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
return AUDIO_IO_HANDLE_NONE;
@@ -156,14 +161,15 @@
}
ALOGV("getOutput()");
Mutex::Autolock _l(mLock);
- return mAudioPolicyManager->getOutput(stream, samplingRate,
- format, channelMask, flags, offloadInfo);
+ AutoCallerClear acc;
+ return mAudioPolicyManager->getOutput(stream);
}
status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
+ pid_t pid,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t flags,
@@ -173,7 +179,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
- ALOGV("getOutput()");
+ ALOGV("getOutputForAttr()");
Mutex::Autolock _l(mLock);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
@@ -182,9 +188,27 @@
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
uid = callingUid;
}
- return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
+ audio_output_flags_t originalFlags = flags;
+ AutoCallerClear acc;
+ status_t result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
config,
- flags, selectedDeviceId, portId);
+ &flags, selectedDeviceId, portId);
+
+ // FIXME: Introduce a way to check for the telephony device before opening the output
+ if ((result == NO_ERROR) &&
+ (flags & AUDIO_OUTPUT_FLAG_INCALL_MUSIC) &&
+ !modifyPhoneStateAllowed(pid, uid)) {
+ // If the app tries to play music through the telephony device and doesn't have permission,
+ // fall back to the default output device.
+ mAudioPolicyManager->releaseOutput(*output, *stream, session);
+ flags = originalFlags;
+ *selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ *portId = AUDIO_PORT_HANDLE_NONE;
+ result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
+ config,
+ &flags, selectedDeviceId, portId);
+ }
+ return result;
}
status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -211,6 +235,7 @@
}
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->startOutput(output, stream, session);
}
@@ -247,6 +272,7 @@
}
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->stopOutput(output, stream, session);
}
@@ -267,6 +293,7 @@
{
ALOGV("doReleaseOutput from tid %d", gettid());
Mutex::Autolock _l(mLock);
+ // called from internal thread: no need to clear caller identity
mAudioPolicyManager->releaseOutput(output, stream, session);
}
@@ -275,6 +302,7 @@
audio_session_t session,
pid_t pid,
uid_t uid,
+ const String16& opPackageName,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
@@ -283,9 +311,10 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
+
// already checked by client, but double-check in case the client wrapper is bypassed
- if (attr->source >= AUDIO_SOURCE_CNT && attr->source != AUDIO_SOURCE_HOTWORD &&
- attr->source != AUDIO_SOURCE_FM_TUNER) {
+ if (attr->source < AUDIO_SOURCE_DEFAULT || (attr->source >= AUDIO_SOURCE_CNT &&
+ attr->source != AUDIO_SOURCE_HOTWORD && attr->source != AUDIO_SOURCE_FM_TUNER)) {
return BAD_VALUE;
}
@@ -306,6 +335,20 @@
pid = callingPid;
}
+ // check calling permissions
+ if (!recordingAllowed(opPackageName, pid, uid)) {
+ ALOGE("%s permission denied: recording not allowed for uid %d pid %d",
+ __func__, uid, pid);
+ return PERMISSION_DENIED;
+ }
+
+ if ((attr->source == AUDIO_SOURCE_VOICE_UPLINK ||
+ attr->source == AUDIO_SOURCE_VOICE_DOWNLINK ||
+ attr->source == AUDIO_SOURCE_VOICE_CALL) &&
+ !captureAudioOutputAllowed(pid, uid)) {
+ return PERMISSION_DENIED;
+ }
+
if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed(pid, uid)) {
return BAD_VALUE;
}
@@ -316,11 +359,14 @@
AudioPolicyInterface::input_type_t inputType;
Mutex::Autolock _l(mLock);
- // the audio_in_acoustics_t parameter is ignored by get_input()
- status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
- config,
- flags, selectedDeviceId,
- &inputType, portId);
+ {
+ AutoCallerClear acc;
+ // the audio_in_acoustics_t parameter is ignored by get_input()
+ status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
+ config,
+ flags, selectedDeviceId,
+ &inputType, portId);
+ }
audioPolicyEffects = mAudioPolicyEffects;
if (status == NO_ERROR) {
@@ -351,10 +397,19 @@
if (status != NO_ERROR) {
if (status == PERMISSION_DENIED) {
+ AutoCallerClear acc;
mAudioPolicyManager->releaseInput(*input, session);
}
return status;
}
+
+ sp<AudioRecordClient> client =
+ new AudioRecordClient(*attr, *input, uid, pid, opPackageName, session);
+ client->active = false;
+ client->isConcurrent = false;
+ client->isVirtualDevice = false; //TODO : update from APM->getInputForAttr()
+ client->deviceId = *selectedDeviceId;
+ mAudioRecordClients.add(*portId, client);
}
if (audioPolicyEffects != 0) {
@@ -367,15 +422,147 @@
return NO_ERROR;
}
-status_t AudioPolicyService::startInput(audio_io_handle_t input,
- audio_session_t session)
+// this is replicated from frameworks/av/media/libaudioclient/AudioRecord.cpp
+// XXX -- figure out how to put it into a common, shared location
+
+static std::string audioSourceString(audio_source_t value) {
+ std::string source;
+ if (SourceTypeConverter::toString(value, source)) {
+ return source;
+ }
+ char rawbuffer[16]; // room for "%d"
+ snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
+ return rawbuffer;
+}
+
+static std::string audioConcurrencyString(
+ AudioPolicyInterface::concurrency_type__mask_t concurrency)
+{
+ char buffer[64]; // oversized
+ if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL) {
+ snprintf(buffer, sizeof(buffer), "%s%s%s%s",
+ (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL)? ",call":"",
+ (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE)? ",capture":"",
+ (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_HOTWORD)? ",hotword":"",
+ (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_PREEMPT)? ",preempt":"");
+ } else {
+ snprintf(buffer, sizeof(buffer), ",none");
+ }
+
+ return &buffer[1];
+}
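For reference, the helper above joins the set concurrency names with commas and skips the leading comma by returning &buffer[1]; a couple of expected results, assuming the mask constants behave as their names suggest:

    // audioConcurrencyString(API_INPUT_CONCURRENCY_CALL | API_INPUT_CONCURRENCY_CAPTURE) == "call,capture"
    // audioConcurrencyString(API_INPUT_CONCURRENCY_NONE)                                 == "none"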
+
+std::string AudioPolicyService::getDeviceTypeStrForPortId(audio_port_handle_t portId) {
+ std::string typeStr;
+ struct audio_port port = {};
+ port.id = portId;
+ status_t status = mAudioPolicyManager->getAudioPort(&port);
+ if (status == NO_ERROR && port.type == AUDIO_PORT_TYPE_DEVICE) {
+ deviceToString(port.ext.device.type, typeStr);
+ }
+ return typeStr;
+}
+
+status_t AudioPolicyService::startInput(audio_port_handle_t portId, bool *silenced)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
+ sp<AudioRecordClient> client;
+ {
+ Mutex::Autolock _l(mLock);
+
+ ssize_t index = mAudioRecordClients.indexOfKey(portId);
+ if (index < 0) {
+ return INVALID_OPERATION;
+ }
+ client = mAudioRecordClients.valueAt(index);
+ }
+
+ // check calling permissions
+ if (!startRecording(client->opPackageName, client->pid, client->uid)) {
+ ALOGE("%s permission denied: recording not allowed for uid %d pid %d",
+ __func__, client->uid, client->pid);
+ return PERMISSION_DENIED;
+ }
+
+ // If UID inactive it records silence until becoming active
+ *silenced = !mUidPolicy->isUidActive(client->uid) && !client->isVirtualDevice;
+
Mutex::Autolock _l(mLock);
- AudioPolicyInterface::concurrency_type__mask_t concurrency;
- status_t status = mAudioPolicyManager->startInput(input, session, &concurrency);
+ AudioPolicyInterface::concurrency_type__mask_t concurrency =
+ AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE;
+
+ status_t status;
+ {
+ AutoCallerClear acc;
+ status = mAudioPolicyManager->startInput(
+ client->input, client->session, *silenced, &concurrency);
+
+ }
+
+ // including successes gets very verbose
+ if (status != NO_ERROR) {
+
+ static constexpr char kAudioPolicy[] = "audiopolicy";
+
+ static constexpr char kAudioPolicyReason[] = "android.media.audiopolicy.reason";
+ static constexpr char kAudioPolicyStatus[] = "android.media.audiopolicy.status";
+ static constexpr char kAudioPolicyRqstSrc[] = "android.media.audiopolicy.rqst.src";
+ static constexpr char kAudioPolicyRqstPkg[] = "android.media.audiopolicy.rqst.pkg";
+ static constexpr char kAudioPolicyRqstSession[] = "android.media.audiopolicy.rqst.session";
+ static constexpr char kAudioPolicyRqstDevice[] =
+ "android.media.audiopolicy.rqst.device";
+ static constexpr char kAudioPolicyActiveSrc[] = "android.media.audiopolicy.active.src";
+ static constexpr char kAudioPolicyActivePkg[] = "android.media.audiopolicy.active.pkg";
+ static constexpr char kAudioPolicyActiveSession[] =
+ "android.media.audiopolicy.active.session";
+ static constexpr char kAudioPolicyActiveDevice[] =
+ "android.media.audiopolicy.active.device";
+
+ MediaAnalyticsItem *item = new MediaAnalyticsItem(kAudioPolicy);
+ if (item != NULL) {
+
+ item->setCString(kAudioPolicyReason, audioConcurrencyString(concurrency).c_str());
+ item->setInt32(kAudioPolicyStatus, status);
+
+ item->setCString(kAudioPolicyRqstSrc,
+ audioSourceString(client->attributes.source).c_str());
+ item->setCString(kAudioPolicyRqstPkg,
+ std::string(String8(client->opPackageName).string()).c_str());
+ item->setInt32(kAudioPolicyRqstSession, client->session);
+
+ item->setCString(
+ kAudioPolicyRqstDevice, getDeviceTypeStrForPortId(client->deviceId).c_str());
+
+ // figure out who is active
+ // NB: the other party might have given up the microphone since then; we cannot be
+ // sure it still holds it.
+ // we hold mLock, so perhaps we're safe for this looping
+ if (concurrency != AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE) {
+ int count = mAudioRecordClients.size();
+ for (int i = 0; i<count ; i++) {
+ if (portId == mAudioRecordClients.keyAt(i)) {
+ continue;
+ }
+ sp<AudioRecordClient> other = mAudioRecordClients.valueAt(i);
+ if (other->active) {
+ // keeps the last of the clients marked active
+ item->setCString(kAudioPolicyActiveSrc,
+ audioSourceString(other->attributes.source).c_str());
+ item->setCString(kAudioPolicyActivePkg,
+ std::string(String8(other->opPackageName).string()).c_str());
+ item->setInt32(kAudioPolicyActiveSession, other->session);
+ item->setCString(kAudioPolicyActiveDevice,
+ getDeviceTypeStrForPortId(other->deviceId).c_str());
+ }
+ }
+ }
+ item->selfrecord();
+ delete item;
+ item = NULL;
+ }
+ }
if (status == NO_ERROR) {
LOG_ALWAYS_FATAL_IF(concurrency & ~AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL,
@@ -388,43 +575,67 @@
if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
//TODO: check concurrent capture permission
}
+
+ client->active = true;
+ } else {
+ finishRecording(client->opPackageName, client->uid);
}
return status;
}
-status_t AudioPolicyService::stopInput(audio_io_handle_t input,
- audio_session_t session)
+status_t AudioPolicyService::stopInput(audio_port_handle_t portId)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
Mutex::Autolock _l(mLock);
- return mAudioPolicyManager->stopInput(input, session);
+ ssize_t index = mAudioRecordClients.indexOfKey(portId);
+ if (index < 0) {
+ return INVALID_OPERATION;
+ }
+ sp<AudioRecordClient> client = mAudioRecordClients.valueAt(index);
+
+ client->active = false;
+
+ // finish the recording app op
+ finishRecording(client->opPackageName, client->uid);
+ AutoCallerClear acc;
+ return mAudioPolicyManager->stopInput(client->input, client->session);
}
-void AudioPolicyService::releaseInput(audio_io_handle_t input,
- audio_session_t session)
+void AudioPolicyService::releaseInput(audio_port_handle_t portId)
{
if (mAudioPolicyManager == NULL) {
return;
}
sp<AudioPolicyEffects>audioPolicyEffects;
+ sp<AudioRecordClient> client;
{
Mutex::Autolock _l(mLock);
audioPolicyEffects = mAudioPolicyEffects;
+ ssize_t index = mAudioRecordClients.indexOfKey(portId);
+ if (index < 0) {
+ return;
+ }
+ client = mAudioRecordClients.valueAt(index);
+ mAudioRecordClients.removeItem(portId);
+ }
+ if (client == 0) {
+ return;
}
if (audioPolicyEffects != 0) {
// release audio processors from the input
- status_t status = audioPolicyEffects->releaseInputEffects(input, session);
+ status_t status = audioPolicyEffects->releaseInputEffects(client->input, client->session);
if(status != NO_ERROR) {
- ALOGW("Failed to release effects on input %d", input);
+ ALOGW("Failed to release effects on input %d", client->input);
}
}
{
Mutex::Autolock _l(mLock);
- mAudioPolicyManager->releaseInput(input, session);
+ AutoCallerClear acc;
+ mAudioPolicyManager->releaseInput(client->input, client->session);
}
}
@@ -442,6 +653,7 @@
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
mAudioPolicyManager->initStreamVolume(stream, indexMin, indexMax);
return NO_ERROR;
}
@@ -460,6 +672,7 @@
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->setStreamVolumeIndex(stream,
index,
device);
@@ -476,6 +689,7 @@
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getStreamVolumeIndex(stream,
index,
device);
@@ -489,6 +703,7 @@
if (mAudioPolicyManager == NULL) {
return 0;
}
+ AutoCallerClear acc;
return mAudioPolicyManager->getStrategyForStream(stream);
}
@@ -503,6 +718,7 @@
return AUDIO_DEVICE_NONE;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getDevicesForStream(stream);
}
@@ -513,6 +729,7 @@
return 0;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getOutputForEffect(desc);
}
@@ -526,6 +743,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mEffectsLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->registerEffect(desc, io, strategy, session, id);
}
@@ -535,6 +753,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mEffectsLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->unregisterEffect(id);
}
@@ -544,6 +763,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mEffectsLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->setEffectEnabled(id, enabled);
}
@@ -556,6 +776,7 @@
return false;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->isStreamActive(stream, inPastMs);
}
@@ -568,6 +789,7 @@
return false;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
}
@@ -577,6 +799,7 @@
return false;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->isSourceActive(source);
}
@@ -610,6 +833,7 @@
Mutex::Autolock _l(mLock);
Mutex::Autolock _le(mEffectsLock); // isOffloadSupported queries for
// non-offloadable effects
+ AutoCallerClear acc;
return mAudioPolicyManager->isOffloadSupported(info);
}
@@ -623,7 +847,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->listAudioPorts(role, type, num_ports, ports, generation);
}
@@ -633,7 +857,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->getAudioPort(port);
}
@@ -647,6 +871,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
+ AutoCallerClear acc;
return mAudioPolicyManager->createAudioPatch(patch, handle,
IPCThreadState::self()->getCallingUid());
}
@@ -660,7 +885,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->releaseAudioPatch(handle,
IPCThreadState::self()->getCallingUid());
}
@@ -673,7 +898,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->listAudioPatches(num_patches, patches, generation);
}
@@ -686,7 +911,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->setAudioPortConfig(config);
}
@@ -698,7 +923,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->acquireSoundTriggerSession(session, ioHandle, device);
}
@@ -708,7 +933,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->releaseSoundTriggerSession(session);
}
@@ -721,6 +946,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
+ AutoCallerClear acc;
if (registration) {
return mAudioPolicyManager->registerPolicyMixes(mixes);
} else {
@@ -736,7 +962,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->startAudioSource(source, attributes, handle,
IPCThreadState::self()->getCallingUid());
}
@@ -747,7 +973,7 @@
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
-
+ AutoCallerClear acc;
return mAudioPolicyManager->stopAudioSource(handle);
}
@@ -760,6 +986,7 @@
return PERMISSION_DENIED;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->setMasterMono(mono);
}
@@ -769,6 +996,7 @@
return NO_INIT;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getMasterMono(mono);
}
@@ -780,8 +1008,32 @@
return NAN;
}
Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
return mAudioPolicyManager->getStreamVolumeDB(stream, index, device);
}
+status_t AudioPolicyService::getSurroundFormats(unsigned int *numSurroundFormats,
+ audio_format_t *surroundFormats,
+ bool *surroundFormatsEnabled,
+ bool reported)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
+ return mAudioPolicyManager->getSurroundFormats(numSurroundFormats, surroundFormats,
+ surroundFormatsEnabled, reported);
+}
-}; // namespace android
+status_t AudioPolicyService::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
+ return mAudioPolicyManager->setSurroundFormatEnabled(audioFormat, enabled);
+}
+
+} // namespace android
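The two surround-format entry points added above are thin pass-throughs to AudioPolicyManager. A hedged usage sketch follows; it assumes *numSurroundFormats acts as an in/out element count for the two parallel arrays, and "aps" stands for whatever holds the service interface (neither name is defined by this change).

unsigned int numFormats = 32;            // capacity of the arrays below (assumption)
audio_format_t formats[32] = {};
bool enabled[32] = {};
if (aps->getSurroundFormats(&numFormats, formats, enabled,
                            false /*reported*/) == NO_ERROR) {
    // numFormats now indicates how many surround formats were reported
    // (exact in/out semantics assumed, see AudioPolicyManager).
}
// Force-enable one specific surround format, e.g. AC3:
aps->setSurroundFormatEnabled(AUDIO_FORMAT_AC3, true /*enabled*/);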
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 7af2e74..f3cddc3 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -26,8 +26,12 @@
#include <sys/time.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
+#include <cutils/multiuser.h>
#include <cutils/properties.h>
#include <binder/IPCThreadState.h>
+#include <binder/ActivityManager.h>
+#include <binder/PermissionController.h>
+#include <binder/IResultReceiver.h>
#include <utils/String16.h>
#include <utils/threads.h>
#include "AudioPolicyService.h"
@@ -39,6 +43,8 @@
#include <system/audio.h>
#include <system/audio_policy.h>
+#include <private/android_filesystem_config.h>
+
namespace android {
static const char kDeadlockedString[] = "AudioPolicyService may be deadlocked\n";
@@ -49,6 +55,7 @@
static const nsecs_t kAudioCommandTimeoutNs = seconds(3); // 3 seconds
+static const String16 sManageAudioPolicyPermission("android.permission.MANAGE_AUDIO_POLICY");
// ----------------------------------------------------------------------------
@@ -79,6 +86,9 @@
Mutex::Autolock _l(mLock);
mAudioPolicyEffects = audioPolicyEffects;
}
+
+ mUidPolicy = new UidPolicy(this);
+ mUidPolicy->registerSelf();
}
AudioPolicyService::~AudioPolicyService()
@@ -92,6 +102,9 @@
mNotificationClients.clear();
mAudioPolicyEffects.clear();
+
+ mUidPolicy->unregisterSelf();
+ mUidPolicy.clear();
}
// A notification client is always registered by AudioSystem when the client process
@@ -139,6 +152,7 @@
{
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager) {
+ // called from binder death notification: no need to clear caller identity
mAudioPolicyManager->releaseResourcesForUid(uid);
}
}
@@ -261,7 +275,7 @@
void AudioPolicyService::NotificationClient::onDynamicPolicyMixStateUpdate(
const String8& regId, int32_t state)
{
- if (mAudioPolicyServiceClient != 0) {
+ if (mAudioPolicyServiceClient != 0 && multiuser_get_app_id(mUid) < AID_APP_START) {
mAudioPolicyServiceClient->onDynamicPolicyMixStateUpdate(regId, state);
}
}
@@ -271,7 +285,7 @@
const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
audio_patch_handle_t patchHandle)
{
- if (mAudioPolicyServiceClient != 0) {
+ if (mAudioPolicyServiceClient != 0 && multiuser_get_app_id(mUid) < AID_APP_START) {
mAudioPolicyServiceClient->onRecordingConfigurationUpdate(event, clientInfo,
clientConfig, deviceConfig, patchHandle);
}
@@ -318,6 +332,21 @@
return NO_ERROR;
}
+void AudioPolicyService::setRecordSilenced(uid_t uid, bool silenced)
+{
+ {
+ Mutex::Autolock _l(mLock);
+ if (mAudioPolicyManager) {
+ AutoCallerClear acc;
+ mAudioPolicyManager->setRecordSilenced(uid, silenced);
+ }
+ }
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af) {
+ af->setRecordSilenced(uid, silenced);
+ }
+}
+
status_t AudioPolicyService::dump(int fd, const Vector<String16>& args __unused)
{
if (!dumpAllowed()) {
@@ -361,11 +390,248 @@
}
status_t AudioPolicyService::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
+ switch (code) {
+ case SHELL_COMMAND_TRANSACTION: {
+ int in = data.readFileDescriptor();
+ int out = data.readFileDescriptor();
+ int err = data.readFileDescriptor();
+ int argc = data.readInt32();
+ Vector<String16> args;
+ for (int i = 0; i < argc && data.dataAvail() > 0; i++) {
+ args.add(data.readString16());
+ }
+ sp<IBinder> unusedCallback;
+ sp<IResultReceiver> resultReceiver;
+ status_t status;
+ if ((status = data.readNullableStrongBinder(&unusedCallback)) != NO_ERROR) {
+ return status;
+ }
+ if ((status = data.readNullableStrongBinder(&resultReceiver)) != NO_ERROR) {
+ return status;
+ }
+ status = shellCommand(in, out, err, args);
+ if (resultReceiver != nullptr) {
+ resultReceiver->send(status);
+ }
+ return NO_ERROR;
+ }
+ }
+
return BnAudioPolicyService::onTransact(code, data, reply, flags);
}
+// ------------------- Shell command implementation -------------------
+
+// NOTE: This is a remote API - make sure all args are validated
+status_t AudioPolicyService::shellCommand(int in, int out, int err, Vector<String16>& args) {
+ if (!checkCallingPermission(sManageAudioPolicyPermission, nullptr, nullptr)) {
+ return PERMISSION_DENIED;
+ }
+ if (in == BAD_TYPE || out == BAD_TYPE || err == BAD_TYPE) {
+ return BAD_VALUE;
+ }
+ if (args.size() == 3 && args[0] == String16("set-uid-state")) {
+ return handleSetUidState(args, err);
+ } else if (args.size() == 2 && args[0] == String16("reset-uid-state")) {
+ return handleResetUidState(args, err);
+ } else if (args.size() == 2 && args[0] == String16("get-uid-state")) {
+ return handleGetUidState(args, out, err);
+ } else if (args.size() == 1 && args[0] == String16("help")) {
+ printHelp(out);
+ return NO_ERROR;
+ }
+ printHelp(err);
+ return BAD_VALUE;
+}
+
+status_t AudioPolicyService::handleSetUidState(Vector<String16>& args, int err) {
+ PermissionController pc;
+ int uid = pc.getPackageUid(args[1], 0);
+ if (uid <= 0) {
+ ALOGE("Unknown package: '%s'", String8(args[1]).string());
+ dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+ return BAD_VALUE;
+ }
+ bool active = false;
+ if (args[2] == String16("active")) {
+ active = true;
+ } else if ((args[2] != String16("idle"))) {
+ ALOGE("Expected active or idle but got: '%s'", String8(args[2]).string());
+ return BAD_VALUE;
+ }
+ mUidPolicy->addOverrideUid(uid, active);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::handleResetUidState(Vector<String16>& args, int err) {
+ PermissionController pc;
+ int uid = pc.getPackageUid(args[1], 0);
+ if (uid < 0) {
+ ALOGE("Unknown package: '%s'", String8(args[1]).string());
+ dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+ return BAD_VALUE;
+ }
+ mUidPolicy->removeOverrideUid(uid);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::handleGetUidState(Vector<String16>& args, int out, int err) {
+ PermissionController pc;
+ int uid = pc.getPackageUid(args[1], 0);
+ if (uid < 0) {
+ ALOGE("Unknown package: '%s'", String8(args[1]).string());
+ dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+ return BAD_VALUE;
+ }
+ if (mUidPolicy->isUidActive(uid)) {
+ return dprintf(out, "active\n");
+ } else {
+ return dprintf(out, "idle\n");
+ }
+}
+
+status_t AudioPolicyService::printHelp(int out) {
+ return dprintf(out, "Audio policy service commands:\n"
+ " get-uid-state <PACKAGE> gets the uid state\n"
+ " set-uid-state <PACKAGE> <active|idle> overrides the uid state\n"
+ " reset-uid-state <PACKAGE> clears the uid state override\n"
+ " help print this message\n");
+}
+
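These handlers are reached through the binder shell transaction handled in onTransact() above (typically via the cmd tool against the audio policy service; the exact service alias is not part of this change). Once the package name has been resolved to a uid through PermissionController, each subcommand reduces to a single UidPolicy call, as taken directly from the handlers above:

mUidPolicy->addOverrideUid(uid, /*active=*/true);    // set-uid-state <PACKAGE> active
mUidPolicy->addOverrideUid(uid, /*active=*/false);   // set-uid-state <PACKAGE> idle
mUidPolicy->removeOverrideUid(uid);                  // reset-uid-state <PACKAGE>
bool active = mUidPolicy->isUidActive(uid);          // get-uid-state <PACKAGE>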
+// ----------- AudioPolicyService::UidPolicy implementation ----------
+
+void AudioPolicyService::UidPolicy::registerSelf() {
+ ActivityManager am;
+ am.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
+ | ActivityManager::UID_OBSERVER_IDLE
+ | ActivityManager::UID_OBSERVER_ACTIVE,
+ ActivityManager::PROCESS_STATE_UNKNOWN,
+ String16("audioserver"));
+ status_t res = am.linkToDeath(this);
+ if (!res) {
+ Mutex::Autolock _l(mLock);
+ mObserverRegistered = true;
+ } else {
+ ALOGE("UidPolicy::registerSelf linkToDeath failed: %d", res);
+ am.unregisterUidObserver(this);
+ }
+}
+
+void AudioPolicyService::UidPolicy::unregisterSelf() {
+ ActivityManager am;
+ am.unlinkToDeath(this);
+ am.unregisterUidObserver(this);
+ Mutex::Autolock _l(mLock);
+ mObserverRegistered = false;
+}
+
+void AudioPolicyService::UidPolicy::binderDied(__unused const wp<IBinder> &who) {
+ Mutex::Autolock _l(mLock);
+ mCachedUids.clear();
+ mObserverRegistered = false;
+}
+
+bool AudioPolicyService::UidPolicy::isUidActive(uid_t uid) {
+ if (isServiceUid(uid)) return true;
+ bool needToReregister = false;
+ {
+ Mutex::Autolock _l(mLock);
+ needToReregister = !mObserverRegistered;
+ }
+ if (needToReregister) {
+ // Looks like ActivityManager died previously; attempt to re-register.
+ registerSelf();
+ }
+ {
+ Mutex::Autolock _l(mLock);
+ auto overrideIter = mOverrideUids.find(uid);
+ if (overrideIter != mOverrideUids.end()) {
+ return overrideIter->second;
+ }
+ // In the absence of the ActivityManager, assume everything to be active.
+ if (!mObserverRegistered) return true;
+ auto cacheIter = mCachedUids.find(uid);
+ if (cacheIter != mCachedUids.end()) {
+ return cacheIter->second;
+ }
+ }
+ ActivityManager am;
+ bool active = am.isUidActive(uid, String16("audioserver"));
+ {
+ Mutex::Autolock _l(mLock);
+ mCachedUids.insert(std::pair<uid_t, bool>(uid, active));
+ }
+ return active;
+}
+
+void AudioPolicyService::UidPolicy::onUidActive(uid_t uid) {
+ updateUidCache(uid, true, true);
+}
+
+void AudioPolicyService::UidPolicy::onUidGone(uid_t uid, __unused bool disabled) {
+ updateUidCache(uid, false, false);
+}
+
+void AudioPolicyService::UidPolicy::onUidIdle(uid_t uid, __unused bool disabled) {
+ updateUidCache(uid, false, true);
+}
+
+bool AudioPolicyService::UidPolicy::isServiceUid(uid_t uid) const {
+ return multiuser_get_app_id(uid) < AID_APP_START;
+}
+
+void AudioPolicyService::UidPolicy::notifyService(uid_t uid, bool active) {
+ sp<AudioPolicyService> service = mService.promote();
+ if (service != nullptr) {
+ service->setRecordSilenced(uid, !active);
+ }
+}
+
+void AudioPolicyService::UidPolicy::updateOverrideUid(uid_t uid, bool active, bool insert) {
+ if (isServiceUid(uid)) return;
+ bool wasOverridden = false, wasActive = false;
+ {
+ Mutex::Autolock _l(mLock);
+ updateUidLocked(&mOverrideUids, uid, active, insert, &wasOverridden, &wasActive);
+ }
+ if (!wasOverridden && insert) {
+ notifyService(uid, active); // Started to override.
+ } else if (wasOverridden && !insert) {
+ notifyService(uid, isUidActive(uid)); // Override ceased, notify with ground truth.
+ } else if (wasActive != active) {
+ notifyService(uid, active); // Override updated.
+ }
+}
+
+void AudioPolicyService::UidPolicy::updateUidCache(uid_t uid, bool active, bool insert) {
+ if (isServiceUid(uid)) return;
+ bool wasActive = false;
+ {
+ Mutex::Autolock _l(mLock);
+ updateUidLocked(&mCachedUids, uid, active, insert, nullptr, &wasActive);
+ // Do not notify service if currently overridden.
+ if (mOverrideUids.find(uid) != mOverrideUids.end()) return;
+ }
+ bool nowActive = active && insert;
+ if (wasActive != nowActive) notifyService(uid, nowActive);
+}
+
+void AudioPolicyService::UidPolicy::updateUidLocked(std::unordered_map<uid_t, bool> *uids,
+ uid_t uid, bool active, bool insert, bool *wasThere, bool *wasActive) {
+ auto it = uids->find(uid);
+ if (it != uids->end()) {
+ if (wasThere != nullptr) *wasThere = true;
+ if (wasActive != nullptr) *wasActive = it->second;
+ if (insert) {
+ it->second = active;
+ } else {
+ uids->erase(it);
+ }
+ } else if (insert) {
+ uids->insert(std::pair<uid_t, bool>(uid, active));
+ }
+}
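For reference, the lookup order that isUidActive() and the two update paths above implement can be modelled as a pure function over the two maps. This is a standalone sketch, not code from this change; the ActivityManager query that populates the cache is reduced to a "fallback" argument for brevity.

#include <sys/types.h>
#include <unordered_map>

static bool isActiveFor(uid_t uid,
                        const std::unordered_map<uid_t, bool>& overrideUids,
                        const std::unordered_map<uid_t, bool>& cachedUids,
                        bool fallback) {
    auto it = overrideUids.find(uid);
    if (it != overrideUids.end()) return it->second;   // shell override wins
    it = cachedUids.find(uid);
    if (it != cachedUids.end()) return it->second;     // last observer callback state
    return fallback;                                   // otherwise ask ActivityManager
}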
// ----------- AudioPolicyService::AudioCommandThread implementation ----------
@@ -1146,4 +1412,4 @@
int aps_set_voice_volume(void *service, float volume, int delay_ms);
};
-}; // namespace android
+} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 38d4b17..407d7a5 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -24,6 +24,7 @@
#include <utils/Vector.h>
#include <utils/SortedVector.h>
#include <binder/BinderService.h>
+#include <binder/IUidObserver.h>
#include <system/audio.h>
#include <system/audio_policy.h>
#include <media/IAudioPolicyService.h>
@@ -33,9 +34,12 @@
#include "AudioPolicyEffects.h"
#include "managerdefault/AudioPolicyManager.h"
+#include <unordered_map>
namespace android {
+using namespace std;
+
// ----------------------------------------------------------------------------
class AudioPolicyService :
@@ -68,17 +72,12 @@
virtual status_t setPhoneState(audio_mode_t state);
virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
- virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
- uint32_t samplingRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
- audio_output_flags_t flags =
- AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL);
+ virtual audio_io_handle_t getOutput(audio_stream_type_t stream);
virtual status_t getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
+ pid_t pid,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t flags,
@@ -98,16 +97,15 @@
audio_session_t session,
pid_t pid,
uid_t uid,
+ const String16& opPackageName,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId = NULL,
audio_port_handle_t *portId = NULL);
- virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session);
- virtual status_t stopInput(audio_io_handle_t input,
- audio_session_t session);
- virtual void releaseInput(audio_io_handle_t input,
- audio_session_t session);
+ virtual status_t startInput(audio_port_handle_t portId,
+ bool *silenced);
+ virtual status_t stopInput(audio_port_handle_t portId);
+ virtual void releaseInput(audio_port_handle_t portId);
virtual status_t initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax);
@@ -205,6 +203,12 @@
virtual float getStreamVolumeDB(
audio_stream_type_t stream, int index, audio_devices_t device);
+ virtual status_t getSurroundFormats(unsigned int *numSurroundFormats,
+ audio_format_t *surroundFormats,
+ bool *surroundFormatsEnabled,
+ bool reported);
+ virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
+
status_t doStopOutput(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session);
@@ -241,6 +245,68 @@
status_t dumpInternals(int fd);
+ // Handles binder shell commands
+ virtual status_t shellCommand(int in, int out, int err, Vector<String16>& args);
+
+ // Sets whether the given UID records only silence
+ virtual void setRecordSilenced(uid_t uid, bool silenced);
+
+ // Overrides the UID state (active or idle)
+ status_t handleSetUidState(Vector<String16>& args, int err);
+
+ // Clears the override for the UID state
+ status_t handleResetUidState(Vector<String16>& args, int err);
+
+ // Gets the UID state
+ status_t handleGetUidState(Vector<String16>& args, int out, int err);
+
+ // Prints the shell command help
+ status_t printHelp(int out);
+
+ std::string getDeviceTypeStrForPortId(audio_port_handle_t portId);
+
+ // When recording, we need to make sure the UID is allowed to do so. If the UID is idle
+ // it cannot record and receives buffers filled with zeros - silence. As soon as the UID
+ // transitions to an active state we start delivering buffers with real data. This approach
+ // transparently handles recording while the UID transitions between the idle and active
+ // states, and avoids getting stuck either receiving non-silenced buffers while idle or
+ // receiving silenced buffers while active.
+ class UidPolicy : public BnUidObserver, public virtual IBinder::DeathRecipient {
+ public:
+ explicit UidPolicy(wp<AudioPolicyService> service)
+ : mService(service), mObserverRegistered(false) {}
+
+ void registerSelf();
+ void unregisterSelf();
+
+ // IBinder::DeathRecipient implementation
+ void binderDied(const wp<IBinder> &who) override;
+
+ bool isUidActive(uid_t uid);
+
+ // BnUidObserver implementation
+ void onUidActive(uid_t uid) override;
+ void onUidGone(uid_t uid, bool disabled) override;
+ void onUidIdle(uid_t uid, bool disabled) override;
+
+ void addOverrideUid(uid_t uid, bool active) { updateOverrideUid(uid, active, true); }
+ void removeOverrideUid(uid_t uid) { updateOverrideUid(uid, false, false); }
+
+ private:
+ bool isServiceUid(uid_t uid) const;
+ void notifyService(uid_t uid, bool active);
+ void updateOverrideUid(uid_t uid, bool active, bool insert);
+ void updateUidCache(uid_t uid, bool active, bool insert);
+ void updateUidLocked(std::unordered_map<uid_t, bool> *uids,
+ uid_t uid, bool active, bool insert, bool *wasThere, bool *wasActive);
+
+ wp<AudioPolicyService> mService;
+ Mutex mLock;
+ bool mObserverRegistered;
+ std::unordered_map<uid_t, bool> mOverrideUids;
+ std::unordered_map<uid_t, bool> mCachedUids;
+ };
+
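A toy model of what the silencing described above means for the capture path: AudioFlinger does the real work once setRecordSilenced() is invoked, and this sketch only illustrates the buffer-zeroing idea. It is not code from this change.

#include <stdint.h>
#include <string.h>

static void deliverCapturedFrames(int16_t* frames, size_t frameCount, bool silenced) {
    if (silenced) {
        // An idle uid keeps "recording", but every delivered buffer is zeroed out.
        memset(frames, 0, frameCount * sizeof(frames[0]));
    }
    // The buffer is handed to the client either way.
}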
// Thread used for tone playback and to send audio config commands to audio flinger
// For tone playback, using a separate thread is necessary to avoid deadlock with mLock because
// startTone() and stopTone() are normally called with mLock locked and requesting a tone start
@@ -312,7 +378,6 @@
const audio_config_base_t *deviceConfig,
audio_patch_handle_t patchHandle);
void insertCommand_l(AudioCommand *command, int delayMs = 0);
-
private:
class AudioCommandData;
@@ -558,6 +623,48 @@
bool mAudioPortCallbacksEnabled;
};
+ // --- AudioRecordClient ---
+ // Information about each registered AudioRecord client
+ // (between calls to getInputForAttr() and releaseInput())
+ class AudioRecordClient : public RefBase {
+ public:
+ AudioRecordClient(const audio_attributes_t attributes,
+ const audio_io_handle_t input, uid_t uid, pid_t pid,
+ const String16& opPackageName, const audio_session_t session) :
+ attributes(attributes),
+ input(input), uid(uid), pid(pid),
+ opPackageName(opPackageName), session(session),
+ active(false), isConcurrent(false), isVirtualDevice(false) {}
+ virtual ~AudioRecordClient() {}
+
+ const audio_attributes_t attributes; // source, flags ...
+ const audio_io_handle_t input; // audio HAL input IO handle
+ const uid_t uid; // client UID
+ const pid_t pid; // client PID
+ const String16 opPackageName; // client package name
+ const audio_session_t session; // audio session ID
+ bool active; // true while capture is active
+ bool isConcurrent; // is allowed to capture concurrently
+ bool isVirtualDevice; // uses virtual device: updated by APM::getInputForAttr()
+ audio_port_handle_t deviceId; // selected input device port ID
+ };
+
+ // A class automatically clearing and restoring binder caller identity inside
+ // a code block (scoped variable)
+ // Declare one systematically before calling AudioPolicyManager methods so that they are
+ // executed with the same level of privilege as the audioserver process.
+ class AutoCallerClear {
+ public:
+ AutoCallerClear() :
+ mToken(IPCThreadState::self()->clearCallingIdentity()) {}
+ ~AutoCallerClear() {
+ IPCThreadState::self()->restoreCallingIdentity(mToken);
+ }
+
+ private:
+ const int64_t mToken;
+ };
+
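A minimal, illustrative caller of AutoCallerClear (the method names below are placeholders, not part of this change): the guard is declared after any permission checks and before the AudioPolicyManager call, so the call runs with audioserver's identity, and the binder caller's identity is restored when the scope ends.

status_t AudioPolicyService::someBinderEntryPoint() {
    if (mAudioPolicyManager == NULL) return NO_INIT;
    Mutex::Autolock _l(mLock);
    AutoCallerClear acc;                           // runs as audioserver from here on
    return mAudioPolicyManager->someManagerCall(); // identity restored on scope exit
}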
// Internal dump utilities.
status_t dumpPermissionDenial(int fd);
@@ -581,8 +688,11 @@
// Manage all effects configured in audio_effects.conf
sp<AudioPolicyEffects> mAudioPolicyEffects;
audio_mode_t mPhoneState;
+
+ sp<UidPolicy> mUidPolicy;
+ DefaultKeyedVector< audio_port_handle_t, sp<AudioRecordClient> > mAudioRecordClients;
};
-}; // namespace android
+} // namespace android
#endif // ANDROID_AUDIOPOLICYSERVICE_H
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
new file mode 100644
index 0000000..a43daea
--- /dev/null
+++ b/services/audiopolicy/tests/Android.mk
@@ -0,0 +1,32 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_C_INCLUDES := \
+ frameworks/av/services/audiopolicy \
+ frameworks/av/services/audiopolicy/common/include \
+ frameworks/av/services/audiopolicy/engine/interface \
+ frameworks/av/services/audiopolicy/utilities
+
+LOCAL_SHARED_LIBRARIES := \
+ libaudiopolicymanagerdefault \
+ libbase \
+ liblog \
+ libmedia_helper \
+ libutils \
+
+LOCAL_STATIC_LIBRARIES := \
+ libaudiopolicycomponents \
+
+LOCAL_SRC_FILES := \
+ audiopolicymanager_tests.cpp \
+
+LOCAL_MODULE := audiopolicy_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
new file mode 100644
index 0000000..eb8222c
--- /dev/null
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include "AudioPolicyInterface.h"
+
+namespace android {
+
+class AudioPolicyTestClient : public AudioPolicyClientInterface
+{
+public:
+ virtual ~AudioPolicyTestClient() = default;
+
+ // AudioPolicyClientInterface Implementation
+ audio_module_handle_t loadHwModule(const char* /*name*/) override {
+ return AUDIO_MODULE_HANDLE_NONE;
+ }
+ status_t openOutput(audio_module_handle_t /*module*/,
+ audio_io_handle_t* /*output*/,
+ audio_config_t* /*config*/,
+ audio_devices_t* /*devices*/,
+ const String8& /*address*/,
+ uint32_t* /*latencyMs*/,
+ audio_output_flags_t /*flags*/) override { return NO_INIT; }
+ audio_io_handle_t openDuplicateOutput(audio_io_handle_t /*output1*/,
+ audio_io_handle_t /*output2*/) override {
+ return AUDIO_IO_HANDLE_NONE;
+ }
+ status_t closeOutput(audio_io_handle_t /*output*/) override { return NO_INIT; }
+ status_t suspendOutput(audio_io_handle_t /*output*/) override { return NO_INIT; }
+ status_t restoreOutput(audio_io_handle_t /*output*/) override { return NO_INIT; }
+ status_t openInput(audio_module_handle_t /*module*/,
+ audio_io_handle_t* /*input*/,
+ audio_config_t* /*config*/,
+ audio_devices_t* /*device*/,
+ const String8& /*address*/,
+ audio_source_t /*source*/,
+ audio_input_flags_t /*flags*/) override { return NO_INIT; }
+ status_t closeInput(audio_io_handle_t /*input*/) override { return NO_INIT; }
+ status_t setStreamVolume(audio_stream_type_t /*stream*/,
+ float /*volume*/,
+ audio_io_handle_t /*output*/,
+ int /*delayMs*/) override { return NO_INIT; }
+ status_t invalidateStream(audio_stream_type_t /*stream*/) override { return NO_INIT; }
+ void setParameters(audio_io_handle_t /*ioHandle*/,
+ const String8& /*keyValuePairs*/,
+ int /*delayMs*/) override { }
+ String8 getParameters(audio_io_handle_t /*ioHandle*/,
+ const String8& /*keys*/) override { return String8(); }
+ status_t startTone(audio_policy_tone_t /*tone*/,
+ audio_stream_type_t /*stream*/) override { return NO_INIT; }
+ status_t stopTone() override { return NO_INIT; }
+ status_t setVoiceVolume(float /*volume*/, int /*delayMs*/) override { return NO_INIT; }
+ status_t moveEffects(audio_session_t /*session*/,
+ audio_io_handle_t /*srcOutput*/,
+ audio_io_handle_t /*dstOutput*/) override { return NO_INIT; }
+ status_t createAudioPatch(const struct audio_patch* /*patch*/,
+ audio_patch_handle_t* /*handle*/,
+ int /*delayMs*/) override { return NO_INIT; }
+ status_t releaseAudioPatch(audio_patch_handle_t /*handle*/,
+ int /*delayMs*/) override { return NO_INIT; }
+ status_t setAudioPortConfig(const struct audio_port_config* /*config*/,
+ int /*delayMs*/) override { return NO_INIT; }
+ void onAudioPortListUpdate() override { }
+ void onAudioPatchListUpdate() override { }
+ audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t /*use*/) override { return 0; }
+ void onDynamicPolicyMixStateUpdate(String8 /*regId*/, int32_t /*state*/) override { }
+ void onRecordingConfigurationUpdate(int /*event*/,
+ const record_client_info_t* /*clientInfo*/,
+ const struct audio_config_base* /*clientConfig*/,
+ const struct audio_config_base* /*deviceConfig*/,
+ audio_patch_handle_t /*patchHandle*/) override { }
+};
+
+} // namespace android
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
new file mode 100644
index 0000000..fe543a6
--- /dev/null
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include "managerdefault/AudioPolicyManager.h"
+
+namespace android {
+
+class AudioPolicyTestManager : public AudioPolicyManager {
+ public:
+ explicit AudioPolicyTestManager(AudioPolicyClientInterface *clientInterface)
+ : AudioPolicyManager(clientInterface, true /*forTesting*/) { }
+ using AudioPolicyManager::getConfig;
+ using AudioPolicyManager::initialize;
+};
+
+} // namespace android
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
new file mode 100644
index 0000000..a9593b8
--- /dev/null
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include <set>
+
+#include <gtest/gtest.h>
+
+#include "AudioPolicyTestClient.h"
+#include "AudioPolicyTestManager.h"
+
+using namespace android;
+
+TEST(AudioPolicyManagerTestInit, Failure) {
+ AudioPolicyTestClient client;
+ AudioPolicyTestManager manager(&client);
+ manager.getConfig().setDefault();
+ // Since the default client fails to open anything,
+ // APM should indicate that the initialization didn't succeed.
+ ASSERT_EQ(NO_INIT, manager.initialize());
+ ASSERT_EQ(NO_INIT, manager.initCheck());
+}
+
+
+class AudioPolicyManagerTestClient : public AudioPolicyTestClient {
+ public:
+ // AudioPolicyClientInterface implementation
+ audio_module_handle_t loadHwModule(const char* /*name*/) override {
+ return mNextModuleHandle++;
+ }
+
+ status_t openOutput(audio_module_handle_t module,
+ audio_io_handle_t* output,
+ audio_config_t* /*config*/,
+ audio_devices_t* /*devices*/,
+ const String8& /*address*/,
+ uint32_t* /*latencyMs*/,
+ audio_output_flags_t /*flags*/) override {
+ if (module >= mNextModuleHandle) {
+ ALOGE("%s: Module handle %d has not been allocated yet (next is %d)",
+ __func__, module, mNextModuleHandle);
+ return BAD_VALUE;
+ }
+ *output = mNextIoHandle++;
+ return NO_ERROR;
+ }
+
+ status_t openInput(audio_module_handle_t module,
+ audio_io_handle_t* input,
+ audio_config_t* /*config*/,
+ audio_devices_t* /*device*/,
+ const String8& /*address*/,
+ audio_source_t /*source*/,
+ audio_input_flags_t /*flags*/) override {
+ if (module >= mNextModuleHandle) {
+ ALOGE("%s: Module handle %d has not been allocated yet (next is %d)",
+ __func__, module, mNextModuleHandle);
+ return BAD_VALUE;
+ }
+ *input = mNextIoHandle++;
+ return NO_ERROR;
+ }
+
+ status_t createAudioPatch(const struct audio_patch* /*patch*/,
+ audio_patch_handle_t* handle,
+ int /*delayMs*/) override {
+ *handle = mNextPatchHandle++;
+ mActivePatches.insert(*handle);
+ return NO_ERROR;
+ }
+
+ status_t releaseAudioPatch(audio_patch_handle_t handle,
+ int /*delayMs*/) override {
+ if (mActivePatches.erase(handle) != 1) {
+ if (handle >= mNextPatchHandle) {
+ ALOGE("%s: Patch handle %d has not been allocated yet (next is %d)",
+ __func__, handle, mNextPatchHandle);
+ } else {
+ ALOGE("%s: Attempt to release patch %d twice", __func__, handle);
+ }
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+ }
+
+ // Helper methods for tests
+ size_t getActivePatchesCount() const { return mActivePatches.size(); }
+
+ private:
+ audio_module_handle_t mNextModuleHandle = AUDIO_MODULE_HANDLE_NONE + 1;
+ audio_io_handle_t mNextIoHandle = AUDIO_IO_HANDLE_NONE + 1;
+ audio_patch_handle_t mNextPatchHandle = AUDIO_PATCH_HANDLE_NONE + 1;
+ std::set<audio_patch_handle_t> mActivePatches;
+};
+
+class AudioPolicyManagerTest : public testing::Test {
+ protected:
+ virtual void SetUp();
+ virtual void TearDown();
+
+ std::unique_ptr<AudioPolicyManagerTestClient> mClient;
+ std::unique_ptr<AudioPolicyTestManager> mManager;
+};
+
+void AudioPolicyManagerTest::SetUp() {
+ mClient.reset(new AudioPolicyManagerTestClient);
+ mManager.reset(new AudioPolicyTestManager(mClient.get()));
+ mManager->getConfig().setDefault();
+ ASSERT_EQ(NO_ERROR, mManager->initialize());
+ ASSERT_EQ(NO_ERROR, mManager->initCheck());
+}
+
+void AudioPolicyManagerTest::TearDown() {
+ mManager.reset();
+ mClient.reset();
+}
+
+TEST_F(AudioPolicyManagerTest, InitSuccess) {
+ // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTest, CreateAudioPatchFailure) {
+ audio_patch patch{};
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
+ const size_t patchCountBefore = mClient->getActivePatchesCount();
+ ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(nullptr, &handle, 0));
+ ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(&patch, nullptr, 0));
+ ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(&patch, &handle, 0));
+ patch.num_sources = AUDIO_PATCH_PORTS_MAX + 1;
+ patch.num_sinks = 1;
+ ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(&patch, &handle, 0));
+ patch.num_sources = 1;
+ patch.num_sinks = AUDIO_PATCH_PORTS_MAX + 1;
+ ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(&patch, &handle, 0));
+ patch.num_sources = 2;
+ patch.num_sinks = 1;
+ ASSERT_EQ(INVALID_OPERATION, mManager->createAudioPatch(&patch, &handle, 0));
+ patch = {};
+ patch.num_sources = 1;
+ patch.sources[0].role = AUDIO_PORT_ROLE_SINK;
+ patch.num_sinks = 1;
+ patch.sinks[0].role = AUDIO_PORT_ROLE_SINK;
+ ASSERT_EQ(INVALID_OPERATION, mManager->createAudioPatch(&patch, &handle, 0));
+ patch = {};
+ patch.num_sources = 1;
+ patch.sources[0].role = AUDIO_PORT_ROLE_SOURCE;
+ patch.num_sinks = 1;
+ patch.sinks[0].role = AUDIO_PORT_ROLE_SOURCE;
+ ASSERT_EQ(INVALID_OPERATION, mManager->createAudioPatch(&patch, &handle, 0));
+ // Verify that the handle is left unchanged.
+ ASSERT_EQ(AUDIO_PATCH_HANDLE_NONE, handle);
+ ASSERT_EQ(patchCountBefore, mClient->getActivePatchesCount());
+}
+
+TEST_F(AudioPolicyManagerTest, CreateAudioPatchFromMix) {
+ audio_patch patch{};
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
+ uid_t uid = 42;
+ const size_t patchCountBefore = mClient->getActivePatchesCount();
+ patch.num_sources = 1;
+ {
+ auto& src = patch.sources[0];
+ src.role = AUDIO_PORT_ROLE_SOURCE;
+ src.type = AUDIO_PORT_TYPE_MIX;
+ src.id = mManager->getConfig().getAvailableInputDevices()[0]->getId();
+ // Note: these are the parameters of the output device.
+ src.sample_rate = 44100;
+ src.format = AUDIO_FORMAT_PCM_16_BIT;
+ src.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ }
+ patch.num_sinks = 1;
+ {
+ auto& sink = patch.sinks[0];
+ sink.role = AUDIO_PORT_ROLE_SINK;
+ sink.type = AUDIO_PORT_TYPE_DEVICE;
+ sink.id = mManager->getConfig().getDefaultOutputDevice()->getId();
+ }
+ ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(&patch, &handle, uid));
+ ASSERT_NE(AUDIO_PATCH_HANDLE_NONE, handle);
+ ASSERT_EQ(patchCountBefore + 1, mClient->getActivePatchesCount());
+}
+
+// TODO: Add patch creation tests that involve an already existing patch
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index cb415f5..96261ab 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -51,6 +51,7 @@
device3/StatusTracker.cpp \
device3/Camera3BufferManager.cpp \
device3/Camera3StreamSplitter.cpp \
+ device3/DistortionMapper.cpp \
gui/RingBufferConsumer.cpp \
utils/CameraTraces.cpp \
utils/AutoConditionLock.cpp \
@@ -79,7 +80,8 @@
android.hardware.camera.provider@2.4 \
android.hardware.camera.device@1.0 \
android.hardware.camera.device@3.2 \
- android.hardware.camera.device@3.3
+ android.hardware.camera.device@3.3 \
+ android.hardware.camera.device@3.4
LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder libcamera_client libfmq
@@ -95,3 +97,8 @@
LOCAL_MODULE:= libcameraservice
include $(BUILD_SHARED_LIBRARY)
+
+# Build tests too
+
+include $(LOCAL_PATH)/tests/Android.mk
+
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index e06a81f..471c77d 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -32,13 +32,15 @@
namespace android {
+using hardware::camera::common::V1_0::TorchModeStatus;
+
/////////////////////////////////////////////////////////////////////
// CameraFlashlight implementation begins
// used by camera service to control the flashlight.
/////////////////////////////////////////////////////////////////////
CameraFlashlight::CameraFlashlight(sp<CameraProviderManager> providerManager,
- camera_module_callbacks_t* callbacks) :
+ CameraProviderManager::StatusListener* callbacks) :
mProviderManager(providerManager),
mCallbacks(callbacks),
mFlashlightMapInitialized(false) {
@@ -59,7 +61,7 @@
} else {
// Only HAL1 devices do not support setTorchMode
mFlashControl =
- new CameraHardwareInterfaceFlashControl(mProviderManager, *mCallbacks);
+ new CameraHardwareInterfaceFlashControl(mProviderManager, mCallbacks);
}
return OK;
@@ -118,19 +120,15 @@
return res;
}
-int CameraFlashlight::getNumberOfCameras() {
- return mProviderManager->getAPI1CompatibleCameraCount();
-}
-
status_t CameraFlashlight::findFlashUnits() {
Mutex::Autolock l(mLock);
status_t res;
std::vector<String8> cameraIds;
- int numberOfCameras = getNumberOfCameras();
+ std::vector<std::string> ids = mProviderManager->getAPI1CompatibleCameraDeviceIds();
+ int numberOfCameras = static_cast<int>(ids.size());
cameraIds.resize(numberOfCameras);
// No module, must be provider
- std::vector<std::string> ids = mProviderManager->getAPI1CompatibleCameraDeviceIds();
for (size_t i = 0; i < cameraIds.size(); i++) {
cameraIds[i] = String8(ids[i].c_str());
}
@@ -184,7 +182,8 @@
ssize_t index = mHasFlashlightMap.indexOfKey(cameraId);
if (index == NAME_NOT_FOUND) {
- ALOGE("%s: camera %s not present when findFlashUnits() was called",
+ // Might be an external camera
+ ALOGW("%s: camera %s not present when findFlashUnits() was called",
__FUNCTION__, cameraId.string());
return false;
}
@@ -218,12 +217,13 @@
if (mOpenedCameraIds.size() == 0) {
// notify torch unavailable for all cameras with a flash
- int numCameras = getNumberOfCameras();
+ std::vector<std::string> ids = mProviderManager->getAPI1CompatibleCameraDeviceIds();
+ int numCameras = static_cast<int>(ids.size());
for (int i = 0; i < numCameras; i++) {
- if (hasFlashUnitLocked(String8::format("%d", i))) {
- mCallbacks->torch_mode_status_change(mCallbacks,
- String8::format("%d", i).string(),
- TORCH_MODE_STATUS_NOT_AVAILABLE);
+ String8 id8(ids[i].c_str());
+ if (hasFlashUnitLocked(id8)) {
+ mCallbacks->onTorchStatusChanged(
+ id8, TorchModeStatus::NOT_AVAILABLE);
}
}
}
@@ -263,12 +263,13 @@
if (isBackwardCompatibleMode(cameraId)) {
// notify torch available for all cameras with a flash
- int numCameras = getNumberOfCameras();
+ std::vector<std::string> ids = mProviderManager->getAPI1CompatibleCameraDeviceIds();
+ int numCameras = static_cast<int>(ids.size());
for (int i = 0; i < numCameras; i++) {
- if (hasFlashUnitLocked(String8::format("%d", i))) {
- mCallbacks->torch_mode_status_change(mCallbacks,
- String8::format("%d", i).string(),
- TORCH_MODE_STATUS_AVAILABLE_OFF);
+ String8 id8(ids[i].c_str());
+ if (hasFlashUnitLocked(id8)) {
+ mCallbacks->onTorchStatusChanged(
+ id8, TorchModeStatus::AVAILABLE_OFF);
}
}
}
@@ -315,9 +316,9 @@
CameraHardwareInterfaceFlashControl::CameraHardwareInterfaceFlashControl(
sp<CameraProviderManager> manager,
- const camera_module_callbacks_t& callbacks) :
+ CameraProviderManager::StatusListener* callbacks) :
mProviderManager(manager),
- mCallbacks(&callbacks),
+ mCallbacks(callbacks),
mTorchEnabled(false) {
}
@@ -333,8 +334,7 @@
if (mCallbacks) {
ALOGV("%s: notify the framework that torch was turned off",
__FUNCTION__);
- mCallbacks->torch_mode_status_change(mCallbacks,
- mCameraId.string(), TORCH_MODE_STATUS_AVAILABLE_OFF);
+ mCallbacks->onTorchStatusChanged(mCameraId, TorchModeStatus::AVAILABLE_OFF);
}
}
}
@@ -368,8 +368,7 @@
// disabling the torch mode of currently opened device
disconnectCameraDevice();
mTorchEnabled = false;
- mCallbacks->torch_mode_status_change(mCallbacks,
- cameraId.string(), TORCH_MODE_STATUS_AVAILABLE_OFF);
+ mCallbacks->onTorchStatusChanged(cameraId, TorchModeStatus::AVAILABLE_OFF);
return OK;
}
@@ -379,8 +378,7 @@
}
mTorchEnabled = true;
- mCallbacks->torch_mode_status_change(mCallbacks,
- cameraId.string(), TORCH_MODE_STATUS_AVAILABLE_ON);
+ mCallbacks->onTorchStatusChanged(cameraId, TorchModeStatus::AVAILABLE_ON);
return OK;
}
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
index c86ee85..1baaba2 100644
--- a/services/camera/libcameraservice/CameraFlashlight.h
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -19,7 +19,6 @@
#include <gui/GLConsumer.h>
#include <gui/Surface.h>
-#include <hardware/camera_common.h>
#include <utils/KeyedVector.h>
#include <utils/SortedVector.h>
#include "common/CameraProviderManager.h"
@@ -55,7 +54,7 @@
class CameraFlashlight : public virtual VirtualLightRefBase {
public:
CameraFlashlight(sp<CameraProviderManager> providerManager,
- camera_module_callbacks_t* callbacks);
+ CameraProviderManager::StatusListener* callbacks);
virtual ~CameraFlashlight();
// Find all flash units. This must be called before other methods. All
@@ -93,13 +92,11 @@
// opening cameras)
bool isBackwardCompatibleMode(const String8& cameraId);
- int getNumberOfCameras();
-
sp<FlashControlBase> mFlashControl;
sp<CameraProviderManager> mProviderManager;
- const camera_module_callbacks_t *mCallbacks;
+ CameraProviderManager::StatusListener* mCallbacks;
SortedVector<String8> mOpenedCameraIds;
// camera id -> if it has a flash unit
@@ -134,7 +131,7 @@
public:
CameraHardwareInterfaceFlashControl(
sp<CameraProviderManager> manager,
- const camera_module_callbacks_t& callbacks);
+ CameraProviderManager::StatusListener* callbacks);
virtual ~CameraHardwareInterfaceFlashControl();
// FlashControlBase
@@ -166,7 +163,7 @@
status_t hasFlashUnitLocked(const String8& cameraId, bool *hasFlash, bool keepDeviceOpen);
sp<CameraProviderManager> mProviderManager;
- const camera_module_callbacks_t *mCallbacks;
+ CameraProviderManager::StatusListener* mCallbacks;
sp<CameraHardwareInterface> mDevice;
String8 mCameraId;
CameraParameters mParameters;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 514d37a..282871b 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -33,14 +33,18 @@
#include <android-base/macros.h>
#include <android-base/parseint.h>
+#include <binder/ActivityManager.h>
#include <binder/AppOpsManager.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
+#include <binder/PermissionController.h>
#include <binder/ProcessInfoService.h>
+#include <binder/IResultReceiver.h>
#include <cutils/atomic.h>
#include <cutils/properties.h>
+#include <cutils/misc.h>
#include <gui/Surface.h>
#include <hardware/hardware.h>
#include <memunreachable/memunreachable.h>
@@ -63,6 +67,7 @@
#include "api1/Camera2Client.h"
#include "api2/CameraDeviceClient.h"
#include "utils/CameraTraces.h"
+#include "utils/TagMonitor.h"
namespace {
const char* kPermissionServiceName = "permission";
@@ -103,77 +108,13 @@
// ----------------------------------------------------------------------------
-extern "C" {
-static void camera_device_status_change(
- const struct camera_module_callbacks* callbacks,
- int camera_id,
- int new_status) {
- sp<CameraService> cs = const_cast<CameraService*>(
- static_cast<const CameraService*>(callbacks));
- String8 id = String8::format("%d", camera_id);
-
- CameraDeviceStatus newStatus{CameraDeviceStatus::NOT_PRESENT};
- switch (new_status) {
- case CAMERA_DEVICE_STATUS_NOT_PRESENT:
- newStatus = CameraDeviceStatus::NOT_PRESENT;
- break;
- case CAMERA_DEVICE_STATUS_PRESENT:
- newStatus = CameraDeviceStatus::PRESENT;
- break;
- case CAMERA_DEVICE_STATUS_ENUMERATING:
- newStatus = CameraDeviceStatus::ENUMERATING;
- break;
- default:
- ALOGW("Unknown device status change to %d", new_status);
- break;
- }
- cs->onDeviceStatusChanged(id, newStatus);
-}
-
-static void torch_mode_status_change(
- const struct camera_module_callbacks* callbacks,
- const char* camera_id,
- int new_status) {
- if (!callbacks || !camera_id) {
- ALOGE("%s invalid parameters. callbacks %p, camera_id %p", __FUNCTION__,
- callbacks, camera_id);
- }
- sp<CameraService> cs = const_cast<CameraService*>(
- static_cast<const CameraService*>(callbacks));
-
- TorchModeStatus status;
- switch (new_status) {
- case TORCH_MODE_STATUS_NOT_AVAILABLE:
- status = TorchModeStatus::NOT_AVAILABLE;
- break;
- case TORCH_MODE_STATUS_AVAILABLE_OFF:
- status = TorchModeStatus::AVAILABLE_OFF;
- break;
- case TORCH_MODE_STATUS_AVAILABLE_ON:
- status = TorchModeStatus::AVAILABLE_ON;
- break;
- default:
- ALOGE("Unknown torch status %d", new_status);
- return;
- }
-
- cs->onTorchStatusChanged(
- String8(camera_id),
- status);
-}
-} // extern "C"
-
-// ----------------------------------------------------------------------------
+static const String16 sManageCameraPermission("android.permission.MANAGE_CAMERA");
CameraService::CameraService() :
mEventLog(DEFAULT_EVENT_LOG_LENGTH),
- mNumberOfCameras(0), mNumberOfNormalCameras(0),
+ mNumberOfCameras(0),
mSoundRef(0), mInitialized(false) {
ALOGI("CameraService started (pid=%d)", getpid());
-
- this->camera_device_status_change = android::camera_device_status_change;
- this->torch_mode_status_change = android::torch_mode_status_change;
-
mServiceLockWrapper = std::make_shared<WaitableMutexWrapper>(&mServiceLock);
}
@@ -196,56 +137,49 @@
}
CameraService::pingCameraServiceProxy();
+
+ mUidPolicy = new UidPolicy(this);
+ mUidPolicy->registerSelf();
}
status_t CameraService::enumerateProviders() {
status_t res;
- Mutex::Autolock l(mServiceLock);
- if (nullptr == mCameraProviderManager.get()) {
- mCameraProviderManager = new CameraProviderManager();
- res = mCameraProviderManager->initialize(this);
- if (res != OK) {
- ALOGE("%s: Unable to initialize camera provider manager: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- return res;
- }
- }
+ std::vector<std::string> deviceIds;
+ {
+ Mutex::Autolock l(mServiceLock);
- mNumberOfCameras = mCameraProviderManager->getCameraCount();
- mNumberOfNormalCameras =
- mCameraProviderManager->getAPI1CompatibleCameraCount();
-
- // Setup vendor tags before we call get_camera_info the first time
- // because HAL might need to setup static vendor keys in get_camera_info
- // TODO: maybe put this into CameraProviderManager::initialize()?
- mCameraProviderManager->setUpVendorTags();
-
- if (nullptr == mFlashlight.get()) {
- mFlashlight = new CameraFlashlight(mCameraProviderManager, this);
- }
-
- res = mFlashlight->findFlashUnits();
- if (res != OK) {
- ALOGE("Failed to enumerate flash units: %s (%d)", strerror(-res), res);
- }
-
- for (auto& cameraId : mCameraProviderManager->getCameraDeviceIds()) {
- String8 id8 = String8(cameraId.c_str());
- bool cameraFound = false;
- {
-
- Mutex::Autolock lock(mCameraStatesLock);
- auto iter = mCameraStates.find(id8);
- if (iter != mCameraStates.end()) {
- cameraFound = true;
+ if (nullptr == mCameraProviderManager.get()) {
+ mCameraProviderManager = new CameraProviderManager();
+ res = mCameraProviderManager->initialize(this);
+ if (res != OK) {
+ ALOGE("%s: Unable to initialize camera provider manager: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
}
}
- if (!cameraFound) {
- addStates(id8);
+
+ // Setup vendor tags before we call get_camera_info the first time
+ // because HAL might need to setup static vendor keys in get_camera_info
+ // TODO: maybe put this into CameraProviderManager::initialize()?
+ mCameraProviderManager->setUpVendorTags();
+
+ if (nullptr == mFlashlight.get()) {
+ mFlashlight = new CameraFlashlight(mCameraProviderManager, this);
}
+ res = mFlashlight->findFlashUnits();
+ if (res != OK) {
+ ALOGE("Failed to enumerate flash units: %s (%d)", strerror(-res), res);
+ }
+
+ deviceIds = mCameraProviderManager->getCameraDeviceIds();
+ }
+
+
+ for (auto& cameraId : deviceIds) {
+ String8 id8 = String8(cameraId.c_str());
onDeviceStatusChanged(id8, CameraDeviceStatus::PRESENT);
}
@@ -275,12 +209,20 @@
CameraService::~CameraService() {
VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+ mUidPolicy->unregisterSelf();
}
void CameraService::onNewProviderRegistered() {
enumerateProviders();
}
+void CameraService::updateCameraNumAndIds() {
+ Mutex::Autolock l(mServiceLock);
+ mNumberOfCameras = mCameraProviderManager->getCameraCount();
+ mNormalDeviceIds =
+ mCameraProviderManager->getAPI1CompatibleCameraDeviceIds();
+}
+
void CameraService::addStates(const String8 id) {
std::string cameraId(id.c_str());
hardware::camera::common::V1_0::CameraResourceCost cost;
@@ -301,13 +243,18 @@
}
if (mFlashlight->hasFlashUnit(id)) {
+ Mutex::Autolock al(mTorchStatusMutex);
mTorchStatusMap.add(id, TorchModeStatus::AVAILABLE_OFF);
}
+
+ updateCameraNumAndIds();
logDeviceAdded(id, "Device added");
}
void CameraService::removeStates(const String8 id) {
+ updateCameraNumAndIds();
if (mFlashlight->hasFlashUnit(id)) {
+ Mutex::Autolock al(mTorchStatusMutex);
mTorchStatusMap.removeItem(id);
}
@@ -351,15 +298,16 @@
if (newStatus == StatusInternal::NOT_PRESENT) {
logDeviceRemoved(id, String8::format("Device status changed from %d to %d", oldStatus,
newStatus));
+
+ // Set the device status to NOT_PRESENT, clients will no longer be able to connect
+ // to this device until the status changes
+ updateStatus(StatusInternal::NOT_PRESENT, id);
+
sp<BasicClient> clientToDisconnect;
{
// Don't do this in updateStatus to avoid deadlock over mServiceLock
Mutex::Autolock lock(mServiceLock);
- // Set the device status to NOT_PRESENT, clients will no longer be able to connect
- // to this device until the status changes
- updateStatus(StatusInternal::NOT_PRESENT, id);
-
// Remove cached shim parameters
state->setShimParams(CameraParameters());
@@ -462,7 +410,7 @@
Mutex::Autolock l(mServiceLock);
switch (type) {
case CAMERA_TYPE_BACKWARD_COMPATIBLE:
- *numCameras = mNumberOfNormalCameras;
+ *numCameras = static_cast<int>(mNormalDeviceIds.size());
break;
case CAMERA_TYPE_ALL:
*numCameras = mNumberOfCameras;
@@ -492,7 +440,8 @@
}
Status ret = Status::ok();
- status_t err = mCameraProviderManager->getCameraInfo(std::to_string(cameraId), cameraInfo);
+ status_t err = mCameraProviderManager->getCameraInfo(
+ cameraIdIntToStrLocked(cameraId), cameraInfo);
if (err != OK) {
ret = STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
"Error retrieving camera info from device %d: %s (%d)", cameraId,
@@ -502,13 +451,19 @@
return ret;
}
-int CameraService::cameraIdToInt(const String8& cameraId) {
- int id;
- bool success = base::ParseInt(cameraId.string(), &id, 0);
- if (!success) {
- return -1;
+std::string CameraService::cameraIdIntToStrLocked(int cameraIdInt) {
+ if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(mNormalDeviceIds.size())) {
+ ALOGE("%s: input id %d invalid: valid range (0, %zu)",
+ __FUNCTION__, cameraIdInt, mNormalDeviceIds.size());
+ return std::string{};
}
- return id;
+
+ return mNormalDeviceIds[cameraIdInt];
+}
+
+String8 CameraService::cameraIdIntToStr(int cameraIdInt) {
+ Mutex::Autolock lock(mServiceLock);
+ return String8(cameraIdIntToStrLocked(cameraIdInt).c_str());
}
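A standalone model of the id mapping introduced above (not code from this change): an API1 integer camera id is simply an index into the ordered list of API1-compatible device ids, and any out-of-range index maps to an empty string.

#include <string>
#include <vector>

static std::string idIntToStr(const std::vector<std::string>& normalDeviceIds, int idx) {
    if (idx < 0 || idx >= static_cast<int>(normalDeviceIds.size())) {
        return std::string{};
    }
    return normalDeviceIds[idx];
}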
Status CameraService::getCameraCharacteristics(const String16& cameraId,
@@ -625,8 +580,8 @@
Status CameraService::makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
- int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
- int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+ int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
+ bool legacyMode, int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client) {
if (halVersion < 0 || halVersion == deviceVersion) {
@@ -636,8 +591,9 @@
case CAMERA_DEVICE_API_VERSION_1_0:
if (effectiveApiLevel == API_1) { // Camera1 API route
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new CameraClient(cameraService, tmp, packageName, cameraIdToInt(cameraId),
- facing, clientPid, clientUid, getpid(), legacyMode);
+ *client = new CameraClient(cameraService, tmp, packageName,
+ api1CameraId, facing, clientPid, clientUid,
+ getpid(), legacyMode);
} else { // Camera2 API route
ALOGW("Camera using old HAL version: %d", deviceVersion);
return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
@@ -652,8 +608,10 @@
case CAMERA_DEVICE_API_VERSION_3_4:
if (effectiveApiLevel == API_1) { // Camera1 API route
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new Camera2Client(cameraService, tmp, packageName, cameraIdToInt(cameraId),
- facing, clientPid, clientUid, servicePid, legacyMode);
+ *client = new Camera2Client(cameraService, tmp, packageName,
+ cameraId, api1CameraId,
+ facing, clientPid, clientUid,
+ servicePid, legacyMode);
} else { // Camera2 API route
sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
@@ -675,8 +633,9 @@
halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
// Only support higher HAL version device opened as HAL1.0 device.
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new CameraClient(cameraService, tmp, packageName, cameraIdToInt(cameraId),
- facing, clientPid, clientUid, servicePid, legacyMode);
+ *client = new CameraClient(cameraService, tmp, packageName,
+ api1CameraId, facing, clientPid, clientUid,
+ servicePid, legacyMode);
} else {
// Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
@@ -772,7 +731,8 @@
Status ret = Status::ok();
sp<Client> tmp = nullptr;
if (!(ret = connectHelper<ICameraClient,Client>(
- sp<ICameraClient>{nullptr}, id, static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED),
+ sp<ICameraClient>{nullptr}, id, cameraId,
+ static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED),
internalPackageName, uid, USE_CALLING_PID,
API_1, /*legacyMode*/ false, /*shimUpdateOnly*/ true,
/*out*/ tmp)
@@ -945,6 +905,15 @@
clientName8.string(), clientUid, clientPid, cameraId.string());
}
+ // Make sure the UID is in an active state to use the camera
+ if (!mUidPolicy->isUidActive(callingUid, String16(clientName8))) {
+ ALOGE("Access Denial: can't use the camera from an idle UID pid=%d, uid=%d",
+ clientPid, clientUid);
+ return STATUS_ERROR_FMT(ERROR_DISABLED,
+ "Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" from background",
+ clientName8.string(), clientUid, clientPid, cameraId.string());
+ }
+
// Only use passed in clientPid to check permission. Use calling PID as the client PID that's
// connected to camera service directly.
originalClientPid = clientPid;
@@ -1216,7 +1185,7 @@
Status CameraService::connect(
const sp<ICameraClient>& cameraClient,
- int cameraId,
+ int api1CameraId,
const String16& clientPackageName,
int clientUid,
int clientPid,
@@ -1225,9 +1194,10 @@
ATRACE_CALL();
Status ret = Status::ok();
- String8 id = String8::format("%d", cameraId);
+
+ String8 id = cameraIdIntToStr(api1CameraId);
sp<Client> client = nullptr;
- ret = connectHelper<ICameraClient,Client>(cameraClient, id,
+ ret = connectHelper<ICameraClient,Client>(cameraClient, id, api1CameraId,
CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, clientPid, API_1,
/*legacyMode*/ false, /*shimUpdateOnly*/ false,
/*out*/client);
@@ -1244,18 +1214,18 @@
Status CameraService::connectLegacy(
const sp<ICameraClient>& cameraClient,
- int cameraId, int halVersion,
+ int api1CameraId, int halVersion,
const String16& clientPackageName,
int clientUid,
/*out*/
sp<ICamera>* device) {
ATRACE_CALL();
- String8 id = String8::format("%d", cameraId);
+ String8 id = cameraIdIntToStr(api1CameraId);
Status ret = Status::ok();
sp<Client> client = nullptr;
- ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion,
+ ret = connectHelper<ICameraClient,Client>(cameraClient, id, api1CameraId, halVersion,
clientPackageName, clientUid, USE_CALLING_PID, API_1,
/*legacyMode*/ true, /*shimUpdateOnly*/ false,
/*out*/client);
@@ -1283,6 +1253,7 @@
String8 id = String8(cameraId);
sp<CameraDeviceClient> client = nullptr;
ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
+ /*api1CameraId*/-1,
CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
clientUid, USE_CALLING_PID, API_2,
/*legacyMode*/ false, /*shimUpdateOnly*/ false,
@@ -1300,8 +1271,8 @@
template<class CALLBACK, class CLIENT>
Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
- int halVersion, const String16& clientPackageName, int clientUid, int clientPid,
- apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+ int api1CameraId, int halVersion, const String16& clientPackageName, int clientUid,
+ int clientPid, apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
/*out*/sp<CLIENT>& device) {
binder::Status ret = binder::Status::ok();
@@ -1384,8 +1355,10 @@
}
sp<BasicClient> tmp = nullptr;
- if(!(ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
- clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
+ if(!(ret = makeClient(this, cameraCb, clientPackageName,
+ cameraId, api1CameraId, facing,
+ clientPid, clientUid, getpid(), legacyMode,
+ halVersion, deviceVersion, effectiveApiLevel,
/*out*/&tmp)).isOk()) {
return ret;
}
@@ -1394,7 +1367,7 @@
LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
__FUNCTION__);
- err = client->initialize(mCameraProviderManager);
+ err = client->initialize(mCameraProviderManager, mMonitorTags);
if (err != OK) {
ALOGE("%s: Could not initialize client from HAL.", __FUNCTION__);
// Errors could be from the HAL module open call or from AppOpsManager
@@ -1579,6 +1552,9 @@
switch(eventId) {
case ICameraService::EVENT_USER_SWITCHED: {
+ // Try to register for UID policy updates, in case we're recovering
+ // from a system server crash
+ mUidPolicy->registerSelf();
doUserSwitch(/*newUserIds*/ args);
break;
}
@@ -1763,8 +1739,6 @@
}
bool CameraService::evictClientIdByRemote(const wp<IBinder>& remote) {
- const int callingPid = getCallingPid();
- const int servicePid = getpid();
bool ret = false;
{
// Acquire mServiceLock and prevent other clients from connecting
@@ -1780,8 +1754,7 @@
mActiveClientManager.remove(i);
continue;
}
- if (remote == clientSp->getRemote() && (callingPid == servicePid ||
- callingPid == clientSp->getClientPid())) {
+ if (remote == clientSp->getRemote()) {
mActiveClientManager.remove(i);
evicted.push_back(clientSp);
@@ -1981,6 +1954,30 @@
// Permission checks
switch (code) {
+ case SHELL_COMMAND_TRANSACTION: {
+ int in = data.readFileDescriptor();
+ int out = data.readFileDescriptor();
+ int err = data.readFileDescriptor();
+ int argc = data.readInt32();
+ Vector<String16> args;
+ for (int i = 0; i < argc && data.dataAvail() > 0; i++) {
+ args.add(data.readString16());
+ }
+ sp<IBinder> unusedCallback;
+ sp<IResultReceiver> resultReceiver;
+ status_t status;
+ if ((status = data.readNullableStrongBinder(&unusedCallback)) != NO_ERROR) {
+ return status;
+ }
+ if ((status = data.readNullableStrongBinder(&resultReceiver)) != NO_ERROR) {
+ return status;
+ }
+ status = shellCommand(in, out, err, args);
+ if (resultReceiver != nullptr) {
+ resultReceiver->send(status);
+ }
+ return NO_ERROR;
+ }
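+ // Editor's note (descriptive only, not part of the original change): the parcel
+ // layout consumed above -- three file descriptors, an argument count, the argument
+ // strings, then two nullable binders (callback and result receiver) -- is assumed
+ // to match what the framework's Binder shell-command helper writes on the caller
+ // side; shellCommand() performs its own permission and argument validation before
+ // acting on the request.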
case BnCameraService::NOTIFYSYSTEMEVENT: {
if (pid != selfPid) {
// Ensure we're being called by system_server, or similar process with
@@ -2004,14 +2001,18 @@
// A reference count is kept to determine when we will actually release the
// media players.
-MediaPlayer* CameraService::newMediaPlayer(const char *file) {
- MediaPlayer* mp = new MediaPlayer();
- if (mp->setDataSource(NULL /* httpService */, file, NULL) == NO_ERROR) {
+sp<MediaPlayer> CameraService::newMediaPlayer(const char *file) {
+ sp<MediaPlayer> mp = new MediaPlayer();
+ status_t error;
+ if ((error = mp->setDataSource(NULL /* httpService */, file, NULL)) == NO_ERROR) {
mp->setAudioStreamType(AUDIO_STREAM_ENFORCED_AUDIBLE);
- mp->prepare();
- } else {
+ error = mp->prepare();
+ }
+ if (error != NO_ERROR) {
ALOGE("Failed to load CameraService sounds: %s", file);
- return NULL;
+ mp->disconnect();
+ mp.clear();
+ return nullptr;
}
return mp;
}
@@ -2023,9 +2024,19 @@
LOG1("CameraService::loadSound ref=%d", mSoundRef);
if (mSoundRef++) return;
- mSoundPlayer[SOUND_SHUTTER] = newMediaPlayer("/system/media/audio/ui/camera_click.ogg");
- mSoundPlayer[SOUND_RECORDING_START] = newMediaPlayer("/system/media/audio/ui/VideoRecord.ogg");
- mSoundPlayer[SOUND_RECORDING_STOP] = newMediaPlayer("/system/media/audio/ui/VideoStop.ogg");
+ mSoundPlayer[SOUND_SHUTTER] = newMediaPlayer("/product/media/audio/ui/camera_click.ogg");
+ if (mSoundPlayer[SOUND_SHUTTER] == nullptr) {
+ mSoundPlayer[SOUND_SHUTTER] = newMediaPlayer("/system/media/audio/ui/camera_click.ogg");
+ }
+ mSoundPlayer[SOUND_RECORDING_START] = newMediaPlayer("/product/media/audio/ui/VideoRecord.ogg");
+ if (mSoundPlayer[SOUND_RECORDING_START] == nullptr) {
+ mSoundPlayer[SOUND_RECORDING_START] =
+ newMediaPlayer("/system/media/audio/ui/VideoRecord.ogg");
+ }
+ mSoundPlayer[SOUND_RECORDING_STOP] = newMediaPlayer("/product/media/audio/ui/VideoStop.ogg");
+ if (mSoundPlayer[SOUND_RECORDING_STOP] == nullptr) {
+ mSoundPlayer[SOUND_RECORDING_STOP] = newMediaPlayer("/system/media/audio/ui/VideoStop.ogg");
+ }
}
void CameraService::releaseSound() {
@@ -2058,7 +2069,8 @@
CameraService::Client::Client(const sp<CameraService>& cameraService,
const sp<ICameraClient>& cameraClient,
const String16& clientPackageName,
- const String8& cameraIdStr, int cameraFacing,
+ const String8& cameraIdStr,
+ int api1CameraId, int cameraFacing,
int clientPid, uid_t clientUid,
int servicePid) :
CameraService::BasicClient(cameraService,
@@ -2067,7 +2079,7 @@
cameraIdStr, cameraFacing,
clientPid, clientUid,
servicePid),
- mCameraId(CameraService::cameraIdToInt(cameraIdStr))
+ mCameraId(api1CameraId)
{
int callingPid = getCallingPid();
LOG1("Client::Client E (pid %d, id %d)", callingPid, mCameraId);
@@ -2214,8 +2226,8 @@
mAppOpsManager.startWatchingMode(AppOpsManager::OP_CAMERA,
mClientPackageName, mOpsCallback);
- res = mAppOpsManager.startOp(AppOpsManager::OP_CAMERA,
- mClientUid, mClientPackageName);
+ res = mAppOpsManager.startOpNoThrow(AppOpsManager::OP_CAMERA,
+ mClientUid, mClientPackageName, /*startIfModeDefault*/ false);
if (res == AppOpsManager::MODE_ERRORED) {
ALOGI("Camera %s: Access for \"%s\" has been revoked",
@@ -2235,9 +2247,13 @@
// Transition device availability listeners from PRESENT -> NOT_AVAILABLE
sCameraService->updateStatus(StatusInternal::NOT_AVAILABLE, mCameraIdStr);
+ int apiLevel = hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1;
+ if (canCastToApiClient(API_2)) {
+ apiLevel = hardware::ICameraServiceProxy::CAMERA_API_LEVEL_2;
+ }
// Transition device state to OPEN
sCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_OPEN,
- mCameraIdStr, mCameraFacing, mClientPackageName);
+ mCameraIdStr, mCameraFacing, mClientPackageName, apiLevel);
return OK;
}
@@ -2262,9 +2278,13 @@
sCameraService->updateStatus(StatusInternal::PRESENT,
mCameraIdStr, rejected);
+ int apiLevel = hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1;
+ if (canCastToApiClient(API_2)) {
+ apiLevel = hardware::ICameraServiceProxy::CAMERA_API_LEVEL_2;
+ }
// Transition device state to CLOSED
sCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_CLOSED,
- mCameraIdStr, mCameraFacing, mClientPackageName);
+ mCameraIdStr, mCameraFacing, mClientPackageName, apiLevel);
}
// Always stop watching, even if no camera op is active
if (mOpsCallback != NULL) {
@@ -2298,23 +2318,32 @@
if (res != AppOpsManager::MODE_ALLOWED) {
ALOGI("Camera %s: Access for \"%s\" revoked", mCameraIdStr.string(),
myName.string());
- // Reset the client PID to allow server-initiated disconnect,
- // and to prevent further calls by client.
- mClientPid = getCallingPid();
- CaptureResultExtras resultExtras; // a dummy result (invalid)
- notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE, resultExtras);
- disconnect();
+ block();
}
}
+void CameraService::BasicClient::block() {
+ ATRACE_CALL();
+
+ // Reset the client PID to allow server-initiated disconnect,
+ // and to prevent further calls by client.
+ mClientPid = getCallingPid();
+ CaptureResultExtras resultExtras; // a dummy result (invalid)
+ notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISABLED, resultExtras);
+ disconnect();
+}
+
// ----------------------------------------------------------------------------
void CameraService::Client::notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) {
- (void) errorCode;
(void) resultExtras;
if (mRemoteCallback != NULL) {
- mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
+ int32_t api1ErrorCode = CAMERA_ERROR_RELEASED;
+ if (errorCode == hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISABLED) {
+ api1ErrorCode = CAMERA_ERROR_DISABLED;
+ }
+ mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, api1ErrorCode, 0);
} else {
ALOGE("mRemoteCallback is NULL!!");
}
@@ -2343,6 +2372,133 @@
}
// ----------------------------------------------------------------------------
+// UidPolicy
+// ----------------------------------------------------------------------------
+
+void CameraService::UidPolicy::registerSelf() {
+ Mutex::Autolock _l(mUidLock);
+
+ ActivityManager am;
+ if (mRegistered) return;
+ am.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
+ | ActivityManager::UID_OBSERVER_IDLE
+ | ActivityManager::UID_OBSERVER_ACTIVE,
+ ActivityManager::PROCESS_STATE_UNKNOWN,
+ String16("cameraserver"));
+ status_t res = am.linkToDeath(this);
+ if (res == OK) {
+ mRegistered = true;
+ ALOGV("UidPolicy: Registered with ActivityManager");
+ }
+}
+
+void CameraService::UidPolicy::unregisterSelf() {
+ Mutex::Autolock _l(mUidLock);
+
+ ActivityManager am;
+ am.unregisterUidObserver(this);
+ am.unlinkToDeath(this);
+ mRegistered = false;
+ mActiveUids.clear();
+ ALOGV("UidPolicy: Unregistered with ActivityManager");
+}
+
+void CameraService::UidPolicy::onUidGone(uid_t uid, bool disabled) {
+ onUidIdle(uid, disabled);
+}
+
+void CameraService::UidPolicy::onUidActive(uid_t uid) {
+ Mutex::Autolock _l(mUidLock);
+ mActiveUids.insert(uid);
+}
+
+void CameraService::UidPolicy::onUidIdle(uid_t uid, bool /* disabled */) {
+ bool deleted = false;
+ {
+ Mutex::Autolock _l(mUidLock);
+ if (mActiveUids.erase(uid) > 0) {
+ deleted = true;
+ }
+ }
+ if (deleted) {
+ sp<CameraService> service = mService.promote();
+ if (service != nullptr) {
+ service->blockClientsForUid(uid);
+ }
+ }
+}
+
+bool CameraService::UidPolicy::isUidActive(uid_t uid, String16 callingPackage) {
+ Mutex::Autolock _l(mUidLock);
+ return isUidActiveLocked(uid, callingPackage);
+}
+
+bool CameraService::UidPolicy::isUidActiveLocked(uid_t uid, String16 callingPackage) {
+ // Non-app UIDs are considered always active
+ // If activity manager is unreachable, assume everything is active
+ if (uid < FIRST_APPLICATION_UID || !mRegistered) {
+ return true;
+ }
+ auto it = mOverrideUids.find(uid);
+ if (it != mOverrideUids.end()) {
+ return it->second;
+ }
+ bool active = mActiveUids.find(uid) != mActiveUids.end();
+ if (!active) {
+ // We want active UIDs to get camera access on their first attempt, since
+ // there is no guarantee the app is written robustly enough to retry on
+ // failure. The inverse case is not a problem: we would take the camera away
+ // soon after receiving the callback that the UID is no longer active.
+ ActivityManager am;
+ // Okay to call into ActivityManager with the lock held, as UID change
+ // callbacks are dispatched to us without a lock and we are a higher-level component.
+ active = am.isUidActive(uid, callingPackage);
+ if (active) {
+ // Now that we found out the UID is actually active, cache that
+ mActiveUids.insert(uid);
+ }
+ }
+ return active;
+}
+
+void CameraService::UidPolicy::addOverrideUid(uid_t uid,
+ String16 callingPackage, bool active) {
+ updateOverrideUid(uid, callingPackage, active, true);
+}
+
+void CameraService::UidPolicy::removeOverrideUid(uid_t uid, String16 callingPackage) {
+ updateOverrideUid(uid, callingPackage, false, false);
+}
+
+void CameraService::UidPolicy::binderDied(const wp<IBinder>& /*who*/) {
+ Mutex::Autolock _l(mUidLock);
+ ALOGV("UidPolicy: ActivityManager has died");
+ mRegistered = false;
+ mActiveUids.clear();
+}
+
+void CameraService::UidPolicy::updateOverrideUid(uid_t uid, String16 callingPackage,
+ bool active, bool insert) {
+ bool wasActive = false;
+ bool isActive = false;
+ {
+ Mutex::Autolock _l(mUidLock);
+ wasActive = isUidActiveLocked(uid, callingPackage);
+ mOverrideUids.erase(uid);
+ if (insert) {
+ mOverrideUids.insert(std::pair<uid_t, bool>(uid, active));
+ }
+ isActive = isUidActiveLocked(uid, callingPackage);
+ }
+ if (wasActive != isActive && !isActive) {
+ sp<CameraService> service = mService.promote();
+ if (service != nullptr) {
+ service->blockClientsForUid(uid);
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
// CameraState
// ----------------------------------------------------------------------------
@@ -2524,7 +2680,10 @@
}
dprintf(fd, "\n== Service global info: ==\n\n");
dprintf(fd, "Number of camera devices: %d\n", mNumberOfCameras);
- dprintf(fd, "Number of normal camera devices: %d\n", mNumberOfNormalCameras);
+ dprintf(fd, "Number of normal camera devices: %zu\n", mNormalDeviceIds.size());
+ for (size_t i = 0; i < mNormalDeviceIds.size(); i++) {
+ dprintf(fd, " Device %zu maps to \"%s\"\n", i, mNormalDeviceIds[i].c_str());
+ }
String8 activeClientString = mActiveClientManager.toString();
dprintf(fd, "Active Camera Clients:\n%s", activeClientString.string());
dprintf(fd, "Allowed user IDs: %s\n", toString(mAllowedUsers).string());
@@ -2536,6 +2695,16 @@
dprintf(fd, "CameraStates in use, may be deadlocked\n");
}
+ int argSize = args.size();
+ for (int i = 0; i < argSize; i++) {
+ if (args[i] == TagMonitor::kMonitorOption) {
+ if (i + 1 < argSize) {
+ mMonitorTags = String8(args[i + 1]);
+ }
+ break;
+ }
+ }
+
for (auto& state : mCameraStates) {
String8 cameraId = state.first;
@@ -2663,7 +2832,7 @@
* While tempting to promote the wp<IBinder> into a sp, it's actually not supported by the
* binder driver
*/
-
+ // PID here is approximate and can be wrong.
logClientDied(getCallingPid(), String8("Binder died unexpectedly"));
// check torch client
@@ -2769,11 +2938,11 @@
}
void CameraService::updateProxyDeviceState(int newState,
- const String8& cameraId, int facing, const String16& clientName) {
+ const String8& cameraId, int facing, const String16& clientName, int apiLevel) {
sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
if (proxyBinder == nullptr) return;
String16 id(cameraId);
- proxyBinder->notifyCameraState(id, newState, facing, clientName);
+ proxyBinder->notifyCameraState(id, newState, facing, clientName, apiLevel);
}
status_t CameraService::getTorchStatusLocked(
@@ -2803,4 +2972,92 @@
return OK;
}
+void CameraService::blockClientsForUid(uid_t uid) {
+ const auto clients = mActiveClientManager.getAll();
+ for (auto& current : clients) {
+ if (current != nullptr) {
+ const auto basicClient = current->getValue();
+ if (basicClient.get() != nullptr && basicClient->getClientUid() == uid) {
+ basicClient->block();
+ }
+ }
+ }
+}
+
+// NOTE: This is a remote API - make sure all args are validated
+status_t CameraService::shellCommand(int in, int out, int err, const Vector<String16>& args) {
+ if (!checkCallingPermission(sManageCameraPermission, nullptr, nullptr)) {
+ return PERMISSION_DENIED;
+ }
+ if (in == BAD_TYPE || out == BAD_TYPE || err == BAD_TYPE) {
+ return BAD_VALUE;
+ }
+ if (args.size() == 3 && args[0] == String16("set-uid-state")) {
+ return handleSetUidState(args, err);
+ } else if (args.size() == 2 && args[0] == String16("reset-uid-state")) {
+ return handleResetUidState(args, err);
+ } else if (args.size() == 2 && args[0] == String16("get-uid-state")) {
+ return handleGetUidState(args, out, err);
+ } else if (args.size() == 1 && args[0] == String16("help")) {
+ printHelp(out);
+ return NO_ERROR;
+ }
+ printHelp(err);
+ return BAD_VALUE;
+}
+
+status_t CameraService::handleSetUidState(const Vector<String16>& args, int err) {
+ PermissionController pc;
+ int uid = pc.getPackageUid(args[1], 0);
+ if (uid <= 0) {
+ ALOGE("Unknown package: '%s'", String8(args[1]).string());
+ dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+ return BAD_VALUE;
+ }
+ bool active = false;
+ if (args[2] == String16("active")) {
+ active = true;
+ } else if ((args[2] != String16("idle"))) {
+ ALOGE("Expected active or idle but got: '%s'", String8(args[2]).string());
+ return BAD_VALUE;
+ }
+ mUidPolicy->addOverrideUid(uid, args[1], active);
+ return NO_ERROR;
+}
+
+status_t CameraService::handleResetUidState(const Vector<String16>& args, int err) {
+ PermissionController pc;
+ int uid = pc.getPackageUid(args[1], 0);
+ if (uid < 0) {
+ ALOGE("Unknown package: '%s'", String8(args[1]).string());
+ dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+ return BAD_VALUE;
+ }
+ mUidPolicy->removeOverrideUid(uid, args[1]);
+ return NO_ERROR;
+}
+
+status_t CameraService::handleGetUidState(const Vector<String16>& args, int out, int err) {
+ PermissionController pc;
+ int uid = pc.getPackageUid(args[1], 0);
+ if (uid <= 0) {
+ ALOGE("Unknown package: '%s'", String8(args[1]).string());
+ dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+ return BAD_VALUE;
+ }
+ if (mUidPolicy->isUidActive(uid, args[1])) {
+ return dprintf(out, "active\n");
+ } else {
+ return dprintf(out, "idle\n");
+ }
+}
+
+status_t CameraService::printHelp(int out) {
+ return dprintf(out, "Camera service commands:\n"
+ " get-uid-state <PACKAGE> gets the uid state\n"
+ " set-uid-state <PACKAGE> <active|idle> overrides the uid state\n"
+ " reset-uid-state <PACKAGE> clears the uid state override\n"
+ " help print this message\n");
+}
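+
+// Editor's note (illustrative only, not part of the original change): the shell
+// entry points above are reached with the following argument vectors, dispatched
+// by shellCommand():
+//   {"set-uid-state",   "<PACKAGE>", "active"|"idle"}  -> handleSetUidState()
+//   {"reset-uid-state", "<PACKAGE>"}                   -> handleResetUidState()
+//   {"get-uid-state",   "<PACKAGE>"}                   -> handleGetUidState()
+//   {"help"}                                           -> printHelp()
+// On a device this is typically exercised through the shell "cmd" tool, assuming
+// the service is registered under the name "media.camera", e.g.:
+//   adb shell cmd media.camera set-uid-state com.example.app idle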
+
}; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 12ca372..8d4bcdb 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -27,6 +27,7 @@
#include <binder/AppOpsManager.h>
#include <binder/BinderService.h>
#include <binder/IAppOpsCallback.h>
+#include <binder/IUidObserver.h>
#include <hardware/camera.h>
#include <android/hardware/camera/common/1.0/types.h>
@@ -47,6 +48,8 @@
#include <map>
#include <memory>
#include <utility>
+#include <unordered_map>
+#include <unordered_set>
namespace android {
@@ -59,7 +62,6 @@
public BinderService<CameraService>,
public virtual ::android::hardware::BnCameraService,
public virtual IBinder::DeathRecipient,
- public camera_module_callbacks_t,
public virtual CameraProviderManager::StatusListener
{
friend class BinderService<CameraService>;
@@ -163,6 +165,8 @@
virtual status_t dump(int fd, const Vector<String16>& args);
+ virtual status_t shellCommand(int in, int out, int err, const Vector<String16>& args);
+
/////////////////////////////////////////////////////////////////////
// Client functionality
@@ -185,7 +189,8 @@
int newState,
const String8& cameraId,
int facing,
- const String16& clientName);
+ const String16& clientName,
+ int apiLevel);
/////////////////////////////////////////////////////////////////////
// CameraDeviceFactory functionality
@@ -200,7 +205,8 @@
class BasicClient : public virtual RefBase {
public:
- virtual status_t initialize(sp<CameraProviderManager> manager) = 0;
+ virtual status_t initialize(sp<CameraProviderManager> manager,
+ const String8& monitorTags) = 0;
virtual binder::Status disconnect();
// because we can't virtually inherit IInterface, which breaks
@@ -233,6 +239,9 @@
// Check what API level is used for this client. This is used to determine which
// superclass this can be cast to.
virtual bool canCastToApiClient(apiLevel level) const;
+
+ // Block the client from using the camera
+ virtual void block();
protected:
BasicClient(const sp<CameraService>& cameraService,
const sp<IBinder>& remoteCallback,
@@ -325,6 +334,7 @@
const sp<hardware::ICameraClient>& cameraClient,
const String16& clientPackageName,
const String8& cameraIdStr,
+ int api1CameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -506,13 +516,48 @@
CameraParameters mShimParams;
}; // class CameraState
+ // Observer for UID lifecycle; enforces that UIDs in the idle
+ // state cannot use the camera, in order to protect user privacy.
+ class UidPolicy : public BnUidObserver, public virtual IBinder::DeathRecipient {
+ public:
+ explicit UidPolicy(sp<CameraService> service)
+ : mRegistered(false), mService(service) {}
+
+ void registerSelf();
+ void unregisterSelf();
+
+ bool isUidActive(uid_t uid, String16 callingPackage);
+
+ void onUidGone(uid_t uid, bool disabled);
+ void onUidActive(uid_t uid);
+ void onUidIdle(uid_t uid, bool disabled);
+
+ void addOverrideUid(uid_t uid, String16 callingPackage, bool active);
+ void removeOverrideUid(uid_t uid, String16 callingPackage);
+
+ // IBinder::DeathRecipient implementation
+ virtual void binderDied(const wp<IBinder> &who);
+ private:
+ bool isUidActiveLocked(uid_t uid, String16 callingPackage);
+ void updateOverrideUid(uid_t uid, String16 callingPackage, bool active, bool insert);
+
+ Mutex mUidLock;
+ bool mRegistered;
+ wp<CameraService> mService;
+ std::unordered_set<uid_t> mActiveUids;
+ std::unordered_map<uid_t, bool> mOverrideUids;
+ }; // class UidPolicy
+
+ sp<UidPolicy> mUidPolicy;
+
// Delay-load the Camera HAL module
virtual void onFirstRef();
// Enumerate all camera providers in the system
status_t enumerateProviders();
- // Add a new camera to camera and torch state lists or remove an unplugged one
+ // Add or remove a camera from the camera and torch state lists
+ // Caller must not hold mServiceLock
void addStates(const String8 id);
void removeStates(const String8 id);
@@ -539,7 +584,7 @@
// Single implementation shared between the various connect calls
template<class CALLBACK, class CLIENT>
binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
- int halVersion, const String16& clientPackageName,
+ int api1CameraId, int halVersion, const String16& clientPackageName,
int clientUid, int clientPid,
apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
/*out*/sp<CLIENT>& device);
@@ -566,6 +611,9 @@
RingBuffer<String8> mEventLog;
Mutex mLogLock;
+ // The last monitored tags set by client
+ String8 mMonitorTags;
+
// Currently allowed user IDs
std::set<userid_t> mAllowedUsers;
@@ -601,9 +649,16 @@
void finishConnectLocked(const sp<BasicClient>& client, const DescriptorPtr& desc);
/**
- * Returns the integer corresponding to the given camera ID string, or -1 on failure.
+ * Returns the underlying camera Id string mapped to a camera id int
+ * Empty string is returned when the cameraIdInt is invalid.
*/
- static int cameraIdToInt(const String8& cameraId);
+ String8 cameraIdIntToStr(int cameraIdInt);
+
+ /**
+ * Returns the underlying camera ID string mapped to the given camera ID int.
+ * An empty string is returned when cameraIdInt is invalid.
+ */
+ std::string cameraIdIntToStrLocked(int cameraIdInt);
/**
* Remove a single client corresponding to the given camera id from the list of active clients.
@@ -671,11 +726,17 @@
*/
void dumpEventLog(int fd);
+ /**
+ * This method will acquire mServiceLock
+ */
+ void updateCameraNumAndIds();
+
int mNumberOfCameras;
- int mNumberOfNormalCameras;
+
+ std::vector<std::string> mNormalDeviceIds;
// sounds
- MediaPlayer* newMediaPlayer(const char *file);
+ sp<MediaPlayer> newMediaPlayer(const char *file);
Mutex mSoundLock;
sp<MediaPlayer> mSoundPlayer[NUM_SOUNDS];
@@ -756,6 +817,21 @@
*/
binder::Status getLegacyParametersLazy(int cameraId, /*out*/CameraParameters* parameters);
+ // Blocks all clients belonging to the given UID
+ void blockClientsForUid(uid_t uid);
+
+ // Overrides the UID state to active or idle
+ status_t handleSetUidState(const Vector<String16>& args, int err);
+
+ // Clears the override for the UID state
+ status_t handleResetUidState(const Vector<String16>& args, int err);
+
+ // Gets the UID state
+ status_t handleGetUidState(const Vector<String16>& args, int out, int err);
+
+ // Prints the shell command help
+ status_t printHelp(int out);
+
static int getCallingPid();
static int getCallingUid();
@@ -767,8 +843,8 @@
static binder::Status makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
- int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
- int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+ int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
+ bool legacyMode, int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client);
status_t checkCameraAccess(const String16& opPackageName);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 2cf648f..65faac9 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -49,16 +49,17 @@
Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
const sp<hardware::ICameraClient>& cameraClient,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraDeviceId,
+ int api1CameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
int servicePid,
bool legacyMode):
Camera2ClientBase(cameraService, cameraClient, clientPackageName,
- String8::format("%d", cameraId), cameraFacing,
+ cameraDeviceId, api1CameraId, cameraFacing,
clientPid, clientUid, servicePid),
- mParameters(cameraId, cameraFacing)
+ mParameters(api1CameraId, cameraFacing)
{
ATRACE_CALL();
@@ -68,8 +69,8 @@
mLegacyMode = legacyMode;
}
-status_t Camera2Client::initialize(sp<CameraProviderManager> manager) {
- return initializeImpl(manager);
+status_t Camera2Client::initialize(sp<CameraProviderManager> manager, const String8& monitorTags) {
+ return initializeImpl(manager, monitorTags);
}
bool Camera2Client::isZslEnabledInStillTemplate() {
@@ -87,13 +88,13 @@
}
template<typename TProviderPtr>
-status_t Camera2Client::initializeImpl(TProviderPtr providerPtr)
+status_t Camera2Client::initializeImpl(TProviderPtr providerPtr, const String8& monitorTags)
{
ATRACE_CALL();
ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId);
status_t res;
- res = Camera2ClientBase::initialize(providerPtr);
+ res = Camera2ClientBase::initialize(providerPtr, monitorTags);
if (res != OK) {
return res;
}
@@ -778,7 +779,35 @@
int lastJpegStreamId = mJpegProcessor->getStreamId();
// If jpeg stream will slow down preview, make sure we remove it before starting preview
if (params.slowJpegMode) {
- mJpegProcessor->deleteStream();
+ // Pause preview if we are streaming
+ int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
+ if (activeRequestId != 0) {
+ res = mStreamingProcessor->togglePauseStream(/*pause*/true);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ res = mDevice->waitUntilDrained();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Waiting to stop streaming failed: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ }
+
+ res = mJpegProcessor->deleteStream();
+
+ if (res != OK) {
+ ALOGE("%s: Camera %d: delete Jpeg stream failed: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+
+ if (activeRequestId != 0) {
+ res = mStreamingProcessor->togglePauseStream(/*pause*/false);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't unpause streaming: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ }
+ }
} else {
res = updateProcessorStream(mJpegProcessor, params);
if (res != OK) {
@@ -859,6 +888,12 @@
outputStreams.push(getPreviewStreamId());
+ if (params.isDeviceZslSupported) {
+ // If device ZSL is supported, resume preview buffers that may be paused
+ // during last takePicture().
+ mDevice->dropStreamBuffers(false, getPreviewStreamId());
+ }
+
if (!params.recordingHint) {
if (!restart) {
res = mStreamingProcessor->updatePreviewRequest(params);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 5af74eb..44929c3 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -91,7 +91,8 @@
Camera2Client(const sp<CameraService>& cameraService,
const sp<hardware::ICameraClient>& cameraClient,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraDeviceId,
+ int api1CameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -100,7 +101,8 @@
virtual ~Camera2Client();
- virtual status_t initialize(sp<CameraProviderManager> manager) override;
+ virtual status_t initialize(sp<CameraProviderManager> manager,
+ const String8& monitorTags) override;
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -223,7 +225,7 @@
status_t overrideVideoSnapshotSize(Parameters &params);
template<typename TProviderPtr>
- status_t initializeImpl(TProviderPtr providerPtr);
+ status_t initializeImpl(TProviderPtr providerPtr, const String8& monitorTags);
bool isZslEnabledInStillTemplate();
};
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index e848a3f..f1203f9 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -42,7 +42,7 @@
int clientPid, int clientUid,
int servicePid, bool legacyMode):
Client(cameraService, cameraClient, clientPackageName,
- String8::format("%d", cameraId), cameraFacing, clientPid,
+ String8::format("%d", cameraId), cameraId, cameraFacing, clientPid,
clientUid, servicePid)
{
int callingPid = getCallingPid();
@@ -62,7 +62,8 @@
LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
}
-status_t CameraClient::initialize(sp<CameraProviderManager> manager) {
+status_t CameraClient::initialize(sp<CameraProviderManager> manager,
+ const String8& /*monitorTags*/) {
int callingPid = getCallingPid();
status_t res;
@@ -262,7 +263,8 @@
mHardware->stopPreview();
sCameraService->updateProxyDeviceState(
hardware::ICameraServiceProxy::CAMERA_STATE_IDLE,
- mCameraIdStr, mCameraFacing, mClientPackageName);
+ mCameraIdStr, mCameraFacing, mClientPackageName,
+ hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1);
mHardware->cancelPicture();
// Release the hardware resources.
mHardware->release();
@@ -424,7 +426,8 @@
if (result == NO_ERROR) {
sCameraService->updateProxyDeviceState(
hardware::ICameraServiceProxy::CAMERA_STATE_ACTIVE,
- mCameraIdStr, mCameraFacing, mClientPackageName);
+ mCameraIdStr, mCameraFacing, mClientPackageName,
+ hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1);
}
return result;
}
@@ -467,7 +470,8 @@
mHardware->stopPreview();
sCameraService->updateProxyDeviceState(
hardware::ICameraServiceProxy::CAMERA_STATE_IDLE,
- mCameraIdStr, mCameraFacing, mClientPackageName);
+ mCameraIdStr, mCameraFacing, mClientPackageName,
+ hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1);
mPreviewBuffer.clear();
}
@@ -973,7 +977,8 @@
// idle now, until preview is restarted
sCameraService->updateProxyDeviceState(
hardware::ICameraServiceProxy::CAMERA_STATE_IDLE,
- mCameraIdStr, mCameraFacing, mClientPackageName);
+ mCameraIdStr, mCameraFacing, mClientPackageName,
+ hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1);
mLock.unlock();
}
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 7f93fef..1910536 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -72,7 +72,8 @@
bool legacyMode = false);
~CameraClient();
- virtual status_t initialize(sp<CameraProviderManager> manager) override;
+ virtual status_t initialize(sp<CameraProviderManager> manager,
+ const String8& monitorTags) override;
virtual status_t dump(int fd, const Vector<String16>& args);
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index 0d2dba1..a71a732 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2012-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -154,7 +154,8 @@
callbackFormat, params.previewFormat);
res = device->createStream(mCallbackWindow,
params.previewWidth, params.previewHeight, callbackFormat,
- HAL_DATASPACE_V0_JFIF, CAMERA3_STREAM_ROTATION_0, &mCallbackStreamId);
+ HAL_DATASPACE_V0_JFIF, CAMERA3_STREAM_ROTATION_0, &mCallbackStreamId,
+ String8());
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
"%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index b65f1c7..1ee216f 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -553,6 +553,12 @@
return DONE;
}
+ if (l.mParameters.isDeviceZslSupported) {
+ // If device ZSL is supported, drop all pending preview buffers to reduce the chance of
+ // rendering preview frames newer than the still frame.
+ client->getCameraDevice()->dropStreamBuffers(true, client->getPreviewStreamId());
+ }
+
/**
* Clear the streaming request for still-capture pictures
* (as opposed to i.e. video snapshots)
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 6e21126..0c738e7 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -197,7 +197,6 @@
faceRects[i*4 + 2], scalerCrop);
face.rect[3] = l.mParameters.arrayYToNormalizedWithCrop(
faceRects[i*4 + 3], scalerCrop);
-
face.score = faceScores[i];
if (faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
face.id = faceIds[i];
@@ -292,7 +291,8 @@
}
// Once all 3A states are received, notify the client about 3A changes.
- if (pendingState.aeState != m3aState.aeState) {
+ if (pendingState.aeState != m3aState.aeState ||
+ pendingState.aeTriggerId > m3aState.aeTriggerId) {
ALOGV("%s: Camera %d: AE state %d->%d",
__FUNCTION__, cameraId,
m3aState.aeState, pendingState.aeState);
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
old mode 100644
new mode 100755
index d8b7af2..b7020fe
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2012-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -168,7 +168,8 @@
res = device->createStream(mCaptureWindow,
params.pictureWidth, params.pictureHeight,
HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_V0_JFIF,
- CAMERA3_STREAM_ROTATION_0, &mCaptureStreamId);
+ CAMERA3_STREAM_ROTATION_0, &mCaptureStreamId,
+ String8());
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for capture: "
"%s (%d)", __FUNCTION__, mId,
@@ -198,7 +199,11 @@
return INVALID_OPERATION;
}
- device->deleteStream(mCaptureStreamId);
+ status_t res = device->deleteStream(mCaptureStreamId);
+ if (res != OK) {
+ ALOGE("%s: delete stream %d failed!", __FUNCTION__, mCaptureStreamId);
+ return res;
+ }
mCaptureHeap.clear();
mCaptureWindow.clear();
@@ -395,7 +400,7 @@
}
// Read JFIF segment markers, skip over segment data
- size = 0;
+ size = MARKER_LENGTH; // skip over the SOI marker
while (size <= maxSize - MARKER_LENGTH) {
segment_t *segment = (segment_t*)(jpegBuffer + size);
uint8_t type = checkJpegMarker(segment->marker);
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 050c3f7..d66dec4 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -242,7 +242,9 @@
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, availableFpsRanges.data.i32[i+1])) {
continue;
}
- if (i != 0) supportedPreviewFpsRange += ",";
+ if (supportedPreviewFpsRange.length() > 0) {
+ supportedPreviewFpsRange += ",";
+ }
supportedPreviewFpsRange += String8::format("(%d,%d)",
availableFpsRanges.data.i32[i] * kFpsToApiScale,
availableFpsRanges.data.i32[i+1] * kFpsToApiScale);
@@ -759,12 +761,17 @@
focusingAreas.clear();
focusingAreas.add(Parameters::Area(0,0,0,0,0));
- camera_metadata_ro_entry_t availableFocalLengths =
- staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false);
- if (!availableFocalLengths.count) return NO_INIT;
+ if (fastInfo.isExternalCamera) {
+ params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, -1.0);
+ } else {
+ camera_metadata_ro_entry_t availableFocalLengths =
+ staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false);
+ if (!availableFocalLengths.count) return NO_INIT;
- float minFocalLength = availableFocalLengths.data.f[0];
- params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, minFocalLength);
+ float minFocalLength = availableFocalLengths.data.f[0];
+ params.setFloat(CameraParameters::KEY_FOCAL_LENGTH, minFocalLength);
+ }
+
float horizFov, vertFov;
res = calculatePictureFovs(&horizFov, &vertFov);
@@ -947,13 +954,24 @@
const uint8_t *caps = availableCapabilities.data.u8;
for (size_t i = 0; i < availableCapabilities.count; i++) {
if (ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING ==
- caps[i]) {
+ caps[i]) {
isZslReprocessPresent = true;
break;
}
}
}
+ isDistortionCorrectionSupported = false;
+ camera_metadata_ro_entry_t distortionCorrectionModes =
+ staticInfo(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES);
+ for (size_t i = 0; i < distortionCorrectionModes.count; i++) {
+ if (distortionCorrectionModes.data.u8[i] !=
+ ANDROID_DISTORTION_CORRECTION_MODE_OFF) {
+ isDistortionCorrectionSupported = true;
+ break;
+ }
+ }
+
if (isDeviceZslSupported || slowJpegMode ||
property_get_bool("camera.disable_zsl_mode", false)) {
ALOGI("Camera %d: Disabling ZSL mode", cameraId);
@@ -1091,9 +1109,15 @@
focusDistanceCalibration.data.u8[0] !=
ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED);
+
+ camera_metadata_ro_entry_t hwLevel = staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
+ if (!hwLevel.count) return NO_INIT;
+ fastInfo.isExternalCamera =
+ hwLevel.data.u8[0] == ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL;
+
camera_metadata_ro_entry_t availableFocalLengths =
- staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
- if (!availableFocalLengths.count) return NO_INIT;
+ staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, /*required*/false);
+ if (!availableFocalLengths.count && !fastInfo.isExternalCamera) return NO_INIT;
SortedVector<int32_t> availableFormats = getAvailableOutputFormats();
if (!availableFormats.size()) return NO_INIT;
@@ -1178,10 +1202,14 @@
// Find smallest (widest-angle) focal length to use as basis of still
// picture FOV reporting.
- fastInfo.minFocalLength = availableFocalLengths.data.f[0];
- for (size_t i = 1; i < availableFocalLengths.count; i++) {
- if (fastInfo.minFocalLength > availableFocalLengths.data.f[i]) {
- fastInfo.minFocalLength = availableFocalLengths.data.f[i];
+ if (fastInfo.isExternalCamera) {
+ fastInfo.minFocalLength = -1.0;
+ } else {
+ fastInfo.minFocalLength = availableFocalLengths.data.f[0];
+ for (size_t i = 1; i < availableFocalLengths.count; i++) {
+ if (fastInfo.minFocalLength > availableFocalLengths.data.f[i]) {
+ fastInfo.minFocalLength = availableFocalLengths.data.f[i];
+ }
}
}
@@ -1198,6 +1226,35 @@
fastInfo.maxJpegSize = getMaxSize(getAvailableJpegSizes());
+ isZslReprocessPresent = false;
+ camera_metadata_ro_entry_t availableCapabilities =
+ staticInfo(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ if (0 < availableCapabilities.count) {
+ const uint8_t *caps = availableCapabilities.data.u8;
+ for (size_t i = 0; i < availableCapabilities.count; i++) {
+ if (ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING ==
+ caps[i]) {
+ isZslReprocessPresent = true;
+ break;
+ }
+ }
+ }
+ if (isZslReprocessPresent) {
+ Vector<StreamConfiguration> scs = getStreamConfigurations();
+ Size maxPrivInputSize = {0, 0};
+ for (const auto& sc : scs) {
+ if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT &&
+ sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ if (sc.width * sc.height > maxPrivInputSize.width * maxPrivInputSize.height) {
+ maxPrivInputSize = {sc.width, sc.height};
+ }
+ }
+ }
+ fastInfo.maxZslSize = maxPrivInputSize;
+ } else {
+ fastInfo.maxZslSize = {0, 0};
+ }
+
return OK;
}
@@ -2051,15 +2108,24 @@
if (intent.count == 0) return BAD_VALUE;
+ uint8_t distortionMode = ANDROID_DISTORTION_CORRECTION_MODE_OFF;
if (intent.data.u8[0] == ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE) {
res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
fastInfo.bestStillCaptureFpsRange, 2);
+ distortionMode = ANDROID_DISTORTION_CORRECTION_MODE_HIGH_QUALITY;
} else {
res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
previewFpsRange, 2);
+ distortionMode = ANDROID_DISTORTION_CORRECTION_MODE_FAST;
}
if (res != OK) return res;
+ if (isDistortionCorrectionSupported) {
+ res = request->update(ANDROID_DISTORTION_CORRECTION_MODE,
+ &distortionMode, 1);
+ if (res != OK) return res;
+ }
+
if (autoWhiteBalanceLockAvailable) {
uint8_t reqWbLock = autoWhiteBalanceLock ?
ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
@@ -2870,8 +2936,13 @@
if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
sc.width <= limit.width && sc.height <= limit.height) {
- Size sz = {sc.width, sc.height};
- sizes->push(sz);
+ int64_t minFrameDuration = getMinFrameDurationNs(
+ {sc.width, sc.height}, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
+ if (minFrameDuration > MAX_PREVIEW_RECORD_DURATION_NS) {
+ // Filter slow sizes from preview/record
+ continue;
+ }
+ sizes->push({sc.width, sc.height});
}
}
@@ -3081,6 +3152,16 @@
status_t Parameters::calculatePictureFovs(float *horizFov, float *vertFov)
const {
+ if (fastInfo.isExternalCamera) {
+ if (horizFov != NULL) {
+ *horizFov = -1.0;
+ }
+ if (vertFov != NULL) {
+ *vertFov = -1.0;
+ }
+ return OK;
+ }
+
camera_metadata_ro_entry_t sensorSize =
staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
if (!sensorSize.count) return NO_INIT;
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 17e3d75..97f8ea7 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -177,6 +177,8 @@
bool isZslReprocessPresent;
// Whether the device supports enableZsl.
bool isDeviceZslSupported;
+ // Whether the device supports geometric distortion correction
+ bool isDistortionCorrectionSupported;
// Overall camera state
enum State {
@@ -207,6 +209,11 @@
static const int32_t FPS_MARGIN = 1;
// Max FPS for default parameters
static const int32_t MAX_DEFAULT_FPS = 30;
+ // Minimum FPS for a size to be listed in supported preview/video sizes
+ // Set to slightly less than 30.0 to have some tolerance margin
+ static constexpr double MIN_PREVIEW_RECORD_FPS = 29.97;
+ // Maximum frame duration for a size to be listed in supported preview/video sizes
+ static constexpr int64_t MAX_PREVIEW_RECORD_DURATION_NS = 1e9 / MIN_PREVIEW_RECORD_FPS;
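+ // Editor's note (illustrative only): 1e9 / 29.97 is roughly 33,366,700 ns (~33.4 ms),
+ // so a stream configuration whose minimum frame duration exceeds ~33.4 ms -- i.e. one
+ // that cannot sustain about 30 fps -- is dropped from the preview/video size lists.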
// Full static camera info, object owned by someone else, such as
// Camera2Device.
@@ -233,9 +240,11 @@
}
};
DefaultKeyedVector<uint8_t, OverrideModes> sceneModeOverrides;
+ bool isExternalCamera;
float minFocalLength;
bool useFlexibleYuv;
Size maxJpegSize;
+ Size maxZslSize;
} fastInfo;
// Quirks information; these are short-lived flags to enable workarounds for
@@ -380,6 +389,7 @@
Vector<Size> availablePreviewSizes;
Vector<Size> availableVideoSizes;
// Get size list (that are no larger than limit) from static metadata.
+ // This method also filters out sizes whose minFrameDuration exceeds MAX_PREVIEW_RECORD_DURATION_NS
status_t getFilteredSizes(Size limit, Vector<Size> *sizes);
// Get max size (from the size array) that matches the given aspect ratio.
Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count);
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 73dca73..0786f53 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2012-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -194,7 +194,7 @@
res = device->createStream(mPreviewWindow,
params.previewWidth, params.previewHeight,
CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, HAL_DATASPACE_UNKNOWN,
- CAMERA3_STREAM_ROTATION_0, &mPreviewStreamId);
+ CAMERA3_STREAM_ROTATION_0, &mPreviewStreamId, String8());
if (res != OK) {
ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
@@ -379,7 +379,8 @@
res = device->createStream(mRecordingWindow,
params.videoWidth, params.videoHeight,
params.videoFormat, params.videoDataSpace,
- CAMERA3_STREAM_ROTATION_0, &mRecordingStreamId);
+ CAMERA3_STREAM_ROTATION_0, &mRecordingStreamId,
+ String8());
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for recording: "
"%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index b0607fb..8dc9863 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -231,63 +231,9 @@
return INVALID_OPERATION;
}
- if ((mZslStreamId != NO_STREAM) || (mInputStreamId != NO_STREAM)) {
- // Check if stream parameters have to change
- CameraDeviceBase::StreamInfo streamInfo;
- res = device->getStreamInfo(mZslStreamId, &streamInfo);
- if (res != OK) {
- ALOGE("%s: Camera %d: Error querying capture output stream info: "
- "%s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
- return res;
- }
- if (streamInfo.width != (uint32_t)params.fastInfo.arrayWidth ||
- streamInfo.height != (uint32_t)params.fastInfo.arrayHeight) {
- if (mZslStreamId != NO_STREAM) {
- ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
- "dimensions changed",
- __FUNCTION__, client->getCameraId(), mZslStreamId);
- res = device->deleteStream(mZslStreamId);
- if (res == -EBUSY) {
- ALOGV("%s: Camera %d: Device is busy, call updateStream again "
- " after it becomes idle", __FUNCTION__, mId);
- return res;
- } else if(res != OK) {
- ALOGE("%s: Camera %d: Unable to delete old output stream "
- "for ZSL: %s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
- return res;
- }
- mZslStreamId = NO_STREAM;
- }
-
- if (mInputStreamId != NO_STREAM) {
- ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
- "dimensions changed",
- __FUNCTION__, client->getCameraId(), mInputStreamId);
- res = device->deleteStream(mInputStreamId);
- if (res == -EBUSY) {
- ALOGV("%s: Camera %d: Device is busy, call updateStream again "
- " after it becomes idle", __FUNCTION__, mId);
- return res;
- } else if(res != OK) {
- ALOGE("%s: Camera %d: Unable to delete old output stream "
- "for ZSL: %s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
- return res;
- }
- mInputStreamId = NO_STREAM;
- }
- if (nullptr != mInputProducer.get()) {
- mInputProducer->disconnect(NATIVE_WINDOW_API_CPU);
- mInputProducer.clear();
- }
- }
- }
-
if (mInputStreamId == NO_STREAM) {
- res = device->createInputStream(params.fastInfo.arrayWidth,
- params.fastInfo.arrayHeight, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+ res = device->createInputStream(params.fastInfo.maxZslSize.width,
+ params.fastInfo.maxZslSize.height, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
&mInputStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create input stream: "
@@ -309,9 +255,10 @@
mProducer->setName(String8("Camera2-ZslRingBufferConsumer"));
sp<Surface> outSurface = new Surface(producer);
- res = device->createStream(outSurface, params.fastInfo.arrayWidth,
- params.fastInfo.arrayHeight, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
- HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mZslStreamId);
+ res = device->createStream(outSurface, params.fastInfo.maxZslSize.width,
+ params.fastInfo.maxZslSize.height, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+ HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mZslStreamId,
+ String8());
if (res != OK) {
ALOGE("%s: Camera %d: Can't create ZSL stream: "
"%s (%d)", __FUNCTION__, client->getCameraId(),
@@ -855,29 +802,25 @@
__FUNCTION__);
continue;
}
- uint8_t afMode = entry.data.u8[0];
- if (afMode == ANDROID_CONTROL_AF_MODE_OFF) {
- // Skip all the ZSL buffer for manual AF mode, as we don't really
- // know the af state.
- continue;
- }
-
// Check AF state if device has focuser and focus mode isn't fixed
- if (mHasFocuser && !isFixedFocusMode(afMode)) {
- // Make sure the candidate frame has good focus.
- entry = frame.find(ANDROID_CONTROL_AF_STATE);
- if (entry.count == 0) {
- ALOGW("%s: ZSL queue frame has no AF state field!",
- __FUNCTION__);
- continue;
- }
- uint8_t afState = entry.data.u8[0];
- if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
- afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
- afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
- ALOGVV("%s: ZSL queue frame AF state is %d is not good for capture, skip it",
- __FUNCTION__, afState);
- continue;
+ if (mHasFocuser) {
+ uint8_t afMode = entry.data.u8[0];
+ if (!isFixedFocusMode(afMode)) {
+ // Make sure the candidate frame has good focus.
+ entry = frame.find(ANDROID_CONTROL_AF_STATE);
+ if (entry.count == 0) {
+ ALOGW("%s: ZSL queue frame has no AF state field!",
+ __FUNCTION__);
+ continue;
+ }
+ uint8_t afState = entry.data.u8[0];
+ if (afState != ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED &&
+ afState != ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED &&
+ afState != ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
+ ALOGVV("%s: ZSL queue frame AF state is %d is not good for capture,"
+ " skip it", __FUNCTION__, afState);
+ continue;
+ }
}
}
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 09c2d82..98d0534 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -28,6 +28,8 @@
#include "common/CameraDeviceBase.h"
#include "api2/CameraDeviceClient.h"
+#include <camera_metadata_hidden.h>
+
// Convenience methods for constructing binder::Status objects for error returns
#define STATUS_ERROR(errorCode, errorString) \
@@ -47,6 +49,7 @@
const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
const String8& cameraId,
+ int api1CameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -60,6 +63,8 @@
clientUid,
servicePid),
mRemoteCallback(remoteCallback) {
+ // We don't need it for API2 clients, but Camera2ClientBase requires it.
+ (void) api1CameraId;
}
// Interface used by CameraService
@@ -73,7 +78,8 @@
uid_t clientUid,
int servicePid) :
Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
- cameraId, cameraFacing, clientPid, clientUid, servicePid),
+ cameraId, /*API1 camera ID*/ -1,
+ cameraFacing, clientPid, clientUid, servicePid),
mInputStream(),
mStreamingRequestId(REQUEST_ID_NONE),
mRequestIdCounter(0) {
@@ -82,16 +88,17 @@
ALOGI("CameraDeviceClient %s: Opened", cameraId.string());
}
-status_t CameraDeviceClient::initialize(sp<CameraProviderManager> manager) {
- return initializeImpl(manager);
+status_t CameraDeviceClient::initialize(sp<CameraProviderManager> manager,
+ const String8& monitorTags) {
+ return initializeImpl(manager, monitorTags);
}
template<typename TProviderPtr>
-status_t CameraDeviceClient::initializeImpl(TProviderPtr providerPtr) {
+status_t CameraDeviceClient::initializeImpl(TProviderPtr providerPtr, const String8& monitorTags) {
ATRACE_CALL();
status_t res;
- res = Camera2ClientBase::initialize(providerPtr);
+ res = Camera2ClientBase::initialize(providerPtr, monitorTags);
if (res != OK) {
return res;
}
@@ -106,6 +113,15 @@
/*listener*/this,
/*sendPartials*/true);
+ auto deviceInfo = mDevice->info();
+ camera_metadata_entry_t physicalKeysEntry = deviceInfo.find(
+ ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS);
+ if (physicalKeysEntry.count > 0) {
+ mSupportedPhysicalRequestKeys.insert(mSupportedPhysicalRequestKeys.begin(),
+ physicalKeysEntry.data.i32,
+ physicalKeysEntry.data.i32 + physicalKeysEntry.count);
+ }
+
return OK;
}
@@ -121,6 +137,37 @@
return submitRequestList(requestList, streaming, submitInfo);
}
+binder::Status CameraDeviceClient::insertGbpLocked(const sp<IGraphicBufferProducer>& gbp,
+ SurfaceMap* outSurfaceMap, Vector<int32_t>* outputStreamIds, int32_t *currentStreamId) {
+ int idx = mStreamMap.indexOfKey(IInterface::asBinder(gbp));
+
+ // Trying to submit request with surface that wasn't created
+ if (idx == NAME_NOT_FOUND) {
+ ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
+ " we have not called createStream on",
+ __FUNCTION__, mCameraIdStr.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Request targets Surface that is not part of current capture session");
+ }
+
+ const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
+ if (outSurfaceMap->find(streamSurfaceId.streamId()) == outSurfaceMap->end()) {
+ (*outSurfaceMap)[streamSurfaceId.streamId()] = std::vector<size_t>();
+ outputStreamIds->push_back(streamSurfaceId.streamId());
+ }
+ (*outSurfaceMap)[streamSurfaceId.streamId()].push_back(streamSurfaceId.surfaceId());
+
+ ALOGV("%s: Camera %s: Appending output stream %d surface %d to request",
+ __FUNCTION__, mCameraIdStr.string(), streamSurfaceId.streamId(),
+ streamSurfaceId.surfaceId());
+
+ if (currentStreamId != nullptr) {
+ *currentStreamId = streamSurfaceId.streamId();
+ }
+
+ return binder::Status::ok();
+}
+
binder::Status CameraDeviceClient::submitRequestList(
const std::vector<hardware::camera2::CaptureRequest>& requests,
bool streaming,
@@ -147,7 +194,7 @@
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Empty request list");
}
- List<const CameraMetadata> metadataRequestList;
+ List<const CameraDeviceBase::PhysicalCameraSettingsList> metadataRequestList;
std::list<const SurfaceMap> surfaceMapList;
submitInfo->mRequestId = mRequestIdCounter;
uint32_t loopCounter = 0;
@@ -165,75 +212,163 @@
mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Repeating reprocess requests not supported");
+ } else if (request.mPhysicalCameraSettings.size() > 1) {
+ ALOGE("%s: Camera %s: reprocess requests not supported for "
+ "multiple physical cameras.", __FUNCTION__,
+ mCameraIdStr.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Reprocess requests not supported for multiple cameras");
}
}
- CameraMetadata metadata(request.mMetadata);
- if (metadata.isEmpty()) {
- ALOGE("%s: Camera %s: Sent empty metadata packet. Rejecting request.",
- __FUNCTION__, mCameraIdStr.string());
+ if (request.mPhysicalCameraSettings.empty()) {
+ ALOGE("%s: Camera %s: request doesn't contain any settings.", __FUNCTION__,
+ mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
- "Request settings are empty");
- } else if (request.mSurfaceList.isEmpty()) {
+ "Request doesn't contain any settings");
+ }
+
+ // The first capture settings entry should always match the logical camera id
+ String8 logicalId(request.mPhysicalCameraSettings.begin()->id.c_str());
+ if (mDevice->getId() != logicalId) {
+ ALOGE("%s: Camera %s: Invalid camera request settings.", __FUNCTION__,
+ mCameraIdStr.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Invalid camera request settings");
+ }
+
+ if (request.mSurfaceList.isEmpty() && request.mStreamIdxList.size() == 0) {
ALOGE("%s: Camera %s: Requests must have at least one surface target. "
"Rejecting request.", __FUNCTION__, mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Request has no output targets");
}
- if (!enforceRequestPermissions(metadata)) {
- // Callee logs
- return STATUS_ERROR(CameraService::ERROR_PERMISSION_DENIED,
- "Caller does not have permission to change restricted controls");
- }
-
/**
* Write in the output stream IDs and map from stream ID to surface ID
* which we calculate from the capture request's list of surface target
*/
SurfaceMap surfaceMap;
Vector<int32_t> outputStreamIds;
- for (sp<Surface> surface : request.mSurfaceList) {
- if (surface == 0) continue;
+ std::vector<std::string> requestedPhysicalIds;
+ if (request.mSurfaceList.size() > 0) {
+ for (sp<Surface> surface : request.mSurfaceList) {
+ if (surface == 0) continue;
- sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
- int idx = mStreamMap.indexOfKey(IInterface::asBinder(gbp));
+ int32_t streamId;
+ sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
+ res = insertGbpLocked(gbp, &surfaceMap, &outputStreamIds, &streamId);
+ if (!res.isOk()) {
+ return res;
+ }
- // Trying to submit request with surface that wasn't created
- if (idx == NAME_NOT_FOUND) {
- ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
- " we have not called createStream on",
+ ssize_t index = mConfiguredOutputs.indexOfKey(streamId);
+ if (index >= 0) {
+ String8 requestedPhysicalId(
+ mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
+ requestedPhysicalIds.push_back(requestedPhysicalId.string());
+ } else {
+ ALOGW("%s: Output stream Id not found among configured outputs!", __FUNCTION__);
+ }
+ }
+ } else {
+ for (size_t i = 0; i < request.mStreamIdxList.size(); i++) {
+ int streamId = request.mStreamIdxList.itemAt(i);
+ int surfaceIdx = request.mSurfaceIdxList.itemAt(i);
+
+ ssize_t index = mConfiguredOutputs.indexOfKey(streamId);
+ if (index < 0) {
+ ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
+ " we have not called createStream on: stream %d",
+ __FUNCTION__, mCameraIdStr.string(), streamId);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Request targets Surface that is not part of current capture session");
+ }
+
+ const auto& gbps = mConfiguredOutputs.valueAt(index).getGraphicBufferProducers();
+ if ((size_t)surfaceIdx >= gbps.size()) {
+ ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
+ " we have not called createStream on: stream %d, surfaceIdx %d",
+ __FUNCTION__, mCameraIdStr.string(), streamId, surfaceIdx);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Request targets Surface has invalid surface index");
+ }
+
+ res = insertGbpLocked(gbps[surfaceIdx], &surfaceMap, &outputStreamIds, nullptr);
+ if (!res.isOk()) {
+ return res;
+ }
+
+ String8 requestedPhysicalId(
+ mConfiguredOutputs.valueAt(index).getPhysicalCameraId());
+ requestedPhysicalIds.push_back(requestedPhysicalId.string());
+ }
+ }
+
+ CameraDeviceBase::PhysicalCameraSettingsList physicalSettingsList;
+ for (const auto& it : request.mPhysicalCameraSettings) {
+ if (it.settings.isEmpty()) {
+ ALOGE("%s: Camera %s: Sent empty metadata packet. Rejecting request.",
__FUNCTION__, mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
- "Request targets Surface that is not part of current capture session");
+ "Request settings are empty");
}
- const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
- if (surfaceMap.find(streamSurfaceId.streamId()) == surfaceMap.end()) {
- surfaceMap[streamSurfaceId.streamId()] = std::vector<size_t>();
- outputStreamIds.push_back(streamSurfaceId.streamId());
- }
- surfaceMap[streamSurfaceId.streamId()].push_back(streamSurfaceId.surfaceId());
+ String8 physicalId(it.id.c_str());
+ if (physicalId != mDevice->getId()) {
+ auto found = std::find(requestedPhysicalIds.begin(), requestedPhysicalIds.end(),
+ it.id);
+ if (found == requestedPhysicalIds.end()) {
+ ALOGE("%s: Camera %s: Physical camera id: %s not part of attached outputs.",
+ __FUNCTION__, mCameraIdStr.string(), physicalId.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Invalid physical camera id");
+ }
- ALOGV("%s: Camera %s: Appending output stream %d surface %d to request",
- __FUNCTION__, mCameraIdStr.string(), streamSurfaceId.streamId(),
- streamSurfaceId.surfaceId());
+ if (!mSupportedPhysicalRequestKeys.empty()) {
+ // Filter out any unsupported physical request keys.
+ CameraMetadata filteredParams(mSupportedPhysicalRequestKeys.size());
+ camera_metadata_t *meta = const_cast<camera_metadata_t *>(
+ filteredParams.getAndLock());
+ set_camera_metadata_vendor_id(meta, mDevice->getVendorTagId());
+ filteredParams.unlock(meta);
+
+ for (const auto& keyIt : mSupportedPhysicalRequestKeys) {
+ camera_metadata_ro_entry entry = it.settings.find(keyIt);
+ if (entry.count > 0) {
+ filteredParams.update(entry);
+ }
+ }
+
+ physicalSettingsList.push_back({it.id, filteredParams});
+ }
+ } else {
+ physicalSettingsList.push_back({it.id, it.settings});
+ }
}
- metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
- outputStreamIds.size());
+ if (!enforceRequestPermissions(physicalSettingsList.begin()->metadata)) {
+ // Callee logs
+ return STATUS_ERROR(CameraService::ERROR_PERMISSION_DENIED,
+ "Caller does not have permission to change restricted controls");
+ }
+
+ physicalSettingsList.begin()->metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+ &outputStreamIds[0], outputStreamIds.size());
if (request.mIsReprocess) {
- metadata.update(ANDROID_REQUEST_INPUT_STREAMS, &mInputStream.id, 1);
+ physicalSettingsList.begin()->metadata.update(ANDROID_REQUEST_INPUT_STREAMS,
+ &mInputStream.id, 1);
}
- metadata.update(ANDROID_REQUEST_ID, &(submitInfo->mRequestId), /*size*/1);
+ physicalSettingsList.begin()->metadata.update(ANDROID_REQUEST_ID,
+ &(submitInfo->mRequestId), /*size*/1);
loopCounter++; // loopCounter starts from 1
ALOGV("%s: Camera %s: Creating request with ID %d (%d of %zu)",
__FUNCTION__, mCameraIdStr.string(), submitInfo->mRequestId,
loopCounter, requests.size());
- metadataRequestList.push_back(metadata);
+ metadataRequestList.push_back(physicalSettingsList);
surfaceMapList.push_back(surfaceMap);
}
mRequestIdCounter++;
@@ -318,7 +453,8 @@
return binder::Status::ok();
}
-binder::Status CameraDeviceClient::endConfigure(int operatingMode) {
+binder::Status CameraDeviceClient::endConfigure(int operatingMode,
+ const hardware::camera2::impl::CameraMetadataNative& sessionParams) {
ATRACE_CALL();
ALOGV("%s: ending configure (%d input stream, %zu output surfaces)",
__FUNCTION__, mInputStream.configured ? 1 : 0,
@@ -364,7 +500,7 @@
}
}
- status_t err = mDevice->configureStreams(operatingMode);
+ status_t err = mDevice->configureStreams(sessionParams, operatingMode);
if (err == BAD_VALUE) {
String8 msg = String8::format("Camera %s: Unsupported set of inputs/outputs provided",
mCameraIdStr.string());
@@ -439,6 +575,8 @@
mStreamMap.removeItem(surface);
}
+ mConfiguredOutputs.removeItem(streamId);
+
if (dIndex != NAME_NOT_FOUND) {
mDeferredStreams.removeItemsAt(dIndex);
}
@@ -464,6 +602,7 @@
size_t numBufferProducers = bufferProducers.size();
bool deferredConsumer = outputConfiguration.isDeferred();
bool isShared = outputConfiguration.isShared();
+ String8 physicalCameraId = String8(outputConfiguration.getPhysicalCameraId());
if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
@@ -485,6 +624,20 @@
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
+ if (physicalCameraId.size() > 0) {
+ std::vector<std::string> physicalCameraIds;
+ std::string physicalId(physicalCameraId.string());
+ bool logicalCamera =
+ CameraProviderManager::isLogicalCamera(mDevice->info(), &physicalCameraIds);
+ if (!logicalCamera ||
+ std::find(physicalCameraIds.begin(), physicalCameraIds.end(), physicalId) ==
+ physicalCameraIds.end()) {
+ String8 msg = String8::format("Camera %s: Camera doesn't support physicalCameraId %s.",
+ mCameraIdStr.string(), physicalCameraId.string());
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ }
std::vector<sp<Surface>> surfaces;
std::vector<sp<IBinder>> binders;
status_t err;
@@ -514,14 +667,6 @@
return res;
if (!isStreamInfoValid) {
- // Streaming sharing is only supported for IMPLEMENTATION_DEFINED
- // formats.
- if (isShared && streamInfo.format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
- String8 msg = String8::format("Camera %s: Stream sharing is only supported for "
- "IMPLEMENTATION_DEFINED format", mCameraIdStr.string());
- ALOGW("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
isStreamInfoValid = true;
}
@@ -530,10 +675,12 @@
}
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
+ std::vector<int> surfaceIds;
err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
streamInfo.height, streamInfo.format, streamInfo.dataSpace,
static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, outputConfiguration.getSurfaceSetID(), isShared);
+ &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
+ isShared);
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
@@ -545,9 +692,11 @@
for (auto& binder : binders) {
ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %d",
__FUNCTION__, binder.get(), streamId, i);
- mStreamMap.add(binder, StreamSurfaceId(streamId, i++));
+ mStreamMap.add(binder, StreamSurfaceId(streamId, surfaceIds[i]));
+ i++;
}
+ mConfiguredOutputs.add(streamId, outputConfiguration);
mStreamInfoMap[streamId] = streamInfo;
ALOGV("%s: Camera %s: Successfully created a new stream ID %d for output surface"
@@ -592,10 +741,14 @@
}
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
std::vector<sp<Surface>> noSurface;
+ std::vector<int> surfaceIds;
+ String8 physicalCameraId(outputConfiguration.getPhysicalCameraId());
err = mDevice->createStream(noSurface, /*hasDeferredConsumer*/true, width,
height, format, dataSpace,
static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, outputConfiguration.getSurfaceSetID(), isShared, consumerUsage);
+ &streamId, physicalCameraId, &surfaceIds,
+ outputConfiguration.getSurfaceSetID(), isShared,
+ consumerUsage);
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
@@ -721,6 +874,124 @@
return res;
}
+binder::Status CameraDeviceClient::updateOutputConfiguration(int streamId,
+ const hardware::camera2::params::OutputConfiguration &outputConfiguration) {
+ ATRACE_CALL();
+
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
+
+ const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
+ outputConfiguration.getGraphicBufferProducers();
+ auto producerCount = bufferProducers.size();
+ if (producerCount == 0) {
+ ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "bufferProducers must not be empty");
+ }
+
+ // The first output is the one associated with the output configuration.
+ // It should always be present and valid, and the corresponding stream id should match.
+ sp<IBinder> binder = IInterface::asBinder(bufferProducers[0]);
+ ssize_t index = mStreamMap.indexOfKey(binder);
+ if (index == NAME_NOT_FOUND) {
+ ALOGE("%s: Outputconfiguration is invalid", __FUNCTION__);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "OutputConfiguration is invalid");
+ }
+ if (mStreamMap.valueFor(binder).streamId() != streamId) {
+ ALOGE("%s: Stream Id: %d provided doesn't match the id: %d in the stream map",
+ __FUNCTION__, streamId, mStreamMap.valueFor(binder).streamId());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Stream id is invalid");
+ }
+
+ std::vector<size_t> removedSurfaceIds;
+ std::vector<sp<IBinder>> removedOutputs;
+ std::vector<sp<Surface>> newOutputs;
+ std::vector<OutputStreamInfo> streamInfos;
+ KeyedVector<sp<IBinder>, sp<IGraphicBufferProducer>> newOutputsMap;
+ for (auto &it : bufferProducers) {
+ newOutputsMap.add(IInterface::asBinder(it), it);
+ }
+
+ for (size_t i = 0; i < mStreamMap.size(); i++) {
+ ssize_t idx = newOutputsMap.indexOfKey(mStreamMap.keyAt(i));
+ if (idx == NAME_NOT_FOUND) {
+ if (mStreamMap[i].streamId() == streamId) {
+ removedSurfaceIds.push_back(mStreamMap[i].surfaceId());
+ removedOutputs.push_back(mStreamMap.keyAt(i));
+ }
+ } else {
+ if (mStreamMap[i].streamId() != streamId) {
+ ALOGE("%s: Output surface already part of a different stream", __FUNCTION__);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Target Surface is invalid");
+ }
+ newOutputsMap.removeItemsAt(idx);
+ }
+ }
+
+ for (size_t i = 0; i < newOutputsMap.size(); i++) {
+ OutputStreamInfo outInfo;
+ sp<Surface> surface;
+ res = createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false, surface,
+ newOutputsMap.valueAt(i));
+ if (!res.isOk())
+ return res;
+
+ streamInfos.push_back(outInfo);
+ newOutputs.push_back(surface);
+ }
+
+ // Trivial case: no changes required
+ if (removedSurfaceIds.empty() && newOutputs.empty()) {
+ return binder::Status::ok();
+ }
+
+ KeyedVector<sp<Surface>, size_t> outputMap;
+ auto ret = mDevice->updateStream(streamId, newOutputs, streamInfos, removedSurfaceIds,
+ &outputMap);
+ if (ret != OK) {
+ switch (ret) {
+ case NAME_NOT_FOUND:
+ case BAD_VALUE:
+ case -EBUSY:
+ res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Camera %s: Error updating stream: %s (%d)",
+ mCameraIdStr.string(), strerror(ret), ret);
+ break;
+ default:
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %s: Error updating stream: %s (%d)",
+ mCameraIdStr.string(), strerror(ret), ret);
+ break;
+ }
+ } else {
+ for (const auto &it : removedOutputs) {
+ mStreamMap.removeItem(it);
+ }
+
+ for (size_t i = 0; i < outputMap.size(); i++) {
+ mStreamMap.add(IInterface::asBinder(outputMap.keyAt(i)->getIGraphicBufferProducer()),
+ StreamSurfaceId(streamId, outputMap.valueAt(i)));
+ }
+
+ mConfiguredOutputs.replaceValueFor(streamId, outputConfiguration);
+
+ ALOGV("%s: Camera %s: Successful stream ID %d update",
+ __FUNCTION__, mCameraIdStr.string(), streamId);
+ }
+
+ return res;
+}
+
bool CameraDeviceClient::isPublicFormat(int32_t format)
{
switch(format) {
@@ -739,10 +1010,6 @@
case HAL_PIXEL_FORMAT_BLOB:
case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
case HAL_PIXEL_FORMAT_YCbCr_420_888:
- case HAL_PIXEL_FORMAT_YCbCr_422_888:
- case HAL_PIXEL_FORMAT_YCbCr_444_888:
- case HAL_PIXEL_FORMAT_FLEX_RGB_888:
- case HAL_PIXEL_FORMAT_FLEX_RGBA_8888:
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
case HAL_PIXEL_FORMAT_YCbCr_422_I:
@@ -820,9 +1087,10 @@
}
// FIXME: remove this override since the default format should be
- // IMPLEMENTATION_DEFINED. b/9487482
- if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
- format <= HAL_PIXEL_FORMAT_BGRA_8888) {
+ // IMPLEMENTATION_DEFINED. b/9487482 & b/35317944
+ if ((format >= HAL_PIXEL_FORMAT_RGBA_8888 && format <= HAL_PIXEL_FORMAT_BGRA_8888) &&
+ ((consumerUsage & GRALLOC_USAGE_HW_MASK) &&
+ ((consumerUsage & GRALLOC_USAGE_SW_READ_MASK) == 0))) {
ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
__FUNCTION__, mCameraIdStr.string(), format);
format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
@@ -1242,15 +1510,12 @@
}
std::vector<sp<Surface>> consumerSurfaces;
- std::vector<size_t> consumerSurfaceIds;
- size_t surfaceId = 0;
for (auto& bufferProducer : bufferProducers) {
// Don't create multiple streams for the same target surface
ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
if (index != NAME_NOT_FOUND) {
ALOGV("Camera %s: Surface already has a stream created "
" for it (ID %zd)", mCameraIdStr.string(), index);
- surfaceId++;
continue;
}
@@ -1262,8 +1527,6 @@
return res;
consumerSurfaces.push_back(surface);
- consumerSurfaceIds.push_back(surfaceId);
- surfaceId++;
}
// Gracefully handle case where finalizeOutputConfigurations is called
@@ -1275,12 +1538,13 @@
// Finish the deferred stream configuration with the surface.
status_t err;
- err = mDevice->setConsumerSurfaces(streamId, consumerSurfaces);
+ std::vector<int> consumerSurfaceIds;
+ err = mDevice->setConsumerSurfaces(streamId, consumerSurfaces, &consumerSurfaceIds);
if (err == OK) {
for (size_t i = 0; i < consumerSurfaces.size(); i++) {
sp<IBinder> binder = IInterface::asBinder(
consumerSurfaces[i]->getIGraphicBufferProducer());
- ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %zu", __FUNCTION__,
+ ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %d", __FUNCTION__,
binder.get(), streamId, consumerSurfaceIds[i]);
mStreamMap.add(binder, StreamSurfaceId(streamId, consumerSurfaceIds[i]));
}
@@ -1288,6 +1552,7 @@
mDeferredStreams.removeItemsAt(deferredStreamIndex);
}
mStreamInfoMap[streamId].finalized = true;
+ mConfiguredOutputs.replaceValueFor(streamId, outputConfiguration);
} else if (err == NO_INIT) {
res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Camera %s: Deferred surface is invalid: %s (%d)",
@@ -1432,7 +1697,8 @@
// Thread-safe. No lock necessary.
sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = mRemoteCallback;
if (remoteCb != NULL) {
- remoteCb->onResultReceived(result.mMetadata, result.mResultExtras);
+ remoteCb->onResultReceived(result.mMetadata, result.mResultExtras,
+ result.mPhysicalMetadatas);
}
}
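
Note: the per-physical-camera filtering introduced in submitRequestList above can be summarized by the following standalone sketch. The helper name filterPhysicalRequestKeys and its free-standing signature are illustrative only and are not part of this change; it simply copies into the forwarded metadata those tags listed in ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS and propagates the device's vendor tag id, mirroring the loop in the patch.

#include <camera/CameraMetadata.h>
#include <camera_metadata_hidden.h>
#include <vector>

using android::CameraMetadata;

// Sketch: keep only the supported physical request keys from 'settings'.
static CameraMetadata filterPhysicalRequestKeys(
        const CameraMetadata& settings,
        const std::vector<int32_t>& supportedKeys,
        metadata_vendor_id_t vendorTagId) {
    CameraMetadata filtered(supportedKeys.size());
    // Propagate the vendor tag id so vendor-defined keys resolve correctly.
    camera_metadata_t* raw = const_cast<camera_metadata_t*>(filtered.getAndLock());
    set_camera_metadata_vendor_id(raw, vendorTagId);
    filtered.unlock(raw);
    for (int32_t tag : supportedKeys) {
        camera_metadata_ro_entry entry = settings.find(tag);
        if (entry.count > 0) {
            filtered.update(entry);
        }
    }
    return filtered;
}
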
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 50661cb..5aaf5aa 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,6 +26,8 @@
#include "common/FrameProcessorBase.h"
#include "common/Camera2ClientBase.h"
+using android::camera3::OutputStreamInfo;
+
namespace android {
struct CameraDeviceClientBase :
@@ -43,6 +45,7 @@
const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
const String8& cameraId,
+ int api1CameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -83,7 +86,8 @@
virtual binder::Status beginConfigure() override;
- virtual binder::Status endConfigure(int operatingMode) override;
+ virtual binder::Status endConfigure(int operatingMode,
+ const hardware::camera2::impl::CameraMetadataNative& sessionParams) override;
// Returns -EBUSY if device is not idle or in error state
virtual binder::Status deleteStream(int streamId) override;
@@ -131,6 +135,10 @@
// Prepare stream by preallocating up to maxCount of its buffers
virtual binder::Status prepare2(int32_t maxCount, int32_t streamId) override;
+ // Update an output configuration
+ virtual binder::Status updateOutputConfiguration(int streamId,
+ const hardware::camera2::params::OutputConfiguration &outputConfiguration) override;
+
// Finalize the output configurations with surfaces not added before.
virtual binder::Status finalizeOutputConfigurations(int32_t streamId,
const hardware::camera2::params::OutputConfiguration &outputConfiguration) override;
@@ -149,7 +157,8 @@
int servicePid);
virtual ~CameraDeviceClient();
- virtual status_t initialize(sp<CameraProviderManager> manager) override;
+ virtual status_t initialize(sp<CameraProviderManager> manager,
+ const String8& monitorTags) override;
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -206,24 +215,6 @@
}; // class StreamSurfaceId
- // OutputStreamInfo describes the property of a camera stream.
- class OutputStreamInfo {
- public:
- int width;
- int height;
- int format;
- android_dataspace dataSpace;
- uint64_t consumerUsage;
- bool finalized = false;
- OutputStreamInfo() :
- width(-1), height(-1), format(-1), dataSpace(HAL_DATASPACE_UNKNOWN),
- consumerUsage(0) {}
- OutputStreamInfo(int _width, int _height, int _format, android_dataspace _dataSpace,
- uint64_t _consumerUsage) :
- width(_width), height(_height), format(_format),
- dataSpace(_dataSpace), consumerUsage(_consumerUsage) {}
- };
-
private:
/** ICameraDeviceUser interface-related private members */
@@ -232,8 +223,10 @@
static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0;
static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
+ std::vector<int32_t> mSupportedPhysicalRequestKeys;
+
template<typename TProviderPtr>
- status_t initializeImpl(TProviderPtr providerPtr);
+ status_t initializeImpl(TProviderPtr providerPtr, const String8& monitorTags);
/** Utility members */
binder::Status checkPidStatus(const char* checkLocation);
@@ -267,9 +260,22 @@
binder::Status createSurfaceFromGbp(OutputStreamInfo& streamInfo, bool isStreamInfoValid,
sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp);
+
+ // Utility method to insert the surface into SurfaceMap
+ binder::Status insertGbpLocked(const sp<IGraphicBufferProducer>& gbp,
+ /*out*/SurfaceMap* surfaceMap, /*out*/Vector<int32_t>* streamIds,
+ /*out*/int32_t* currentStreamId);
+
+ // Check that the physicalCameraId passed in is supported by the camera
+ // device.
+ bool checkPhysicalCameraId(const String8& physicalCameraId);
+
// IGraphicsBufferProducer binder -> Stream ID + Surface ID for output streams
KeyedVector<sp<IBinder>, StreamSurfaceId> mStreamMap;
+ // Stream ID -> OutputConfiguration. Used for looking up Surface by stream/surface index
+ KeyedVector<int32_t, hardware::camera2::params::OutputConfiguration> mConfiguredOutputs;
+
struct InputStreamConfiguration {
bool configured;
int32_t width;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 51ef160..ce006a7 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -48,15 +48,16 @@
const sp<TCamCallbacks>& remoteCallback,
const String16& clientPackageName,
const String8& cameraId,
+ int api1CameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
int servicePid):
TClientBase(cameraService, remoteCallback, clientPackageName,
- cameraId, cameraFacing, clientPid, clientUid, servicePid),
+ cameraId, api1CameraId, cameraFacing, clientPid, clientUid, servicePid),
mSharedCameraCallbacks(remoteCallback),
mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
- mDeviceActive(false)
+ mDeviceActive(false), mApi1CameraId(api1CameraId)
{
ALOGI("Camera %s: Opened. Client: %s (PID %d, UID %d)", cameraId.string(),
String8(clientPackageName).string(), clientPid, clientUid);
@@ -79,13 +80,15 @@
}
template <typename TClientBase>
-status_t Camera2ClientBase<TClientBase>::initialize(sp<CameraProviderManager> manager) {
- return initializeImpl(manager);
+status_t Camera2ClientBase<TClientBase>::initialize(sp<CameraProviderManager> manager,
+ const String8& monitorTags) {
+ return initializeImpl(manager, monitorTags);
}
template <typename TClientBase>
template <typename TProviderPtr>
-status_t Camera2ClientBase<TClientBase>::initializeImpl(TProviderPtr providerPtr) {
+status_t Camera2ClientBase<TClientBase>::initializeImpl(TProviderPtr providerPtr,
+ const String8& monitorTags) {
ATRACE_CALL();
ALOGV("%s: Initializing client for camera %s", __FUNCTION__,
TClientBase::mCameraIdStr.string());
@@ -103,7 +106,7 @@
return NO_INIT;
}
- res = mDevice->initialize(providerPtr);
+ res = mDevice->initialize(providerPtr, monitorTags);
if (res != OK) {
ALOGE("%s: Camera %s: unable to initialize device: %s (%d)",
__FUNCTION__, TClientBase::mCameraIdStr.string(), strerror(-res), res);
@@ -156,13 +159,14 @@
result = " Device dump:\n";
write(fd, result.string(), result.size());
- if (!mDevice.get()) {
+ sp<CameraDeviceBase> device = mDevice;
+ if (!device.get()) {
result = " *** Device is detached\n";
write(fd, result.string(), result.size());
return NO_ERROR;
}
- status_t res = mDevice->dump(fd, args);
+ status_t res = device->dump(fd, args);
if (res != OK) {
result = String8::format(" Error dumping device: %s (%d)",
strerror(-res), res);
@@ -249,7 +253,9 @@
if (mDeviceActive) {
getCameraService()->updateProxyDeviceState(
hardware::ICameraServiceProxy::CAMERA_STATE_IDLE, TClientBase::mCameraIdStr,
- TClientBase::mCameraFacing, TClientBase::mClientPackageName);
+ TClientBase::mCameraFacing, TClientBase::mClientPackageName,
+ ((mApi1CameraId < 0) ? hardware::ICameraServiceProxy::CAMERA_API_LEVEL_2 :
+ hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1));
}
mDeviceActive = false;
@@ -265,7 +271,9 @@
if (!mDeviceActive) {
getCameraService()->updateProxyDeviceState(
hardware::ICameraServiceProxy::CAMERA_STATE_ACTIVE, TClientBase::mCameraIdStr,
- TClientBase::mCameraFacing, TClientBase::mClientPackageName);
+ TClientBase::mCameraFacing, TClientBase::mClientPackageName,
+ ((mApi1CameraId < 0) ? hardware::ICameraServiceProxy::CAMERA_API_LEVEL_2 :
+ hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1));
}
mDeviceActive = true;
@@ -328,7 +336,7 @@
template <typename TClientBase>
int Camera2ClientBase<TClientBase>::getCameraId() const {
- return std::stoi(TClientBase::mCameraIdStr.string());
+ return mApi1CameraId;
}
template <typename TClientBase>
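
For reference, the API-level selection passed to updateProxyDeviceState above reduces to a small helper; the function name proxyApiLevel is illustrative and not part of this change, and it assumes ICameraServiceProxy is visible in the android namespace. A negative api1CameraId marks an API2 client.

// Sketch: map the stored API1 camera id onto the proxy API level constants.
static int proxyApiLevel(int api1CameraId) {
    return (api1CameraId < 0)
            ? hardware::ICameraServiceProxy::CAMERA_API_LEVEL_2
            : hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1;
}
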
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index e898d5d..e74fbdf 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -49,13 +49,14 @@
const sp<TCamCallbacks>& remoteCallback,
const String16& clientPackageName,
const String8& cameraId,
+ int api1CameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
int servicePid);
virtual ~Camera2ClientBase();
- virtual status_t initialize(sp<CameraProviderManager> manager);
+ virtual status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags);
virtual status_t dumpClient(int fd, const Vector<String16>& args);
/**
@@ -140,9 +141,11 @@
bool mDeviceActive;
+ const int mApi1CameraId; // -1 if client is API2
+
private:
template<typename TProviderPtr>
- status_t initializeImpl(TProviderPtr providerPtr);
+ status_t initializeImpl(TProviderPtr providerPtr, const String8& monitorTags);
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 3919bfa..0ba7403 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,6 +23,7 @@
#include <utils/String8.h>
#include <utils/String16.h>
#include <utils/Vector.h>
+#include <utils/KeyedVector.h>
#include <utils/Timers.h>
#include <utils/List.h>
@@ -54,7 +55,12 @@
*/
virtual const String8& getId() const = 0;
- virtual status_t initialize(sp<CameraProviderManager> manager) = 0;
+ /**
+ * The device vendor tag ID
+ */
+ virtual metadata_vendor_id_t getVendorTagId() const = 0;
+
+ virtual status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags) = 0;
virtual status_t disconnect() = 0;
virtual status_t dump(int fd, const Vector<String16> &args) = 0;
@@ -64,6 +70,12 @@
*/
virtual const CameraMetadata& info() const = 0;
+ struct PhysicalCameraSettings {
+ std::string cameraId;
+ CameraMetadata metadata;
+ };
+ typedef List<PhysicalCameraSettings> PhysicalCameraSettingsList;
+
/**
* Submit request for capture. The CameraDevice takes ownership of the
* passed-in buffer.
@@ -75,7 +87,7 @@
* Submit a list of requests.
* Output lastFrameNumber is the expected last frame number of the list of requests.
*/
- virtual status_t captureList(const List<const CameraMetadata> &requests,
+ virtual status_t captureList(const List<const PhysicalCameraSettingsList> &requests,
const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) = 0;
@@ -91,7 +103,7 @@
* Submit a list of requests for streaming.
* Output lastFrameNumber is the last frame number of the previous streaming request.
*/
- virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+ virtual status_t setStreamingRequestList(const List<const PhysicalCameraSettingsList> &requests,
const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) = 0;
@@ -118,6 +130,8 @@
virtual status_t createStream(sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ const String8& physicalCameraId,
+ std::vector<int> *surfaceIds = nullptr,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
bool isShared = false, uint64_t consumerUsage = 0) = 0;
@@ -131,6 +145,8 @@
virtual status_t createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ const String8& physicalCameraId,
+ std::vector<int> *surfaceIds = nullptr,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
bool isShared = false, uint64_t consumerUsage = 0) = 0;
@@ -209,7 +225,8 @@
* - BAD_VALUE if the set of streams was invalid (e.g. fmts or sizes)
* - INVALID_OPERATION if the device was in the wrong state
*/
- virtual status_t configureStreams(int operatingMode = 0) = 0;
+ virtual status_t configureStreams(const CameraMetadata& sessionParams,
+ int operatingMode = 0) = 0;
// get the buffer producer of the input stream
virtual status_t getInputBufferProducer(
@@ -347,8 +364,21 @@
* Set the deferred consumer surface and finish the rest of the stream configuration.
*/
virtual status_t setConsumerSurfaces(int streamId,
- const std::vector<sp<Surface>>& consumers) = 0;
+ const std::vector<sp<Surface>>& consumers, std::vector<int> *surfaceIds /*out*/) = 0;
+ /**
+ * Update a given stream.
+ */
+ virtual status_t updateStream(int streamId, const std::vector<sp<Surface>> &newSurfaces,
+ const std::vector<android::camera3::OutputStreamInfo> &outputInfo,
+ const std::vector<size_t> &removedSurfaceIds,
+ KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+ /**
+ * Drop buffers for the stream with the given streamId when dropping is true; otherwise,
+ * stop dropping buffers for that stream.
+ */
+ virtual status_t dropStreamBuffers(bool /*dropping*/, int /*streamId*/) = 0;
};
}; // namespace android
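
A minimal usage sketch of the new PhysicalCameraSettingsList type follows; the camera ids are placeholders and the snippet is not part of this change. The first entry carries the logical camera's settings and per-physical-camera entries follow, matching what CameraDeviceClient::submitRequestList builds above.

// Sketch (ids are placeholders): assemble a per-request settings list.
CameraMetadata logicalSettings;   // settings keyed by the logical camera id
CameraMetadata physicalSettings;  // filtered settings for one physical camera
CameraDeviceBase::PhysicalCameraSettingsList settingsList;
settingsList.push_back({std::string("0"), logicalSettings});
settingsList.push_back({std::string("2"), physicalSettings});
// ANDROID_REQUEST_OUTPUT_STREAMS and ANDROID_REQUEST_ID are written into the
// first (logical) entry before the list is submitted via captureList().
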
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index b3d1132..43f1a91 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -20,11 +20,13 @@
#include "CameraProviderManager.h"
+#include <algorithm>
#include <chrono>
#include <inttypes.h>
#include <hidl/ServiceManagement.h>
#include <functional>
#include <camera_metadata_hidden.h>
+#include <android-base/parseint.h>
namespace android {
@@ -35,9 +37,7 @@
// Hardcoded name for the passthrough HAL implementation, since it can't be discovered via the
// service manager
const std::string kLegacyProviderName("legacy/0");
-
-// Slash-separated list of provider types to consider for use via the old camera API
-const std::string kStandardProviderTypes("internal/legacy");
+const std::string kExternalProviderName("external/0");
} // anonymous namespace
@@ -69,6 +69,7 @@
// See if there's a passthrough HAL, but let's not complain if there's not
addProviderLocked(kLegacyProviderName, /*expected*/ false);
+ addProviderLocked(kExternalProviderName, /*expected*/ false);
return OK;
}
@@ -77,18 +78,7 @@
std::lock_guard<std::mutex> lock(mInterfaceMutex);
int count = 0;
for (auto& provider : mProviders) {
- count += provider->mUniqueDeviceCount;
- }
- return count;
-}
-
-int CameraProviderManager::getAPI1CompatibleCameraCount() const {
- std::lock_guard<std::mutex> lock(mInterfaceMutex);
- int count = 0;
- for (auto& provider : mProviders) {
- if (kStandardProviderTypes.find(provider->getType()) != std::string::npos) {
- count += provider->mUniqueAPI1CompatibleCameraIds.size();
- }
+ count += provider->mUniqueCameraIds.size();
}
return count;
}
@@ -108,12 +98,33 @@
std::lock_guard<std::mutex> lock(mInterfaceMutex);
std::vector<std::string> deviceIds;
for (auto& provider : mProviders) {
- if (kStandardProviderTypes.find(provider->getType()) != std::string::npos) {
- for (auto& id : provider->mUniqueAPI1CompatibleCameraIds) {
- deviceIds.push_back(id);
- }
- }
+ std::vector<std::string> providerDeviceIds = provider->mUniqueAPI1CompatibleCameraIds;
+
+ // API1 apps don't handle logical and physical camera devices well. So
+ // for each [logical, physical1, physical2, ...] id combo, only take the
+ // first id advertised by the HAL and filter out the rest.
+ filterLogicalCameraIdsLocked(providerDeviceIds);
+
+ deviceIds.insert(deviceIds.end(), providerDeviceIds.begin(), providerDeviceIds.end());
}
+
+ std::sort(deviceIds.begin(), deviceIds.end(),
+ [](const std::string& a, const std::string& b) -> bool {
+ uint32_t aUint = 0, bUint = 0;
+ bool aIsUint = base::ParseUint(a, &aUint);
+ bool bIsUint = base::ParseUint(b, &bUint);
+
+ // Uint device IDs first
+ if (aIsUint && bIsUint) {
+ return aUint < bUint;
+ } else if (aIsUint) {
+ return true;
+ } else if (bIsUint) {
+ return false;
+ }
+ // Simple string compare if both id are not uint
+ return a < b;
+ });
return deviceIds;
}
@@ -166,11 +177,7 @@
status_t CameraProviderManager::getCameraCharacteristics(const std::string &id,
CameraMetadata* characteristics) const {
std::lock_guard<std::mutex> lock(mInterfaceMutex);
-
- auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
- if (deviceInfo == nullptr) return NAME_NOT_FOUND;
-
- return deviceInfo->getCameraCharacteristics(characteristics);
+ return getCameraCharacteristicsLocked(id, characteristics);
}
status_t CameraProviderManager::getHighestSupportedVersion(const std::string &id,
@@ -385,6 +392,37 @@
return ret;
}
+bool CameraProviderManager::isLogicalCamera(const CameraMetadata& staticInfo,
+ std::vector<std::string>* physicalCameraIds) {
+ bool isLogicalCam = false;
+ camera_metadata_ro_entry_t entryCap;
+
+ entryCap = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ for (size_t i = 0; i < entryCap.count; ++i) {
+ uint8_t capability = entryCap.data.u8[i];
+ if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
+ isLogicalCam = true;
+ break;
+ }
+ }
+ if (!isLogicalCam) {
+ return false;
+ }
+
+ camera_metadata_ro_entry_t entryIds = staticInfo.find(ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS);
+ const uint8_t* ids = entryIds.data.u8;
+ size_t start = 0;
+ for (size_t i = 0; i < entryIds.count; ++i) {
+ if (ids[i] == '\0') {
+ if (start != i) {
+ physicalCameraIds->push_back((const char*)ids+start);
+ }
+ start = i+1;
+ }
+ }
+ return true;
+}
+
status_t CameraProviderManager::addProviderLocked(const std::string& newProvider, bool expected) {
for (const auto& providerInfo : mProviders) {
if (providerInfo->mProviderName == newProvider) {
@@ -478,6 +516,8 @@
}
ALOGI("Connecting to new camera provider: %s, isRemote? %d",
mProviderName.c_str(), mInterface->isRemote());
+ // cameraDeviceStatusChange callbacks may be called (causing new devices to be added)
+ // before setCallback returns
hardware::Return<Status> status = mInterface->setCallback(this);
if (!status.isOk()) {
ALOGE("%s: Transaction error setting up callbacks with camera provider '%s': %s",
@@ -534,17 +574,10 @@
}
}
- for (auto& device : mDevices) {
- mUniqueCameraIds.insert(device->mId);
- if (device->isAPI1Compatible()) {
- mUniqueAPI1CompatibleCameraIds.insert(device->mId);
- }
- }
- mUniqueDeviceCount = mUniqueCameraIds.size();
-
ALOGI("Camera provider %s ready with %zu camera devices",
mProviderName.c_str(), mDevices.size());
+ mInitialized = true;
return OK;
}
@@ -592,9 +625,15 @@
}
if (deviceInfo == nullptr) return BAD_VALUE;
deviceInfo->mStatus = initialStatus;
+ bool isAPI1Compatible = deviceInfo->isAPI1Compatible();
mDevices.push_back(std::move(deviceInfo));
+ mUniqueCameraIds.insert(id);
+ if (isAPI1Compatible) {
+ mUniqueAPI1CompatibleCameraIds.push_back(id);
+ }
+
if (parsedId != nullptr) {
*parsedId = id;
}
@@ -604,6 +643,12 @@
void CameraProviderManager::ProviderInfo::removeDevice(std::string id) {
for (auto it = mDevices.begin(); it != mDevices.end(); it++) {
if ((*it)->mId == id) {
+ mUniqueCameraIds.erase(id);
+ if ((*it)->isAPI1Compatible()) {
+ mUniqueAPI1CompatibleCameraIds.erase(std::remove(
+ mUniqueAPI1CompatibleCameraIds.begin(),
+ mUniqueAPI1CompatibleCameraIds.end(), id));
+ }
mDevices.erase(it);
break;
}
@@ -652,6 +697,14 @@
dprintf(fd, " API2 camera characteristics:\n");
info2.dump(fd, /*verbosity*/ 2, /*indentation*/ 4);
}
+
+ dprintf(fd, "== Camera HAL device %s (v%d.%d) dumpState: ==\n", device->mName.c_str(),
+ device->mVersion.get_major(), device->mVersion.get_minor());
+ res = device->dumpState(fd);
+ if (res != OK) {
+ dprintf(fd, " <Error dumping device %s state: %s (%d)>\n",
+ device->mName.c_str(), strerror(-res), res);
+ }
}
return OK;
}
@@ -661,6 +714,7 @@
CameraDeviceStatus newStatus) {
sp<StatusListener> listener;
std::string id;
+ bool initialized = false;
{
std::lock_guard<std::mutex> lock(mLock);
bool known = false;
@@ -687,9 +741,13 @@
removeDevice(id);
}
listener = mManager->getStatusListener();
+ initialized = mInitialized;
}
// Call without lock held to allow reentrancy into provider manager
- if (listener != nullptr) {
+ // Don't send the callback if providerInfo hasn't been initialized;
+ // CameraService will initialize the device status after the provider is
+ // initialized.
+ if (listener != nullptr && initialized) {
listener->onDeviceStatusChanged(String8(id.c_str()), newStatus);
}
return hardware::Void();
@@ -759,6 +817,18 @@
name.c_str(), statusToString(status));
return nullptr;
}
+
+ for (auto& conflictName : resourceCost.conflictingDevices) {
+ uint16_t major, minor;
+ std::string type, id;
+ status_t res = parseDeviceName(conflictName, &major, &minor, &type, &id);
+ if (res != OK) {
+ ALOGE("%s: Failed to parse conflicting device %s", __FUNCTION__, conflictName.c_str());
+ return nullptr;
+ }
+ conflictName = id;
+ }
+
return std::unique_ptr<DeviceInfo>(
new DeviceInfoT(name, tagId, id, minorVersion, resourceCost,
cameraInterface));
@@ -919,6 +989,17 @@
return OK;
}
+status_t CameraProviderManager::ProviderInfo::DeviceInfo1::dumpState(int fd) const {
+ native_handle_t* handle = native_handle_create(1,0);
+ handle->data[0] = fd;
+ hardware::Return<Status> s = mInterface->dumpState(handle);
+ native_handle_delete(handle);
+ if (!s.isOk()) {
+ return INVALID_OPERATION;
+ }
+ return mapToStatusT(s);
+}
+
CameraProviderManager::ProviderInfo::DeviceInfo3::DeviceInfo3(const std::string& name,
const metadata_vendor_id_t tagId, const std::string &id,
uint16_t minorVersion,
@@ -1022,6 +1103,17 @@
return isBackwardCompatible;
}
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::dumpState(int fd) const {
+ native_handle_t* handle = native_handle_create(1,0);
+ handle->data[0] = fd;
+ auto ret = mInterface->dumpState(handle);
+ native_handle_delete(handle);
+ if (!ret.isOk()) {
+ return INVALID_OPERATION;
+ }
+ return OK;
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraCharacteristics(
CameraMetadata *characteristics) const {
if (characteristics == nullptr) return BAD_VALUE;
@@ -1359,5 +1451,51 @@
return OK;
}
+status_t CameraProviderManager::getCameraCharacteristicsLocked(const std::string &id,
+ CameraMetadata* characteristics) const {
+ auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ return deviceInfo->getCameraCharacteristics(characteristics);
+}
+
+void CameraProviderManager::filterLogicalCameraIdsLocked(
+ std::vector<std::string>& deviceIds) const
+{
+ std::unordered_set<std::string> removedIds;
+
+ for (auto& deviceId : deviceIds) {
+ CameraMetadata info;
+ status_t res = getCameraCharacteristicsLocked(deviceId, &info);
+ if (res != OK) {
+ ALOGE("%s: Failed to getCameraCharacteristics for id %s", __FUNCTION__,
+ deviceId.c_str());
+ return;
+ }
+
+ // idCombo contains the ids of a logical camera and its physical cameras
+ std::vector<std::string> idCombo;
+ bool logicalCamera = CameraProviderManager::isLogicalCamera(info, &idCombo);
+ if (!logicalCamera) {
+ continue;
+ }
+ idCombo.push_back(deviceId);
+
+ for (auto& id : deviceIds) {
+ auto foundId = std::find(idCombo.begin(), idCombo.end(), id);
+ if (foundId == idCombo.end()) {
+ continue;
+ }
+
+ idCombo.erase(foundId);
+ removedIds.insert(idCombo.begin(), idCombo.end());
+ break;
+ }
+ }
+
+ deviceIds.erase(std::remove_if(deviceIds.begin(), deviceIds.end(),
+ [&removedIds](const std::string& s) {return removedIds.find(s) != removedIds.end();}),
+ deviceIds.end());
+}
} // namespace android
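
The device id ordering used by getAPI1CompatibleCameraDeviceIds above can be shown in isolation. The helper sortCameraIds is illustrative and not part of this change; it applies the same comparator as the patch, so numeric ids sort first in ascending numeric order, followed by non-numeric ids in lexicographic order.

#include <android-base/parseint.h>
#include <algorithm>
#include <string>
#include <vector>

// Sketch: order camera device ids the same way the provider manager does.
static void sortCameraIds(std::vector<std::string>& ids) {
    std::sort(ids.begin(), ids.end(),
            [](const std::string& a, const std::string& b) -> bool {
        uint32_t aUint = 0, bUint = 0;
        bool aIsUint = android::base::ParseUint(a, &aUint);
        bool bIsUint = android::base::ParseUint(b, &bUint);
        if (aIsUint && bIsUint) return aUint < bUint;  // both numeric: ascending
        if (aIsUint) return true;                      // numeric ids come first
        if (bIsUint) return false;
        return a < b;                                  // both non-numeric: lexicographic
    });
}
// Example: {"10", "external-1", "2", "0"} sorts to {"0", "2", "10", "external-1"}.
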
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index b14a2c6..b8b8b8c 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -18,7 +18,7 @@
#define ANDROID_SERVERS_CAMERA_CAMERAPROVIDER_H
#include <vector>
-#include <set>
+#include <unordered_set>
#include <string>
#include <mutex>
@@ -125,16 +125,14 @@
*/
int getCameraCount() const;
+ std::vector<std::string> getCameraDeviceIds() const;
+
/**
* Retrieve the number of API1 compatible cameras; these are internal and
* backwards-compatible. This is the set of cameras that will be
- * accessible via the old camera API, with IDs in range of
- * [0, getAPI1CompatibleCameraCount()-1]. This value is not expected to change dynamically.
+ * accessible via the old camera API.
+ * The return value may change dynamically due to external camera hotplug.
*/
- int getAPI1CompatibleCameraCount() const;
-
- std::vector<std::string> getCameraDeviceIds() const;
-
std::vector<std::string> getAPI1CompatibleCameraDeviceIds() const;
/**
@@ -232,6 +230,13 @@
hardware::hidl_version minVersion = hardware::hidl_version{0,0},
hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
+ /*
+ * Check if a camera with the given staticInfo is a logical camera. If so, return
+ * the physical camera ids.
+ */
+ static bool isLogicalCamera(const CameraMetadata& staticInfo,
+ std::vector<std::string>* physicalCameraIds);
+
private:
// All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
mutable std::mutex mInterfaceMutex;
@@ -293,6 +298,7 @@
virtual status_t setTorchMode(bool enabled) = 0;
virtual status_t getCameraInfo(hardware::CameraInfo *info) const = 0;
virtual bool isAPI1Compatible() const = 0;
+ virtual status_t dumpState(int fd) const = 0;
virtual status_t getCameraCharacteristics(CameraMetadata *characteristics) const {
(void) characteristics;
return INVALID_OPERATION;
@@ -313,9 +319,9 @@
static status_t setTorchMode(InterfaceT& interface, bool enabled);
};
std::vector<std::unique_ptr<DeviceInfo>> mDevices;
- std::set<std::string> mUniqueCameraIds;
+ std::unordered_set<std::string> mUniqueCameraIds;
int mUniqueDeviceCount;
- std::set<std::string> mUniqueAPI1CompatibleCameraIds;
+ std::vector<std::string> mUniqueAPI1CompatibleCameraIds;
// HALv1-specific camera fields, including the actual device interface
struct DeviceInfo1 : public DeviceInfo {
@@ -326,6 +332,7 @@
virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
//In case of Device1Info assume that we are always API1 compatible
virtual bool isAPI1Compatible() const override { return true; }
+ virtual status_t dumpState(int fd) const override;
DeviceInfo1(const std::string& name, const metadata_vendor_id_t tagId,
const std::string &id, uint16_t minorVersion,
const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
@@ -343,6 +350,7 @@
virtual status_t setTorchMode(bool enabled) override;
virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
virtual bool isAPI1Compatible() const override;
+ virtual status_t dumpState(int fd) const override;
virtual status_t getCameraCharacteristics(
CameraMetadata *characteristics) const override;
@@ -363,6 +371,8 @@
CameraProviderManager *mManager;
+ bool mInitialized = false;
+
// Templated method to instantiate the right kind of DeviceInfo and call the
// right CameraProvider getCameraDeviceInterface_* method.
template<class DeviceInfoT>
@@ -411,6 +421,9 @@
static const char* torchStatusToString(
const hardware::camera::common::V1_0::TorchModeStatus&);
+ status_t getCameraCharacteristicsLocked(const std::string &id,
+ CameraMetadata* characteristics) const;
+ void filterLogicalCameraIdsLocked(std::vector<std::string>& deviceIds) const;
};
} // namespace android
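
As a worked illustration of the id parsing behind isLogicalCamera (declared above), ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS stores the physical camera ids as consecutive NUL-terminated strings. The helper splitPhysicalIds below is illustrative and not part of this change.

#include <string>
#include <vector>

// Sketch: split a NUL-separated id buffer into individual id strings.
static std::vector<std::string> splitPhysicalIds(const uint8_t* ids, size_t count) {
    std::vector<std::string> out;
    size_t start = 0;
    for (size_t i = 0; i < count; ++i) {
        if (ids[i] == '\0') {
            if (start != i) {
                out.emplace_back(reinterpret_cast<const char*>(ids) + start, i - start);
            }
            start = i + 1;  // skip past the terminator
        }
    }
    return out;
}
// Example: a buffer holding "2\0" followed by "3\0" yields {"2", "3"}.
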
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index ced1d3a..543914e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,6 +39,8 @@
#include <inttypes.h>
+#include <utility>
+
#include <utils/Log.h>
#include <utils/Trace.h>
#include <utils/Timers.h>
@@ -75,7 +77,8 @@
mNextShutterFrameNumber(0),
mNextReprocessShutterFrameNumber(0),
mListener(NULL),
- mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID)
+ mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID),
+ mLastTemplateId(-1)
{
ATRACE_CALL();
camera3_callback_ops::notify = &sNotify;
@@ -94,7 +97,7 @@
return mId;
}
-status_t Camera3Device::initialize(sp<CameraProviderManager> manager) {
+status_t Camera3Device::initialize(sp<CameraProviderManager> manager, const String8& monitorTags) {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
@@ -167,6 +170,10 @@
mInterface = new HalInterface(session, queue);
std::string providerType;
mVendorTagId = manager->getProviderTagIdLocked(mId.string());
+ mTagMonitor.initialize(mVendorTagId);
+ if (!monitorTags.isEmpty()) {
+ mTagMonitor.parseTagsToMonitor(String8(monitorTags));
+ }
return initializeCommonLocked();
}
@@ -190,10 +197,14 @@
/** Create buffer manager */
mBufferManager = new Camera3BufferManager();
- mTagMonitor.initialize(mVendorTagId);
-
+ Vector<int32_t> sessionParamKeys;
+ camera_metadata_entry_t sessionKeysEntry = mDeviceInfo.find(
+ ANDROID_REQUEST_AVAILABLE_SESSION_KEYS);
+ if (sessionKeysEntry.count > 0) {
+ sessionParamKeys.insertArrayAt(sessionKeysEntry.data.i32, 0, sessionKeysEntry.count);
+ }
/** Start up request queue thread */
- mRequestThread = new RequestThread(this, mStatusTracker, mInterface);
+ mRequestThread = new RequestThread(this, mStatusTracker, mInterface, sessionParamKeys);
res = mRequestThread->run(String8::format("C3Dev-%s-ReqQueue", mId.string()).string());
if (res != OK) {
SET_ERR_L("Unable to start request queue thread: %s (%d)",
@@ -238,6 +249,14 @@
}
}
+ if (DistortionMapper::isDistortionSupported(mDeviceInfo)) {
+ res = mDistortionMapper.setupStaticInfo(mDeviceInfo);
+ if (res != OK) {
+ SET_ERR_L("Unable to read necessary calibration fields for distortion correction");
+ return res;
+ }
+ }
+
return OK;
}
@@ -574,13 +593,12 @@
bool dumpTemplates = false;
String16 templatesOption("-t");
- String16 monitorOption("-m");
int n = args.size();
for (int i = 0; i < n; i++) {
if (args[i] == templatesOption) {
dumpTemplates = true;
}
- if (args[i] == monitorOption) {
+ if (args[i] == TagMonitor::kMonitorOption) {
if (i + 1 < n) {
String8 monitorTags = String8(args[i + 1]);
if (monitorTags == "off") {
@@ -661,13 +679,13 @@
}
if (dumpTemplates) {
- const char *templateNames[] = {
+ const char *templateNames[CAMERA3_TEMPLATE_COUNT] = {
"TEMPLATE_PREVIEW",
"TEMPLATE_STILL_CAPTURE",
"TEMPLATE_VIDEO_RECORD",
"TEMPLATE_VIDEO_SNAPSHOT",
"TEMPLATE_ZERO_SHUTTER_LAG",
- "TEMPLATE_MANUAL"
+ "TEMPLATE_MANUAL",
};
for (int i = 1; i < CAMERA3_TEMPLATE_COUNT; i++) {
@@ -733,7 +751,7 @@
}
status_t Camera3Device::convertMetadataListToRequestListLocked(
- const List<const CameraMetadata> &metadataList,
+ const List<const PhysicalCameraSettingsList> &metadataList,
const std::list<const SurfaceMap> &surfaceMaps,
bool repeating,
RequestList *requestList) {
@@ -743,7 +761,7 @@
}
int32_t burstId = 0;
- List<const CameraMetadata>::const_iterator metadataIt = metadataList.begin();
+ List<const PhysicalCameraSettingsList>::const_iterator metadataIt = metadataList.begin();
std::list<const SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
for (; metadataIt != metadataList.end() && surfaceMapIt != surfaceMaps.end();
++metadataIt, ++surfaceMapIt) {
@@ -757,12 +775,13 @@
// Setup burst Id and request Id
newRequest->mResultExtras.burstId = burstId++;
- if (metadataIt->exists(ANDROID_REQUEST_ID)) {
- if (metadataIt->find(ANDROID_REQUEST_ID).count == 0) {
+ if (metadataIt->begin()->metadata.exists(ANDROID_REQUEST_ID)) {
+ if (metadataIt->begin()->metadata.find(ANDROID_REQUEST_ID).count == 0) {
CLOGE("RequestID entry exists; but must not be empty in metadata");
return BAD_VALUE;
}
- newRequest->mResultExtras.requestId = metadataIt->find(ANDROID_REQUEST_ID).data.i32[0];
+ newRequest->mResultExtras.requestId = metadataIt->begin()->metadata.find(
+ ANDROID_REQUEST_ID).data.i32[0];
} else {
CLOGE("RequestID does not exist in metadata");
return BAD_VALUE;
@@ -794,17 +813,19 @@
status_t Camera3Device::capture(CameraMetadata &request, int64_t* /*lastFrameNumber*/) {
ATRACE_CALL();
- List<const CameraMetadata> requests;
+ List<const PhysicalCameraSettingsList> requestsList;
std::list<const SurfaceMap> surfaceMaps;
- convertToRequestList(requests, surfaceMaps, request);
+ convertToRequestList(requestsList, surfaceMaps, request);
- return captureList(requests, surfaceMaps, /*lastFrameNumber*/NULL);
+ return captureList(requestsList, surfaceMaps, /*lastFrameNumber*/NULL);
}
-void Camera3Device::convertToRequestList(List<const CameraMetadata>& requests,
+void Camera3Device::convertToRequestList(List<const PhysicalCameraSettingsList>& requestsList,
std::list<const SurfaceMap>& surfaceMaps,
const CameraMetadata& request) {
- requests.push_back(request);
+ PhysicalCameraSettingsList requestList;
+ requestList.push_back({std::string(getId().string()), request});
+ requestsList.push_back(requestList);
SurfaceMap surfaceMap;
camera_metadata_ro_entry streams = request.find(ANDROID_REQUEST_OUTPUT_STREAMS);
@@ -817,7 +838,7 @@
}
status_t Camera3Device::submitRequestsHelper(
- const List<const CameraMetadata> &requests,
+ const List<const PhysicalCameraSettingsList> &requests,
const std::list<const SurfaceMap> &surfaceMaps,
bool repeating,
/*out*/
@@ -863,11 +884,9 @@
return res;
}
-// Only one processCaptureResult should be called at a time, so
-// the locks won't block. The locks are present here simply to enforce this.
-hardware::Return<void> Camera3Device::processCaptureResult(
+hardware::Return<void> Camera3Device::processCaptureResult_3_4(
const hardware::hidl_vec<
- hardware::camera::device::V3_2::CaptureResult>& results) {
+ hardware::camera::device::V3_4::CaptureResult>& results) {
// Ideally we should grab mLock, but that can lead to deadlock, and
// it's not super important to get up to date value of mStatus for this
// warning print, hence skipping the lock here
@@ -891,45 +910,121 @@
}
}
for (const auto& result : results) {
- processOneCaptureResultLocked(result);
+ processOneCaptureResultLocked(result.v3_2, result.physicalCameraMetadata);
}
mProcessCaptureResultLock.unlock();
return hardware::Void();
}
+// Only one processCaptureResult should be called at a time, so
+// the locks won't block. The locks are present here simply to enforce this.
+hardware::Return<void> Camera3Device::processCaptureResult(
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_2::CaptureResult>& results) {
+ hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata> noPhysMetadata;
+
+ // Ideally we should grab mLock, but that can lead to deadlock, and
+ // it's not super important to get up to date value of mStatus for this
+ // warning print, hence skipping the lock here
+ if (mStatus == STATUS_ERROR) {
+ // Per API contract, HAL should act as closed after device error
+ // But mStatus can be set to error by framework as well, so just log
+ // a warning here.
+ ALOGW("%s: received capture result in error state.", __FUNCTION__);
+ }
+
+ if (mProcessCaptureResultLock.tryLock() != OK) {
+ // This should never happen; it indicates a wrong client implementation
+ // that doesn't follow the contract. But, we can be tolerant here.
+ ALOGE("%s: callback overlapped! waiting 1s...",
+ __FUNCTION__);
+ if (mProcessCaptureResultLock.timedLock(1000000000 /* 1s */) != OK) {
+ ALOGE("%s: cannot acquire lock in 1s, dropping results",
+ __FUNCTION__);
+ // really don't know what to do, so bail out.
+ return hardware::Void();
+ }
+ }
+ for (const auto& result : results) {
+ processOneCaptureResultLocked(result, noPhysMetadata);
+ }
+ mProcessCaptureResultLock.unlock();
+ return hardware::Void();
+}
+
+status_t Camera3Device::readOneCameraMetadataLocked(
+ uint64_t fmqResultSize, hardware::camera::device::V3_2::CameraMetadata& resultMetadata,
+ const hardware::camera::device::V3_2::CameraMetadata& result) {
+ if (fmqResultSize > 0) {
+ resultMetadata.resize(fmqResultSize);
+ if (mResultMetadataQueue == nullptr) {
+ return NO_MEMORY; // logged in initialize()
+ }
+ if (!mResultMetadataQueue->read(resultMetadata.data(), fmqResultSize)) {
+ ALOGE("%s: Cannot read camera metadata from fmq, size = %" PRIu64,
+ __FUNCTION__, fmqResultSize);
+ return INVALID_OPERATION;
+ }
+ } else {
+ resultMetadata.setToExternal(const_cast<uint8_t *>(result.data()),
+ result.size());
+ }
+
+ if (resultMetadata.size() != 0) {
+ status_t res;
+ const camera_metadata_t* metadata =
+ reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
+ size_t expected_metadata_size = resultMetadata.size();
+ if ((res = validate_camera_metadata_structure(metadata, &expected_metadata_size)) != OK) {
+ ALOGE("%s: Invalid camera metadata received by camera service from HAL: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return INVALID_OPERATION;
+ }
+ }
+
+ return OK;
+}
+
void Camera3Device::processOneCaptureResultLocked(
- const hardware::camera::device::V3_2::CaptureResult& result) {
+ const hardware::camera::device::V3_2::CaptureResult& result,
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_4::PhysicalCameraMetadata> physicalCameraMetadatas) {
camera3_capture_result r;
status_t res;
r.frame_number = result.frameNumber;
+ // Read and validate the result metadata.
hardware::camera::device::V3_2::CameraMetadata resultMetadata;
- if (result.fmqResultSize > 0) {
- resultMetadata.resize(result.fmqResultSize);
- if (mResultMetadataQueue == nullptr) {
- return; // logged in initialize()
- }
- if (!mResultMetadataQueue->read(resultMetadata.data(), result.fmqResultSize)) {
- ALOGE("%s: Frame %d: Cannot read camera metadata from fmq, size = %" PRIu64,
- __FUNCTION__, result.frameNumber, result.fmqResultSize);
- return;
- }
- } else {
- resultMetadata.setToExternal(const_cast<uint8_t *>(result.result.data()),
- result.result.size());
+ res = readOneCameraMetadataLocked(result.fmqResultSize, resultMetadata, result.result);
+ if (res != OK) {
+ ALOGE("%s: Frame %d: Failed to read capture result metadata",
+ __FUNCTION__, result.frameNumber);
+ return;
}
+ r.result = reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
- if (resultMetadata.size() != 0) {
- r.result = reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
- size_t expected_metadata_size = resultMetadata.size();
- if ((res = validate_camera_metadata_structure(r.result, &expected_metadata_size)) != OK) {
- ALOGE("%s: Frame %d: Invalid camera metadata received by camera service from HAL: %s (%d)",
- __FUNCTION__, result.frameNumber, strerror(-res), res);
+ // Read and validate physical camera metadata
+ size_t physResultCount = physicalCameraMetadatas.size();
+ std::vector<const char*> physCamIds(physResultCount);
+ std::vector<const camera_metadata_t *> phyCamMetadatas(physResultCount);
+ std::vector<hardware::camera::device::V3_2::CameraMetadata> physResultMetadata;
+ physResultMetadata.resize(physResultCount);
+ for (size_t i = 0; i < physicalCameraMetadatas.size(); i++) {
+ res = readOneCameraMetadataLocked(physicalCameraMetadatas[i].fmqMetadataSize,
+ physResultMetadata[i], physicalCameraMetadatas[i].metadata);
+ if (res != OK) {
+ ALOGE("%s: Frame %d: Failed to read capture result metadata for camera %s",
+ __FUNCTION__, result.frameNumber,
+ physicalCameraMetadatas[i].physicalCameraId.c_str());
return;
}
- } else {
- r.result = nullptr;
+ physCamIds[i] = physicalCameraMetadatas[i].physicalCameraId.c_str();
+ phyCamMetadatas[i] = reinterpret_cast<const camera_metadata_t*>(
+ physResultMetadata[i].data());
}
+ r.num_physcam_metadata = physResultCount;
+ r.physcam_ids = physCamIds.data();
+ r.physcam_metadata = phyCamMetadatas.data();
std::vector<camera3_stream_buffer_t> outputBuffers(result.outputBuffers.size());
std::vector<buffer_handle_t> outputBufferHandles(result.outputBuffers.size());
@@ -1067,42 +1162,43 @@
notify(&m);
}
-status_t Camera3Device::captureList(const List<const CameraMetadata> &requests,
+status_t Camera3Device::captureList(const List<const PhysicalCameraSettingsList> &requestsList,
const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber) {
ATRACE_CALL();
- return submitRequestsHelper(requests, surfaceMaps, /*repeating*/false, lastFrameNumber);
+ return submitRequestsHelper(requestsList, surfaceMaps, /*repeating*/false, lastFrameNumber);
}
status_t Camera3Device::setStreamingRequest(const CameraMetadata &request,
int64_t* /*lastFrameNumber*/) {
ATRACE_CALL();
- List<const CameraMetadata> requests;
+ List<const PhysicalCameraSettingsList> requestsList;
std::list<const SurfaceMap> surfaceMaps;
- convertToRequestList(requests, surfaceMaps, request);
+ convertToRequestList(requestsList, surfaceMaps, request);
- return setStreamingRequestList(requests, /*surfaceMap*/surfaceMaps,
+ return setStreamingRequestList(requestsList, /*surfaceMap*/surfaceMaps,
/*lastFrameNumber*/NULL);
}
-status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
- const std::list<const SurfaceMap> &surfaceMaps,
- int64_t *lastFrameNumber) {
+status_t Camera3Device::setStreamingRequestList(
+ const List<const PhysicalCameraSettingsList> &requestsList,
+ const std::list<const SurfaceMap> &surfaceMaps, int64_t *lastFrameNumber) {
ATRACE_CALL();
- return submitRequestsHelper(requests, surfaceMaps, /*repeating*/true, lastFrameNumber);
+ return submitRequestsHelper(requestsList, surfaceMaps, /*repeating*/true, lastFrameNumber);
}
sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
- const CameraMetadata &request, const SurfaceMap &surfaceMap) {
+ const PhysicalCameraSettingsList &request, const SurfaceMap &surfaceMap) {
status_t res;
if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
// This point should only be reached via API1 (API2 must explicitly call configureStreams)
// so unilaterally select normal operating mode.
- res = configureStreamsLocked(CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE);
+ res = filterParamsAndConfigureLocked(request.begin()->metadata,
+ CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE);
// Stream configuration failed. Client might try other configurations.
if (res != OK) {
CLOGE("Can't set up streams: %s (%d)", strerror(-res), res);
@@ -1205,8 +1301,8 @@
// Continue captures if active at start
if (wasActive) {
ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__);
- // Reuse current operating mode for new stream config
- res = configureStreamsLocked(mOperatingMode);
+ // Reuse current operating mode and session parameters for new stream config
+ res = configureStreamsLocked(mOperatingMode, mSessionParams);
if (res != OK) {
ALOGE("%s: Can't reconfigure device for new stream %d: %s (%d)",
__FUNCTION__, mNextStreamId, strerror(-res), res);
@@ -1222,7 +1318,8 @@
status_t Camera3Device::createStream(sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
- int streamSetId, bool isShared, uint64_t consumerUsage) {
+ const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared, uint64_t consumerUsage) {
ATRACE_CALL();
if (consumer == nullptr) {
@@ -1234,20 +1331,24 @@
consumers.push_back(consumer);
return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
- format, dataSpace, rotation, id, streamSetId, isShared, consumerUsage);
+ format, dataSpace, rotation, id, physicalCameraId, surfaceIds, streamSetId,
+ isShared, consumerUsage);
}
status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
- int streamSetId, bool isShared, uint64_t consumerUsage) {
+ const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared, uint64_t consumerUsage) {
ATRACE_CALL();
+
Mutex::Autolock il(mInterfaceLock);
nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
Mutex::Autolock l(mLock);
ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
- " consumer usage %" PRIu64 ", isShared %d", mId.string(), mNextStreamId, width, height, format,
- dataSpace, rotation, consumerUsage, isShared);
+ " consumer usage %" PRIu64 ", isShared %d, physicalCameraId %s", mId.string(),
+ mNextStreamId, width, height, format, dataSpace, rotation, consumerUsage, isShared,
+ physicalCameraId.string());
status_t res;
bool wasActive = false;
@@ -1307,7 +1408,7 @@
}
newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, blobBufferSize, format, dataSpace, rotation,
- mTimestampOffset, streamSetId);
+ mTimestampOffset, physicalCameraId, streamSetId);
} else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(width, height);
if (rawOpaqueBufferSize <= 0) {
@@ -1316,20 +1417,33 @@
}
newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
- mTimestampOffset, streamSetId);
+ mTimestampOffset, physicalCameraId, streamSetId);
} else if (isShared) {
newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
width, height, format, consumerUsage, dataSpace, rotation,
- mTimestampOffset, streamSetId);
+ mTimestampOffset, physicalCameraId, streamSetId);
} else if (consumers.size() == 0 && hasDeferredConsumer) {
newStream = new Camera3OutputStream(mNextStreamId,
width, height, format, consumerUsage, dataSpace, rotation,
- mTimestampOffset, streamSetId);
+ mTimestampOffset, physicalCameraId, streamSetId);
} else {
newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, format, dataSpace, rotation,
- mTimestampOffset, streamSetId);
+ mTimestampOffset, physicalCameraId, streamSetId);
}
+
+ size_t consumerCount = consumers.size();
+ for (size_t i = 0; i < consumerCount; i++) {
+ int id = newStream->getSurfaceId(consumers[i]);
+ if (id < 0) {
+ SET_ERR_L("Invalid surface id");
+ return BAD_VALUE;
+ }
+ if (surfaceIds != nullptr) {
+ surfaceIds->push_back(id);
+ }
+ }
+
newStream->setStatusTracker(mStatusTracker);
newStream->setBufferManager(mBufferManager);
@@ -1346,8 +1460,8 @@
// Continue captures if active at start
if (wasActive) {
ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__);
- // Reuse current operating mode for new stream config
- res = configureStreamsLocked(mOperatingMode);
+ // Reuse current operating mode and session parameters for new stream config
+ res = configureStreamsLocked(mOperatingMode, mSessionParams);
if (res != OK) {
CLOGE("Can't reconfigure device for new stream %d: %s (%d)",
mNextStreamId, strerror(-res), res);
@@ -1445,7 +1559,7 @@
// CameraDevice semantics require device to already be idle before
// deleteStream is called, unlike for createStream.
if (mStatus == STATUS_ACTIVE) {
- ALOGV("%s: Camera %s: Device not idle", __FUNCTION__, mId.string());
+ ALOGW("%s: Camera %s: Device not idle", __FUNCTION__, mId.string());
return -EBUSY;
}
@@ -1485,14 +1599,50 @@
return res;
}
-status_t Camera3Device::configureStreams(int operatingMode) {
+status_t Camera3Device::configureStreams(const CameraMetadata& sessionParams, int operatingMode) {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- return configureStreamsLocked(operatingMode);
+ // In case the client doesn't include any session parameter, try a
+ // speculative configuration using the values from the last cached
+ // default request.
+ if (sessionParams.isEmpty() &&
+ ((mLastTemplateId > 0) && (mLastTemplateId < CAMERA3_TEMPLATE_COUNT)) &&
+ (!mRequestTemplateCache[mLastTemplateId].isEmpty())) {
+ ALOGV("%s: Speculative session param configuration with template id: %d", __func__,
+ mLastTemplateId);
+ return filterParamsAndConfigureLocked(mRequestTemplateCache[mLastTemplateId],
+ operatingMode);
+ }
+
+ return filterParamsAndConfigureLocked(sessionParams, operatingMode);
+}
+
+status_t Camera3Device::filterParamsAndConfigureLocked(const CameraMetadata& sessionParams,
+ int operatingMode) {
+ // Filter the incoming session parameters against the advertised session keys
+ const CameraMetadata params(sessionParams);
+ camera_metadata_entry_t availableSessionKeys = mDeviceInfo.find(
+ ANDROID_REQUEST_AVAILABLE_SESSION_KEYS);
+ CameraMetadata filteredParams(availableSessionKeys.count);
+ camera_metadata_t *meta = const_cast<camera_metadata_t *>(
+ filteredParams.getAndLock());
+ set_camera_metadata_vendor_id(meta, mVendorTagId);
+ filteredParams.unlock(meta);
+ if (availableSessionKeys.count > 0) {
+ for (size_t i = 0; i < availableSessionKeys.count; i++) {
+ camera_metadata_ro_entry entry = params.find(
+ availableSessionKeys.data.i32[i]);
+ if (entry.count > 0) {
+ filteredParams.update(entry);
+ }
+ }
+ }
+
+ return configureStreamsLocked(operatingMode, filteredParams);
}
status_t Camera3Device::getInputBufferProducer(
@@ -1544,6 +1694,7 @@
if (!mRequestTemplateCache[templateId].isEmpty()) {
*request = mRequestTemplateCache[templateId];
+ mLastTemplateId = templateId;
return OK;
}
}
@@ -1568,6 +1719,7 @@
mRequestTemplateCache[templateId].acquire(rawRequest);
*request = mRequestTemplateCache[templateId];
+ mLastTemplateId = templateId;
}
return OK;
}
@@ -1614,10 +1766,16 @@
mStatusChanged.broadcast();
}
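+// Used by the request thread to suppress idle/active listener notifications
+// while it performs an internal reconfiguration (see reconfigureCamera()).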
+void Camera3Device::pauseStateNotify(bool enable) {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ mPauseStateNotify = enable;
+}
+
// Pause to reconfigure
status_t Camera3Device::internalPauseAndWaitLocked(nsecs_t maxExpectedDuration) {
mRequestThread->setPaused(true);
- mPauseStateNotify = true;
ALOGV("%s: Camera %s: Internal wait until idle (% " PRIi64 " ns)", __FUNCTION__, mId.string(),
maxExpectedDuration);
@@ -1636,6 +1794,8 @@
mRequestThread->setPaused(false);
+ ALOGV("%s: Camera %s: Internal wait until active (% " PRIi64 " ns)", __FUNCTION__, mId.string(),
+ kActiveTimeout);
res = waitUntilStateThenRelock(/*active*/ true, kActiveTimeout);
if (res != OK) {
SET_ERR_L("Can't transition to active in %f seconds!",
@@ -1744,6 +1904,7 @@
CaptureResult &result = *(mResultQueue.begin());
frame->mResultExtras = result.mResultExtras;
frame->mMetadata.acquire(result.mMetadata);
+ frame->mPhysicalMetadatas = std::move(result.mPhysicalMetadatas);
mResultQueue.erase(mResultQueue.begin());
return OK;
@@ -1916,8 +2077,8 @@
if (mStatus != STATUS_ACTIVE && mStatus != STATUS_CONFIGURED) {
return;
}
- ALOGV("%s: Camera %s: Now %s", __FUNCTION__, mId.string(),
- idle ? "idle" : "active");
+ ALOGV("%s: Camera %s: Now %s, pauseState: %s", __FUNCTION__, mId.string(),
+ idle ? "idle" : "active", mPauseStateNotify ? "true" : "false");
internalUpdateStatusLocked(idle ? STATUS_CONFIGURED : STATUS_ACTIVE);
// Skip notifying listener if we're doing some user-transparent
@@ -1936,10 +2097,15 @@
}
status_t Camera3Device::setConsumerSurfaces(int streamId,
- const std::vector<sp<Surface>>& consumers) {
+ const std::vector<sp<Surface>>& consumers, std::vector<int> *surfaceIds) {
ATRACE_CALL();
ALOGV("%s: Camera %s: set consumer surface for stream %d",
__FUNCTION__, mId.string(), streamId);
+
+ if (surfaceIds == nullptr) {
+ return BAD_VALUE;
+ }
+
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
@@ -1960,6 +2126,15 @@
return res;
}
+ for (auto &consumer : consumers) {
+ int id = stream->getSurfaceId(consumer);
+ if (id < 0) {
+ CLOGE("Invalid surface id!");
+ return BAD_VALUE;
+ }
+ surfaceIds->push_back(id);
+ }
+
if (stream->isConsumerConfigurationDeferred()) {
if (!stream->isConfiguring()) {
CLOGE("Stream %d was already fully configured.", streamId);
@@ -1977,20 +2152,68 @@
return OK;
}
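+// Replaces the output surfaces of a shared stream: the update is rejected with
+// -EBUSY while any surface slated for removal is still referenced by a pending
+// capture request; otherwise the new surface set is forwarded to the stream.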
+status_t Camera3Device::updateStream(int streamId, const std::vector<sp<Surface>> &newSurfaces,
+ const std::vector<OutputStreamInfo> &outputInfo,
+ const std::vector<size_t> &removedSurfaceIds, KeyedVector<sp<Surface>, size_t> *outputMap) {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ ssize_t idx = mOutputStreams.indexOfKey(streamId);
+ if (idx == NAME_NOT_FOUND) {
+ CLOGE("Stream %d is unknown", streamId);
+ return idx;
+ }
+
+ for (const auto &it : removedSurfaceIds) {
+ if (mRequestThread->isOutputSurfacePending(streamId, it)) {
+ CLOGE("Shared surface still part of a pending request!");
+ return -EBUSY;
+ }
+ }
+
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams[idx];
+ status_t res = stream->updateStream(newSurfaces, outputInfo, removedSurfaceIds, outputMap);
+ if (res != OK) {
+ CLOGE("Stream %d failed to update stream (error %d %s) ",
+ streamId, res, strerror(-res));
+ if (res == UNKNOWN_ERROR) {
+ SET_ERR_L("%s: Stream update failed to revert to previous output configuration!",
+ __FUNCTION__);
+ }
+ return res;
+ }
+
+ return res;
+}
+
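+// Forwards a start/stop buffer-dropping request to the output stream
+// identified by streamId.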
+status_t Camera3Device::dropStreamBuffers(bool dropping, int streamId) {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ int idx = mOutputStreams.indexOfKey(streamId);
+ if (idx == NAME_NOT_FOUND) {
+ ALOGE("%s: Stream %d is not found.", __FUNCTION__, streamId);
+ return BAD_VALUE;
+ }
+
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams.editValueAt(idx);
+ return stream->dropBuffers(dropping);
+}
+
/**
* Camera3Device private methods
*/
sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
- const CameraMetadata &request, const SurfaceMap &surfaceMap) {
+ const PhysicalCameraSettingsList &request, const SurfaceMap &surfaceMap) {
ATRACE_CALL();
status_t res;
sp<CaptureRequest> newRequest = new CaptureRequest;
- newRequest->mSettings = request;
+ newRequest->mSettingsList = request;
camera_metadata_entry_t inputStreams =
- newRequest->mSettings.find(ANDROID_REQUEST_INPUT_STREAMS);
+ newRequest->mSettingsList.begin()->metadata.find(ANDROID_REQUEST_INPUT_STREAMS);
if (inputStreams.count > 0) {
if (mInputStream == NULL ||
mInputStream->getId() != inputStreams.data.i32[0]) {
@@ -2016,11 +2239,11 @@
}
newRequest->mInputStream = mInputStream;
- newRequest->mSettings.erase(ANDROID_REQUEST_INPUT_STREAMS);
+ newRequest->mSettingsList.begin()->metadata.erase(ANDROID_REQUEST_INPUT_STREAMS);
}
camera_metadata_entry_t streams =
- newRequest->mSettings.find(ANDROID_REQUEST_OUTPUT_STREAMS);
+ newRequest->mSettingsList.begin()->metadata.find(ANDROID_REQUEST_OUTPUT_STREAMS);
if (streams.count == 0) {
CLOGE("Zero output streams specified!");
return NULL;
@@ -2068,7 +2291,7 @@
newRequest->mOutputStreams.push(stream);
}
- newRequest->mSettings.erase(ANDROID_REQUEST_OUTPUT_STREAMS);
+ newRequest->mSettingsList.begin()->metadata.erase(ANDROID_REQUEST_OUTPUT_STREAMS);
newRequest->mBatchSize = 1;
return newRequest;
@@ -2110,9 +2333,47 @@
// properly clean things up
internalUpdateStatusLocked(STATUS_UNCONFIGURED);
mNeedConfig = true;
+
+ res = mPreparerThread->resume();
+ if (res != OK) {
+ ALOGE("%s: Camera %s: Preparer thread failed to resume!", __FUNCTION__, mId.string());
+ }
}
-status_t Camera3Device::configureStreamsLocked(int operatingMode) {
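+// Pauses capture processing, re-runs configureStreamsLocked() with the new
+// session parameters, and moves the device back to the active state. Returns
+// true only if the reconfiguration succeeded; called from the request thread
+// when updateSessionParameters() detects a change.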
+bool Camera3Device::reconfigureCamera(const CameraMetadata& sessionParams) {
+ ATRACE_CALL();
+ bool ret = false;
+
+ Mutex::Autolock il(mInterfaceLock);
+ nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
+
+ Mutex::Autolock l(mLock);
+ auto rc = internalPauseAndWaitLocked(maxExpectedDuration);
+ if (rc == NO_ERROR) {
+ mNeedConfig = true;
+ rc = configureStreamsLocked(mOperatingMode, sessionParams, /*notifyRequestThread*/ false);
+ if (rc == NO_ERROR) {
+ ret = true;
+ mPauseStateNotify = false;
+ // Moving to the active state while holding 'mLock' is important:
+ // pending calls to 'create-/deleteStream' could otherwise trigger
+ // another stream configuration while the already present streams
+ // still hold outstanding buffers that would never get drained.
+ internalUpdateStatusLocked(STATUS_ACTIVE);
+ } else {
+ setErrorStateLocked("%s: Failed to re-configure camera: %d",
+ __FUNCTION__, rc);
+ }
+ } else {
+ ALOGE("%s: Failed to pause streaming: %d", __FUNCTION__, rc);
+ }
+
+ return ret;
+}
+
+status_t Camera3Device::configureStreamsLocked(int operatingMode,
+ const CameraMetadata& sessionParams, bool notifyRequestThread) {
ATRACE_CALL();
status_t res;
@@ -2153,12 +2414,16 @@
// Start configuring the streams
ALOGV("%s: Camera %s: Starting stream configuration", __FUNCTION__, mId.string());
+ mPreparerThread->pause();
+
camera3_stream_configuration config;
config.operation_mode = mOperatingMode;
config.num_streams = (mInputStream != NULL) + mOutputStreams.size();
Vector<camera3_stream_t*> streams;
streams.setCapacity(config.num_streams);
+ std::vector<uint32_t> bufferSizes(config.num_streams, 0);
+
if (mInputStream != NULL) {
camera3_stream_t *inputStream;
@@ -2189,6 +2454,14 @@
return INVALID_OPERATION;
}
streams.add(outputStream);
+
+ if (outputStream->format == HAL_PIXEL_FORMAT_BLOB &&
+ outputStream->data_space == HAL_DATASPACE_V0_JFIF) {
+ size_t k = i + ((mInputStream != nullptr) ? 1 : 0); // Input stream if present should
+ // always occupy the initial entry.
+ bufferSizes[k] = static_cast<uint32_t>(
+ getJpegBufferSize(outputStream->width, outputStream->height));
+ }
}
config.streams = streams.editArray();
@@ -2196,7 +2469,9 @@
// Do the HAL configuration; will potentially touch stream
// max_buffers, usage, priv fields.
- res = mInterface->configureStreams(&config);
+ const camera_metadata_t *sessionBuffer = sessionParams.getAndLock();
+ res = mInterface->configureStreams(sessionBuffer, &config, bufferSizes);
+ sessionParams.unlock(sessionBuffer);
if (res == BAD_VALUE) {
// HAL rejected this set of streams as unsupported, clean up config
@@ -2242,7 +2517,9 @@
// Request thread needs to know to avoid using repeat-last-settings protocol
// across configure_streams() calls
- mRequestThread->configurationComplete(mIsConstrainedHighSpeedConfiguration);
+ if (notifyRequestThread) {
+ mRequestThread->configurationComplete(mIsConstrainedHighSpeedConfiguration, sessionParams);
+ }
char value[PROPERTY_VALUE_MAX];
property_get("camera.fifo.disable", value, "0");
@@ -2261,6 +2538,14 @@
}
// Update device state
+ const camera_metadata_t *newSessionParams = sessionParams.getAndLock();
+ const camera_metadata_t *currentSessionParams = mSessionParams.getAndLock();
+ bool updateSessionParams = (newSessionParams != currentSessionParams);
+ sessionParams.unlock(newSessionParams);
+ mSessionParams.unlock(currentSessionParams);
+ if (updateSessionParams) {
+ mSessionParams = sessionParams;
+ }
mNeedConfig = false;
@@ -2272,6 +2557,12 @@
// tear down the deleted streams after configure streams.
mDeletedStreams.clear();
+ auto rc = mPreparerThread->resume();
+ if (rc != OK) {
+ SET_ERR_L("%s: Camera %s: Preparer thread failed to resume!", __FUNCTION__, mId.string());
+ return rc;
+ }
+
return OK;
}
@@ -2397,13 +2688,14 @@
status_t Camera3Device::registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
- bool hasAppCallback, nsecs_t maxExpectedDuration) {
+ bool hasAppCallback, nsecs_t maxExpectedDuration,
+ std::set<String8>& physicalCameraIds) {
ATRACE_CALL();
Mutex::Autolock l(mInFlightLock);
ssize_t res;
res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
- hasAppCallback, maxExpectedDuration));
+ hasAppCallback, maxExpectedDuration, physicalCameraIds));
if (res < 0) return res;
if (mInFlightMap.size() == 1) {
@@ -2643,7 +2935,8 @@
CaptureResultExtras &resultExtras,
CameraMetadata &collectedPartialResult,
uint32_t frameNumber,
- bool reprocess) {
+ bool reprocess,
+ const std::vector<PhysicalCaptureResultInfo>& physicalMetadatas) {
ATRACE_CALL();
if (pendingMetadata.isEmpty())
return;
@@ -2672,6 +2965,7 @@
CaptureResult captureResult;
captureResult.mResultExtras = resultExtras;
captureResult.mMetadata = pendingMetadata;
+ captureResult.mPhysicalMetadatas = physicalMetadatas;
// Append any previous partials to form a complete result
if (mUsePartialResult && !collectedPartialResult.isEmpty()) {
@@ -2687,6 +2981,23 @@
frameNumber);
return;
}
+ for (auto& physicalMetadata : captureResult.mPhysicalMetadatas) {
+ camera_metadata_entry timestamp =
+ physicalMetadata.mPhysicalCameraMetadata.find(ANDROID_SENSOR_TIMESTAMP);
+ if (timestamp.count == 0) {
+ SET_ERR("No timestamp provided by HAL for physical camera %s frame %d!",
+ String8(physicalMetadata.mPhysicalCameraId).c_str(), frameNumber);
+ return;
+ }
+ }
+
+ // Fix up some result metadata to account for HAL-level distortion correction
+ status_t res = mDistortionMapper.correctCaptureResult(&captureResult.mMetadata);
+ if (res != OK) {
+ SET_ERR("Unable to correct capture result metadata for frame %d: %s (%d)",
+ frameNumber, strerror(-res), res);
+ return;
+ }
mTagMonitor.monitorMetadata(TagMonitor::RESULT,
frameNumber, timestamp.data.i64[0], captureResult.mMetadata);
@@ -2722,7 +3033,6 @@
bool isPartialResult = false;
CameraMetadata collectedPartialResult;
- CaptureResultExtras resultExtras;
bool hasInputBufferInRequest = false;
// Get shutter timestamp and resultExtras from list of in-flight requests,
@@ -2763,6 +3073,11 @@
return;
}
isPartialResult = (result->partial_result < mNumPartialResults);
+ if (isPartialResult && result->num_physcam_metadata) {
+ SET_ERR("Result is malformed for frame %d: partial_result not allowed for"
+ " physical camera result", frameNumber);
+ return;
+ }
if (isPartialResult) {
request.collectedPartialResult.append(result->result);
}
@@ -2779,11 +3094,28 @@
// Did we get the (final) result metadata for this capture?
if (result->result != NULL && !isPartialResult) {
+ if (request.physicalCameraIds.size() != result->num_physcam_metadata) {
+ SET_ERR("Requested physical Camera Ids %d not equal to number of metadata %d",
+ request.physicalCameraIds.size(), result->num_physcam_metadata);
+ return;
+ }
if (request.haveResultMetadata) {
SET_ERR("Called multiple times with metadata for frame %d",
frameNumber);
return;
}
+ for (uint32_t i = 0; i < result->num_physcam_metadata; i++) {
+ String8 physicalId(result->physcam_ids[i]);
+ std::set<String8>::iterator cameraIdIter =
+ request.physicalCameraIds.find(physicalId);
+ if (cameraIdIter != request.physicalCameraIds.end()) {
+ request.physicalCameraIds.erase(cameraIdIter);
+ } else {
+ SET_ERR("Total result for frame %d has already returned for camera %s",
+ frameNumber, physicalId.c_str());
+ return;
+ }
+ }
if (mUsePartialResult &&
!request.collectedPartialResult.isEmpty()) {
collectedPartialResult.acquire(
@@ -2828,15 +3160,21 @@
}
if (result->result != NULL && !isPartialResult) {
+ for (uint32_t i = 0; i < result->num_physcam_metadata; i++) {
+ CameraMetadata physicalMetadata;
+ physicalMetadata.append(result->physcam_metadata[i]);
+ request.physicalMetadatas.push_back({String16(result->physcam_ids[i]),
+ physicalMetadata});
+ }
if (shutterTimestamp == 0) {
request.pendingMetadata = result->result;
request.collectedPartialResult = collectedPartialResult;
- } else if (request.hasCallback) {
+ } else if (request.hasCallback) {
CameraMetadata metadata;
metadata = result->result;
sendCaptureResult(metadata, request.resultExtras,
collectedPartialResult, frameNumber,
- hasInputBufferInRequest);
+ hasInputBufferInRequest, request.physicalMetadatas);
}
}
@@ -3022,7 +3360,7 @@
// send pending result and buffers
sendCaptureResult(r.pendingMetadata, r.resultExtras,
r.collectedPartialResult, msg.frame_number,
- r.hasInputBuffer);
+ r.hasInputBuffer, r.physicalMetadatas);
}
returnOutputBuffers(r.pendingOutputBuffers.array(),
r.pendingOutputBuffers.size(), r.shutterTimestamp);
@@ -3037,7 +3375,6 @@
}
}
-
CameraMetadata Camera3Device::getLatestRequestLocked() {
ALOGV("%s", __FUNCTION__);
@@ -3064,7 +3401,18 @@
sp<ICameraDeviceSession> &session,
std::shared_ptr<RequestMetadataQueue> queue) :
mHidlSession(session),
- mRequestMetadataQueue(queue) {}
+ mRequestMetadataQueue(queue) {
+ // Check with hardware service manager if we can downcast these interfaces
+ // Somewhat expensive, so cache the results at startup
+ auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
+ if (castResult_3_4.isOk()) {
+ mHidlSession_3_4 = castResult_3_4;
+ }
+ auto castResult_3_3 = device::V3_3::ICameraDeviceSession::castFrom(mHidlSession);
+ if (castResult_3_3.isOk()) {
+ mHidlSession_3_3 = castResult_3_3;
+ }
+}
Camera3Device::HalInterface::HalInterface() {}
@@ -3077,6 +3425,8 @@
}
void Camera3Device::HalInterface::clear() {
+ mHidlSession_3_4.clear();
+ mHidlSession_3_3.clear();
mHidlSession.clear();
}
@@ -3092,6 +3442,29 @@
status_t res = OK;
common::V1_0::Status status;
+
+ auto requestCallback = [&status, &requestTemplate]
+ (common::V1_0::Status s, const device::V3_2::CameraMetadata& request) {
+ status = s;
+ if (status == common::V1_0::Status::OK) {
+ const camera_metadata *r =
+ reinterpret_cast<const camera_metadata_t*>(request.data());
+ size_t expectedSize = request.size();
+ int ret = validate_camera_metadata_structure(r, &expectedSize);
+ if (ret == OK || ret == CAMERA_METADATA_VALIDATION_SHIFTED) {
+ *requestTemplate = clone_camera_metadata(r);
+ if (*requestTemplate == nullptr) {
+ ALOGE("%s: Unable to clone camera metadata received from HAL",
+ __FUNCTION__);
+ status = common::V1_0::Status::INTERNAL_ERROR;
+ }
+ } else {
+ ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
+ status = common::V1_0::Status::INTERNAL_ERROR;
+ }
+ }
+ };
+ hardware::Return<void> err;
RequestTemplate id;
switch (templateId) {
case CAMERA3_TEMPLATE_PREVIEW:
@@ -3113,31 +3486,11 @@
id = RequestTemplate::MANUAL;
break;
default:
- // Unknown template ID
+ // Unknown template ID, or this HAL is too old to support it
return BAD_VALUE;
}
- auto err = mHidlSession->constructDefaultRequestSettings(id,
- [&status, &requestTemplate]
- (common::V1_0::Status s, const device::V3_2::CameraMetadata& request) {
- status = s;
- if (status == common::V1_0::Status::OK) {
- const camera_metadata *r =
- reinterpret_cast<const camera_metadata_t*>(request.data());
- size_t expectedSize = request.size();
- int ret = validate_camera_metadata_structure(r, &expectedSize);
- if (ret == OK || ret == CAMERA_METADATA_VALIDATION_SHIFTED) {
- *requestTemplate = clone_camera_metadata(r);
- if (*requestTemplate == nullptr) {
- ALOGE("%s: Unable to clone camera metadata received from HAL",
- __FUNCTION__);
- status = common::V1_0::Status::INTERNAL_ERROR;
- }
- } else {
- ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
- status = common::V1_0::Status::INTERNAL_ERROR;
- }
- }
- });
+ err = mHidlSession->constructDefaultRequestSettings(id, requestCallback);
+
if (!err.isOk()) {
ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
res = DEAD_OBJECT;
@@ -3148,17 +3501,21 @@
return res;
}
-status_t Camera3Device::HalInterface::configureStreams(camera3_stream_configuration *config) {
+status_t Camera3Device::HalInterface::configureStreams(const camera_metadata_t *sessionParams,
+ camera3_stream_configuration *config, const std::vector<uint32_t>& bufferSizes) {
ATRACE_NAME("CameraHal::configureStreams");
if (!valid()) return INVALID_OPERATION;
status_t res = OK;
// Convert stream config to HIDL
std::set<int> activeStreams;
- StreamConfiguration requestedConfiguration;
- requestedConfiguration.streams.resize(config->num_streams);
+ device::V3_2::StreamConfiguration requestedConfiguration3_2;
+ device::V3_4::StreamConfiguration requestedConfiguration3_4;
+ requestedConfiguration3_2.streams.resize(config->num_streams);
+ requestedConfiguration3_4.streams.resize(config->num_streams);
for (size_t i = 0; i < config->num_streams; i++) {
- Stream &dst = requestedConfiguration.streams[i];
+ device::V3_2::Stream &dst3_2 = requestedConfiguration3_2.streams[i];
+ device::V3_4::Stream &dst3_4 = requestedConfiguration3_4.streams[i];
camera3_stream_t *src = config->streams[i];
Camera3Stream* cam3stream = Camera3Stream::cast(src);
@@ -3177,14 +3534,19 @@
__FUNCTION__, streamId, config->streams[i]->stream_type);
return BAD_VALUE;
}
- dst.id = streamId;
- dst.streamType = streamType;
- dst.width = src->width;
- dst.height = src->height;
- dst.format = mapToPixelFormat(src->format);
- dst.usage = mapToConsumerUsage(cam3stream->getUsage());
- dst.dataSpace = mapToHidlDataspace(src->data_space);
- dst.rotation = mapToStreamRotation((camera3_stream_rotation_t) src->rotation);
+ dst3_2.id = streamId;
+ dst3_2.streamType = streamType;
+ dst3_2.width = src->width;
+ dst3_2.height = src->height;
+ dst3_2.format = mapToPixelFormat(src->format);
+ dst3_2.usage = mapToConsumerUsage(cam3stream->getUsage());
+ dst3_2.dataSpace = mapToHidlDataspace(src->data_space);
+ dst3_2.rotation = mapToStreamRotation((camera3_stream_rotation_t) src->rotation);
+ dst3_4.v3_2 = dst3_2;
+ dst3_4.bufferSize = bufferSizes[i];
+ if (src->physical_camera_id != nullptr) {
+ dst3_4.physicalCameraId = src->physical_camera_id;
+ }
activeStreams.insert(streamId);
// Create Buffer ID map if necessary
@@ -3203,31 +3565,46 @@
}
}
+ StreamConfigurationMode operationMode;
res = mapToStreamConfigurationMode(
(camera3_stream_configuration_mode_t) config->operation_mode,
- /*out*/ &requestedConfiguration.operationMode);
+ /*out*/ &operationMode);
if (res != OK) {
return res;
}
+ requestedConfiguration3_2.operationMode = operationMode;
+ requestedConfiguration3_4.operationMode = operationMode;
+ requestedConfiguration3_4.sessionParams.setToExternal(
+ reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
+ get_camera_metadata_size(sessionParams));
// Invoke configureStreams
-
device::V3_3::HalStreamConfiguration finalConfiguration;
common::V1_0::Status status;
- // See if we have v3.3 HAL
- sp<device::V3_3::ICameraDeviceSession> hidlSession_3_3;
- auto castResult = device::V3_3::ICameraDeviceSession::castFrom(mHidlSession);
- if (castResult.isOk()) {
- hidlSession_3_3 = castResult;
- } else {
- ALOGE("%s: Transaction error when casting ICameraDeviceSession: %s", __FUNCTION__,
- castResult.description().c_str());
- }
- if (hidlSession_3_3 != nullptr) {
+ // See if we have v3.4 or v3.3 HAL
+ if (mHidlSession_3_4 != nullptr) {
+ // We do; use v3.4 for the call
+ ALOGV("%s: v3.4 device found", __FUNCTION__);
+ device::V3_4::HalStreamConfiguration finalConfiguration3_4;
+ auto err = mHidlSession_3_4->configureStreams_3_4(requestedConfiguration3_4,
+ [&status, &finalConfiguration3_4]
+ (common::V1_0::Status s, const device::V3_4::HalStreamConfiguration& halConfiguration) {
+ finalConfiguration3_4 = halConfiguration;
+ status = s;
+ });
+ if (!err.isOk()) {
+ ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+ return DEAD_OBJECT;
+ }
+ finalConfiguration.streams.resize(finalConfiguration3_4.streams.size());
+ for (size_t i = 0; i < finalConfiguration3_4.streams.size(); i++) {
+ finalConfiguration.streams[i] = finalConfiguration3_4.streams[i].v3_3;
+ }
+ } else if (mHidlSession_3_3 != nullptr) {
// We do; use v3.3 for the call
ALOGV("%s: v3.3 device found", __FUNCTION__);
- auto err = hidlSession_3_3->configureStreams_3_3(requestedConfiguration,
+ auto err = mHidlSession_3_3->configureStreams_3_3(requestedConfiguration3_2,
[&status, &finalConfiguration]
(common::V1_0::Status s, const device::V3_3::HalStreamConfiguration& halConfiguration) {
finalConfiguration = halConfiguration;
@@ -3241,7 +3618,7 @@
// We don't; use v3.2 call and construct a v3.3 HalStreamConfiguration
ALOGV("%s: v3.2 device found", __FUNCTION__);
HalStreamConfiguration finalConfiguration_3_2;
- auto err = mHidlSession->configureStreams(requestedConfiguration,
+ auto err = mHidlSession->configureStreams(requestedConfiguration3_2,
[&status, &finalConfiguration_3_2]
(common::V1_0::Status s, const HalStreamConfiguration& halConfiguration) {
finalConfiguration_3_2 = halConfiguration;
@@ -3255,7 +3632,7 @@
for (size_t i = 0; i < finalConfiguration_3_2.streams.size(); i++) {
finalConfiguration.streams[i].v3_2 = finalConfiguration_3_2.streams[i];
finalConfiguration.streams[i].overrideDataSpace =
- requestedConfiguration.streams[i].dataSpace;
+ requestedConfiguration3_2.streams[i].dataSpace;
}
}
@@ -3409,13 +3786,29 @@
ATRACE_NAME("CameraHal::processBatchCaptureRequests");
if (!valid()) return INVALID_OPERATION;
+ sp<device::V3_4::ICameraDeviceSession> hidlSession_3_4;
+ auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
+ if (castResult_3_4.isOk()) {
+ hidlSession_3_4 = castResult_3_4;
+ }
+
hardware::hidl_vec<device::V3_2::CaptureRequest> captureRequests;
+ hardware::hidl_vec<device::V3_4::CaptureRequest> captureRequests_3_4;
size_t batchSize = requests.size();
- captureRequests.resize(batchSize);
+ if (hidlSession_3_4 != nullptr) {
+ captureRequests_3_4.resize(batchSize);
+ } else {
+ captureRequests.resize(batchSize);
+ }
std::vector<native_handle_t*> handlesCreated;
for (size_t i = 0; i < batchSize; i++) {
- wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i], /*out*/&handlesCreated);
+ if (hidlSession_3_4 != nullptr) {
+ wrapAsHidlRequest(requests[i], /*out*/&captureRequests_3_4[i].v3_2,
+ /*out*/&handlesCreated);
+ } else {
+ wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i], /*out*/&handlesCreated);
+ }
}
std::vector<device::V3_2::BufferCache> cachesToRemove;
@@ -3436,7 +3829,12 @@
// Write metadata to FMQ.
for (size_t i = 0; i < batchSize; i++) {
camera3_capture_request_t* request = requests[i];
- device::V3_2::CaptureRequest* captureRequest = &captureRequests[i];
+ device::V3_2::CaptureRequest* captureRequest;
+ if (hidlSession_3_4 != nullptr) {
+ captureRequest = &captureRequests_3_4[i].v3_2;
+ } else {
+ captureRequest = &captureRequests[i];
+ }
if (request->settings != nullptr) {
size_t settingsSize = get_camera_metadata_size(request->settings);
@@ -3458,12 +3856,52 @@
captureRequest->settings.resize(0);
captureRequest->fmqSettingsSize = 0u;
}
+
+ if (hidlSession_3_4 != nullptr) {
+ captureRequests_3_4[i].physicalCameraSettings.resize(request->num_physcam_settings);
+ for (size_t j = 0; j < request->num_physcam_settings; j++) {
+ if (request->physcam_settings != nullptr) {
+ size_t settingsSize = get_camera_metadata_size(request->physcam_settings[j]);
+ if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
+ reinterpret_cast<const uint8_t*>(request->physcam_settings[j]),
+ settingsSize)) {
+ captureRequests_3_4[i].physicalCameraSettings[j].settings.resize(0);
+ captureRequests_3_4[i].physicalCameraSettings[j].fmqSettingsSize =
+ settingsSize;
+ } else {
+ if (mRequestMetadataQueue != nullptr) {
+ ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
+ }
+ captureRequests_3_4[i].physicalCameraSettings[j].settings.setToExternal(
+ reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(
+ request->physcam_settings[j])),
+ get_camera_metadata_size(request->physcam_settings[j]));
+ captureRequests_3_4[i].physicalCameraSettings[j].fmqSettingsSize = 0u;
+ }
+ } else {
+ captureRequests_3_4[i].physicalCameraSettings[j].fmqSettingsSize = 0u;
+ captureRequests_3_4[i].physicalCameraSettings[j].settings.resize(0);
+ }
+ captureRequests_3_4[i].physicalCameraSettings[j].physicalCameraId =
+ request->physcam_id[j];
+ }
+ }
}
- auto err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
+
+ hardware::details::return_status err;
+ if (hidlSession_3_4 != nullptr) {
+ err = hidlSession_3_4->processCaptureRequest_3_4(captureRequests_3_4, cachesToRemove,
[&status, &numRequestProcessed] (auto s, uint32_t n) {
status = s;
*numRequestProcessed = n;
});
+ } else {
+ err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
+ [&status, &numRequestProcessed] (auto s, uint32_t n) {
+ status = s;
+ *numRequestProcessed = n;
+ });
+ }
if (!err.isOk()) {
ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
return DEAD_OBJECT;
@@ -3621,7 +4059,7 @@
Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent,
sp<StatusTracker> statusTracker,
- sp<HalInterface> interface) :
+ sp<HalInterface> interface, const Vector<int32_t>& sessionParamKeys) :
Thread(/*canCallJava*/false),
mParent(parent),
mStatusTracker(statusTracker),
@@ -3638,7 +4076,10 @@
mRepeatingLastFrameNumber(
hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES),
mPrepareVideoStream(false),
- mRequestLatency(kRequestLatencyBinSize) {
+ mConstrainedMode(false),
+ mRequestLatency(kRequestLatencyBinSize),
+ mSessionParamKeys(sessionParamKeys),
+ mLatestSessionParams(sessionParamKeys.size()) {
mStatusId = statusTracker->addComponent();
}
@@ -3651,12 +4092,15 @@
mListener = listener;
}
-void Camera3Device::RequestThread::configurationComplete(bool isConstrainedHighSpeed) {
+void Camera3Device::RequestThread::configurationComplete(bool isConstrainedHighSpeed,
+ const CameraMetadata& sessionParams) {
ATRACE_CALL();
Mutex::Autolock l(mRequestLock);
mReconfigured = true;
+ mLatestSessionParams = sessionParams;
// Prepare video stream for high speed recording.
mPrepareVideoStream = isConstrainedHighSpeed;
+ mConstrainedMode = isConstrainedHighSpeed;
}
status_t Camera3Device::RequestThread::queueRequestList(
@@ -3914,9 +4358,9 @@
uint32_t numRequestProcessed = 0;
for (size_t i = 0; i < batchSize; i++) {
requests[i] = &mNextRequests.editItemAt(i).halRequest;
+ ATRACE_ASYNC_BEGIN("frame capture", mNextRequests[i].halRequest.frame_number);
}
- ATRACE_ASYNC_BEGIN("batch frame capture", mNextRequests[0].halRequest.frame_number);
res = mInterface->processBatchCaptureRequests(requests, &numRequestProcessed);
bool triggerRemoveFailed = false;
@@ -3942,9 +4386,12 @@
}
if (nextRequest.halRequest.settings != NULL) {
- nextRequest.captureRequest->mSettings.unlock(nextRequest.halRequest.settings);
+ nextRequest.captureRequest->mSettingsList.begin()->metadata.unlock(
+ nextRequest.halRequest.settings);
}
+ cleanupPhysicalSettings(nextRequest.captureRequest, &nextRequest.halRequest);
+
if (!triggerRemoveFailed) {
// Remove any previously queued triggers (after unlock)
status_t removeTriggerRes = removeTriggers(mPrevRequest);
@@ -4013,9 +4460,12 @@
}
if (nextRequest.halRequest.settings != NULL) {
- nextRequest.captureRequest->mSettings.unlock(nextRequest.halRequest.settings);
+ nextRequest.captureRequest->mSettingsList.begin()->metadata.unlock(
+ nextRequest.halRequest.settings);
}
+ cleanupPhysicalSettings(nextRequest.captureRequest, &nextRequest.halRequest);
+
// Remove any previously queued triggers (after unlock)
res = removeTriggers(mPrevRequest);
if (res != OK) {
@@ -4065,6 +4515,65 @@
return maxExpectedDuration;
}
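+// In constrained high-speed mode, ignore AE target FPS range updates whose
+// upper bound is unchanged so they do not force a session reconfiguration.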
+bool Camera3Device::RequestThread::skipHFRTargetFPSUpdate(int32_t tag,
+ const camera_metadata_ro_entry_t& newEntry, const camera_metadata_entry_t& currentEntry) {
+ if (mConstrainedMode && (ANDROID_CONTROL_AE_TARGET_FPS_RANGE == tag) &&
+ (newEntry.count == currentEntry.count) && (currentEntry.count == 2) &&
+ (currentEntry.data.i32[1] == newEntry.data.i32[1])) {
+ return true;
+ }
+
+ return false;
+}
+
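+// Compares the session-parameter keys in the incoming settings against the
+// values used for the last stream configuration; returns true when a parameter
+// has been added, changed, or removed (HFR target FPS updates that keep the
+// same upper bound are ignored via skipHFRTargetFPSUpdate()).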
+bool Camera3Device::RequestThread::updateSessionParameters(const CameraMetadata& settings) {
+ ATRACE_CALL();
+ bool updatesDetected = false;
+
+ for (auto tag : mSessionParamKeys) {
+ camera_metadata_ro_entry entry = settings.find(tag);
+ camera_metadata_entry lastEntry = mLatestSessionParams.find(tag);
+
+ if (entry.count > 0) {
+ bool isDifferent = false;
+ if (lastEntry.count > 0) {
+ // Have a last value, compare to see if changed
+ if (lastEntry.type == entry.type &&
+ lastEntry.count == entry.count) {
+ // Same type and count, compare values
+ size_t bytesPerValue = camera_metadata_type_size[lastEntry.type];
+ size_t entryBytes = bytesPerValue * lastEntry.count;
+ int cmp = memcmp(entry.data.u8, lastEntry.data.u8, entryBytes);
+ if (cmp != 0) {
+ isDifferent = true;
+ }
+ } else {
+ // Count or type has changed
+ isDifferent = true;
+ }
+ } else {
+ // No last entry, so always consider to be different
+ isDifferent = true;
+ }
+
+ if (isDifferent) {
+ ALOGV("%s: Session parameter tag id %d changed", __FUNCTION__, tag);
+ if (!skipHFRTargetFPSUpdate(tag, entry, lastEntry)) {
+ updatesDetected = true;
+ }
+ mLatestSessionParams.update(entry);
+ }
+ } else if (lastEntry.count > 0) {
+ // Value has been removed
+ ALOGV("%s: Session parameter tag id %d removed", __FUNCTION__, tag);
+ mLatestSessionParams.erase(tag);
+ updatesDetected = true;
+ }
+ }
+
+ return updatesDetected;
+}
+
bool Camera3Device::RequestThread::threadLoop() {
ATRACE_CALL();
status_t res;
@@ -4083,7 +4592,7 @@
// Get the latest request ID, if any
int latestRequestId;
camera_metadata_entry_t requestIdEntry = mNextRequests[mNextRequests.size() - 1].
- captureRequest->mSettings.find(ANDROID_REQUEST_ID);
+ captureRequest->mSettingsList.begin()->metadata.find(ANDROID_REQUEST_ID);
if (requestIdEntry.count > 0) {
latestRequestId = requestIdEntry.data.i32[0];
} else {
@@ -4091,6 +4600,53 @@
latestRequestId = NAME_NOT_FOUND;
}
+ // 'mNextRequests' will at this point contain either a set of HFR batched requests
+ // or a single request from streaming or burst. In either case the first element
+ // should contain the latest camera settings that we need to check for any session
+ // parameter updates.
+ if (updateSessionParameters(mNextRequests[0].captureRequest->mSettingsList.begin()->metadata)) {
+ res = OK;
+
+ // Input stream buffers are already acquired at this point, so an input
+ // stream will not be able to move to the idle state unless we force it.
+ if (mNextRequests[0].captureRequest->mInputStream != nullptr) {
+ res = mNextRequests[0].captureRequest->mInputStream->forceToIdle();
+ if (res != OK) {
+ ALOGE("%s: Failed to force idle input stream: %d", __FUNCTION__, res);
+ cleanUpFailedRequests(/*sendRequestError*/ false);
+ return false;
+ }
+ }
+
+ if (res == OK) {
+ sp<StatusTracker> statusTracker = mStatusTracker.promote();
+ if (statusTracker != 0) {
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent != nullptr) {
+ parent->pauseStateNotify(true);
+ }
+
+ statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+
+ if (parent != nullptr) {
+ mReconfigured |= parent->reconfigureCamera(mLatestSessionParams);
+ }
+
+ statusTracker->markComponentActive(mStatusId);
+ setPaused(false);
+ }
+
+ if (mNextRequests[0].captureRequest->mInputStream != nullptr) {
+ res = mNextRequests[0].captureRequest->mInputStream->restoreConfiguredState();
+ if (res != OK) {
+ ALOGE("%s: Failed to restore configured input stream: %d", __FUNCTION__, res);
+ cleanUpFailedRequests(/*sendRequestError*/ false);
+ return false;
+ }
+ }
+ }
+ }
+
// Prepare a batch of HAL requests and output buffers.
res = prepareHalRequests();
if (res == TIMED_OUT) {
@@ -4165,19 +4721,20 @@
// Insert any queued triggers (before metadata is locked)
status_t res = insertTriggers(captureRequest);
-
if (res < 0) {
SET_ERR("RequestThread: Unable to insert triggers "
"(capture request %d, HAL device: %s (%d)",
halRequest->frame_number, strerror(-res), res);
return INVALID_OPERATION;
}
+
int triggerCount = res;
bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
mPrevTriggers = triggerCount;
// If the request is the same as last, or we had triggers last time
- if (mPrevRequest != captureRequest || triggersMixedIn) {
+ bool newRequest = mPrevRequest != captureRequest || triggersMixedIn;
+ if (newRequest) {
/**
* HAL workaround:
* Insert a dummy trigger ID if a trigger is set but no trigger ID is
@@ -4190,12 +4747,27 @@
return INVALID_OPERATION;
}
+ {
+ // Correct metadata regions for distortion correction if enabled
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent != nullptr) {
+ res = parent->mDistortionMapper.correctCaptureRequest(
+ &(captureRequest->mSettingsList.begin()->metadata));
+ if (res != OK) {
+ SET_ERR("RequestThread: Unable to correct capture requests "
+ "for lens distortion for request %d: %s (%d)",
+ halRequest->frame_number, strerror(-res), res);
+ return INVALID_OPERATION;
+ }
+ }
+ }
+
/**
* The request should be presorted so accesses in HAL
* are O(logn). Sidenote, sorting a sorted metadata is nop.
*/
- captureRequest->mSettings.sort();
- halRequest->settings = captureRequest->mSettings.getAndLock();
+ captureRequest->mSettingsList.begin()->metadata.sort();
+ halRequest->settings = captureRequest->mSettingsList.begin()->metadata.getAndLock();
mPrevRequest = captureRequest;
ALOGVV("%s: Request settings are NEW", __FUNCTION__);
@@ -4219,6 +4791,26 @@
__FUNCTION__);
}
+ if (captureRequest->mSettingsList.size() > 1) {
+ halRequest->num_physcam_settings = captureRequest->mSettingsList.size() - 1;
+ halRequest->physcam_id = new const char* [halRequest->num_physcam_settings];
+ if (newRequest) {
+ halRequest->physcam_settings =
+ new const camera_metadata* [halRequest->num_physcam_settings];
+ } else {
+ halRequest->physcam_settings = nullptr;
+ }
+ auto it = ++captureRequest->mSettingsList.begin();
+ size_t i = 0;
+ for (; it != captureRequest->mSettingsList.end(); it++, i++) {
+ halRequest->physcam_id[i] = it->cameraId.c_str();
+ if (newRequest) {
+ it->metadata.sort();
+ halRequest->physcam_settings[i] = it->metadata.getAndLock();
+ }
+ }
+ }
+
uint32_t totalNumBuffers = 0;
// Fill in buffers
@@ -4232,6 +4824,7 @@
outputBuffers->insertAt(camera3_stream_buffer_t(), 0,
captureRequest->mOutputStreams.size());
halRequest->output_buffers = outputBuffers->array();
+ std::set<String8> requestedPhysicalCameras;
for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(j);
@@ -4262,8 +4855,18 @@
return TIMED_OUT;
}
- halRequest->num_output_buffers++;
+ String8 physicalCameraId = outputStream->getPhysicalCameraId();
+
+ if (!physicalCameraId.isEmpty()) {
+ // Physical stream isn't supported for input request.
+ if (halRequest->input_buffer) {
+ CLOGE("Physical stream is not supported for input request");
+ return INVALID_OPERATION;
+ }
+ requestedPhysicalCameras.insert(physicalCameraId);
+ }
+ halRequest->num_output_buffers++;
}
totalNumBuffers += halRequest->num_output_buffers;
@@ -4286,7 +4889,8 @@
totalNumBuffers, captureRequest->mResultExtras,
/*hasInput*/halRequest->input_buffer != NULL,
hasCallback,
- calculateMaxExpectedDuration(halRequest->settings));
+ calculateMaxExpectedDuration(halRequest->settings),
+ requestedPhysicalCameras);
ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
", burstId = %" PRId32 ".",
__FUNCTION__,
@@ -4342,6 +4946,46 @@
return false;
}
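+// Returns true while the given (streamId, surfaceId) pair is still referenced
+// by any request in mNextRequests, mRequestQueue, or mRepeatingRequests; used
+// by updateStream() to block removal of a shared surface that is still in use.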
+bool Camera3Device::RequestThread::isOutputSurfacePending(int streamId, size_t surfaceId) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mRequestLock);
+
+ for (const auto& nextRequest : mNextRequests) {
+ for (const auto& s : nextRequest.captureRequest->mOutputSurfaces) {
+ if (s.first == streamId) {
+ const auto &it = std::find(s.second.begin(), s.second.end(), surfaceId);
+ if (it != s.second.end()) {
+ return true;
+ }
+ }
+ }
+ }
+
+ for (const auto& request : mRequestQueue) {
+ for (const auto& s : request->mOutputSurfaces) {
+ if (s.first == streamId) {
+ const auto &it = std::find(s.second.begin(), s.second.end(), surfaceId);
+ if (it != s.second.end()) {
+ return true;
+ }
+ }
+ }
+ }
+
+ for (const auto& request : mRepeatingRequests) {
+ for (const auto& s : request->mOutputSurfaces) {
+ if (s.first == streamId) {
+ const auto &it = std::find(s.second.begin(), s.second.end(), surfaceId);
+ if (it != s.second.end()) {
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
nsecs_t Camera3Device::getExpectedInFlightDuration() {
ATRACE_CALL();
Mutex::Autolock al(mInFlightLock);
@@ -4349,6 +4993,30 @@
mExpectedInflightDuration : kMinInflightDuration;
}
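+// Unlocks the per-physical-camera metadata locked in prepareHalRequests() and
+// frees the physcam_id/physcam_settings arrays allocated for the HAL request.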
+void Camera3Device::RequestThread::cleanupPhysicalSettings(sp<CaptureRequest> request,
+ camera3_capture_request_t *halRequest) {
+ if ((request == nullptr) || (halRequest == nullptr)) {
+ ALOGE("%s: Invalid request!", __FUNCTION__);
+ return;
+ }
+
+ if (halRequest->num_physcam_settings > 0) {
+ if (halRequest->physcam_id != nullptr) {
+ delete [] halRequest->physcam_id;
+ halRequest->physcam_id = nullptr;
+ }
+ if (halRequest->physcam_settings != nullptr) {
+ auto it = ++(request->mSettingsList.begin());
+ size_t i = 0;
+ for (; it != request->mSettingsList.end(); it++, i++) {
+ it->metadata.unlock(halRequest->physcam_settings[i]);
+ }
+ delete [] halRequest->physcam_settings;
+ halRequest->physcam_settings = nullptr;
+ }
+ }
+}
+
void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) {
if (mNextRequests.empty()) {
return;
@@ -4365,9 +5033,11 @@
Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
if (halRequest->settings != NULL) {
- captureRequest->mSettings.unlock(halRequest->settings);
+ captureRequest->mSettingsList.begin()->metadata.unlock(halRequest->settings);
}
+ cleanupPhysicalSettings(captureRequest, halRequest);
+
if (captureRequest->mInputStream != NULL) {
captureRequest->mInputBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
captureRequest->mInputStream->returnInputBuffer(captureRequest->mInputBuffer);
@@ -4631,7 +5301,7 @@
return DEAD_OBJECT;
}
- CameraMetadata &metadata = request->mSettings;
+ CameraMetadata &metadata = request->mSettingsList.begin()->metadata;
size_t count = mTriggerMap.size();
for (size_t i = 0; i < count; ++i) {
@@ -4714,7 +5384,7 @@
ATRACE_CALL();
Mutex::Autolock al(mTriggerMutex);
- CameraMetadata &metadata = request->mSettings;
+ CameraMetadata &metadata = request->mSettingsList.begin()->metadata;
/**
* Replace all old entries with their old values.
@@ -4779,7 +5449,7 @@
static const int32_t dummyTriggerId = 1;
status_t res;
- CameraMetadata &metadata = request->mSettings;
+ CameraMetadata &metadata = request->mSettingsList.begin()->metadata;
// If AF trigger is active, insert a dummy AF trigger ID if none already
// exists
@@ -4814,7 +5484,7 @@
Camera3Device::PreparerThread::PreparerThread() :
Thread(/*canCallJava*/false), mListener(nullptr),
- mActive(false), mCancelNow(false) {
+ mActive(false), mCancelNow(false), mCurrentMaxCount(0), mCurrentPrepareComplete(false) {
}
Camera3Device::PreparerThread::~PreparerThread() {
@@ -4865,18 +5535,101 @@
}
// queue up the work
- mPendingStreams.push_back(stream);
+ mPendingStreams.emplace(maxCount, stream);
ALOGV("%s: Stream %d queued for preparing", __FUNCTION__, stream->getId());
return OK;
}
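+// Cancels any in-flight stream preparation and waits for the preparer thread
+// to go idle; unfinished work is kept in mPendingStreams so that resume() can
+// restart it once the pending stream reconfiguration completes.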
+void Camera3Device::PreparerThread::pause() {
+ ATRACE_CALL();
+
+ Mutex::Autolock l(mLock);
+
+ std::unordered_map<int, sp<camera3::Camera3StreamInterface> > pendingStreams;
+ pendingStreams.insert(mPendingStreams.begin(), mPendingStreams.end());
+ sp<camera3::Camera3StreamInterface> currentStream = mCurrentStream;
+ int currentMaxCount = mCurrentMaxCount;
+ mPendingStreams.clear();
+ mCancelNow = true;
+ while (mActive) {
+ auto res = mThreadActiveSignal.waitRelative(mLock, kActiveTimeout);
+ if (res == TIMED_OUT) {
+ ALOGE("%s: Timed out waiting on prepare thread!", __FUNCTION__);
+ return;
+ } else if (res != OK) {
+ ALOGE("%s: Encountered an error: %d waiting on prepare thread!", __FUNCTION__, res);
+ return;
+ }
+ }
+
+ // Check whether the prepare thread was able to complete the current
+ // stream. If work is still pending, emplace it along with the rest of
+ // the streams in the pending list.
+ if (currentStream != nullptr) {
+ if (!mCurrentPrepareComplete) {
+ pendingStreams.emplace(currentMaxCount, currentStream);
+ }
+ }
+
+ mPendingStreams.insert(pendingStreams.begin(), pendingStreams.end());
+ for (const auto& it : mPendingStreams) {
+ it.second->cancelPrepare();
+ }
+}
+
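+// Restarts preparation for every stream saved by pause(); streams whose
+// preparation completes immediately are finished inline (with the listener
+// notified), and the preparer thread is only spun up again if work remains.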
+status_t Camera3Device::PreparerThread::resume() {
+ ATRACE_CALL();
+ status_t res;
+
+ Mutex::Autolock l(mLock);
+ sp<NotificationListener> listener = mListener.promote();
+
+ if (mActive) {
+ ALOGE("%s: Trying to resume an already active prepare thread!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ auto it = mPendingStreams.begin();
+ for (; it != mPendingStreams.end();) {
+ res = it->second->startPrepare(it->first);
+ if (res == OK) {
+ if (listener != NULL) {
+ listener->notifyPrepared(it->second->getId());
+ }
+ it = mPendingStreams.erase(it);
+ } else if (res != NOT_ENOUGH_DATA) {
+ ALOGE("%s: Unable to start preparer stream: %d (%s)", __FUNCTION__,
+ res, strerror(-res));
+ it = mPendingStreams.erase(it);
+ } else {
+ it++;
+ }
+ }
+
+ if (mPendingStreams.empty()) {
+ return OK;
+ }
+
+ res = Thread::run("C3PrepThread", PRIORITY_BACKGROUND);
+ if (res != OK) {
+ ALOGE("%s: Unable to start preparer stream: %d (%s)",
+ __FUNCTION__, res, strerror(-res));
+ return res;
+ }
+ mCancelNow = false;
+ mActive = true;
+ ALOGV("%s: Preparer stream started", __FUNCTION__);
+
+ return OK;
+}
+
status_t Camera3Device::PreparerThread::clear() {
ATRACE_CALL();
Mutex::Autolock l(mLock);
- for (const auto& stream : mPendingStreams) {
- stream->cancelPrepare();
+ for (const auto& it : mPendingStreams) {
+ it.second->cancelPrepare();
}
mPendingStreams.clear();
mCancelNow = true;
@@ -4901,12 +5654,15 @@
// threadLoop _must not_ re-acquire mLock after it sets mActive to false; would
// cause deadlock with prepare()'s requestExitAndWait triggered by !mActive.
mActive = false;
+ mThreadActiveSignal.signal();
return false;
}
// Get next stream to prepare
auto it = mPendingStreams.begin();
- mCurrentStream = *it;
+ mCurrentStream = it->second;
+ mCurrentMaxCount = it->first;
+ mCurrentPrepareComplete = false;
mPendingStreams.erase(it);
ATRACE_ASYNC_BEGIN("stream prepare", mCurrentStream->getId());
ALOGV("%s: Preparing stream %d", __FUNCTION__, mCurrentStream->getId());
@@ -4941,6 +5697,7 @@
ATRACE_ASYNC_END("stream prepare", mCurrentStream->getId());
mCurrentStream.clear();
+ mCurrentPrepareComplete = true;
return true;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index fbbbd08..d8fe19f 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@
#include <utility>
#include <unordered_map>
+#include <set>
#include <utils/Condition.h>
#include <utils/Errors.h>
@@ -31,7 +32,9 @@
#include <android/hardware/camera/device/3.2/ICameraDevice.h>
#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
#include <android/hardware/camera/device/3.3/ICameraDeviceSession.h>
+#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.4/ICameraDeviceCallback.h>
#include <fmq/MessageQueue.h>
#include <hardware/camera3.h>
@@ -40,10 +43,13 @@
#include "common/CameraDeviceBase.h"
#include "device3/StatusTracker.h"
#include "device3/Camera3BufferManager.h"
+#include "device3/DistortionMapper.h"
#include "utils/TagMonitor.h"
#include "utils/LatencyHistogram.h"
#include <camera_metadata_hidden.h>
+using android::camera3::OutputStreamInfo;
+
/**
* Function pointer types with C calling convention to
* use for HAL callback functions.
@@ -74,7 +80,7 @@
*/
class Camera3Device :
public CameraDeviceBase,
- virtual public hardware::camera::device::V3_2::ICameraDeviceCallback,
+ virtual public hardware::camera::device::V3_4::ICameraDeviceCallback,
private camera3_callback_ops {
public:
@@ -88,8 +94,10 @@
const String8& getId() const override;
+ metadata_vendor_id_t getVendorTagId() const override { return mVendorTagId; }
+
// Transitions to idle state on success.
- status_t initialize(sp<CameraProviderManager> manager) override;
+ status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags) override;
status_t disconnect() override;
status_t dump(int fd, const Vector<String16> &args) override;
const CameraMetadata& info() const override;
@@ -97,12 +105,12 @@
// Capture and setStreamingRequest will configure streams if currently in
// idle state
status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) override;
- status_t captureList(const List<const CameraMetadata> &requests,
+ status_t captureList(const List<const PhysicalCameraSettingsList> &requestsList,
const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) override;
status_t setStreamingRequest(const CameraMetadata &request,
int64_t *lastFrameNumber = NULL) override;
- status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+ status_t setStreamingRequestList(const List<const PhysicalCameraSettingsList> &requestsList,
const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) override;
status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) override;
@@ -117,11 +125,15 @@
status_t createStream(sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ const String8& physicalCameraId,
+ std::vector<int> *surfaceIds = nullptr,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
bool isShared = false, uint64_t consumerUsage = 0) override;
status_t createStream(const std::vector<sp<Surface>>& consumers,
bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ const String8& physicalCameraId,
+ std::vector<int> *surfaceIds = nullptr,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
bool isShared = false, uint64_t consumerUsage = 0) override;
@@ -134,7 +146,8 @@
status_t deleteStream(int id) override;
- status_t configureStreams(int operatingMode =
+ status_t configureStreams(const CameraMetadata& sessionParams,
+ int operatingMode =
static_cast<int>(hardware::camera::device::V3_2::StreamConfigurationMode::NORMAL_MODE))
override;
status_t getInputBufferProducer(
@@ -176,7 +189,23 @@
* Set the deferred consumer surfaces to the output stream and finish the deferred
* consumer configuration.
*/
- status_t setConsumerSurfaces(int streamId, const std::vector<sp<Surface>>& consumers) override;
+ status_t setConsumerSurfaces(
+ int streamId, const std::vector<sp<Surface>>& consumers,
+ std::vector<int> *surfaceIds /*out*/) override;
+
+ /**
+ * Update a given stream.
+ */
+ status_t updateStream(int streamId, const std::vector<sp<Surface>> &newSurfaces,
+ const std::vector<OutputStreamInfo> &outputInfo,
+ const std::vector<size_t> &removedSurfaceIds,
+ KeyedVector<sp<Surface>, size_t> *outputMap/*out*/);
+
+ /**
+ * Drop buffers for the stream with ID streamId when dropping is true; when dropping is
+ * false, stop dropping buffers for that stream.
+ */
+ status_t dropStreamBuffers(bool dropping, int streamId) override;
private:
@@ -216,6 +245,9 @@
// Current stream configuration mode;
int mOperatingMode;
+ // Current session wide parameters
+ hardware::camera2::impl::CameraMetadataNative mSessionParams;
+
// Constant to use for no set operating mode
static const int NO_MODE = -1;
@@ -252,7 +284,9 @@
// Caller takes ownership of requestTemplate
status_t constructDefaultRequestSettings(camera3_request_template_t templateId,
/*out*/ camera_metadata_t **requestTemplate);
- status_t configureStreams(/*inout*/ camera3_stream_configuration *config);
+ status_t configureStreams(const camera_metadata_t *sessionParams,
+ /*inout*/ camera3_stream_configuration *config,
+ const std::vector<uint32_t>& bufferSizes);
status_t processCaptureRequest(camera3_capture_request_t *request);
status_t processBatchCaptureRequests(
std::vector<camera3_capture_request_t*>& requests,
@@ -270,7 +304,13 @@
void getInflightBufferKeys(std::vector<std::pair<int32_t, int32_t>>* out);
private:
+ // Always valid
sp<hardware::camera::device::V3_2::ICameraDeviceSession> mHidlSession;
+ // Valid if ICameraDeviceSession is @3.3 or newer
+ sp<hardware::camera::device::V3_3::ICameraDeviceSession> mHidlSession_3_3;
+ // Valid if ICameraDeviceSession is @3.4 or newer
+ sp<hardware::camera::device::V3_4::ICameraDeviceSession> mHidlSession_3_4;
+
std::shared_ptr<RequestMetadataQueue> mRequestMetadataQueue;
std::mutex mInflightLock;
@@ -401,7 +441,7 @@
class CaptureRequest : public LightRefBase<CaptureRequest> {
public:
- CameraMetadata mSettings;
+ PhysicalCameraSettingsList mSettingsList;
sp<camera3::Camera3Stream> mInputStream;
camera3_stream_buffer_t mInputBuffer;
Vector<sp<camera3::Camera3OutputStreamInterface> >
@@ -421,26 +461,28 @@
status_t checkStatusOkToCaptureLocked();
status_t convertMetadataListToRequestListLocked(
- const List<const CameraMetadata> &metadataList,
+ const List<const PhysicalCameraSettingsList> &metadataList,
const std::list<const SurfaceMap> &surfaceMaps,
bool repeating,
/*out*/
RequestList *requestList);
- void convertToRequestList(List<const CameraMetadata>& requests,
+ void convertToRequestList(List<const PhysicalCameraSettingsList>& requestsList,
std::list<const SurfaceMap>& surfaceMaps,
const CameraMetadata& request);
- status_t submitRequestsHelper(const List<const CameraMetadata> &requests,
+ status_t submitRequestsHelper(const List<const PhysicalCameraSettingsList> &requestsList,
const std::list<const SurfaceMap> &surfaceMaps,
bool repeating,
int64_t *lastFrameNumber = NULL);
/**
- * Implementation of android::hardware::camera::device::V3_2::ICameraDeviceCallback
+ * Implementation of android::hardware::camera::device::V3_4::ICameraDeviceCallback
*/
-
+ hardware::Return<void> processCaptureResult_3_4(
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_4::CaptureResult>& results) override;
hardware::Return<void> processCaptureResult(
const hardware::hidl_vec<
hardware::camera::device::V3_2::CaptureResult>& results) override;
@@ -450,7 +492,13 @@
// Handle one capture result. Assume that mProcessCaptureResultLock is held.
void processOneCaptureResultLocked(
- const hardware::camera::device::V3_2::CaptureResult& results);
+ const hardware::camera::device::V3_2::CaptureResult& result,
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_4::PhysicalCameraMetadata> physicalCameraMetadatas);
+ status_t readOneCameraMetadataLocked(uint64_t fmqResultSize,
+ hardware::camera::device::V3_2::CameraMetadata& resultMetadata,
+ const hardware::camera::device::V3_2::CameraMetadata& result);
+
// Handle one notify message
void notify(const hardware::camera::device::V3_2::NotifyMsg& msg);
@@ -516,21 +564,41 @@
* Do common work for setting up a streaming or single capture request.
* On success, will transition to ACTIVE if in IDLE.
*/
- sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request,
+ sp<CaptureRequest> setUpRequestLocked(const PhysicalCameraSettingsList &request,
const SurfaceMap &surfaceMap);
/**
* Build a CaptureRequest request from the CameraDeviceBase request
* settings.
*/
- sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request,
+ sp<CaptureRequest> createCaptureRequest(const PhysicalCameraSettingsList &request,
const SurfaceMap &surfaceMap);
/**
+ * Pause state updates to the client application. Needed to mask out idle/active
+ * transitions during an internal reconfiguration.
+ */
+ void pauseStateNotify(bool enable);
+
+ /**
+ * Internally re-configure camera device using new session parameters.
+ * This is triggered by the request thread. Be sure to call
+ * pauseStateNotify(true) before going idle at the calling location.
+ */
+ bool reconfigureCamera(const CameraMetadata& sessionParams);
+
+ /**
+ * Filter stream session parameters and configure camera HAL.
+ */
+ status_t filterParamsAndConfigureLocked(const CameraMetadata& sessionParams,
+ int operatingMode);
+
+ /**
* Take the currently-defined set of streams and configure the HAL to use
* them. This is a long-running operation (may be several hundred ms).
*/
- status_t configureStreamsLocked(int operatingMode);
+ status_t configureStreamsLocked(int operatingMode,
+ const CameraMetadata& sessionParams, bool notifyRequestThread = true);
/**
* Cancel stream configuration that did not finish successfully.
@@ -629,7 +697,7 @@
RequestThread(wp<Camera3Device> parent,
sp<camera3::StatusTracker> statusTracker,
- sp<HalInterface> interface);
+ sp<HalInterface> interface, const Vector<int32_t>& sessionParamKeys);
~RequestThread();
void setNotificationListener(wp<NotificationListener> listener);
@@ -637,7 +705,8 @@
/**
* Call after stream (re)-configuration is completed.
*/
- void configurationComplete(bool isConstrainedHighSpeed);
+ void configurationComplete(bool isConstrainedHighSpeed,
+ const CameraMetadata& sessionParams);
/**
* Set or clear the list of repeating requests. Does not block
@@ -705,6 +774,12 @@
*/
bool isStreamPending(sp<camera3::Camera3StreamInterface>& stream);
+ /**
+ * Returns true if the surface is a target of any queued or repeating
+ * capture request
+ */
+ bool isOutputSurfacePending(int streamId, size_t surfaceId);
+
// dump processCaptureRequest latency
void dumpCaptureRequestLatency(int fd, const char* name) {
mRequestLatency.dump(fd, name);
@@ -758,6 +833,10 @@
// Stop the repeating request if any of its output streams is abandoned.
void checkAndStopRepeatingRequest();
+ // Release physical camera settings and camera id resources.
+ void cleanupPhysicalSettings(sp<CaptureRequest> request,
+ /*out*/camera3_capture_request_t *halRequest);
+
// Pause handling
bool waitIfPaused();
void unpauseForNewRequests();
@@ -780,6 +859,17 @@
// Calculate the expected maximum duration for a request
nsecs_t calculateMaxExpectedDuration(const camera_metadata_t *request);
+ // Check and update latest session parameters based on the current request settings.
+ bool updateSessionParameters(const CameraMetadata& settings);
+
+ // Check whether FPS range session parameter re-configuration is needed in constrained
+ // high speed recording camera sessions.
+ bool skipHFRTargetFPSUpdate(int32_t tag, const camera_metadata_ro_entry_t& newEntry,
+ const camera_metadata_entry_t& currentEntry);
+
+ // Re-configure camera using the latest session parameters.
+ bool reconfigureCamera();
+
wp<Camera3Device> mParent;
wp<camera3::StatusTracker> mStatusTracker;
sp<HalInterface> mInterface;
@@ -835,8 +925,13 @@
// Flag indicating if we should prepare video stream for video requests.
bool mPrepareVideoStream;
+ bool mConstrainedMode;
+
static const int32_t kRequestLatencyBinSize = 40; // in ms
CameraLatencyHistogram mRequestLatency;
+
+ Vector<int32_t> mSessionParamKeys;
+ CameraMetadata mLatestSessionParams;
};
sp<RequestThread> mRequestThread;
@@ -891,6 +986,12 @@
// REQUEST/RESULT error.
bool skipResultMetadata;
+ // The physical camera ids being requested.
+ std::set<String8> physicalCameraIds;
+
+ // Map of physicalCameraId <-> Metadata
+ std::vector<PhysicalCaptureResultInfo> physicalMetadatas;
+
// Default constructor needed by KeyedVector
InFlightRequest() :
shutterTimestamp(0),
@@ -905,7 +1006,8 @@
}
InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
- bool hasAppCallback, nsecs_t maxDuration) :
+ bool hasAppCallback, nsecs_t maxDuration,
+ const std::set<String8>& physicalCameraIdSet) :
shutterTimestamp(0),
sensorTimestamp(0),
requestStatus(OK),
@@ -915,7 +1017,8 @@
hasInputBuffer(hasInput),
hasCallback(hasAppCallback),
maxExpectedDuration(maxDuration),
- skipResultMetadata(false) {
+ skipResultMetadata(false),
+ physicalCameraIds(physicalCameraIdSet) {
}
};
@@ -932,7 +1035,7 @@
status_t registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
- bool callback, nsecs_t maxExpectedDuration);
+ bool callback, nsecs_t maxExpectedDuration, std::set<String8>& physicalCameraIds);
/**
* Returns the maximum expected time it'll take for all currently in-flight
@@ -974,21 +1077,34 @@
*/
status_t clear();
+ /**
+ * Pause all preparation activities
+ */
+ void pause();
+
+ /**
+ * Resume preparation activities
+ */
+ status_t resume();
+
private:
Mutex mLock;
+ Condition mThreadActiveSignal;
virtual bool threadLoop();
// Guarded by mLock
wp<NotificationListener> mListener;
- List<sp<camera3::Camera3StreamInterface> > mPendingStreams;
+ std::unordered_map<int, sp<camera3::Camera3StreamInterface> > mPendingStreams;
bool mActive;
bool mCancelNow;
// Only accessed by threadLoop and the destructor
sp<camera3::Camera3StreamInterface> mCurrentStream;
+ int mCurrentMaxCount;
+ bool mCurrentPrepareComplete;
};
sp<PreparerThread> mPreparerThread;
@@ -1040,7 +1156,9 @@
void sendCaptureResult(CameraMetadata &pendingMetadata,
CaptureResultExtras &resultExtras,
CameraMetadata &collectedPartialResult, uint32_t frameNumber,
- bool reprocess);
+ bool reprocess, const std::vector<PhysicalCaptureResultInfo>& physicalMetadatas);
+
+ bool isLastFullResult(const InFlightRequest& inFlightRequest);
// Insert the result to the result queue after updating frame number and overriding AE
// trigger cancel.
@@ -1062,6 +1180,12 @@
/**** End scope for mInFlightLock ****/
+ /**
+ * Distortion correction support
+ */
+
+ camera3::DistortionMapper mDistortionMapper;
+
// Debug tracker for metadata tag value changes
// - Enabled with the -m <taglist> option to dumpsys, such as
// dumpsys -m android.control.aeState,android.control.aeMode
@@ -1074,6 +1198,9 @@
metadata_vendor_id_t mVendorTagId;
+ // Cached last requested template id
+ int mLastTemplateId;
+
/**
* Static callback forwarding methods from HAL to instance
*/
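Note on the PreparerThread pause()/resume() additions in the diff above: the pattern is a worker thread that drains a map of pending streams, can be paused (cancel the current item, wait via a condition variable for the loop to go inactive, re-queue unfinished work), and can later be resumed. The following is a minimal standalone sketch of that pattern using only the C++ standard library; the class and member names are illustrative and are not part of the AOSP code.

// Minimal standalone sketch of a pausable worker draining a pending map,
// loosely modeled on the PreparerThread pause()/resume() contract above.
// Illustrative names only; this is not the AOSP implementation.
#include <condition_variable>
#include <cstdio>
#include <map>
#include <mutex>
#include <thread>

class PausableWorker {
  public:
    void enqueue(int maxCount, int streamId) {
        std::lock_guard<std::mutex> l(mLock);
        mPending.emplace(maxCount, streamId);
        startLocked();
    }

    // Stop processing; unfinished work stays queued for a later resume().
    void pause() {
        std::unique_lock<std::mutex> l(mLock);
        mCancel = true;
        mIdleSignal.wait(l, [this] { return !mActive; });  // wait for the loop to go idle
        if (mThread.joinable()) mThread.join();
    }

    // Restart processing of whatever is still pending.
    void resume() {
        std::lock_guard<std::mutex> l(mLock);
        startLocked();
    }

    ~PausableWorker() { pause(); }

  private:
    void startLocked() {
        if (mActive || mPending.empty()) return;
        if (mThread.joinable()) mThread.join();  // reap a previously paused thread
        mCancel = false;
        mActive = true;
        mThread = std::thread([this] { loop(); });
    }

    void loop() {
        std::unique_lock<std::mutex> l(mLock);
        while (!mCancel && !mPending.empty()) {
            auto it = mPending.begin();
            int maxCount = it->first, streamId = it->second;
            mPending.erase(it);
            l.unlock();
            std::printf("preparing stream %d (maxCount %d)\n", streamId, maxCount);
            l.lock();
        }
        mActive = false;
        mIdleSignal.notify_all();  // unblock pause()
    }

    std::mutex mLock;
    std::condition_variable mIdleSignal;
    std::map<int, int> mPending;  // maxCount -> streamId
    std::thread mThread;
    bool mActive = false;
    bool mCancel = false;
};

int main() {
    PausableWorker w;
    w.enqueue(8, 0);
    w.enqueue(4, 1);
    w.pause();   // unfinished streams remain pending
    w.resume();  // pending streams are prepared now
    return 0;
}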
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 6e2978f..fb1ff77 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2014-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,9 +26,12 @@
namespace camera3 {
+const String8 Camera3DummyStream::DUMMY_ID;
+
Camera3DummyStream::Camera3DummyStream(int id) :
Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, DUMMY_WIDTH, DUMMY_HEIGHT,
- /*maxSize*/0, DUMMY_FORMAT, DUMMY_DATASPACE, DUMMY_ROTATION) {
+ /*maxSize*/0, DUMMY_FORMAT, DUMMY_DATASPACE, DUMMY_ROTATION,
+ DUMMY_ID) {
}
@@ -108,11 +111,28 @@
return false;
}
+status_t Camera3DummyStream::dropBuffers(bool /*dropping*/) {
+ return OK;
+}
+
+const String8& Camera3DummyStream::getPhysicalCameraId() const {
+ return DUMMY_ID;
+}
+
status_t Camera3DummyStream::setConsumers(const std::vector<sp<Surface>>& /*consumers*/) {
ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface!",
__FUNCTION__, mId);
return INVALID_OPERATION;
}
+
+status_t Camera3DummyStream::updateStream(const std::vector<sp<Surface>> &/*outputSurfaces*/,
+ const std::vector<OutputStreamInfo> &/*outputInfo*/,
+ const std::vector<size_t> &/*removedSurfaceIds*/,
+ KeyedVector<sp<Surface>, size_t> * /*outputMap*/) {
+ ALOGE("%s: this method is not supported!", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
}; // namespace camera3
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 492fb49..4627548 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2014-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -57,6 +57,17 @@
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
/**
+ * Drop buffers for this stream when dropping is true; when dropping is false, stop
+ * dropping buffers.
+ */
+ virtual status_t dropBuffers(bool /*dropping*/) override;
+
+ /**
+ * Query the physical camera id for the output stream.
+ */
+ virtual const String8& getPhysicalCameraId() const override;
+
+ /**
* Return if this output stream is for video encoding.
*/
bool isVideoStream() const;
@@ -71,6 +82,19 @@
*/
virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
+ /**
+ * Query the output surface id.
+ */
+ virtual ssize_t getSurfaceId(const sp<Surface> &/*surface*/) { return 0; }
+
+ /**
+ * Update the stream output surfaces.
+ */
+ virtual status_t updateStream(const std::vector<sp<Surface>> &outputSurfaces,
+ const std::vector<OutputStreamInfo> &outputInfo,
+ const std::vector<size_t> &removedSurfaceIds,
+ KeyedVector<sp<Surface>, size_t> *outputMap/*out*/);
+
protected:
/**
@@ -95,6 +119,7 @@
static const android_dataspace DUMMY_DATASPACE = HAL_DATASPACE_UNKNOWN;
static const camera3_stream_rotation_t DUMMY_ROTATION = CAMERA3_STREAM_ROTATION_0;
static const uint64_t DUMMY_USAGE = GRALLOC_USAGE_HW_COMPOSER;
+ static const String8 DUMMY_ID;
/**
* Internal Camera3Stream interface
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index a52422d..3c1e43d 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -31,9 +31,11 @@
Camera3IOStreamBase::Camera3IOStreamBase(int id, camera3_stream_type_t type,
uint32_t width, uint32_t height, size_t maxSize, int format,
- android_dataspace dataSpace, camera3_stream_rotation_t rotation, int setId) :
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+ const String8& physicalCameraId, int setId) :
Camera3Stream(id, type,
- width, height, maxSize, format, dataSpace, rotation, setId),
+ width, height, maxSize, format, dataSpace, rotation,
+ physicalCameraId, setId),
mTotalBufferCount(0),
mHandoutTotalBufferCount(0),
mHandoutOutputBufferCount(0),
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 2376058..0a31d44 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,6 +35,7 @@
Camera3IOStreamBase(int id, camera3_stream_type_t type,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+ const String8& physicalCameraId,
int setId = CAMERA3_STREAM_SET_ID_INVALID);
public:
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 2cb1ea7..017d7be 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -27,10 +27,13 @@
namespace camera3 {
+const String8 Camera3InputStream::DUMMY_ID;
+
Camera3InputStream::Camera3InputStream(int id,
uint32_t width, uint32_t height, int format) :
Camera3IOStreamBase(id, CAMERA3_STREAM_INPUT, width, height, /*maxSize*/0,
- format, HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0) {
+ format, HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0,
+ DUMMY_ID) {
if (format == HAL_PIXEL_FORMAT_BLOB) {
ALOGE("%s: Bad format, BLOB not supported", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 81226f8..0732464 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -53,6 +53,8 @@
sp<IGraphicBufferProducer> mProducer;
Vector<BufferItem> mBuffersInFlight;
+ static const String8 DUMMY_ID;
+
/**
* Camera3IOStreamBase
*/
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index dcaefe3..b3c3717 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -35,15 +35,18 @@
sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
- nsecs_t timestampOffset, int setId) :
+ nsecs_t timestampOffset, const String8& physicalCameraId,
+ int setId) :
Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height,
- /*maxSize*/0, format, dataSpace, rotation, setId),
+ /*maxSize*/0, format, dataSpace, rotation,
+ physicalCameraId, setId),
mConsumer(consumer),
mTransform(0),
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (mConsumer == NULL) {
@@ -60,9 +63,9 @@
sp<Surface> consumer,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
- nsecs_t timestampOffset, int setId) :
+ nsecs_t timestampOffset, const String8& physicalCameraId, int setId) :
Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height, maxSize,
- format, dataSpace, rotation, setId),
+ format, dataSpace, rotation, physicalCameraId, setId),
mConsumer(consumer),
mTransform(0),
mTraceFirstBuffer(true),
@@ -70,6 +73,7 @@
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(0),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -91,15 +95,18 @@
Camera3OutputStream::Camera3OutputStream(int id,
uint32_t width, uint32_t height, int format,
uint64_t consumerUsage, android_dataspace dataSpace,
- camera3_stream_rotation_t rotation, nsecs_t timestampOffset, int setId) :
+ camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+ const String8& physicalCameraId, int setId) :
Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height,
- /*maxSize*/0, format, dataSpace, rotation, setId),
+ /*maxSize*/0, format, dataSpace, rotation,
+ physicalCameraId, setId),
mConsumer(nullptr),
mTransform(0),
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
// Deferred consumer only support preview surface format now.
if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
@@ -128,17 +135,20 @@
int format,
android_dataspace dataSpace,
camera3_stream_rotation_t rotation,
+ const String8& physicalCameraId,
uint64_t consumerUsage, nsecs_t timestampOffset,
int setId) :
Camera3IOStreamBase(id, type, width, height,
/*maxSize*/0,
- format, dataSpace, rotation, setId),
+ format, dataSpace, rotation,
+ physicalCameraId, setId),
mTransform(0),
mTraceFirstBuffer(true),
mUseMonoTimestamp(false),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
mConsumerUsage(consumerUsage),
+ mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
@@ -227,9 +237,14 @@
/**
* Return buffer back to ANativeWindow
*/
- if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
+ if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR || mDropBuffers) {
// Cancel buffer
- ALOGW("A frame is dropped for stream %d", mId);
+ if (mDropBuffers) {
+ ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
+ } else {
+ ALOGW("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
+ }
+
res = currentConsumer->cancelBuffer(currentConsumer.get(),
anwBuffer,
anwReleaseFence);
@@ -691,6 +706,14 @@
return OK;
}
+status_t Camera3OutputStream::updateStream(const std::vector<sp<Surface>> &/*outputSurfaces*/,
+ const std::vector<OutputStreamInfo> &/*outputInfo*/,
+ const std::vector<size_t> &/*removedSurfaceIds*/,
+ KeyedVector<sp<Surface>, size_t> * /*outputMapo*/) {
+ ALOGE("%s: this method is not supported!", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
void Camera3OutputStream::BufferReleasedListener::onBufferReleased() {
sp<Camera3OutputStream> stream = mParent.promote();
if (stream == nullptr) {
@@ -728,7 +751,7 @@
const std::vector<sp<GraphicBuffer>>& removedBuffers) {
sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();
if (callback != nullptr) {
- for (auto gb : removedBuffers) {
+ for (const auto& gb : removedBuffers) {
callback->onBufferFreed(mId, gb->handle);
}
}
@@ -777,6 +800,17 @@
return res;
}
+status_t Camera3OutputStream::dropBuffers(bool dropping) {
+ Mutex::Autolock l(mLock);
+ mDropBuffers = dropping;
+ return OK;
+}
+
+const String8& Camera3OutputStream::getPhysicalCameraId() const {
+ Mutex::Autolock l(mLock);
+ return physicalCameraId();
+}
+
status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
return OK;
}
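For context on the mDropBuffers flag introduced above: it is a toggle set under the stream lock via dropBuffers(), consulted on the buffer-return path to decide between queueing the buffer to the consumer and cancelling it. A minimal standalone sketch of that pattern follows; the types and names are illustrative and not the AOSP API.

// Standalone sketch of the "drop buffers" toggle used on the return path above.
// Buffer/consumer types and names here are illustrative only.
#include <cstdio>
#include <mutex>
#include <vector>

struct Buffer { int id; };

class OutputStream {
  public:
    // Enable or disable dropping of otherwise valid buffers.
    void dropBuffers(bool dropping) {
        std::lock_guard<std::mutex> l(mLock);
        mDropBuffers = dropping;
    }

    // Return a buffer coming back from the producer: forward it to the consumer,
    // or cancel it when dropping is enabled or the buffer carries an error.
    void returnBuffer(const Buffer& b, bool error) {
        std::lock_guard<std::mutex> l(mLock);
        if (error || mDropBuffers) {
            std::printf("cancel buffer %d (%s)\n", b.id, mDropBuffers ? "dropping" : "error");
            return;
        }
        mConsumerQueue.push_back(b);  // stand-in for queueing to the consumer surface
    }

  private:
    std::mutex mLock;
    bool mDropBuffers = false;
    std::vector<Buffer> mConsumerQueue;
};

int main() {
    OutputStream s;
    s.returnBuffer({1}, /*error=*/false);  // queued
    s.dropBuffers(true);
    s.returnBuffer({2}, /*error=*/false);  // cancelled
    s.dropBuffers(false);
    s.returnBuffer({3}, /*error=*/false);  // queued again
    return 0;
}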
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 7023d5d..6f36f92 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -82,7 +82,8 @@
Camera3OutputStream(int id, sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
- nsecs_t timestampOffset, int setId = CAMERA3_STREAM_SET_ID_INVALID);
+ nsecs_t timestampOffset, const String8& physicalCameraId,
+ int setId = CAMERA3_STREAM_SET_ID_INVALID);
/**
* Set up a stream for formats that have a variable buffer size for the same
@@ -93,7 +94,8 @@
Camera3OutputStream(int id, sp<Surface> consumer,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
- nsecs_t timestampOffset, int setId = CAMERA3_STREAM_SET_ID_INVALID);
+ nsecs_t timestampOffset, const String8& physicalCameraId,
+ int setId = CAMERA3_STREAM_SET_ID_INVALID);
/**
* Set up a stream with deferred consumer for formats that have 2 dimensions, such as
@@ -103,6 +105,7 @@
Camera3OutputStream(int id, uint32_t width, uint32_t height, int format,
uint64_t consumerUsage, android_dataspace dataSpace,
camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+ const String8& physicalCameraId,
int setId = CAMERA3_STREAM_SET_ID_INVALID);
virtual ~Camera3OutputStream();
@@ -166,16 +169,40 @@
virtual status_t notifyBufferReleased(ANativeWindowBuffer *anwBuffer);
/**
+ * Drop buffers for this stream when dropping is true; stop dropping them when false.
+ */
+ virtual status_t dropBuffers(bool dropping) override;
+
+ /**
+ * Query the physical camera id for the output stream.
+ */
+ virtual const String8& getPhysicalCameraId() const override;
+
+ /**
* Set the graphic buffer manager to get/return the stream buffers.
*
* It is only legal to call this method when stream is in STATE_CONSTRUCTED state.
*/
status_t setBufferManager(sp<Camera3BufferManager> bufferManager);
+ /**
+ * Query the output surface id.
+ */
+ virtual ssize_t getSurfaceId(const sp<Surface> &/*surface*/) { return 0; }
+
+ /**
+ * Update the stream output surfaces.
+ */
+ virtual status_t updateStream(const std::vector<sp<Surface>> &outputSurfaces,
+ const std::vector<OutputStreamInfo> &outputInfo,
+ const std::vector<size_t> &removedSurfaceIds,
+ KeyedVector<sp<Surface>, size_t> *outputMap/*out*/);
+
protected:
Camera3OutputStream(int id, camera3_stream_type_t type,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+ const String8& physicalCameraId,
uint64_t consumerUsage = 0, nsecs_t timestampOffset = 0,
int setId = CAMERA3_STREAM_SET_ID_INVALID);
@@ -247,6 +274,9 @@
*/
uint64_t mConsumerUsage;
+ // Whether to drop valid buffers.
+ bool mDropBuffers;
+
/**
* Internal Camera3Stream interface
*/
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index 8107dd0..a711a6d 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -18,6 +18,7 @@
#define ANDROID_SERVERS_CAMERA3_OUTPUT_STREAM_INTERFACE_H
#include "Camera3StreamInterface.h"
+#include <utils/KeyedVector.h>
namespace android {
@@ -59,6 +60,29 @@
*
*/
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) = 0;
+
+ /**
+ * Query the surface id.
+ */
+ virtual ssize_t getSurfaceId(const sp<Surface> &surface) = 0;
+
+ /**
+ * Update the stream output surfaces.
+ */
+ virtual status_t updateStream(const std::vector<sp<Surface>> &outputSurfaces,
+ const std::vector<OutputStreamInfo> &outputInfo,
+ const std::vector<size_t> &removedSurfaceIds,
+ KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+ /**
+ * Drop buffers for this stream when dropping is true; stop dropping them when false.
+ */
+ virtual status_t dropBuffers(bool /*dropping*/) = 0;
+
+ /**
+ * Query the physical camera id for the output stream.
+ */
+ virtual const String8& getPhysicalCameraId() const = 0;
};
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index 5051711..2bb9ff7 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2016-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,16 +20,25 @@
namespace camera3 {
+const size_t Camera3SharedOutputStream::kMaxOutputs;
+
Camera3SharedOutputStream::Camera3SharedOutputStream(int id,
const std::vector<sp<Surface>>& surfaces,
uint32_t width, uint32_t height, int format,
uint64_t consumerUsage, android_dataspace dataSpace,
camera3_stream_rotation_t rotation,
- nsecs_t timestampOffset, int setId) :
+ nsecs_t timestampOffset, const String8& physicalCameraId,
+ int setId) :
Camera3OutputStream(id, CAMERA3_STREAM_OUTPUT, width, height,
- format, dataSpace, rotation, consumerUsage,
- timestampOffset, setId),
- mSurfaces(surfaces) {
+ format, dataSpace, rotation, physicalCameraId,
+ consumerUsage, timestampOffset, setId) {
+ size_t consumerCount = std::min(surfaces.size(), kMaxOutputs);
+ if (surfaces.size() > consumerCount) {
+ ALOGE("%s: Trying to add more consumers than the maximum ", __func__);
+ }
+ for (size_t i = 0; i < consumerCount; i++) {
+ mSurfaces[i] = surfaces[i];
+ }
}
Camera3SharedOutputStream::~Camera3SharedOutputStream() {
@@ -44,7 +53,16 @@
uint64_t usage;
getEndpointUsage(&usage);
- res = mStreamSplitter->connect(mSurfaces, usage, camera3_stream::max_buffers, &mConsumer);
+ std::unordered_map<size_t, sp<Surface>> initialSurfaces;
+ for (size_t i = 0; i < kMaxOutputs; i++) {
+ if (mSurfaces[i] != nullptr) {
+ initialSurfaces.emplace(i, mSurfaces[i]);
+ }
+ }
+
+ android::PixelFormat format = isFormatOverridden() ? getOriginalFormat() : getFormat();
+ res = mStreamSplitter->connect(initialSurfaces, usage, mUsage, camera3_stream::max_buffers,
+ getWidth(), getHeight(), format, &mConsumer);
if (res != OK) {
ALOGE("%s: Failed to connect to stream splitter: %s(%d)",
__FUNCTION__, strerror(-res), res);
@@ -68,7 +86,11 @@
bool Camera3SharedOutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
Mutex::Autolock l(mLock);
- return (surface_id >= mSurfaces.size());
+ if (surface_id >= kMaxOutputs) {
+ return true;
+ }
+
+ return (mSurfaces[surface_id] == nullptr);
}
status_t Camera3SharedOutputStream::setConsumers(const std::vector<sp<Surface>>& surfaces) {
@@ -85,11 +107,17 @@
return INVALID_OPERATION;
}
- mSurfaces.push_back(surface);
+ ssize_t id = getNextSurfaceIdLocked();
+ if (id < 0) {
+ ALOGE("%s: No surface ids available!", __func__);
+ return NO_MEMORY;
+ }
+
+ mSurfaces[id] = surface;
// Only call addOutput if the splitter has been connected.
if (mStreamSplitter != nullptr) {
- ret = mStreamSplitter->addOutput(surface);
+ ret = mStreamSplitter->addOutput(id, surface);
if (ret != OK) {
ALOGE("%s: addOutput failed with error code %d", __FUNCTION__, ret);
return ret;
@@ -200,9 +228,9 @@
// Called before shared buffer queue is constructed.
*usage = getPresetConsumerUsage();
- for (auto surface : mSurfaces) {
- if (surface != nullptr) {
- res = getEndpointUsageForSurface(&u, surface);
+ for (size_t id = 0; id < kMaxOutputs; id++) {
+ if (mSurfaces[id] != nullptr) {
+ res = getEndpointUsageForSurface(&u, mSurfaces[id]);
*usage |= u;
}
}
@@ -215,6 +243,140 @@
return res;
}
+ssize_t Camera3SharedOutputStream::getNextSurfaceIdLocked() {
+ ssize_t id = -1;
+ for (size_t i = 0; i < kMaxOutputs; i++) {
+ if (mSurfaces[i] == nullptr) {
+ id = i;
+ break;
+ }
+ }
+
+ return id;
+}
+
+ssize_t Camera3SharedOutputStream::getSurfaceId(const sp<Surface> &surface) {
+ Mutex::Autolock l(mLock);
+ ssize_t id = -1;
+ for (size_t i = 0; i < kMaxOutputs; i++) {
+ if (mSurfaces[i] == surface) {
+ id = i;
+ break;
+ }
+ }
+
+ return id;
+}
+
+status_t Camera3SharedOutputStream::revertPartialUpdateLocked(
+ const KeyedVector<sp<Surface>, size_t> &removedSurfaces,
+ const KeyedVector<sp<Surface>, size_t> &attachedSurfaces) {
+ status_t ret = OK;
+
+ for (size_t i = 0; i < attachedSurfaces.size(); i++) {
+ size_t index = attachedSurfaces.valueAt(i);
+ if (mStreamSplitter != nullptr) {
+ ret = mStreamSplitter->removeOutput(index);
+ if (ret != OK) {
+ return UNKNOWN_ERROR;
+ }
+ }
+ mSurfaces[index] = nullptr;
+ }
+
+ for (size_t i = 0; i < removedSurfaces.size(); i++) {
+ size_t index = removedSurfaces.valueAt(i);
+ if (mStreamSplitter != nullptr) {
+ ret = mStreamSplitter->addOutput(index, removedSurfaces.keyAt(i));
+ if (ret != OK) {
+ return UNKNOWN_ERROR;
+ }
+ }
+ mSurfaces[index] = removedSurfaces.keyAt(i);
+ }
+
+ return ret;
+}
+
+status_t Camera3SharedOutputStream::updateStream(const std::vector<sp<Surface>> &outputSurfaces,
+ const std::vector<OutputStreamInfo> &outputInfo,
+ const std::vector<size_t> &removedSurfaceIds,
+ KeyedVector<sp<Surface>, size_t> *outputMap) {
+ status_t ret = OK;
+ Mutex::Autolock l(mLock);
+
+ if ((outputMap == nullptr) || (outputInfo.size() != outputSurfaces.size()) ||
+ (outputSurfaces.size() > kMaxOutputs)) {
+ return BAD_VALUE;
+ }
+
+ uint64_t usage;
+ getEndpointUsage(&usage);
+ KeyedVector<sp<Surface>, size_t> removedSurfaces;
+ //Check whether the new surfaces are compatible.
+ for (const auto &infoIt : outputInfo) {
+ bool imgReaderUsage = (infoIt.consumerUsage & GRALLOC_USAGE_SW_READ_OFTEN) != 0;
+ bool sizeMismatch = (static_cast<uint32_t>(infoIt.width) != getWidth()) ||
+ (static_cast<uint32_t>(infoIt.height) != getHeight());
+ if ((imgReaderUsage && sizeMismatch) ||
+ (infoIt.format != getOriginalFormat() && infoIt.format != getFormat()) ||
+ (infoIt.dataSpace != getDataSpace() &&
+ infoIt.dataSpace != getOriginalDataSpace())) {
+ ALOGE("%s: Shared surface parameters format: 0x%x dataSpace: 0x%x "
+ " don't match source stream format: 0x%x dataSpace: 0x%x", __FUNCTION__,
+ infoIt.format, infoIt.dataSpace, getFormat(), getDataSpace());
+ return BAD_VALUE;
+ }
+ }
+
+ //First remove all absent outputs
+ for (const auto &it : removedSurfaceIds) {
+ if (mStreamSplitter != nullptr) {
+ ret = mStreamSplitter->removeOutput(it);
+ if (ret != OK) {
+ ALOGE("%s: failed with error code %d", __FUNCTION__, ret);
+ status_t res = revertPartialUpdateLocked(removedSurfaces, *outputMap);
+ if (res != OK) {
+ return res;
+ }
+ return ret;
+
+ }
+ }
+ removedSurfaces.add(mSurfaces[it], it);
+ mSurfaces[it] = nullptr;
+ }
+
+ //Next add the new outputs
+ for (const auto &it : outputSurfaces) {
+ ssize_t surfaceId = getNextSurfaceIdLocked();
+ if (surfaceId < 0) {
+ ALOGE("%s: No more available output slots!", __FUNCTION__);
+ status_t res = revertPartialUpdateLocked(removedSurfaces, *outputMap);
+ if (res != OK) {
+ return res;
+ }
+ return NO_MEMORY;
+ }
+ if (mStreamSplitter != nullptr) {
+ ret = mStreamSplitter->addOutput(surfaceId, it);
+ if (ret != OK) {
+ ALOGE("%s: failed with error code %d", __FUNCTION__, ret);
+ status_t res = revertPartialUpdateLocked(removedSurfaces, *outputMap);
+ if (res != OK) {
+ return res;
+ }
+ return ret;
+ }
+ }
+ mSurfaces[surfaceId] = it;
+ outputMap->add(it, surfaceId);
+ }
+
+ return ret;
+}
+
} // namespace camera3
} // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
index 22bb2fc..02b1c09 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2016-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -36,6 +36,7 @@
uint32_t width, uint32_t height, int format,
uint64_t consumerUsage, android_dataspace dataSpace,
camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+ const String8& physicalCameraId,
int setId = CAMERA3_STREAM_SET_ID_INVALID);
virtual ~Camera3SharedOutputStream();
@@ -46,9 +47,24 @@
virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
+ virtual ssize_t getSurfaceId(const sp<Surface> &surface);
+
+ virtual status_t updateStream(const std::vector<sp<Surface>> &outputSurfaces,
+ const std::vector<OutputStreamInfo> &outputInfo,
+ const std::vector<size_t> &removedSurfaceIds,
+ KeyedVector<sp<Surface>, size_t> *outputMap/*out*/);
+
private:
- // Surfaces passed in constructor from app
- std::vector<sp<Surface> > mSurfaces;
+
+ static const size_t kMaxOutputs = 4;
+
+ // Map surfaceId -> output surfaces
+ sp<Surface> mSurfaces[kMaxOutputs];
+
+ ssize_t getNextSurfaceIdLocked();
+
+ status_t revertPartialUpdateLocked(const KeyedVector<sp<Surface>, size_t> &removedSurfaces,
+ const KeyedVector<sp<Surface>, size_t> &attachedSurfaces);
/**
* The Camera3StreamSplitter object this stream uses for stream
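Note on the shared output stream changes above: the surface vector is replaced by a fixed-size slot array so that surface ids remain stable across add/remove, and updateStream() reverts a partial update on failure. The following is a minimal standalone sketch of that slot-allocation and revert pattern; the types and names are illustrative and not the AOSP interfaces.

// Standalone sketch of fixed-slot surface ids with revert-on-failure,
// loosely following the updateStream() flow above. Illustrative names only.
#include <array>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

constexpr size_t kMaxOutputs = 4;

class SharedOutputs {
  public:
    // Return the first free slot, or -1 if all slots are taken.
    int nextFreeSlot() const {
        for (size_t i = 0; i < kMaxOutputs; i++) {
            if (mSlots[i].empty()) return static_cast<int>(i);
        }
        return -1;
    }

    // Remove the given slots, then attach the new surfaces; on failure undo
    // the partial update so the previous mapping is restored.
    bool update(const std::vector<size_t>& removed, const std::vector<std::string>& added) {
        std::vector<std::pair<size_t, std::string>> removedBackup;
        std::vector<size_t> attached;
        for (size_t idx : removed) {
            removedBackup.emplace_back(idx, mSlots[idx]);  // remember before clearing
            mSlots[idx].clear();
        }
        for (const auto& surface : added) {
            int slot = nextFreeSlot();
            if (slot < 0) {  // no room left: revert everything done so far
                for (size_t idx : attached) mSlots[idx].clear();
                for (const auto& r : removedBackup) mSlots[r.first] = r.second;
                return false;
            }
            mSlots[slot] = surface;
            attached.push_back(static_cast<size_t>(slot));
        }
        return true;
    }

    void dump() const {
        for (size_t i = 0; i < kMaxOutputs; i++) {
            std::printf("slot %zu: %s\n", i, mSlots[i].empty() ? "<free>" : mSlots[i].c_str());
        }
    }

  private:
    std::array<std::string, kMaxOutputs> mSlots;  // surface id -> surface (empty = free)
};

int main() {
    SharedOutputs out;
    out.update({}, {"preview", "recording"});
    out.update({0}, {"imagereader"});  // slot 0 is freed and immediately reused
    out.dump();
    return 0;
}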
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index fbe8f4f..1105b75 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -47,7 +47,8 @@
Camera3Stream::Camera3Stream(int id,
camera3_stream_type type,
uint32_t width, uint32_t height, size_t maxSize, int format,
- android_dataspace dataSpace, camera3_stream_rotation_t rotation, int setId) :
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+ const String8& physicalCameraId, int setId) :
camera3_stream(),
mId(id),
mSetId(setId),
@@ -64,7 +65,8 @@
mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX),
mBufferLimitLatency(kBufferLimitLatencyBinSize),
mFormatOverridden(false),
- mOriginalFormat(-1) {
+ mOriginalFormat(-1),
+ mPhysicalCameraId(physicalCameraId) {
camera3_stream::stream_type = type;
camera3_stream::width = width;
@@ -74,6 +76,7 @@
camera3_stream::rotation = rotation;
camera3_stream::max_buffers = 0;
camera3_stream::priv = NULL;
+ camera3_stream::physical_camera_id = mPhysicalCameraId.string();
if ((format == HAL_PIXEL_FORMAT_BLOB || format == HAL_PIXEL_FORMAT_RAW_OPAQUE) &&
maxSize == 0) {
@@ -140,6 +143,79 @@
return mOriginalDataSpace;
}
+const String8& Camera3Stream::physicalCameraId() const {
+ return mPhysicalCameraId;
+}
+
+status_t Camera3Stream::forceToIdle() {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+ status_t res;
+
+ switch (mState) {
+ case STATE_ERROR:
+ case STATE_CONSTRUCTED:
+ case STATE_IN_CONFIG:
+ case STATE_PREPARING:
+ case STATE_IN_RECONFIG:
+ ALOGE("%s: Invalid state: %d", __FUNCTION__, mState);
+ res = NO_INIT;
+ break;
+ case STATE_CONFIGURED:
+ if (hasOutstandingBuffersLocked()) {
+ sp<StatusTracker> statusTracker = mStatusTracker.promote();
+ if (statusTracker != 0) {
+ statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+ }
+ }
+
+ mState = STATE_IN_IDLE;
+ res = OK;
+
+ break;
+ default:
+ ALOGE("%s: Unknown state %d", __FUNCTION__, mState);
+ res = NO_INIT;
+ }
+
+ return res;
+}
+
+status_t Camera3Stream::restoreConfiguredState() {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+ status_t res;
+
+ switch (mState) {
+ case STATE_ERROR:
+ case STATE_CONSTRUCTED:
+ case STATE_IN_CONFIG:
+ case STATE_PREPARING:
+ case STATE_IN_RECONFIG:
+ case STATE_CONFIGURED:
+ ALOGE("%s: Invalid state: %d", __FUNCTION__, mState);
+ res = NO_INIT;
+ break;
+ case STATE_IN_IDLE:
+ if (hasOutstandingBuffersLocked()) {
+ sp<StatusTracker> statusTracker = mStatusTracker.promote();
+ if (statusTracker != 0) {
+ statusTracker->markComponentActive(mStatusId);
+ }
+ }
+
+ mState = STATE_CONFIGURED;
+ res = OK;
+
+ break;
+ default:
+ ALOGE("%s: Unknown state %d", __FUNCTION__, mState);
+ res = NO_INIT;
+ }
+
+ return res;
+}
+
camera3_stream* Camera3Stream::startConfiguration() {
ATRACE_CALL();
Mutex::Autolock l(mLock);
@@ -150,6 +226,7 @@
ALOGE("%s: In error state", __FUNCTION__);
return NULL;
case STATE_CONSTRUCTED:
+ case STATE_IN_IDLE:
// OK
break;
case STATE_IN_CONFIG:
@@ -179,6 +256,11 @@
return NULL;
}
+ if (mState == STATE_IN_IDLE) {
+ // Skip configuration.
+ return this;
+ }
+
// Stop tracking if currently doing so
if (mStatusId != StatusTracker::NO_STATUS_ID) {
sp<StatusTracker> statusTracker = mStatusTracker.promote();
@@ -219,6 +301,9 @@
ALOGE("%s: Cannot finish configuration that hasn't been started",
__FUNCTION__);
return INVALID_OPERATION;
+ case STATE_IN_IDLE:
+ //Skip configuration in this state
+ return OK;
default:
ALOGE("%s: Unknown state", __FUNCTION__);
return INVALID_OPERATION;
@@ -267,6 +352,7 @@
return INVALID_OPERATION;
case STATE_IN_CONFIG:
case STATE_IN_RECONFIG:
+ case STATE_IN_IDLE:
// OK
break;
case STATE_CONSTRUCTED:
@@ -282,7 +368,9 @@
mUsage = mOldUsage;
camera3_stream::max_buffers = mOldMaxBuffers;
- mState = (mState == STATE_IN_RECONFIG) ? STATE_CONFIGURED : STATE_CONSTRUCTED;
+ mState = ((mState == STATE_IN_RECONFIG) || (mState == STATE_IN_IDLE)) ? STATE_CONFIGURED :
+ STATE_CONSTRUCTED;
+
return OK;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 6e7912e..a60cb56 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -68,6 +68,12 @@
* duration. In this state, only prepareNextBuffer() and cancelPrepare()
* may be called.
*
+ * STATE_IN_IDLE: This is a temporary state only intended to be used for input
+ * streams and only for the case where we need to re-configure the camera device
+ * while the input stream has an outstanding buffer. No other stream type may
+ * switch to this state; for them it is invalid and should be handled as an
+ * unknown state.
+ *
* Transition table:
*
* <none> => STATE_CONSTRUCTED:
@@ -98,6 +104,11 @@
* all stream buffers, or cancelPrepare is called.
* STATE_CONFIGURED => STATE_ABANDONED:
* When the buffer queue of the stream is abandoned.
+ * STATE_CONFIGURED => STATE_IN_IDLE:
+ * Only for an input stream which has an outstanding buffer.
+ * STATE_IN_IDLE => STATE_CONFIGURED:
+ * After the internal re-configuration, the input stream reverts to the
+ * configured state.
*
* Status Tracking:
* Each stream is tracked by StatusTracker as a separate component,
@@ -108,7 +119,9 @@
*
* - ACTIVE: One or more buffers have been handed out (with #getBuffer).
* - IDLE: All buffers have been returned (with #returnBuffer), and their
- * respective release_fence(s) have been signaled.
+ * respective release_fence(s) have been signaled. The only exception to this
+ * rule is an input stream that moves to "STATE_IN_IDLE" during internal
+ * re-configuration.
*
* A typical use case is output streams. When the HAL has any buffers
* dequeued, the stream is marked ACTIVE. When the HAL returns all buffers
@@ -152,6 +165,7 @@
void setDataSpaceOverride(bool dataSpaceOverriden);
bool isDataSpaceOverridden() const;
android_dataspace getOriginalDataSpace() const;
+ const String8& physicalCameraId() const;
camera3_stream* asHalStream() override {
return this;
@@ -386,6 +400,19 @@
*/
bool isAbandoned() const;
+ /**
+ * Switch a configured stream, possibly with outstanding buffers, into the idle
+ * state. Configuration for such streams will be skipped, assuming there are no
+ * changes to the stream parameters.
+ */
+ status_t forceToIdle();
+
+ /**
+ * Restore a forced-idle stream to the configured state, marking it active
+ * if it still has outstanding buffers.
+ */
+ status_t restoreConfiguredState();
+
protected:
const int mId;
/**
@@ -414,7 +441,8 @@
STATE_IN_RECONFIG,
STATE_CONFIGURED,
STATE_PREPARING,
- STATE_ABANDONED
+ STATE_ABANDONED,
+ STATE_IN_IDLE
} mState;
mutable Mutex mLock;
@@ -422,7 +450,7 @@
Camera3Stream(int id, camera3_stream_type type,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
- int setId);
+ const String8& physicalCameraId, int setId);
wp<Camera3StreamBufferFreedListener> mBufferFreedListener;
@@ -529,6 +557,7 @@
bool mDataSpaceOverridden;
android_dataspace mOriginalDataSpace;
+ String8 mPhysicalCameraId;
}; // class Camera3Stream
}; // namespace camera3
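Note on the new STATE_IN_IDLE documented above: it is reachable only from STATE_CONFIGURED (for an input stream with an outstanding buffer) and only transitions back to STATE_CONFIGURED. The following is a minimal standalone model of that transition check, mirroring forceToIdle()/restoreConfiguredState() in the diff above; the enum and function names are illustrative, not the AOSP symbols.

// Standalone model of the STATE_CONFIGURED <-> STATE_IN_IDLE transitions
// described in the Camera3Stream documentation above. Illustrative names only.
#include <cstdio>

enum class StreamState {
    Constructed, InConfig, InReconfig, Configured, Preparing, Abandoned, InIdle, Error
};

// forceToIdle(): only a configured stream may be forced into the idle state.
bool forceToIdle(StreamState& s) {
    if (s != StreamState::Configured) return false;  // invalid from any other state
    s = StreamState::InIdle;
    return true;
}

// restoreConfiguredState(): only a forced-idle stream may go back to configured.
bool restoreConfiguredState(StreamState& s) {
    if (s != StreamState::InIdle) return false;
    s = StreamState::Configured;
    return true;
}

int main() {
    StreamState s = StreamState::Configured;
    std::printf("forceToIdle: %s\n", forceToIdle(s) ? "ok" : "rejected");
    std::printf("restore: %s\n", restoreConfiguredState(s) ? "ok" : "rejected");
    s = StreamState::Constructed;
    std::printf("forceToIdle from CONSTRUCTED: %s\n", forceToIdle(s) ? "ok" : "rejected");
    return 0;
}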
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index cc9bf8e..9ed7184 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -43,6 +43,24 @@
class StatusTracker;
+// OutputStreamInfo describes the properties of a camera output stream.
+class OutputStreamInfo {
+ public:
+ int width;
+ int height;
+ int format;
+ android_dataspace dataSpace;
+ uint64_t consumerUsage;
+ bool finalized = false;
+ OutputStreamInfo() :
+ width(-1), height(-1), format(-1), dataSpace(HAL_DATASPACE_UNKNOWN),
+ consumerUsage(0) {}
+ OutputStreamInfo(int _width, int _height, int _format, android_dataspace _dataSpace,
+ uint64_t _consumerUsage) :
+ width(_width), height(_height), format(_format),
+ dataSpace(_dataSpace), consumerUsage(_consumerUsage) {}
+};
+
/**
* An interface for managing a single stream of input and/or output data from
* the camera device.
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
index a0a50c2..59ac636 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -38,8 +38,9 @@
namespace android {
-status_t Camera3StreamSplitter::connect(const std::vector<sp<Surface> >& surfaces,
- uint64_t consumerUsage, size_t halMaxBuffers, sp<Surface>* consumer) {
+status_t Camera3StreamSplitter::connect(const std::unordered_map<size_t, sp<Surface>> &surfaces,
+ uint64_t consumerUsage, uint64_t producerUsage, size_t halMaxBuffers, uint32_t width,
+ uint32_t height, android::PixelFormat format, sp<Surface>* consumer) {
ATRACE_CALL();
if (consumer == nullptr) {
SP_LOGE("%s: consumer pointer is NULL", __FUNCTION__);
@@ -62,12 +63,12 @@
mConsumerName = getUniqueConsumerName();
// Add output surfaces. This has to be before creating internal buffer queue
// in order to get max consumer side buffers.
- for (size_t i = 0; i < surfaces.size(); i++) {
- if (surfaces[i] == nullptr) {
+ for (auto &it : surfaces) {
+ if (it.second == nullptr) {
SP_LOGE("%s: Fatal: surface is NULL", __FUNCTION__);
return BAD_VALUE;
}
- res = addOutputLocked(surfaces[i]);
+ res = addOutputLocked(it.first, it.second);
if (res != OK) {
SP_LOGE("%s: Failed to add output surface: %s(%d)",
__FUNCTION__, strerror(-res), res);
@@ -94,8 +95,20 @@
return NO_MEMORY;
}
+ res = mProducer->setAsyncMode(true);
+ if (res != OK) {
+ SP_LOGE("%s: Failed to enable input queue async mode: %s(%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
res = mConsumer->consumerConnect(this, /* controlledByApp */ false);
+ mWidth = width;
+ mHeight = height;
+ mFormat = format;
+ mProducerUsage = producerUsage;
+
SP_LOGV("%s: connected", __FUNCTION__);
return res;
}
@@ -117,10 +130,13 @@
mNotifiers.clear();
for (auto& output : mOutputs) {
- output->disconnect(NATIVE_WINDOW_API_CAMERA);
+ if (output.second != nullptr) {
+ output.second->disconnect(NATIVE_WINDOW_API_CAMERA);
+ }
}
mOutputs.clear();
mOutputSlots.clear();
+ mConsumerBufferCount.clear();
mConsumer->consumerDisconnect();
@@ -139,10 +155,10 @@
disconnect();
}
-status_t Camera3StreamSplitter::addOutput(const sp<Surface>& outputQueue) {
+status_t Camera3StreamSplitter::addOutput(size_t surfaceId, const sp<Surface>& outputQueue) {
ATRACE_CALL();
Mutex::Autolock lock(mMutex);
- status_t res = addOutputLocked(outputQueue);
+ status_t res = addOutputLocked(surfaceId, outputQueue);
if (res != OK) {
SP_LOGE("%s: addOutputLocked failed %d", __FUNCTION__, res);
@@ -154,18 +170,30 @@
return res;
}
-status_t Camera3StreamSplitter::addOutputLocked(const sp<Surface>& outputQueue) {
+status_t Camera3StreamSplitter::addOutputLocked(size_t surfaceId, const sp<Surface>& outputQueue) {
ATRACE_CALL();
if (outputQueue == nullptr) {
SP_LOGE("addOutput: outputQueue must not be NULL");
return BAD_VALUE;
}
+ if (mOutputs[surfaceId] != nullptr) {
+ SP_LOGE("%s: surfaceId: %u already taken!", __FUNCTION__, (unsigned) surfaceId);
+ return BAD_VALUE;
+ }
+
+ status_t res = native_window_set_buffers_dimensions(outputQueue.get(),
+ mWidth, mHeight);
+ if (res != NO_ERROR) {
+ SP_LOGE("addOutput: failed to set buffer dimensions (%d)", res);
+ return res;
+ }
+
sp<IGraphicBufferProducer> gbp = outputQueue->getIGraphicBufferProducer();
// Connect to the buffer producer
sp<OutputListener> listener(new OutputListener(this, gbp));
IInterface::asBinder(gbp)->linkToDeath(listener);
- status_t res = outputQueue->connect(NATIVE_WINDOW_API_CAMERA, listener);
+ res = outputQueue->connect(NATIVE_WINDOW_API_CAMERA, listener);
if (res != NO_ERROR) {
SP_LOGE("addOutput: failed to connect (%d)", res);
return res;
@@ -184,7 +212,11 @@
SP_LOGV("%s: Consumer wants %d buffers, Producer wants %zu", __FUNCTION__,
maxConsumerBuffers, mMaxHalBuffers);
- size_t totalBufferCount = maxConsumerBuffers + mMaxHalBuffers;
+ // The output slot count requirement can change depending on the current number
+ // of outputs and the incoming buffer consumption rate. To avoid any issues with
+ // insufficient slots, set their count to the maximum supported. Buffer allocation
+ // on the output surface is disabled, so no real buffers will get allocated.
+ size_t totalBufferCount = BufferQueue::NUM_BUFFER_SLOTS;
res = native_window_set_buffer_count(outputQueue.get(),
totalBufferCount);
if (res != OK) {
@@ -208,7 +240,8 @@
}
// Add new entry into mOutputs
- mOutputs.push_back(gbp);
+ mOutputs[surfaceId] = gbp;
+ mConsumerBufferCount[surfaceId] = maxConsumerBuffers;
mNotifiers[gbp] = listener;
mOutputSlots[gbp] = std::make_unique<OutputSlots>(totalBufferCount);
@@ -216,8 +249,80 @@
return NO_ERROR;
}
+status_t Camera3StreamSplitter::removeOutput(size_t surfaceId) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ status_t res = removeOutputLocked(surfaceId);
+ if (res != OK) {
+ SP_LOGE("%s: removeOutputLocked failed %d", __FUNCTION__, res);
+ return res;
+ }
+
+ res = mConsumer->setMaxAcquiredBufferCount(mMaxConsumerBuffers+1);
+ if (res != OK) {
+ SP_LOGE("%s: setMaxAcquiredBufferCount failed %d", __FUNCTION__, res);
+ return res;
+ }
+
+ return res;
+}
+
+status_t Camera3StreamSplitter::removeOutputLocked(size_t surfaceId) {
+ if (mOutputs[surfaceId] == nullptr) {
+ SP_LOGE("%s: output surface is not present!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ sp<IGraphicBufferProducer> gbp = mOutputs[surfaceId];
+ //Search and decrement the ref. count of any buffers that are
+ //still attached to the removed surface.
+ std::vector<uint64_t> pendingBufferIds;
+ auto& outputSlots = *mOutputSlots[gbp];
+ for (size_t i = 0; i < outputSlots.size(); i++) {
+ if (outputSlots[i] != nullptr) {
+ pendingBufferIds.push_back(outputSlots[i]->getId());
+ auto rc = gbp->detachBuffer(i);
+ if (rc != NO_ERROR) {
+ //Buffers that fail to detach here will be scheduled for detach in the
+ //input buffer queue and the rest of the registered outputs instead.
+ //This helps ensure that the camera stops accessing buffers that can still
+ //be referenced by the disconnected output.
+ mDetachedBuffers.emplace(outputSlots[i]->getId());
+ }
+ }
+ }
+ mOutputs[surfaceId] = nullptr;
+ mOutputSlots[gbp] = nullptr;
+ for (const auto &id : pendingBufferIds) {
+ decrementBufRefCountLocked(id, surfaceId);
+ }
+
+ auto res = IInterface::asBinder(gbp)->unlinkToDeath(mNotifiers[gbp]);
+ if (res != OK) {
+ SP_LOGE("%s: Failed to unlink producer death listener: %d ", __FUNCTION__, res);
+ return res;
+ }
+
+ res = gbp->disconnect(NATIVE_WINDOW_API_CAMERA);
+ if (res != OK) {
+ SP_LOGE("%s: Unable disconnect from producer interface: %d ", __FUNCTION__, res);
+ return res;
+ }
+
+ mNotifiers[gbp] = nullptr;
+ if (mConsumerBufferCount[surfaceId] < mMaxHalBuffers) {
+ mMaxConsumerBuffers -= mConsumerBufferCount[surfaceId];
+ } else {
+ SP_LOGE("%s: Cached consumer buffer count mismatch!", __FUNCTION__);
+ }
+ mConsumerBufferCount[surfaceId] = 0;
+
+ return res;
+}
+
status_t Camera3StreamSplitter::outputBufferLocked(const sp<IGraphicBufferProducer>& output,
- const BufferItem& bufferItem) {
+ const BufferItem& bufferItem, size_t surfaceId) {
ATRACE_CALL();
status_t res;
IGraphicBufferProducer::QueueBufferInput queueInput(
@@ -242,6 +347,11 @@
SP_LOGV("%s: Queuing buffer to buffer queue %p slot %d returns %d",
__FUNCTION__, output.get(), slot, res);
+ //'mMutex' is not held while the buffer is being queued, which makes it
+ //possible for "output" to be removed in the meantime. Check for that and return.
+ if (mOutputSlots[output] == nullptr) {
+ return res;
+ }
if (res != OK) {
if (res != NO_INIT && res != DEAD_OBJECT) {
SP_LOGE("Queuing buffer to output failed (%d)", res);
@@ -250,7 +360,7 @@
// that, increment the release count so that we still release this
// buffer eventually, and move on to the next output
onAbandonedLocked();
- decrementBufRefCountLocked(bufferItem.mGraphicBuffer->getId(), output);
+ decrementBufRefCountLocked(bufferItem.mGraphicBuffer->getId(), surfaceId);
return res;
}
@@ -258,7 +368,7 @@
// queue, no onBufferReleased is called by the buffer queue.
// Proactively trigger the callback to avoid buffer loss.
if (queueOutput.bufferReplaced) {
- onBufferReleasedByOutputLocked(output);
+ onBufferReplacedLocked(output, surfaceId);
}
return res;
@@ -271,7 +381,6 @@
status_t Camera3StreamSplitter::notifyBufferReleased(const sp<GraphicBuffer>& buffer) {
ATRACE_CALL();
- status_t res = OK;
Mutex::Autolock lock(mMutex);
@@ -279,17 +388,7 @@
std::unique_ptr<BufferTracker> tracker_ptr = std::move(mBuffers[bufferId]);
mBuffers.erase(bufferId);
- for (const auto surface : tracker_ptr->requestedSurfaces()) {
- sp<IGraphicBufferProducer>& gbp = mOutputs[surface];
- OutputSlots& outputSlots = *(mOutputSlots[gbp]);
- int slot = getSlotForOutputLocked(gbp, buffer);
- if (slot != BufferItem::INVALID_BUFFER_SLOT) {
- gbp->detachBuffer(slot);
- outputSlots[slot].clear();
- }
- }
-
- return res;
+ return OK;
}
status_t Camera3StreamSplitter::attachBufferToOutputs(ANativeWindowBuffer* anb,
@@ -307,7 +406,15 @@
for (auto& surface_id : surface_ids) {
sp<IGraphicBufferProducer>& gbp = mOutputs[surface_id];
- int slot = BufferItem::INVALID_BUFFER_SLOT;
+ if (gbp.get() == nullptr) {
+ //The output surface was likely removed by the client.
+ continue;
+ }
+ int slot = getSlotForOutputLocked(gbp, gb);
+ if (slot != BufferItem::INVALID_BUFFER_SLOT) {
+ //Buffer is already attached to this output surface.
+ continue;
+ }
//Temporarily unlock the mutex when trying to attachBuffer to the output
//queue, because attachBuffer could block in case of a slow consumer. If
//we block while holding the lock, onFrameAvailable and onBufferReleased
@@ -320,12 +427,25 @@
__FUNCTION__, gbp.get(), strerror(-res), res);
return res;
}
+ if ((slot < 0) || (slot > BufferQueue::NUM_BUFFER_SLOTS)) {
+ SP_LOGE("%s: Slot received %d either bigger than expected maximum %d or negative!",
+ __FUNCTION__, slot, BufferQueue::NUM_BUFFER_SLOTS);
+ return BAD_VALUE;
+ }
+ //'mMutex' is not held while the buffer is being attached, which makes it
+ //possible for "gbp" to be removed in the meantime. Check for that and continue.
+ if (mOutputSlots[gbp] == nullptr) {
+ continue;
+ }
auto& outputSlots = *mOutputSlots[gbp];
+ if (static_cast<size_t> (slot + 1) > outputSlots.size()) {
+ outputSlots.resize(slot + 1);
+ }
if (outputSlots[slot] != nullptr) {
// If the buffer is attached to a slot which already contains a buffer,
// the previous buffer will be removed from the output queue. Decrement
// the reference count accordingly.
- decrementBufRefCountLocked(outputSlots[slot]->getId(), gbp);
+ decrementBufRefCountLocked(outputSlots[slot]->getId(), surface_id);
}
SP_LOGV("%s: Attached buffer %p to slot %d on output %p.",__FUNCTION__, gb.get(),
slot, gbp.get());
@@ -349,7 +469,21 @@
mOnFrameAvailableRes.store(res);
return;
}
- if (mBuffers.find(bufferItem.mGraphicBuffer->getId()) == mBuffers.end()) {
+
+ uint64_t bufferId;
+ if (bufferItem.mGraphicBuffer != nullptr) {
+ mInputSlots[bufferItem.mSlot] = bufferItem;
+ } else if (bufferItem.mAcquireCalled) {
+ bufferItem.mGraphicBuffer = mInputSlots[bufferItem.mSlot].mGraphicBuffer;
+ mInputSlots[bufferItem.mSlot].mFrameNumber = bufferItem.mFrameNumber;
+ } else {
+ SP_LOGE("%s: Invalid input graphic buffer!", __FUNCTION__);
+ res = BAD_VALUE;
+ return;
+ }
+ bufferId = bufferItem.mGraphicBuffer->getId();
+
+ if (mBuffers.find(bufferId) == mBuffers.end()) {
SP_LOGE("%s: Acquired buffer doesn't exist in attached buffer map",
__FUNCTION__);
mOnFrameAvailableRes.store(INVALID_OPERATION);
@@ -359,24 +493,19 @@
SP_LOGV("acquired buffer %" PRId64 " from input at slot %d",
bufferItem.mGraphicBuffer->getId(), bufferItem.mSlot);
- res = mConsumer->detachBuffer(bufferItem.mSlot);
- if (res != NO_ERROR) {
- SP_LOGE("%s: detaching buffer from input failed (%d)", __FUNCTION__, res);
- mOnFrameAvailableRes.store(res);
- return;
- }
-
// Attach and queue the buffer to each of the outputs
- BufferTracker& tracker = *(mBuffers[bufferItem.mGraphicBuffer->getId()]);
+ BufferTracker& tracker = *(mBuffers[bufferId]);
SP_LOGV("%s: BufferTracker for buffer %" PRId64 ", number of requests %zu",
__FUNCTION__, bufferItem.mGraphicBuffer->getId(), tracker.requestedSurfaces().size());
for (const auto id : tracker.requestedSurfaces()) {
- LOG_ALWAYS_FATAL_IF(id >= mOutputs.size(),
- "requested surface id exceeding max registered ids");
+ if (mOutputs[id] == nullptr) {
+ //The output surface was likely removed by the client.
+ continue;
+ }
- res = outputBufferLocked(mOutputs[id], bufferItem);
+ res = outputBufferLocked(mOutputs[id], bufferItem, id);
if (res != OK) {
SP_LOGE("%s: outputBufferLocked failed %d", __FUNCTION__, res);
mOnFrameAvailableRes.store(res);
@@ -389,12 +518,14 @@
mOnFrameAvailableRes.store(res);
}
-void Camera3StreamSplitter::decrementBufRefCountLocked(uint64_t id,
- const sp<IGraphicBufferProducer>& from) {
+void Camera3StreamSplitter::decrementBufRefCountLocked(uint64_t id, size_t surfaceId) {
ATRACE_CALL();
- size_t referenceCount = mBuffers[id]->decrementReferenceCountLocked();
- removeSlotForOutputLocked(from, mBuffers[id]->getBuffer());
+ if (mBuffers[id] == nullptr) {
+ return;
+ }
+
+ size_t referenceCount = mBuffers[id]->decrementReferenceCountLocked(surfaceId);
if (referenceCount > 0) {
return;
}
@@ -407,14 +538,28 @@
std::unique_ptr<BufferTracker> tracker_ptr = std::move(mBuffers[id]);
mBuffers.erase(id);
- // Attach and release the buffer back to the input
- int consumerSlot = BufferItem::INVALID_BUFFER_SLOT;
- status_t res = mConsumer->attachBuffer(&consumerSlot, tracker_ptr->getBuffer());
- if (res != NO_ERROR) {
- SP_LOGE("%s: attaching buffer to input failed (%d)", __FUNCTION__, res);
+ uint64_t bufferId = tracker_ptr->getBuffer()->getId();
+ int consumerSlot = -1;
+ uint64_t frameNumber;
+ auto inputSlot = mInputSlots.begin();
+ for (; inputSlot != mInputSlots.end(); inputSlot++) {
+ if (inputSlot->second.mGraphicBuffer->getId() == bufferId) {
+ consumerSlot = inputSlot->second.mSlot;
+ frameNumber = inputSlot->second.mFrameNumber;
+ break;
+ }
+ }
+ if (consumerSlot == -1) {
+ SP_LOGE("%s: Buffer missing inside input slots!", __FUNCTION__);
return;
}
+ auto detachBuffer = mDetachedBuffers.find(bufferId);
+ bool detach = (detachBuffer != mDetachedBuffers.end());
+ if (detach) {
+ mDetachedBuffers.erase(detachBuffer);
+ mInputSlots.erase(inputSlot);
+ }
// Temporarily unlock mutex to avoid circular lock:
// 1. This function holds splitter lock, calls releaseBuffer which triggers
// onBufferReleased in Camera3OutputStream. onBufferReleased waits on the
@@ -424,57 +569,126 @@
// splitter lock.
sp<IGraphicBufferConsumer> consumer(mConsumer);
mMutex.unlock();
+ int res = NO_ERROR;
if (consumer != nullptr) {
- res = consumer->releaseBuffer(consumerSlot, /* frameNumber */ 0,
- EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, tracker_ptr->getMergedFence());
+ if (detach) {
+ res = consumer->detachBuffer(consumerSlot);
+ } else {
+ res = consumer->releaseBuffer(consumerSlot, frameNumber,
+ EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, tracker_ptr->getMergedFence());
+ }
} else {
SP_LOGE("%s: consumer has become null!", __FUNCTION__);
}
mMutex.lock();
- // If the producer of this queue is disconnected, -22 error will occur
+
if (res != NO_ERROR) {
- SP_LOGE("%s: releaseBuffer returns %d", __FUNCTION__, res);
+ if (detach) {
+ SP_LOGE("%s: detachBuffer returns %d", __FUNCTION__, res);
+ } else {
+ SP_LOGE("%s: releaseBuffer returns %d", __FUNCTION__, res);
+ }
}
}
void Camera3StreamSplitter::onBufferReleasedByOutput(
const sp<IGraphicBufferProducer>& from) {
ATRACE_CALL();
- Mutex::Autolock lock(mMutex);
-
- onBufferReleasedByOutputLocked(from);
-}
-
-void Camera3StreamSplitter::onBufferReleasedByOutputLocked(
- const sp<IGraphicBufferProducer>& from) {
- ATRACE_CALL();
- sp<GraphicBuffer> buffer;
sp<Fence> fence;
- status_t res = from->detachNextBuffer(&buffer, &fence);
- if (res == NO_INIT) {
- // If we just discovered that this output has been abandoned, note that,
- // but we can't do anything else, since buffer is invalid
- onAbandonedLocked();
- return;
- } else if (res == NO_MEMORY) {
- SP_LOGV("%s: No free buffers", __FUNCTION__);
- return;
- } else if (res != OK) {
- SP_LOGE("%s: detaching buffer from output failed (%d)", __FUNCTION__, res);
+
+ int slot = BufferItem::INVALID_BUFFER_SLOT;
+ auto res = from->dequeueBuffer(&slot, &fence, mWidth, mHeight, mFormat, mProducerUsage,
+ nullptr, nullptr);
+ Mutex::Autolock lock(mMutex);
+ handleOutputDequeueStatusLocked(res, slot);
+ if (res != OK) {
return;
}
+ size_t surfaceId = 0;
+ bool found = false;
+ for (const auto& it : mOutputs) {
+ if (it.second == from) {
+ found = true;
+ surfaceId = it.first;
+ break;
+ }
+ }
+ if (!found) {
+ SP_LOGV("%s: output surface not registered anymore!", __FUNCTION__);
+ return;
+ }
+
+ returnOutputBufferLocked(fence, from, surfaceId, slot);
+}
+
+void Camera3StreamSplitter::onBufferReplacedLocked(
+ const sp<IGraphicBufferProducer>& from, size_t surfaceId) {
+ ATRACE_CALL();
+ sp<Fence> fence;
+
+ int slot = BufferItem::INVALID_BUFFER_SLOT;
+ auto res = from->dequeueBuffer(&slot, &fence, mWidth, mHeight, mFormat, mProducerUsage,
+ nullptr, nullptr);
+ handleOutputDequeueStatusLocked(res, slot);
+ if (res != OK) {
+ return;
+ }
+
+ returnOutputBufferLocked(fence, from, surfaceId, slot);
+}
+
+void Camera3StreamSplitter::returnOutputBufferLocked(const sp<Fence>& fence,
+ const sp<IGraphicBufferProducer>& from, size_t surfaceId, int slot) {
+ sp<GraphicBuffer> buffer;
+
+ if (mOutputSlots[from] == nullptr) {
+ //The output surface was likely removed by the client.
+ return;
+ }
+
+ auto& outputSlots = *mOutputSlots[from];
+ buffer = outputSlots[slot];
BufferTracker& tracker = *(mBuffers[buffer->getId()]);
// Merge the release fence of the incoming buffer so that the fence we send
// back to the input includes all of the outputs' fences
if (fence != nullptr && fence->isValid()) {
tracker.mergeFence(fence);
}
- SP_LOGV("detached buffer %" PRId64 " %p from output %p",
- buffer->getId(), buffer.get(), from.get());
+
+ auto detachBuffer = mDetachedBuffers.find(buffer->getId());
+ bool detach = (detachBuffer != mDetachedBuffers.end());
+ if (detach) {
+ auto res = from->detachBuffer(slot);
+ if (res == NO_ERROR) {
+ outputSlots[slot] = nullptr;
+ } else {
+ SP_LOGE("%s: detach buffer from output failed (%d)", __FUNCTION__, res);
+ }
+ }
// Check to see if this is the last outstanding reference to this buffer
- decrementBufRefCountLocked(buffer->getId(), from);
+ decrementBufRefCountLocked(buffer->getId(), surfaceId);
+}
+
+void Camera3StreamSplitter::handleOutputDequeueStatusLocked(status_t res, int slot) {
+ if (res == NO_INIT) {
+ // If we just discovered that this output has been abandoned, note that,
+ // but we can't do anything else, since buffer is invalid
+ onAbandonedLocked();
+ } else if (res == IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) {
+ SP_LOGE("%s: Producer needs to re-allocate buffer!", __FUNCTION__);
+ SP_LOGE("%s: This should not happen with buffer allocation disabled!", __FUNCTION__);
+ } else if (res == IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
+ SP_LOGE("%s: All slot->buffer mapping should be released!", __FUNCTION__);
+ SP_LOGE("%s: This should not happen with buffer allocation disabled!", __FUNCTION__);
+ } else if (res == NO_MEMORY) {
+ SP_LOGE("%s: No free buffers", __FUNCTION__);
+ } else if (res == WOULD_BLOCK) {
+ SP_LOGE("%s: Dequeue call will block", __FUNCTION__);
+ } else if (res != OK || (slot == BufferItem::INVALID_BUFFER_SLOT)) {
+ SP_LOGE("%s: dequeue buffer from output failed (%d)", __FUNCTION__, res);
+ }
}
void Camera3StreamSplitter::onAbandonedLocked() {
@@ -501,27 +715,11 @@
}
}
- SP_LOGE("%s: Cannot find slot for gb %p on output %p", __FUNCTION__, gb.get(),
+ SP_LOGV("%s: Cannot find slot for gb %p on output %p", __FUNCTION__, gb.get(),
gbp.get());
return BufferItem::INVALID_BUFFER_SLOT;
}
-status_t Camera3StreamSplitter::removeSlotForOutputLocked(const sp<IGraphicBufferProducer>& gbp,
- const sp<GraphicBuffer>& gb) {
- auto& outputSlots = *mOutputSlots[gbp];
-
- for (size_t i = 0; i < outputSlots.size(); i++) {
- if (outputSlots[i] == gb) {
- outputSlots[i].clear();
- return NO_ERROR;
- }
- }
-
- SP_LOGE("%s: Cannot find slot for gb %p on output %p", __FUNCTION__, gb.get(),
- gbp.get());
- return BAD_VALUE;
-}
-
Camera3StreamSplitter::OutputListener::OutputListener(
wp<Camera3StreamSplitter> splitter,
wp<IGraphicBufferProducer> output)
@@ -553,7 +751,14 @@
mMergedFence = Fence::merge(String8("Camera3StreamSplitter"), mMergedFence, with);
}
-size_t Camera3StreamSplitter::BufferTracker::decrementReferenceCountLocked() {
+size_t Camera3StreamSplitter::BufferTracker::decrementReferenceCountLocked(size_t surfaceId) {
+ const auto& it = std::find(mRequestedSurfaces.begin(), mRequestedSurfaces.end(), surfaceId);
+ if (it == mRequestedSurfaces.end()) {
+ return mReferenceCount;
+ } else {
+ mRequestedSurfaces.erase(it);
+ }
+
if (mReferenceCount > 0)
--mReferenceCount;
return mReferenceCount;
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
index 3b8839e..fea1bdb 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_SERVERS_STREAMSPLITTER_H
#define ANDROID_SERVERS_STREAMSPLITTER_H
+#include <unordered_set>
+
#include <gui/IConsumerListener.h>
#include <gui/IProducerListener.h>
#include <gui/BufferItemConsumer.h>
@@ -51,22 +53,25 @@
// Connect to the stream splitter by creating buffer queue and connecting it
// with output surfaces.
- status_t connect(const std::vector<sp<Surface> >& surfaces,
- uint64_t consumerUsage, size_t halMaxBuffers,
- sp<Surface>* consumer);
+ status_t connect(const std::unordered_map<size_t, sp<Surface>> &surfaces,
+ uint64_t consumerUsage, uint64_t producerUsage, size_t halMaxBuffers, uint32_t width,
+ uint32_t height, android::PixelFormat format, sp<Surface>* consumer);
// addOutput adds an output BufferQueue to the splitter. The splitter
// connects to outputQueue as a CPU producer, and any buffers queued
- // to the input will be queued to each output. It is assumed that all of the
- // outputs are added before any buffers are queued on the input. If any
- // output is abandoned by its consumer, the splitter will abandon its input
- // queue (see onAbandoned).
+ // to the input will be queued to each output. If any output is abandoned
+ // by its consumer, the splitter will abandon its input queue (see onAbandoned).
//
// A return value other than NO_ERROR means that an error has occurred and
// outputQueue has not been added to the splitter. BAD_VALUE is returned if
// outputQueue is NULL. See IGraphicBufferProducer::connect for explanations
// of other error codes.
- status_t addOutput(const sp<Surface>& outputQueue);
+ status_t addOutput(size_t surfaceId, const sp<Surface>& outputQueue);
+
+ // removeOutput will remove a BufferQueue that was previously added to
+ // the splitter outputs. Any pending buffers in the BufferQueue will be
+ // reclaimed.
+ status_t removeOutput(size_t surfaceId);
// Notification that the graphic buffer has been released to the input
// BufferQueue. The buffer should be reused by the camera device instead of
@@ -117,10 +122,8 @@
// onFrameAvailable call to proceed.
void onBufferReleasedByOutput(const sp<IGraphicBufferProducer>& from);
- // This is the implementation of onBufferReleasedByOutput without the mutex locked.
- // It could either be called from onBufferReleasedByOutput or from
- // onFrameAvailable when a buffer in the async buffer queue is overwritten.
- void onBufferReleasedByOutputLocked(const sp<IGraphicBufferProducer>& from);
+ // Called by outputBufferLocked when a buffer in the async buffer queue gets replaced.
+ void onBufferReplacedLocked(const sp<IGraphicBufferProducer>& from, size_t surfaceId);
// When this is called, the splitter disconnects from (i.e., abandons) its
// input queue and signals any waiting onFrameAvailable calls to wake up.
@@ -131,7 +134,14 @@
// Decrement the buffer's reference count. Once the reference count becomes
// 0, return the buffer back to the input BufferQueue.
- void decrementBufRefCountLocked(uint64_t id, const sp<IGraphicBufferProducer>& from);
+ void decrementBufRefCountLocked(uint64_t id, size_t surfaceId);
+
+ // Check for and handle any output surface dequeue errors.
+ void handleOutputDequeueStatusLocked(status_t res, int slot);
+
+ // Handles released output surface buffers.
+ void returnOutputBufferLocked(const sp<Fence>& fence, const sp<IGraphicBufferProducer>& from,
+ size_t surfaceId, int slot);
// This is a thin wrapper class that lets us determine which BufferQueue
// the IProducerListener::onBufferReleased callback is associated with. We
@@ -168,7 +178,7 @@
// Returns the new value
// Only called while mMutex is held
- size_t decrementReferenceCountLocked();
+ size_t decrementReferenceCountLocked(size_t surfaceId);
const std::vector<size_t> requestedSurfaces() const { return mRequestedSurfaces; }
@@ -191,13 +201,15 @@
// Must be accessed through RefBase
virtual ~Camera3StreamSplitter();
- status_t addOutputLocked(const sp<Surface>& outputQueue);
+ status_t addOutputLocked(size_t surfaceId, const sp<Surface>& outputQueue);
+
+ status_t removeOutputLocked(size_t surfaceId);
// Send a buffer to particular output, and increment the reference count
// of the buffer. If this output is abandoned, the buffer's reference count
// won't be incremented.
status_t outputBufferLocked(const sp<IGraphicBufferProducer>& output,
- const BufferItem& bufferItem);
+ const BufferItem& bufferItem, size_t surfaceId);
// Get unique name for the buffer queue consumer
String8 getUniqueConsumerName();
@@ -205,14 +217,14 @@
// Helper function to get the BufferQueue slot where a particular buffer is attached to.
int getSlotForOutputLocked(const sp<IGraphicBufferProducer>& gbp,
const sp<GraphicBuffer>& gb);
- // Helper function to remove the buffer from the BufferQueue slot
- status_t removeSlotForOutputLocked(const sp<IGraphicBufferProducer>& gbp,
- const sp<GraphicBuffer>& gb);
-
// Sum of max consumer buffers for all outputs
size_t mMaxConsumerBuffers = 0;
size_t mMaxHalBuffers = 0;
+ uint32_t mWidth = 0;
+ uint32_t mHeight = 0;
+ android::PixelFormat mFormat = android::PIXEL_FORMAT_NONE;
+ uint64_t mProducerUsage = 0;
static const nsecs_t kDequeueBufferTimeout = s2ns(1); // 1 sec
@@ -223,7 +235,15 @@
sp<BufferItemConsumer> mBufferItemConsumer;
sp<Surface> mSurface;
- std::vector<sp<IGraphicBufferProducer> > mOutputs;
+ //Map input buffer queue slots -> cached buffer items
+ std::unordered_map<uint64_t, BufferItem> mInputSlots;
+
+ //Map surface ids -> gbp outputs
+ std::unordered_map<int, sp<IGraphicBufferProducer> > mOutputs;
+
+ //Map surface ids -> consumer buffer count
+ std::unordered_map<int, size_t > mConsumerBufferCount;
+
// Map of GraphicBuffer IDs (GraphicBuffer::getId()) to buffer tracking
// objects (which are mostly for counting how many outputs have released the
// buffer, but also contain merged release fences).
@@ -242,6 +262,10 @@
std::unordered_map<sp<IGraphicBufferProducer>, std::unique_ptr<OutputSlots>,
GBPHash> mOutputSlots;
+ //A set of buffers that could potentially stay in some of the outputs after removal
+ //and therefore should be detached from the input queue.
+ std::unordered_set<uint64_t> mDetachedBuffers;
+
// Latest onFrameAvailable return value
std::atomic<status_t> mOnFrameAvailableRes{0};
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
new file mode 100644
index 0000000..9229079
--- /dev/null
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DistMapper"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <cmath>
+
+#include "device3/DistortionMapper.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Metadata keys to correct when adjusting coordinates for distortion correction
+ */
+
+// Both capture request and result
+constexpr std::array<uint32_t, 3> DistortionMapper::kMeteringRegionsToCorrect = {
+ ANDROID_CONTROL_AF_REGIONS,
+ ANDROID_CONTROL_AE_REGIONS,
+ ANDROID_CONTROL_AWB_REGIONS
+};
+
+// Only capture request
+constexpr std::array<uint32_t, 1> DistortionMapper::kRequestRectsToCorrect = {
+ ANDROID_SCALER_CROP_REGION,
+};
+
+// Only for capture result
+constexpr std::array<uint32_t, 2> DistortionMapper::kResultRectsToCorrect = {
+ ANDROID_SCALER_CROP_REGION,
+ ANDROID_STATISTICS_FACE_RECTANGLES
+};
+
+// Only for capture result
+constexpr std::array<uint32_t, 1> DistortionMapper::kResultPointsToCorrect = {
+ ANDROID_STATISTICS_FACE_LANDMARKS,
+};
+
+
+DistortionMapper::DistortionMapper() : mValidMapping(false), mValidGrids(false) {
+}
+
+bool DistortionMapper::isDistortionSupported(const CameraMetadata &result) {
+ bool isDistortionCorrectionSupported = false;
+ camera_metadata_ro_entry_t distortionCorrectionModes =
+ result.find(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES);
+ for (size_t i = 0; i < distortionCorrectionModes.count; i++) {
+ if (distortionCorrectionModes.data.u8[i] !=
+ ANDROID_DISTORTION_CORRECTION_MODE_OFF) {
+ isDistortionCorrectionSupported = true;
+ break;
+ }
+ }
+ return isDistortionCorrectionSupported;
+}
+
+status_t DistortionMapper::setupStaticInfo(const CameraMetadata &deviceInfo) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ camera_metadata_ro_entry_t array;
+
+ array = deviceInfo.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
+ if (array.count != 4) return BAD_VALUE;
+
+ mArrayWidth = array.data.i32[2];
+ mArrayHeight = array.data.i32[3];
+
+ return updateCalibration(deviceInfo);
+}
+
+bool DistortionMapper::calibrationValid() const {
+ std::lock_guard<std::mutex> lock(mMutex);
+
+ return mValidMapping;
+}
+
+status_t DistortionMapper::correctCaptureRequest(CameraMetadata *request) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ status_t res;
+
+ if (!mValidMapping) return OK;
+
+ camera_metadata_entry_t e;
+ e = request->find(ANDROID_DISTORTION_CORRECTION_MODE);
+ if (e.count != 0 && e.data.u8[0] != ANDROID_DISTORTION_CORRECTION_MODE_OFF) {
+ for (auto region : kMeteringRegionsToCorrect) {
+ e = request->find(region);
+ for (size_t j = 0; j < e.count; j += 5) {
+ res = mapCorrectedToRaw(e.data.i32 + j, 2);
+ if (res != OK) return res;
+ }
+ }
+ for (auto rect : kRequestRectsToCorrect) {
+ e = request->find(rect);
+ res = mapCorrectedRectToRaw(e.data.i32, e.count / 4);
+ if (res != OK) return res;
+ }
+ }
+
+ return OK;
+}
+
+status_t DistortionMapper::correctCaptureResult(CameraMetadata *result) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ status_t res;
+
+ if (!mValidMapping) return OK;
+
+ res = updateCalibration(*result);
+ if (res != OK) {
+ ALOGE("Failure to update lens calibration information");
+ return INVALID_OPERATION;
+ }
+
+ camera_metadata_entry_t e;
+ e = result->find(ANDROID_DISTORTION_CORRECTION_MODE);
+ if (e.count != 0 && e.data.u8[0] != ANDROID_DISTORTION_CORRECTION_MODE_OFF) {
+ for (auto region : kMeteringRegionsToCorrect) {
+ e = result->find(region);
+ for (size_t j = 0; j < e.count; j += 5) {
+ res = mapRawToCorrected(e.data.i32 + j, 2);
+ if (res != OK) return res;
+ }
+ }
+ for (auto rect : kResultRectsToCorrect) {
+ e = result->find(rect);
+ res = mapRawRectToCorrected(e.data.i32, e.count / 4);
+ if (res != OK) return res;
+ }
+ for (auto pts : kResultPointsToCorrect) {
+ e = result->find(pts);
+ res = mapRawToCorrected(e.data.i32, e.count / 2);
+ if (res != OK) return res;
+ }
+ }
+
+ return OK;
+}
+
+// Utility methods; not guarded by mutex
+
+status_t DistortionMapper::updateCalibration(const CameraMetadata &result) {
+ camera_metadata_ro_entry_t calib, distortion;
+
+ calib = result.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
+ distortion = result.find(ANDROID_LENS_DISTORTION);
+
+ if (calib.count != 5) return BAD_VALUE;
+ if (distortion.count != 5) return BAD_VALUE;
+
+ // Skip redoing work if no change to calibration fields
+ if (mValidMapping &&
+ mFx == calib.data.f[0] &&
+ mFy == calib.data.f[1] &&
+ mCx == calib.data.f[2] &&
+ mCy == calib.data.f[3] &&
+ mS == calib.data.f[4]) {
+ bool noChange = true;
+ for (size_t i = 0; i < distortion.count; i++) {
+ if (mK[i] != distortion.data.f[i]) {
+ noChange = false;
+ break;
+ }
+ }
+ if (noChange) return OK;
+ }
+
+ mFx = calib.data.f[0];
+ mFy = calib.data.f[1];
+ mCx = calib.data.f[2];
+ mCy = calib.data.f[3];
+ mS = calib.data.f[4];
+
+ mInvFx = 1 / mFx;
+ mInvFy = 1 / mFy;
+
+ for (size_t i = 0; i < distortion.count; i++) {
+ mK[i] = distortion.data.f[i];
+ }
+
+ mValidMapping = true;
+ // Need to recalculate grid
+ mValidGrids = false;
+
+ return OK;
+}
+
+status_t DistortionMapper::mapRawToCorrected(int32_t *coordPairs, int coordCount) {
+ if (!mValidMapping) return INVALID_OPERATION;
+
+ if (!mValidGrids) {
+ status_t res = buildGrids();
+ if (res != OK) return res;
+ }
+
+ for (int i = 0; i < coordCount * 2; i += 2) {
+ const GridQuad *quad = findEnclosingQuad(coordPairs + i, mDistortedGrid);
+ if (quad == nullptr) {
+ ALOGE("Raw to corrected mapping failure: No quad found");
+ return INVALID_OPERATION;
+ }
+ ALOGV("src xy: %d, %d, enclosing quad: (%f, %f), (%f, %f), (%f, %f), (%f, %f)",
+ coordPairs[i], coordPairs[i+1],
+ quad->coords[0], quad->coords[1],
+ quad->coords[2], quad->coords[3],
+ quad->coords[4], quad->coords[5],
+ quad->coords[6], quad->coords[7]);
+
+ const GridQuad *corrQuad = quad->src;
+ if (corrQuad == nullptr) {
+ ALOGE("Raw to corrected mapping failure: No src quad found");
+ return INVALID_OPERATION;
+ }
+ ALOGV(" corr quad: (%f, %f), (%f, %f), (%f, %f), (%f, %f)",
+ corrQuad->coords[0], corrQuad->coords[1],
+ corrQuad->coords[2], corrQuad->coords[3],
+ corrQuad->coords[4], corrQuad->coords[5],
+ corrQuad->coords[6], corrQuad->coords[7]);
+
+ float u = calculateUorV(coordPairs + i, *quad, /*calculateU*/ true);
+ float v = calculateUorV(coordPairs + i, *quad, /*calculateU*/ false);
+
+ ALOGV("uv: %f, %f", u, v);
+
+ // Interpolate along the top edge of the corrected quad (which is axis-aligned) for x
+ float corrX = corrQuad->coords[0] + u * (corrQuad->coords[2] - corrQuad->coords[0]);
+ // Interpolate along the left edge of the corrected quad (which is axis-aligned) for y
+ float corrY = corrQuad->coords[1] + v * (corrQuad->coords[7] - corrQuad->coords[1]);
+
+ coordPairs[i] = static_cast<int32_t>(std::round(corrX));
+ coordPairs[i + 1] = static_cast<int32_t>(std::round(corrY));
+ }
+
+ return OK;
+}
+
+status_t DistortionMapper::mapRawRectToCorrected(int32_t *rects, int rectCount) {
+ if (!mValidMapping) return INVALID_OPERATION;
+ for (int i = 0; i < rectCount * 4; i += 4) {
+ // Map from (l, t, width, height) to (l, t, r, b)
+ int32_t coords[4] = {
+ rects[i],
+ rects[i + 1],
+ rects[i] + rects[i + 2],
+ rects[i + 1] + rects[i + 3]
+ };
+
+ mapRawToCorrected(coords, 2);
+
+ // Map back to (l, t, width, height)
+ rects[i] = coords[0];
+ rects[i + 1] = coords[1];
+ rects[i + 2] = coords[2] - coords[0];
+ rects[i + 3] = coords[3] - coords[1];
+ }
+
+ return OK;
+}
+
+template<typename T>
+status_t DistortionMapper::mapCorrectedToRaw(T *coordPairs, int coordCount) const {
+ if (!mValidMapping) return INVALID_OPERATION;
+
+ for (int i = 0; i < coordCount * 2; i += 2) {
+ // Move to normalized space
+ float ywi = (coordPairs[i + 1] - mCy) * mInvFy;
+ float xwi = (coordPairs[i] - mCx - mS * ywi) * mInvFx;
+ // Apply distortion model to calculate raw image coordinates
+ float rSq = xwi * xwi + ywi * ywi;
+ float Fr = 1.f + (mK[0] * rSq) + (mK[1] * rSq * rSq) + (mK[2] * rSq * rSq * rSq);
+ float xc = xwi * Fr + (mK[3] * 2 * xwi * ywi) + mK[4] * (rSq + 2 * xwi * xwi);
+ float yc = ywi * Fr + (mK[4] * 2 * xwi * ywi) + mK[3] * (rSq + 2 * ywi * ywi);
+ // Move back to image space
+ float xr = mFx * xc + mS * yc + mCx;
+ float yr = mFy * yc + mCy;
+
+ coordPairs[i] = static_cast<T>(std::round(xr));
+ coordPairs[i + 1] = static_cast<T>(std::round(yr));
+ }
+
+ return OK;
+}
+
+template status_t DistortionMapper::mapCorrectedToRaw(int32_t*, int) const;
+template status_t DistortionMapper::mapCorrectedToRaw(float*, int) const;
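For reference, the per-point mapping implemented by mapCorrectedToRaw() above is a radial plus tangential lens distortion model. Writing the intrinsic calibration as (f_x, f_y, c_x, c_y, s) and naming the coefficients as the code uses them (k_1..k_3 = mK[0..2] for the radial terms, k_4 = mK[3] and k_5 = mK[4] for the tangential terms; this naming is only for the illustration), the computation is:

\begin{align*}
y_w &= \frac{y - c_y}{f_y}, \qquad x_w = \frac{x - c_x - s\,y_w}{f_x}, \qquad r^2 = x_w^2 + y_w^2 \\
F &= 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \\
x_c &= x_w F + 2 k_4 x_w y_w + k_5 (r^2 + 2 x_w^2), \qquad
y_c = y_w F + 2 k_5 x_w y_w + k_4 (r^2 + 2 y_w^2) \\
x_r &= f_x x_c + s\,y_c + c_x, \qquad y_r = f_y y_c + c_y
\end{align*}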
+
+status_t DistortionMapper::mapCorrectedRectToRaw(int32_t *rects, int rectCount) const {
+ if (!mValidMapping) return INVALID_OPERATION;
+
+ for (int i = 0; i < rectCount * 4; i += 4) {
+ // Map from (l, t, width, height) to (l, t, r, b)
+ int32_t coords[4] = {
+ rects[i],
+ rects[i + 1],
+ rects[i] + rects[i + 2],
+ rects[i + 1] + rects[i + 3]
+ };
+
+ mapCorrectedToRaw(coords, 2);
+
+ // Map back to (l, t, width, height)
+ rects[i] = coords[0];
+ rects[i + 1] = coords[1];
+ rects[i + 2] = coords[2] - coords[0];
+ rects[i + 3] = coords[3] - coords[1];
+ }
+
+ return OK;
+}
+
+status_t DistortionMapper::buildGrids() {
+ if (mCorrectedGrid.size() != kGridSize * kGridSize) {
+ mCorrectedGrid.resize(kGridSize * kGridSize);
+ mDistortedGrid.resize(kGridSize * kGridSize);
+ }
+
+ float gridMargin = mArrayWidth * kGridMargin;
+ float gridSpacingX = (mArrayWidth + 2 * gridMargin) / kGridSize;
+ float gridSpacingY = (mArrayHeight + 2 * gridMargin) / kGridSize;
+
+ size_t index = 0;
+ float x = -gridMargin;
+ for (size_t i = 0; i < kGridSize; i++, x += gridSpacingX) {
+ float y = -gridMargin;
+ for (size_t j = 0; j < kGridSize; j++, y += gridSpacingY, index++) {
+ mCorrectedGrid[index].src = nullptr;
+ mCorrectedGrid[index].coords = {
+ x, y,
+ x + gridSpacingX, y,
+ x + gridSpacingX, y + gridSpacingY,
+ x, y + gridSpacingY
+ };
+ mDistortedGrid[index].src = &mCorrectedGrid[index];
+ mDistortedGrid[index].coords = mCorrectedGrid[index].coords;
+ status_t res = mapCorrectedToRaw(mDistortedGrid[index].coords.data(), 4);
+ if (res != OK) return res;
+ }
+ }
+
+ mValidGrids = true;
+ return OK;
+}
+
+const DistortionMapper::GridQuad* DistortionMapper::findEnclosingQuad(
+ const int32_t pt[2], const std::vector<GridQuad>& grid) {
+ const float x = pt[0];
+ const float y = pt[1];
+
+ for (const GridQuad& quad : grid) {
+ const float &x1 = quad.coords[0];
+ const float &y1 = quad.coords[1];
+ const float &x2 = quad.coords[2];
+ const float &y2 = quad.coords[3];
+ const float &x3 = quad.coords[4];
+ const float &y3 = quad.coords[5];
+ const float &x4 = quad.coords[6];
+ const float &y4 = quad.coords[7];
+
+ // Point-in-quad test:
+
+ // Quad has corners P1-P4; if P is within the quad, then it is on the same side of all the
+ // edges (or on top of one of the edges or corners), traversed in a consistent direction.
+ // This means that the cross product of edge En = Pn->P(n+1 mod 4) and line Ep = Pn->P must
+ // have the same sign (or be zero) for all edges.
+ // For clockwise traversal, the sign should be negative or zero for Ep x En, indicating that
+ // En is to the left of Ep, or overlapping.
+ float s1 = (x - x1) * (y2 - y1) - (y - y1) * (x2 - x1);
+ if (s1 > 0) continue;
+ float s2 = (x - x2) * (y3 - y2) - (y - y2) * (x3 - x2);
+ if (s2 > 0) continue;
+ float s3 = (x - x3) * (y4 - y3) - (y - y3) * (x4 - x3);
+ if (s3 > 0) continue;
+ float s4 = (x - x4) * (y1 - y4) - (y - y4) * (x1 - x4);
+ if (s4 > 0) continue;
+
+ return &quad;
+ }
+ return nullptr;
+}
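A minimal standalone sketch of the same sign-consistency point-in-quad test (illustrative only, not part of this change; the insideQuad helper and the unit-square data are made up for the example):

#include <array>
#include <cstdio>

// Returns true if (x, y) lies inside or on the quad whose corners q are listed
// clockwise (in image coordinates) as x1,y1,...,x4,y4 -- the same corner order
// buildGrids() produces.
static bool insideQuad(float x, float y, const std::array<float, 8>& q) {
    for (int n = 0; n < 4; n++) {
        float xn = q[2 * n],           yn = q[2 * n + 1];
        float xm = q[(2 * n + 2) % 8], ym = q[(2 * n + 3) % 8];
        // Cross product of Pn->P with Pn->P(n+1); a positive value means P is
        // on the wrong side of this edge, so it cannot be inside the quad.
        if ((x - xn) * (ym - yn) - (y - yn) * (xm - xn) > 0) return false;
    }
    return true;
}

int main() {
    std::array<float, 8> unitSquare = {0, 0, 1, 0, 1, 1, 0, 1};
    std::printf("(0.5, 0.5) inside: %d\n", insideQuad(0.5f, 0.5f, unitSquare)); // prints 1
    std::printf("(2.0, 0.5) inside: %d\n", insideQuad(2.0f, 0.5f, unitSquare)); // prints 0
    return 0;
}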
+
+float DistortionMapper::calculateUorV(const int32_t pt[2], const GridQuad& quad, bool calculateU) {
+ const float x = pt[0];
+ const float y = pt[1];
+ const float &x1 = quad.coords[0];
+ const float &y1 = quad.coords[1];
+ const float &x2 = calculateU ? quad.coords[2] : quad.coords[6];
+ const float &y2 = calculateU ? quad.coords[3] : quad.coords[7];
+ const float &x3 = quad.coords[4];
+ const float &y3 = quad.coords[5];
+ const float &x4 = calculateU ? quad.coords[6] : quad.coords[2];
+ const float &y4 = calculateU ? quad.coords[7] : quad.coords[3];
+
+ float a = (x1 - x2) * (y1 - y2 + y3 - y4) - (y1 - y2) * (x1 - x2 + x3 - x4);
+ float b = (x - x1) * (y1 - y2 + y3 - y4) + (x1 - x2) * (y4 - y1) -
+ (y - y1) * (x1 - x2 + x3 - x4) - (y1 - y2) * (x4 - x1);
+ float c = (x - x1) * (y4 - y1) - (y - y1) * (x4 - x1);
+
+ if (a == 0) {
+ // One solution may happen if edges are parallel
+ float u0 = -c / b;
+ ALOGV("u0: %.9g, b: %f, c: %f", u0, b, c);
+ return u0;
+ }
+
+ float det = b * b - 4 * a * c;
+ if (det < 0) {
+ // Sanity check - should not happen if pt is within the quad
+ ALOGE("Bad determinant! a: %f, b: %f, c: %f, det: %f", a,b,c,det);
+ return -1;
+ }
+
+ // Select more numerically stable solution
+ float sqdet = b > 0 ? -std::sqrt(det) : std::sqrt(det);
+
+ float u1 = (-b + sqdet) / (2 * a);
+ ALOGV("u1: %.9g", u1);
+ if (0 - kFloatFuzz < u1 && u1 < 1 + kFloatFuzz) return u1;
+
+ float u2 = c / (a * u1);
+ ALOGV("u2: %.9g", u2);
+ if (0 - kFloatFuzz < u2 && u2 < 1 + kFloatFuzz) return u2;
+
+ // Last resort, return the smaller-magnitude solution
+ return fabs(u1) < fabs(u2) ? u1 : u2;
+}
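The root selection at the end of calculateUorV() above is the standard numerically stable recipe for a quadratic a u^2 + b u + c = 0: first take the root whose numerator avoids cancellation (subtract the square root when b > 0, add it otherwise), then recover the second root from the product of the roots (Vieta) rather than from the cancellation-prone difference:

u_1 = \frac{-b \mp \sqrt{b^2 - 4ac}}{2a} \quad (\text{sign chosen opposite to } b), \qquad
u_1 u_2 = \frac{c}{a} \;\Rightarrow\; u_2 = \frac{c}{a\,u_1}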
+
+} // namespace camera3
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.h b/services/camera/libcameraservice/device3/DistortionMapper.h
new file mode 100644
index 0000000..c6d715b
--- /dev/null
+++ b/services/camera/libcameraservice/device3/DistortionMapper.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_DISTORTIONMAPPER_H
+#define ANDROID_SERVERS_DISTORTIONMAPPER_H
+
+#include <utils/Errors.h>
+#include <array>
+#include <mutex>
+
+#include "camera/CameraMetadata.h"
+
+namespace android {
+
+namespace camera3 {
+
+/**
+ * Utilities to transform between raw (distorted) and warped (corrected) coordinate systems
+ * for cameras that support geometric distortion
+ */
+class DistortionMapper {
+ public:
+ DistortionMapper();
+
+ /**
+ * Check whether distortion correction is supported by the camera HAL
+ */
+ static bool isDistortionSupported(const CameraMetadata &deviceInfo);
+
+ /**
+ * Update static lens calibration info from camera characteristics
+ */
+ status_t setupStaticInfo(const CameraMetadata &deviceInfo);
+
+ /**
+ * Return whether distortion correction can be applied currently
+ */
+ bool calibrationValid() const;
+
+ /**
+ * Correct capture request if distortion correction is enabled
+ */
+ status_t correctCaptureRequest(CameraMetadata *request);
+
+ /**
+ * Correct capture result if distortion correction is enabled
+ */
+ status_t correctCaptureResult(CameraMetadata *result);
+
+
+ public: // Visible for testing. Not guarded by mutex; do not use concurrently
+ /**
+ * Update lens calibration from capture results or equivalent
+ */
+ status_t updateCalibration(const CameraMetadata &result);
+
+ /**
+ * Transform from distorted (original) to corrected (warped) coordinates.
+ * Coordinates are transformed in-place
+ *
+ * coordPairs: A pointer to an array of consecutive (x,y) points
+ * coordCount: Number of (x,y) pairs to transform
+ */
+ status_t mapRawToCorrected(int32_t *coordPairs, int coordCount);
+
+ /**
+ * Transform from distorted (original) to corrected (warped) coordinates.
+ * Coordinates are transformed in-place
+ *
+ * rects: A pointer to an array of consecutive (x, y, w, h) rectangles
+ * rectCount: Number of rectangles to transform
+ */
+ status_t mapRawRectToCorrected(int32_t *rects, int rectCount);
+
+ /**
+ * Transform from corrected (warped) to distorted (original) coordinates.
+ * Coordinates are transformed in-place
+ *
+ * coordPairs: A pointer to an array of consecutive (x,y) points
+ * coordCount: Number of (x,y) pairs to transform
+ */
+ template<typename T>
+ status_t mapCorrectedToRaw(T* coordPairs, int coordCount) const;
+
+ /**
+ * Transform from corrected (warped) to distorted (original) coordinates.
+ * Coordinates are transformed in-place
+ *
+ * rects: A pointer to an array of consecutive (x,y, w, h) rectangles
+ * rectCount: Number of rectangles to transform
+ */
+ status_t mapCorrectedRectToRaw(int32_t *rects, int rectCount) const;
+
+ struct GridQuad {
+ // Source grid quad, or null
+ const GridQuad *src;
+ // x,y coordinates of corners, in
+ // clockwise order
+ std::array<float, 8> coords;
+ };
+
+ // Find which grid quad encloses the point; returns null if none do
+ static const GridQuad* findEnclosingQuad(
+ const int32_t pt[2], const std::vector<GridQuad>& grid);
+
+ // Calculate 'horizontal' interpolation coordinate for the point and the quad
+ // Assumes the point P is within the quad Q.
+ // Given quad with points P1-P4, and edges E12-E41, and considering the edge segments as
+ // functions of U: E12(u), where E12(0) = P1 and E12(1) = P2, then we want to find a u
+ // such that the edge E12(u) -> E43(u) contains point P.
+ // This can be determined by checking if the cross product of vector [E12(u)-E43(u)] and
+ // vector [E12(u)-P] is zero. Solving the equation
+ // [E12(u)-E43(u)] x [E12(u)-P] = 0 gives a quadratic equation in u; the solution in the range
+ // 0 to 1 is the one chosen.
+ // If calculateU is true, then an interpolation coordinate for edges E12 and E43 is found;
+ // if it is false, then an interpolation coordinate for edges E14 and E23 is found.
+ static float calculateUorV(const int32_t pt[2], const GridQuad& quad, bool calculateU);
+
+ private:
+ mutable std::mutex mMutex;
+
+ // Number of quads in each dimension of the mapping grids
+ constexpr static size_t kGridSize = 15;
+ // Margin to expand the grid by to ensure it doesn't clip the domain
+ constexpr static float kGridMargin = 0.05f;
+ // Fuzziness for float inequality tests
+ constexpr static float kFloatFuzz = 1e-4;
+
+ // Metadata key lists to correct
+
+ // Both capture request and result
+ static const std::array<uint32_t, 3> kMeteringRegionsToCorrect;
+
+ // Only capture request
+ static const std::array<uint32_t, 1> kRequestRectsToCorrect;
+
+ // Only capture result
+ static const std::array<uint32_t, 2> kResultRectsToCorrect;
+
+ // Only for capture results
+ static const std::array<uint32_t, 1> kResultPointsToCorrect;
+
+ // Utility to create reverse mapping grids
+ status_t buildGrids();
+
+
+ bool mValidMapping;
+ bool mValidGrids;
+
+ // intrinsic parameters, in pixels
+ float mFx, mFy, mCx, mCy, mS;
+ // pre-calculated inverses for speed
+ float mInvFx, mInvFy;
+ // radial/tangential distortion parameters
+ float mK[5];
+
+ // pre-correction active array dimensions
+ int mArrayWidth, mArrayHeight;
+
+ std::vector<GridQuad> mCorrectedGrid;
+ std::vector<GridQuad> mDistortedGrid;
+
+}; // class DistortionMapper
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
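A minimal usage sketch of the DistortionMapper API declared above (illustrative only; the applyDistortionCorrection wrapper and its error handling are assumptions, while the call order follows the header comments):

#include <utils/Errors.h>

#include "device3/DistortionMapper.h"

using android::CameraMetadata;
using android::camera3::DistortionMapper;

// Hypothetical caller: set up the mapper once from static camera info, then
// map each request's regions to raw coordinates and each result back.
void applyDistortionCorrection(const CameraMetadata& staticInfo,
                               CameraMetadata* request,
                               CameraMetadata* result) {
    if (!DistortionMapper::isDistortionSupported(staticInfo)) {
        return; // HAL only advertises DISTORTION_CORRECTION_MODE_OFF
    }
    DistortionMapper mapper;
    if (mapper.setupStaticInfo(staticInfo) != android::OK) {
        return; // pre-correction active array or calibration data missing
    }
    mapper.correctCaptureRequest(request);  // corrected -> raw, request keys
    mapper.correctCaptureResult(result);    // raw -> corrected, result keys
}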
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index 37a05c2..f77069c 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -18,6 +18,7 @@
LOCAL_SRC_FILES:= $(call all-cpp-files-under, .)
LOCAL_SHARED_LIBRARIES := \
+ libbase \
libcutils \
libcameraservice \
libhidlbase \
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index c1d6e85..ef93d9a 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -190,6 +190,7 @@
hardware::camera::common::V1_0::CameraDeviceStatus) override {}
void onTorchStatusChanged(const String8 &,
hardware::camera::common::V1_0::TorchModeStatus) override {}
+ void onNewProviderRegistered() override {}
};
TEST(CameraProviderManagerTest, InitializeTest) {
diff --git a/services/camera/libcameraservice/tests/DistortionMapperComp.py b/services/camera/libcameraservice/tests/DistortionMapperComp.py
new file mode 100644
index 0000000..dea36a7
--- /dev/null
+++ b/services/camera/libcameraservice/tests/DistortionMapperComp.py
@@ -0,0 +1,47 @@
+# Calculates comparison output values for DistortionMapperTest.cpp:CompareToOpenCV
+#
+# Assumes a python that has numpy and cv2 (OpenCV) available
+
+import numpy as np
+import cv2
+
+Fx = 1000
+Fy = 1000
+Cx = 500
+Cy = 500
+# s = 0 - not supported by OpenCV
+
+K = np.array([[Fx, 0, Cx],[0, Fy, Cy],[0, 0, 1]])
+
+# Order is k1, k2, t1, t2, k3
+dist = np.array([0.1, -0.003, 0.02, 0.01, 0.004])
+
+np.random.seed(1234)
+
+activeArray = np.array([[1000, 750]])
+
+rawCoords = np.floor(np.random.rand(1000,2) * activeArray)
+
+# OpenCV needs either row count or col count = 1 for some reason
+rawCoords2 = rawCoords.reshape(-1, 1, 2)
+
+# P is the output camera matrix, K is the input; use the same for both
+expCoords = cv2.undistortPoints(rawCoords2, K, dist, P = K)
+
+with open('DistortionMapperTest_OpenCvData.h','w') as f:
+ f.write('// Generated by DistortionMapperComp.py\n');
+ f.write('// for use by DistortionMapperTest.cpp\n\n');
+
+ f.write('namespace openCvData {\n')
+ f.write('std::array<int32_t, %d> rawCoords = {\n' % (rawCoords.shape[0] * rawCoords.shape[1]))
+ for i in range(rawCoords.shape[0]):
+ f.write(' %d, %d,\n' % (rawCoords[i][0], rawCoords[i][1]))
+ f.write('};\n')
+
+ f.write('std::array<int32_t, %d> expCoords = {\n' % (expCoords.shape[0] * expCoords.shape[2]))
+ for i in range(expCoords.shape[0]):
+ f.write(' %d, %d,\n' % (expCoords[i][0][0], expCoords[i][0][1]))
+ f.write('};\n')
+ f.write('} // namespace openCvData\n')
+
+print "DistortionMapperTest_OpenCvData.h generated"
diff --git a/services/camera/libcameraservice/tests/DistortionMapperTest.cpp b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
new file mode 100644
index 0000000..b489931
--- /dev/null
+++ b/services/camera/libcameraservice/tests/DistortionMapperTest.cpp
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "DistortionMapperTest"
+
+#include <random>
+
+#include <gtest/gtest.h>
+#include <android-base/stringprintf.h>
+#include <android-base/chrono_utils.h>
+
+#include "../device3/DistortionMapper.h"
+
+using namespace android;
+using namespace android::camera3;
+
+
+int32_t testActiveArray[] = {100, 100, 1000, 750};
+
+float testICal[] = { 1000.f, 1000.f, 500.f, 500.f, 0.f };
+
+float identityDistortion[] = { 0.f, 0.f, 0.f, 0.f, 0.f};
+
+std::array<int32_t, 12> basicCoords = {
+ 0, 0,
+ testActiveArray[2] - 1, 0,
+ testActiveArray[2] - 1, testActiveArray[3] - 1,
+ 0, testActiveArray[3] - 1,
+ testActiveArray[2] / 2, testActiveArray[3] / 2,
+ 251, 403 // A particularly bad coordinate for current grid count/array size
+};
+
+
+void setupTestMapper(DistortionMapper *m, float distortion[5]) {
+ CameraMetadata deviceInfo;
+
+ deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
+ testActiveArray, 4);
+
+ deviceInfo.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
+ testICal, 5);
+
+ deviceInfo.update(ANDROID_LENS_DISTORTION,
+ distortion, 5);
+
+ m->setupStaticInfo(deviceInfo);
+}
+
+TEST(DistortionMapperTest, Initialization) {
+ CameraMetadata deviceInfo;
+
+ ASSERT_FALSE(DistortionMapper::isDistortionSupported(deviceInfo));
+
+ uint8_t distortionModes[] =
+ {ANDROID_DISTORTION_CORRECTION_MODE_OFF,
+ ANDROID_DISTORTION_CORRECTION_MODE_FAST,
+ ANDROID_DISTORTION_CORRECTION_MODE_HIGH_QUALITY};
+
+ deviceInfo.update(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES,
+ distortionModes, 1);
+
+ ASSERT_FALSE(DistortionMapper::isDistortionSupported(deviceInfo));
+
+ deviceInfo.update(ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES,
+ distortionModes, 3);
+
+ ASSERT_TRUE(DistortionMapper::isDistortionSupported(deviceInfo));
+
+ DistortionMapper m;
+
+ ASSERT_FALSE(m.calibrationValid());
+
+ ASSERT_NE(m.setupStaticInfo(deviceInfo), OK);
+
+ ASSERT_FALSE(m.calibrationValid());
+
+ deviceInfo.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
+ testActiveArray, 4);
+
+ deviceInfo.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
+ testICal, 5);
+
+ deviceInfo.update(ANDROID_LENS_DISTORTION,
+ identityDistortion, 5);
+
+ ASSERT_EQ(m.setupStaticInfo(deviceInfo), OK);
+
+ ASSERT_TRUE(m.calibrationValid());
+
+ CameraMetadata captureResult;
+
+ ASSERT_NE(m.updateCalibration(captureResult), OK);
+
+ captureResult.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
+ testICal, 5);
+ captureResult.update(ANDROID_LENS_DISTORTION,
+ identityDistortion, 5);
+
+ ASSERT_EQ(m.updateCalibration(captureResult), OK);
+
+}
+
+TEST(DistortionMapperTest, IdentityTransform) {
+ status_t res;
+
+ DistortionMapper m;
+ setupTestMapper(&m, identityDistortion);
+
+ auto coords = basicCoords;
+ res = m.mapCorrectedToRaw(coords.data(), 5);
+ ASSERT_EQ(res, OK);
+
+ for (size_t i = 0; i < coords.size(); i++) {
+ EXPECT_EQ(coords[i], basicCoords[i]);
+ }
+
+ res = m.mapRawToCorrected(coords.data(), 5);
+ ASSERT_EQ(res, OK);
+
+ for (size_t i = 0; i < coords.size(); i++) {
+ EXPECT_EQ(coords[i], basicCoords[i]);
+ }
+
+ std::array<int32_t, 8> rects = {
+ 0, 0, 100, 100,
+ testActiveArray[2] - 100, testActiveArray[3]-100, 100, 100
+ };
+
+ auto rectsOrig = rects;
+ res = m.mapCorrectedRectToRaw(rects.data(), 2);
+ ASSERT_EQ(res, OK);
+
+ for (size_t i = 0; i < rects.size(); i++) {
+ EXPECT_EQ(rects[i], rectsOrig[i]);
+ }
+
+ res = m.mapRawRectToCorrected(rects.data(), 2);
+ ASSERT_EQ(res, OK);
+
+ for (size_t i = 0; i < rects.size(); i++) {
+ EXPECT_EQ(rects[i], rectsOrig[i]);
+ }
+}
+
+TEST(DistortionMapperTest, LargeTransform) {
+ status_t res;
+ constexpr int maxAllowedPixelError = 2; // Maximum per-pixel error allowed
+ constexpr int bucketsPerPixel = 3; // Histogram granularity
+
+ unsigned int seed = 1234; // Ensure repeatability for debugging
+ const size_t coordCount = 1e6; // Number of random test points
+
+ float bigDistortion[] = {0.1, -0.003, 0.004, 0.02, 0.01};
+
+ DistortionMapper m;
+ setupTestMapper(&m, bigDistortion);
+
+ std::default_random_engine gen(seed);
+
+ std::uniform_int_distribution<int> x_dist(0, testActiveArray[2] - 1);
+ std::uniform_int_distribution<int> y_dist(0, testActiveArray[3] - 1);
+
+ std::vector<int32_t> randCoords(coordCount * 2);
+
+ for (size_t i = 0; i < randCoords.size(); i += 2) {
+ randCoords[i] = x_dist(gen);
+ randCoords[i + 1] = y_dist(gen);
+ }
+
+ randCoords.insert(randCoords.end(), basicCoords.begin(), basicCoords.end());
+
+ auto origCoords = randCoords;
+
+ base::Timer correctedToRawTimer;
+ res = m.mapCorrectedToRaw(randCoords.data(), randCoords.size() / 2);
+ auto correctedToRawDurationMs = correctedToRawTimer.duration();
+ EXPECT_EQ(res, OK);
+
+ base::Timer rawToCorrectedTimer;
+ res = m.mapRawToCorrected(randCoords.data(), randCoords.size() / 2);
+ auto rawToCorrectedDurationMs = rawToCorrectedTimer.duration();
+ EXPECT_EQ(res, OK);
+
+ float correctedToRawDurationPerCoordUs =
+ (std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(
+ correctedToRawDurationMs) / (randCoords.size() / 2) ).count();
+ float rawToCorrectedDurationPerCoordUs =
+ (std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(
+ rawToCorrectedDurationMs) / (randCoords.size() / 2) ).count();
+
+ RecordProperty("CorrectedToRawDurationPerCoordUs",
+ base::StringPrintf("%f", correctedToRawDurationPerCoordUs));
+ RecordProperty("RawToCorrectedDurationPerCoordUs",
+ base::StringPrintf("%f", rawToCorrectedDurationPerCoordUs));
+
+ // Calculate mapping errors after round trip
+ float totalErrorSq = 0;
+ // Basic histogram; buckets cover [N, N+1)
+ std::array<int, maxAllowedPixelError * bucketsPerPixel> histogram = {0};
+ int outOfHistogram = 0;
+
+ for (size_t i = 0; i < randCoords.size(); i += 2) {
+ int xOrig = origCoords[i];
+ int yOrig = origCoords[i + 1];
+ int xMapped = randCoords[i];
+ int yMapped = randCoords[i + 1];
+
+ float errorSq = (xMapped - xOrig) * (xMapped - xOrig) +
+ (yMapped - yOrig) * (yMapped - yOrig);
+
+ EXPECT_LE(errorSq, maxAllowedPixelError * maxAllowedPixelError) << "( " <<
+ xOrig << "," << yOrig << ") -> (" << xMapped << "," << yMapped << ")";
+
+ // Note: Integer coordinates, so the histogram will be clumpy; error distances can only be of the
+ // form sqrt(X^2+Y^2) where X, Y are integers, so:
+ // 0, 1, sqrt(2), 2, sqrt(5), sqrt(8), 3, sqrt(10), sqrt(13), 4 ...
+ totalErrorSq += errorSq;
+ float errorDist = std::sqrt(errorSq);
+ if (errorDist < maxAllowedPixelError) {
+ int histBucket = static_cast<int>(errorDist * bucketsPerPixel); // rounds down
+ histogram[histBucket]++;
+ } else {
+ outOfHistogram++;
+ }
+ }
+
+ float rmsError = std::sqrt(totalErrorSq / randCoords.size());
+ RecordProperty("RmsError", base::StringPrintf("%f", rmsError));
+ for (size_t i = 0; i < histogram.size(); i++) {
+ std::string label = base::StringPrintf("HistogramBin[%f,%f)",
+ (float)i/bucketsPerPixel, (float)(i + 1)/bucketsPerPixel);
+ RecordProperty(label, histogram[i]);
+ }
+ RecordProperty("HistogramOutOfRange", outOfHistogram);
+}
+
+// Compare against values calculated by OpenCV's
+// undistortPoints() method, which performs the same mapping as mapRawToCorrected
+// See script DistortionMapperComp.py
+#include "DistortionMapperTest_OpenCvData.h"
+
+TEST(DistortionMapperTest, CompareToOpenCV) {
+ status_t res;
+
+ float bigDistortion[] = {0.1, -0.003, 0.004, 0.02, 0.01};
+
+ // Expect to match within sqrt(2) radius pixels
+ const int32_t maxSqError = 2;
+
+ DistortionMapper m;
+ setupTestMapper(&m, bigDistortion);
+
+ using namespace openCvData;
+
+ res = m.mapRawToCorrected(rawCoords.data(), rawCoords.size() / 2);
+
+ for (size_t i = 0; i < rawCoords.size(); i+=2) {
+ int32_t dist = (rawCoords[i] - expCoords[i]) * (rawCoords[i] - expCoords[i]) +
+ (rawCoords[i + 1] - expCoords[i + 1]) * (rawCoords[i + 1] - expCoords[i + 1]);
+ EXPECT_LE(dist, maxSqError)
+ << "(" << rawCoords[i] << ", " << rawCoords[i + 1] << ") != ("
+ << expCoords[i] << ", " << expCoords[i + 1] << ")";
+ }
+}
diff --git a/services/camera/libcameraservice/tests/DistortionMapperTest_OpenCvData.h b/services/camera/libcameraservice/tests/DistortionMapperTest_OpenCvData.h
new file mode 100644
index 0000000..f996bd5
--- /dev/null
+++ b/services/camera/libcameraservice/tests/DistortionMapperTest_OpenCvData.h
@@ -0,0 +1,2009 @@
+// Generated by DistortionMapperComp.py
+// for use by DistortionMapperTest.cpp
+
+namespace openCvData {
+std::array<int32_t, 2000> rawCoords = {
+ 191, 466,
+ 437, 589,
+ 779, 204,
+ 276, 601,
+ 958, 656,
+ 357, 375,
+ 683, 534,
+ 370, 420,
+ 503, 10,
+ 772, 661,
+ 364, 461,
+ 75, 276,
+ 933, 488,
+ 397, 591,
+ 316, 426,
+ 869, 327,
+ 802, 107,
+ 704, 528,
+ 218, 693,
+ 442, 681,
+ 59, 138,
+ 47, 506,
+ 594, 399,
+ 43, 421,
+ 329, 377,
+ 111, 455,
+ 565, 5,
+ 617, 684,
+ 790, 744,
+ 958, 593,
+ 285, 468,
+ 478, 146,
+ 382, 40,
+ 451, 736,
+ 123, 89,
+ 738, 440,
+ 471, 80,
+ 229, 674,
+ 416, 401,
+ 6, 225,
+ 436, 459,
+ 918, 469,
+ 705, 112,
+ 746, 623,
+ 633, 328,
+ 152, 426,
+ 528, 713,
+ 480, 376,
+ 536, 614,
+ 57, 502,
+ 767, 531,
+ 796, 418,
+ 965, 110,
+ 29, 445,
+ 114, 713,
+ 325, 145,
+ 457, 690,
+ 879, 189,
+ 348, 136,
+ 901, 529,
+ 726, 675,
+ 779, 449,
+ 291, 113,
+ 335, 493,
+ 73, 41,
+ 323, 442,
+ 853, 215,
+ 173, 100,
+ 994, 134,
+ 317, 426,
+ 9, 675,
+ 977, 417,
+ 84, 249,
+ 728, 106,
+ 552, 204,
+ 974, 500,
+ 255, 81,
+ 776, 586,
+ 761, 685,
+ 658, 426,
+ 201, 523,
+ 952, 667,
+ 993, 614,
+ 545, 338,
+ 890, 729,
+ 593, 274,
+ 323, 653,
+ 215, 551,
+ 365, 601,
+ 782, 526,
+ 622, 370,
+ 840, 534,
+ 443, 23,
+ 363, 548,
+ 475, 258,
+ 640, 94,
+ 171, 552,
+ 127, 277,
+ 604, 77,
+ 802, 709,
+ 979, 660,
+ 627, 697,
+ 724, 537,
+ 41, 329,
+ 282, 251,
+ 83, 570,
+ 509, 495,
+ 630, 278,
+ 446, 311,
+ 480, 737,
+ 373, 9,
+ 921, 654,
+ 351, 472,
+ 357, 159,
+ 223, 314,
+ 72, 488,
+ 755, 699,
+ 376, 222,
+ 371, 620,
+ 901, 320,
+ 2, 31,
+ 141, 448,
+ 106, 222,
+ 58, 465,
+ 22, 641,
+ 306, 569,
+ 563, 28,
+ 565, 494,
+ 183, 598,
+ 612, 416,
+ 629, 514,
+ 240, 590,
+ 856, 545,
+ 692, 354,
+ 856, 498,
+ 333, 388,
+ 394, 23,
+ 576, 637,
+ 951, 512,
+ 76, 307,
+ 41, 130,
+ 565, 129,
+ 566, 385,
+ 868, 540,
+ 903, 208,
+ 256, 524,
+ 901, 737,
+ 640, 247,
+ 606, 616,
+ 627, 88,
+ 285, 740,
+ 431, 430,
+ 527, 148,
+ 165, 375,
+ 990, 295,
+ 714, 131,
+ 70, 118,
+ 161, 212,
+ 536, 579,
+ 880, 478,
+ 852, 448,
+ 693, 402,
+ 889, 39,
+ 783, 108,
+ 58, 44,
+ 51, 386,
+ 404, 749,
+ 108, 245,
+ 997, 300,
+ 883, 426,
+ 953, 664,
+ 779, 23,
+ 962, 389,
+ 207, 656,
+ 224, 103,
+ 725, 730,
+ 535, 333,
+ 22, 454,
+ 964, 725,
+ 930, 138,
+ 622, 309,
+ 363, 27,
+ 868, 504,
+ 87, 665,
+ 782, 237,
+ 818, 380,
+ 21, 325,
+ 446, 179,
+ 830, 558,
+ 586, 369,
+ 487, 200,
+ 605, 565,
+ 270, 391,
+ 98, 535,
+ 884, 425,
+ 994, 134,
+ 12, 342,
+ 931, 634,
+ 473, 676,
+ 225, 228,
+ 714, 543,
+ 18, 214,
+ 580, 698,
+ 338, 90,
+ 516, 524,
+ 298, 646,
+ 905, 576,
+ 261, 703,
+ 938, 558,
+ 910, 177,
+ 494, 607,
+ 954, 478,
+ 910, 519,
+ 42, 625,
+ 369, 702,
+ 483, 93,
+ 964, 12,
+ 676, 105,
+ 155, 487,
+ 981, 521,
+ 761, 318,
+ 138, 162,
+ 764, 40,
+ 490, 135,
+ 630, 413,
+ 567, 613,
+ 938, 144,
+ 712, 523,
+ 258, 686,
+ 532, 418,
+ 322, 253,
+ 332, 734,
+ 203, 500,
+ 574, 38,
+ 542, 155,
+ 91, 652,
+ 27, 726,
+ 327, 307,
+ 135, 95,
+ 413, 463,
+ 132, 730,
+ 180, 570,
+ 482, 576,
+ 302, 11,
+ 463, 527,
+ 66, 501,
+ 345, 443,
+ 229, 200,
+ 932, 619,
+ 145, 485,
+ 883, 556,
+ 515, 101,
+ 39, 625,
+ 117, 392,
+ 873, 29,
+ 692, 357,
+ 169, 47,
+ 32, 181,
+ 112, 303,
+ 42, 694,
+ 935, 607,
+ 188, 440,
+ 903, 725,
+ 67, 238,
+ 696, 480,
+ 632, 621,
+ 713, 251,
+ 167, 573,
+ 359, 66,
+ 729, 660,
+ 41, 131,
+ 420, 255,
+ 44, 84,
+ 438, 53,
+ 816, 649,
+ 131, 144,
+ 437, 728,
+ 773, 98,
+ 927, 620,
+ 392, 105,
+ 52, 69,
+ 886, 126,
+ 362, 490,
+ 604, 296,
+ 374, 47,
+ 428, 539,
+ 768, 145,
+ 862, 21,
+ 902, 177,
+ 128, 238,
+ 848, 624,
+ 345, 179,
+ 535, 203,
+ 34, 470,
+ 520, 31,
+ 757, 741,
+ 801, 257,
+ 335, 263,
+ 442, 434,
+ 617, 132,
+ 864, 532,
+ 170, 641,
+ 19, 481,
+ 162, 193,
+ 342, 4,
+ 438, 597,
+ 675, 408,
+ 564, 10,
+ 608, 741,
+ 224, 440,
+ 835, 594,
+ 329, 267,
+ 960, 167,
+ 211, 115,
+ 495, 195,
+ 555, 54,
+ 378, 345,
+ 778, 540,
+ 231, 18,
+ 257, 307,
+ 8, 353,
+ 852, 692,
+ 231, 743,
+ 5, 251,
+ 789, 73,
+ 442, 285,
+ 832, 533,
+ 355, 18,
+ 693, 315,
+ 858, 431,
+ 940, 660,
+ 458, 12,
+ 68, 240,
+ 592, 457,
+ 512, 108,
+ 876, 553,
+ 373, 621,
+ 90, 48,
+ 505, 700,
+ 148, 427,
+ 59, 126,
+ 69, 679,
+ 447, 79,
+ 665, 376,
+ 409, 545,
+ 172, 288,
+ 267, 231,
+ 177, 361,
+ 629, 44,
+ 196, 209,
+ 707, 245,
+ 425, 528,
+ 159, 329,
+ 629, 693,
+ 356, 614,
+ 640, 536,
+ 738, 728,
+ 246, 31,
+ 247, 33,
+ 705, 626,
+ 934, 353,
+ 512, 197,
+ 98, 599,
+ 142, 604,
+ 879, 374,
+ 914, 309,
+ 200, 482,
+ 399, 460,
+ 437, 444,
+ 951, 414,
+ 903, 103,
+ 1, 459,
+ 541, 585,
+ 821, 715,
+ 609, 217,
+ 826, 282,
+ 609, 465,
+ 441, 149,
+ 443, 693,
+ 671, 61,
+ 572, 90,
+ 999, 748,
+ 694, 280,
+ 401, 693,
+ 244, 498,
+ 481, 26,
+ 110, 671,
+ 892, 686,
+ 307, 223,
+ 659, 446,
+ 984, 461,
+ 154, 623,
+ 815, 69,
+ 887, 12,
+ 863, 674,
+ 850, 489,
+ 328, 409,
+ 237, 653,
+ 176, 277,
+ 759, 229,
+ 616, 164,
+ 872, 485,
+ 473, 175,
+ 833, 73,
+ 205, 176,
+ 596, 471,
+ 982, 132,
+ 799, 116,
+ 360, 716,
+ 130, 204,
+ 809, 724,
+ 92, 437,
+ 405, 674,
+ 84, 135,
+ 50, 225,
+ 190, 6,
+ 127, 84,
+ 730, 179,
+ 901, 246,
+ 874, 177,
+ 378, 406,
+ 515, 310,
+ 388, 255,
+ 402, 342,
+ 382, 493,
+ 881, 429,
+ 428, 193,
+ 865, 129,
+ 579, 545,
+ 730, 302,
+ 117, 572,
+ 533, 541,
+ 597, 317,
+ 933, 745,
+ 547, 547,
+ 27, 647,
+ 998, 243,
+ 850, 458,
+ 441, 395,
+ 779, 188,
+ 462, 635,
+ 678, 275,
+ 137, 302,
+ 931, 504,
+ 419, 426,
+ 41, 746,
+ 619, 584,
+ 931, 256,
+ 811, 654,
+ 665, 441,
+ 893, 336,
+ 244, 610,
+ 697, 307,
+ 5, 715,
+ 605, 4,
+ 498, 448,
+ 300, 346,
+ 495, 439,
+ 869, 624,
+ 778, 411,
+ 613, 550,
+ 566, 581,
+ 986, 591,
+ 614, 118,
+ 476, 212,
+ 153, 582,
+ 58, 59,
+ 664, 392,
+ 446, 230,
+ 17, 220,
+ 267, 27,
+ 804, 250,
+ 891, 607,
+ 964, 718,
+ 591, 233,
+ 918, 37,
+ 225, 272,
+ 412, 708,
+ 579, 140,
+ 596, 700,
+ 134, 736,
+ 153, 615,
+ 677, 303,
+ 217, 580,
+ 847, 397,
+ 409, 13,
+ 148, 603,
+ 419, 254,
+ 297, 538,
+ 997, 413,
+ 889, 126,
+ 883, 527,
+ 422, 647,
+ 235, 422,
+ 26, 285,
+ 361, 68,
+ 45, 256,
+ 355, 746,
+ 944, 98,
+ 518, 357,
+ 401, 697,
+ 515, 607,
+ 881, 572,
+ 464, 55,
+ 470, 150,
+ 208, 133,
+ 354, 683,
+ 433, 133,
+ 752, 37,
+ 82, 28,
+ 465, 452,
+ 181, 389,
+ 710, 693,
+ 529, 728,
+ 547, 4,
+ 472, 391,
+ 152, 490,
+ 130, 340,
+ 982, 99,
+ 60, 50,
+ 96, 614,
+ 629, 587,
+ 77, 728,
+ 882, 472,
+ 929, 298,
+ 488, 514,
+ 281, 507,
+ 495, 593,
+ 218, 559,
+ 454, 306,
+ 922, 113,
+ 130, 286,
+ 541, 708,
+ 323, 73,
+ 947, 642,
+ 26, 88,
+ 829, 103,
+ 569, 358,
+ 306, 42,
+ 936, 678,
+ 722, 490,
+ 392, 730,
+ 711, 369,
+ 326, 86,
+ 972, 205,
+ 187, 161,
+ 760, 708,
+ 501, 496,
+ 347, 96,
+ 681, 293,
+ 26, 375,
+ 528, 167,
+ 1, 334,
+ 505, 60,
+ 822, 180,
+ 9, 168,
+ 84, 619,
+ 714, 183,
+ 63, 320,
+ 706, 538,
+ 193, 555,
+ 956, 386,
+ 430, 17,
+ 507, 514,
+ 138, 504,
+ 39, 323,
+ 854, 316,
+ 88, 42,
+ 103, 363,
+ 674, 68,
+ 832, 582,
+ 61, 241,
+ 377, 376,
+ 449, 350,
+ 104, 280,
+ 21, 336,
+ 893, 581,
+ 249, 548,
+ 315, 372,
+ 50, 436,
+ 282, 220,
+ 126, 669,
+ 451, 488,
+ 809, 212,
+ 273, 289,
+ 421, 699,
+ 867, 333,
+ 29, 80,
+ 196, 178,
+ 824, 672,
+ 27, 429,
+ 805, 315,
+ 525, 214,
+ 658, 67,
+ 822, 605,
+ 191, 478,
+ 832, 352,
+ 580, 81,
+ 462, 664,
+ 464, 349,
+ 196, 29,
+ 615, 423,
+ 108, 556,
+ 183, 261,
+ 480, 482,
+ 621, 570,
+ 286, 369,
+ 681, 382,
+ 768, 224,
+ 546, 183,
+ 443, 607,
+ 103, 172,
+ 791, 424,
+ 827, 731,
+ 965, 712,
+ 551, 69,
+ 740, 423,
+ 745, 341,
+ 155, 746,
+ 889, 602,
+ 411, 159,
+ 294, 467,
+ 248, 599,
+ 18, 360,
+ 734, 512,
+ 421, 519,
+ 367, 174,
+ 785, 545,
+ 706, 23,
+ 239, 278,
+ 581, 65,
+ 232, 609,
+ 752, 603,
+ 294, 585,
+ 224, 217,
+ 848, 558,
+ 332, 425,
+ 699, 68,
+ 53, 647,
+ 629, 652,
+ 87, 649,
+ 41, 718,
+ 227, 563,
+ 400, 302,
+ 253, 380,
+ 184, 42,
+ 366, 539,
+ 474, 691,
+ 170, 538,
+ 869, 96,
+ 974, 565,
+ 916, 28,
+ 285, 617,
+ 274, 38,
+ 147, 12,
+ 782, 261,
+ 749, 41,
+ 78, 592,
+ 370, 83,
+ 405, 488,
+ 436, 151,
+ 443, 556,
+ 96, 383,
+ 843, 745,
+ 630, 214,
+ 126, 10,
+ 338, 363,
+ 546, 27,
+ 61, 17,
+ 507, 199,
+ 445, 730,
+ 797, 213,
+ 555, 148,
+ 790, 65,
+ 837, 180,
+ 434, 320,
+ 102, 681,
+ 149, 680,
+ 10, 130,
+ 839, 232,
+ 848, 683,
+ 899, 650,
+ 837, 190,
+ 843, 463,
+ 984, 457,
+ 651, 490,
+ 552, 139,
+ 980, 71,
+ 748, 393,
+ 290, 171,
+ 503, 698,
+ 574, 742,
+ 429, 312,
+ 627, 680,
+ 69, 412,
+ 154, 538,
+ 135, 3,
+ 537, 12,
+ 535, 34,
+ 153, 632,
+ 797, 227,
+ 398, 336,
+ 20, 463,
+ 804, 175,
+ 400, 369,
+ 501, 250,
+ 105, 480,
+ 151, 146,
+ 57, 686,
+ 830, 119,
+ 867, 380,
+ 128, 84,
+ 222, 667,
+ 450, 522,
+ 390, 466,
+ 716, 375,
+ 760, 624,
+ 559, 407,
+ 587, 18,
+ 989, 53,
+ 817, 102,
+ 153, 269,
+ 253, 164,
+ 563, 360,
+ 93, 385,
+ 197, 360,
+ 277, 7,
+ 887, 280,
+ 416, 658,
+ 760, 411,
+ 902, 690,
+ 465, 424,
+ 28, 105,
+ 399, 620,
+ 455, 520,
+ 637, 491,
+ 769, 0,
+ 300, 521,
+ 90, 392,
+ 894, 722,
+ 705, 573,
+ 344, 188,
+ 667, 111,
+ 470, 16,
+ 759, 154,
+ 840, 581,
+ 176, 663,
+ 93, 151,
+ 372, 130,
+ 345, 425,
+ 156, 581,
+ 33, 8,
+ 320, 395,
+ 629, 661,
+ 641, 17,
+ 695, 663,
+ 751, 197,
+ 507, 93,
+ 608, 519,
+ 77, 303,
+ 513, 605,
+ 98, 354,
+ 567, 401,
+ 184, 440,
+ 785, 748,
+ 52, 32,
+ 528, 452,
+ 82, 532,
+ 116, 147,
+ 779, 341,
+ 308, 275,
+ 763, 135,
+ 137, 375,
+ 14, 260,
+ 337, 378,
+ 492, 262,
+ 202, 119,
+ 561, 334,
+ 855, 683,
+ 876, 724,
+ 202, 544,
+ 571, 437,
+ 456, 436,
+ 67, 4,
+ 468, 592,
+ 922, 540,
+ 125, 539,
+ 615, 290,
+ 785, 76,
+ 402, 556,
+ 12, 696,
+ 460, 52,
+ 909, 92,
+ 894, 153,
+ 931, 373,
+ 360, 120,
+ 726, 626,
+ 318, 733,
+ 472, 424,
+ 146, 74,
+ 86, 564,
+ 742, 236,
+ 845, 400,
+ 832, 139,
+ 275, 437,
+ 929, 42,
+ 818, 123,
+ 439, 274,
+ 65, 590,
+ 512, 132,
+ 520, 443,
+ 444, 107,
+ 961, 313,
+ 130, 488,
+ 587, 191,
+ 287, 603,
+ 56, 208,
+ 936, 628,
+ 908, 445,
+ 773, 258,
+ 383, 283,
+ 425, 530,
+ 244, 133,
+ 216, 543,
+ 631, 595,
+ 785, 108,
+ 87, 192,
+ 640, 427,
+ 889, 688,
+ 152, 89,
+ 10, 209,
+ 122, 343,
+ 188, 5,
+ 896, 748,
+ 806, 22,
+ 535, 457,
+ 851, 307,
+ 261, 566,
+ 791, 590,
+ 947, 300,
+ 658, 394,
+ 418, 305,
+ 371, 632,
+ 470, 438,
+ 165, 410,
+ 538, 380,
+ 643, 408,
+ 318, 591,
+ 564, 311,
+ 327, 690,
+ 930, 8,
+ 93, 100,
+ 627, 196,
+ 582, 416,
+ 200, 492,
+ 943, 267,
+ 31, 355,
+ 67, 374,
+ 692, 57,
+ 229, 373,
+ 542, 371,
+ 801, 230,
+ 114, 420,
+ 769, 326,
+ 83, 448,
+ 846, 137,
+ 912, 77,
+ 126, 3,
+ 784, 420,
+ 660, 391,
+ 795, 188,
+ 530, 42,
+ 137, 106,
+ 663, 80,
+ 757, 340,
+ 694, 267,
+ 768, 612,
+ 926, 155,
+ 600, 25,
+ 292, 31,
+ 97, 225,
+ 60, 437,
+ 724, 563,
+ 698, 85,
+ 286, 196,
+ 66, 1,
+ 269, 25,
+ 467, 405,
+ 204, 171,
+ 653, 14,
+ 299, 360,
+ 521, 719,
+ 760, 602,
+ 329, 282,
+ 687, 530,
+ 110, 200,
+ 30, 300,
+ 6, 501,
+ 868, 281,
+ 281, 76,
+ 805, 363,
+ 876, 114,
+ 219, 549,
+ 65, 611,
+ 859, 23,
+ 66, 354,
+ 205, 169,
+ 434, 174,
+ 828, 668,
+ 814, 720,
+ 663, 34,
+ 875, 707,
+ 969, 561,
+ 932, 66,
+ 834, 548,
+ 961, 86,
+ 263, 148,
+ 145, 202,
+ 83, 146,
+ 947, 727,
+ 3, 138,
+ 927, 514,
+ 814, 742,
+ 80, 430,
+ 866, 184,
+ 593, 731,
+ 193, 219,
+ 496, 490,
+ 606, 530,
+ 314, 334,
+ 301, 327,
+ 50, 715,
+ 178, 57,
+ 936, 626,
+ 972, 617,
+ 33, 427,
+ 147, 435,
+ 83, 341,
+ 859, 244,
+ 337, 688,
+ 637, 124,
+ 874, 71,
+ 590, 474,
+ 332, 120,
+ 640, 290,
+ 816, 171,
+ 665, 431,
+ 79, 31,
+ 857, 110,
+ 103, 79,
+ 293, 397,
+ 866, 651,
+ 356, 73,
+ 438, 710,
+ 41, 233,
+ 782, 596,
+ 852, 407,
+ 590, 104,
+ 34, 116,
+ 756, 276,
+ 282, 181,
+ 871, 275,
+ 888, 712,
+ 872, 279,
+ 645, 324,
+ 730, 524,
+ 430, 302,
+ 601, 486,
+ 114, 529,
+ 359, 317,
+ 313, 426,
+ 33, 732,
+ 970, 211,
+ 657, 582,
+ 945, 501,
+ 450, 630,
+ 822, 697,
+ 702, 600,
+ 958, 289,
+ 732, 96,
+ 205, 662,
+ 695, 533,
+ 369, 433,
+ 83, 445,
+ 176, 315,
+ 239, 95,
+ 895, 682,
+ 628, 118,
+ 730, 741,
+ 779, 734,
+ 804, 314,
+ 465, 567,
+ 810, 106,
+ 81, 268,
+ 968, 518,
+ 22, 159,
+ 726, 504,
+ 38, 269,
+ 751, 649,
+ 954, 659,
+};
+std::array<int32_t, 2000> expCoords = {
+ 190, 464,
+ 437, 588,
+ 774, 203,
+ 276, 599,
+ 939, 646,
+ 356, 373,
+ 681, 533,
+ 369, 419,
+ 500, 7,
+ 765, 655,
+ 363, 460,
+ 75, 272,
+ 920, 484,
+ 397, 590,
+ 315, 424,
+ 861, 326,
+ 795, 107,
+ 701, 526,
+ 220, 688,
+ 442, 678,
+ 59, 134,
+ 50, 501,
+ 593, 398,
+ 44, 417,
+ 327, 375,
+ 111, 452,
+ 562, 3,
+ 614, 680,
+ 780, 734,
+ 941, 586,
+ 284, 467,
+ 476, 142,
+ 379, 36,
+ 451, 731,
+ 122, 85,
+ 735, 439,
+ 469, 76,
+ 231, 670,
+ 415, 400,
+ 8, 221,
+ 435, 458,
+ 906, 466,
+ 701, 111,
+ 741, 619,
+ 632, 327,
+ 151, 423,
+ 527, 709,
+ 479, 375,
+ 535, 612,
+ 59, 498,
+ 762, 529,
+ 791, 417,
+ 948, 113,
+ 31, 441,
+ 119, 705,
+ 323, 141,
+ 457, 687,
+ 869, 189,
+ 346, 132,
+ 890, 525,
+ 721, 670,
+ 775, 448,
+ 288, 108,
+ 334, 492,
+ 74, 38,
+ 322, 441,
+ 845, 215,
+ 171, 96,
+ 975, 137,
+ 316, 425,
+ 17, 665,
+ 961, 414,
+ 83, 245,
+ 723, 105,
+ 551, 201,
+ 958, 495,
+ 253, 77,
+ 770, 583,
+ 754, 679,
+ 657, 425,
+ 201, 521,
+ 934, 657,
+ 973, 605,
+ 544, 336,
+ 875, 717,
+ 592, 272,
+ 323, 650,
+ 215, 549,
+ 365, 600,
+ 777, 524,
+ 621, 369,
+ 832, 531,
+ 440, 19,
+ 362, 547,
+ 474, 255,
+ 637, 92,
+ 171, 549,
+ 126, 273,
+ 601, 74,
+ 792, 701,
+ 959, 649,
+ 624, 692,
+ 721, 535,
+ 42, 325,
+ 280, 247,
+ 86, 565,
+ 508, 494,
+ 629, 276,
+ 445, 309,
+ 479, 732,
+ 370, 5,
+ 905, 645,
+ 350, 471,
+ 355, 155,
+ 221, 311,
+ 73, 484,
+ 748, 692,
+ 374, 218,
+ 371, 618,
+ 891, 319,
+ 6, 29,
+ 141, 445,
+ 105, 218,
+ 60, 461,
+ 28, 633,
+ 306, 567,
+ 560, 25,
+ 564, 493,
+ 184, 595,
+ 611, 415,
+ 628, 513,
+ 240, 587,
+ 847, 541,
+ 690, 353,
+ 848, 495,
+ 331, 386,
+ 391, 19,
+ 575, 635,
+ 936, 507,
+ 76, 303,
+ 42, 126,
+ 563, 126,
+ 565, 384,
+ 858, 536,
+ 892, 209,
+ 255, 522,
+ 884, 724,
+ 639, 245,
+ 604, 614,
+ 624, 86,
+ 287, 734,
+ 430, 429,
+ 525, 145,
+ 164, 372,
+ 974, 295,
+ 710, 130,
+ 70, 114,
+ 159, 208,
+ 535, 578,
+ 871, 475,
+ 844, 446,
+ 691, 401,
+ 876, 43,
+ 777, 108,
+ 59, 41,
+ 52, 382,
+ 404, 744,
+ 107, 241,
+ 980, 300,
+ 874, 424,
+ 935, 654,
+ 771, 24,
+ 948, 387,
+ 209, 652,
+ 222, 99,
+ 718, 722,
+ 534, 331,
+ 25, 449,
+ 943, 711,
+ 916, 140,
+ 621, 307,
+ 360, 23,
+ 859, 501,
+ 92, 658,
+ 777, 236,
+ 812, 379,
+ 22, 321,
+ 444, 175,
+ 822, 554,
+ 585, 368,
+ 486, 197,
+ 604, 564,
+ 268, 389,
+ 100, 531,
+ 875, 423,
+ 975, 137,
+ 14, 338,
+ 915, 626,
+ 472, 673,
+ 223, 224,
+ 711, 541,
+ 19, 210,
+ 578, 694,
+ 336, 85,
+ 515, 523,
+ 298, 643,
+ 892, 570,
+ 263, 698,
+ 923, 552,
+ 898, 178,
+ 493, 606,
+ 940, 474,
+ 898, 515,
+ 47, 618,
+ 369, 698,
+ 481, 89,
+ 945, 18,
+ 673, 103,
+ 155, 484,
+ 964, 516,
+ 757, 317,
+ 136, 158,
+ 757, 41,
+ 488, 131,
+ 629, 412,
+ 566, 611,
+ 924, 146,
+ 709, 521,
+ 259, 682,
+ 531, 417,
+ 320, 250,
+ 333, 729,
+ 202, 498,
+ 571, 35,
+ 540, 152,
+ 95, 645,
+ 36, 715,
+ 325, 304,
+ 134, 91,
+ 412, 462,
+ 137, 722,
+ 181, 567,
+ 481, 575,
+ 300, 7,
+ 462, 526,
+ 68, 497,
+ 344, 442,
+ 227, 196,
+ 917, 611,
+ 145, 482,
+ 872, 551,
+ 513, 97,
+ 44, 618,
+ 116, 389,
+ 861, 32,
+ 690, 356,
+ 168, 43,
+ 33, 177,
+ 111, 299,
+ 49, 685,
+ 920, 600,
+ 187, 438,
+ 887, 713,
+ 67, 234,
+ 694, 479,
+ 630, 619,
+ 710, 250,
+ 168, 570,
+ 356, 62,
+ 724, 655,
+ 42, 127,
+ 418, 252,
+ 45, 81,
+ 436, 49,
+ 807, 643,
+ 130, 140,
+ 437, 723,
+ 767, 98,
+ 912, 612,
+ 390, 101,
+ 53, 66,
+ 875, 128,
+ 361, 489,
+ 603, 294,
+ 371, 43,
+ 427, 538,
+ 763, 144,
+ 850, 24,
+ 891, 178,
+ 126, 234,
+ 838, 618,
+ 343, 175,
+ 534, 200,
+ 36, 466,
+ 517, 28,
+ 749, 732,
+ 796, 256,
+ 333, 260,
+ 441, 433,
+ 615, 129,
+ 855, 528,
+ 172, 636,
+ 22, 476,
+ 160, 189,
+ 339, 0,
+ 438, 596,
+ 673, 407,
+ 561, 8,
+ 605, 735,
+ 223, 438,
+ 826, 589,
+ 327, 264,
+ 945, 169,
+ 209, 111,
+ 494, 192,
+ 552, 51,
+ 377, 343,
+ 773, 537,
+ 229, 14,
+ 255, 304,
+ 10, 349,
+ 840, 683,
+ 234, 736,
+ 7, 247,
+ 782, 73,
+ 441, 282,
+ 824, 530,
+ 352, 14,
+ 691, 314,
+ 850, 429,
+ 923, 650,
+ 455, 8,
+ 68, 236,
+ 591, 456,
+ 510, 104,
+ 866, 549,
+ 373, 619,
+ 90, 45,
+ 504, 696,
+ 147, 424,
+ 59, 122,
+ 75, 671,
+ 445, 75,
+ 664, 375,
+ 408, 544,
+ 170, 284,
+ 265, 227,
+ 175, 358,
+ 625, 42,
+ 194, 205,
+ 704, 244,
+ 424, 527,
+ 157, 326,
+ 626, 689,
+ 356, 612,
+ 638, 535,
+ 731, 720,
+ 244, 27,
+ 245, 29,
+ 701, 623,
+ 922, 352,
+ 511, 194,
+ 101, 594,
+ 144, 600,
+ 870, 373,
+ 903, 308,
+ 199, 480,
+ 398, 459,
+ 436, 443,
+ 938, 412,
+ 891, 105,
+ 4, 454,
+ 540, 584,
+ 810, 706,
+ 608, 215,
+ 820, 281,
+ 608, 464,
+ 439, 145,
+ 443, 690,
+ 667, 60,
+ 570, 87,
+ 974, 731,
+ 692, 279,
+ 401, 690,
+ 243, 496,
+ 478, 22,
+ 114, 664,
+ 878, 676,
+ 305, 219,
+ 658, 445,
+ 967, 457,
+ 156, 619,
+ 807, 70,
+ 873, 16,
+ 851, 666,
+ 842, 486,
+ 327, 407,
+ 238, 649,
+ 174, 273,
+ 755, 228,
+ 614, 161,
+ 863, 482,
+ 471, 171,
+ 824, 74,
+ 203, 172,
+ 595, 470,
+ 964, 135,
+ 792, 116,
+ 361, 712,
+ 128, 200,
+ 799, 715,
+ 92, 434,
+ 405, 671,
+ 84, 131,
+ 50, 221,
+ 189, 3,
+ 126, 80,
+ 726, 178,
+ 891, 246,
+ 865, 178,
+ 377, 405,
+ 514, 308,
+ 386, 252,
+ 401, 340,
+ 381, 492,
+ 872, 427,
+ 426, 189,
+ 855, 130,
+ 578, 544,
+ 727, 301,
+ 119, 568,
+ 532, 540,
+ 596, 315,
+ 914, 731,
+ 546, 546,
+ 33, 639,
+ 981, 244,
+ 842, 456,
+ 440, 394,
+ 774, 187,
+ 462, 633,
+ 676, 273,
+ 135, 298,
+ 918, 500,
+ 418, 425,
+ 50, 734,
+ 617, 582,
+ 919, 256,
+ 802, 648,
+ 664, 440,
+ 884, 335,
+ 244, 607,
+ 695, 306,
+ 14, 704,
+ 601, 2,
+ 497, 447,
+ 298, 344,
+ 494, 438,
+ 858, 618,
+ 774, 410,
+ 612, 549,
+ 565, 580,
+ 967, 583,
+ 612, 115,
+ 475, 209,
+ 154, 578,
+ 59, 56,
+ 663, 391,
+ 444, 227,
+ 18, 216,
+ 265, 23,
+ 799, 249,
+ 879, 601,
+ 943, 704,
+ 590, 231,
+ 903, 41,
+ 223, 268,
+ 412, 704,
+ 577, 137,
+ 594, 696,
+ 139, 727,
+ 155, 611,
+ 675, 302,
+ 217, 577,
+ 840, 396,
+ 406, 9,
+ 150, 599,
+ 417, 251,
+ 296, 537,
+ 980, 410,
+ 878, 128,
+ 873, 523,
+ 422, 645,
+ 234, 420,
+ 27, 281,
+ 358, 64,
+ 45, 252,
+ 356, 740,
+ 929, 101,
+ 517, 356,
+ 401, 694,
+ 514, 606,
+ 870, 567,
+ 462, 51,
+ 468, 146,
+ 206, 129,
+ 354, 680,
+ 431, 129,
+ 745, 37,
+ 83, 25,
+ 464, 451,
+ 180, 386,
+ 705, 687,
+ 528, 723,
+ 544, 1,
+ 471, 390,
+ 152, 487,
+ 129, 337,
+ 964, 103,
+ 61, 47,
+ 99, 609,
+ 627, 585,
+ 84, 718,
+ 873, 469,
+ 917, 298,
+ 488, 513,
+ 280, 506,
+ 494, 592,
+ 218, 557,
+ 453, 304,
+ 908, 115,
+ 129, 282,
+ 540, 704,
+ 320, 69,
+ 930, 633,
+ 28, 85,
+ 821, 104,
+ 568, 357,
+ 304, 38,
+ 919, 667,
+ 719, 489,
+ 392, 725,
+ 709, 368,
+ 323, 81,
+ 956, 206,
+ 185, 157,
+ 753, 701,
+ 500, 495,
+ 345, 92,
+ 679, 292,
+ 27, 371,
+ 526, 164,
+ 3, 330,
+ 503, 56,
+ 815, 180,
+ 11, 164,
+ 88, 613,
+ 711, 182,
+ 63, 316,
+ 703, 536,
+ 193, 552,
+ 942, 384,
+ 427, 13,
+ 506, 513,
+ 138, 501,
+ 40, 319,
+ 847, 315,
+ 88, 39,
+ 102, 360,
+ 670, 66,
+ 824, 578,
+ 61, 237,
+ 376, 374,
+ 448, 348,
+ 103, 276,
+ 22, 332,
+ 881, 576,
+ 249, 546,
+ 313, 370,
+ 51, 432,
+ 280, 216,
+ 130, 663,
+ 450, 487,
+ 803, 211,
+ 271, 286,
+ 421, 696,
+ 859, 332,
+ 31, 77,
+ 194, 174,
+ 814, 665,
+ 29, 425,
+ 800, 314,
+ 524, 211,
+ 654, 65,
+ 814, 600,
+ 190, 476,
+ 826, 351,
+ 577, 78,
+ 462, 662,
+ 463, 347,
+ 194, 25,
+ 614, 422,
+ 110, 552,
+ 181, 257,
+ 479, 481,
+ 619, 569,
+ 284, 367,
+ 679, 381,
+ 764, 223,
+ 545, 180,
+ 443, 606,
+ 102, 168,
+ 786, 423,
+ 816, 721,
+ 944, 699,
+ 548, 66,
+ 737, 422,
+ 742, 340,
+ 160, 737,
+ 877, 596,
+ 409, 155,
+ 293, 466,
+ 248, 596,
+ 20, 356,
+ 731, 510,
+ 420, 518,
+ 365, 170,
+ 779, 542,
+ 700, 23,
+ 237, 275,
+ 578, 62,
+ 233, 606,
+ 747, 599,
+ 294, 583,
+ 222, 213,
+ 839, 554,
+ 331, 424,
+ 694, 67,
+ 58, 640,
+ 627, 649,
+ 91, 642,
+ 49, 708,
+ 227, 561,
+ 398, 299,
+ 251, 378,
+ 182, 38,
+ 365, 538,
+ 473, 688,
+ 170, 535,
+ 858, 98,
+ 956, 558,
+ 901, 33,
+ 285, 615,
+ 272, 34,
+ 146, 9,
+ 778, 260,
+ 743, 41,
+ 81, 587,
+ 368, 79,
+ 404, 487,
+ 434, 147,
+ 443, 555,
+ 96, 380,
+ 830, 734,
+ 628, 212,
+ 126, 7,
+ 336, 361,
+ 543, 24,
+ 62, 15,
+ 506, 196,
+ 445, 725,
+ 792, 212,
+ 553, 145,
+ 783, 66,
+ 829, 180,
+ 433, 318,
+ 107, 674,
+ 152, 674,
+ 12, 127,
+ 832, 232,
+ 837, 675,
+ 885, 642,
+ 830, 190,
+ 836, 461,
+ 967, 453,
+ 650, 489,
+ 550, 136,
+ 961, 76,
+ 745, 392,
+ 288, 167,
+ 502, 694,
+ 572, 736,
+ 428, 310,
+ 624, 676,
+ 70, 408,
+ 155, 535,
+ 135, 0,
+ 534, 9,
+ 532, 31,
+ 155, 627,
+ 792, 226,
+ 397, 334,
+ 23, 458,
+ 798, 175,
+ 399, 367,
+ 500, 247,
+ 106, 477,
+ 149, 142,
+ 63, 677,
+ 822, 120,
+ 859, 379,
+ 127, 80,
+ 224, 663,
+ 449, 521,
+ 389, 465,
+ 714, 374,
+ 754, 620,
+ 558, 406,
+ 584, 16,
+ 969, 59,
+ 809, 103,
+ 151, 265,
+ 251, 160,
+ 562, 359,
+ 93, 382,
+ 195, 357,
+ 275, 3,
+ 878, 280,
+ 416, 656,
+ 756, 410,
+ 887, 680,
+ 464, 423,
+ 30, 102,
+ 399, 618,
+ 454, 519,
+ 636, 490,
+ 761, 2,
+ 299, 520,
+ 90, 388,
+ 878, 710,
+ 702, 571,
+ 342, 184,
+ 664, 109,
+ 467, 12,
+ 754, 153,
+ 831, 577,
+ 178, 658,
+ 92, 147,
+ 370, 126,
+ 344, 424,
+ 157, 577,
+ 36, 7,
+ 318, 393,
+ 626, 658,
+ 637, 16,
+ 691, 659,
+ 747, 196,
+ 505, 89,
+ 607, 518,
+ 77, 299,
+ 512, 604,
+ 97, 350,
+ 566, 400,
+ 183, 438,
+ 775, 738,
+ 54, 30,
+ 527, 451,
+ 84, 528,
+ 115, 143,
+ 775, 340,
+ 306, 272,
+ 758, 134,
+ 136, 372,
+ 15, 256,
+ 335, 376,
+ 491, 259,
+ 200, 115,
+ 560, 332,
+ 843, 675,
+ 862, 713,
+ 202, 541,
+ 570, 436,
+ 455, 435,
+ 68, 2,
+ 468, 591,
+ 909, 535,
+ 126, 535,
+ 614, 288,
+ 778, 76,
+ 402, 555,
+ 20, 686,
+ 458, 48,
+ 896, 95,
+ 883, 154,
+ 919, 372,
+ 358, 116,
+ 721, 622,
+ 319, 728,
+ 471, 423,
+ 145, 70,
+ 88, 559,
+ 739, 235,
+ 838, 399,
+ 824, 139,
+ 274, 435,
+ 913, 46,
+ 810, 123,
+ 438, 271,
+ 69, 584,
+ 510, 128,
+ 519, 442,
+ 442, 103,
+ 947, 312,
+ 130, 485,
+ 585, 188,
+ 287, 601,
+ 56, 204,
+ 920, 620,
+ 897, 443,
+ 769, 257,
+ 381, 280,
+ 424, 529,
+ 242, 129,
+ 216, 541,
+ 629, 593,
+ 779, 108,
+ 86, 188,
+ 639, 426,
+ 875, 678,
+ 151, 85,
+ 12, 205,
+ 121, 340,
+ 187, 2,
+ 879, 735,
+ 797, 24,
+ 534, 456,
+ 844, 306,
+ 261, 564,
+ 784, 586,
+ 934, 300,
+ 657, 393,
+ 417, 303,
+ 371, 630,
+ 469, 437,
+ 164, 407,
+ 537, 379,
+ 642, 407,
+ 318, 589,
+ 563, 309,
+ 328, 686,
+ 913, 14,
+ 93, 96,
+ 625, 194,
+ 581, 415,
+ 199, 490,
+ 930, 267,
+ 32, 351,
+ 67, 370,
+ 687, 56,
+ 227, 370,
+ 541, 370,
+ 796, 229,
+ 114, 417,
+ 765, 325,
+ 84, 444,
+ 837, 138,
+ 898, 80,
+ 126, 0,
+ 780, 419,
+ 659, 390,
+ 789, 187,
+ 527, 39,
+ 136, 102,
+ 659, 78,
+ 754, 339,
+ 692, 266,
+ 762, 608,
+ 913, 157,
+ 597, 23,
+ 290, 27,
+ 96, 221,
+ 61, 433,
+ 720, 561,
+ 694, 84,
+ 284, 192,
+ 68, 0,
+ 267, 21,
+ 466, 404,
+ 202, 167,
+ 649, 13,
+ 297, 358,
+ 520, 715,
+ 755, 598,
+ 327, 279,
+ 685, 529,
+ 109, 196,
+ 31, 296,
+ 10, 496,
+ 860, 280,
+ 279, 72,
+ 800, 362,
+ 865, 116,
+ 219, 547,
+ 69, 605,
+ 847, 26,
+ 66, 350,
+ 203, 165,
+ 432, 170,
+ 818, 661,
+ 804, 711,
+ 659, 33,
+ 861, 697,
+ 952, 555,
+ 917, 70,
+ 826, 544,
+ 944, 90,
+ 260, 144,
+ 143, 198,
+ 83, 142,
+ 927, 713,
+ 5, 135,
+ 914, 510,
+ 803, 732,
+ 81, 426,
+ 857, 184,
+ 590, 726,
+ 191, 215,
+ 495, 489,
+ 605, 529,
+ 312, 332,
+ 299, 324,
+ 57, 705,
+ 176, 53,
+ 920, 618,
+ 953, 608,
+ 35, 423,
+ 146, 432,
+ 83, 337,
+ 851, 244,
+ 338, 684,
+ 634, 122,
+ 863, 73,
+ 589, 473,
+ 330, 116,
+ 639, 288,
+ 809, 171,
+ 664, 430,
+ 80, 28,
+ 847, 111,
+ 103, 75,
+ 291, 395,
+ 854, 644,
+ 353, 69,
+ 438, 706,
+ 41, 229,
+ 776, 592,
+ 845, 405,
+ 588, 101,
+ 35, 113,
+ 752, 275,
+ 280, 177,
+ 863, 275,
+ 873, 701,
+ 864, 279,
+ 644, 323,
+ 727, 522,
+ 429, 300,
+ 600, 485,
+ 115, 525,
+ 357, 315,
+ 312, 424,
+ 42, 721,
+ 955, 212,
+ 655, 580,
+ 931, 497,
+ 450, 628,
+ 812, 689,
+ 698, 597,
+ 944, 289,
+ 727, 95,
+ 207, 657,
+ 692, 531,
+ 368, 432,
+ 84, 441,
+ 174, 312,
+ 237, 91,
+ 881, 672,
+ 625, 116,
+ 723, 733,
+ 770, 725,
+ 799, 313,
+ 465, 566,
+ 803, 106,
+ 80, 264,
+ 952, 513,
+ 23, 155,
+ 723, 502,
+ 39, 265,
+ 745, 644,
+ 936, 649,
+};
+} // namespace openCvData
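Note on the generated data above: rawCoords and expCoords are parallel flat arrays of 1000 (x, y) pixel pairs each -- rawCoords holds the raw sample points and expCoords the corresponding expected points produced by the OpenCV reference implementation (via DistortionMapperComp.py). A test would typically walk the two arrays in lockstep, run each raw point through the mapping under test, and require the result to land near OpenCV's answer. The sketch below is illustrative only; the mapping callable and the 2-pixel tolerance are assumptions, not the real DistortionMapperTest code.

    // Illustrative only: the mapping callable and tolerance are assumptions.
    #include <array>
    #include <cassert>
    #include <cmath>
    #include "DistortionMapperTest_OpenCvData.h"  // provides openCvData::rawCoords/expCoords

    template <typename MapPointFn>
    static void checkAgainstOpenCv(MapPointFn mapPoint) {
        constexpr float kTolerancePx = 2.0f;            // assumed per-axis tolerance
        for (size_t i = 0; i < openCvData::rawCoords.size(); i += 2) {
            float x = openCvData::rawCoords[i];         // raw sample point
            float y = openCvData::rawCoords[i + 1];
            mapPoint(&x, &y);                           // mapper under test (assumed signature)
            float ex = openCvData::expCoords[i];        // expected point from OpenCV
            float ey = openCvData::expCoords[i + 1];
            assert(std::fabs(x - ex) <= kTolerancePx);
            assert(std::fabs(y - ey) <= kTolerancePx);
        }
    }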
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index dec97d7..c0a353f 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -33,6 +33,8 @@
mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID)
{}
+const String16 TagMonitor::kMonitorOption = String16("-m");
+
const char* TagMonitor::k3aTags =
"android.control.aeMode, android.control.afMode, android.control.awbMode,"
"android.control.aeState, android.control.afState, android.control.awbState,"
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index 7155314..2dece62 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -38,6 +38,10 @@
* buffer log that can be dumped at will. */
class TagMonitor {
public:
+
+ // Monitor argument
+ static const String16 kMonitorOption;
+
enum eventSource {
REQUEST,
RESULT
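The new kMonitorOption constant centralizes the "-m" dump flag that enables per-tag monitoring, so callers compare dump arguments against one definition instead of re-creating the string literal. A hypothetical argument scan (the surrounding service plumbing and the helper below are assumptions, not part of this change):

    // Hypothetical dump-argument scan using the shared flag constant.
    #include <utils/String16.h>
    #include <utils/Vector.h>
    #include "utils/TagMonitor.h"   // include path assumed

    void scanDumpArgs(const android::Vector<android::String16>& args) {
        for (size_t i = 0; i < args.size(); i++) {
            if (args[i] == android::TagMonitor::kMonitorOption) {
                // The following argument (if present) is assumed to carry the
                // tag list to monitor, e.g. "3a" or a comma-separated set.
                // parseTagsToMonitor(args[i + 1]);  // hypothetical helper
                break;
            }
        }
    }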
diff --git a/services/mediaanalytics/Android.mk b/services/mediaanalytics/Android.mk
index 9e2813e..2eeb7fa 100644
--- a/services/mediaanalytics/Android.mk
+++ b/services/mediaanalytics/Android.mk
@@ -6,11 +6,6 @@
LOCAL_SRC_FILES:= \
main_mediametrics.cpp \
- MetricsSummarizerCodec.cpp \
- MetricsSummarizerExtractor.cpp \
- MetricsSummarizerPlayer.cpp \
- MetricsSummarizerRecorder.cpp \
- MetricsSummarizer.cpp \
MediaAnalyticsService.cpp
LOCAL_SHARED_LIBRARIES := \
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index c7f9270..4b05395 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -74,26 +74,11 @@
#include "MediaAnalyticsService.h"
-#include "MetricsSummarizer.h"
-#include "MetricsSummarizerCodec.h"
-#include "MetricsSummarizerExtractor.h"
-#include "MetricsSummarizerPlayer.h"
-#include "MetricsSummarizerRecorder.h"
-
-
namespace android {
using namespace android::base;
using namespace android::content::pm;
-
-
-// summarized records
-// up to 36 sets, each covering an hour -- so at least 1.5 days
-// (will be longer if there are hours without any media action)
-static const nsecs_t kNewSetIntervalNs = 3600*(1000*1000*1000ll);
-static const int kMaxRecordSets = 36;
-
// individual records kept in memory: age or count
// age: <= 36 hours (1.5 days)
// count: hard limit of # records
@@ -108,66 +93,13 @@
String16(kServiceName), new MediaAnalyticsService());
}
-// handle sets of summarizers
-MediaAnalyticsService::SummarizerSet::SummarizerSet() {
- mSummarizers = new List<MetricsSummarizer *>();
-}
-
-MediaAnalyticsService::SummarizerSet::~SummarizerSet() {
- // empty the list
- List<MetricsSummarizer *> *l = mSummarizers;
- while (l->size() > 0) {
- MetricsSummarizer *summarizer = *(l->begin());
- l->erase(l->begin());
- delete summarizer;
- }
-}
-
-void MediaAnalyticsService::newSummarizerSet() {
- ALOGD("MediaAnalyticsService::newSummarizerSet");
- MediaAnalyticsService::SummarizerSet *set = new MediaAnalyticsService::SummarizerSet();
- nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
- set->setStarted(now);
-
- set->appendSummarizer(new MetricsSummarizerExtractor("extractor"));
- set->appendSummarizer(new MetricsSummarizerCodec("codec"));
- set->appendSummarizer(new MetricsSummarizerPlayer("nuplayer"));
- set->appendSummarizer(new MetricsSummarizerRecorder("recorder"));
-
- // ALWAYS at the end, since it catches everything
- set->appendSummarizer(new MetricsSummarizer(NULL));
-
- // inject this set at the BACK of the list.
- mSummarizerSets->push_back(set);
- mCurrentSet = set;
-
- // limit the # that we have
- if (mMaxRecordSets > 0) {
- List<SummarizerSet *> *l = mSummarizerSets;
- while (l->size() > (size_t) mMaxRecordSets) {
- ALOGD("Deleting oldest record set....");
- MediaAnalyticsService::SummarizerSet *oset = *(l->begin());
- l->erase(l->begin());
- delete oset;
- mSetsDiscarded++;
- }
- }
-}
-
MediaAnalyticsService::MediaAnalyticsService()
: mMaxRecords(kMaxRecords),
mMaxRecordAgeNs(kMaxRecordAgeNs),
- mMaxRecordSets(kMaxRecordSets),
- mNewSetInterval(kNewSetIntervalNs),
- mDumpProto(MediaAnalyticsItem::PROTO_V0) {
+ mDumpProto(MediaAnalyticsItem::PROTO_V1),
+ mDumpProtoDefault(MediaAnalyticsItem::PROTO_V1) {
ALOGD("MediaAnalyticsService created");
- // clear our queues
- mOpen = new List<MediaAnalyticsItem *>();
- mFinalized = new List<MediaAnalyticsItem *>();
-
- mSummarizerSets = new List<MediaAnalyticsService::SummarizerSet *>();
- newSummarizerSet();
mItemsSubmitted = 0;
mItemsFinalized = 0;
@@ -183,28 +115,13 @@
MediaAnalyticsService::~MediaAnalyticsService() {
ALOGD("MediaAnalyticsService destroyed");
- // clean out mOpen and mFinalized
- while (mOpen->size() > 0) {
- MediaAnalyticsItem * oitem = *(mOpen->begin());
- mOpen->erase(mOpen->begin());
+ while (mItems.size() > 0) {
+ MediaAnalyticsItem * oitem = *(mItems.begin());
+ mItems.erase(mItems.begin());
delete oitem;
mItemsDiscarded++;
mItemsDiscardedCount++;
}
- delete mOpen;
- mOpen = NULL;
-
- while (mFinalized->size() > 0) {
- MediaAnalyticsItem * oitem = *(mFinalized->begin());
- mFinalized->erase(mFinalized->begin());
- delete oitem;
- mItemsDiscarded++;
- mItemsDiscardedCount++;
- }
- delete mFinalized;
- mFinalized = NULL;
-
- // XXX: clean out the summaries
}
@@ -216,9 +133,14 @@
}
// caller surrenders ownership of 'item'
-MediaAnalyticsItem::SessionID_t MediaAnalyticsService::submit(MediaAnalyticsItem *item, bool forcenew) {
+MediaAnalyticsItem::SessionID_t MediaAnalyticsService::submit(MediaAnalyticsItem *item, bool forcenew)
+{
+ UNUSED(forcenew);
- MediaAnalyticsItem::SessionID_t id = MediaAnalyticsItem::SessionIDInvalid;
+ // fill in a sessionID if we do not yet have one
+ if (item->getSessionID() <= MediaAnalyticsItem::SessionIDNone) {
+ item->setSessionID(generateUniqueSessionID());
+ }
// we control these, generally not trusting user input
nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
@@ -231,9 +153,7 @@
int uid_given = item->getUid();
int pid_given = item->getPid();
- // although we do make exceptions for particular client uids
- // that we know we trust.
- //
+ // although we do make exceptions for some trusted client uids
bool isTrusted = false;
ALOGV("caller has uid=%d, embedded uid=%d", uid, uid_given);
@@ -259,7 +179,6 @@
break;
}
-
// Overwrite package name and version if the caller was untrusted.
if (!isTrusted) {
setPkgInfo(item, item->getUid(), true, true);
@@ -271,7 +190,7 @@
}
ALOGV("given uid %d; sanitized uid: %d sanitized pkg: %s "
- "sanitized pkg version: %d",
+ "sanitized pkg version: %" PRId64,
uid_given, item->getUid(),
item->getPkgName().c_str(),
item->getPkgVersionCode());
@@ -284,77 +203,23 @@
return MediaAnalyticsItem::SessionIDInvalid;
}
-
- // if we have a sesisonid in the new record, look to make
+ // XXX: if we have a sessionid in the new record, look to make
// sure it doesn't appear in the finalized list.
// XXX: this is for security / DOS prevention.
// may also require that we persist the unique sessionIDs
// across boots [instead of within a single boot]
-
- // match this new record up against records in the open
- // list...
- // if there's a match, merge them together
- // deal with moving the old / merged record into the finalized que
-
- bool finalizing = item->getFinalized();
-
- // if finalizing, we'll remove it
- MediaAnalyticsItem *oitem = findItem(mOpen, item, finalizing | forcenew);
- if (oitem != NULL) {
- if (forcenew) {
- // old one gets finalized, then we insert the new one
- // so we'll have 2 records at the end of this.
- // but don't finalize an empty record
- if (oitem->count() == 0) {
- // we're responsible for disposing of the dead record
- delete oitem;
- oitem = NULL;
- } else {
- oitem->setFinalized(true);
- summarize(oitem);
- saveItem(mFinalized, oitem, 0);
- }
- // new record could itself be marked finalized...
- if (finalizing) {
- summarize(item);
- saveItem(mFinalized, item, 0);
- mItemsFinalized++;
- } else {
- saveItem(mOpen, item, 1);
- }
- id = item->getSessionID();
- } else {
- // combine the records, send it to finalized if appropriate
- oitem->merge(item);
- if (finalizing) {
- summarize(oitem);
- saveItem(mFinalized, oitem, 0);
- mItemsFinalized++;
- }
- id = oitem->getSessionID();
-
- // we're responsible for disposing of the dead record
- delete item;
- item = NULL;
- }
- } else {
- // nothing to merge, save the new record
- id = item->getSessionID();
- if (finalizing) {
- if (item->count() == 0) {
- // drop empty records
- delete item;
- item = NULL;
- } else {
- summarize(item);
- saveItem(mFinalized, item, 0);
- mItemsFinalized++;
- }
- } else {
- saveItem(mOpen, item, 1);
- }
+ if (item->count() == 0) {
+ // drop empty records
+ delete item;
+ item = NULL;
+ return MediaAnalyticsItem::SessionIDInvalid;
}
+
+ // save the new record
+ MediaAnalyticsItem::SessionID_t id = item->getSessionID();
+ saveItem(item);
+ mItemsFinalized++;
return id;
}
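With the summarizer and merge machinery removed, submit() collapses to a short pipeline: assign a session ID when the caller did not provide one, sanitize uid/pid/package against the caller's credentials, drop empty records, and append everything else to the single mItems queue. A condensed sketch of that flow (sanitation and logging elided; submitSketch is not a real method name and would need a matching declaration):

    // Condensed sketch of the new submit() flow -- not the literal implementation.
    MediaAnalyticsItem::SessionID_t MediaAnalyticsService::submitSketch(
            MediaAnalyticsItem* item) {
        if (item->getSessionID() <= MediaAnalyticsItem::SessionIDNone) {
            item->setSessionID(generateUniqueSessionID());  // give it an ID if missing
        }
        // uid/pid/package sanitation happens here (trusted callers excepted)
        if (item->count() == 0) {                           // empty record: drop it
            delete item;
            return MediaAnalyticsItem::SessionIDInvalid;
        }
        MediaAnalyticsItem::SessionID_t id = item->getSessionID();
        saveItem(item);                                     // bounded in-memory queue
        return id;
    }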
@@ -376,29 +241,26 @@
}
// crack any parameters
- String16 summaryOption("-summary");
- bool summary = false;
String16 protoOption("-proto");
+ int chosenProto = mDumpProtoDefault;
String16 clearOption("-clear");
bool clear = false;
String16 sinceOption("-since");
nsecs_t ts_since = 0;
String16 helpOption("-help");
String16 onlyOption("-only");
- AString only;
+ std::string only;
int n = args.size();
for (int i = 0; i < n; i++) {
String8 myarg(args[i]);
if (args[i] == clearOption) {
clear = true;
- } else if (args[i] == summaryOption) {
- summary = true;
} else if (args[i] == protoOption) {
i++;
if (i < n) {
String8 value(args[i]);
- int proto = MediaAnalyticsItem::PROTO_V0; // default to original
+ int proto = MediaAnalyticsItem::PROTO_V0;
char *endp;
const char *p = value.string();
proto = strtol(p, &endp, 10);
@@ -408,8 +270,12 @@
} else if (proto > MediaAnalyticsItem::PROTO_LAST) {
proto = MediaAnalyticsItem::PROTO_LAST;
}
- mDumpProto = proto;
+ chosenProto = proto;
+ } else {
+ result.append("unable to parse value for -proto\n\n");
}
+ } else {
+ result.append("missing value for -proto\n\n");
}
} else if (args[i] == sinceOption) {
i++;
@@ -435,8 +301,7 @@
} else if (args[i] == helpOption) {
result.append("Recognized parameters:\n");
result.append("-help this help message\n");
- result.append("-proto X dump using protocol X (defaults to 1)");
- result.append("-summary show summary info\n");
+ result.append("-proto # dump using protocol #");
result.append("-clear clears out saved records\n");
result.append("-only X process records for component X\n");
result.append("-since X include records since X\n");
@@ -447,6 +312,9 @@
}
Mutex::Autolock _l(mLock);
+ // mutex between insertion and dumping the contents
+
+ mDumpProto = chosenProto;
// we ALWAYS dump this piece
snprintf(buffer, SIZE, "Dump of the %s process:\n", kServiceName);
@@ -454,19 +322,14 @@
dumpHeaders(result, ts_since);
- // want exactly 1, to avoid confusing folks that parse the output
- if (summary) {
- dumpSummaries(result, ts_since, only.c_str());
- } else {
- dumpRecent(result, ts_since, only.c_str());
- }
+ dumpRecent(result, ts_since, only.c_str());
if (clear) {
// remove everything from the finalized queue
- while (mFinalized->size() > 0) {
- MediaAnalyticsItem * oitem = *(mFinalized->begin());
- mFinalized->erase(mFinalized->begin());
+ while (mItems.size() > 0) {
+ MediaAnalyticsItem * oitem = *(mItems.begin());
+ mItems.erase(mItems.begin());
delete oitem;
mItemsDiscarded++;
}
@@ -480,7 +343,8 @@
}
// dump headers
-void MediaAnalyticsService::dumpHeaders(String8 &result, nsecs_t ts_since) {
+void MediaAnalyticsService::dumpHeaders(String8 &result, nsecs_t ts_since)
+{
const size_t SIZE = 512;
char buffer[SIZE];
@@ -497,7 +361,7 @@
snprintf(buffer, SIZE,
"Since Boot: Submissions: %8" PRId64
- " Finalizations: %8" PRId64 "\n",
+ " Accepted: %8" PRId64 "\n",
mItemsSubmitted, mItemsFinalized);
result.append(buffer);
snprintf(buffer, SIZE,
@@ -505,53 +369,17 @@
" (by Count: %" PRId64 " by Expiration: %" PRId64 ")\n",
mItemsDiscarded, mItemsDiscardedCount, mItemsDiscardedExpire);
result.append(buffer);
- snprintf(buffer, SIZE,
- "Summary Sets Discarded: %" PRId64 "\n", mSetsDiscarded);
- result.append(buffer);
if (ts_since != 0) {
snprintf(buffer, SIZE,
- "Dumping Queue entries more recent than: %" PRId64 "\n",
+ "Emitting Queue entries more recent than: %" PRId64 "\n",
(int64_t) ts_since);
result.append(buffer);
}
}
-// dump summary info
-void MediaAnalyticsService::dumpSummaries(String8 &result, nsecs_t ts_since, const char *only) {
- const size_t SIZE = 512;
- char buffer[SIZE];
- int slot = 0;
-
- snprintf(buffer, SIZE, "\nSummarized Metrics:\n");
- result.append(buffer);
-
- if (only != NULL && *only == '\0') {
- only = NULL;
- }
-
- // have each of the distillers dump records
- if (mSummarizerSets != NULL) {
- List<SummarizerSet *>::iterator itSet = mSummarizerSets->begin();
- for (; itSet != mSummarizerSets->end(); itSet++) {
- nsecs_t when = (*itSet)->getStarted();
- if (when < ts_since) {
- continue;
- }
- List<MetricsSummarizer *> *list = (*itSet)->getSummarizers();
- List<MetricsSummarizer *>::iterator it = list->begin();
- for (; it != list->end(); it++) {
- if (only != NULL && strcmp(only, (*it)->getKey()) != 0) {
- ALOGV("Told to omit '%s'", (*it)->getKey());
- }
- AString distilled = (*it)->dumpSummary(slot, only);
- result.append(distilled.c_str());
- }
- }
- }
-}
-
// the recent, detailed queues
-void MediaAnalyticsService::dumpRecent(String8 &result, nsecs_t ts_since, const char * only) {
+void MediaAnalyticsService::dumpRecent(String8 &result, nsecs_t ts_since, const char * only)
+{
const size_t SIZE = 512;
char buffer[SIZE];
@@ -562,30 +390,27 @@
// show the recently recorded records
snprintf(buffer, sizeof(buffer), "\nFinalized Metrics (oldest first):\n");
result.append(buffer);
- result.append(this->dumpQueue(mFinalized, ts_since, only));
-
- snprintf(buffer, sizeof(buffer), "\nIn-Progress Metrics (newest first):\n");
- result.append(buffer);
- result.append(this->dumpQueue(mOpen, ts_since, only));
+ result.append(this->dumpQueue(ts_since, only));
// show who is connected and injecting records?
// talk about # records fed to the 'readers'
// talk about # records we discarded, perhaps "discarded w/o reading" too
}
+
// caller has locked mLock...
-String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList) {
- return dumpQueue(theList, (nsecs_t) 0, NULL);
+String8 MediaAnalyticsService::dumpQueue() {
+ return dumpQueue((nsecs_t) 0, NULL);
}
-String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList, nsecs_t ts_since, const char * only) {
+String8 MediaAnalyticsService::dumpQueue(nsecs_t ts_since, const char * only) {
String8 result;
int slot = 0;
- if (theList->empty()) {
+ if (mItems.empty()) {
result.append("empty\n");
} else {
- List<MediaAnalyticsItem *>::iterator it = theList->begin();
- for (; it != theList->end(); it++) {
+ List<MediaAnalyticsItem *>::iterator it = mItems.begin();
+ for (; it != mItems.end(); it++) {
nsecs_t when = (*it)->getTimestamp();
if (when < ts_since) {
continue;
@@ -595,7 +420,7 @@
ALOGV("Omit '%s', it's not '%s'", (*it)->getKey().c_str(), only);
continue;
}
- AString entry = (*it)->toString(mDumpProto);
+ std::string entry = (*it)->toString(mDumpProto);
result.appendFormat("%5d: %s\n", slot, entry.c_str());
slot++;
}
@@ -606,25 +431,25 @@
//
// Our Cheap in-core, non-persistent records management.
-// XXX: rewrite this to manage persistence, etc.
// insert appropriately into queue
-void MediaAnalyticsService::saveItem(List<MediaAnalyticsItem *> *l, MediaAnalyticsItem * item, int front) {
+void MediaAnalyticsService::saveItem(MediaAnalyticsItem * item)
+{
Mutex::Autolock _l(mLock);
+ // mutex between insertion and dumping the contents
- // adding at back of queue (fifo order)
- if (front) {
- l->push_front(item);
- } else {
- l->push_back(item);
- }
+ // we want to dump 'in FIFO order', so insert at the end
+ mItems.push_back(item);
// keep removing old records the front until we're in-bounds (count)
if (mMaxRecords > 0) {
- while (l->size() > (size_t) mMaxRecords) {
- MediaAnalyticsItem * oitem = *(l->begin());
- l->erase(l->begin());
+ while (mItems.size() > (size_t) mMaxRecords) {
+ MediaAnalyticsItem * oitem = *(mItems.begin());
+ if (oitem == item) {
+ break;
+ }
+ mItems.erase(mItems.begin());
delete oitem;
mItemsDiscarded++;
mItemsDiscardedCount++;
@@ -632,17 +457,21 @@
}
// keep removing old records the front until we're in-bounds (count)
+ // NB: expired entries aren't removed until the next insertion, which could be a while
if (mMaxRecordAgeNs > 0) {
nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
- while (l->size() > 0) {
- MediaAnalyticsItem * oitem = *(l->begin());
+ while (mItems.size() > 0) {
+ MediaAnalyticsItem * oitem = *(mItems.begin());
nsecs_t when = oitem->getTimestamp();
+ if (oitem == item) {
+ break;
+ }
// careful about timejumps too
if ((now > when) && (now-when) <= mMaxRecordAgeNs) {
// this (and the rest) are recent enough to keep
break;
}
- l->erase(l->begin());
+ mItems.erase(mItems.begin());
delete oitem;
mItemsDiscarded++;
mItemsDiscardedExpire++;
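Both trimming loops now carry the same guard: eviction walks from the oldest entry and stops as soon as it would reach the record that was just inserted, so a fresh submission always survives even when the count or age bound is already exceeded. A reduced model of the count-bound case (std::list stands in for android::List; locking omitted):

    // Reduced model of the eviction guard; the real code uses android::List and mLock.
    #include <list>

    template <typename Item>
    void trimOldest(std::list<Item*>& items, Item* justInserted, size_t maxCount) {
        while (items.size() > maxCount) {
            Item* oldest = items.front();
            if (oldest == justInserted) {
                break;                  // never evict the record we just added
            }
            items.pop_front();
            delete oldest;              // discarded due to the count bound
        }
    }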
@@ -650,85 +479,14 @@
}
}
-// are they alike enough that nitem can be folded into oitem?
-static bool compatibleItems(MediaAnalyticsItem * oitem, MediaAnalyticsItem * nitem) {
-
- // general safety
- if (nitem->getUid() != oitem->getUid()) {
- return false;
- }
- if (nitem->getPid() != oitem->getPid()) {
- return false;
- }
-
- // key -- needs to match
- if (nitem->getKey() == oitem->getKey()) {
- // still in the game.
- } else {
- return false;
- }
-
- // session id -- empty field in new is allowed
- MediaAnalyticsItem::SessionID_t osession = oitem->getSessionID();
- MediaAnalyticsItem::SessionID_t nsession = nitem->getSessionID();
- if (nsession != osession) {
- // incoming '0' matches value in osession
- if (nsession != 0) {
- return false;
- }
- }
-
- return true;
-}
-
-// find the incomplete record that this will overlay
-MediaAnalyticsItem *MediaAnalyticsService::findItem(List<MediaAnalyticsItem*> *theList, MediaAnalyticsItem *nitem, bool removeit) {
- if (nitem == NULL) {
- return NULL;
- }
-
- MediaAnalyticsItem *item = NULL;
-
- Mutex::Autolock _l(mLock);
-
- for (List<MediaAnalyticsItem *>::iterator it = theList->begin();
- it != theList->end(); it++) {
- MediaAnalyticsItem *tmp = (*it);
-
- if (!compatibleItems(tmp, nitem)) {
- continue;
- }
-
- // we match! this is the one I want.
- if (removeit) {
- theList->erase(it);
- }
- item = tmp;
- break;
- }
- return item;
-}
-
-
-// delete the indicated record
-void MediaAnalyticsService::deleteItem(List<MediaAnalyticsItem *> *l, MediaAnalyticsItem *item) {
-
- Mutex::Autolock _l(mLock);
-
- for (List<MediaAnalyticsItem *>::iterator it = l->begin();
- it != l->end(); it++) {
- if ((*it)->getSessionID() != item->getSessionID())
- continue;
- delete *it;
- l->erase(it);
- break;
- }
-}
-
-static AString allowedKeys[] =
+static std::string allowedKeys[] =
{
+ "audiopolicy",
+ "audiorecord",
+ "audiotrack",
"codec",
- "extractor"
+ "extractor",
+ "nuplayer",
};
static const int nAllowedKeys = sizeof(allowedKeys) / sizeof(allowedKeys[0]);
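allowedKeys widens the set of record keys an untrusted uid may submit (the audio keys and nuplayer join codec and extractor); trusted system callers bypass the check in contentValid(), as the next hunk shows. A minimal sketch of the allowlist test, assuming the same key strings:

    // Minimal sketch of the allowlist check applied to untrusted callers.
    #include <string>

    static bool isAllowedKeyForUntrusted(const std::string& key) {
        static const std::string allowed[] = {
            "audiopolicy", "audiorecord", "audiotrack",
            "codec", "extractor", "nuplayer",
        };
        for (const auto& k : allowed) {
            if (key == k) return true;  // untrusted uid may submit this record type
        }
        return false;                   // everything else is rejected for untrusted uids
    }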
@@ -739,7 +497,7 @@
// untrusted uids can only send us a limited set of keys
if (isTrusted == false) {
// restrict to a specific set of keys
- AString key = item->getKey();
+ std::string key = item->getKey();
size_t i;
for(i = 0; i < nAllowedKeys; i++) {
@@ -764,50 +522,13 @@
return false;
}
-// insert into the appropriate summarizer.
-// we make our own copy to save/summarize
-void MediaAnalyticsService::summarize(MediaAnalyticsItem *item) {
-
- ALOGV("MediaAnalyticsService::summarize()");
-
- if (item == NULL) {
- return;
- }
-
- nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
- if (mCurrentSet == NULL
- || (mCurrentSet->getStarted() + mNewSetInterval < now)) {
- newSummarizerSet();
- }
-
- if (mCurrentSet == NULL) {
- return;
- }
-
- List<MetricsSummarizer *> *summarizers = mCurrentSet->getSummarizers();
- List<MetricsSummarizer *>::iterator it = summarizers->begin();
- for (; it != summarizers->end(); it++) {
- if ((*it)->isMine(*item)) {
- break;
- }
- }
- if (it == summarizers->end()) {
- ALOGD("no handler for type %s", item->getKey().c_str());
- return; // no handler
- }
-
- // invoke the summarizer. summarizer will make whatever copies
- // it wants; the caller retains ownership of item.
-
- (*it)->handleRecord(item);
-
-}
-
// how long we hold package info before we re-fetch it
#define PKG_EXPIRATION_NS (30*60*1000000000ll) // 30 minutes, in nsecs
// give me the package name, perhaps going to find it
-void MediaAnalyticsService::setPkgInfo(MediaAnalyticsItem *item, uid_t uid, bool setName, bool setVersion) {
+// manages its own mutex operations internally
+void MediaAnalyticsService::setPkgInfo(MediaAnalyticsItem *item, uid_t uid, bool setName, bool setVersion)
+{
ALOGV("asking for packagename to go with uid=%d", uid);
if (!setName && !setVersion) {
@@ -817,32 +538,36 @@
nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
struct UidToPkgMap mapping;
- mapping.uid = (-1);
+ mapping.uid = (uid_t)(-1);
- ssize_t i = mPkgMappings.indexOfKey(uid);
- if (i >= 0) {
- mapping = mPkgMappings.valueAt(i);
- ALOGV("Expiration? uid %d expiration %" PRId64 " now %" PRId64,
- uid, mapping.expiration, now);
- if (mapping.expiration < now) {
- // purge our current entry and re-query
- ALOGV("entry for uid %d expired, now= %" PRId64 "", uid, now);
- mPkgMappings.removeItemsAt(i, 1);
- // could cheat and use a goto back to the top of the routine.
- // a good compiler should recognize the local tail recursion...
- return setPkgInfo(item, uid, setName, setVersion);
+ {
+ Mutex::Autolock _l(mLock_mappings);
+ int i = mPkgMappings.indexOfKey(uid);
+ if (i >= 0) {
+ mapping = mPkgMappings.valueAt(i);
+ ALOGV("Expiration? uid %d expiration %" PRId64 " now %" PRId64,
+ uid, mapping.expiration, now);
+ if (mapping.expiration <= now) {
+ // purge the stale entry and fall into re-fetching
+ ALOGV("entry for uid %d expired, now= %" PRId64 "", uid, now);
+ mPkgMappings.removeItemsAt(i);
+ mapping.uid = (uid_t)(-1);
+ }
}
- } else {
- AString pkg;
+ }
+
+ // if we did not find it
+ if (mapping.uid == (uid_t)(-1)) {
+ std::string pkg;
std::string installer = "";
- int32_t versionCode = 0;
+ int64_t versionCode = 0;
struct passwd *pw = getpwuid(uid);
if (pw) {
pkg = pw->pw_name;
}
- // find the proper value -- should we cache this binder??
+ // find the proper value
sp<IBinder> binder = NULL;
sp<IServiceManager> sm = defaultServiceManager();
@@ -875,7 +600,7 @@
}
// strip any leading "shared:" strings that came back
- if (pkg.startsWith("shared:")) {
+ if (pkg.compare(0, 7, "shared:") == 0) {
pkg.erase(0, 7);
}
@@ -905,7 +630,7 @@
}
- ALOGV("package '%s' installed by '%s' versioncode %d / %08x",
+ ALOGV("package '%s' installed by '%s' versioncode %" PRId64 " / %" PRIx64,
pkg.c_str(), installer.c_str(), versionCode, versionCode);
if (strncmp(installer.c_str(), "com.android.", 12) == 0) {
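The restructured setPkgInfo() replaces the tail-recursive retry with a straight-line pattern: look up the uid under mLock_mappings, purge the entry if it has expired, and fall through to a fresh package-manager query when nothing usable remains. A self-contained sketch of that cache behaviour (std::map stands in for the KeyedVector, and the names below are illustrative):

    // Illustrative TTL-cache lookup mirroring the restructured setPkgInfo().
    #include <cstdint>
    #include <map>
    #include <string>

    struct PkgEntry {
        std::string pkg;
        int64_t versionCode;
        int64_t expirationNs;
    };

    // Returns true and fills *out only when a fresh (unexpired) entry exists;
    // a stale entry is erased so the caller falls through to a fresh query.
    bool lookupCached(std::map<uint32_t, PkgEntry>& cache, uint32_t uid,
                      int64_t nowNs, PkgEntry* out) {
        auto it = cache.find(uid);
        if (it == cache.end()) return false;
        if (it->second.expirationNs <= nowNs) {
            cache.erase(it);            // expired: purge and re-fetch
            return false;
        }
        *out = it->second;
        return true;
    }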
diff --git a/services/mediaanalytics/MediaAnalyticsService.h b/services/mediaanalytics/MediaAnalyticsService.h
index 52e4631..b3c902a 100644
--- a/services/mediaanalytics/MediaAnalyticsService.h
+++ b/services/mediaanalytics/MediaAnalyticsService.h
@@ -28,9 +28,6 @@
#include <media/IMediaAnalyticsService.h>
-#include "MetricsSummarizer.h"
-
-
namespace android {
class MediaAnalyticsService : public BnMediaAnalyticsService
@@ -56,7 +53,6 @@
int64_t mItemsDiscarded;
int64_t mItemsDiscardedExpire;
int64_t mItemsDiscardedCount;
- int64_t mSetsDiscarded;
MediaAnalyticsItem::SessionID_t mLastSessionID;
// partitioned a bit so we don't over serialize
@@ -79,54 +75,15 @@
bool contentValid(MediaAnalyticsItem *item, bool isTrusted);
bool rateLimited(MediaAnalyticsItem *);
- // the ones that are still open
- // (newest at front) since we keep looking for them
- List<MediaAnalyticsItem *> *mOpen;
- // the ones we've finalized
// (oldest at front) so it prints nicely for dumpsys
- List<MediaAnalyticsItem *> *mFinalized;
- // searching within these queues: queue, key
- MediaAnalyticsItem *findItem(List<MediaAnalyticsItem *> *,
- MediaAnalyticsItem *, bool removeit);
-
- // summarizers
- void summarize(MediaAnalyticsItem *item);
- class SummarizerSet {
- nsecs_t mStarted;
- List<MetricsSummarizer *> *mSummarizers;
-
- public:
- void appendSummarizer(MetricsSummarizer *s) {
- if (s) {
- mSummarizers->push_back(s);
- }
- };
- nsecs_t getStarted() { return mStarted;}
- void setStarted(nsecs_t started) {mStarted = started;}
- List<MetricsSummarizer *> *getSummarizers() { return mSummarizers;}
-
- SummarizerSet();
- ~SummarizerSet();
- };
- void newSummarizerSet();
- List<SummarizerSet *> *mSummarizerSets;
- SummarizerSet *mCurrentSet;
- List<MetricsSummarizer *> *getFirstSet() {
- List<SummarizerSet *>::iterator first = mSummarizerSets->begin();
- if (first != mSummarizerSets->end()) {
- return (*first)->getSummarizers();
- }
- return NULL;
- }
-
- void saveItem(MediaAnalyticsItem);
- void saveItem(List<MediaAnalyticsItem *> *, MediaAnalyticsItem *, int);
- void deleteItem(List<MediaAnalyticsItem *> *, MediaAnalyticsItem *);
+ List<MediaAnalyticsItem *> mItems;
+ void saveItem(MediaAnalyticsItem *);
// support for generating output
int mDumpProto;
- String8 dumpQueue(List<MediaAnalyticsItem*> *);
- String8 dumpQueue(List<MediaAnalyticsItem*> *, nsecs_t, const char *only);
+ int mDumpProtoDefault;
+ String8 dumpQueue();
+ String8 dumpQueue(nsecs_t, const char *only);
void dumpHeaders(String8 &result, nsecs_t ts_since);
void dumpSummaries(String8 &result, nsecs_t ts_since, const char * only);
@@ -135,9 +92,9 @@
// mapping uids to package names
struct UidToPkgMap {
uid_t uid;
- AString pkg;
- AString installer;
- int32_t versionCode;
+ std::string pkg;
+ std::string installer;
+ int64_t versionCode;
nsecs_t expiration;
};
diff --git a/services/mediaanalytics/MetricsSummarizer.cpp b/services/mediaanalytics/MetricsSummarizer.cpp
deleted file mode 100644
index 93fe0ec..0000000
--- a/services/mediaanalytics/MetricsSummarizer.cpp
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MetricsSummarizer"
-#include <utils/Log.h>
-
-#include <stdlib.h>
-#include <stdint.h>
-#include <inttypes.h>
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-
-#include "MetricsSummarizer.h"
-
-
-namespace android {
-
-#define DEBUG_SORT 0
-#define DEBUG_QUEUE 0
-
-
-MetricsSummarizer::MetricsSummarizer(const char *key)
- : mIgnorables(NULL)
-{
- ALOGV("MetricsSummarizer::MetricsSummarizer");
-
- if (key == NULL) {
- mKey = key;
- } else {
- mKey = strdup(key);
- }
-
- mSummaries = new List<MediaAnalyticsItem *>();
-}
-
-MetricsSummarizer::~MetricsSummarizer()
-{
- ALOGV("MetricsSummarizer::~MetricsSummarizer");
- if (mKey) {
- free((void *)mKey);
- mKey = NULL;
- }
-
- // clear the list of items we have saved
- while (mSummaries->size() > 0) {
- MediaAnalyticsItem * oitem = *(mSummaries->begin());
- if (DEBUG_QUEUE) {
- ALOGD("zap old record: key %s sessionID %" PRId64 " ts %" PRId64 "",
- oitem->getKey().c_str(), oitem->getSessionID(),
- oitem->getTimestamp());
- }
- mSummaries->erase(mSummaries->begin());
- delete oitem;
- }
-}
-
-// so we know what summarizer we were using
-const char *MetricsSummarizer::getKey() {
- const char *value = mKey;
- if (value == NULL) {
- value = "unknown";
- }
- return value;
-}
-
-// should the record be given to this summarizer
-bool MetricsSummarizer::isMine(MediaAnalyticsItem &item)
-{
- if (mKey == NULL)
- return true;
- AString itemKey = item.getKey();
- if (strcmp(mKey, itemKey.c_str()) != 0) {
- return false;
- }
- return true;
-}
-
-AString MetricsSummarizer::dumpSummary(int &slot)
-{
- return dumpSummary(slot, NULL);
-}
-
-AString MetricsSummarizer::dumpSummary(int &slot, const char *only)
-{
- AString value = "";
-
- List<MediaAnalyticsItem *>::iterator it = mSummaries->begin();
- if (it != mSummaries->end()) {
- char buf[16]; // enough for "#####: "
- for (; it != mSummaries->end(); it++) {
- if (only != NULL && strcmp(only, (*it)->getKey().c_str()) != 0) {
- continue;
- }
- AString entry = (*it)->toString();
- snprintf(buf, sizeof(buf), "%5d: ", slot);
- value.append(buf);
- value.append(entry.c_str());
- value.append("\n");
- slot++;
- }
- }
- return value;
-}
-
-void MetricsSummarizer::setIgnorables(const char **ignorables) {
- mIgnorables = ignorables;
-}
-
-const char **MetricsSummarizer::getIgnorables() {
- return mIgnorables;
-}
-
-void MetricsSummarizer::handleRecord(MediaAnalyticsItem *item) {
-
- ALOGV("MetricsSummarizer::handleRecord() for %s",
- item == NULL ? "<nothing>" : item->toString().c_str());
-
- if (item == NULL) {
- return;
- }
-
- List<MediaAnalyticsItem *>::iterator it = mSummaries->begin();
- for (; it != mSummaries->end(); it++) {
- bool good = sameAttributes((*it), item, getIgnorables());
- ALOGV("Match against %s says %d", (*it)->toString().c_str(), good);
- if (good)
- break;
- }
- if (it == mSummaries->end()) {
- ALOGV("save new record");
- MediaAnalyticsItem *nitem = item->dup();
- if (nitem == NULL) {
- ALOGE("unable to save MediaMetrics record");
- }
- sortProps(nitem);
- nitem->setInt32("aggregated",1);
- mergeRecord(*nitem, *item);
- mSummaries->push_back(nitem);
- } else {
- ALOGV("increment existing record");
- (*it)->addInt32("aggregated",1);
- mergeRecord(*(*it), *item);
- }
-}
-
-void MetricsSummarizer::mergeRecord(MediaAnalyticsItem &/*have*/, MediaAnalyticsItem &/*item*/) {
- // default is no further massaging.
- ALOGV("MetricsSummarizer::mergeRecord() [default]");
- return;
-}
-
-// keep some stats for things: sums, counts, standard deviation
-// the integer version -- all of these pieces are in 64 bits
-void MetricsSummarizer::minMaxVar64(MediaAnalyticsItem &summation, const char *key, int64_t value) {
- if (key == NULL)
- return;
- int len = strlen(key) + 32;
- char *tmpKey = (char *)malloc(len);
-
- if (tmpKey == NULL) {
- return;
- }
-
- // N - count of samples
- snprintf(tmpKey, len, "%s.n", key);
- summation.addInt64(tmpKey, 1);
-
- // zero - count of samples that are zero
- if (value == 0) {
- snprintf(tmpKey, len, "%s.zero", key);
- int64_t zero = 0;
- (void) summation.getInt64(tmpKey,&zero);
- zero++;
- summation.setInt64(tmpKey, zero);
- }
-
- // min
- snprintf(tmpKey, len, "%s.min", key);
- int64_t min = value;
- if (summation.getInt64(tmpKey,&min)) {
- if (min > value) {
- summation.setInt64(tmpKey, value);
- }
- } else {
- summation.setInt64(tmpKey, value);
- }
-
- // max
- snprintf(tmpKey, len, "%s.max", key);
- int64_t max = value;
- if (summation.getInt64(tmpKey,&max)) {
- if (max < value) {
- summation.setInt64(tmpKey, value);
- }
- } else {
- summation.setInt64(tmpKey, value);
- }
-
- // components for mean, stddev;
- // stddev = sqrt(1/4*(sumx2 - (2*sumx*sumx/n) + ((sumx/n)^2)))
- // sum x
- snprintf(tmpKey, len, "%s.sumX", key);
- summation.addInt64(tmpKey, value);
- // sum x^2
- snprintf(tmpKey, len, "%s.sumX2", key);
- summation.addInt64(tmpKey, value*value);
-
-
- // last thing we do -- remove the base key from the summation
- // record so we won't get confused about it having both individual
- // and summary information in there.
- summation.removeProp(key);
-
- free(tmpKey);
-}
-
-
-//
-// Comparators
-//
-
-// testing that all of 'single' is in 'summ'
-// and that the values match.
-// 'summ' may have extra fields.
-// 'ignorable' is a set of things that we don't worry about matching up
-// (usually time- or count-based values we'll sum elsewhere)
-bool MetricsSummarizer::sameAttributes(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignorable) {
-
- if (single == NULL || summ == NULL) {
- return false;
- }
- ALOGV("MetricsSummarizer::sameAttributes(): summ %s", summ->toString().c_str());
- ALOGV("MetricsSummarizer::sameAttributes(): single %s", single->toString().c_str());
-
- // keep different sources/users separate
- if (single->mUid != summ->mUid) {
- return false;
- }
-
- // this can be made better.
- for(size_t i=0;i<single->mPropCount;i++) {
- MediaAnalyticsItem::Prop *prop1 = &(single->mProps[i]);
- const char *attrName = prop1->mName;
-
- // is it something we should ignore
- if (ignorable != NULL) {
- const char **ig = ignorable;
- for (;*ig; ig++) {
- if (strcmp(*ig, attrName) == 0) {
- break;
- }
- }
- if (*ig) {
- ALOGV("we don't mind that it has attr '%s'", attrName);
- continue;
- }
- }
-
- MediaAnalyticsItem::Prop *prop2 = summ->findProp(attrName);
- if (prop2 == NULL) {
- ALOGV("summ doesn't have this attr");
- return false;
- }
- if (prop1->mType != prop2->mType) {
- ALOGV("mismatched attr types");
- return false;
- }
- switch (prop1->mType) {
- case MediaAnalyticsItem::kTypeInt32:
- if (prop1->u.int32Value != prop2->u.int32Value) {
- ALOGV("mismatch values");
- return false;
- }
- break;
- case MediaAnalyticsItem::kTypeInt64:
- if (prop1->u.int64Value != prop2->u.int64Value) {
- ALOGV("mismatch values");
- return false;
- }
- break;
- case MediaAnalyticsItem::kTypeDouble:
- // XXX: watch out for floating point comparisons!
- if (prop1->u.doubleValue != prop2->u.doubleValue) {
- ALOGV("mismatch values");
- return false;
- }
- break;
- case MediaAnalyticsItem::kTypeCString:
- if (strcmp(prop1->u.CStringValue, prop2->u.CStringValue) != 0) {
- ALOGV("mismatch values");
- return false;
- }
- break;
- case MediaAnalyticsItem::kTypeRate:
- if (prop1->u.rate.count != prop2->u.rate.count) {
- ALOGV("mismatch values");
- return false;
- }
- if (prop1->u.rate.duration != prop2->u.rate.duration) {
- ALOGV("mismatch values");
- return false;
- }
- break;
- default:
- ALOGV("mismatch values in default type");
- return false;
- }
- }
-
- return true;
-}
-
-
-int MetricsSummarizer::PropSorter(const void *a, const void *b) {
- MediaAnalyticsItem::Prop *ai = (MediaAnalyticsItem::Prop *)a;
- MediaAnalyticsItem::Prop *bi = (MediaAnalyticsItem::Prop *)b;
- return strcmp(ai->mName, bi->mName);
-}
-
-// we sort in the summaries so that it looks pretty in the dumpsys
-void MetricsSummarizer::sortProps(MediaAnalyticsItem *item) {
- if (item->mPropCount != 0) {
- qsort(item->mProps, item->mPropCount,
- sizeof(MediaAnalyticsItem::Prop), MetricsSummarizer::PropSorter);
- }
-}
-
-} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizer.h b/services/mediaanalytics/MetricsSummarizer.h
deleted file mode 100644
index a9f0786..0000000
--- a/services/mediaanalytics/MetricsSummarizer.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef ANDROID_METRICSSUMMARIZER_H
-#define ANDROID_METRICSSUMMARIZER_H
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-
-
-namespace android {
-
-class MetricsSummarizer
-{
-
- public:
-
- MetricsSummarizer(const char *key);
- virtual ~MetricsSummarizer();
-
- // show the key
- const char * getKey();
-
- // should the record be given to this summarizer
- bool isMine(MediaAnalyticsItem &item);
-
- // hand the record to this summarizer
- void handleRecord(MediaAnalyticsItem *item);
-
- virtual void mergeRecord(MediaAnalyticsItem &have, MediaAnalyticsItem &incoming);
-
- // dump the summarized records (for dumpsys)
- AString dumpSummary(int &slot);
- AString dumpSummary(int &slot, const char *only);
-
- void setIgnorables(const char **);
- const char **getIgnorables();
-
- protected:
-
- // various comparators
- // "do these records have same attributes and values in those attrs"
- bool sameAttributes(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignoreables);
-
- void minMaxVar64(MediaAnalyticsItem &summ, const char *key, int64_t value);
-
- static int PropSorter(const void *a, const void *b);
- void sortProps(MediaAnalyticsItem *item);
-
- private:
- const char *mKey;
- const char **mIgnorables;
- List<MediaAnalyticsItem *> *mSummaries;
-
-
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_METRICSSUMMARIZER_H
diff --git a/services/mediaanalytics/MetricsSummarizerCodec.cpp b/services/mediaanalytics/MetricsSummarizerCodec.cpp
deleted file mode 100644
index 6af3c9a..0000000
--- a/services/mediaanalytics/MetricsSummarizerCodec.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MetricsSummarizerCodec"
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <inttypes.h>
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-
-#include "MetricsSummarizer.h"
-#include "MetricsSummarizerCodec.h"
-
-
-
-
-namespace android {
-
-MetricsSummarizerCodec::MetricsSummarizerCodec(const char *key)
- : MetricsSummarizer(key)
-{
- ALOGV("MetricsSummarizerCodec::MetricsSummarizerCodec");
-}
-
-} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerCodec.h b/services/mediaanalytics/MetricsSummarizerCodec.h
deleted file mode 100644
index c01196f..0000000
--- a/services/mediaanalytics/MetricsSummarizerCodec.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef ANDROID_METRICSSUMMARIZERCODEC_H
-#define ANDROID_METRICSSUMMARIZERCODEC_H
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-#include "MetricsSummarizer.h"
-
-
-namespace android {
-
-class MetricsSummarizerCodec : public MetricsSummarizer
-{
-
- public:
-
- MetricsSummarizerCodec(const char *key);
- virtual ~MetricsSummarizerCodec() {};
-
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_METRICSSUMMARIZERCODEC_H
diff --git a/services/mediaanalytics/MetricsSummarizerExtractor.cpp b/services/mediaanalytics/MetricsSummarizerExtractor.cpp
deleted file mode 100644
index 190f87d..0000000
--- a/services/mediaanalytics/MetricsSummarizerExtractor.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MetricsSummarizerExtractor"
-#include <utils/Log.h>
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-
-#include "MetricsSummarizer.h"
-#include "MetricsSummarizerExtractor.h"
-
-
-
-
-namespace android {
-
-MetricsSummarizerExtractor::MetricsSummarizerExtractor(const char *key)
- : MetricsSummarizer(key)
-{
- ALOGV("MetricsSummarizerExtractor::MetricsSummarizerExtractor");
-}
-
-} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerExtractor.h b/services/mediaanalytics/MetricsSummarizerExtractor.h
deleted file mode 100644
index eee052b..0000000
--- a/services/mediaanalytics/MetricsSummarizerExtractor.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef ANDROID_METRICSSUMMARIZEREXTRACTOR_H
-#define ANDROID_METRICSSUMMARIZEREXTRACTOR_H
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-#include "MetricsSummarizer.h"
-
-
-namespace android {
-
-class MetricsSummarizerExtractor : public MetricsSummarizer
-{
-
- public:
-
- MetricsSummarizerExtractor(const char *key);
- virtual ~MetricsSummarizerExtractor() {};
-
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_METRICSSUMMARIZEREXTRACTOR_H
diff --git a/services/mediaanalytics/MetricsSummarizerPlayer.cpp b/services/mediaanalytics/MetricsSummarizerPlayer.cpp
deleted file mode 100644
index f882cb9..0000000
--- a/services/mediaanalytics/MetricsSummarizerPlayer.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MetricsSummarizerPlayer"
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <inttypes.h>
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-
-#include "MetricsSummarizer.h"
-#include "MetricsSummarizerPlayer.h"
-
-
-
-
-namespace android {
-
-static const char *player_ignorable[] = {
- "android.media.mediaplayer.durationMs",
- "android.media.mediaplayer.playingMs",
- "android.media.mediaplayer.frames",
- "android.media.mediaplayer.dropped",
- 0
-};
-
-MetricsSummarizerPlayer::MetricsSummarizerPlayer(const char *key)
- : MetricsSummarizer(key)
-{
- ALOGV("MetricsSummarizerPlayer::MetricsSummarizerPlayer");
- setIgnorables(player_ignorable);
-}
-
-// NB: this is also called for the first time -- so summation == item
-// Not sure if we need a flag for that or not.
-// In this particular mergeRecord() code -- we're' ok for this.
-void MetricsSummarizerPlayer::mergeRecord(MediaAnalyticsItem &summation, MediaAnalyticsItem &item) {
-
- ALOGV("MetricsSummarizerPlayer::mergeRecord()");
-
-
- int64_t duration = 0;
- if (item.getInt64("android.media.mediaplayer.durationMs", &duration)) {
- ALOGV("found durationMs of %" PRId64, duration);
- minMaxVar64(summation, "android.media.mediaplayer.durationMs", duration);
- }
-
- int64_t playing = 0;
- if (item.getInt64("android.media.mediaplayer.playingMs", &playing)) {
- ALOGV("found playingMs of %" PRId64, playing);
- }
- if (playing >= 0) {
- minMaxVar64(summation,"android.media.mediaplayer.playingMs",playing);
- }
-
- int64_t frames = 0;
- if (item.getInt64("android.media.mediaplayer.frames", &frames)) {
- ALOGV("found framess of %" PRId64, frames);
- }
- if (frames >= 0) {
- minMaxVar64(summation,"android.media.mediaplayer.frames",frames);
- }
-
- int64_t dropped = 0;
- if (item.getInt64("android.media.mediaplayer.dropped", &dropped)) {
- ALOGV("found dropped of %" PRId64, dropped);
- }
- if (dropped >= 0) {
- minMaxVar64(summation,"android.media.mediaplayer.dropped",dropped);
- }
-}
-
-} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerPlayer.h b/services/mediaanalytics/MetricsSummarizerPlayer.h
deleted file mode 100644
index ad1bf74..0000000
--- a/services/mediaanalytics/MetricsSummarizerPlayer.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef ANDROID_METRICSSUMMARIZERPLAYER_H
-#define ANDROID_METRICSSUMMARIZERPLAYER_H
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-#include "MetricsSummarizer.h"
-
-
-namespace android {
-
-class MetricsSummarizerPlayer : public MetricsSummarizer
-{
-
- public:
-
- MetricsSummarizerPlayer(const char *key);
- virtual ~MetricsSummarizerPlayer() {};
-
- virtual void mergeRecord(MediaAnalyticsItem &have, MediaAnalyticsItem &incoming);
-
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_METRICSSUMMARIZERPLAYER_H
diff --git a/services/mediaanalytics/MetricsSummarizerRecorder.cpp b/services/mediaanalytics/MetricsSummarizerRecorder.cpp
deleted file mode 100644
index c2919c3..0000000
--- a/services/mediaanalytics/MetricsSummarizerRecorder.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MetricsSummarizerRecorder"
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <inttypes.h>
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-
-#include "MetricsSummarizer.h"
-#include "MetricsSummarizerRecorder.h"
-
-
-
-
-namespace android {
-
-MetricsSummarizerRecorder::MetricsSummarizerRecorder(const char *key)
- : MetricsSummarizer(key)
-{
- ALOGV("MetricsSummarizerRecorder::MetricsSummarizerRecorder");
-}
-
-} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerRecorder.h b/services/mediaanalytics/MetricsSummarizerRecorder.h
deleted file mode 100644
index 963baab..0000000
--- a/services/mediaanalytics/MetricsSummarizerRecorder.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef ANDROID_METRICSSUMMARIZERRECORDER_H
-#define ANDROID_METRICSSUMMARIZERRECORDER_H
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
-#include <media/IMediaAnalyticsService.h>
-#include "MetricsSummarizer.h"
-
-
-namespace android {
-
-class MetricsSummarizerRecorder : public MetricsSummarizer
-{
-
- public:
-
- MetricsSummarizerRecorder(const char *key);
- virtual ~MetricsSummarizerRecorder() {};
-
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_METRICSSUMMARIZERRECORDER_H
diff --git a/services/mediaanalytics/OWNERS b/services/mediaanalytics/OWNERS
new file mode 100644
index 0000000..9af258b
--- /dev/null
+++ b/services/mediaanalytics/OWNERS
@@ -0,0 +1 @@
+essick@google.com
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 1ead944..db5f0ff 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -1,32 +1,38 @@
LOCAL_PATH := $(call my-dir)
-# service library
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := MediaCodecService.cpp
-LOCAL_SHARED_LIBRARIES := \
- libmedia_omx \
- libbinder \
- libgui \
- libutils \
- liblog \
- libstagefright_omx \
- libstagefright_xmlparser
-LOCAL_MODULE:= libmediacodecservice
-LOCAL_VENDOR_MODULE := true
-LOCAL_32_BIT_ONLY := true
-include $(BUILD_SHARED_LIBRARY)
+_software_codecs := \
+ libstagefright_soft_aacdec \
+ libstagefright_soft_aacenc \
+ libstagefright_soft_amrdec \
+ libstagefright_soft_amrnbenc \
+ libstagefright_soft_amrwbenc \
+ libstagefright_soft_avcdec \
+ libstagefright_soft_avcenc \
+ libstagefright_soft_flacdec \
+ libstagefright_soft_flacenc \
+ libstagefright_soft_g711dec \
+ libstagefright_soft_gsmdec \
+ libstagefright_soft_hevcdec \
+ libstagefright_soft_mp3dec \
+ libstagefright_soft_mpeg2dec \
+ libstagefright_soft_mpeg4dec \
+ libstagefright_soft_mpeg4enc \
+ libstagefright_soft_opusdec \
+ libstagefright_soft_rawdec \
+ libstagefright_soft_vorbisdec \
+ libstagefright_soft_vpxdec \
+ libstagefright_soft_vpxenc \
# service executable
include $(CLEAR_VARS)
# seccomp is not required for coverage build.
ifneq ($(NATIVE_COVERAGE),true)
-LOCAL_REQUIRED_MODULES_arm := mediacodec.policy
-LOCAL_REQUIRED_MODULES_x86 := mediacodec.policy
+LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediacodec.policy
+LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediacodec.policy
endif
LOCAL_SRC_FILES := main_codecservice.cpp
LOCAL_SHARED_LIBRARIES := \
libmedia_omx \
- libmediacodecservice \
libbinder \
libutils \
liblog \
@@ -44,7 +50,17 @@
LOCAL_MODULE_RELATIVE_PATH := hw
LOCAL_VENDOR_MODULE := true
LOCAL_32_BIT_ONLY := true
+# Since this is a 32-bit-only module, only the 32-bit versions of the codecs are installed.
+# TODO(b/72343507): eliminate the need for manually adding .vendor suffix. This should be done
+# by the build system.
+LOCAL_REQUIRED_MODULES += \
+$(foreach codec,$(_software_codecs),\
+ $(eval _vendor_suffix := $(if $(BOARD_VNDK_VERSION),.vendor))\
+ $(codec)$(_vendor_suffix)\
+)
+_software_codecs :=
LOCAL_INIT_RC := android.hardware.media.omx@1.0-service.rc
+
include $(BUILD_EXECUTABLE)
# service seccomp policy
@@ -56,7 +72,11 @@
# mediacodec runs in 32-bit compatibility mode. For 64-bit architectures,
# use the 32-bit policy
ifdef TARGET_2ND_ARCH
+ ifneq ($(TARGET_TRANSLATE_2ND_ARCH),true)
LOCAL_SRC_FILES := seccomp_policy/mediacodec-$(TARGET_2ND_ARCH).policy
+ else
+ LOCAL_SRC_FILES := seccomp_policy/mediacodec-$(TARGET_ARCH).policy
+ endif
else
LOCAL_SRC_FILES := seccomp_policy/mediacodec-$(TARGET_ARCH).policy
endif
diff --git a/services/mediacodec/MediaCodecService.cpp b/services/mediacodec/MediaCodecService.cpp
deleted file mode 100644
index 6b510c6..0000000
--- a/services/mediacodec/MediaCodecService.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaCodecService"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include "MediaCodecService.h"
-
-namespace android {
-
-sp<IOMX> MediaCodecService::getOMX() {
-
- Mutex::Autolock autoLock(mOMXLock);
-
- if (mOMX.get() == NULL) {
- mOMX = new OMX();
- }
-
- return mOMX;
-}
-
-sp<IOMXStore> MediaCodecService::getOMXStore() {
-
- Mutex::Autolock autoLock(mOMXStoreLock);
-
- if (mOMXStore.get() == NULL) {
- mOMXStore = new OMXStore();
- }
-
- return mOMXStore;
-}
-
-status_t MediaCodecService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
- uint32_t flags)
-{
- return BnMediaCodecService::onTransact(code, data, reply, flags);
-}
-
-} // namespace android
diff --git a/services/mediacodec/MediaCodecService.h b/services/mediacodec/MediaCodecService.h
deleted file mode 100644
index 9301135..0000000
--- a/services/mediacodec/MediaCodecService.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIA_CODEC_SERVICE_H
-#define ANDROID_MEDIA_CODEC_SERVICE_H
-
-#include <binder/BinderService.h>
-#include <media/IMediaCodecService.h>
-#include <media/stagefright/omx/OMX.h>
-#include <media/stagefright/omx/OMXStore.h>
-
-namespace android {
-
-class MediaCodecService : public BinderService<MediaCodecService>,
- public BnMediaCodecService
-{
- friend class BinderService<MediaCodecService>; // for MediaCodecService()
-public:
- MediaCodecService() : BnMediaCodecService() { }
- virtual ~MediaCodecService() { }
- virtual void onFirstRef() { }
-
- static const char* getServiceName() { return "media.codec"; }
-
- virtual sp<IOMX> getOMX();
-
- virtual sp<IOMXStore> getOMXStore();
-
- virtual status_t onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags);
-
-private:
- Mutex mOMXLock;
- sp<IOMX> mOMX;
- Mutex mOMXStoreLock;
- sp<IOMXStore> mOMXStore;
-};
-
-} // namespace android
-
-#endif // ANDROID_MEDIA_CODEC_SERVICE_H
diff --git a/services/mediacodec/android.hardware.media.omx@1.0-service.rc b/services/mediacodec/android.hardware.media.omx@1.0-service.rc
index ec51d65..3ef9a85 100644
--- a/services/mediacodec/android.hardware.media.omx@1.0-service.rc
+++ b/services/mediacodec/android.hardware.media.omx@1.0-service.rc
@@ -1,4 +1,4 @@
-service mediacodec /vendor/bin/hw/android.hardware.media.omx@1.0-service
+service vendor.media.omx /vendor/bin/hw/android.hardware.media.omx@1.0-service
class main
user mediacodec
group camera drmrpc mediadrm
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index 79d6da5..51619f6 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -15,26 +15,19 @@
** limitations under the License.
*/
-#include <fcntl.h>
-#include <sys/prctl.h>
-#include <sys/wait.h>
-#include <binder/IPCThreadState.h>
-#include <binder/ProcessState.h>
-#include <binder/IServiceManager.h>
-#include <cutils/properties.h>
-
-#include <string>
-
#include <android-base/logging.h>
// from LOCAL_C_INCLUDES
-#include "MediaCodecService.h"
#include "minijail.h"
+#include <binder/ProcessState.h>
#include <hidl/HidlTransportSupport.h>
#include <media/stagefright/omx/1.0/Omx.h>
#include <media/stagefright/omx/1.0/OmxStore.h>
+#include <media/CodecServiceRegistrant.h>
+#include <dlfcn.h>
+
using namespace android;
// Must match location in Android.mk.
@@ -45,21 +38,32 @@
int main(int argc __unused, char** argv)
{
+ strcpy(argv[0], "media.codec");
LOG(INFO) << "mediacodecservice starting";
- bool treble = property_get_bool("persist.media.treble_omx", true);
- if (treble) {
- android::ProcessState::initWithDriver("/dev/vndbinder");
- }
-
signal(SIGPIPE, SIG_IGN);
SetUpMinijail(kSystemSeccompPolicyPath, kVendorSeccompPolicyPath);
- strcpy(argv[0], "media.codec");
+ android::ProcessState::initWithDriver("/dev/vndbinder");
+ android::ProcessState::self()->startThreadPool();
::android::hardware::configureRpcThreadpool(64, false);
- sp<ProcessState> proc(ProcessState::self());
- if (treble) {
+ // Registration of customized codec services
+ void *registrantLib = dlopen(
+ "libmedia_codecserviceregistrant.so",
+ RTLD_NOW | RTLD_LOCAL);
+ if (registrantLib) {
+ RegisterCodecServicesFunc registerCodecServices =
+ reinterpret_cast<RegisterCodecServicesFunc>(
+ dlsym(registrantLib, "RegisterCodecServices"));
+ if (registerCodecServices) {
+ registerCodecServices();
+ } else {
+ LOG(WARNING) << "Cannot register additional services "
+ "-- corrupted library.";
+ }
+ } else {
+ // Default codec services
using namespace ::android::hardware::media::omx::V1_0;
sp<IOmxStore> omxStore = new implementation::OmxStore();
if (omxStore == nullptr) {
@@ -73,13 +77,9 @@
} else if (omx->registerAsService() != OK) {
LOG(ERROR) << "Cannot register IOmx HAL service.";
} else {
- LOG(INFO) << "Treble OMX service created.";
+ LOG(INFO) << "IOmx HAL service created.";
}
- } else {
- MediaCodecService::instantiate();
- LOG(INFO) << "Non-Treble OMX service created.";
}
- ProcessState::self()->startThreadPool();
- IPCThreadState::self()->joinThreadPool();
+ ::android::hardware::joinRpcThreadpool();
}
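
For reference, a minimal standalone sketch of the dlopen-based registrant loading that main_codecservice.cpp now performs (library and symbol names follow the hunk above; the no-argument signature and the fallback body are assumptions for illustration, not the actual AOSP code):

#include <dlfcn.h>
#include <iostream>

// Assumed signature: the registrant entry point takes no arguments.
using RegisterCodecServicesFunc = void (*)();

static void registerDefaultOmxServices() {
    // Placeholder for the IOmxStore/IOmx registration done in the "else" branch above.
    std::cout << "registering default OMX services\n";
}

int main() {
    // Try the optional registrant library first; fall back to the defaults if it is absent.
    void *registrantLib = dlopen("libmedia_codecserviceregistrant.so", RTLD_NOW | RTLD_LOCAL);
    if (registrantLib != nullptr) {
        auto registerCodecServices = reinterpret_cast<RegisterCodecServicesFunc>(
                dlsym(registrantLib, "RegisterCodecServices"));
        if (registerCodecServices != nullptr) {
            registerCodecServices();
        } else {
            std::cerr << "registrant library present but symbol missing\n";
        }
    } else {
        registerDefaultOmxServices();
    }
    return 0;
}
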
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
index a751b4c..6ec8895 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -55,18 +55,4 @@
getdents64: 1
getrandom: 1
-# for attaching to debuggerd on process crash
-sigaction: 1
-tgkill: 1
-socket: 1
-connect: 1
-fcntl64: 1
-rt_tgsigqueueinfo: 1
-geteuid32: 1
-getgid32: 1
-getegid32: 1
-getgroups32: 1
-recvmsg: 1
-getpid: 1
-gettid: 1
-process_vm_readv: 1
+@include /system/etc/seccomp_policy/crash_dump.arm.policy
diff --git a/services/mediacodec/seccomp_policy/mediacodec-x86.policy b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
index dc2c04f..bbbe552 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-x86.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
@@ -16,12 +16,14 @@
mprotect: 1
prctl: 1
openat: 1
+open: 1
getuid32: 1
writev: 1
ioctl: 1
close: 1
mmap2: 1
fstat64: 1
+stat64: 1
madvise: 1
fstatat64: 1
futex: 1
@@ -47,23 +49,10 @@
write: 1
nanosleep: 1
-# for attaching to debuggerd on process crash
-socketcall: 1
-sigaction: 1
-tgkill: 1
-rt_sigprocmask: 1
-fcntl64: 1
-rt_tgsigqueueinfo: 1
-geteuid32: 1
-getgid32: 1
-getegid32: 1
-getgroups32: 1
-getdents64: 1
-pipe2: 1
-ppoll: 1
-
# Required by AddressSanitizer
gettid: 1
sched_yield: 1
getpid: 1
gettid: 1
+
+@include /system/etc/seccomp_policy/crash_dump.x86.policy
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
index 2daa829..e870965 100644
--- a/services/mediadrm/Android.mk
+++ b/services/mediadrm/Android.mk
@@ -28,7 +28,8 @@
libhidlbase \
libhidlmemory \
libhidltransport \
- android.hardware.drm@1.0
+ android.hardware.drm@1.0 \
+ android.hardware.drm@1.1
LOCAL_CFLAGS += -Wall -Wextra -Werror
diff --git a/services/mediadrm/MediaDrmService.cpp b/services/mediadrm/MediaDrmService.cpp
index a368c11..5afd079 100644
--- a/services/mediadrm/MediaDrmService.cpp
+++ b/services/mediadrm/MediaDrmService.cpp
@@ -24,8 +24,8 @@
#include <binder/IServiceManager.h>
#include <utils/Log.h>
-#include <media/CryptoHal.h>
-#include <media/DrmHal.h>
+#include <mediadrm/CryptoHal.h>
+#include <mediadrm/DrmHal.h>
namespace android {
diff --git a/services/mediadrm/MediaDrmService.h b/services/mediadrm/MediaDrmService.h
index ecc2da7..3607201 100644
--- a/services/mediadrm/MediaDrmService.h
+++ b/services/mediadrm/MediaDrmService.h
@@ -24,7 +24,7 @@
#include <media/Metadata.h>
#include <media/stagefright/foundation/ABase.h>
-#include <media/IMediaDrmService.h>
+#include <mediadrm/IMediaDrmService.h>
namespace android {
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 3b883e7..37d6cc9 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -2,8 +2,11 @@
# service library
include $(CLEAR_VARS)
-LOCAL_SRC_FILES := MediaExtractorService.cpp
LOCAL_CFLAGS := -Wall -Werror
+LOCAL_SRC_FILES := \
+ MediaExtractorService.cpp \
+ MediaExtractorUpdateService.cpp \
+
LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
LOCAL_MODULE:= libmediaextractorservice
include $(BUILD_SHARED_LIBRARY)
@@ -12,9 +15,23 @@
# service executable
include $(CLEAR_VARS)
# seccomp filters are defined for the following architectures:
-LOCAL_REQUIRED_MODULES_arm := mediaextractor.policy
-LOCAL_REQUIRED_MODULES_arm64 := mediaextractor.policy
-LOCAL_REQUIRED_MODULES_x86 := mediaextractor.policy
+LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediaextractor.policy
+LOCAL_REQUIRED_MODULES_arm64 := crash_dump.policy mediaextractor.policy
+LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaextractor.policy
+
+# extractor libraries
+LOCAL_REQUIRED_MODULES += \
+ libaacextractor \
+ libamrextractor \
+ libflacextractor \
+ libmidiextractor \
+ libmkvextractor \
+ libmp3extractor \
+ libmp4extractor \
+ libmpeg2extractor \
+ liboggextractor \
+ libwavextractor \
+
LOCAL_SRC_FILES := main_extractorservice.cpp
LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils \
liblog libbase libicuuc libavservices_minijail
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index 08cbef6..f0f44f5 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -20,8 +20,11 @@
#include <utils/Vector.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaExtractor.h>
+#include <media/DataSource.h>
+#include <media/MediaExtractor.h>
+#include <media/stagefright/DataSourceFactory.h>
+#include <media/stagefright/InterfaceUtils.h>
+#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/RemoteDataSource.h>
#include "MediaExtractorService.h"
@@ -31,29 +34,29 @@
const sp<IDataSource> &remoteSource, const char *mime) {
ALOGV("@@@ MediaExtractorService::makeExtractor for %s", mime);
- sp<DataSource> localSource = DataSource::CreateFromIDataSource(remoteSource);
+ sp<DataSource> localSource = CreateDataSourceFromIDataSource(remoteSource);
- sp<IMediaExtractor> ret = MediaExtractor::CreateFromService(localSource, mime);
+ sp<IMediaExtractor> extractor = MediaExtractorFactory::CreateFromService(localSource, mime);
ALOGV("extractor service created %p (%s)",
- ret.get(),
- ret == NULL ? "" : ret->name());
+ extractor.get(),
+ extractor == nullptr ? "" : extractor->name());
- if (ret != NULL) {
- registerMediaExtractor(ret, localSource, mime);
+ if (extractor != nullptr) {
+ registerMediaExtractor(extractor, localSource, mime);
+ return extractor;
}
-
- return ret;
+ return nullptr;
}
sp<IDataSource> MediaExtractorService::makeIDataSource(int fd, int64_t offset, int64_t length)
{
- sp<DataSource> source = DataSource::CreateFromFd(fd, offset, length);
- return source.get() != nullptr ? source->asIDataSource() : nullptr;
+ sp<DataSource> source = DataSourceFactory::CreateFromFd(fd, offset, length);
+ return CreateIDataSourceFromDataSource(source);
}
status_t MediaExtractorService::dump(int fd, const Vector<String16>& args) {
- return dumpExtractors(fd, args);
+ return MediaExtractorFactory::dump(fd, args) || dumpExtractors(fd, args);
}
status_t MediaExtractorService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
diff --git a/services/mediaextractor/MediaExtractorUpdateService.cpp b/services/mediaextractor/MediaExtractorUpdateService.cpp
new file mode 100644
index 0000000..473a698
--- /dev/null
+++ b/services/mediaextractor/MediaExtractorUpdateService.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaExtractorUpdateService"
+#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaExtractorFactory.h>
+
+#include "MediaExtractorUpdateService.h"
+
+namespace android {
+namespace media {
+
+binder::Status MediaExtractorUpdateService::loadPlugins(const ::std::string& apkPath) {
+ ALOGV("loadPlugins %s", apkPath.c_str());
+ MediaExtractorFactory::LoadPlugins(apkPath);
+ return binder::Status::ok();
+}
+
+} // namespace media
+} // namespace android
diff --git a/services/mediaextractor/MediaExtractorUpdateService.h b/services/mediaextractor/MediaExtractorUpdateService.h
new file mode 100644
index 0000000..4115f6d
--- /dev/null
+++ b/services/mediaextractor/MediaExtractorUpdateService.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_EXTRACTOR_UPDATE_SERVICE_H
+#define ANDROID_MEDIA_EXTRACTOR_UPDATE_SERVICE_H
+
+#include <binder/BinderService.h>
+#include <android/media/BnMediaExtractorUpdateService.h>
+
+namespace android {
+namespace media {
+
+class MediaExtractorUpdateService
+ : public BinderService<MediaExtractorUpdateService>, public BnMediaExtractorUpdateService
+{
+ friend class BinderService<MediaExtractorUpdateService>;
+public:
+ MediaExtractorUpdateService() : BnMediaExtractorUpdateService() { }
+ virtual ~MediaExtractorUpdateService() { }
+ static const char* getServiceName() { return "media.extractor.update"; }
+ binder::Status loadPlugins(const ::std::string& apkPath);
+};
+
+} // namespace media
+} // namespace android
+
+#endif // ANDROID_MEDIA_EXTRACTOR_UPDATE_SERVICE_H
diff --git a/services/mediaextractor/main_extractorservice.cpp b/services/mediaextractor/main_extractorservice.cpp
index 6a5320d..8d3359a 100644
--- a/services/mediaextractor/main_extractorservice.cpp
+++ b/services/mediaextractor/main_extractorservice.cpp
@@ -25,11 +25,13 @@
#include <string>
#include <android-base/logging.h>
+#include <android-base/properties.h>
#include <utils/misc.h>
// from LOCAL_C_INCLUDES
#include "IcuUtils.h"
#include "MediaExtractorService.h"
+#include "MediaExtractorUpdateService.h"
#include "MediaUtils.h"
#include "minijail.h"
@@ -63,6 +65,12 @@
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
MediaExtractorService::instantiate();
+
+ std::string value = base::GetProperty("ro.build.type", "unknown");
+ if (value == "userdebug" || value == "eng") {
+ media::MediaExtractorUpdateService::instantiate();
+ }
+
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
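
A small sketch of the build-type gate added above: the update service is only brought up on userdebug/eng builds. Here std::getenv stands in for the android::base::GetProperty("ro.build.type", ...) read, and the service bring-up is reduced to a log line.

#include <cstdlib>
#include <iostream>
#include <string>

// Stand-in for the system-property read; the real code uses android::base::GetProperty.
static std::string getBuildType() {
    const char *value = std::getenv("BUILD_TYPE");
    return value != nullptr ? value : "unknown";
}

int main() {
    const std::string buildType = getBuildType();
    if (buildType == "userdebug" || buildType == "eng") {
        // Debug-only service: only instantiated on development builds.
        std::cout << "instantiating media.extractor.update\n";
    } else {
        std::cout << "skipping media.extractor.update on build type " << buildType << "\n";
    }
    return 0;
}
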
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy
index 4fa69d7..87018ed 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy
@@ -41,25 +41,11 @@
nanosleep: 1
getrandom: 1
+# for dynamically loading extractors
+pread64: 1
+
# for FileSource
readlinkat: 1
_llseek: 1
-# for attaching to debuggerd on process crash
-sigaction: 1
-tgkill: 1
-socket: 1
-connect: 1
-recvmsg: 1
-fcntl64: 1
-rt_tgsigqueueinfo: 1
-geteuid32: 1
-getgid32: 1
-getegid32: 1
-getgroups32: 1
-getdents64: 1
-pipe2: 1
-ppoll: 1
-getpid: 1
-gettid: 1
-process_vm_readv: 1
+@include /system/etc/seccomp_policy/crash_dump.arm.policy
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
index eed804a..d70e27b 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
@@ -34,23 +34,10 @@
# for FileSource
readlinkat: 1
-# for attaching to debuggerd on process crash
-tgkill: 1
-rt_sigprocmask: 1
-rt_sigaction: 1
-# socket: arg0 == AF_LOCAL
-socket: arg0 == 1
-connect: 1
-recvmsg: 1
-rt_tgsigqueueinfo: 1
-writev: 1
-geteuid: 1
-getgid: 1
-getegid: 1
-getgroups: 1
+# for dynamically loading extractors
getdents64: 1
-pipe2: 1
-ppoll: 1
-getpid: 1
-gettid: 1
-process_vm_readv: 1
+readlinkat: 1
+pread64: 1
+mremap: 1
+
+@include /system/etc/seccomp_policy/crash_dump.arm64.policy
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy b/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy
index 3b37f92..d739ba1 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy
@@ -39,28 +39,20 @@
nanosleep: 1
getrandom: 1
+# for dynamically loading extractors
+getdents64: 1
+readlinkat: 1
+pread64: 1
+mremap: 1
+
# for FileSource
readlinkat: 1
_llseek: 1
-# for attaching to debuggerd on process crash
-socketcall: 1
-sigaction: 1
-tgkill: 1
-rt_sigprocmask: 1
-fcntl64: 1
-rt_tgsigqueueinfo: 1
-geteuid32: 1
-getgid32: 1
-getegid32: 1
-getgroups32: 1
-getdents64: 1
-pipe2: 1
-ppoll: 1
-process_vm_readv: 1
-
# Required by AddressSanitizer
gettid: 1
sched_yield: 1
getpid: 1
gettid: 1
+
+@include /system/etc/seccomp_policy/crash_dump.x86.policy
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index 1f811d3..29e6dfc 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -11,6 +11,7 @@
"libbinder",
"liblog",
"libnbaio",
+ "libnblog",
"libutils",
],
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index a5512e1..1be5544 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -20,19 +20,25 @@
#include <sys/mman.h>
#include <utils/Log.h>
#include <binder/PermissionCache.h>
-#include <media/nbaio/NBLog.h>
+#include <media/nblog/NBLog.h>
#include <private/android_filesystem_config.h>
#include "MediaLogService.h"
namespace android {
- static const char kDeadlockedString[] = "MediaLogService may be deadlocked\n";
+static const char kDeadlockedString[] = "MediaLogService may be deadlocked\n";
+
+// mMerger, mMergeReader, and mMergeThread all point to the same location in memory
+// mMergerShared. This is the local memory FIFO containing data merged from all
+// individual thread FIFOs in shared memory. mMergeThread is used to periodically
+// call NBLog::Merger::merge() to collect the data and write it to the FIFO, and call
+// NBLog::MergeReader::getAndProcessSnapshot to process the merged data.
MediaLogService::MediaLogService() :
BnMediaLogService(),
mMergerShared((NBLog::Shared*) malloc(NBLog::Timeline::sharedSize(kMergeBufferSize))),
mMerger(mMergerShared, kMergeBufferSize),
mMergeReader(mMergerShared, kMergeBufferSize, mMerger),
- mMergeThread(new NBLog::MergeThread(mMerger))
+ mMergeThread(new NBLog::MergeThread(mMerger, mMergeReader))
{
mMergeThread->run("MergeThread");
}
@@ -123,15 +129,10 @@
} else {
ALOGI("%s:", namedReader.name());
}
- // TODO This code is for testing, remove it when done
- // namedReader.reader()->dump(fd, 0 /*indent*/);
}
-
mLock.unlock();
}
}
-
- // FIXME request merge to make sure log is up to date
mMergeReader.dump(fd);
return NO_ERROR;
}
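
The comment added above describes the merge arrangement; a rough sketch of that periodic-merge pattern follows, with merge() and processSnapshot() as placeholders for NBLog::Merger::merge() and NBLog::MergeReader::getAndProcessSnapshot(), and the 100 ms period chosen arbitrarily for illustration.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

class MergeThread {
public:
    void start() {
        mRunning = true;
        mThread = std::thread([this] {
            while (mRunning) {
                merge();             // collect data from the per-thread FIFOs
                processSnapshot();   // read back and handle the merged data
                std::this_thread::sleep_for(std::chrono::milliseconds(100));
            }
        });
    }
    void stop() {
        mRunning = false;
        if (mThread.joinable()) mThread.join();
    }
private:
    void merge() { std::puts("merge"); }
    void processSnapshot() { std::puts("process snapshot"); }
    std::atomic<bool> mRunning{false};
    std::thread mThread;
};

int main() {
    MergeThread t;
    t.start();
    std::this_thread::sleep_for(std::chrono::milliseconds(350));
    t.stop();
    return 0;
}
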
diff --git a/services/medialog/MediaLogService.h b/services/medialog/MediaLogService.h
index 39d9cc0..c945d1f 100644
--- a/services/medialog/MediaLogService.h
+++ b/services/medialog/MediaLogService.h
@@ -19,7 +19,7 @@
#include <binder/BinderService.h>
#include <media/IMediaLogService.h>
-#include <media/nbaio/NBLog.h>
+#include <media/nblog/NBLog.h>
namespace android {
diff --git a/services/medialog/OWNERS b/services/medialog/OWNERS
index fb8b8ee..21723ba 100644
--- a/services/medialog/OWNERS
+++ b/services/medialog/OWNERS
@@ -1,3 +1,3 @@
elaurent@google.com
-gkasten@android.com
+gkasten@google.com
hunga@google.com
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 78bb587..28bfd3f 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -31,7 +31,8 @@
#include "ResourceManagerService.h"
#include "ServiceLog.h"
-
+#include "mediautils/SchedulingPolicyService.h"
+#include <cutils/sched_policy.h>
namespace android {
namespace {
@@ -111,6 +112,7 @@
ResourceInfo info;
info.clientId = clientId;
info.client = client;
+ info.cpuBoost = false;
infos.push_back(info);
return infos.editItemAt(infos.size() - 1);
}
@@ -201,7 +203,8 @@
: mProcessInfo(processInfo),
mServiceLog(new ServiceLog()),
mSupportsMultipleSecureCodecs(true),
- mSupportsSecureWithNonSecureCodec(true) {}
+ mSupportsSecureWithNonSecureCodec(true),
+ mCpuBoostCount(0) {}
ResourceManagerService::~ResourceManagerService() {}
@@ -239,6 +242,19 @@
ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos);
// TODO: do the merge instead of append.
info.resources.appendVector(resources);
+
+ for (size_t i = 0; i < resources.size(); ++i) {
+ if (resources[i].mType == MediaResource::kCpuBoost && !info.cpuBoost) {
+ info.cpuBoost = true;
+ // Request it on every new instance of kCpuBoost, as the media.codec
+ // could have died; if we only requested it the first time, subsequent
+ // instances would never get the boost.
+ if (requestCpusetBoost(true, this) != OK) {
+ ALOGW("couldn't request cpuset boost");
+ }
+ mCpuBoostCount++;
+ }
+ }
if (info.deathNotifier == nullptr) {
info.deathNotifier = new DeathNotifier(this, pid, clientId);
IInterface::asBinder(client)->linkToDeath(info.deathNotifier);
@@ -270,6 +286,11 @@
ResourceInfos &infos = mMap.editValueAt(index);
for (size_t j = 0; j < infos.size(); ++j) {
if (infos[j].clientId == clientId) {
+ if (infos[j].cpuBoost && mCpuBoostCount > 0) {
+ if (--mCpuBoostCount == 0) {
+ requestCpusetBoost(false, this);
+ }
+ }
IInterface::asBinder(infos[j].client)->unlinkToDeath(infos[j].deathNotifier);
j = infos.removeAt(j);
found = true;
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 9e97ac0..82d2a0b 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -38,6 +38,7 @@
sp<IResourceManagerClient> client;
sp<IBinder::DeathRecipient> deathNotifier;
Vector<MediaResource> resources;
+ bool cpuBoost;
};
typedef Vector<ResourceInfo> ResourceInfos;
@@ -112,6 +113,7 @@
PidResourceInfosMap mMap;
bool mSupportsMultipleSecureCodecs;
bool mSupportsSecureWithNonSecureCodec;
+ int32_t mCpuBoostCount;
};
// ----------------------------------------------------------------------------
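
A condensed sketch of the boost accounting that addResource()/removeResource() now perform: each client declaring a CpuBoost resource bumps a service-wide counter, and the boost is released only when the last such client goes away. requestCpusetBoost() here is a stub standing in for the SchedulingPolicyService call.

#include <cstdio>

static bool requestCpusetBoost(bool enable) {
    std::printf("cpuset boost %s\n", enable ? "enabled" : "disabled");
    return true;
}

class BoostTracker {
public:
    void addBoostedClient() {
        // Request on every new boosted client: the boosted process may have died and
        // restarted, so a single one-shot request would not be sticky.
        requestCpusetBoost(true);
        ++mCpuBoostCount;
    }
    void removeBoostedClient() {
        if (mCpuBoostCount > 0 && --mCpuBoostCount == 0) {
            requestCpusetBoost(false);
        }
    }
private:
    int mCpuBoostCount = 0;
};

int main() {
    BoostTracker tracker;
    tracker.addBoostedClient();
    tracker.addBoostedClient();
    tracker.removeBoostedClient();
    tracker.removeBoostedClient();  // last boosted client gone -> boost released
    return 0;
}
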
diff --git a/services/minijail/Android.bp b/services/minijail/Android.bp
new file mode 100644
index 0000000..07a94cc
--- /dev/null
+++ b/services/minijail/Android.bp
@@ -0,0 +1,38 @@
+minijail_common_cflags = [
+ "-Wall",
+ "-Werror",
+]
+
+cc_defaults {
+ name: "libavservices_minijail_defaults",
+ srcs: ["minijail.cpp"],
+ cflags: minijail_common_cflags,
+ shared_libs: [
+ "libbase",
+ "libminijail",
+ ],
+}
+
+// Small library for media.extractor and media.codec sandboxing.
+cc_library_shared {
+ name: "libavservices_minijail",
+ defaults: ["libavservices_minijail_defaults"],
+ export_include_dirs: ["."],
+}
+
+// Small library for media.extractor and media.codec sandboxing.
+cc_library_shared {
+ name: "libavservices_minijail_vendor",
+ vendor: true,
+ defaults: ["libavservices_minijail_defaults"],
+ export_include_dirs: ["."],
+}
+
+// Unit tests.
+cc_test {
+ name: "libavservices_minijail_unittest",
+ defaults: ["libavservices_minijail_defaults"],
+ srcs: [
+ "av_services_minijail_unittest.cpp",
+ ],
+}
diff --git a/services/minijail/Android.mk b/services/minijail/Android.mk
deleted file mode 100644
index 67055a8..0000000
--- a/services/minijail/Android.mk
+++ /dev/null
@@ -1,30 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-minijail_common_cflags := -Wall -Werror
-
-# Small library for media.extractor and media.codec sandboxing.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libavservices_minijail
-LOCAL_SRC_FILES := minijail.cpp
-LOCAL_CFLAGS := $(minijail_common_cflags)
-LOCAL_SHARED_LIBRARIES := libbase libminijail
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
-include $(BUILD_SHARED_LIBRARY)
-
-# Small library for media.extractor and media.codec sandboxing.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libavservices_minijail_vendor
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := minijail.cpp
-LOCAL_CFLAGS := $(minijail_common_cflags)
-LOCAL_SHARED_LIBRARIES := libbase libminijail
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
-include $(BUILD_SHARED_LIBRARY)
-
-# Unit tests.
-include $(CLEAR_VARS)
-LOCAL_MODULE := libavservices_minijail_unittest
-LOCAL_SRC_FILES := minijail.cpp av_services_minijail_unittest.cpp
-LOCAL_CFLAGS := $(minijail_common_cflags)
-LOCAL_SHARED_LIBRARIES := libbase libminijail
-include $(BUILD_NATIVE_TEST)
diff --git a/services/minijail/OWNERS b/services/minijail/OWNERS
new file mode 100644
index 0000000..19f4f9f
--- /dev/null
+++ b/services/minijail/OWNERS
@@ -0,0 +1,2 @@
+jorgelo@google.com
+marcone@google.com
diff --git a/services/oboeservice/AAudioClientTracker.cpp b/services/oboeservice/AAudioClientTracker.cpp
index 75392bd..7264a9b 100644
--- a/services/oboeservice/AAudioClientTracker.cpp
+++ b/services/oboeservice/AAudioClientTracker.cpp
@@ -15,12 +15,14 @@
*/
-#define LOG_TAG "AAudioService"
+#define LOG_TAG "AAudioClientTracker"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <assert.h>
#include <binder/IPCThreadState.h>
+#include <iomanip>
+#include <iostream>
#include <map>
#include <mutex>
#include <utils/Singleton.h>
@@ -39,7 +41,6 @@
: Singleton<AAudioClientTracker>() {
}
-
std::string AAudioClientTracker::dump() const {
std::stringstream result;
const bool isLocked = AAudio_tryUntilTrue(
@@ -64,8 +65,7 @@
// Create a tracker for the client.
aaudio_result_t AAudioClientTracker::registerClient(pid_t pid,
const sp<IAAudioClient>& client) {
- ALOGV("AAudioClientTracker::registerClient(), calling pid = %d, getpid() = %d\n",
- pid, getpid());
+ ALOGV("registerClient(), calling pid = %d, getpid() = %d\n", pid, getpid());
std::lock_guard<std::mutex> lock(mLock);
if (mNotificationClients.count(pid) == 0) {
@@ -74,18 +74,16 @@
sp<IBinder> binder = IInterface::asBinder(client);
status_t status = binder->linkToDeath(notificationClient);
- ALOGW_IF(status != NO_ERROR,
- "AAudioClientTracker::registerClient() linkToDeath = %d\n", status);
+ ALOGW_IF(status != NO_ERROR, "registerClient() linkToDeath = %d\n", status);
return AAudioConvert_androidToAAudioResult(status);
} else {
- ALOGW("AAudioClientTracker::registerClient(%d) already registered!", pid);
+ ALOGW("registerClient(%d) already registered!", pid);
return AAUDIO_OK; // TODO should this be considered an error
}
}
void AAudioClientTracker::unregisterClient(pid_t pid) {
- ALOGV("AAudioClientTracker::unregisterClient(), calling pid = %d, getpid() = %d\n",
- pid, getpid());
+ ALOGV("unregisterClient(), calling pid = %d, getpid() = %d\n", pid, getpid());
std::lock_guard<std::mutex> lock(mLock);
mNotificationClients.erase(pid);
}
@@ -103,12 +101,12 @@
aaudio_result_t
AAudioClientTracker::registerClientStream(pid_t pid, sp<AAudioServiceStreamBase> serviceStream) {
aaudio_result_t result = AAUDIO_OK;
- ALOGV("AAudioClientTracker::registerClientStream(%d, %p)\n", pid, serviceStream.get());
+ ALOGV("registerClientStream(%d, %p)\n", pid, serviceStream.get());
std::lock_guard<std::mutex> lock(mLock);
sp<NotificationClient> notificationClient = mNotificationClients[pid];
if (notificationClient == 0) {
// This will get called the first time the audio server registers an internal stream.
- ALOGV("AAudioClientTracker::registerClientStream(%d,) unrecognized pid\n", pid);
+ ALOGV("registerClientStream(%d,) unrecognized pid\n", pid);
notificationClient = new NotificationClient(pid);
mNotificationClients[pid] = notificationClient;
}
@@ -120,15 +118,15 @@
aaudio_result_t
AAudioClientTracker::unregisterClientStream(pid_t pid,
sp<AAudioServiceStreamBase> serviceStream) {
- ALOGV("AAudioClientTracker::unregisterClientStream(%d, %p)\n", pid, serviceStream.get());
+ ALOGV("unregisterClientStream(%d, %p)\n", pid, serviceStream.get());
std::lock_guard<std::mutex> lock(mLock);
auto it = mNotificationClients.find(pid);
if (it != mNotificationClients.end()) {
- ALOGV("AAudioClientTracker::unregisterClientStream(%d, %p) found NotificationClient\n",
+ ALOGV("unregisterClientStream(%d, %p) found NotificationClient\n",
pid, serviceStream.get());
it->second->unregisterClientStream(serviceStream);
} else {
- ALOGE("AAudioClientTracker::unregisterClientStream(%d, %p) missing NotificationClient\n",
+ ALOGE("unregisterClientStream(%d, %p) missing NotificationClient\n",
pid, serviceStream.get());
}
return AAUDIO_OK;
@@ -136,11 +134,11 @@
AAudioClientTracker::NotificationClient::NotificationClient(pid_t pid)
: mProcessId(pid) {
- //ALOGD("AAudioClientTracker::NotificationClient(%d) created %p\n", pid, this);
+ //ALOGD("NotificationClient(%d) created %p\n", pid, this);
}
AAudioClientTracker::NotificationClient::~NotificationClient() {
- //ALOGD("AAudioClientTracker::~NotificationClient() destroyed %p\n", this);
+ //ALOGD("~NotificationClient() destroyed %p\n", this);
}
int32_t AAudioClientTracker::NotificationClient::getStreamCount() {
@@ -172,14 +170,14 @@
{
std::lock_guard<std::mutex> lock(mLock);
- for (auto serviceStream : mStreams) {
+ for (const auto& serviceStream : mStreams) {
streamsToClose.insert(serviceStream);
}
}
- for (auto serviceStream : streamsToClose) {
+ for (const auto& serviceStream : streamsToClose) {
aaudio_handle_t handle = serviceStream->getHandle();
- ALOGW("AAudioClientTracker::binderDied() close abandoned stream 0x%08X\n", handle);
+ ALOGW("binderDied() close abandoned stream 0x%08X\n", handle);
aaudioService->closeStream(handle);
}
// mStreams should be empty now
@@ -200,8 +198,10 @@
}
result << " client: pid = " << mProcessId << " has " << mStreams.size() << " streams\n";
- for (auto serviceStream : mStreams) {
- result << " stream: 0x" << std::hex << serviceStream->getHandle() << std::dec << "\n";
+ for (const auto& serviceStream : mStreams) {
+ result << " stream: 0x" << std::setfill('0') << std::setw(8) << std::hex
+ << serviceStream->getHandle()
+ << std::dec << std::setfill(' ') << "\n";
}
if (isLocked) {
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index f996f74..04fee13 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -67,11 +67,17 @@
result << "Exclusive MMAP Endpoints: " << mExclusiveStreams.size() << "\n";
index = 0;
- for (const auto &output : mExclusiveStreams) {
+ for (const auto &stream : mExclusiveStreams) {
result << " #" << index++ << ":";
- result << output->dump() << "\n";
+ result << stream->dump() << "\n";
}
+ result << " ExclusiveSearchCount: " << mExclusiveSearchCount << "\n";
+ result << " ExclusiveFoundCount: " << mExclusiveFoundCount << "\n";
+ result << " ExclusiveOpenCount: " << mExclusiveOpenCount << "\n";
+ result << " ExclusiveCloseCount: " << mExclusiveCloseCount << "\n";
+ result << "\n";
+
if (isExclusiveLocked) {
mExclusiveLock.unlock();
}
@@ -79,11 +85,17 @@
result << "Shared Endpoints: " << mSharedStreams.size() << "\n";
index = 0;
- for (const auto &input : mSharedStreams) {
+ for (const auto &stream : mSharedStreams) {
result << " #" << index++ << ":";
- result << input->dump() << "\n";
+ result << stream->dump() << "\n";
}
+ result << " SharedSearchCount: " << mSharedSearchCount << "\n";
+ result << " SharedFoundCount: " << mSharedFoundCount << "\n";
+ result << " SharedOpenCount: " << mSharedOpenCount << "\n";
+ result << " SharedCloseCount: " << mSharedCloseCount << "\n";
+ result << "\n";
+
if (isSharedLocked) {
mSharedLock.unlock();
}
@@ -95,15 +107,17 @@
sp<AAudioServiceEndpoint> AAudioEndpointManager::findExclusiveEndpoint_l(
const AAudioStreamConfiguration &configuration) {
sp<AAudioServiceEndpoint> endpoint;
+ mExclusiveSearchCount++;
for (const auto ep : mExclusiveStreams) {
if (ep->matches(configuration)) {
+ mExclusiveFoundCount++;
endpoint = ep;
break;
}
}
- ALOGV("AAudioEndpointManager.findExclusiveEndpoint_l(), found %p for device = %d",
- endpoint.get(), configuration.getDeviceId());
+ ALOGV("findExclusiveEndpoint_l(), found %p for device = %d, sessionId = %d",
+ endpoint.get(), configuration.getDeviceId(), configuration.getSessionId());
return endpoint;
}
@@ -111,15 +125,17 @@
sp<AAudioServiceEndpointShared> AAudioEndpointManager::findSharedEndpoint_l(
const AAudioStreamConfiguration &configuration) {
sp<AAudioServiceEndpointShared> endpoint;
+ mSharedSearchCount++;
for (const auto ep : mSharedStreams) {
if (ep->matches(configuration)) {
+ mSharedFoundCount++;
endpoint = ep;
break;
}
}
- ALOGV("AAudioEndpointManager.findSharedEndpoint_l(), found %p for device = %d",
- endpoint.get(), configuration.getDeviceId());
+ ALOGV("findSharedEndpoint_l(), found %p for device = %d, sessionId = %d",
+ endpoint.get(), configuration.getDeviceId(), configuration.getSessionId());
return endpoint;
}
@@ -134,7 +150,7 @@
}
sp<AAudioServiceEndpoint> AAudioEndpointManager::openExclusiveEndpoint(
- AAudioService &aaudioService __unused,
+ AAudioService &aaudioService,
const aaudio::AAudioStreamRequest &request) {
std::lock_guard<std::mutex> lock(mExclusiveLock);
@@ -146,24 +162,23 @@
// If we find an existing one then this one cannot be exclusive.
if (endpoint.get() != nullptr) {
- ALOGE("AAudioEndpointManager.openExclusiveEndpoint() already in use");
+ ALOGW("openExclusiveEndpoint() already in use");
// Already open so do not allow a second stream.
return nullptr;
} else {
- sp<AAudioServiceEndpointMMAP> endpointMMap = new AAudioServiceEndpointMMAP();
- ALOGE("AAudioEndpointManager.openEndpoint(),created MMAP %p", endpointMMap.get());
+ sp<AAudioServiceEndpointMMAP> endpointMMap = new AAudioServiceEndpointMMAP(aaudioService);
+ ALOGV("openExclusiveEndpoint(), no match so try to open MMAP %p for dev %d",
+ endpointMMap.get(), configuration.getDeviceId());
endpoint = endpointMMap;
aaudio_result_t result = endpoint->open(request);
if (result != AAUDIO_OK) {
- ALOGE("AAudioEndpointManager.openEndpoint(), open failed");
+ ALOGE("openExclusiveEndpoint(), open failed");
endpoint.clear();
} else {
mExclusiveStreams.push_back(endpointMMap);
+ mExclusiveOpenCount++;
}
-
- ALOGD("AAudioEndpointManager.openEndpoint(), created %p for device = %d",
- endpoint.get(), configuration.getDeviceId());
}
if (endpoint.get() != nullptr) {
@@ -203,14 +218,14 @@
if (endpoint.get() != nullptr) {
aaudio_result_t result = endpoint->open(request);
if (result != AAUDIO_OK) {
- ALOGE("AAudioEndpointManager.openEndpoint(), open failed");
endpoint.clear();
} else {
mSharedStreams.push_back(endpoint);
+ mSharedOpenCount++;
}
}
- ALOGD("AAudioEndpointManager.openSharedEndpoint(), created %p for device = %d, dir = %d",
- endpoint.get(), configuration.getDeviceId(), (int)direction);
+ ALOGV("%s(), created endpoint %p, requested device = %d, dir = %d",
+ __func__, endpoint.get(), configuration.getDeviceId(), (int)direction);
IPCThreadState::self()->restoreCallingIdentity(token);
}
@@ -239,15 +254,16 @@
int32_t newRefCount = serviceEndpoint->getOpenCount() - 1;
serviceEndpoint->setOpenCount(newRefCount);
- // If no longer in use then close and delete it.
+ // If no longer in use then actually close it.
if (newRefCount <= 0) {
mExclusiveStreams.erase(
std::remove(mExclusiveStreams.begin(), mExclusiveStreams.end(), serviceEndpoint),
mExclusiveStreams.end());
serviceEndpoint->close();
- ALOGD("AAudioEndpointManager::closeExclusiveEndpoint() %p for device %d",
- serviceEndpoint.get(), serviceEndpoint->getDeviceId());
+ mExclusiveCloseCount++;
+ ALOGV("%s() %p for device %d",
+ __func__, serviceEndpoint.get(), serviceEndpoint->getDeviceId());
}
}
@@ -261,14 +277,15 @@
int32_t newRefCount = serviceEndpoint->getOpenCount() - 1;
serviceEndpoint->setOpenCount(newRefCount);
- // If no longer in use then close and delete it.
+ // If no longer in use then actually close it.
if (newRefCount <= 0) {
mSharedStreams.erase(
std::remove(mSharedStreams.begin(), mSharedStreams.end(), serviceEndpoint),
mSharedStreams.end());
serviceEndpoint->close();
- ALOGD("AAudioEndpointManager::closeSharedEndpoint() %p for device %d",
- serviceEndpoint.get(), serviceEndpoint->getDeviceId());
+ mSharedCloseCount++;
+ ALOGV("%s() %p for device %d",
+ __func__, serviceEndpoint.get(), serviceEndpoint->getDeviceId());
}
}
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index 32c8454..193bdee 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -47,7 +47,7 @@
std::string dump() const;
/**
- * Find a service endpoint for the given deviceId and direction.
+ * Find a service endpoint for the given deviceId, sessionId and direction.
* If an endpoint does not already exist then try to create one.
*
* @param audioService
@@ -87,8 +87,17 @@
mutable std::mutex mExclusiveLock;
std::vector<android::sp<AAudioServiceEndpointMMAP>> mExclusiveStreams;
+ // Modified under a lock.
+ int32_t mExclusiveSearchCount = 0; // number of times we SEARCHED for an exclusive endpoint
+ int32_t mExclusiveFoundCount = 0; // number of times we FOUND an exclusive endpoint
+ int32_t mExclusiveOpenCount = 0; // number of times we OPENED an exclusive endpoint
+ int32_t mExclusiveCloseCount = 0; // number of times we CLOSED an exclusive endpoint
+ // Same as above but for SHARED endpoints.
+ int32_t mSharedSearchCount = 0;
+ int32_t mSharedFoundCount = 0;
+ int32_t mSharedOpenCount = 0;
+ int32_t mSharedCloseCount = 0;
};
-
} /* namespace aaudio */
#endif //AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index 952aa82..b031888 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudioService"
+#define LOG_TAG "AAudioMixer"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -49,10 +49,9 @@
memset(mOutputBuffer, 0, mBufferSizeInBytes);
}
-bool AAudioMixer::mix(int trackIndex, FifoBuffer *fifo, float volume) {
+int32_t AAudioMixer::mix(int streamIndex, FifoBuffer *fifo, bool allowUnderflow) {
WrappingBuffer wrappingBuffer;
float *destination = mOutputBuffer;
- fifo_frames_t framesLeft = mFramesPerBurst;
#if AAUDIO_MIXER_ATRACE_ENABLED
ATRACE_BEGIN("aaMix");
@@ -63,48 +62,57 @@
#if AAUDIO_MIXER_ATRACE_ENABLED
if (ATRACE_ENABLED()) {
char rdyText[] = "aaMixRdy#";
- char letter = 'A' + (trackIndex % 26);
+ char letter = 'A' + (streamIndex % 26);
rdyText[sizeof(rdyText) - 2] = letter;
ATRACE_INT(rdyText, fullFrames);
}
#else /* MIXER_ATRACE_ENABLED */
- (void) trackIndex;
+ (void) streamIndex;
- (void) fullFrames;
#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
+ // If allowUnderflow is true then always advance by one burst even if we do not have the data.
+ // Otherwise the stream timing will drift whenever there is an underflow.
+ // The resulting underflow can then be detected by the client for XRun counting.
+ //
+ // Generally, allowUnderflow will be false when stopping a stream and we want to
+ // use up whatever data is in the queue.
+ fifo_frames_t framesDesired = mFramesPerBurst;
+ if (!allowUnderflow && fullFrames < framesDesired) {
+ framesDesired = fullFrames; // just use what is available then stop
+ }
+
// Mix data in one or two parts.
int partIndex = 0;
+ int32_t framesLeft = framesDesired;
while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
- fifo_frames_t framesToMix = framesLeft;
- fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- if (framesAvailable > 0) {
- if (framesToMix > framesAvailable) {
- framesToMix = framesAvailable;
+ fifo_frames_t framesToMixFromPart = framesLeft;
+ fifo_frames_t framesAvailableFromPart = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailableFromPart > 0) {
+ if (framesToMixFromPart > framesAvailableFromPart) {
+ framesToMixFromPart = framesAvailableFromPart;
}
- mixPart(destination, (float *)wrappingBuffer.data[partIndex], framesToMix, volume);
+ mixPart(destination, (float *)wrappingBuffer.data[partIndex],
+ framesToMixFromPart);
- destination += framesToMix * mSamplesPerFrame;
- framesLeft -= framesToMix;
+ destination += framesToMixFromPart * mSamplesPerFrame;
+ framesLeft -= framesToMixFromPart;
}
partIndex++;
}
- // Always advance by one burst even if we do not have the data.
- // Otherwise the stream timing will drift whenever there is an underflow.
- // This actual underflow can then be detected by the client for XRun counting.
- fifo->getFifoControllerBase()->advanceReadIndex(mFramesPerBurst);
+ fifo->getFifoControllerBase()->advanceReadIndex(framesDesired);
#if AAUDIO_MIXER_ATRACE_ENABLED
ATRACE_END();
#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
- return (framesLeft > 0); // did not get all the frames we needed, ie. "underflow"
+ return (framesDesired - framesLeft); // framesRead
}
-void AAudioMixer::mixPart(float *destination, float *source, int32_t numFrames, float volume) {
+void AAudioMixer::mixPart(float *destination, float *source, int32_t numFrames) {
int32_t numSamples = numFrames * mSamplesPerFrame;
// TODO maybe optimize using SIMD
for (int sampleIndex = 0; sampleIndex < numSamples; sampleIndex++) {
- *destination++ += *source++ * volume;
+ *destination++ += *source++;
}
}
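For illustration, a minimal stand-alone sketch of the burst-advance policy described in the mixer comments above; the types and names here (SketchFifo, mixOneBurst) are hypothetical stand-ins, not the real AAudioMixer/FifoBuffer API. When underflow is allowed the read index always advances by a full burst so the stream clock never drifts, and the shortfall is what the client later counts as an XRun; when draining on stop, only the frames actually queued are consumed.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Illustrative FIFO stand-in: only the read/write indices matter here.
    struct SketchFifo {
        int64_t readIndex = 0;
        int64_t writeIndex = 0;
        int32_t framesAvailable() const { return static_cast<int32_t>(writeIndex - readIndex); }
    };

    // Returns the number of frames actually mixed; the caller compares it
    // against framesPerBurst to detect an underflow (XRun).
    int32_t mixOneBurst(SketchFifo &fifo, int32_t framesPerBurst, bool allowUnderflow) {
        int32_t framesDesired = framesPerBurst;
        if (!allowUnderflow && fifo.framesAvailable() < framesDesired) {
            framesDesired = fifo.framesAvailable(); // stopping: just drain what is queued
        }
        int32_t framesMixed = std::min(framesDesired, fifo.framesAvailable());
        // ... accumulate framesMixed frames into the output buffer here ...
        fifo.readIndex += framesDesired; // advance a full burst even on underflow, so timing never drifts
        return framesMixed;
    }

    int main() {
        SketchFifo fifo;
        fifo.writeIndex = 100; // only 100 frames queued
        int32_t mixed = mixOneBurst(fifo, 160, /*allowUnderflow=*/true);
        std::printf("mixed %d of 160 frames -> client counts an XRun\n", mixed);
        return 0;
    }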
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
index a8090bc..d5abc5b 100644
--- a/services/oboeservice/AAudioMixer.h
+++ b/services/oboeservice/AAudioMixer.h
@@ -33,22 +33,24 @@
/**
* Mix from this FIFO
- * @param fifo
- * @param volume
- * @return true if underflowed
+ * @param streamIndex for marking stream variables in systrace
+ * @param fifo to read from
+ * @param allowUnderflow if true then allow mixer to advance read index past the write index
+ * @return frames read from this stream
*/
- bool mix(int trackIndex, android::FifoBuffer *fifo, float volume);
-
- void mixPart(float *destination, float *source, int32_t numFrames, float volume);
+ int32_t mix(int streamIndex, android::FifoBuffer *fifo, bool allowUnderflow);
float *getOutputBuffer();
+ int32_t getFramesPerBurst() const { return mFramesPerBurst; }
+
private:
+ void mixPart(float *destination, float *source, int32_t numFrames);
+
float *mOutputBuffer = nullptr;
int32_t mSamplesPerFrame = 0;
int32_t mFramesPerBurst = 0;
int32_t mBufferSizeInBytes = 0;
};
-
#endif //AAUDIO_AAUDIO_MIXER_H
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 5a3488d..6a72e5b 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -32,7 +32,6 @@
#include "AAudioService.h"
#include "AAudioServiceStreamMMAP.h"
#include "AAudioServiceStreamShared.h"
-#include "AAudioServiceStreamMMAP.h"
#include "binding/IAAudioService.h"
#include "ServiceUtilities.h"
@@ -92,14 +91,14 @@
if (pid != mAudioClient.clientPid) {
int32_t count = AAudioClientTracker::getInstance().getStreamCount(pid);
if (count >= MAX_STREAMS_PER_PROCESS) {
- ALOGE("AAudioService::openStream(): exceeded max streams per process %d >= %d",
+ ALOGE("openStream(): exceeded max streams per process %d >= %d",
count, MAX_STREAMS_PER_PROCESS);
return AAUDIO_ERROR_UNAVAILABLE;
}
}
if (sharingMode != AAUDIO_SHARING_MODE_EXCLUSIVE && sharingMode != AAUDIO_SHARING_MODE_SHARED) {
- ALOGE("AAudioService::openStream(): unrecognized sharing mode = %d", sharingMode);
+ ALOGE("openStream(): unrecognized sharing mode = %d", sharingMode);
return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
}
@@ -114,7 +113,7 @@
result = serviceStream->open(request);
if (result != AAUDIO_OK) {
// Clear it so we can possibly fall back to using a shared stream.
- ALOGW("AAudioService::openStream(), could not open in EXCLUSIVE mode");
+ ALOGW("openStream(), could not open in EXCLUSIVE mode");
serviceStream.clear();
}
}
@@ -128,12 +127,12 @@
if (result != AAUDIO_OK) {
serviceStream.clear();
- ALOGE("AAudioService::openStream(): failed, return %d = %s",
+ ALOGE("openStream(): failed, return %d = %s",
result, AAudio_convertResultToText(result));
return result;
} else {
aaudio_handle_t handle = mStreamTracker.addStreamForHandle(serviceStream.get());
- ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
+ ALOGD("openStream(): handle = 0x%08X", handle);
serviceStream->setHandle(handle);
pid_t pid = request.getProcessId();
AAudioClientTracker::getInstance().registerClientStream(pid, serviceStream);
@@ -142,34 +141,48 @@
}
}
+// If a close request is pending then close the stream
+bool AAudioService::releaseStream(const sp<AAudioServiceStreamBase> &serviceStream) {
+ bool closed = false;
+ // decrementAndRemoveStreamByHandle() uses a lock so that if there are two simultaneous closes
+ // then only one will get the pointer and do the close.
+ sp<AAudioServiceStreamBase> foundStream = mStreamTracker.decrementAndRemoveStreamByHandle(
+ serviceStream->getHandle());
+ if (foundStream.get() != nullptr) {
+ foundStream->close();
+ pid_t pid = foundStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, foundStream);
+ closed = true;
+ }
+ return closed;
+}
+
+aaudio_result_t AAudioService::checkForPendingClose(
+ const sp<AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult) {
+ return releaseStream(serviceStream) ? AAUDIO_ERROR_INVALID_STATE : defaultResult;
+}
+
aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
// Check permission and ownership first.
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::closeStream(0x%0x), illegal stream handle", streamHandle);
+ ALOGE("closeStream(0x%0x), illegal stream handle", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- ALOGD("AAudioService.closeStream(0x%08X)", streamHandle);
- // Remove handle from tracker so that we cannot look up the raw address any more.
- // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
- // then only one will get the pointer and do the close.
- serviceStream = mStreamTracker.removeStreamByHandle(streamHandle);
- if (serviceStream.get() != nullptr) {
- serviceStream->close();
- pid_t pid = serviceStream->getOwnerProcessId();
- AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
- return AAUDIO_OK;
- } else {
- ALOGW("AAudioService::closeStream(0x%0x) being handled by another thread", streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
+ pid_t pid = serviceStream->getOwnerProcessId();
+ AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
+
+ serviceStream->markCloseNeeded();
+ (void) releaseStream(serviceStream);
+ return AAUDIO_OK;
}
-
sp<AAudioServiceStreamBase> AAudioService::convertHandleToServiceStream(
aaudio_handle_t streamHandle) {
- sp<AAudioServiceStreamBase> serviceStream = mStreamTracker.getStreamByHandle(streamHandle);
+ sp<AAudioServiceStreamBase> serviceStream = mStreamTracker.getStreamByHandleAndIncrement(
+ streamHandle);
if (serviceStream.get() != nullptr) {
// Only allow owner or the aaudio service to access the stream.
const uid_t callingUserId = IPCThreadState::self()->getCallingUid();
@@ -181,7 +194,9 @@
if (!allowed) {
ALOGE("AAudioService: calling uid %d cannot access stream 0x%08X owned by %d",
callingUserId, streamHandle, ownerUserId);
- serviceStream = nullptr;
+ // We incremented the reference count so we must check if it needs to be closed.
+ checkForPendingClose(serviceStream, AAUDIO_OK);
+ serviceStream.clear();
}
}
return serviceStream;
@@ -192,94 +207,97 @@
aaudio::AudioEndpointParcelable &parcelable) {
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
+ ALOGE("getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->getDescription(parcelable);
// parcelable.dump();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::startStream(), illegal stream handle = 0x%0x", streamHandle);
+ ALOGE("startStream(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->start();
+ aaudio_result_t result = serviceStream->start();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::pauseStream(), illegal stream handle = 0x%0x", streamHandle);
+ ALOGE("pauseStream(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->pause();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::stopStream(), illegal stream handle = 0x%0x", streamHandle);
+ ALOGE("stopStream(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
aaudio_result_t result = serviceStream->stop();
- return result;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::flushStream(), illegal stream handle = 0x%0x", streamHandle);
+ ALOGE("flushStream(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->flush();
+ aaudio_result_t result = serviceStream->flush();
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId,
int64_t periodNanoseconds) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
+ ALOGE("registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
ALOGE("AAudioService::registerAudioThread(), thread already registered");
- return AAUDIO_ERROR_INVALID_STATE;
- }
-
- const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
- serviceStream->setRegisteredThread(clientThreadId);
- int err = android::requestPriority(ownerPid, clientThreadId,
- DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
- if (err != 0){
- ALOGE("AAudioService::registerAudioThread(%d) failed, errno = %d, priority = %d",
- clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
- return AAUDIO_ERROR_INTERNAL;
+ result = AAUDIO_ERROR_INVALID_STATE;
} else {
- return AAUDIO_OK;
+ const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+ serviceStream->setRegisteredThread(clientThreadId);
+ int err = android::requestPriority(ownerPid, clientThreadId,
+ DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
+ if (err != 0) {
+ ALOGE("AAudioService::registerAudioThread(%d) failed, errno = %d, priority = %d",
+ clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
+ result = AAUDIO_ERROR_INTERNAL;
+ }
}
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId) {
+ aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::unregisterAudioThread(), illegal stream handle = 0x%0x",
- streamHandle);
+ ALOGE("%s(), illegal stream handle = 0x%0x", __func__, streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
if (serviceStream->getRegisteredThread() != clientThreadId) {
- ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
- return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ ALOGE("%s(), wrong thread", __func__);
+ result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else {
+ serviceStream->setRegisteredThread(0);
}
- serviceStream->setRegisteredThread(0);
- return AAUDIO_OK;
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::startClient(aaudio_handle_t streamHandle,
@@ -287,20 +305,35 @@
audio_port_handle_t *clientHandle) {
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::startClient(), illegal stream handle = 0x%0x",
- streamHandle);
+ ALOGE("%s(), illegal stream handle = 0x%0x", __func__, streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->startClient(client, clientHandle);
+ aaudio_result_t result = serviceStream->startClient(client, clientHandle);
+ return checkForPendingClose(serviceStream, result);
}
aaudio_result_t AAudioService::stopClient(aaudio_handle_t streamHandle,
- audio_port_handle_t clientHandle) {
+ audio_port_handle_t portHandle) {
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
- ALOGE("AAudioService::stopClient(), illegal stream handle = 0x%0x",
- streamHandle);
+ ALOGE("%s(), illegal stream handle = 0x%0x", __func__, streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
- return serviceStream->stopClient(clientHandle);
+ aaudio_result_t result = serviceStream->stopClient(portHandle);
+ return checkForPendingClose(serviceStream, result);
+}
+
+// This is only called internally when AudioFlinger wants to tear down a stream.
+// So we do not have to check permissions.
+aaudio_result_t AAudioService::disconnectStreamByPortHandle(audio_port_handle_t portHandle) {
+ ALOGD("%s(%d) called", __func__, portHandle);
+ sp<AAudioServiceStreamBase> serviceStream =
+ mStreamTracker.findStreamByPortHandleAndIncrement(portHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGE("%s(), could not find stream with portHandle = %d", __func__, portHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ aaudio_result_t result = serviceStream->stop();
+ serviceStream->disconnect();
+ return checkForPendingClose(serviceStream, result);
}
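A minimal sketch of the deferred-close pattern introduced above, using hypothetical names rather than the real AAudioStreamTracker API: every service entry point that looks a stream up holds a use count, closeStream() only marks the stream, and the real close runs when the last in-flight call releases its reference.

    #include <atomic>
    #include <cstdio>

    // Hypothetical stand-in for AAudioServiceStreamBase.
    struct SketchStream {
        std::atomic<int32_t> useCount{0};
        std::atomic<bool> closeNeeded{false};
        void close() { std::printf("stream really closed\n"); }
    };

    // Called by every service entry point after looking the stream up.
    void acquire(SketchStream &s) { s.useCount.fetch_add(1); }

    // Returns true if this call performed the deferred close.
    bool releaseAndMaybeClose(SketchStream &s) {
        int32_t remaining = s.useCount.fetch_sub(1) - 1;
        if (remaining == 0 && s.closeNeeded.load()) {
            s.close();
            return true;
        }
        return false;
    }

    int main() {
        SketchStream stream;
        acquire(stream);                  // e.g. startStream() still in flight
        stream.closeNeeded.store(true);   // closeStream() arrives concurrently
        releaseAndMaybeClose(stream);     // last reference drops -> close happens here
        return 0;
    }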
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index eef0824..d21b1cd 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -83,6 +83,8 @@
aaudio_result_t stopClient(aaudio::aaudio_handle_t streamHandle,
audio_port_handle_t clientHandle) override;
+ aaudio_result_t disconnectStreamByPortHandle(audio_port_handle_t portHandle);
+
private:
/**
@@ -94,9 +96,15 @@
aaudio::aaudio_handle_t streamHandle);
- android::AudioClient mAudioClient;
- aaudio::AAudioStreamTracker mStreamTracker;
+ bool releaseStream(const sp<aaudio::AAudioServiceStreamBase> &serviceStream);
+
+ aaudio_result_t checkForPendingClose(const sp<aaudio::AAudioServiceStreamBase> &serviceStream,
+ aaudio_result_t defaultResult);
+
+ android::AudioClient mAudioClient;
+
+ aaudio::AAudioStreamTracker mStreamTracker;
enum constants {
DEFAULT_AUDIO_PRIORITY = 2
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 3095bc9..0349034 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -39,7 +39,7 @@
using namespace aaudio; // TODO just import names needed
AAudioServiceEndpoint::~AAudioServiceEndpoint() {
- ALOGD("AAudioServiceEndpoint::~AAudioServiceEndpoint() destroying endpoint %p", this);
+ ALOGD("%s(%p) destroyed", __func__, this);
}
std::string AAudioServiceEndpoint::dump() const {
@@ -55,11 +55,17 @@
result << " Direction: " << ((getDirection() == AAUDIO_DIRECTION_OUTPUT)
? "OUTPUT" : "INPUT") << "\n";
- result << " Sample Rate: " << getSampleRate() << "\n";
- result << " Frames Per Burst: " << mFramesPerBurst << "\n";
- result << " Reference Count: " << mOpenCount << "\n";
result << " Requested Device Id: " << mRequestedDeviceId << "\n";
result << " Device Id: " << getDeviceId() << "\n";
+ result << " Sample Rate: " << getSampleRate() << "\n";
+ result << " Channel Count: " << getSamplesPerFrame() << "\n";
+ result << " Frames Per Burst: " << mFramesPerBurst << "\n";
+ result << " Usage: " << getUsage() << "\n";
+ result << " ContentType: " << getContentType() << "\n";
+ result << " InputPreset: " << getInputPreset() << "\n";
+ result << " Reference Count: " << mOpenCount << "\n";
+ result << " Session Id: " << getSessionId() << "\n";
+ result << " Connected: " << mConnected.load() << "\n";
result << " Registered Streams:" << "\n";
result << AAudioServiceStreamShared::dumpHeader() << "\n";
for (const auto stream : mRegisteredStreams) {
@@ -72,9 +78,22 @@
return result.str();
}
-void AAudioServiceEndpoint::disconnectRegisteredStreams() {
+// @return true if stream found
+bool AAudioServiceEndpoint::isStreamRegistered(audio_port_handle_t portHandle) {
std::lock_guard<std::mutex> lock(mLockStreams);
for (const auto stream : mRegisteredStreams) {
+ if (stream->getPortHandle() == portHandle) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void AAudioServiceEndpoint::disconnectRegisteredStreams() {
+ std::lock_guard<std::mutex> lock(mLockStreams);
+ mConnected.store(false);
+ for (const auto stream : mRegisteredStreams) {
+ ALOGD("disconnectRegisteredStreams() stop and disconnect %p", stream.get());
stream->stop();
stream->disconnect();
}
@@ -96,6 +115,9 @@
}
bool AAudioServiceEndpoint::matches(const AAudioStreamConfiguration& configuration) {
+ if (!mConnected.load()) {
+ return false; // Only use an endpoint if it is connected to a device.
+ }
if (configuration.getDirection() != getDirection()) {
return false;
}
@@ -103,6 +125,10 @@
configuration.getDeviceId() != getDeviceId()) {
return false;
}
+ if (configuration.getSessionId() != AAUDIO_SESSION_ID_ALLOCATE &&
+ configuration.getSessionId() != getSessionId()) {
+ return false;
+ }
if (configuration.getSampleRate() != AAUDIO_UNSPECIFIED &&
configuration.getSampleRate() != getSampleRate()) {
return false;
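A small sketch of the wildcard matching rule used by matches() above, with illustrative values only (kUnspecified stands in for AAUDIO_UNSPECIFIED / AAUDIO_SESSION_ID_ALLOCATE): a requested field matches when it is unspecified or equal to the endpoint's value, and a disconnected endpoint never matches.

    #include <cstdint>
    #include <cstdio>

    constexpr int32_t kUnspecified = 0; // illustrative wildcard value

    bool fieldMatches(int32_t requested, int32_t actual) {
        return requested == kUnspecified || requested == actual;
    }

    struct SketchEndpoint { int32_t deviceId; int32_t sessionId; int32_t sampleRate; bool connected; };

    bool endpointMatches(const SketchEndpoint &e, int32_t devId, int32_t sessId, int32_t rate) {
        if (!e.connected) return false; // never reuse an endpoint that lost its device
        return fieldMatches(devId, e.deviceId)
            && fieldMatches(sessId, e.sessionId)
            && fieldMatches(rate, e.sampleRate);
    }

    int main() {
        SketchEndpoint endpoint{3, 42, 48000, true};
        std::printf("match = %d\n", endpointMatches(endpoint, kUnspecified, 42, 48000));
        return 0;
    }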
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 2ef6234..253f290 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -49,9 +49,9 @@
virtual aaudio_result_t close() = 0;
- virtual aaudio_result_t registerStream(android::sp<AAudioServiceStreamBase> stream);
+ aaudio_result_t registerStream(android::sp<AAudioServiceStreamBase> stream);
- virtual aaudio_result_t unregisterStream(android::sp<AAudioServiceStreamBase> stream);
+ aaudio_result_t unregisterStream(android::sp<AAudioServiceStreamBase> stream);
virtual aaudio_result_t startStream(android::sp<AAudioServiceStreamBase> stream,
audio_port_handle_t *clientHandle) = 0;
@@ -97,7 +97,18 @@
mOpenCount = count;
}
+ bool isConnected() const {
+ return mConnected;
+ }
+
protected:
+
+ /**
+ * @param portHandle
+ * @return true if a stream with the given portHandle is registered
+ */
+ bool isStreamRegistered(audio_port_handle_t portHandle);
+
void disconnectRegisteredStreams();
mutable std::mutex mLockStreams;
@@ -111,6 +122,7 @@
int32_t mOpenCount = 0;
int32_t mRequestedDeviceId = 0;
+ std::atomic<bool> mConnected{true};
};
} /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index c7d9b8e..efac788 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudioService"
+#define LOG_TAG "AAudioServiceEndpointCapture"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -57,8 +57,7 @@
// Read data from the shared MMAP stream and then distribute it to the client streams.
void *AAudioServiceEndpointCapture::callbackLoop() {
- ALOGD("AAudioServiceEndpointCapture(): callbackLoop() entering");
- int32_t underflowCount = 0;
+ ALOGD("callbackLoop() entering");
aaudio_result_t result = AAUDIO_OK;
int64_t timeoutNanos = getStreamInternal()->calculateReasonableTimeout();
@@ -73,7 +72,7 @@
disconnectRegisteredStreams();
break;
} else if (result != getFramesPerBurst()) {
- ALOGW("AAudioServiceEndpointCapture(): callbackLoop() read %d / %d",
+ ALOGW("callbackLoop() read %d / %d",
result, getFramesPerBurst());
break;
}
@@ -102,9 +101,10 @@
int64_t positionOffset = mmapFramesRead - clientFramesWritten;
streamShared->setTimestampPositionOffset(positionOffset);
+ // Is the buffer too full to write a burst?
if (fifo->getFifoControllerBase()->getEmptyFramesAvailable() <
- getFramesPerBurst()) {
- underflowCount++;
+ getFramesPerBurst()) {
+ streamShared->incrementXRunCount();
} else {
fifo->write(mDistributionBuffer, getFramesPerBurst());
}
@@ -125,6 +125,6 @@
}
}
- ALOGD("AAudioServiceEndpointCapture(): callbackLoop() exiting, %d underflows", underflowCount);
+ ALOGD("callbackLoop() exiting");
return NULL; // TODO review
}
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 4be25c8..f9e21fb 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -48,8 +48,10 @@
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
-AAudioServiceEndpointMMAP::AAudioServiceEndpointMMAP()
- : mMmapStream(nullptr) {}
+
+AAudioServiceEndpointMMAP::AAudioServiceEndpointMMAP(AAudioService &audioService)
+ : mMmapStream(nullptr)
+ , mAAudioService(audioService) {}
AAudioServiceEndpointMMAP::~AAudioServiceEndpointMMAP() {}
@@ -72,13 +74,6 @@
aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
aaudio_result_t result = AAUDIO_OK;
- const audio_attributes_t attributes = {
- .content_type = AUDIO_CONTENT_TYPE_MUSIC,
- .usage = AUDIO_USAGE_MEDIA,
- .source = AUDIO_SOURCE_VOICE_RECOGNITION,
- .flags = AUDIO_FLAG_LOW_LATENCY,
- .tags = ""
- };
audio_config_base_t config;
audio_port_handle_t deviceId;
@@ -87,6 +82,28 @@
copyFrom(request.getConstantConfiguration());
+ aaudio_direction_t direction = getDirection();
+
+ const audio_content_type_t contentType =
+ AAudioConvert_contentTypeToInternal(getContentType());
+ // Usage only used for OUTPUT
+ const audio_usage_t usage = (direction == AAUDIO_DIRECTION_OUTPUT)
+ ? AAudioConvert_usageToInternal(getUsage())
+ : AUDIO_USAGE_UNKNOWN;
+ const audio_source_t source = (direction == AAUDIO_DIRECTION_INPUT)
+ ? AAudioConvert_inputPresetToAudioSource(getInputPreset())
+ : AUDIO_SOURCE_DEFAULT;
+
+ const audio_attributes_t attributes = {
+ .content_type = contentType,
+ .usage = usage,
+ .source = source,
+ .flags = AUDIO_FLAG_LOW_LATENCY,
+ .tags = ""
+ };
+ ALOGD("%s(%p) MMAP attributes.usage = %d, content_type = %d, source = %d",
+ __func__, this, attributes.usage, attributes.content_type, attributes.source);
+
mMmapClient.clientUid = request.getUserId();
mMmapClient.clientPid = request.getProcessId();
mMmapClient.packageName.setTo(String16(""));
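A sketch of the direction-dependent attribute selection added above, using plain illustrative enums instead of the real audio.h types: usage only applies to OUTPUT streams and the capture source only applies to INPUT streams.

    #include <cstdio>

    enum class Direction { Output, Input };
    enum class Usage  { Unknown, Media };
    enum class Source { Default, VoiceRecognition };

    struct SketchAttributes { Usage usage; Source source; };

    SketchAttributes buildAttributes(Direction dir, Usage requestedUsage, Source requestedSource) {
        return {
            dir == Direction::Output ? requestedUsage  : Usage::Unknown,
            dir == Direction::Input  ? requestedSource : Source::Default
        };
    }

    int main() {
        SketchAttributes attr = buildAttributes(Direction::Input, Usage::Media, Source::VoiceRecognition);
        std::printf("usage=%d source=%d\n", static_cast<int>(attr.usage), static_cast<int>(attr.source));
        return 0;
    }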
@@ -108,7 +125,6 @@
int32_t aaudioSamplesPerFrame = getSamplesPerFrame();
- aaudio_direction_t direction = getDirection();
if (direction == AAUDIO_DIRECTION_OUTPUT) {
config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
? AUDIO_CHANNEL_OUT_STEREO
@@ -122,7 +138,7 @@
mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier
} else {
- ALOGE("openMmapStream - invalid direction = %d", direction);
+ ALOGE("%s() invalid direction = %d", __func__, direction);
return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
}
@@ -131,27 +147,42 @@
? MmapStreamInterface::DIRECTION_OUTPUT
: MmapStreamInterface::DIRECTION_INPUT;
+ aaudio_session_id_t requestedSessionId = getSessionId();
+ audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
+
// Open HAL stream. Set mMmapStream
status_t status = MmapStreamInterface::openMmapStream(streamDirection,
&attributes,
&config,
mMmapClient,
&deviceId,
+ &sessionId,
this, // callback
mMmapStream,
&mPortHandle);
- ALOGD("AAudioServiceEndpointMMAP::open() mMapClient.uid = %d, pid = %d => portHandle = %d\n",
- mMmapClient.clientUid, mMmapClient.clientPid, mPortHandle);
+ ALOGD("%s() mMapClient.uid = %d, pid = %d => portHandle = %d\n",
+ __func__, mMmapClient.clientUid, mMmapClient.clientPid, mPortHandle);
if (status != OK) {
- ALOGE("openMmapStream returned status %d", status);
+ ALOGE("%s() openMmapStream() returned status %d", __func__, status);
return AAUDIO_ERROR_UNAVAILABLE;
}
if (deviceId == AAUDIO_UNSPECIFIED) {
- ALOGW("AAudioServiceEndpointMMAP::open() - openMmapStream() failed to set deviceId");
+ ALOGW("%s() openMmapStream() failed to set deviceId", __func__);
}
setDeviceId(deviceId);
+ if (sessionId == AUDIO_SESSION_ALLOCATE) {
+ ALOGW("%s() - openMmapStream() failed to set sessionId", __func__);
+ }
+
+ aaudio_session_id_t actualSessionId =
+ (requestedSessionId == AAUDIO_SESSION_ID_NONE)
+ ? AAUDIO_SESSION_ID_NONE
+ : (aaudio_session_id_t) sessionId;
+ setSessionId(actualSessionId);
+ ALOGD("%s() deviceId = %d, sessionId = %d", __func__, getDeviceId(), getSessionId());
+
// Create MMAP/NOIRQ buffer.
int32_t minSizeFrames = getBufferCapacity();
if (minSizeFrames <= 0) { // zero will get rejected
@@ -159,14 +190,14 @@
}
status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
if (status != OK) {
- ALOGE("AAudioServiceEndpointMMAP::open() - createMmapBuffer() failed with status %d %s",
- status, strerror(-status));
+ ALOGE("%s() - createMmapBuffer() failed with status %d %s",
+ __func__, status, strerror(-status));
result = AAUDIO_ERROR_UNAVAILABLE;
goto error;
} else {
- ALOGD("createMmapBuffer status = %d, buffer_size = %d, burst_size %d"
+ ALOGD("%s() createMmapBuffer() returned = %d, buffer_size = %d, burst_size %d"
", Sharable FD: %s",
- status,
+ __func__, status,
abs(mMmapBufferinfo.buffer_size_frames),
mMmapBufferinfo.burst_size_frames,
mMmapBufferinfo.buffer_size_frames < 0 ? "Yes" : "No");
@@ -186,7 +217,7 @@
// Fallback is handled by caller but indicate what is possible in case
// this is used in the future
setSharingMode(AAUDIO_SHARING_MODE_SHARED);
- ALOGW("AAudioServiceEndpointMMAP::open() - exclusive FD cannot be used by client");
+ ALOGW("%s() - exclusive FD cannot be used by client", __func__);
result = AAUDIO_ERROR_UNAVAILABLE;
goto error;
}
@@ -201,7 +232,7 @@
// Assume that AudioFlinger will close the original shared_memory_fd.
mAudioDataFileDescriptor.reset(dup(mMmapBufferinfo.shared_memory_fd));
if (mAudioDataFileDescriptor.get() == -1) {
- ALOGE("AAudioServiceEndpointMMAP::open() - could not dup shared_memory_fd");
+ ALOGE("%s() - could not dup shared_memory_fd", __func__);
result = AAUDIO_ERROR_INTERNAL;
goto error;
}
@@ -219,12 +250,12 @@
burstMicros = mFramesPerBurst * static_cast<int64_t>(1000000) / getSampleRate();
} while (burstMicros < burstMinMicros);
- ALOGD("AAudioServiceEndpointMMAP::open() original burst = %d, minMicros = %d, to burst = %d\n",
- mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
+ ALOGD("%s() original burst = %d, minMicros = %d, to burst = %d\n",
+ __func__, mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
- ALOGD("AAudioServiceEndpointMMAP::open() actual rate = %d, channels = %d"
+ ALOGD("%s() actual rate = %d, channels = %d"
", deviceId = %d, capacity = %d\n",
- getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());
+ __func__, getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());
return result;
@@ -234,9 +265,8 @@
}
aaudio_result_t AAudioServiceEndpointMMAP::close() {
-
if (mMmapStream != 0) {
- ALOGD("AAudioServiceEndpointMMAP::close() clear() endpoint");
+ ALOGD("%s() clear() endpoint", __func__);
// Needs to be explicitly cleared or CTS will fail but it is not clear why.
mMmapStream.clear();
// Apparently the above close is asynchronous. An attempt to open a new device
@@ -249,35 +279,50 @@
}
aaudio_result_t AAudioServiceEndpointMMAP::startStream(sp<AAudioServiceStreamBase> stream,
- audio_port_handle_t *clientHandle) {
+ audio_port_handle_t *clientHandle __unused) {
// Start the client on behalf of the AAudio service.
// Use the port handle that was provided by openMmapStream().
- return startClient(mMmapClient, &mPortHandle);
+ audio_port_handle_t tempHandle = mPortHandle;
+ aaudio_result_t result = startClient(mMmapClient, &tempHandle);
+ // When AudioFlinger is passed a valid port handle then it should not change it.
+ LOG_ALWAYS_FATAL_IF(tempHandle != mPortHandle,
+ "%s() port handle not expected to change from %d to %d",
+ __func__, mPortHandle, tempHandle);
+ ALOGV("%s(%p) mPortHandle = %d", __func__, stream.get(), mPortHandle);
+ return result;
}
aaudio_result_t AAudioServiceEndpointMMAP::stopStream(sp<AAudioServiceStreamBase> stream,
- audio_port_handle_t clientHandle) {
+ audio_port_handle_t clientHandle __unused) {
mFramesTransferred.reset32();
+
+ // Round 64-bit counter up to a multiple of the buffer capacity.
+ // This is required because the 64-bit counter is used as an index
+ // into a circular buffer and the actual HW position is reset to zero
+ // when the stream is stopped.
+ mFramesTransferred.roundUp64(getBufferCapacity());
+
+ // Use the port handle that was provided by openMmapStream().
+ ALOGV("%s(%p) mPortHandle = %d", __func__, stream.get(), mPortHandle);
return stopClient(mPortHandle);
}
aaudio_result_t AAudioServiceEndpointMMAP::startClient(const android::AudioClient& client,
audio_port_handle_t *clientHandle) {
if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
- ALOGD("AAudioServiceEndpointMMAP::startClient(%p(uid=%d, pid=%d))",
- &client, client.clientUid, client.clientPid);
+ ALOGD("%s(%p(uid=%d, pid=%d))", __func__, &client, client.clientUid, client.clientPid);
audio_port_handle_t originalHandle = *clientHandle;
status_t status = mMmapStream->start(client, clientHandle);
aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
- ALOGD("AAudioServiceEndpointMMAP::startClient() , %d => %d returns %d",
- originalHandle, *clientHandle, result);
+ ALOGD("%s() , portHandle %d => %d, returns %d", __func__, originalHandle, *clientHandle, result);
return result;
}
aaudio_result_t AAudioServiceEndpointMMAP::stopClient(audio_port_handle_t clientHandle) {
+ ALOGD("%s(portHandle = %d), called", __func__, clientHandle);
if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
- ALOGD("AAudioServiceEndpointMMAP::stopClient(%d) returns %d", clientHandle, result);
+ ALOGD("%s(portHandle = %d), returns %d", __func__, clientHandle, result);
return result;
}
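The stopStream() comment above says the 64-bit frame counter is rounded up to a multiple of the buffer capacity because the hardware position resets to zero on stop. A sketch of that arithmetic, assuming this is what roundUp64() does (illustrative only, not the real counter class):

    #include <cstdint>
    #include <cstdio>

    int64_t roundUpToMultiple(int64_t frames, int32_t capacity) {
        int64_t remainder = frames % capacity;
        return (remainder == 0) ? frames : frames + (capacity - remainder);
    }

    int main() {
        // e.g. 10100 frames transferred with a 4096-frame buffer -> 12288
        std::printf("%lld\n", (long long) roundUpToMultiple(10100, 4096));
        return 0;
    }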
@@ -289,13 +334,13 @@
return AAUDIO_ERROR_NULL;
}
status_t status = mMmapStream->getMmapPosition(&position);
- ALOGV("AAudioServiceEndpointMMAP::getFreeRunningPosition() status= %d, pos = %d, nanos = %lld\n",
- status, position.position_frames, (long long) position.time_nanoseconds);
+ ALOGV("%s() status= %d, pos = %d, nanos = %lld\n",
+ __func__, status, position.position_frames, (long long) position.time_nanoseconds);
aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
if (result == AAUDIO_ERROR_UNAVAILABLE) {
- ALOGW("sendCurrentTimestamp(): getMmapPosition() has no position data available");
+ ALOGW("%s(): getMmapPosition() has no position data available", __func__);
} else if (result != AAUDIO_OK) {
- ALOGE("sendCurrentTimestamp(): getMmapPosition() returned status %d", status);
+ ALOGE("%s(): getMmapPosition() returned status %d", __func__, status);
} else {
// Convert 32-bit position to 64-bit position.
mFramesTransferred.update32(position.position_frames);
@@ -310,17 +355,27 @@
return 0; // TODO
}
-
-void AAudioServiceEndpointMMAP::onTearDown() {
- ALOGD("AAudioServiceEndpointMMAP::onTearDown() called");
- disconnectRegisteredStreams();
+// This is called by AudioFlinger when it wants to destroy a stream.
+void AAudioServiceEndpointMMAP::onTearDown(audio_port_handle_t portHandle) {
+ ALOGD("%s(portHandle = %d) called", __func__, portHandle);
+ // Are we tearing down the EXCLUSIVE MMAP stream?
+ if (isStreamRegistered(portHandle)) {
+ ALOGD("%s(%d) tearing down this entire MMAP endpoint", __func__, portHandle);
+ disconnectRegisteredStreams();
+ } else {
+ // Must be a SHARED stream?
+ ALOGD("%s(%d) disconnect a specific stream", __func__, portHandle);
+ aaudio_result_t result = mAAudioService.disconnectStreamByPortHandle(portHandle);
+ ALOGD("%s(%d) disconnectStreamByPortHandle returned %d", __func__, portHandle, result);
+ }
};
void AAudioServiceEndpointMMAP::onVolumeChanged(audio_channel_mask_t channels,
android::Vector<float> values) {
- // TODO do we really need a different volume for each channel?
+ // TODO Do we really need a different volume for each channel?
+ // We get called with an array filled with a single value!
float volume = values[0];
- ALOGD("AAudioServiceEndpointMMAP::onVolumeChanged() volume[0] = %f", volume);
+ ALOGD("%s(%p) volume[0] = %f", __func__, this, volume);
std::lock_guard<std::mutex> lock(mLockStreams);
for(const auto stream : mRegisteredStreams) {
stream->onVolumeChanged(volume);
@@ -328,8 +383,7 @@
};
void AAudioServiceEndpointMMAP::onRoutingChanged(audio_port_handle_t deviceId) {
- ALOGD("AAudioServiceEndpointMMAP::onRoutingChanged() called with %d, old = %d",
- deviceId, getDeviceId());
+ ALOGD("%s(%p) called with dev %d, old = %d", __func__, this, deviceId, getDeviceId());
if (getDeviceId() != AUDIO_PORT_HANDLE_NONE && getDeviceId() != deviceId) {
disconnectRegisteredStreams();
}
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 16b6269..5e815e0 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -42,7 +42,7 @@
, public android::MmapStreamCallback {
public:
- AAudioServiceEndpointMMAP();
+ explicit AAudioServiceEndpointMMAP(android::AAudioService &audioService);
virtual ~AAudioServiceEndpointMMAP();
@@ -68,7 +68,7 @@
aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos) override;
// -------------- Callback functions for MmapStreamCallback ---------------------
- void onTearDown() override;
+ void onTearDown(audio_port_handle_t handle) override;
void onVolumeChanged(audio_channel_mask_t channels,
android::Vector<float> values) override;
@@ -88,8 +88,12 @@
// Interface to the AudioFlinger MMAP support.
android::sp<android::MmapStreamInterface> mMmapStream;
struct audio_mmap_buffer_info mMmapBufferinfo;
+
+ // There is only one port associated with an MMAP endpoint.
audio_port_handle_t mPortHandle = AUDIO_PORT_HANDLE_NONE;
+ android::AAudioService &mAAudioService;
+
android::base::unique_fd mAudioDataFileDescriptor;
int64_t mHardwareTimeOffsetNanos = 0; // TODO get from HAL
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 9b1833a..a274466 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -34,6 +34,7 @@
#include "AAudioServiceStreamShared.h"
#include "AAudioServiceEndpointPlay.h"
#include "AAudioServiceEndpointShared.h"
+#include "AAudioServiceStreamBase.h"
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
@@ -42,10 +43,12 @@
AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService &audioService)
: mStreamInternalPlay(audioService, true) {
+ ALOGD("%s(%p) created", __func__, this);
mStreamInternal = &mStreamInternalPlay;
}
AAudioServiceEndpointPlay::~AAudioServiceEndpointPlay() {
+ ALOGD("%s(%p) destroyed", __func__, this);
}
aaudio_result_t AAudioServiceEndpointPlay::open(const aaudio::AAudioStreamRequest &request) {
@@ -67,6 +70,7 @@
// Mix data from each application stream and write result to the shared MMAP stream.
void *AAudioServiceEndpointPlay::callbackLoop() {
+ ALOGD("%s() entering >>>>>>>>>>>>>>> MIXER", __func__);
aaudio_result_t result = AAUDIO_OK;
int64_t timeoutNanos = getStreamInternal()->calculateReasonableTimeout();
@@ -82,9 +86,13 @@
std::lock_guard <std::mutex> lock(mLockStreams);
for (const auto clientStream : mRegisteredStreams) {
int64_t clientFramesRead = 0;
+ bool allowUnderflow = true;
- if (!clientStream->isRunning()) {
- continue;
+ aaudio_stream_state_t state = clientStream->getState();
+ if (state == AAUDIO_STREAM_STATE_STOPPING) {
+ allowUnderflow = false; // just read what is already in the FIFO
+ } else if (state != AAUDIO_STREAM_STATE_STARTED) {
+ continue; // this stream is not running so skip it.
}
sp<AAudioServiceStreamShared> streamShared =
@@ -104,10 +112,19 @@
int64_t positionOffset = mmapFramesWritten - clientFramesRead;
streamShared->setTimestampPositionOffset(positionOffset);
- float volume = 1.0; // to match legacy volume
- bool underflowed = mMixer.mix(index, fifo, volume);
- if (underflowed) {
- streamShared->incrementXRunCount();
+ int32_t framesMixed = mMixer.mix(index, fifo, allowUnderflow);
+
+ if (streamShared->isFlowing()) {
+ // Consider it an underflow if we got less than a burst
+ // after the data started flowing.
+ bool underflowed = allowUnderflow
+ && framesMixed < mMixer.getFramesPerBurst();
+ if (underflowed) {
+ streamShared->incrementXRunCount();
+ }
+ } else if (framesMixed > 0) {
+ // Mark beginning of data flow after a start.
+ streamShared->setFlowing(true);
}
clientFramesRead = fifo->getReadCounter();
}
@@ -132,11 +149,13 @@
AAudioServiceEndpointShared::disconnectRegisteredStreams();
break;
} else if (result != getFramesPerBurst()) {
- ALOGW("AAudioServiceEndpoint(): callbackLoop() wrote %d / %d",
+ ALOGW("callbackLoop() wrote %d / %d",
result, getFramesPerBurst());
break;
}
}
+ ALOGD("%s() exiting, enabled = %d, state = %d, result = %d <<<<<<<<<<<<< MIXER",
+ __func__, mCallbackEnabled.load(), getStreamInternal()->getState(), result);
return NULL; // TODO review
}
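A sketch of the per-stream XRun accounting used in the mixer loop above, with plain C++ stand-ins rather than the real AAudioServiceStreamShared API: a short read only counts as an XRun once the stream has started flowing, and never while a stopping stream is deliberately draining.

    #include <cstdint>
    #include <cstdio>

    struct SketchSharedStream {
        bool flowing = false;     // set once the first frames arrive after start()
        int32_t xRunCount = 0;
    };

    void accountForBurst(SketchSharedStream &s, int32_t framesMixed,
                         int32_t framesPerBurst, bool allowUnderflow) {
        if (s.flowing) {
            // Only a short read counts as an XRun once data has started flowing,
            // and only while we are not draining (allowUnderflow == false).
            if (allowUnderflow && framesMixed < framesPerBurst) {
                s.xRunCount++;
            }
        } else if (framesMixed > 0) {
            s.flowing = true;     // mark the beginning of data flow after a start
        }
    }

    int main() {
        SketchSharedStream stream;
        accountForBurst(stream, 0, 160, true);    // nothing yet: still priming, not an XRun
        accountForBurst(stream, 160, 160, true);  // data starts flowing
        accountForBurst(stream, 96, 160, true);   // short read after flow started -> XRun
        std::printf("xRunCount = %d\n", stream.xRunCount);
        return 0;
    }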
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index cd40066..63b9983 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -47,6 +47,7 @@
<< std::setfill('0') << std::setw(8)
<< std::hex << mStreamInternal->getServiceHandle()
<< std::dec << std::setfill(' ');
+ result << ", XRuns = " << mStreamInternal->getXRunCount();
result << "\n";
result << " Running Stream Count: " << mRunningStreamCount << "\n";
@@ -59,18 +60,16 @@
aaudio_result_t result = AAUDIO_OK;
const AAudioStreamConfiguration &configuration = request.getConstantConfiguration();
+ copyFrom(configuration);
mRequestedDeviceId = configuration.getDeviceId();
- setDirection(configuration.getDirection());
AudioStreamBuilder builder;
+ builder.copyFrom(configuration);
+
builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
// Don't fall back to SHARED because that would cause recursion.
builder.setSharingModeMatchRequired(true);
- builder.setDeviceId(mRequestedDeviceId);
- builder.setFormat(configuration.getFormat());
- builder.setSampleRate(configuration.getSampleRate());
- builder.setSamplesPerFrame(configuration.getSamplesPerFrame());
- builder.setDirection(configuration.getDirection());
+
builder.setBufferCapacity(DEFAULT_BUFFER_CAPACITY);
result = mStreamInternal->open(builder);
@@ -78,6 +77,7 @@
setSampleRate(mStreamInternal->getSampleRate());
setSamplesPerFrame(mStreamInternal->getSamplesPerFrame());
setDeviceId(mStreamInternal->getDeviceId());
+ setSessionId(mStreamInternal->getSessionId());
mFramesPerBurst = mStreamInternal->getFramesPerBurst();
return result;
@@ -88,13 +88,22 @@
}
// Glue between C and C++ callbacks.
-static void *aaudio_endpoint_thread_proc(void *context) {
- AAudioServiceEndpointShared *endpoint = (AAudioServiceEndpointShared *) context;
- if (endpoint != NULL) {
- return endpoint->callbackLoop();
- } else {
- return NULL;
+static void *aaudio_endpoint_thread_proc(void *arg) {
+ assert(arg != nullptr);
+
+ // The caller passed in a smart pointer to prevent the endpoint from getting deleted
+ // while the thread was launching.
+ sp<AAudioServiceEndpointShared> *endpointForThread =
+ static_cast<sp<AAudioServiceEndpointShared> *>(arg);
+ sp<AAudioServiceEndpointShared> endpoint = *endpointForThread;
+ delete endpointForThread; // Just use scoped smart pointer. Don't need this anymore.
+ void *result = endpoint->callbackLoop();
+ // Close now so that the HW resource is freed and we can open a new device.
+ if (!endpoint->isConnected()) {
+ endpoint->close();
}
+
+ return result;
}
aaudio_result_t aaudio::AAudioServiceEndpointShared::startSharingThread_l() {
@@ -103,7 +112,16 @@
* AAUDIO_NANOS_PER_SECOND
/ getSampleRate();
mCallbackEnabled.store(true);
- return getStreamInternal()->createThread(periodNanos, aaudio_endpoint_thread_proc, this);
+ // Pass a smart pointer so the thread can hold a reference.
+ sp<AAudioServiceEndpointShared> *endpointForThread = new sp<AAudioServiceEndpointShared>(this);
+ aaudio_result_t result = getStreamInternal()->createThread(periodNanos,
+ aaudio_endpoint_thread_proc,
+ endpointForThread);
+ if (result != AAUDIO_OK) {
+ // The thread can't delete it so we have to do it here.
+ delete endpointForThread;
+ }
+ return result;
}
aaudio_result_t aaudio::AAudioServiceEndpointShared::stopSharingThread() {
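A sketch of the ownership handoff used by the thread glue above: the service heap-allocates a smart pointer, passes it to the C-style thread entry point, and the thread deletes it after taking a local copy, so the endpoint cannot be destroyed while the thread is starting. std::shared_ptr and std::thread stand in for android::sp and the service's createThread(); names are illustrative.

    #include <cstdio>
    #include <memory>
    #include <thread>

    struct SketchEndpoint {
        void callbackLoop() { std::printf("callback loop running\n"); }
    };

    void *endpointThreadProc(void *arg) {
        // Take over the heap-allocated shared_ptr passed by the launcher.
        auto *holder = static_cast<std::shared_ptr<SketchEndpoint> *>(arg);
        std::shared_ptr<SketchEndpoint> endpoint = *holder;
        delete holder;                   // local copy now keeps the endpoint alive
        endpoint->callbackLoop();
        return nullptr;
    }

    int main() {
        auto endpoint = std::make_shared<SketchEndpoint>();
        auto *holder = new std::shared_ptr<SketchEndpoint>(endpoint);
        // In the diff, the error path deletes the holder if the thread could not be created.
        std::thread t(endpointThreadProc, static_cast<void *>(holder));
        t.join();
        return 0;
    }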
diff --git a/services/oboeservice/AAudioServiceEndpointShared.h b/services/oboeservice/AAudioServiceEndpointShared.h
index e3bd2c1..d671710 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.h
+++ b/services/oboeservice/AAudioServiceEndpointShared.h
@@ -30,8 +30,7 @@
namespace aaudio {
/**
- * This Service class corresponds to a Client stream that shares an MMAP device through a mixer
- * or an input distributor.
+ * This manages an AudioStreamInternal that is shared by multiple Client streams.
*/
class AAudioServiceEndpointShared : public AAudioServiceEndpoint {
@@ -55,12 +54,13 @@
virtual void *callbackLoop() = 0;
+
+protected:
+
AudioStreamInternal *getStreamInternal() const {
return mStreamInternal;
};
-protected:
-
aaudio_result_t startSharingThread_l();
aaudio_result_t stopSharingThread();
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index e670129..9af8af3 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -42,7 +42,7 @@
AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
: mUpMessageQueue(nullptr)
- , mTimestampThread()
+ , mTimestampThread("AATime")
, mAtomicTimestamp()
, mAudioService(audioService) {
mMmapClient.clientUid = -1;
@@ -51,7 +51,7 @@
}
AAudioServiceStreamBase::~AAudioServiceStreamBase() {
- ALOGD("AAudioServiceStreamBase::~AAudioServiceStreamBase() destroying %p", this);
+ ALOGD("~AAudioServiceStreamBase() destroying %p", this);
// If the stream is deleted when OPEN or in use then audio resources will leak.
// This would indicate an internal error. So we want to find this ASAP.
LOG_ALWAYS_FATAL_IF(!(getState() == AAUDIO_STREAM_STATE_CLOSED
@@ -61,7 +61,7 @@
}
std::string AAudioServiceStreamBase::dumpHeader() {
- return std::string(" T Handle UId Run State Format Burst Chan Capacity");
+ return std::string(" T Handle UId Port Run State Format Burst Chan Capacity");
}
std::string AAudioServiceStreamBase::dump() const {
@@ -70,6 +70,7 @@
result << " 0x" << std::setfill('0') << std::setw(8) << std::hex << mHandle
<< std::dec << std::setfill(' ') ;
result << std::setw(6) << mMmapClient.clientUid;
+ result << std::setw(7) << mClientHandle;
result << std::setw(4) << (isRunning() ? "yes" : " no");
result << std::setw(6) << getState();
result << std::setw(7) << getFormat();
@@ -93,7 +94,7 @@
{
std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
if (mUpMessageQueue != nullptr) {
- ALOGE("AAudioServiceStreamBase::open() called twice");
+ ALOGE("%s() called twice", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -104,14 +105,20 @@
goto error;
}
+ // This is not protected by a lock because the stream cannot be
+ // referenced until the service returns a handle to the client.
+ // So only one thread can open a stream.
mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService,
request,
sharingMode);
if (mServiceEndpoint == nullptr) {
- ALOGE("AAudioServiceStreamBase::open() openEndpoint() failed");
+ ALOGE("%s() openEndpoint() failed", __func__);
result = AAUDIO_ERROR_UNAVAILABLE;
goto error;
}
+ // Save a weak pointer that we will use to access the endpoint.
+ mServiceEndpointWeak = mServiceEndpoint;
+
mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
copyFrom(*mServiceEndpoint);
}
@@ -130,13 +137,16 @@
stop();
- if (mServiceEndpoint == nullptr) {
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
result = AAUDIO_ERROR_INVALID_STATE;
} else {
- mServiceEndpoint->unregisterStream(this);
- AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
- mEndpointManager.closeEndpoint(mServiceEndpoint);
- mServiceEndpoint.clear();
+ endpoint->unregisterStream(this);
+ AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
+ endpointManager.closeEndpoint(endpoint);
+
+ // AAudioService::closeStream() prevents two threads from closing at the same time.
+ mServiceEndpoint.clear(); // endpoint will hold the pointer until this method returns.
}
{
@@ -152,7 +162,12 @@
aaudio_result_t AAudioServiceStreamBase::startDevice() {
mClientHandle = AUDIO_PORT_HANDLE_NONE;
- return mServiceEndpoint->startStream(this, &mClientHandle);
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return endpoint->startStream(this, &mClientHandle);
}
/**
@@ -162,15 +177,12 @@
*/
aaudio_result_t AAudioServiceStreamBase::start() {
aaudio_result_t result = AAUDIO_OK;
+
if (isRunning()) {
return AAUDIO_OK;
}
- if (mServiceEndpoint == nullptr) {
- ALOGE("AAudioServiceStreamBase::start() missing endpoint");
- result = AAUDIO_ERROR_INVALID_STATE;
- goto error;
- }
+ setFlowing(false);
// Start with fresh presentation timestamps.
mAtomicTimestamp.clear();
@@ -198,23 +210,28 @@
if (!isRunning()) {
return result;
}
- if (mServiceEndpoint == nullptr) {
- ALOGE("AAudioServiceStreamShared::pause() missing endpoint");
- return AAUDIO_ERROR_INVALID_STATE;
- }
- result = mServiceEndpoint->stopStream(this, mClientHandle);
- if (result != AAUDIO_OK) {
- ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
- disconnect(); // TODO should we return or pause Base first?
- }
+ // Send it now because the timestamp gets rounded up when stopStream() is called below.
+ // Also we don't need the timestamps while we are shutting down.
sendCurrentTimestamp();
- mThreadEnabled.store(false);
- result = mTimestampThread.stop();
+
+ result = stopTimestampThread();
if (result != AAUDIO_OK) {
disconnect();
return result;
}
+
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ result = endpoint->stopStream(this, mClientHandle);
+ if (result != AAUDIO_OK) {
+ ALOGE("%s() mServiceEndpoint returned %d, %s", __func__, result, getTypeText());
+ disconnect(); // TODO should we return or pause Base first?
+ }
+
sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
setState(AAUDIO_STREAM_STATE_PAUSED);
return result;
@@ -226,11 +243,10 @@
return result;
}
- if (mServiceEndpoint == nullptr) {
- ALOGE("AAudioServiceStreamShared::stop() missing endpoint");
- return AAUDIO_ERROR_INVALID_STATE;
- }
+ setState(AAUDIO_STREAM_STATE_STOPPING);
+ // Send it now because the timestamp gets rounded up when stopStream() is called below.
+ // Also we don't need the timestamps while we are shutting down.
sendCurrentTimestamp(); // warning - this calls a virtual function
result = stopTimestampThread();
if (result != AAUDIO_OK) {
@@ -238,10 +254,15 @@
return result;
}
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
// TODO wait for data to be played out
- result = mServiceEndpoint->stopStream(this, mClientHandle);
+ result = endpoint->stopStream(this, mClientHandle);
if (result != AAUDIO_OK) {
- ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
+ ALOGE("%s() stopStream returned %d, %s", __func__, result, getTypeText());
disconnect();
// TODO what to do with result here?
}
@@ -261,11 +282,11 @@
}
aaudio_result_t AAudioServiceStreamBase::flush() {
- if (getState() != AAUDIO_STREAM_STATE_PAUSED) {
- ALOGE("AAudioServiceStreamBase::flush() stream not paused, state = %s",
- AAudio_convertStreamStateToText(mState));
- return AAUDIO_ERROR_INVALID_STATE;
+ aaudio_result_t result = AAudio_isFlushAllowed(getState());
+ if (result != AAUDIO_OK) {
+ return result;
}
+
// Data will get flushed when the client receives the FLUSHED event.
sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
setState(AAUDIO_STREAM_STATE_FLUSHED);
@@ -274,7 +295,7 @@
// implement Runnable, periodically send timestamps to client
void AAudioServiceStreamBase::run() {
- ALOGD("AAudioServiceStreamBase::run() entering ----------------");
+ ALOGD("%s() %s entering >>>>>>>>>>>>>> TIMESTAMPS", __func__, getTypeText());
TimestampScheduler timestampScheduler;
timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
timestampScheduler.start(AudioClock::getNanoseconds());
@@ -292,7 +313,7 @@
AudioClock::sleepUntilNanoTime(nextTime);
}
}
- ALOGD("AAudioServiceStreamBase::run() exiting ----------------");
+ ALOGD("%s() %s exiting <<<<<<<<<<<<<< TIMESTAMPS", __func__, getTypeText());
}
void AAudioServiceStreamBase::disconnect() {
@@ -303,12 +324,19 @@
}
aaudio_result_t AAudioServiceStreamBase::sendServiceEvent(aaudio_service_event_t event,
- double dataDouble,
- int64_t dataLong) {
+ double dataDouble) {
AAudioServiceMessage command;
command.what = AAudioServiceMessage::code::EVENT;
command.event.event = event;
command.event.dataDouble = dataDouble;
+ return writeUpMessageQueue(&command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::sendServiceEvent(aaudio_service_event_t event,
+ int64_t dataLong) {
+ AAudioServiceMessage command;
+ command.what = AAudioServiceMessage::code::EVENT;
+ command.event.event = event;
command.event.dataLong = dataLong;
return writeUpMessageQueue(&command);
}
@@ -316,25 +344,29 @@
aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
if (mUpMessageQueue == nullptr) {
- ALOGE("writeUpMessageQueue(): mUpMessageQueue null! - stream not open");
+ ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
return AAUDIO_ERROR_NULL;
}
int32_t count = mUpMessageQueue->getFifoBuffer()->write(command, 1);
if (count != 1) {
- ALOGE("writeUpMessageQueue(): Queue full. Did client die?");
+ ALOGE("%s(): Queue full. Did client die? %s", __func__, getTypeText());
return AAUDIO_ERROR_WOULD_BLOCK;
} else {
return AAUDIO_OK;
}
}
+aaudio_result_t AAudioServiceStreamBase::sendXRunCount(int32_t xRunCount) {
+ return sendServiceEvent(AAUDIO_SERVICE_EVENT_XRUN, (int64_t) xRunCount);
+}
+
aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
AAudioServiceMessage command;
// Send a timestamp for the clock model.
aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
&command.timestamp.timestamp);
if (result == AAUDIO_OK) {
- ALOGV("sendCurrentTimestamp() SERVICE %8lld at %lld",
+ ALOGV("%s() SERVICE %8lld at %lld", __func__,
(long long) command.timestamp.position,
(long long) command.timestamp.timestamp);
command.what = AAudioServiceMessage::code::TIMESTAMP_SERVICE;
@@ -345,7 +377,7 @@
result = getHardwareTimestamp(&command.timestamp.position,
&command.timestamp.timestamp);
if (result == AAUDIO_OK) {
- ALOGV("sendCurrentTimestamp() HARDWARE %8lld at %lld",
+ ALOGV("%s() HARDWARE %8lld at %lld", __func__,
(long long) command.timestamp.position,
(long long) command.timestamp.timestamp);
command.what = AAudioServiceMessage::code::TIMESTAMP_HARDWARE;
@@ -368,7 +400,7 @@
{
std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
if (mUpMessageQueue == nullptr) {
- ALOGE("getDescription(): mUpMessageQueue null! - stream not open");
+ ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
return AAUDIO_ERROR_NULL;
}
// Gather information on the message queue.
@@ -381,3 +413,14 @@
void AAudioServiceStreamBase::onVolumeChanged(float volume) {
sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
}
+
+int32_t AAudioServiceStreamBase::incrementServiceReferenceCount_l() {
+ return ++mCallingCount;
+}
+
+int32_t AAudioServiceStreamBase::decrementServiceReferenceCount_l() {
+ int32_t count = --mCallingCount;
+ // Each call to increment should be balanced with one call to decrement.
+ assert(count >= 0);
+ return count;
+}
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index af435b4..a1815d0 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -55,7 +55,7 @@
, public Runnable {
public:
- AAudioServiceStreamBase(android::AAudioService &aAudioService);
+ explicit AAudioServiceStreamBase(android::AAudioService &aAudioService);
virtual ~AAudioServiceStreamBase();
@@ -129,11 +129,15 @@
// -------------------------------------------------------------------
/**
- * Send a message to the client.
+ * Send a message to the client with an int64_t data value.
*/
aaudio_result_t sendServiceEvent(aaudio_service_event_t event,
- double dataDouble = 0.0,
int64_t dataLong = 0);
+ /**
+ * Send a message to the client with a double data value.
+ */
+ aaudio_result_t sendServiceEvent(aaudio_service_event_t event,
+ double dataDouble);
/**
* Fill in a parcelable description of stream.
@@ -176,12 +180,62 @@
mHandle = handle;
}
+ audio_port_handle_t getPortHandle() const {
+ return mClientHandle;
+ }
+
aaudio_stream_state_t getState() const {
return mState;
}
void onVolumeChanged(float volume);
+ /**
+ * Set false when the stream is started.
+ * Set true when data is first read from the stream.
+ * @param b true when data has started flowing
+ */
+ void setFlowing(bool b) {
+ mFlowing = b;
+ }
+
+ bool isFlowing() const {
+ return mFlowing;
+ }
+
+ /**
+ * Atomically increment the number of active references to the stream by AAudioService.
+ *
+ * This is called under a global lock in AAudioStreamTracker.
+ *
+ * @return value after the increment
+ */
+ int32_t incrementServiceReferenceCount_l();
+
+ /**
+ * Atomically decrement the number of active references to the stream by AAudioService.
+ * This should only be called after incrementServiceReferenceCount_l().
+ *
+ * This is called under a global lock in AAudioStreamTracker.
+ *
+ * @return value after the decrement
+ */
+ int32_t decrementServiceReferenceCount_l();
+
+ bool isCloseNeeded() const {
+ return mCloseNeeded.load();
+ }
+
+ /**
+ * Mark this stream as needing to be closed.
+ * Once marked for closing, it cannot be unmarked.
+ */
+ void markCloseNeeded() {
+ mCloseNeeded.store(true);
+ }
+
+ virtual const char *getTypeText() const { return "Base"; }
+
protected:
/**
@@ -204,6 +258,8 @@
aaudio_result_t sendCurrentTimestamp();
+ aaudio_result_t sendXRunCount(int32_t xRunCount);
+
/**
* @param positionFrames
* @param timeNanos
@@ -228,15 +284,27 @@
int32_t mFramesPerBurst = 0;
android::AudioClient mMmapClient; // set in open, used in MMAP start()
+ // TODO rename mClientHandle to mPortHandle to be more consistent with AudioFlinger.
audio_port_handle_t mClientHandle = AUDIO_PORT_HANDLE_NONE;
SimpleDoubleBuffer<Timestamp> mAtomicTimestamp;
android::AAudioService &mAudioService;
+
+ // The mServiceEndpoint variable can be accessed by multiple threads.
+ // So we access it by locally promoting a weak pointer to a smart pointer,
+ // which is thread-safe.
android::sp<AAudioServiceEndpoint> mServiceEndpoint;
+ android::wp<AAudioServiceEndpoint> mServiceEndpointWeak;
private:
aaudio_handle_t mHandle = -1;
+ bool mFlowing = false;
+
+ // This is modified under a global lock in AAudioStreamTracker.
+ int32_t mCallingCount = 0;
+
+ std::atomic<bool> mCloseNeeded{false};
};
} /* namespace aaudio */
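Editorial sketch (not part of the patch): the mServiceEndpointWeak comment above describes the promote-then-check idiom that the MMAP and Shared stream diffs below apply at every endpoint access. The same idea using the std:: analogs rather than the Android sp<>/wp<> types:

    #include <iostream>
    #include <memory>

    // Stand-in for AAudioServiceEndpoint; only the access pattern matters here.
    struct Endpoint {
        int registerStream() { return 0; }
    };

    struct Stream {
        std::weak_ptr<Endpoint> endpointWeak;   // analogous to mServiceEndpointWeak

        int open() {
            // Promote the weak reference; this succeeds only while another owner
            // still holds the endpoint, so the call below cannot race its teardown.
            std::shared_ptr<Endpoint> endpoint = endpointWeak.lock();
            if (!endpoint) {
                std::cerr << "open() has no endpoint\n";
                return -1;                      // analogous to AAUDIO_ERROR_INVALID_STATE
            }
            return endpoint->registerStream();
        }
    };

    int main() {
        auto endpoint = std::make_shared<Endpoint>();
        Stream stream{endpoint};
        std::cout << stream.open() << "\n";     // 0: endpoint still alive
        endpoint.reset();                       // endpoint torn down elsewhere
        std::cout << stream.open() << "\n";     // -1: promotion fails cleanly
        return 0;
    }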
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 44ba1ca..c845309 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -70,20 +70,23 @@
return result;
}
- result = mServiceEndpoint->registerStream(keep);
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ result = endpoint->registerStream(keep);
if (result != AAUDIO_OK) {
- goto error;
+ return result;
}
setState(AAUDIO_STREAM_STATE_OPEN);
-error:
return AAUDIO_OK;
}
-/**
- * Start the flow of data.
- */
+// Start the flow of data.
aaudio_result_t AAudioServiceStreamMMAP::startDevice() {
aaudio_result_t result = AAudioServiceStreamBase::startDevice();
if (!mInService && result == AAUDIO_OK) {
@@ -93,9 +96,7 @@
return result;
}
-/**
- * Stop the flow of data such that start() can resume with loss of data.
- */
+// Stop the flow of data such that start() can resume with loss of data.
aaudio_result_t AAudioServiceStreamMMAP::pause() {
if (!isRunning()) {
return AAUDIO_OK;
@@ -122,21 +123,37 @@
aaudio_result_t AAudioServiceStreamMMAP::startClient(const android::AudioClient& client,
audio_port_handle_t *clientHandle) {
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
// Start the client on behalf of the application. Generate a new porthandle.
- aaudio_result_t result = mServiceEndpoint->startClient(client, clientHandle);
+ aaudio_result_t result = endpoint->startClient(client, clientHandle);
return result;
}
aaudio_result_t AAudioServiceStreamMMAP::stopClient(audio_port_handle_t clientHandle) {
- aaudio_result_t result = mServiceEndpoint->stopClient(clientHandle);
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ aaudio_result_t result = endpoint->stopClient(clientHandle);
return result;
}
// Get free-running DSP or DMA hardware position from the HAL.
aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
int64_t *timeNanos) {
- sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
- static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
+ static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
+
aaudio_result_t result = serviceEndpointMMAP->getFreeRunningPosition(positionFrames, timeNanos);
if (result == AAUDIO_OK) {
Timestamp timestamp(*positionFrames, *timeNanos);
@@ -152,8 +169,15 @@
// Get timestamp that was written by getFreeRunningPosition()
aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
int64_t *timeNanos) {
- sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
- static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
+ static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
+
// TODO Get presentation timestamp from the HAL
if (mAtomicTimestamp.isValid()) {
Timestamp timestamp = mAtomicTimestamp.read();
@@ -165,13 +189,16 @@
}
}
-/**
- * Get an immutable description of the data queue from the HAL.
- */
+// Get an immutable description of the data queue from the HAL.
aaudio_result_t AAudioServiceStreamMMAP::getAudioDataDescription(
AudioEndpointParcelable &parcelable)
{
- sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP{
- static_cast<AAudioServiceEndpointMMAP *>(mServiceEndpoint.get())};
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
+ static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
return serviceEndpointMMAP->getDownDataDescription(parcelable);
}
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index e2415d0..1509f7d 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -69,9 +69,7 @@
aaudio_result_t close() override;
- /**
- * Send a MMAP/NOIRQ buffer timestamp to the client.
- */
+ const char *getTypeText() const override { return "MMAP"; }
protected:
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 084f996..05c5735 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -74,7 +74,7 @@
int32_t framesPerBurst) {
if (requestedCapacityFrames > MAX_FRAMES_PER_BUFFER) {
- ALOGE("AAudioServiceStreamShared::calculateBufferCapacity() requested capacity %d > max %d",
+ ALOGE("calculateBufferCapacity() requested capacity %d > max %d",
requestedCapacityFrames, MAX_FRAMES_PER_BUFFER);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
@@ -99,7 +99,7 @@
}
// Check for numeric overflow.
if (numBursts > 0x8000 || framesPerBurst > 0x8000) {
- ALOGE("AAudioServiceStreamShared::calculateBufferCapacity() overflow, capacity = %d * %d",
+ ALOGE("calculateBufferCapacity() overflow, capacity = %d * %d",
numBursts, framesPerBurst);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
@@ -107,11 +107,11 @@
// Final sanity check.
if (capacityInFrames > MAX_FRAMES_PER_BUFFER) {
- ALOGE("AAudioServiceStreamShared::calculateBufferCapacity() calc capacity %d > max %d",
+ ALOGE("calculateBufferCapacity() calc capacity %d > max %d",
capacityInFrames, MAX_FRAMES_PER_BUFFER);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
- ALOGD("AAudioServiceStreamShared::calculateBufferCapacity() requested %d frames, actual = %d",
+ ALOGD("calculateBufferCapacity() requested %d frames, actual = %d",
requestedCapacityFrames, capacityInFrames);
return capacityInFrames;
}
@@ -122,39 +122,44 @@
aaudio_result_t result = AAudioServiceStreamBase::open(request, AAUDIO_SHARING_MODE_SHARED);
if (result != AAUDIO_OK) {
- ALOGE("AAudioServiceStreamBase open() returned %d", result);
+ ALOGE("%s() returned %d", __func__, result);
return result;
}
const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ result = AAUDIO_ERROR_INVALID_STATE;
+ goto error;
+ }
// Is the request compatible with the shared endpoint?
setFormat(configurationInput.getFormat());
if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
setFormat(AAUDIO_FORMAT_PCM_FLOAT);
} else if (getFormat() != AAUDIO_FORMAT_PCM_FLOAT) {
- ALOGE("AAudioServiceStreamShared::open() mAudioFormat = %d, need FLOAT", getFormat());
+ ALOGD("%s() mAudioFormat = %d, need FLOAT", __func__, getFormat());
result = AAUDIO_ERROR_INVALID_FORMAT;
goto error;
}
setSampleRate(configurationInput.getSampleRate());
if (getSampleRate() == AAUDIO_UNSPECIFIED) {
- setSampleRate(mServiceEndpoint->getSampleRate());
- } else if (getSampleRate() != mServiceEndpoint->getSampleRate()) {
- ALOGE("AAudioServiceStreamShared::open() mSampleRate = %d, need %d",
- getSampleRate(), mServiceEndpoint->getSampleRate());
+ setSampleRate(endpoint->getSampleRate());
+ } else if (getSampleRate() != endpoint->getSampleRate()) {
+ ALOGD("%s() mSampleRate = %d, need %d",
+ __func__, getSampleRate(), endpoint->getSampleRate());
result = AAUDIO_ERROR_INVALID_RATE;
goto error;
}
setSamplesPerFrame(configurationInput.getSamplesPerFrame());
if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
- setSamplesPerFrame(mServiceEndpoint->getSamplesPerFrame());
- } else if (getSamplesPerFrame() != mServiceEndpoint->getSamplesPerFrame()) {
- ALOGE("AAudioServiceStreamShared::open() mSamplesPerFrame = %d, need %d",
- getSamplesPerFrame(), mServiceEndpoint->getSamplesPerFrame());
+ setSamplesPerFrame(endpoint->getSamplesPerFrame());
+ } else if (getSamplesPerFrame() != endpoint->getSamplesPerFrame()) {
+ ALOGD("%s() mSamplesPerFrame = %d, need %d",
+ __func__, getSamplesPerFrame(), endpoint->getSamplesPerFrame());
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
}
@@ -173,17 +178,17 @@
mAudioDataQueue = new SharedRingBuffer();
result = mAudioDataQueue->allocate(calculateBytesPerFrame(), getBufferCapacity());
if (result != AAUDIO_OK) {
- ALOGE("AAudioServiceStreamShared::open() could not allocate FIFO with %d frames",
- getBufferCapacity());
+ ALOGE("%s() could not allocate FIFO with %d frames",
+ __func__, getBufferCapacity());
result = AAUDIO_ERROR_NO_MEMORY;
goto error;
}
}
ALOGD("AAudioServiceStreamShared::open() actual rate = %d, channels = %d, deviceId = %d",
- getSampleRate(), getSamplesPerFrame(), mServiceEndpoint->getDeviceId());
+ getSampleRate(), getSamplesPerFrame(), endpoint->getDeviceId());
- result = mServiceEndpoint->registerStream(keep);
+ result = endpoint->registerStream(keep);
if (result != AAUDIO_OK) {
goto error;
}
@@ -217,7 +222,7 @@
{
std::lock_guard<std::mutex> lock(mAudioDataQueueLock);
if (mAudioDataQueue == nullptr) {
- ALOGE("getAudioDataDescription(): mUpMessageQueue null! - stream not open");
+ ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
return AAUDIO_ERROR_NULL;
}
// Gather information on the data queue.
@@ -250,13 +255,19 @@
int64_t *timeNanos) {
int64_t position = 0;
- aaudio_result_t result = mServiceEndpoint->getTimestamp(&position, timeNanos);
+ sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+ if (endpoint == nullptr) {
+ ALOGE("%s() has no endpoint", __func__);
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ aaudio_result_t result = endpoint->getTimestamp(&position, timeNanos);
if (result == AAUDIO_OK) {
int64_t offset = mTimestampPositionOffset.load();
// TODO, do not go below starting value
position -= offset; // Offset from shared MMAP stream
- ALOGV("getHardwareTimestamp() %8lld = %8lld - %8lld",
- (long long) position, (long long) (position + offset), (long long) offset);
+ ALOGV("%s() %8lld = %8lld - %8lld",
+ __func__, (long long) position, (long long) (position + offset), (long long) offset);
}
*positionFrames = position;
return result;
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index 8499ea5..61769b5 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -1,4 +1,4 @@
-/*
+ /*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -43,7 +43,7 @@
class AAudioServiceStreamShared : public AAudioServiceStreamBase {
public:
- AAudioServiceStreamShared(android::AAudioService &aAudioService);
+ explicit AAudioServiceStreamShared(android::AAudioService &aAudioService);
virtual ~AAudioServiceStreamShared() = default;
static std::string dumpHeader();
@@ -80,13 +80,15 @@
}
void incrementXRunCount() {
- mXRunCount++;
+ sendXRunCount(++mXRunCount);
}
int32_t getXRunCount() const {
return mXRunCount.load();
}
+ const char *getTypeText() const override { return "Shared"; }
+
protected:
aaudio_result_t getAudioDataDescription(AudioEndpointParcelable &parcelable) override;
diff --git a/services/oboeservice/AAudioStreamTracker.cpp b/services/oboeservice/AAudioStreamTracker.cpp
index ef88b34..3328159 100644
--- a/services/oboeservice/AAudioStreamTracker.cpp
+++ b/services/oboeservice/AAudioStreamTracker.cpp
@@ -30,25 +30,52 @@
using namespace android;
using namespace aaudio;
-sp<AAudioServiceStreamBase> AAudioStreamTracker::removeStreamByHandle(
+sp<AAudioServiceStreamBase> AAudioStreamTracker::decrementAndRemoveStreamByHandle(
aaudio_handle_t streamHandle) {
std::lock_guard<std::mutex> lock(mHandleLock);
sp<AAudioServiceStreamBase> serviceStream;
auto it = mStreamsByHandle.find(streamHandle);
if (it != mStreamsByHandle.end()) {
- serviceStream = it->second;
- mStreamsByHandle.erase(it);
+ sp<AAudioServiceStreamBase> tempStream = it->second;
+ // Does the caller need to close the stream?
+ // The reference count should never be negative.
+ // But it is safer to check for <= 0 than == 0.
+ if ((tempStream->decrementServiceReferenceCount_l() <= 0) && tempStream->isCloseNeeded()) {
+ serviceStream = tempStream; // Only return stream if ready to be closed.
+ mStreamsByHandle.erase(it);
+ }
}
return serviceStream;
}
-sp<AAudioServiceStreamBase> AAudioStreamTracker::getStreamByHandle(
+sp<AAudioServiceStreamBase> AAudioStreamTracker::getStreamByHandleAndIncrement(
aaudio_handle_t streamHandle) {
std::lock_guard<std::mutex> lock(mHandleLock);
sp<AAudioServiceStreamBase> serviceStream;
auto it = mStreamsByHandle.find(streamHandle);
if (it != mStreamsByHandle.end()) {
serviceStream = it->second;
+ serviceStream->incrementServiceReferenceCount_l();
+ }
+ return serviceStream;
+}
+
+// The port handle is only available when the stream is started.
+// So we have to iterate over all the streams.
+// Luckily this rarely happens.
+sp<AAudioServiceStreamBase> AAudioStreamTracker::findStreamByPortHandleAndIncrement(
+ audio_port_handle_t portHandle) {
+ std::lock_guard<std::mutex> lock(mHandleLock);
+ sp<AAudioServiceStreamBase> serviceStream;
+ auto it = mStreamsByHandle.begin();
+ while (it != mStreamsByHandle.end()) {
+ auto candidate = it->second;
+ if (candidate->getPortHandle() == portHandle) {
+ serviceStream = candidate;
+ serviceStream->incrementServiceReferenceCount_l();
+ break;
+ }
+ it++;
}
return serviceStream;
}
@@ -66,7 +93,7 @@
aaudio_handle_t AAudioStreamTracker::addStreamForHandle(sp<AAudioServiceStreamBase> serviceStream) {
std::lock_guard<std::mutex> lock(mHandleLock);
- aaudio_handle_t handle = mPreviousHandle.load();
+ aaudio_handle_t handle = mPreviousHandle;
// Assign a unique handle.
while (true) {
handle = bumpHandle(handle);
@@ -78,7 +105,7 @@
break;
}
}
- mPreviousHandle.store(handle);
+ mPreviousHandle = handle;
return handle;
}
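Editorial sketch (not part of the patch): taken together with the AAudioStreamTracker.h changes that follow, the tracker now reference-counts service calls and defers closing until the last caller has finished. A condensed sketch of that contract with plain std:: types and hypothetical names:

    #include <map>
    #include <memory>
    #include <mutex>

    struct Stream {
        int callingCount = 0;      // protected by the tracker lock
        bool closeNeeded = false;  // set once, never cleared
    };

    class Tracker {
    public:
        // Look up a stream and bump its call count under the tracker lock.
        std::shared_ptr<Stream> getAndIncrement(int handle) {
            std::lock_guard<std::mutex> lock(mLock);
            auto it = mStreams.find(handle);
            if (it == mStreams.end()) return nullptr;
            it->second->callingCount++;
            return it->second;
        }

        // Returns the stream only when the caller should close it now.
        std::shared_ptr<Stream> decrementAndRemove(int handle) {
            std::lock_guard<std::mutex> lock(mLock);
            auto it = mStreams.find(handle);
            if (it == mStreams.end()) return nullptr;
            std::shared_ptr<Stream> stream = it->second;
            if (--stream->callingCount <= 0 && stream->closeNeeded) {
                mStreams.erase(it);
                return stream;     // no service call is using it any more
            }
            return nullptr;        // still referenced, or not marked for closing
        }

        void add(int handle, std::shared_ptr<Stream> stream) {
            std::lock_guard<std::mutex> lock(mLock);
            mStreams[handle] = std::move(stream);
        }

    private:
        std::mutex mLock;
        std::map<int, std::shared_ptr<Stream>> mStreams;
    };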
diff --git a/services/oboeservice/AAudioStreamTracker.h b/services/oboeservice/AAudioStreamTracker.h
index 70d440d..57ec426 100644
--- a/services/oboeservice/AAudioStreamTracker.h
+++ b/services/oboeservice/AAudioStreamTracker.h
@@ -32,18 +32,36 @@
public:
/**
- * Remove the stream associated with the handle.
+ * Find the stream associated with the handle.
+ * Decrement its reference counter. If zero and the stream needs
+ * to be closed, then remove the stream and return a pointer to it.
+ * Otherwise return nullptr.
+ *
* @param streamHandle
- * @return strong pointer to the stream if found or to nullptr
+ * @return strong pointer to the stream if it needs to be closed, or nullptr
*/
- android::sp<AAudioServiceStreamBase> removeStreamByHandle(aaudio_handle_t streamHandle);
+ android::sp<AAudioServiceStreamBase> decrementAndRemoveStreamByHandle(
+ aaudio_handle_t streamHandle);
/**
* Look up a stream based on the handle.
+ * Increment its service reference count if found.
+ *
* @param streamHandle
- * @return strong pointer to the stream if found or to nullptr
+ * @return strong pointer to the stream if found, or nullptr
*/
- android::sp<aaudio::AAudioServiceStreamBase> getStreamByHandle(aaudio_handle_t streamHandle);
+ android::sp<aaudio::AAudioServiceStreamBase> getStreamByHandleAndIncrement(
+ aaudio_handle_t streamHandle);
+
+ /**
+ * Look up a stream based on the AudioPolicy portHandle.
+ * Increment its service reference count if found.
+ *
+ * @param portHandle
+ * @return strong pointer to the stream if found, or nullptr
+ */
+ android::sp<aaudio::AAudioServiceStreamBase> findStreamByPortHandleAndIncrement(
+ audio_port_handle_t portHandle);
/**
* Store a strong pointer to the stream and return a unique handle for future reference.
@@ -63,7 +81,9 @@
// Track stream using a unique handle that wraps. Only use positive half.
mutable std::mutex mHandleLock;
- std::atomic<aaudio_handle_t> mPreviousHandle{0};
+ // protected by mHandleLock
+ aaudio_handle_t mPreviousHandle = 0;
+ // protected by mHandleLock
std::map<aaudio_handle_t, android::sp<aaudio::AAudioServiceStreamBase>> mStreamsByHandle;
};
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
index c6fb57d..ed7895b 100644
--- a/services/oboeservice/AAudioThread.cpp
+++ b/services/oboeservice/AAudioThread.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudioService"
+#define LOG_TAG "AAudioThread"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -27,12 +27,26 @@
using namespace aaudio;
+std::atomic<uint32_t> AAudioThread::mNextThreadIndex{1};
-AAudioThread::AAudioThread()
- : mRunnable(nullptr)
- , mHasThread(false) {
+AAudioThread::AAudioThread(const char *prefix) {
+ setup(prefix);
+}
+
+AAudioThread::AAudioThread() {
+ setup("AAudio");
+}
+
+void AAudioThread::setup(const char *prefix) {
// mThread is a pthread_t of unknown size so we need memset().
memset(&mThread, 0, sizeof(mThread));
+
+ // Name the thread with an increasing index, "prefix_#", for debugging.
+ uint32_t index = mNextThreadIndex++;
+ // Wrap the index so that we stay under the 16 char name limit
+ // and avoid hard-to-read large numbers.
+ index = index % 100000; // arbitrary
+ snprintf(mName, sizeof(mName), "%s_%u", prefix, index);
}
void AAudioThread::dispatch() {
@@ -53,7 +67,7 @@
aaudio_result_t AAudioThread::start(Runnable *runnable) {
if (mHasThread) {
- ALOGE("AAudioThread::start() - mHasThread already true");
+ ALOGE("start() - mHasThread already true");
return AAUDIO_ERROR_INVALID_STATE;
}
// mRunnable will be read by the new thread when it starts.
@@ -61,9 +75,11 @@
mRunnable = runnable;
int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
if (err != 0) {
- ALOGE("AAudioThread::start() - pthread_create() returned %d %s", err, strerror(err));
+ ALOGE("start() - pthread_create() returned %d %s", err, strerror(err));
return AAudioConvert_androidToAAudioResult(-err);
} else {
+ int err = pthread_setname_np(mThread, mName);
+ ALOGW_IF((err != 0), "Could not set name of AAudioThread. err = %d", err);
mHasThread = true;
return AAUDIO_OK;
}
@@ -71,13 +87,13 @@
aaudio_result_t AAudioThread::stop() {
if (!mHasThread) {
- ALOGE("AAudioThread::stop() but no thread running");
+ ALOGE("stop() but no thread running");
return AAUDIO_ERROR_INVALID_STATE;
}
int err = pthread_join(mThread, nullptr);
mHasThread = false;
if (err != 0) {
- ALOGE("AAudioThread::stop() - pthread_join() returned %d %s", err, strerror(err));
+ ALOGE("stop() - pthread_join() returned %d %s", err, strerror(err));
return AAudioConvert_androidToAAudioResult(-err);
} else {
return AAUDIO_OK;
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index 02f1459..dcce68a 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -43,7 +43,9 @@
{
public:
AAudioThread();
- AAudioThread(Runnable *runnable);
+
+ explicit AAudioThread(const char *prefix);
+
virtual ~AAudioThread() = default;
/**
@@ -66,10 +68,15 @@
void dispatch(); // called internally from 'C' thread wrapper
private:
- Runnable *mRunnable;
- bool mHasThread;
+
+ void setup(const char *prefix);
+
+ Runnable *mRunnable = nullptr;
+ bool mHasThread = false;
pthread_t mThread; // initialized in constructor
+ static std::atomic<uint32_t> mNextThreadIndex;
+ char mName[16]; // max length for a pthread_name
};
} /* namespace aaudio */
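Editorial sketch (not part of the patch): the mName[16] buffer and the wrapped index above exist because Linux limits thread names to 16 bytes including the terminating NUL. A self-contained illustration of the same naming scheme:

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE          // glibc exposes pthread_setname_np under _GNU_SOURCE
    #endif
    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <pthread.h>

    static std::atomic<uint32_t> gNextThreadIndex{1};

    static void *threadProc(void *) { return nullptr; }

    int main() {
        // Build a short "prefix_N" name, wrapping the index to keep it readable.
        char name[16];
        uint32_t index = gNextThreadIndex++ % 100000;
        snprintf(name, sizeof(name), "%s_%u", "AAudio", index);

        pthread_t thread;
        if (pthread_create(&thread, nullptr, threadProc, nullptr) == 0) {
            int err = pthread_setname_np(thread, name);  // best effort, warn on failure
            if (err != 0) fprintf(stderr, "could not set thread name: %d\n", err);
            pthread_join(thread, nullptr);
        }
        return 0;
    }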
diff --git a/services/oboeservice/SharedMemoryProxy.cpp b/services/oboeservice/SharedMemoryProxy.cpp
index fb991bb..c43ed22 100644
--- a/services/oboeservice/SharedMemoryProxy.cpp
+++ b/services/oboeservice/SharedMemoryProxy.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudioService"
+#define LOG_TAG "SharedMemoryProxy"
//#define LOG_NDEBUG 0
#include <log/log.h>
@@ -45,12 +45,12 @@
mProxyFileDescriptor = ashmem_create_region("AAudioProxyDataBuffer", mSharedMemorySizeInBytes);
if (mProxyFileDescriptor < 0) {
- ALOGE("SharedMemoryProxy::open() ashmem_create_region() failed %d", errno);
+ ALOGE("open() ashmem_create_region() failed %d", errno);
return AAUDIO_ERROR_INTERNAL;
}
int err = ashmem_set_prot_region(mProxyFileDescriptor, PROT_READ|PROT_WRITE);
if (err < 0) {
- ALOGE("SharedMemoryProxy::open() ashmem_set_prot_region() failed %d", errno);
+ ALOGE("open() ashmem_set_prot_region() failed %d", errno);
close(mProxyFileDescriptor);
mProxyFileDescriptor = -1;
return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
@@ -62,7 +62,7 @@
MAP_SHARED,
mOriginalFileDescriptor, 0);
if (mOriginalSharedMemory == MAP_FAILED) {
- ALOGE("SharedMemoryProxy::open() original mmap(%d) failed %d (%s)",
+ ALOGE("open() original mmap(%d) failed %d (%s)",
mOriginalFileDescriptor, errno, strerror(errno));
return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
}
@@ -73,7 +73,7 @@
MAP_SHARED,
mProxyFileDescriptor, 0);
if (mProxySharedMemory != mOriginalSharedMemory) {
- ALOGE("SharedMemoryProxy::open() proxy mmap(%d) failed %d", mProxyFileDescriptor, errno);
+ ALOGE("open() proxy mmap(%d) failed %d", mProxyFileDescriptor, errno);
munmap(mOriginalSharedMemory, mSharedMemorySizeInBytes);
mOriginalSharedMemory = nullptr;
close(mProxyFileDescriptor);
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index 83b25b3..2454446 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "AAudioService"
+#define LOG_TAG "SharedRingBuffer"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
@@ -46,14 +46,14 @@
mSharedMemorySizeInBytes = mDataMemorySizeInBytes + (2 * (sizeof(fifo_counter_t)));
mFileDescriptor.reset(ashmem_create_region("AAudioSharedRingBuffer", mSharedMemorySizeInBytes));
if (mFileDescriptor.get() == -1) {
- ALOGE("SharedRingBuffer::allocate() ashmem_create_region() failed %d", errno);
+ ALOGE("allocate() ashmem_create_region() failed %d", errno);
return AAUDIO_ERROR_INTERNAL;
}
- ALOGV("SharedRingBuffer::allocate() mFileDescriptor = %d\n", mFileDescriptor.get());
+ ALOGV("allocate() mFileDescriptor = %d\n", mFileDescriptor.get());
int err = ashmem_set_prot_region(mFileDescriptor.get(), PROT_READ|PROT_WRITE); // TODO error handling?
if (err < 0) {
- ALOGE("SharedRingBuffer::allocate() ashmem_set_prot_region() failed %d", errno);
+ ALOGE("allocate() ashmem_set_prot_region() failed %d", errno);
mFileDescriptor.reset();
return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
}
@@ -64,7 +64,7 @@
MAP_SHARED,
mFileDescriptor.get(), 0);
if (mSharedMemory == MAP_FAILED) {
- ALOGE("SharedRingBuffer::allocate() mmap() failed %d", errno);
+ ALOGE("allocate() mmap() failed %d", errno);
mFileDescriptor.reset();
return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
}
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index 10ee141..ad3666e 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -49,11 +49,16 @@
LOCAL_SHARED_LIBRARIES += \
libhwbinder \
libhidlbase \
+ libhidlmemory \
libhidltransport \
libbase \
libaudiohal \
+ libaudiohal_deathhandler \
android.hardware.soundtrigger@2.0 \
- android.hardware.audio.common@2.0
+ android.hardware.soundtrigger@2.1 \
+ android.hardware.audio.common@2.0 \
+ android.hidl.allocator@1.0 \
+ android.hidl.memory@1.0
endif
diff --git a/services/soundtrigger/SoundTriggerHalHidl.cpp b/services/soundtrigger/SoundTriggerHalHidl.cpp
index 0cd5cf7..adf252e 100644
--- a/services/soundtrigger/SoundTriggerHalHidl.cpp
+++ b/services/soundtrigger/SoundTriggerHalHidl.cpp
@@ -17,17 +17,87 @@
#define LOG_TAG "SoundTriggerHalHidl"
//#define LOG_NDEBUG 0
+#include <android/hidl/allocator/1.0/IAllocator.h>
#include <media/audiohal/hidl/HalDeathHandler.h>
#include <utils/Log.h>
#include "SoundTriggerHalHidl.h"
+#include <hidlmemory/mapping.h>
#include <hwbinder/IPCThreadState.h>
#include <hwbinder/ProcessState.h>
namespace android {
-using android::hardware::Return;
-using android::hardware::ProcessState;
-using android::hardware::audio::common::V2_0::AudioDevice;
+using ::android::hardware::ProcessState;
+using ::android::hardware::Return;
+using ::android::hardware::Status;
+using ::android::hardware::Void;
+using ::android::hardware::audio::common::V2_0::AudioDevice;
+using ::android::hardware::hidl_memory;
+using ::android::hidl::allocator::V1_0::IAllocator;
+using ::android::hidl::memory::V1_0::IMemory;
+
+namespace {
+
+// Backs the vector with the contents of the shared memory.
+// The passed hidl_vec is assumed to be empty, so it is not
+// cleared when the memory is a null object.
+// The caller needs to keep the returned sp<IMemory> alive for
+// as long as the data is needed.
+std::pair<bool, sp<IMemory>> memoryAsVector(const hidl_memory& m, hidl_vec<uint8_t>* vec) {
+ sp<IMemory> memory;
+ if (m.size() == 0) {
+ return std::make_pair(true, memory);
+ }
+ memory = mapMemory(m);
+ if (memory != nullptr) {
+ memory->read();
+ vec->setToExternal(static_cast<uint8_t*>(static_cast<void*>(memory->getPointer())),
+ memory->getSize());
+ return std::make_pair(true, memory);
+ }
+ ALOGE("%s: Could not map HIDL memory to IMemory", __func__);
+ return std::make_pair(false, memory);
+}
+
+// Moves the data from the vector into allocated shared memory,
+// emptying the vector.
+// The passed hidl_memory is assumed to be a null object, so it is
+// not reset when the vector is empty.
+// The caller needs to keep the returned sp<IMemory> alive for
+// as long as the data is needed.
+std::pair<bool, sp<IMemory>> moveVectorToMemory(hidl_vec<uint8_t>* v, hidl_memory* mem) {
+ sp<IMemory> memory;
+ if (v->size() == 0) {
+ return std::make_pair(true, memory);
+ }
+ sp<IAllocator> ashmem = IAllocator::getService("ashmem");
+ if (ashmem == 0) {
+ ALOGE("Failed to retrieve ashmem allocator service");
+ return std::make_pair(false, memory);
+ }
+ bool success = false;
+ Return<void> r = ashmem->allocate(v->size(), [&](bool s, const hidl_memory& m) {
+ success = s;
+ if (success) *mem = m;
+ });
+ if (r.isOk() && success) {
+ memory = hardware::mapMemory(*mem);
+ if (memory != 0) {
+ memory->update();
+ memcpy(memory->getPointer(), v->data(), v->size());
+ memory->commit();
+ v->resize(0);
+ return std::make_pair(true, memory);
+ } else {
+ ALOGE("Failed to map allocated ashmem");
+ }
+ } else {
+ ALOGE("Failed to allocate %llu bytes from ashmem", (unsigned long long)v->size());
+ }
+ return std::make_pair(false, memory);
+}
+
+} // namespace
/* static */
sp<SoundTriggerHalInterface> SoundTriggerHalInterface::connectModule(const char *moduleName)
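Editorial sketch (not part of the patch): moveVectorToMemory() above allocates ashmem through the HIDL IAllocator, maps it, copies the payload in, and empties the source vector so the shared region becomes the only owner of the data. A rough POSIX analog of that flow, using plain mmap rather than the ashmem/HIDL API:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <sys/mman.h>
    #include <vector>

    int main() {
        std::vector<uint8_t> payload = {1, 2, 3, 4};

        // Allocate a shared mapping sized to the payload (analogous to ashmem->allocate).
        void *shared = mmap(nullptr, payload.size(), PROT_READ | PROT_WRITE,
                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (shared == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        // Copy the bytes in and empty the source, mirroring update/memcpy/commit
        // followed by v->resize(0) in the helper above.
        memcpy(shared, payload.data(), payload.size());
        size_t sharedSize = payload.size();
        payload.clear();

        printf("first shared byte: %d\n", static_cast<uint8_t *>(shared)[0]);
        munmap(shared, sharedSize);
        return 0;
    }

As in the helper, whoever holds the mapping must keep it alive for as long as anything still points at the copied data.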
@@ -94,36 +164,62 @@
"loadSoundModel(): wrap around in sound model IDs, num loaded models %zd",
mSoundModels.size());
- ISoundTriggerHw::SoundModel *halSoundModel =
- convertSoundModelToHal(sound_model);
- if (halSoundModel == NULL) {
- return -EINVAL;
- }
-
Return<void> hidlReturn;
int ret;
SoundModelHandle halHandle;
- {
- AutoMutex lock(mHalLock);
- if (sound_model->type == SOUND_MODEL_TYPE_KEYPHRASE) {
+ sp<V2_1_ISoundTriggerHw> soundtrigger_2_1 = toService2_1(soundtrigger);
+ if (sound_model->type == SOUND_MODEL_TYPE_KEYPHRASE) {
+ if (!soundtrigger_2_1) {
+ ISoundTriggerHw::PhraseSoundModel halSoundModel;
+ convertPhraseSoundModelToHal(&halSoundModel, sound_model);
+ AutoMutex lock(mHalLock);
hidlReturn = soundtrigger->loadPhraseSoundModel(
- *(const ISoundTriggerHw::PhraseSoundModel *)halSoundModel,
+ halSoundModel,
this, modelId, [&](int32_t retval, auto res) {
- ret = retval;
- halHandle = res;
- });
-
+ ret = retval;
+ halHandle = res;
+ });
} else {
- hidlReturn = soundtrigger->loadSoundModel(*halSoundModel,
+ V2_1_ISoundTriggerHw::PhraseSoundModel halSoundModel;
+ auto result = convertPhraseSoundModelToHal(&halSoundModel, sound_model);
+ if (result.first) {
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger_2_1->loadPhraseSoundModel_2_1(
+ halSoundModel,
+ this, modelId, [&](int32_t retval, auto res) {
+ ret = retval;
+ halHandle = res;
+ });
+ } else {
+ return NO_MEMORY;
+ }
+ }
+ } else {
+ if (!soundtrigger_2_1) {
+ ISoundTriggerHw::SoundModel halSoundModel;
+ convertSoundModelToHal(&halSoundModel, sound_model);
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger->loadSoundModel(halSoundModel,
this, modelId, [&](int32_t retval, auto res) {
- ret = retval;
- halHandle = res;
- });
+ ret = retval;
+ halHandle = res;
+ });
+ } else {
+ V2_1_ISoundTriggerHw::SoundModel halSoundModel;
+ auto result = convertSoundModelToHal(&halSoundModel, sound_model);
+ if (result.first) {
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger_2_1->loadSoundModel_2_1(halSoundModel,
+ this, modelId, [&](int32_t retval, auto res) {
+ ret = retval;
+ halHandle = res;
+ });
+ } else {
+ return NO_MEMORY;
+ }
}
}
- delete halSoundModel;
-
if (hidlReturn.isOk()) {
if (ret == 0) {
AutoMutex lock(mLock);
@@ -185,16 +281,27 @@
model->mRecognitionCallback = callback;
model->mRecognitionCookie = cookie;
- ISoundTriggerHw::RecognitionConfig *halConfig =
- convertRecognitionConfigToHal(config);
-
+ sp<V2_1_ISoundTriggerHw> soundtrigger_2_1 = toService2_1(soundtrigger);
Return<int32_t> hidlReturn(0);
- {
- AutoMutex lock(mHalLock);
- hidlReturn = soundtrigger->startRecognition(model->mHalHandle, *halConfig, this, handle);
- }
- delete halConfig;
+ if (!soundtrigger_2_1) {
+ ISoundTriggerHw::RecognitionConfig halConfig;
+ convertRecognitionConfigToHal(&halConfig, config);
+ {
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger->startRecognition(model->mHalHandle, halConfig, this, handle);
+ }
+ } else {
+ V2_1_ISoundTriggerHw::RecognitionConfig halConfig;
+ auto result = convertRecognitionConfigToHal(&halConfig, config);
+ if (result.first) {
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger_2_1->startRecognition_2_1(
+ model->mHalHandle, halConfig, this, handle);
+ } else {
+ return NO_MEMORY;
+ }
+ }
if (!hidlReturn.isOk()) {
ALOGE("startRecognition error %s", hidlReturn.description().c_str());
@@ -275,6 +382,12 @@
return mISoundTrigger;
}
+sp<V2_1_ISoundTriggerHw> SoundTriggerHalHidl::toService2_1(const sp<ISoundTriggerHw>& s)
+{
+ auto castResult_2_1 = V2_1_ISoundTriggerHw::castFrom(s);
+ return castResult_2_1.isOk() ? static_cast<sp<V2_1_ISoundTriggerHw>>(castResult_2_1) : nullptr;
+}
+
sp<SoundTriggerHalHidl::SoundModel> SoundTriggerHalHidl::getModel(sound_model_handle_t handle)
{
AutoMutex lock(mLock);
@@ -347,40 +460,52 @@
halTriggerPhrase->text = triggerPhrase->text;
}
-ISoundTriggerHw::SoundModel *SoundTriggerHalHidl::convertSoundModelToHal(
+
+void SoundTriggerHalHidl::convertTriggerPhrasesToHal(
+ hidl_vec<ISoundTriggerHw::Phrase> *halTriggerPhrases,
+ struct sound_trigger_phrase_sound_model *keyPhraseModel)
+{
+ halTriggerPhrases->resize(keyPhraseModel->num_phrases);
+ for (unsigned int i = 0; i < keyPhraseModel->num_phrases; i++) {
+ convertTriggerPhraseToHal(&(*halTriggerPhrases)[i], &keyPhraseModel->phrases[i]);
+ }
+}
+
+void SoundTriggerHalHidl::convertSoundModelToHal(ISoundTriggerHw::SoundModel *halModel,
const struct sound_trigger_sound_model *soundModel)
{
- ISoundTriggerHw::SoundModel *halModel = NULL;
- if (soundModel->type == SOUND_MODEL_TYPE_KEYPHRASE) {
- ISoundTriggerHw::PhraseSoundModel *halKeyPhraseModel =
- new ISoundTriggerHw::PhraseSoundModel();
- struct sound_trigger_phrase_sound_model *keyPhraseModel =
- (struct sound_trigger_phrase_sound_model *)soundModel;
- ISoundTriggerHw::Phrase *halPhrases =
- new ISoundTriggerHw::Phrase[keyPhraseModel->num_phrases];
-
-
- for (unsigned int i = 0; i < keyPhraseModel->num_phrases; i++) {
- convertTriggerPhraseToHal(&halPhrases[i],
- &keyPhraseModel->phrases[i]);
- }
- halKeyPhraseModel->phrases.setToExternal(halPhrases, keyPhraseModel->num_phrases);
- // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
- halKeyPhraseModel->phrases.resize(keyPhraseModel->num_phrases);
-
- delete[] halPhrases;
-
- halModel = (ISoundTriggerHw::SoundModel *)halKeyPhraseModel;
- } else {
- halModel = new ISoundTriggerHw::SoundModel();
- }
halModel->type = (SoundModelType)soundModel->type;
convertUuidToHal(&halModel->uuid, &soundModel->uuid);
convertUuidToHal(&halModel->vendorUuid, &soundModel->vendor_uuid);
halModel->data.setToExternal((uint8_t *)soundModel + soundModel->data_offset, soundModel->data_size);
- halModel->data.resize(soundModel->data_size);
+}
- return halModel;
+std::pair<bool, sp<IMemory>> SoundTriggerHalHidl::convertSoundModelToHal(
+ V2_1_ISoundTriggerHw::SoundModel *halModel,
+ const struct sound_trigger_sound_model *soundModel)
+{
+ convertSoundModelToHal(&halModel->header, soundModel);
+ return moveVectorToMemory(&halModel->header.data, &halModel->data);
+}
+
+void SoundTriggerHalHidl::convertPhraseSoundModelToHal(
+ ISoundTriggerHw::PhraseSoundModel *halKeyPhraseModel,
+ const struct sound_trigger_sound_model *soundModel)
+{
+ struct sound_trigger_phrase_sound_model *keyPhraseModel =
+ (struct sound_trigger_phrase_sound_model *)soundModel;
+ convertTriggerPhrasesToHal(&halKeyPhraseModel->phrases, keyPhraseModel);
+ convertSoundModelToHal(&halKeyPhraseModel->common, soundModel);
+}
+
+std::pair<bool, sp<IMemory>> SoundTriggerHalHidl::convertPhraseSoundModelToHal(
+ V2_1_ISoundTriggerHw::PhraseSoundModel *halKeyPhraseModel,
+ const struct sound_trigger_sound_model *soundModel)
+{
+ struct sound_trigger_phrase_sound_model *keyPhraseModel =
+ (struct sound_trigger_phrase_sound_model *)soundModel;
+ convertTriggerPhrasesToHal(&halKeyPhraseModel->phrases, keyPhraseModel);
+ return convertSoundModelToHal(&halKeyPhraseModel->common, soundModel);
}
void SoundTriggerHalHidl::convertPhraseRecognitionExtraToHal(
@@ -390,52 +515,42 @@
halExtra->id = extra->id;
halExtra->recognitionModes = extra->recognition_modes;
halExtra->confidenceLevel = extra->confidence_level;
- ConfidenceLevel *halLevels =
- new ConfidenceLevel[extra->num_levels];
- for (unsigned int i = 0; i < extra->num_levels; i++) {
- halLevels[i].userId = extra->levels[i].user_id;
- halLevels[i].levelPercent = extra->levels[i].level;
- }
- halExtra->levels.setToExternal(halLevels, extra->num_levels);
- // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
halExtra->levels.resize(extra->num_levels);
-
- delete[] halLevels;
+ for (unsigned int i = 0; i < extra->num_levels; i++) {
+ halExtra->levels[i].userId = extra->levels[i].user_id;
+ halExtra->levels[i].levelPercent = extra->levels[i].level;
+ }
}
-
-ISoundTriggerHw::RecognitionConfig *SoundTriggerHalHidl::convertRecognitionConfigToHal(
+void SoundTriggerHalHidl::convertRecognitionConfigToHal(
+ ISoundTriggerHw::RecognitionConfig *halConfig,
const struct sound_trigger_recognition_config *config)
{
- ISoundTriggerHw::RecognitionConfig *halConfig =
- new ISoundTriggerHw::RecognitionConfig();
-
halConfig->captureHandle = config->capture_handle;
halConfig->captureDevice = (AudioDevice)config->capture_device;
halConfig->captureRequested = (uint32_t)config->capture_requested;
- PhraseRecognitionExtra *halExtras =
- new PhraseRecognitionExtra[config->num_phrases];
-
+ halConfig->phrases.resize(config->num_phrases);
for (unsigned int i = 0; i < config->num_phrases; i++) {
- convertPhraseRecognitionExtraToHal(&halExtras[i],
+ convertPhraseRecognitionExtraToHal(&halConfig->phrases[i],
&config->phrases[i]);
}
- halConfig->phrases.setToExternal(halExtras, config->num_phrases);
- // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
- halConfig->phrases.resize(config->num_phrases);
-
- delete[] halExtras;
halConfig->data.setToExternal((uint8_t *)config + config->data_offset, config->data_size);
+}
- return halConfig;
+std::pair<bool, sp<IMemory>> SoundTriggerHalHidl::convertRecognitionConfigToHal(
+ V2_1_ISoundTriggerHw::RecognitionConfig *halConfig,
+ const struct sound_trigger_recognition_config *config)
+{
+ convertRecognitionConfigToHal(&halConfig->header, config);
+ return moveVectorToMemory(&halConfig->header.data, &halConfig->data);
}
// ISoundTriggerHwCallback
::android::hardware::Return<void> SoundTriggerHalHidl::recognitionCallback(
- const ISoundTriggerHwCallback::RecognitionEvent& halEvent,
+ const V2_0_ISoundTriggerHwCallback::RecognitionEvent& halEvent,
CallbackCookie cookie)
{
sp<SoundModel> model;
@@ -459,7 +574,7 @@
}
::android::hardware::Return<void> SoundTriggerHalHidl::phraseRecognitionCallback(
- const ISoundTriggerHwCallback::PhraseRecognitionEvent& halEvent,
+ const V2_0_ISoundTriggerHwCallback::PhraseRecognitionEvent& halEvent,
CallbackCookie cookie)
{
sp<SoundModel> model;
@@ -471,14 +586,13 @@
}
}
- struct sound_trigger_recognition_event *event = convertRecognitionEventFromHal(
- (const ISoundTriggerHwCallback::RecognitionEvent *)&halEvent);
+ struct sound_trigger_phrase_recognition_event *event =
+ convertPhraseRecognitionEventFromHal(&halEvent);
if (event == NULL) {
return Return<void>();
}
-
- event->model = model->mHandle;
- model->mRecognitionCallback(event, model->mRecognitionCookie);
+ event->common.model = model->mHandle;
+ model->mRecognitionCallback(&event->common, model->mRecognitionCookie);
free(event);
@@ -486,7 +600,7 @@
}
::android::hardware::Return<void> SoundTriggerHalHidl::soundModelCallback(
- const ISoundTriggerHwCallback::ModelEvent& halEvent,
+ const V2_0_ISoundTriggerHwCallback::ModelEvent& halEvent,
CallbackCookie cookie)
{
sp<SoundModel> model;
@@ -511,9 +625,37 @@
return Return<void>();
}
+::android::hardware::Return<void> SoundTriggerHalHidl::recognitionCallback_2_1(
+ const ISoundTriggerHwCallback::RecognitionEvent& event, CallbackCookie cookie) {
+ // The data vector in the 'header' part of the V2.1 structure is empty, thus copying is cheap.
+ V2_0_ISoundTriggerHwCallback::RecognitionEvent event_2_0 = event.header;
+ auto result = memoryAsVector(event.data, &event_2_0.data);
+ return result.first ? recognitionCallback(event_2_0, cookie) : Void();
+}
+
+::android::hardware::Return<void> SoundTriggerHalHidl::phraseRecognitionCallback_2_1(
+ const ISoundTriggerHwCallback::PhraseRecognitionEvent& event, int32_t cookie) {
+ V2_0_ISoundTriggerHwCallback::PhraseRecognitionEvent event_2_0;
+ // The data vector in the 'header' part of the V2.1 structure is empty, thus copying is cheap.
+ event_2_0.common = event.common.header;
+ event_2_0.phraseExtras.setToExternal(
+ const_cast<PhraseRecognitionExtra*>(event.phraseExtras.data()),
+ event.phraseExtras.size());
+ auto result = memoryAsVector(event.common.data, &event_2_0.common.data);
+ return result.first ? phraseRecognitionCallback(event_2_0, cookie) : Void();
+}
+
+::android::hardware::Return<void> SoundTriggerHalHidl::soundModelCallback_2_1(
+ const ISoundTriggerHwCallback::ModelEvent& event, CallbackCookie cookie) {
+ // The data vector in the 'header' part of the V2.1 structure is empty, thus copying is cheap.
+ V2_0_ISoundTriggerHwCallback::ModelEvent event_2_0 = event.header;
+ auto result = memoryAsVector(event.data, &event_2_0.data);
+ return result.first ? soundModelCallback(event_2_0, cookie) : Void();
+}
+
struct sound_trigger_model_event *SoundTriggerHalHidl::convertSoundModelEventFromHal(
- const ISoundTriggerHwCallback::ModelEvent *halEvent)
+ const V2_0_ISoundTriggerHwCallback::ModelEvent *halEvent)
{
struct sound_trigger_model_event *event = (struct sound_trigger_model_event *)malloc(
sizeof(struct sound_trigger_model_event) +
@@ -550,37 +692,55 @@
}
-struct sound_trigger_recognition_event *SoundTriggerHalHidl::convertRecognitionEventFromHal(
- const ISoundTriggerHwCallback::RecognitionEvent *halEvent)
+struct sound_trigger_phrase_recognition_event* SoundTriggerHalHidl::convertPhraseRecognitionEventFromHal(
+ const V2_0_ISoundTriggerHwCallback::PhraseRecognitionEvent *halPhraseEvent)
{
- struct sound_trigger_recognition_event *event;
-
- if (halEvent->type == SoundModelType::KEYPHRASE) {
- struct sound_trigger_phrase_recognition_event *phraseEvent =
- (struct sound_trigger_phrase_recognition_event *)malloc(
- sizeof(struct sound_trigger_phrase_recognition_event) +
- halEvent->data.size());
- if (phraseEvent == NULL) {
- return NULL;
- }
- const ISoundTriggerHwCallback::PhraseRecognitionEvent *halPhraseEvent =
- (const ISoundTriggerHwCallback::PhraseRecognitionEvent *)halEvent;
-
- for (unsigned int i = 0; i < halPhraseEvent->phraseExtras.size(); i++) {
- convertPhraseRecognitionExtraFromHal(&phraseEvent->phrase_extras[i],
- &halPhraseEvent->phraseExtras[i]);
- }
- phraseEvent->num_phrases = halPhraseEvent->phraseExtras.size();
- event = (struct sound_trigger_recognition_event *)phraseEvent;
- event->data_offset = sizeof(sound_trigger_phrase_recognition_event);
- } else {
- event = (struct sound_trigger_recognition_event *)malloc(
- sizeof(struct sound_trigger_recognition_event) + halEvent->data.size());
- if (event == NULL) {
- return NULL;
- }
- event->data_offset = sizeof(sound_trigger_recognition_event);
+ if (halPhraseEvent->common.type != SoundModelType::KEYPHRASE) {
+ ALOGE("Received non-keyphrase event type as PhraseRecognitionEvent");
+ return NULL;
}
+ struct sound_trigger_phrase_recognition_event *phraseEvent =
+ (struct sound_trigger_phrase_recognition_event *)malloc(
+ sizeof(struct sound_trigger_phrase_recognition_event) +
+ halPhraseEvent->common.data.size());
+ if (phraseEvent == NULL) {
+ return NULL;
+ }
+ phraseEvent->common.data_offset = sizeof(sound_trigger_phrase_recognition_event);
+
+ for (unsigned int i = 0; i < halPhraseEvent->phraseExtras.size(); i++) {
+ convertPhraseRecognitionExtraFromHal(&phraseEvent->phrase_extras[i],
+ &halPhraseEvent->phraseExtras[i]);
+ }
+ phraseEvent->num_phrases = halPhraseEvent->phraseExtras.size();
+
+ fillRecognitionEventFromHal(&phraseEvent->common, &halPhraseEvent->common);
+ return phraseEvent;
+}
+
+struct sound_trigger_recognition_event *SoundTriggerHalHidl::convertRecognitionEventFromHal(
+ const V2_0_ISoundTriggerHwCallback::RecognitionEvent *halEvent)
+{
+ if (halEvent->type == SoundModelType::KEYPHRASE) {
+ ALOGE("Received keyphrase event type as RecognitionEvent");
+ return NULL;
+ }
+ struct sound_trigger_recognition_event *event;
+ event = (struct sound_trigger_recognition_event *)malloc(
+ sizeof(struct sound_trigger_recognition_event) + halEvent->data.size());
+ if (event == NULL) {
+ return NULL;
+ }
+ event->data_offset = sizeof(sound_trigger_recognition_event);
+
+ fillRecognitionEventFromHal(event, halEvent);
+ return event;
+}
+
+void SoundTriggerHalHidl::fillRecognitionEventFromHal(
+ struct sound_trigger_recognition_event *event,
+ const V2_0_ISoundTriggerHwCallback::RecognitionEvent *halEvent)
+{
event->status = (int)halEvent->status;
event->type = (sound_trigger_sound_model_type_t)halEvent->type;
// event->model to be set by caller
@@ -597,8 +757,6 @@
uint8_t *dst = (uint8_t *)event + event->data_offset;
uint8_t *src = (uint8_t *)&halEvent->data[0];
memcpy(dst, src, halEvent->data.size());
-
- return event;
}
} // namespace android
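Editorial sketch (not part of the patch): fillRecognitionEventFromHal() and the conversion helpers above all rely on the same "fixed header plus trailing payload" layout, where the event is malloc'ed as sizeof(header) + data_size and data_offset points just past the header. A plain-C++ illustration with a hypothetical struct:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    struct Event {
        uint32_t data_size;
        uint32_t data_offset;
        // payload bytes follow immediately after the struct
    };

    static Event *makeEvent(const uint8_t *payload, uint32_t size) {
        Event *event = static_cast<Event *>(malloc(sizeof(Event) + size));
        if (event == nullptr) {
            return nullptr;
        }
        event->data_size = size;
        event->data_offset = sizeof(Event);
        // Copy the payload to the offset just past the header, as the
        // HAL-to-legacy conversions above do with halEvent->data.
        memcpy(reinterpret_cast<uint8_t *>(event) + event->data_offset, payload, size);
        return event;  // caller releases with free(), like the callbacks above
    }

    int main() {
        const uint8_t payload[] = {0xde, 0xad, 0xbe, 0xef};
        Event *event = makeEvent(payload, sizeof(payload));
        if (event != nullptr) {
            const uint8_t *data = reinterpret_cast<uint8_t *>(event) + event->data_offset;
            printf("payload[0] = 0x%02x\n", static_cast<unsigned>(data[0]));
            free(event);
        }
        return 0;
    }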
diff --git a/services/soundtrigger/SoundTriggerHalHidl.h b/services/soundtrigger/SoundTriggerHalHidl.h
index 0c68cf1..0b44ae0 100644
--- a/services/soundtrigger/SoundTriggerHalHidl.h
+++ b/services/soundtrigger/SoundTriggerHalHidl.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_HARDWARE_SOUNDTRIGGER_HAL_HIDL_H
#define ANDROID_HARDWARE_SOUNDTRIGGER_HAL_HIDL_H
+#include <utility>
+
#include <stdatomic.h>
#include <utils/RefBase.h>
#include <utils/KeyedVector.h>
@@ -24,21 +26,29 @@
#include <utils/threads.h>
#include "SoundTriggerHalInterface.h"
#include <android/hardware/soundtrigger/2.0/types.h>
-#include <android/hardware/soundtrigger/2.0/ISoundTriggerHw.h>
+#include <android/hardware/soundtrigger/2.1/ISoundTriggerHw.h>
#include <android/hardware/soundtrigger/2.0/ISoundTriggerHwCallback.h>
+#include <android/hardware/soundtrigger/2.1/ISoundTriggerHwCallback.h>
namespace android {
-using android::hardware::audio::common::V2_0::Uuid;
-using android::hardware::soundtrigger::V2_0::ConfidenceLevel;
-using android::hardware::soundtrigger::V2_0::PhraseRecognitionExtra;
-using android::hardware::soundtrigger::V2_0::SoundModelType;
-using android::hardware::soundtrigger::V2_0::SoundModelHandle;
-using android::hardware::soundtrigger::V2_0::ISoundTriggerHw;
-using android::hardware::soundtrigger::V2_0::ISoundTriggerHwCallback;
+using ::android::hardware::audio::common::V2_0::Uuid;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::soundtrigger::V2_0::ConfidenceLevel;
+using ::android::hardware::soundtrigger::V2_0::PhraseRecognitionExtra;
+using ::android::hardware::soundtrigger::V2_0::SoundModelType;
+using ::android::hardware::soundtrigger::V2_0::SoundModelHandle;
+using ::android::hardware::soundtrigger::V2_0::ISoundTriggerHw;
+using V2_0_ISoundTriggerHwCallback =
+ ::android::hardware::soundtrigger::V2_0::ISoundTriggerHwCallback;
+using V2_1_ISoundTriggerHw =
+ ::android::hardware::soundtrigger::V2_1::ISoundTriggerHw;
+using V2_1_ISoundTriggerHwCallback =
+ ::android::hardware::soundtrigger::V2_1::ISoundTriggerHwCallback;
+using ::android::hidl::memory::V1_0::IMemory;
class SoundTriggerHalHidl : public SoundTriggerHalInterface,
- public virtual ISoundTriggerHwCallback
+ public virtual V2_1_ISoundTriggerHwCallback
{
public:
@@ -84,11 +94,17 @@
// ISoundTriggerHwCallback
virtual ::android::hardware::Return<void> recognitionCallback(
- const ISoundTriggerHwCallback::RecognitionEvent& event, CallbackCookie cookie);
+ const V2_0_ISoundTriggerHwCallback::RecognitionEvent& event, CallbackCookie cookie);
virtual ::android::hardware::Return<void> phraseRecognitionCallback(
- const ISoundTriggerHwCallback::PhraseRecognitionEvent& event, int32_t cookie);
+ const V2_0_ISoundTriggerHwCallback::PhraseRecognitionEvent& event, int32_t cookie);
virtual ::android::hardware::Return<void> soundModelCallback(
- const ISoundTriggerHwCallback::ModelEvent& event, CallbackCookie cookie);
+ const V2_0_ISoundTriggerHwCallback::ModelEvent& event, CallbackCookie cookie);
+ virtual ::android::hardware::Return<void> recognitionCallback_2_1(
+ const RecognitionEvent& event, CallbackCookie cookie);
+ virtual ::android::hardware::Return<void> phraseRecognitionCallback_2_1(
+ const PhraseRecognitionEvent& event, int32_t cookie);
+ virtual ::android::hardware::Return<void> soundModelCallback_2_1(
+ const ModelEvent& event, CallbackCookie cookie);
private:
class SoundModel : public RefBase {
public:
@@ -124,25 +140,48 @@
void convertTriggerPhraseToHal(
ISoundTriggerHw::Phrase *halTriggerPhrase,
const struct sound_trigger_phrase *triggerPhrase);
- ISoundTriggerHw::SoundModel *convertSoundModelToHal(
+ void convertTriggerPhrasesToHal(
+ hidl_vec<ISoundTriggerHw::Phrase> *halTriggerPhrases,
+ struct sound_trigger_phrase_sound_model *keyPhraseModel);
+ void convertSoundModelToHal(ISoundTriggerHw::SoundModel *halModel,
const struct sound_trigger_sound_model *soundModel);
+ std::pair<bool, sp<IMemory>> convertSoundModelToHal(
+ V2_1_ISoundTriggerHw::SoundModel *halModel,
+ const struct sound_trigger_sound_model *soundModel)
+ __attribute__((warn_unused_result));
+ void convertPhraseSoundModelToHal(ISoundTriggerHw::PhraseSoundModel *halKeyPhraseModel,
+ const struct sound_trigger_sound_model *soundModel);
+ std::pair<bool, sp<IMemory>> convertPhraseSoundModelToHal(
+ V2_1_ISoundTriggerHw::PhraseSoundModel *halKeyPhraseModel,
+ const struct sound_trigger_sound_model *soundModel)
+ __attribute__((warn_unused_result));
void convertPhraseRecognitionExtraToHal(
PhraseRecognitionExtra *halExtra,
const struct sound_trigger_phrase_recognition_extra *extra);
- ISoundTriggerHw::RecognitionConfig *convertRecognitionConfigToHal(
+ void convertRecognitionConfigToHal(ISoundTriggerHw::RecognitionConfig *halConfig,
const struct sound_trigger_recognition_config *config);
+ std::pair<bool, sp<IMemory>> convertRecognitionConfigToHal(
+ V2_1_ISoundTriggerHw::RecognitionConfig *halConfig,
+ const struct sound_trigger_recognition_config *config)
+ __attribute__((warn_unused_result));
struct sound_trigger_model_event *convertSoundModelEventFromHal(
- const ISoundTriggerHwCallback::ModelEvent *halEvent);
+ const V2_0_ISoundTriggerHwCallback::ModelEvent *halEvent);
void convertPhraseRecognitionExtraFromHal(
struct sound_trigger_phrase_recognition_extra *extra,
const PhraseRecognitionExtra *halExtra);
+ struct sound_trigger_phrase_recognition_event* convertPhraseRecognitionEventFromHal(
+ const V2_0_ISoundTriggerHwCallback::PhraseRecognitionEvent *halPhraseEvent);
struct sound_trigger_recognition_event *convertRecognitionEventFromHal(
- const ISoundTriggerHwCallback::RecognitionEvent *halEvent);
+ const V2_0_ISoundTriggerHwCallback::RecognitionEvent *halEvent);
+ void fillRecognitionEventFromHal(
+ struct sound_trigger_recognition_event *event,
+ const V2_0_ISoundTriggerHwCallback::RecognitionEvent *halEvent);
uint32_t nextUniqueId();
sp<ISoundTriggerHw> getService();
+ sp<V2_1_ISoundTriggerHw> toService2_1(const sp<ISoundTriggerHw>& s);
sp<SoundModel> getModel(sound_model_handle_t handle);
sp<SoundModel> removeModel(sound_model_handle_t handle);
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 22519a3..a7d6e83 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -206,9 +206,10 @@
service->sendRecognitionEvent(event, module);
}
-sp<IMemory> SoundTriggerHwService::prepareRecognitionEvent_l(
+sp<IMemory> SoundTriggerHwService::prepareRecognitionEvent(
struct sound_trigger_recognition_event *event)
{
+ AutoMutex lock(mMemoryDealerLock);
sp<IMemory> eventMemory;
//sanitize event
@@ -216,21 +217,21 @@
case SOUND_MODEL_TYPE_KEYPHRASE:
ALOGW_IF(event->data_size != 0 && event->data_offset !=
sizeof(struct sound_trigger_phrase_recognition_event),
- "prepareRecognitionEvent_l(): invalid data offset %u for keyphrase event type",
+ "prepareRecognitionEvent(): invalid data offset %u for keyphrase event type",
event->data_offset);
event->data_offset = sizeof(struct sound_trigger_phrase_recognition_event);
break;
case SOUND_MODEL_TYPE_GENERIC:
ALOGW_IF(event->data_size != 0 && event->data_offset !=
sizeof(struct sound_trigger_generic_recognition_event),
- "prepareRecognitionEvent_l(): invalid data offset %u for generic event type",
+ "prepareRecognitionEvent(): invalid data offset %u for generic event type",
event->data_offset);
event->data_offset = sizeof(struct sound_trigger_generic_recognition_event);
break;
case SOUND_MODEL_TYPE_UNKNOWN:
ALOGW_IF(event->data_size != 0 && event->data_offset !=
sizeof(struct sound_trigger_recognition_event),
- "prepareRecognitionEvent_l(): invalid data offset %u for unknown event type",
+ "prepareRecognitionEvent(): invalid data offset %u for unknown event type",
event->data_offset);
event->data_offset = sizeof(struct sound_trigger_recognition_event);
break;
@@ -251,30 +252,19 @@
void SoundTriggerHwService::sendRecognitionEvent(struct sound_trigger_recognition_event *event,
Module *module)
- {
- AutoMutex lock(mServiceLock);
- if (module == NULL) {
- return;
- }
- sp<IMemory> eventMemory = prepareRecognitionEvent_l(event);
- if (eventMemory == 0) {
- return;
- }
- sp<Module> strongModule;
- for (size_t i = 0; i < mModules.size(); i++) {
- if (mModules.valueAt(i).get() == module) {
- strongModule = mModules.valueAt(i);
- break;
- }
- }
- if (strongModule == 0) {
- return;
- }
+{
+ if (module == NULL) {
+ return;
+ }
+ sp<IMemory> eventMemory = prepareRecognitionEvent(event);
+ if (eventMemory == 0) {
+ return;
+ }
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
eventMemory);
- callbackEvent->setModule(strongModule);
- sendCallbackEvent_l(callbackEvent);
+ callbackEvent->setModule(module);
+ sendCallbackEvent(callbackEvent);
}
// static
@@ -293,8 +283,9 @@
service->sendSoundModelEvent(event, module);
}
-sp<IMemory> SoundTriggerHwService::prepareSoundModelEvent_l(struct sound_trigger_model_event *event)
+sp<IMemory> SoundTriggerHwService::prepareSoundModelEvent(struct sound_trigger_model_event *event)
{
+ AutoMutex lock(mMemoryDealerLock);
sp<IMemory> eventMemory;
size_t size = event->data_offset + event->data_size;
@@ -311,30 +302,20 @@
void SoundTriggerHwService::sendSoundModelEvent(struct sound_trigger_model_event *event,
Module *module)
{
- AutoMutex lock(mServiceLock);
- sp<IMemory> eventMemory = prepareSoundModelEvent_l(event);
+ sp<IMemory> eventMemory = prepareSoundModelEvent(event);
if (eventMemory == 0) {
return;
}
- sp<Module> strongModule;
- for (size_t i = 0; i < mModules.size(); i++) {
- if (mModules.valueAt(i).get() == module) {
- strongModule = mModules.valueAt(i);
- break;
- }
- }
- if (strongModule == 0) {
- return;
- }
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SOUNDMODEL,
eventMemory);
- callbackEvent->setModule(strongModule);
- sendCallbackEvent_l(callbackEvent);
+ callbackEvent->setModule(module);
+ sendCallbackEvent(callbackEvent);
}
-sp<IMemory> SoundTriggerHwService::prepareServiceStateEvent_l(sound_trigger_service_state_t state)
+sp<IMemory> SoundTriggerHwService::prepareServiceStateEvent(sound_trigger_service_state_t state)
{
+ AutoMutex lock(mMemoryDealerLock);
sp<IMemory> eventMemory;
size_t size = sizeof(sound_trigger_service_state_t);
@@ -347,45 +328,33 @@
return eventMemory;
}
-// call with mServiceLock held
-void SoundTriggerHwService::sendServiceStateEvent_l(sound_trigger_service_state_t state,
+void SoundTriggerHwService::sendServiceStateEvent(sound_trigger_service_state_t state,
Module *module)
{
- sp<IMemory> eventMemory = prepareServiceStateEvent_l(state);
+ sp<IMemory> eventMemory = prepareServiceStateEvent(state);
if (eventMemory == 0) {
return;
}
- sp<Module> strongModule;
- for (size_t i = 0; i < mModules.size(); i++) {
- if (mModules.valueAt(i).get() == module) {
- strongModule = mModules.valueAt(i);
- break;
- }
- }
- if (strongModule == 0) {
- return;
- }
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
eventMemory);
- callbackEvent->setModule(strongModule);
- sendCallbackEvent_l(callbackEvent);
+ callbackEvent->setModule(module);
+ sendCallbackEvent(callbackEvent);
}
-void SoundTriggerHwService::sendServiceStateEvent_l(sound_trigger_service_state_t state,
- ModuleClient *moduleClient)
+void SoundTriggerHwService::sendServiceStateEvent(sound_trigger_service_state_t state,
+ ModuleClient *moduleClient)
{
- sp<IMemory> eventMemory = prepareServiceStateEvent_l(state);
+ sp<IMemory> eventMemory = prepareServiceStateEvent(state);
if (eventMemory == 0) {
return;
}
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
eventMemory);
callbackEvent->setModuleClient(moduleClient);
- sendCallbackEvent_l(callbackEvent);
+ sendCallbackEvent(callbackEvent);
}
-// call with mServiceLock held
-void SoundTriggerHwService::sendCallbackEvent_l(const sp<CallbackEvent>& event)
+void SoundTriggerHwService::sendCallbackEvent(const sp<CallbackEvent>& event)
{
mCallbackThread->sendCallbackEvent(event);
}
@@ -404,6 +373,19 @@
if (moduleClient == 0) {
return;
}
+ } else {
+ // Sanity check on this being a Module we know about.
+ bool foundModule = false;
+ for (size_t i = 0; i < mModules.size(); i++) {
+ if (mModules.valueAt(i).get() == module.get()) {
+ foundModule = true;
+ break;
+ }
+ }
+ if (!foundModule) {
+ ALOGE("onCallbackEvent for unknown module");
+ return;
+ }
}
}
if (module != 0) {
@@ -757,11 +739,12 @@
return;
}
+ Vector< sp<ModuleClient> > clients;
+
switch (event->mType) {
case CallbackEvent::TYPE_RECOGNITION: {
struct sound_trigger_recognition_event *recognitionEvent =
(struct sound_trigger_recognition_event *)eventMemory->pointer();
- sp<ISoundTriggerClient> client;
{
AutoMutex lock(mLock);
sp<Model> model = getModel(recognitionEvent->model);
@@ -776,16 +759,12 @@
recognitionEvent->capture_session = model->mCaptureSession;
model->mState = Model::STATE_IDLE;
- client = model->mModuleClient->client();
- }
- if (client != 0) {
- client->onRecognitionEvent(eventMemory);
+ clients.add(model->mModuleClient);
}
} break;
case CallbackEvent::TYPE_SOUNDMODEL: {
struct sound_trigger_model_event *soundmodelEvent =
(struct sound_trigger_model_event *)eventMemory->pointer();
- sp<ISoundTriggerClient> client;
{
AutoMutex lock(mLock);
sp<Model> model = getModel(soundmodelEvent->model);
@@ -793,29 +772,26 @@
ALOGW("%s model == 0", __func__);
return;
}
- client = model->mModuleClient->client();
- }
- if (client != 0) {
- client->onSoundModelEvent(eventMemory);
+ clients.add(model->mModuleClient);
}
} break;
case CallbackEvent::TYPE_SERVICE_STATE: {
- Vector< sp<ISoundTriggerClient> > clients;
{
AutoMutex lock(mLock);
for (size_t i = 0; i < mModuleClients.size(); i++) {
if (mModuleClients[i] != 0) {
- clients.add(mModuleClients[i]->client());
+ clients.add(mModuleClients[i]);
}
}
}
- for (size_t i = 0; i < clients.size(); i++) {
- clients[i]->onServiceStateChange(eventMemory);
- }
} break;
default:
LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
}
+
+ for (size_t i = 0; i < clients.size(); i++) {
+ clients[i]->onCallbackEvent(event);
+ }
}
sp<SoundTriggerHwService::Model> SoundTriggerHwService::Module::getModel(
@@ -878,7 +854,7 @@
event.common.type = model->mType;
event.common.model = model->mHandle;
event.common.data_size = 0;
- sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&event.common);
+ sp<IMemory> eventMemory = service->prepareRecognitionEvent(&event.common);
if (eventMemory != 0) {
events.add(eventMemory);
}
@@ -889,7 +865,7 @@
event.common.type = model->mType;
event.common.model = model->mHandle;
event.common.data_size = 0;
- sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&event.common);
+ sp<IMemory> eventMemory = service->prepareRecognitionEvent(&event.common);
if (eventMemory != 0) {
events.add(eventMemory);
}
@@ -900,7 +876,7 @@
event.common.type = model->mType;
event.common.model = model->mHandle;
event.common.data_size = 0;
- sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&event.common);
+ sp<IMemory> eventMemory = service->prepareRecognitionEvent(&event.common);
if (eventMemory != 0) {
events.add(eventMemory);
}
@@ -915,11 +891,11 @@
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
events[i]);
callbackEvent->setModule(this);
- service->sendCallbackEvent_l(callbackEvent);
+ service->sendCallbackEvent(callbackEvent);
}
exit:
- service->sendServiceStateEvent_l(state, this);
+ service->sendServiceStateEvent(state, this);
}
@@ -1064,7 +1040,7 @@
return;
}
}
- service->sendServiceStateEvent_l(state, this);
+ service->sendServiceStateEvent(state, this);
}
void SoundTriggerHwService::ModuleClient::onCallbackEvent(const sp<CallbackEvent>& event)
@@ -1077,19 +1053,26 @@
return;
}
- switch (event->mType) {
- case CallbackEvent::TYPE_SERVICE_STATE: {
- sp<ISoundTriggerClient> client;
- {
- AutoMutex lock(mLock);
- client = mClient;
- }
- if (client !=0 ) {
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ client = mClient;
+ }
+
+ if (client != 0) {
+ switch (event->mType) {
+ case CallbackEvent::TYPE_RECOGNITION: {
+ client->onRecognitionEvent(eventMemory);
+ } break;
+ case CallbackEvent::TYPE_SOUNDMODEL: {
+ client->onSoundModelEvent(eventMemory);
+ } break;
+ case CallbackEvent::TYPE_SERVICE_STATE: {
client->onServiceStateChange(eventMemory);
+ } break;
+ default:
+ LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
}
- } break;
- default:
- LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
}
}
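
The .cpp changes above follow one idiom throughout: take the internal lock only long enough to snapshot the strong ModuleClient references, then invoke the client callbacks after the lock is released, so a re-entrant call from the client side cannot deadlock against the service. Below is a minimal standalone sketch of that idiom under stated assumptions: it uses std::mutex and std::shared_ptr instead of the Android AutoMutex/sp types, and the names Dispatcher, Listener and notifyAll are hypothetical, not part of the SoundTrigger service API.

// Sketch only: "collect under lock, call out unlocked".
#include <mutex>
#include <vector>
#include <memory>

struct Listener {
    virtual ~Listener() = default;
    virtual void onEvent(int event) = 0;   // may call back into Dispatcher
};

class Dispatcher {
public:
    void addListener(std::shared_ptr<Listener> l) {
        std::lock_guard<std::mutex> guard(mLock);
        mListeners.push_back(std::move(l));
    }

    void notifyAll(int event) {
        std::vector<std::shared_ptr<Listener>> snapshot;
        {
            std::lock_guard<std::mutex> guard(mLock);   // hold the lock only to copy
            snapshot = mListeners;
        }
        for (auto& l : snapshot) {                      // dispatch with the lock released,
            l->onEvent(event);                          // so re-entrant calls cannot deadlock
        }
    }

private:
    std::mutex mLock;
    std::vector<std::shared_ptr<Listener>> mListeners;  // strong refs keep listeners alive
};

Copying strong references into the local snapshot also keeps each listener alive for the duration of its callback, which is the same role the Vector< sp<ModuleClient> > plays in the refactored onCallbackEvent.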
diff --git a/services/soundtrigger/SoundTriggerHwService.h b/services/soundtrigger/SoundTriggerHwService.h
index 95efc4b..708fc98 100644
--- a/services/soundtrigger/SoundTriggerHwService.h
+++ b/services/soundtrigger/SoundTriggerHwService.h
@@ -214,19 +214,19 @@
};
static void recognitionCallback(struct sound_trigger_recognition_event *event, void *cookie);
- sp<IMemory> prepareRecognitionEvent_l(struct sound_trigger_recognition_event *event);
+ sp<IMemory> prepareRecognitionEvent(struct sound_trigger_recognition_event *event);
void sendRecognitionEvent(struct sound_trigger_recognition_event *event, Module *module);
static void soundModelCallback(struct sound_trigger_model_event *event, void *cookie);
- sp<IMemory> prepareSoundModelEvent_l(struct sound_trigger_model_event *event);
+ sp<IMemory> prepareSoundModelEvent(struct sound_trigger_model_event *event);
void sendSoundModelEvent(struct sound_trigger_model_event *event, Module *module);
- sp<IMemory> prepareServiceStateEvent_l(sound_trigger_service_state_t state);
- void sendServiceStateEvent_l(sound_trigger_service_state_t state, Module *module);
- void sendServiceStateEvent_l(sound_trigger_service_state_t state,
- ModuleClient *moduleClient);
+ sp<IMemory> prepareServiceStateEvent(sound_trigger_service_state_t state);
+ void sendServiceStateEvent(sound_trigger_service_state_t state, Module *module);
+ void sendServiceStateEvent(sound_trigger_service_state_t state,
+ ModuleClient *moduleClient);
- void sendCallbackEvent_l(const sp<CallbackEvent>& event);
+ void sendCallbackEvent(const sp<CallbackEvent>& event);
void onCallbackEvent(const sp<CallbackEvent>& event);
private:
@@ -238,6 +238,7 @@
DefaultKeyedVector< sound_trigger_module_handle_t, sp<Module> > mModules;
sp<CallbackThread> mCallbackThread;
sp<MemoryDealer> mMemoryDealer;
+ Mutex mMemoryDealerLock;
bool mCaptureState;
};
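
The header hunk adds mMemoryDealerLock next to the existing mMemoryDealer, and the renamed prepare*Event methods lock only that mutex, so event memory can be allocated without holding the service-wide lock. A minimal sketch of that split into two narrow locks follows; EventService, mStateLock, mDealerLock and Event are illustrative stand-ins under assumption, not the real MemoryDealer/IMemory interfaces.

// Sketch only: one coarse lock split into a state lock and an allocator lock.
#include <mutex>
#include <vector>
#include <cstdint>
#include <cstring>

struct Event { std::vector<uint8_t> data; };

class EventService {
public:
    // Needs only the allocator lock, so callers never have to hold mStateLock here.
    Event prepareEvent(const void* payload, size_t size) {
        std::lock_guard<std::mutex> guard(mDealerLock);
        Event e;
        e.data.resize(size);
        std::memcpy(e.data.data(), payload, size);
        return e;
    }

    // State bookkeeping keeps its own lock and never nests inside mDealerLock.
    void setEnabled(bool enabled) {
        std::lock_guard<std::mutex> guard(mStateLock);
        mEnabled = enabled;
    }

private:
    std::mutex mStateLock;   // analogue of the former service-wide lock
    std::mutex mDealerLock;  // analogue of mMemoryDealerLock: guards allocation only
    bool mEnabled = false;
};

Keeping the two mutexes independent (never acquired one inside the other) is what lets prepareRecognitionEvent and friends be called from paths that no longer take the service lock.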
diff --git a/tools/OWNERS b/tools/OWNERS
index 6dcb035..f9cb567 100644
--- a/tools/OWNERS
+++ b/tools/OWNERS
@@ -1 +1 @@
-gkasten@android.com
+gkasten@google.com
diff --git a/tools/resampler_tools/OWNERS b/tools/resampler_tools/OWNERS
new file mode 100644
index 0000000..b4a6798
--- /dev/null
+++ b/tools/resampler_tools/OWNERS
@@ -0,0 +1 @@
+hunga@google.com